Merge branch 'master' into permission-cache-impl

This commit is contained in:
vsmk98 2020-03-27 11:06:19 +08:00
commit 75896d38bf
851 changed files with 202002 additions and 21998 deletions

.gitignore vendored
View File

@ -46,3 +46,6 @@ profile.cov
/dashboard/assets/package-lock.json
**/yarn-error.log
# QUORUM
generated-release-notes.md

View File

@ -79,26 +79,7 @@ matrix:
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
go run build/ci.go install
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
QUORUM_IGNORE_TEST_PACKAGES=github.com/ethereum/go-ethereum/swarm go run build/ci.go test -coverage $TEST_PACKAGES
else
go run build/ci.go test -coverage $TEST_PACKAGES
fi
- if: tag IS blank
os: osx
osx_image: xcode9.2 # so we don't have to deal with Kernel Extension Consent UI which is never possible in CI
script:
- |
brew update
brew install caskroom/cask/brew-cask
brew cask install osxfuse
go run build/ci.go install
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
QUORUM_IGNORE_TEST_PACKAGES=github.com/ethereum/go-ethereum/swarm go run build/ci.go test -coverage $TEST_PACKAGES
else
go run build/ci.go test -coverage $TEST_PACKAGES
fi
QUORUM_IGNORE_TEST_PACKAGES=github.com/ethereum/go-ethereum/swarm,github.com/ethereum/go-ethereum/cmd/swarm go run build/ci.go test -coverage $TEST_PACKAGES
- if: tag IS present
os: linux
dist: xenial
@ -111,7 +92,6 @@ matrix:
- if: tag IS present
os: osx
osx_image: xcode9.2
env: OUTPUT_FILE=geth_${TRAVIS_TAG}_darwin_amd64.tar.gz
script:
- build/env.sh go run build/ci.go install ./cmd/geth

View File

@ -155,3 +155,44 @@ geth-windows-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64
# QUORUM - BEGIN
RELEASE_NOTES_FILE := generated-release-notes.md
LAST_RELEASE_VERSION := $(shell git describe --tags --abbrev=0 @^)
REQUIRED_ENV_VARS := QUORUM_RELEASE BINTRAY_USER BINTRAY_API_KEY GITHUB_TOKEN
release-prepare: release-tools
@[ "${QUORUM_RELEASE}" ] || (echo "Please provide QUORUM_RELEASE env variable" ; exit 1)
@rm -f $(RELEASE_NOTES_FILE)
@echo "Last release: $(LAST_RELEASE_VERSION)"
@echo "This release: ${QUORUM_RELEASE}"
@echo "${QUORUM_RELEASE}\n\n" > $(RELEASE_NOTES_FILE)
@git log --pretty=format:%s $(LAST_RELEASE_VERSION)..@ >> $(RELEASE_NOTES_FILE)
@echo "$(RELEASE_NOTES_FILE) has been created"
release: release-tools check-release-env
@jfrog bt version-show quorumengineering/quorum/geth/${QUORUM_RELEASE} --key ${BINTRAY_API_KEY} --user ${BINTRAY_USER}
@echo "\n\n| Filename | SHA256 Hash |" >> $(RELEASE_NOTES_FILE)
@echo "|:---------|:------------|" >> $(RELEASE_NOTES_FILE)
@curl -s -u ${BINTRAY_USER}:${BINTRAY_API_KEY} https://api.bintray.com/packages/quorumengineering/quorum/geth/versions/${QUORUM_RELEASE}/files \
| jq '.[] | select(.name | endswith(".asc") | not) | "|[\(.name)](https://bintray.com/quorumengineering/quorum/download_file?file_path=\(.path))|`\(.sha256)`|"' -r \
>> $(RELEASE_NOTES_FILE)
@hub release create -d -F $(RELEASE_NOTES_FILE) ${QUORUM_RELEASE}
# make sure all API Keys are set
check-release-env: $(REQUIRED_ENV_VARS)
@[ -f "$(RELEASE_NOTES_FILE)" ] || (echo "Please run 'make release-prepare' and edit the release notes"; exit 1)
$(REQUIRED_ENV_VARS):
@[ "${$@}" ] || (echo "Please provide $@ env variable" ; exit 1)
release-tools:
ifeq (, $(shell which hub))
@echo "Please install Github CLI from https://hub.github.com"
endif
ifeq (, $(shell which jfrog))
@echo "Please install JFrog CLI from https://jfrog.com/getcli"
endif
ifeq (, $(shell which jq))
@echo "Please install jq from https://stedolan.github.io/jq/download"
endif
# QUORUM - END
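A usage sketch of the release flow above (the release version and credential values are hypothetical; the variables are exactly those listed in REQUIRED_ENV_VARS):

QUORUM_RELEASE=v2.5.0 make release-prepare
# review and edit generated-release-notes.md, then:
QUORUM_RELEASE=v2.5.0 BINTRAY_USER=user BINTRAY_API_KEY=key GITHUB_TOKEN=token make release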

View File

@ -2,7 +2,10 @@
<a href="https://bit.ly/quorum-slack" target="_blank" rel="noopener"><img title="Quorum Slack" src="https://clh7rniov2.execute-api.us-east-1.amazonaws.com/Express/badge.svg" alt="Quorum Slack" /></a>
[![Build Status](https://travis-ci.org/jpmorganchase/quorum.svg?branch=master)](https://travis-ci.org/jpmorganchase/quorum)
[![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/quorumengineering/quorum)](https://hub.docker.com/r/quorumengineering/quorum/builds)
[![Documentation Status](https://readthedocs.org/projects/goquorum/badge/?version=latest)](http://docs.goquorum.com/en/latest/?badge=latest)
[![Download](https://api.bintray.com/packages/quorumengineering/quorum/geth/images/download.svg)](https://bintray.com/quorumengineering/quorum/geth/_latestVersion)
[![Docker Pulls](https://img.shields.io/docker/pulls/quorumengineering/quorum)](https://hub.docker.com/r/quorumengineering/quorum)
Quorum is an Ethereum-based distributed ledger protocol with transaction/contract privacy and new consensus mechanisms.
@ -44,7 +47,7 @@ The official docker containers can be found under https://hub.docker.com/u/quoru
* [Tessera](https://github.com/jpmorganchase/tessera): Java implementation of peer-to-peer encrypted message exchange for transaction privacy
* Quorum supported consensuses
* [Raft Consensus Documentation](https://docs.goquorum.com/en/latest/Consensus/raft/)
* [Istanbul BFT Consensus Documentation](https://github.com/ethereum/EIPs/issues/650): [RPC API](https://docs.goquorum.com/en/latest/Consensus/istanbul-rpc-api/) and [technical article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff). __Please note__ that updated istanbul-tools is now hosted in [this](https://github.com/jpmorganchase/istanbul-tools/) repository
* [Istanbul BFT Consensus Documentation](https://github.com/ethereum/EIPs/issues/650): [RPC API](https://docs.goquorum.com/en/latest/Consensus/ibft/istanbul-rpc-api.md) and [technical article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff). __Please note__ that updated istanbul-tools is now hosted in [this](https://github.com/jpmorganchase/istanbul-tools/) repository
* [Clique POA Consensus Documentation](https://github.com/ethereum/EIPs/issues/225) and a [guide to setup clique json](https://modalduality.org/posts/puppeth/) with [puppeth](https://blog.ethereum.org/2017/04/14/geth-1-6-puppeth-master/)
* Zero Knowledge on Quorum
* [ZSL](https://github.com/jpmorganchase/quorum/wiki/ZSL) wiki page and [documentation](https://github.com/jpmorganchase/zsl-q/blob/master/README.md)
@ -62,7 +65,7 @@ The following Quorum-related libraries/applications have been created by Third P
* [QuorumNetworkManager](https://github.com/ConsenSys/QuorumNetworkManager) - makes creating & managing Quorum networks easy
* [ERC20 REST service](https://github.com/blk-io/erc20-rest-service) - a Quorum-supported RESTful service for creating and managing ERC-20 tokens
* [Nethereum Quorum](https://github.com/Nethereum/Nethereum/tree/master/src/Nethereum.Quorum) - a .NET Quorum adapter
* [web3j-quorum](https://github.com/web3j/quorum) - an extension to the web3j Java library providing support for the Quorum API
* [web3j-quorum](https://github.com/web3j/web3j-quorum) - an extension to the web3j Java library providing support for the Quorum API
* [Apache Camel](http://github.com/apache/camel) - an Apache Camel component providing support for the Quorum API using the web3j library. Here is an article describing how to use Apache Camel with Ethereum and Quorum: https://medium.com/@bibryam/enterprise-integration-for-ethereum-fa67a1577d43
## Contributing

View File

@ -81,7 +81,9 @@ type ContractTransactor interface {
// for setting a reasonable default.
EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
// SendTransaction injects the transaction into the pending pool for execution.
SendTransaction(ctx context.Context, tx *types.Transaction) error
SendTransaction(ctx context.Context, tx *types.Transaction, args PrivateTxArgs) error
// PreparePrivateTransaction sends the private transaction to Tessera/Constellation's /storeraw API using HTTP
PreparePrivateTransaction(data []byte, privateFrom string) ([]byte, error)
}
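For context, a minimal sketch of how an implementation might satisfy PreparePrivateTransaction by POSTing to the transaction manager's /storeraw endpoint; the request/response shapes and helper names here are assumptions for illustration, not taken from this commit:

package privatetx

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// storerawRequest/storerawResponse are assumed shapes for Tessera's /storeraw API.
type storerawRequest struct {
	Payload string `json:"payload"`        // base64-encoded transaction data
	From    string `json:"from,omitempty"` // sender's Tessera public key
}

type storerawResponse struct {
	Key string `json:"key"` // base64-encoded hash stored by the transaction manager
}

// PreparePrivateTransaction stores the raw payload with the transaction manager
// and returns the hash that will replace the payload in the private transaction.
func PreparePrivateTransaction(tmURL string, data []byte, privateFrom string) ([]byte, error) {
	body, err := json.Marshal(storerawRequest{
		Payload: base64.StdEncoding.EncodeToString(data),
		From:    privateFrom,
	})
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(tmURL+"/storeraw", "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("storeraw: unexpected status %s", resp.Status)
	}
	var out storerawResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return base64.StdEncoding.DecodeString(out.Key)
}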
// ContractFilterer defines the methods needed to access log events using one-off

View File

@ -309,7 +309,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// SendTransaction updates the pending block to include the given transaction.
// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction, args bind.PrivateTxArgs) error {
b.mu.Lock()
defer b.mu.Unlock()
@ -335,6 +335,11 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
return nil
}
// PreparePrivateTransaction dummy implementation
func (b *SimulatedBackend) PreparePrivateTransaction(data []byte, privateFrom string) ([]byte, error) {
return data, nil
}
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//

View File

@ -34,6 +34,13 @@ import (
// sign the transaction before submission.
type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error)
// Quorum
//
// Additional arguments in order to support transaction privacy
type PrivateTxArgs struct {
PrivateFor []string `json:"privateFor"`
}
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
@ -54,6 +61,10 @@ type TransactOpts struct {
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
// Quorum
PrivateFrom string // The public key of the Tessera/Constellation identity to send this tx from.
PrivateFor []string // The public keys of the Tessera/Constellation identities this tx is intended for.
}
// FilterOpts is the collection of options to fine tune filtering for events
@ -231,16 +242,36 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
} else {
rawTx = types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input)
}
// If this transaction is private, we need to substitute the data payload
// with the hash of the transaction from tessera/constellation.
if opts.PrivateFor != nil {
var payload []byte
payload, err = c.transactor.PreparePrivateTransaction(rawTx.Data(), opts.PrivateFrom)
if err != nil {
return nil, err
}
rawTx = c.createPrivateTransaction(rawTx, payload)
}
// Choose signer to sign transaction
if opts.Signer == nil {
return nil, errors.New("no signer to authorize the transaction with")
}
signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx)
var signedTx *types.Transaction
if rawTx.IsPrivate() {
signedTx, err = opts.Signer(types.QuorumPrivateTxSigner{}, opts.From, rawTx)
} else {
signedTx, err = opts.Signer(types.HomesteadSigner{}, opts.From, rawTx)
}
if err != nil {
return nil, err
}
if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil {
if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx, PrivateTxArgs{PrivateFor: opts.PrivateFor}); err != nil {
return nil, err
}
return signedTx, nil
}
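A sketch of what a caller sees after this change, assuming bind.NewKeyedTransactor from upstream go-ethereum and an abigen-generated DeployMyContract binding (the binding and the key strings are hypothetical):

// deployPrivate shows the new TransactOpts fields in use.
func deployPrivate(key *ecdsa.PrivateKey, client bind.ContractBackend) error {
	opts := bind.NewKeyedTransactor(key)
	opts.PrivateFrom = "ROAZBWtSacxXQrOe3FGAqJDyJjFePR5ce4TSIzmJ0Bc="          // hypothetical Tessera sender key
	opts.PrivateFor = []string{"QfeDAys9MPDs2XHExtc84jKGHxZg/aj52DTh0vtA3Xc="} // hypothetical recipient key
	// transact() now routes the payload through PreparePrivateTransaction,
	// marks the transaction private and signs it with types.QuorumPrivateTxSigner.
	_, _, _, err := DeployMyContract(opts, client)
	return err
}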
@ -340,6 +371,18 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
return parseTopics(out, indexed, log.Topics[1:])
}
// createPrivateTransaction replaces the payload of a private transaction with the hash from Tessera/Constellation
func (c *BoundContract) createPrivateTransaction(tx *types.Transaction, payload []byte) *types.Transaction {
var privateTx *types.Transaction
if tx.To() == nil {
privateTx = types.NewContractCreation(tx.Nonce(), tx.Value(), tx.Gas(), tx.GasPrice(), payload)
} else {
privateTx = types.NewTransaction(tx.Nonce(), c.address, tx.Value(), tx.Gas(), tx.GasPrice(), payload)
}
privateTx.SetPrivate()
return privateTx
}
// ensureContext is a helper method to ensure a context is not nil, even if the
// user specified it as such.
func ensureContext(ctx context.Context) context.Context {

View File

@ -76,7 +76,7 @@ func TestWaitDeployed(t *testing.T) {
}()
// Send and mine the transaction.
backend.SendTransaction(ctx, tx)
backend.SendTransaction(ctx, tx, bind.PrivateTxArgs{})
backend.Commit()
select {

View File

@ -40,7 +40,7 @@ wget https://github.com/jpmorganchase/tessera/releases/download/tessera-0.8/tess
echo "---> tessera done"
echo "---> getting gauge jar ..."
wget https://github.com/getgauge/gauge/releases/download/v1.0.4/gauge-1.0.4-linux.x86_64.zip -O gauge.zip -q
wget https://github.com/getgauge/gauge/releases/download/v1.0.7/gauge-1.0.7-linux.x86_64.zip -O gauge.zip -q
sudo unzip -o gauge.zip -d /usr/local/bin
gauge telemetry off
cd ${TRAVIS_HOME}/quorum-acceptance-tests

View File

@ -41,6 +41,8 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
@ -483,7 +485,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
continue
}
// Submit the transaction and mark as funded if successful
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
if err := f.client.SendTransaction(context.Background(), signed, bind.PrivateTxArgs{}); err != nil {
f.lock.Unlock()
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send transaction transmission error to client", "err", err)

View File

@ -155,7 +155,24 @@ Passphrase: {{.InputLine "foobar"}}
}
}
func TestGethDoesntStartWithoutPrivateTransactionManagerVariableSet(t *testing.T) {
defer SetResetPrivateConfig("")()
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
"--unlock", "f466859ead1932d743d622cb74fc058882e8648a")
geth.ExpectExit()
expectedText := "the PRIVATE_CONFIG environment variable must be specified for Quorum"
result := strings.TrimSpace(geth.StderrText())
if result != expectedText {
geth.Fatalf("bad stderr text. want '%s', got '%s'", expectedText, result)
}
}
func TestUnlockFlagWrongPassword(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@ -222,6 +239,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) {
}
func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
datadir := tmpDatadirWithKeystore(t)
geth := runGeth(t,
"--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",
@ -271,6 +289,7 @@ In order to avoid this warning, you need to remove the following duplicate key f
}
func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes")
geth := runGeth(t,
"--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0",

View File

@ -109,7 +109,7 @@ be gzipped.`,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
The import-preimages command imports hash preimages from an RLP encoded stream.`,
}
exportPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(exportPreimages),
@ -232,7 +232,7 @@ func importChain(ctx *cli.Context) error {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, true)
defer chainDb.Close()
// Start periodically gathering memory profiles
@ -326,7 +326,7 @@ func exportChain(ctx *cli.Context) error {
utils.Fatalf("This command requires an argument.")
}
stack := makeFullNode(ctx)
chain, _ := utils.MakeChain(ctx, stack)
chain, _ := utils.MakeChain(ctx, stack, true)
start := time.Now()
var err error
@ -392,7 +392,7 @@ func copyDb(ctx *cli.Context) error {
}
// Initialize a new chain for the running node to sync into
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, false)
syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
@ -464,7 +464,7 @@ func removeDB(ctx *cli.Context) error {
func dump(ctx *cli.Context) error {
stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack)
chain, chainDb := utils.MakeChain(ctx, stack, false)
for _, arg := range ctx.Args() {
var block *types.Block
if hashish(arg) {

View File

@ -157,6 +157,12 @@ func enableWhisper(ctx *cli.Context) bool {
func makeFullNode(ctx *cli.Context) *node.Node {
stack, cfg := makeConfigNode(ctx)
// this must be done first to make sure plugin manager is fully up.
// any fatal at this point is safe
if cfg.Node.Plugins != nil {
utils.RegisterPluginService(stack, &cfg.Node, ctx.Bool(utils.PluginSkipVerifyFlag.Name), ctx.Bool(utils.PluginLocalVerifyFlag.Name), ctx.String(utils.PluginPublicKeyFlag.Name))
}
ethChan := utils.RegisterEthService(stack, &cfg.Eth)
if cfg.Node.IsPermissionEnabled() {
@ -217,7 +223,7 @@ func RegisterRaftService(stack *node.Node, ctx *cli.Context, cfg gethConfig, eth
blockTimeMillis := ctx.GlobalInt(utils.RaftBlockTimeFlag.Name)
datadir := ctx.GlobalString(utils.DataDirFlag.Name)
joinExistingId := ctx.GlobalInt(utils.RaftJoinExistingFlag.Name)
useDns := ctx.GlobalBool(utils.RaftDNSEnabledFlag.Name)
raftPort := uint16(ctx.GlobalInt(utils.RaftPortFlag.Name))
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
@ -255,7 +261,7 @@ func RegisterRaftService(stack *node.Node, ctx *cli.Context, cfg gethConfig, eth
}
ethereum := <-ethChan
return raft.New(ctx, ethereum.ChainConfig(), myId, raftPort, joinExisting, blockTimeNanos, ethereum, peers, datadir)
return raft.New(ctx, ethereum.ChainConfig(), myId, raftPort, joinExisting, blockTimeNanos, ethereum, peers, datadir, useDns)
}); err != nil {
utils.Fatalf("Failed to register the Raft service: %v", err)
}
@ -275,3 +281,9 @@ func quorumValidateConsensus(stack *node.Node, isRaft bool) {
utils.Fatalf("Consensus not specified. Exiting!!")
}
}
// quorumValidatePrivateTransactionManager returns whether the "PRIVATE_CONFIG"
// environment variable is set
func quorumValidatePrivateTransactionManager() bool {
return os.Getenv("PRIVATE_CONFIG") != ""
}
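As a usage sketch: PRIVATE_CONFIG typically points at the transaction manager's IPC config (e.g. a tm.ipc path), or holds the literal value "ignore" to run without one, as the console tests below do. The data directory here is hypothetical:

PRIVATE_CONFIG=ignore geth --datadir /tmp/qdata --nodiscover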

View File

@ -70,6 +70,7 @@ var genesis = `{
// Tests that a node embedded within a console can be started up properly and
// then terminated by closing the input stream.
func TestConsoleWelcome(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d"
datadir := setupIstanbul(t)
@ -107,6 +108,7 @@ at block: 0 ({{niltime}})
// Tests that a console can be attached to a running node via various means.
func TestIPCAttachWelcome(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
// Configure the instance for IPC attachment
coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d"
var ipc string
@ -134,6 +136,7 @@ func TestIPCAttachWelcome(t *testing.T) {
}
func TestHTTPAttachWelcome(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d"
port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P
@ -152,6 +155,7 @@ func TestHTTPAttachWelcome(t *testing.T) {
}
func TestWSAttachWelcome(t *testing.T) {
defer SetResetPrivateConfig("ignore")()
coinbase := "0x491937757d1b26e29c507b8d4c0b233c2747e68d"
port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P
@ -231,3 +235,11 @@ func setupIstanbul(t *testing.T) string {
return datadir
}
func SetResetPrivateConfig(value string) func() {
existingValue := os.Getenv("PRIVATE_CONFIG")
os.Setenv("PRIVATE_CONFIG", value)
return func() {
os.Setenv("PRIVATE_CONFIG", existingValue)
}
}

View File

@ -18,6 +18,7 @@
package main
import (
"errors"
"fmt"
"math"
"os"
@ -134,14 +135,21 @@ var (
utils.EWASMInterpreterFlag,
utils.EVMInterpreterFlag,
configFileFlag,
// Quorum
utils.EnableNodePermissionFlag,
utils.RaftModeFlag,
utils.RaftBlockTimeFlag,
utils.RaftJoinExistingFlag,
utils.RaftPortFlag,
utils.RaftDNSEnabledFlag,
utils.EmitCheckpointsFlag,
utils.IstanbulRequestTimeoutFlag,
utils.IstanbulBlockPeriodFlag,
utils.PluginSettingsFlag,
utils.PluginSkipVerifyFlag,
utils.PluginLocalVerifyFlag,
utils.PluginPublicKeyFlag,
// End-Quorum
}
rpcFlags = []cli.Flag{
@ -271,6 +279,11 @@ func geth(ctx *cli.Context) error {
if args := ctx.Args(); len(args) > 0 {
return fmt.Errorf("invalid command: %q", args[0])
}
if !quorumValidatePrivateTransactionManager() {
return errors.New("the PRIVATE_CONFIG environment variable must be specified for Quorum")
}
node := makeFullNode(ctx)
startNode(ctx, node)

View File

@ -128,17 +128,6 @@ var AppHelpFlagGroups = []flagGroup{
utils.TxPoolLifetimeFlag,
},
},
{
Name: "ETHASH",
Flags: []cli.Flag{
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
},
},
{
Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{
@ -152,6 +141,10 @@ var AppHelpFlagGroups = []flagGroup{
Name: "QUORUM",
Flags: []cli.Flag{
utils.EnableNodePermissionFlag,
utils.PluginSettingsFlag,
utils.PluginSkipVerifyFlag,
utils.PluginLocalVerifyFlag,
utils.PluginPublicKeyFlag,
},
},
{
@ -161,6 +154,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.RaftBlockTimeFlag,
utils.RaftJoinExistingFlag,
utils.RaftPortFlag,
utils.RaftDNSEnabledFlag,
},
},
{

View File

@ -19,16 +19,16 @@ package utils
import (
"crypto/ecdsa"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/permission"
"time"
"github.com/ethereum/go-ethereum/accounts"
@ -38,7 +38,10 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/istanbul"
istanbulBackend "github.com/ethereum/go-ethereum/consensus/istanbul/backend"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@ -59,6 +62,8 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/permission"
"github.com/ethereum/go-ethereum/plugin"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
"gopkg.in/urfave/cli.v1"
)
@ -594,6 +599,7 @@ var (
Usage: "The raft ID to assume when joining an pre-existing cluster",
Value: 0,
}
EmitCheckpointsFlag = cli.BoolFlag{
Name: "emitcheckpoints",
Usage: "If enabled, emit specially formatted logging checkpoints",
@ -603,13 +609,33 @@ var (
Usage: "The port to bind for the raft transport",
Value: 50400,
}
RaftDNSEnabledFlag = cli.BoolFlag{
Name: "raftdnsenable",
Usage: "Enable DNS resolution of peers",
}
// Quorum
EnableNodePermissionFlag = cli.BoolFlag{
Name: "permissioned",
Usage: "If enabled, the node will allow only a defined list of nodes to connect",
}
// Plugins settings
PluginSettingsFlag = cli.StringFlag{
Name: "plugins",
Usage: "The URI of configuration which describes plugins being used. E.g.: file:///opt/geth/plugins.json",
}
PluginLocalVerifyFlag = cli.BoolFlag{
Name: "plugins.localverify",
Usage: "If enabled, verify plugin integrity from local file system. This requires plugin signature file and PGP public key file to be available",
}
PluginPublicKeyFlag = cli.StringFlag{
Name: "plugins.publickey",
Usage: fmt.Sprintf("The URI of PGP public key for local plugin verification. E.g.: file:///opt/geth/pubkey.pgp.asc. This flag is only valid if --%s is set (default = file:///<pluginBaseDir>/%s)", PluginLocalVerifyFlag.Name, plugin.DefaultPublicKeyFile),
}
PluginSkipVerifyFlag = cli.BoolFlag{
Name: "plugins.skipverify",
Usage: "If enabled, plugin integrity is NOT verified",
}
// Istanbul settings
IstanbulRequestTimeoutFlag = cli.Uint64Flag{
Name: "istanbul.requesttimeout",
@ -878,10 +904,11 @@ func makeDatabaseHandles() int {
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
if err := fdlimit.Raise(uint64(limit)); err != nil {
raised, err := fdlimit.Raise(uint64(limit))
if err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
}
return limit / 2 // Leave half for networking and other stuff
return int(raised / 2) // Leave half for networking and other stuff
}
// MakeAddress converts an account specified directly as a hex encoded string or
@ -1047,6 +1074,51 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(NoUSBFlag.Name) {
cfg.NoUSB = ctx.GlobalBool(NoUSBFlag.Name)
}
if err := setPlugins(ctx, cfg); err != nil {
Fatalf(err.Error())
}
}
// Quorum
//
// Read plugin settings from --plugins flag. Overwrite settings defined in --config if any
func setPlugins(ctx *cli.Context, cfg *node.Config) error {
if ctx.GlobalIsSet(PluginSettingsFlag.Name) {
// validate flag combination
if ctx.GlobalBool(PluginSkipVerifyFlag.Name) && ctx.GlobalBool(PluginLocalVerifyFlag.Name) {
return fmt.Errorf("only --%s or --%s must be set", PluginSkipVerifyFlag.Name, PluginLocalVerifyFlag.Name)
}
if !ctx.GlobalBool(PluginLocalVerifyFlag.Name) && ctx.GlobalIsSet(PluginPublicKeyFlag.Name) {
return fmt.Errorf("--%s is required for setting --%s", PluginLocalVerifyFlag.Name, PluginPublicKeyFlag.Name)
}
pluginSettingsURL, err := url.Parse(ctx.GlobalString(PluginSettingsFlag.Name))
if err != nil {
return fmt.Errorf("plugins: Invalid URL for --%s due to %s", PluginSettingsFlag.Name, err)
}
var pluginSettings plugin.Settings
r, err := urlReader(pluginSettingsURL)
if err != nil {
return fmt.Errorf("plugins: unable to create reader due to %s", err)
}
defer func() {
_ = r.Close()
}()
if err := json.NewDecoder(r).Decode(&pluginSettings); err != nil {
return fmt.Errorf("plugins: unable to parse settings due to %s", err)
}
pluginSettings.SetDefaults()
cfg.Plugins = &pluginSettings
}
return nil
}
func urlReader(u *url.URL) (io.ReadCloser, error) {
s := u.Scheme
switch s {
case "file":
return os.Open(filepath.Join(u.Host, u.Path))
}
return nil, fmt.Errorf("unsupported scheme %s", s)
}
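Putting the plugin flags and setPlugins together, a hypothetical invocation enabling local verification might look like this (the file:// URIs are the examples from the flag usage strings above):

geth --plugins file:///opt/geth/plugins.json --plugins.localverify --plugins.publickey file:///opt/geth/pubkey.pgp.asc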
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
@ -1221,12 +1293,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
}
cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive"
//Quorum - set gcmode=archive for Raft
if ctx.GlobalBool(RaftModeFlag.Name) {
log.Info("set gcmode=archive for Raft")
cfg.NoPruning = true
}
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
cfg.TrieCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
}
@ -1394,6 +1460,18 @@ func RegisterEthStatsService(stack *node.Node, url string) {
// Quorum
//
// Register plugin manager as a service in geth
func RegisterPluginService(stack *node.Node, cfg *node.Config, skipVerify bool, localVerify bool, publicKey string) {
if err := cfg.ResolvePluginBaseDir(); err != nil {
Fatalf("plugins: unable to resolve plugin base dir due to %s", err)
}
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return plugin.NewPluginManager(cfg.UserIdent, cfg.Plugins, skipVerify, localVerify, publicKey)
}); err != nil {
Fatalf("plugins: Failed to register the Plugins service: %v", err)
}
}
// Configure smart-contract-based permissioning service
func RegisterPermissionService(ctx *cli.Context, stack *node.Node) {
if err := stack.Register(func(sctx *node.ServiceContext) (node.Service, error) {
@ -1467,17 +1545,41 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
}
// MakeChain creates a chain manager from set command line flags.
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
func MakeChain(ctx *cli.Context, stack *node.Node, useExist bool) (chain *core.BlockChain, chainDb ethdb.Database) {
var (
config *params.ChainConfig
err error
)
chainDb = MakeChainDatabase(ctx, stack)
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
if useExist {
stored := rawdb.ReadCanonicalHash(chainDb, 0)
if (stored == common.Hash{}) {
Fatalf("No existing genesis")
}
config = rawdb.ReadChainConfig(chainDb, stored)
} else {
config, _, err = core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
}
var engine consensus.Engine
if config.Clique != nil {
engine = clique.New(config.Clique, chainDb)
} else if config.Istanbul != nil {
// for IBFT
istanbulConfig := istanbul.DefaultConfig
if config.Istanbul.Epoch != 0 {
istanbulConfig.Epoch = config.Istanbul.Epoch
}
istanbulConfig.ProposerPolicy = istanbul.ProposerPolicy(config.Istanbul.ProposerPolicy)
istanbulConfig.Ceil2Nby3Block = config.Istanbul.Ceil2Nby3Block
engine = istanbulBackend.New(istanbulConfig, stack.GetNodeKey(), chainDb)
} else if config.IsQuorum {
// for Raft
engine = ethash.NewFullFaker()
} else {
engine = ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
@ -1495,15 +1597,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
trieWriteCacheDisabled := ctx.GlobalString(GCModeFlag.Name) == "archive"
//Quorum - set gcmode=archive for Raft
if !trieWriteCacheDisabled && ctx.GlobalBool(RaftModeFlag.Name) {
log.Info("set gcmode=archive for Raft")
trieWriteCacheDisabled = true
}
cache := &core.CacheConfig{
Disabled: trieWriteCacheDisabled,
Disabled: ctx.GlobalString(GCModeFlag.Name) == "archive",
TrieNodeLimit: eth.DefaultConfig.TrieCache,
TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
}

cmd/utils/flags_test.go Normal file
View File

@ -0,0 +1,79 @@
package utils
import (
"flag"
"io/ioutil"
"os"
"path"
"testing"
"github.com/ethereum/go-ethereum/node"
"github.com/stretchr/testify/assert"
"gopkg.in/urfave/cli.v1"
)
func TestSetPlugins_whenPluginsNotEnabled(t *testing.T) {
arbitraryNodeConfig := &node.Config{}
arbitraryCLIContext := cli.NewContext(nil, &flag.FlagSet{}, nil)
assert.NoError(t, setPlugins(arbitraryCLIContext, arbitraryNodeConfig))
assert.Nil(t, arbitraryNodeConfig.Plugins)
}
func TestSetPlugins_whenInvalidFlagsCombination(t *testing.T) {
arbitraryNodeConfig := &node.Config{}
fs := &flag.FlagSet{}
fs.String(PluginSettingsFlag.Name, "", "")
fs.Bool(PluginSkipVerifyFlag.Name, true, "")
fs.Bool(PluginLocalVerifyFlag.Name, true, "")
fs.String(PluginPublicKeyFlag.Name, "", "")
arbitraryCLIContext := cli.NewContext(nil, fs, nil)
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginSettingsFlag.Name, "arbitrary value"))
verifyErrorMessage(t, arbitraryCLIContext, arbitraryNodeConfig, "only --plugins.skipverify or --plugins.localverify must be set")
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginSkipVerifyFlag.Name, "false"))
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginLocalVerifyFlag.Name, "false"))
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginPublicKeyFlag.Name, "arbitry value"))
verifyErrorMessage(t, arbitraryCLIContext, arbitraryNodeConfig, "--plugins.localverify is required for setting --plugins.publickey")
}
func TestSetPlugins_whenInvalidPluginSettingsURL(t *testing.T) {
arbitraryNodeConfig := &node.Config{}
fs := &flag.FlagSet{}
fs.String(PluginSettingsFlag.Name, "", "")
arbitraryCLIContext := cli.NewContext(nil, fs, nil)
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginSettingsFlag.Name, "arbitrary value"))
verifyErrorMessage(t, arbitraryCLIContext, arbitraryNodeConfig, "plugins: unable to create reader due to unsupported scheme ")
}
func TestSetPlugins_whenTypical(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "q-")
if err != nil {
t.Fatal(err)
}
defer func() {
_ = os.RemoveAll(tmpDir)
}()
arbitraryJSONFile := path.Join(tmpDir, "arbitary.json")
if err := ioutil.WriteFile(arbitraryJSONFile, []byte("{}"), 0644); err != nil {
t.Fatal(err)
}
arbitraryNodeConfig := &node.Config{}
fs := &flag.FlagSet{}
fs.String(PluginSettingsFlag.Name, "", "")
arbitraryCLIContext := cli.NewContext(nil, fs, nil)
assert.NoError(t, arbitraryCLIContext.GlobalSet(PluginSettingsFlag.Name, "file://"+arbitraryJSONFile))
assert.NoError(t, setPlugins(arbitraryCLIContext, arbitraryNodeConfig))
assert.NotNil(t, arbitraryNodeConfig.Plugins)
}
func verifyErrorMessage(t *testing.T, ctx *cli.Context, cfg *node.Config, expectedMsg string) {
err := setPlugins(ctx, cfg)
assert.EqualError(t, err, expectedMsg)
}

View File

@ -0,0 +1,71 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package fdlimit
import "syscall"
// hardlimit is the number of file descriptors allowed at max by the kernel.
const hardlimit = 10240
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
if limit.Cur > max {
limit.Cur = max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// MacOS can silently apply further caps, so retrieve the actually set limit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return limit.Cur, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return int(limit.Cur), nil
}
// Maximum retrieves the maximum number of file descriptors this process is
// allowed to request for itself.
func Maximum() (int, error) {
// Retrieve the maximum allowed by dynamic OS limits
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
// Cap it to OPEN_MAX (10240) because macos is a special snowflake
if limit.Max > hardlimit {
limit.Max = hardlimit
}
return int(limit.Max), nil
}
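A minimal sketch of the new two-value Raise signature, mirroring how makeDatabaseHandles consumes it above; the import path is assumed to be github.com/ethereum/go-ethereum/common/fdlimit:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/fdlimit"
)

func main() {
	max, err := fdlimit.Maximum()
	if err != nil {
		log.Fatalf("failed to read fd maximum: %v", err)
	}
	// Raise now reports the limit that was actually applied, which may be
	// lower than requested (e.g. capped at 10240 on macOS).
	raised, err := fdlimit.Raise(uint64(max))
	if err != nil {
		log.Fatalf("failed to raise fd limit: %v", err)
	}
	fmt.Println("usable database handles:", raised/2)
}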

View File

@ -26,11 +26,11 @@ import "syscall"
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
@ -38,9 +38,12 @@ func Raise(max uint64) error {
limit.Cur = int64(max)
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
return nil
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return uint64(limit.Cur), nil
}
// Current retrieves the number of file descriptors allowed to be opened by this

View File

@ -36,7 +36,7 @@ func TestFileDescriptorLimits(t *testing.T) {
if limit, err := Current(); err != nil || limit <= 0 {
t.Fatalf("failed to retrieve file descriptor limit (%d): %v", limit, err)
}
if err := Raise(uint64(target)); err != nil {
if _, err := Raise(uint64(target)); err != nil {
t.Fatalf("failed to raise file allowance")
}
if limit, err := Current(); err != nil || limit < target {

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build linux darwin netbsd openbsd solaris
// +build linux netbsd openbsd solaris
package fdlimit
@ -22,11 +22,12 @@ import "syscall"
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
// Get the current limit
var limit syscall.Rlimit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
@ -34,9 +35,13 @@ func Raise(max uint64) error {
limit.Cur = max
}
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return err
return 0, err
}
return nil
// MacOS can silently apply further caps, so retrieve the actually set limit
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}
return limit.Cur, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this

View File

@ -16,28 +16,31 @@
package fdlimit
import "errors"
import "fmt"
// hardlimit is the number of file descriptors allowed at max by the kernel.
const hardlimit = 16384
// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
// This method is NOP by design:
// * Linux/Darwin counterparts need to manually increase per process limits
// * On Windows Go uses the CreateFile API, which is limited to 16K files, non
// changeable from within a running process
// This way we can always "request" raising the limits, which will either have
// or not have effect based on the platform we're running on.
if max > 16384 {
return errors.New("file descriptor limit (16384) reached")
if max > hardlimit {
return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit)
}
return nil
return max, nil
}
// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
// Please see Raise for the reason why we use hard coded 16K as the limit
return 16384, nil
return hardlimit, nil
}
// Maximum retrieves the maximum number of file descriptors this process is

View File

@ -29,6 +29,66 @@ type API struct {
istanbul *backend
}
// BlockSigners contains who created and who signed a particular block, denoted by its number and hash
type BlockSigners struct {
Number uint64
Hash common.Hash
Author common.Address
Committers []common.Address
}
// NodeAddress returns the public address that is used to sign block headers in IBFT
func (api *API) NodeAddress() common.Address {
return api.istanbul.Address()
}
// GetSignersFromBlock returns the signers and minter for a given block number, or the
// latest block available if none is specified
func (api *API) GetSignersFromBlock(number *rpc.BlockNumber) (*BlockSigners, error) {
// Retrieve the requested block number (or current if none requested)
var header *types.Header
if number == nil || *number == rpc.LatestBlockNumber {
header = api.chain.CurrentHeader()
} else {
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
}
if header == nil {
return nil, errUnknownBlock
}
return api.signers(header)
}
// GetSignersFromBlockByHash returns the signers and minter for a given block hash
func (api *API) GetSignersFromBlockByHash(hash common.Hash) (*BlockSigners, error) {
header := api.chain.GetHeaderByHash(hash)
if header == nil {
return nil, errUnknownBlock
}
return api.signers(header)
}
func (api *API) signers(header *types.Header) (*BlockSigners, error) {
author, err := api.istanbul.Author(header)
if err != nil {
return nil, err
}
committers, err := api.istanbul.Signers(header)
if err != nil {
return nil, err
}
return &BlockSigners{
Number: header.Number.Uint64(),
Hash: header.Hash(),
Author: author,
Committers: committers,
}, nil
}
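A sketch of calling the new endpoints over JSON-RPC, assuming the API is exposed under the "istanbul" namespace and a hypothetical node URL:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:22000") // hypothetical node URL
	if err != nil {
		log.Fatal(err)
	}
	var signers struct {
		Number     uint64
		Hash       string
		Author     string
		Committers []string
	}
	// a nil block number means "latest", per GetSignersFromBlock above
	if err := client.Call(&signers, "istanbul_getSignersFromBlock", nil); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("block %d minted by %s, sealed by %d committers\n",
		signers.Number, signers.Author, len(signers.Committers))
}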
// GetSnapshot retrieves the state snapshot at a given block.
func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) {
// Retrieve the requested block number (or current if none requested)

View File

@ -27,8 +27,8 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/istanbul"
istanbulCore "github.com/ethereum/go-ethereum/consensus/istanbul/core"
"github.com/ethereum/go-ethereum/consensus/istanbul/validator"
istanbulCore "github.com/ethereum/go-ethereum/consensus/istanbul/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto/sha3"
@ -103,6 +103,31 @@ func (sb *backend) Author(header *types.Header) (common.Address, error) {
return ecrecover(header)
}
// Signers extracts all the addresses who have signed the given header
// It will extract, for each seal, who signed it, regardless of whether the seal
// is repeated
func (sb *backend) Signers(header *types.Header) ([]common.Address, error) {
extra, err := types.ExtractIstanbulExtra(header)
if err != nil {
return []common.Address{}, err
}
var addrs []common.Address
proposalSeal := istanbulCore.PrepareCommittedSeal(header.Hash())
// 1. Get committed seals from current header
for _, seal := range extra.CommittedSeal {
// 2. Get the original address by seal and parent block hash
addr, err := istanbul.GetSignatureAddress(proposalSeal, seal)
if err != nil {
sb.logger.Error("not a valid address", "err", err)
return nil, errInvalidSignature
}
addrs = append(addrs, addr)
}
return addrs, nil
}
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. Verifying the seal may be done optionally here, or explicitly
// via the VerifySeal method.
@ -271,26 +296,20 @@ func (sb *backend) verifyCommittedSeals(chain consensus.ChainReader, header *typ
validators := snap.ValSet.Copy()
// Check whether the committed seals are generated by parent's validators
validSeal := 0
proposalSeal := istanbulCore.PrepareCommittedSeal(header.Hash())
// 1. Get committed seals from current header
for _, seal := range extra.CommittedSeal {
// 2. Get the original address by seal and parent block hash
addr, err := istanbul.GetSignatureAddress(proposalSeal, seal)
if err != nil {
sb.logger.Error("not a valid address", "err", err)
return errInvalidSignature
}
// Every validator can have only one seal. If more than one seal is signed by a
// validator, the validator cannot be found and errInvalidCommittedSeals is returned.
committers, err := sb.Signers(header)
if err != nil {
return err
}
for _, addr := range committers {
if validators.RemoveValidator(addr) {
validSeal += 1
} else {
return errInvalidCommittedSeals
validSeal++
continue
}
return errInvalidCommittedSeals
}
// The length of validSeal should be larger than number of faulty node + 1
if validSeal <= 2*snap.ValSet.F() {
// The length of validSeal should be larger than number of faulty node + 1
if validSeal <= snap.ValSet.F() {
return errInvalidCommittedSeals
}
@ -416,42 +435,45 @@ func (sb *backend) Seal(chain consensus.ChainReader, block *types.Block, results
return err
}
// wait for the timestamp of header, use this to adjust the block period
delay := time.Unix(block.Header().Time.Int64(), 0).Sub(now())
select {
case <-time.After(delay):
case <-stop:
results <- nil
return nil
}
delay := time.Unix(header.Time.Int64(), 0).Sub(now())
// get the proposed block hash and clear it if the seal() is completed.
sb.sealMu.Lock()
sb.proposedBlockHash = block.Hash()
clear := func() {
sb.proposedBlockHash = common.Hash{}
sb.sealMu.Unlock()
}
defer clear()
// post block into Istanbul engine
go sb.EventMux().Post(istanbul.RequestEvent{
Proposal: block,
})
for {
go func() {
// wait for the timestamp of header, use this to adjust the block period
select {
case result := <-sb.commitCh:
// if the block hash and the hash from channel are the same,
// return the result. Otherwise, keep waiting the next hash.
if result != nil && block.Hash() == result.Hash() {
results <- result
return nil
}
case <-time.After(delay):
case <-stop:
results <- nil
return nil
return
}
}
// get the proposed block hash and clear it if the seal() is completed.
sb.sealMu.Lock()
sb.proposedBlockHash = block.Hash()
defer func() {
sb.proposedBlockHash = common.Hash{}
sb.sealMu.Unlock()
}()
// post block into Istanbul engine
go sb.EventMux().Post(istanbul.RequestEvent{
Proposal: block,
})
for {
select {
case result := <-sb.commitCh:
// if the block hash and the hash from channel are the same,
// return the result. Otherwise, keep waiting the next hash.
if result != nil && block.Hash() == result.Hash() {
results <- result
return
}
case <-stop:
results <- nil
return
}
}
}()
return nil
}
// update timestamp and signature of the block based on its number of transactions

View File

@ -195,30 +195,44 @@ func TestSealCommittedOtherHash(t *testing.T) {
chain, engine := newBlockChain(4)
block := makeBlockWithoutSeal(chain, engine, chain.Genesis())
otherBlock := makeBlockWithoutSeal(chain, engine, block)
expectedCommittedSeal := append([]byte{1, 2, 3}, bytes.Repeat([]byte{0x00}, types.IstanbulExtraSeal-3)...)
eventSub := engine.EventMux().Subscribe(istanbul.RequestEvent{})
eventLoop := func() {
blockOutputChannel := make(chan *types.Block)
stopChannel := make(chan struct{})
go func() {
select {
case ev := <-eventSub.Chan():
_, ok := ev.Data.(istanbul.RequestEvent)
if !ok {
if _, ok := ev.Data.(istanbul.RequestEvent); !ok {
t.Errorf("unexpected event comes: %v", reflect.TypeOf(ev.Data))
}
engine.Commit(otherBlock, [][]byte{})
if err := engine.Commit(otherBlock, [][]byte{expectedCommittedSeal}); err != nil {
t.Error(err.Error())
}
}
eventSub.Unsubscribe()
}
go eventLoop()
seal := func() {
engine.Seal(chain, block, nil, make(chan struct{}))
t.Error("seal should not be completed")
}
go seal()
}()
go func() {
if err := engine.Seal(chain, block, blockOutputChannel, stopChannel); err != nil {
t.Error(err.Error())
}
}()
const timeoutDura = 2 * time.Second
timeout := time.NewTimer(timeoutDura)
select {
case <-timeout.C:
// wait 2 seconds to ensure we cannot get any blocks from Istanbul
case <-blockOutputChannel:
t.Error("Wrong block found!")
default:
//no block found, stop the sealing
close(stopChannel)
}
select {
case output := <-blockOutputChannel:
if output != nil {
t.Error("Block not nil!")
}
}
}

View File

@ -16,6 +16,8 @@
package istanbul
import "math/big"
type ProposerPolicy uint64
const (
@ -28,6 +30,7 @@ type Config struct {
BlockPeriod uint64 `toml:",omitempty"` // Default minimum difference between two consecutive block's timestamps in second
ProposerPolicy ProposerPolicy `toml:",omitempty"` // The policy for proposer selection
Epoch uint64 `toml:",omitempty"` // The number of blocks after which to checkpoint and reset the pending votes
Ceil2Nby3Block *big.Int `toml:",omitempty"` // Number of confirmations required to move from one state to next [2F + 1 to Ceil(2N/3)]
}
var DefaultConfig = &Config{
@ -35,4 +38,5 @@ var DefaultConfig = &Config{
BlockPeriod: 1,
ProposerPolicy: RoundRobin,
Epoch: 30000,
Ceil2Nby3Block: big.NewInt(0),
}

View File

@ -72,7 +72,7 @@ func (c *core) handleCommit(msg *message, src istanbul.Validator) error {
//
// If we already have a proposal, we may have chance to speed up the consensus process
// by committing the proposal without PREPARE messages.
if c.current.Commits.Size() > 2*c.valSet.F() && c.state.Cmp(StateCommitted) < 0 {
if c.current.Commits.Size() >= c.QuorumSize() && c.state.Cmp(StateCommitted) < 0 {
// Still need to call LockHash here since state can skip Prepared state and jump directly to the Committed state.
c.current.LockHash()
c.commit()

View File

@ -191,8 +191,8 @@ OUTER:
if r0.state != StatePrepared {
t.Errorf("state mismatch: have %v, want %v", r0.state, StatePrepared)
}
if r0.current.Commits.Size() > 2*r0.valSet.F() {
t.Errorf("the size of commit messages should be less than %v", 2*r0.valSet.F()+1)
if r0.current.Commits.Size() >= r0.QuorumSize() {
t.Errorf("the size of commit messages should be less than %v", r0.QuorumSize())
}
if r0.current.IsHashLocked() {
t.Errorf("block should not be locked")
@ -200,12 +200,12 @@ OUTER:
continue
}
// core should have 2F+1 prepare messages
if r0.current.Commits.Size() <= 2*r0.valSet.F() {
t.Errorf("the size of commit messages should be larger than 2F+1: size %v", r0.current.Commits.Size())
// core should have 2F+1 prepare messages before Ceil2Nby3Block, or Ceil(2N/3) after
if r0.current.Commits.Size() < r0.QuorumSize() {
t.Errorf("the size of commit messages should be larger than 2F+1 or Ceil(2N/3): size %v", r0.QuorumSize())
}
// check signatures larger than 2F+1
// check signatures larger than F
signedCount := 0
committedSeals := v0.committedMsgs[0].committedSeals
for _, validator := range r0.valSet.List() {
@ -216,8 +216,8 @@ OUTER:
}
}
}
if signedCount <= 2*r0.valSet.F() {
t.Errorf("the expected signed count should be larger than %v, but got %v", 2*r0.valSet.F(), signedCount)
if signedCount <= r0.valSet.F() {
t.Errorf("the expected signed count should be larger than %v, but got %v", r0.valSet.F(), signedCount)
}
if !r0.current.IsHashLocked() {
t.Errorf("block should be locked")

View File

@ -342,6 +342,15 @@ func (c *core) checkValidatorSignature(data []byte, sig []byte) (common.Address,
return istanbul.CheckValidatorSignature(c.valSet, data, sig)
}
func (c *core) QuorumSize() int {
if c.config.Ceil2Nby3Block == nil || (c.current != nil && c.current.sequence.Cmp(c.config.Ceil2Nby3Block) < 0) {
c.logger.Trace("Confirmation Formula used 2F+ 1")
return (2 * c.valSet.F()) + 1
}
c.logger.Trace("Confirmation Formula used ceil(2N/3)")
return int(math.Ceil(float64(2*c.valSet.Size()) / 3))
}
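For intuition, a worked comparison of the two formulas, assuming F = (N-1)/3 with integer division as in the validator set: the two agree when N = 3F+1 (e.g. N = 4, 7, 10), and ceil(2N/3) is strictly larger otherwise (e.g. N = 6 gives 3 vs 4):

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, n := range []int{4, 5, 6, 7, 10} {
		f := (n - 1) / 3 // assumed fault tolerance, as in the validator set's F()
		fmt.Printf("N=%d F=%d 2F+1=%d ceil(2N/3)=%d\n",
			n, f, 2*f+1, int(math.Ceil(float64(2*n)/3)))
	}
}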
// PrepareCommittedSeal returns a committed seal for the given hash
func PrepareCommittedSeal(hash common.Hash) []byte {
var buf bytes.Buffer

View File

@ -17,6 +17,7 @@
package core
import (
"github.com/ethereum/go-ethereum/common"
"math/big"
"reflect"
"testing"
@ -80,3 +81,20 @@ func TestNewRequest(t *testing.T) {
}
}
}
func TestQuorumSize(t *testing.T) {
N := uint64(4)
F := uint64(1)
sys := NewTestSystemWithBackend(N, F)
backend := sys.backends[0]
c := backend.engine.(*core)
valSet := c.valSet
for i := 1; i <= 1000; i++ {
valSet.AddValidator(common.StringToAddress(string(i)))
if 2*c.QuorumSize() <= (valSet.Size()+valSet.F()) || 2*c.QuorumSize() > (valSet.Size()+valSet.F()+2) {
t.Errorf("quorumSize constraint failed, expected value (2*QuorumSize > Size+F && 2*QuorumSize <= Size+F+2) to be:%v, got: %v, for size: %v", true, false, valSet.Size())
}
}
}

View File

@ -43,4 +43,6 @@ var (
errFailedDecodeCommit = errors.New("failed to decode COMMIT")
// errFailedDecodeMessageSet is returned when the message set is malformed.
errFailedDecodeMessageSet = errors.New("failed to decode message set")
// errInvalidSigner is returned when the message is signed by a validator different than message sender
errInvalidSigner = errors.New("message not signed by the sender")
)

View File

@ -59,7 +59,7 @@ func (c *core) handlePrepare(msg *message, src istanbul.Validator) error {
// Change to Prepared state if we've received enough PREPARE messages or it is locked
// and we are in earlier state before Prepared state.
if ((c.current.IsHashLocked() && prepare.Digest == c.current.GetLockedHash()) || c.current.GetPrepareOrCommitSize() > 2*c.valSet.F()) &&
if ((c.current.IsHashLocked() && prepare.Digest == c.current.GetLockedHash()) || c.current.GetPrepareOrCommitSize() >= c.QuorumSize()) &&
c.state.Cmp(StatePrepared) < 0 {
c.current.LockHash()
c.setState(StatePrepared)

View File

@ -17,6 +17,7 @@
package core
import (
"math"
"math/big"
"reflect"
"testing"
@ -156,12 +157,11 @@ func TestHandlePrepare(t *testing.T) {
errInconsistentSubject,
},
{
// less than 2F+1
func() *testSystem {
sys := NewTestSystemWithBackend(N, F)
// save less than 2*F+1 replica
sys.backends = sys.backends[2*int(F)+1:]
// save less than Ceil(2*N/3) replica
sys.backends = sys.backends[int(math.Ceil(float64(2*N)/3)):]
for i, backend := range sys.backends {
c := backend.engine.(*core)
@ -214,8 +214,8 @@ OUTER:
if r0.state != StatePreprepared {
t.Errorf("state mismatch: have %v, want %v", r0.state, StatePreprepared)
}
if r0.current.Prepares.Size() > 2*r0.valSet.F() {
t.Errorf("the size of PREPARE messages should be less than %v", 2*r0.valSet.F()+1)
if r0.current.Prepares.Size() >= r0.QuorumSize() {
t.Errorf("the size of PREPARE messages should be less than %v", r0.QuorumSize())
}
if r0.current.IsHashLocked() {
t.Errorf("block should not be locked")
@ -224,12 +224,12 @@ OUTER:
continue
}
// core should have 2F+1 PREPARE messages
if r0.current.Prepares.Size() <= 2*r0.valSet.F() {
t.Errorf("the size of PREPARE messages should be larger than 2F+1: size %v", r0.current.Commits.Size())
// core should have 2F+1 before Ceil2Nby3Block and Ceil(2N/3) after Ceil2Nby3Block PREPARE messages
if r0.current.Prepares.Size() < r0.QuorumSize() {
t.Errorf("the size of PREPARE messages should be larger than 2F+1 or ceil(2N/3): size %v", r0.current.Commits.Size())
}
// a message will be delivered to backend if 2F+1
// a message will be delivered to backend if ceil(2N/3)
if int64(len(v0.sentMsgs)) != 1 {
t.Errorf("the Send() should be called once: times %v", len(test.system.backends[0].sentMsgs))
}

View File

@ -82,9 +82,9 @@ func (c *core) handlePreprepare(msg *message, src istanbul.Validator) error {
// Verify the proposal we received
if duration, err := c.backend.Verify(preprepare.Proposal); err != nil {
logger.Warn("Failed to verify proposal", "err", err, "duration", duration)
// if it's a future block, we will handle it again after the duration
if err == consensus.ErrFutureBlock {
logger.Info("Proposed block will be handled in the future", "err", err, "duration", duration)
c.stopFuturePreprepareTimer()
c.futurePreprepareTimer = time.AfterFunc(duration, func() {
c.sendEvent(backlogEvent{
@ -93,6 +93,7 @@ func (c *core) handlePreprepare(msg *message, src istanbul.Validator) error {
})
})
} else {
logger.Warn("Failed to verify proposal", "err", err, "duration", duration)
c.sendNextRoundChange()
}
return err

View File

@ -98,8 +98,8 @@ func (c *core) handleRoundChange(msg *message, src istanbul.Validator) error {
c.sendRoundChange(roundView.Round)
}
return nil
} else if num == int(2*c.valSet.F()+1) && (c.waitingForRoundChange || cv.Round.Cmp(roundView.Round) < 0) {
// We've received 2f+1 ROUND CHANGE messages, start a new round immediately.
} else if num == c.QuorumSize() && (c.waitingForRoundChange || cv.Round.Cmp(roundView.Round) < 0) {
// We've received 2F+1 (or Ceil(2N/3)) ROUND CHANGE messages, start a new round immediately.
c.startNewRound(roundView.Round)
return nil
} else if cv.Round.Cmp(roundView.Round) < 0 {

View File

@ -109,8 +109,8 @@ func (self *testSystemBackend) Verify(proposal istanbul.Proposal) (time.Duration
}
func (self *testSystemBackend) Sign(data []byte) ([]byte, error) {
testLogger.Warn("not sign any data")
return data, nil
testLogger.Info("returning current backend address so that CheckValidatorSignature returns the same value")
return self.address.Bytes(), nil
}
func (self *testSystemBackend) CheckSignature([]byte, common.Address, []byte) error {
@ -118,7 +118,7 @@ func (self *testSystemBackend) CheckSignature([]byte, common.Address, []byte) er
}
func (self *testSystemBackend) CheckValidatorSignature(data []byte, sig []byte) (common.Address, error) {
return common.Address{}, nil
return common.BytesToAddress(sig), nil
}
func (self *testSystemBackend) Hash(b interface{}) common.Hash {

View File

@ -17,6 +17,7 @@
package core
import (
"bytes"
"fmt"
"io"
@ -137,10 +138,15 @@ func (m *message) FromPayload(b []byte, validateFn func([]byte, []byte) (common.
return err
}
_, err = validateFn(payload, m.Signature)
signerAdd, err := validateFn(payload, m.Signature)
if err != nil {
return err
}
if bytes.Compare(signerAdd.Bytes(), m.Address.Bytes()) != 0 {
return errInvalidSigner
}
}
// Still return the message even the err is not nil
return err
return nil
}
func (m *message) Payload() ([]byte, error) {

View File

@ -124,10 +124,11 @@ func testSubjectWithSignature(t *testing.T) {
subjectPayload, _ := Encode(s)
// 1. Encode test
address := common.HexToAddress("0x1234567890")
m := &message{
Code: msgPreprepare,
Msg: subjectPayload,
Address: common.HexToAddress("0x1234567890"),
Address: address,
Signature: expectedSig,
CommittedSeal: []byte{},
}
@ -141,7 +142,7 @@ func testSubjectWithSignature(t *testing.T) {
// 2.1 Test normal validate func
decodedMsg := new(message)
err = decodedMsg.FromPayload(msgPayload, func(data []byte, sig []byte) (common.Address, error) {
return common.Address{}, nil
return address, nil
})
if err != nil {
t.Errorf("error mismatch: have %v, want nil", err)

View File

@ -936,6 +936,23 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
bc.mu.Lock()
defer bc.mu.Unlock()
// Quorum
// Write private state changes to database
privateRoot, err := privateState.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
}
if err := WritePrivateStateRoot(bc.db, block.Root(), privateRoot); err != nil {
log.Error("Failed writing private state root", "err", err)
return NonStatTy, err
}
// Explicit commit for privateStateTriedb
privateTriedb := bc.privateStateCache.TrieDB()
if err := privateTriedb.Commit(privateRoot, false); err != nil {
return NonStatTy, err
}
// /Quorum
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
@ -953,17 +970,6 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
}
triedb := bc.stateCache.TrieDB()
// Explicit commit for privateStateTriedb to handle Raft db issues
if privateState != nil {
privateRoot, err := privateState.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
}
privateTriedb := bc.privateStateCache.TrieDB()
if err := privateTriedb.Commit(privateRoot, false); err != nil {
return NonStatTy, err
}
}
// If we're running an archive node, always flush
if bc.cacheConfig.Disabled {
@ -1149,6 +1155,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
// QUORUM
if bc.chainConfig.IsQuorum && bc.chainConfig.Istanbul == nil && bc.chainConfig.Clique == nil {
// Only returns an error for raft mode
return i, events, coalescedLogs, ErrAbortBlocksProcessing
}
// END QUORUM
break
}
// If the header is a banned one, straight out abort
@ -1263,17 +1275,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
return i, events, coalescedLogs, err
}
// Quorum
// Write private state changes to database
if privateStateRoot, err = privateState.Commit(bc.Config().IsEIP158(block.Number())); err != nil {
return i, events, coalescedLogs, err
}
if err := WritePrivateStateRoot(bc.db, block.Root(), privateStateRoot); err != nil {
return i, events, coalescedLogs, err
}
allReceipts := mergeReceipts(receipts, privateReceipts)
// /Quorum
proctime := time.Since(bstart)
// Write the block to the chain and get the status.

View File

@ -32,4 +32,7 @@ var (
// ErrNonceTooHigh is returned if the nonce of a transaction is higher than the
// next one expected based on the local chain.
ErrNonceTooHigh = errors.New("nonce too high")
// ErrAbortBlocksProcessing is returned if bc.insertChain is interrupted under raft mode
ErrAbortBlocksProcessing = errors.New("abort during blocks processing")
)

View File

@ -20,7 +20,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/private"
"github.com/ethereum/go-ethereum/private/constellation"
"github.com/ethereum/go-ethereum/private/privatetransactionmanager"
)
// callmsg is the message type used for call transactions in the private state test
@ -128,7 +128,7 @@ func runConstellation() (*osExec.Cmd, error) {
if constellationErr != nil {
return nil, constellationErr
}
private.P = constellation.MustNew(cfgFile.Name())
private.P = privatetransactionmanager.MustNew(cfgFile.Name())
return constellationCmd, nil
}

View File

@ -1323,3 +1323,8 @@ func checkAccount(fromAcct common.Address, toAcct *common.Address) error {
}
return nil
}
// helper function to return chainHeadChannel size
func GetChainHeadChannleSize() int {
return chainHeadChanSize
}

View File

@ -139,7 +139,7 @@ func validateEvents(events chan NewTxsEvent, count int) error {
case ev := <-events:
received = append(received, ev.Txs...)
case <-time.After(time.Second):
return fmt.Errorf("event #%d not fired", received)
return fmt.Errorf("event #%d not fired", len(received))
}
}
if len(received) > count {

View File

@ -63,6 +63,7 @@ func (b *Bloom) Add(d *big.Int) {
b.SetBytes(bin.Bytes())
}
// Quorum
// OrBloom executes an Or operation on the bloom
func (b *Bloom) OrBloom(bl []byte) {
bin := new(big.Int).SetBytes(b[:])

View File

@ -488,7 +488,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
ret, err := run(evm, contract, nil, false)
var maxCodeSize int
if evm.ChainConfig().MaxCodeSize > 0 {
if evm.chainConfig.IsMaxCodeSizeChangeBlock(evm.BlockNumber) && evm.ChainConfig().MaxCodeSize > 0 {
maxCodeSize = int(evm.ChainConfig().MaxCodeSize * 1024)
} else {
maxCodeSize = params.MaxCodeSize

View File

@ -13,7 +13,7 @@ func TestRandomG2Marshal(t *testing.T) {
t.Error(err)
continue
}
t.Logf("%d: %x\n", n, g2.Marshal())
t.Logf("%v: %x\n", n, g2.Marshal())
}
}

View File

@ -13,7 +13,7 @@ func TestRandomG2Marshal(t *testing.T) {
t.Error(err)
continue
}
t.Logf("%d: %x\n", n, g2.Marshal())
t.Logf("%v: %x\n", n, g2.Marshal())
}
}

View File

@ -27,19 +27,19 @@
There are a few ways in which you can run Cakeshop (see the sections below for details on each, as well as [configuration](https://github.com/jpmorganchase/cakeshop/blob/master/docs/configuration.md#geth) page):
1. **Default mode**: _Used when you want Cakeshop to start up an Ethereum node._

Running Cakeshop in the Default mode will start up Cakeshop and also start running a regular geth node (on a private/test network).

2. **'Attach/Unmanaged' mode**: _Used when you want to attach Cakeshop to an already running Ethereum-like node._

Running Cakeshop in 'Attach' a.k.a. 'unmanaged' mode will initialize Cakeshop but not start it nor start any Ethereum node. Once Cakeshop initialization is complete you can configure it to use the RPC details of your running node. When you then start Cakeshop it will attach to your node.

NOTE: if different parties on the network are using Cakeshop to deploy contracts to the network then they need to ensure they are using the same ContractRegistry address. See details below for setting up the ContractRegistry address in this case.

3. **Multi-Instance Set Up**: _Used when you want to run Cakeshop on more than one node in your network._

Cakeshop is currently designed such that a given instance of Cakeshop works directly with a single Ethereum-like node, however you can set up multiple instances of Cakeshop on the same machine (each of which could have been started in either 'Default' mode or 'Attach' mode) such that each can talk to a different node.
NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to run Cakeshop on [Quorum](https://github.com/jpmorganchase/quorum) nodes. See below for connecting Cakeshop to the [7nodes](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/7nodes) network from the quorum-examples repo.
@ -50,8 +50,8 @@ NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to r
1. In a terminal window run:
```
$ cd path/to/cakeshop/war
$ java -jar cakeshop.war
```
2. Open **http://localhost:8080/** in your browser (Firefox/Chrome supported)
@ -62,20 +62,32 @@ NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to r
```
$ cd path/to/cakeshop/war
# The 'example' arg below will unpack the war file and set up the cakeshop data folders but will not actually start a node
$ java -jar cakeshop.war example
```
2. Navigate to path/to/cakeshop/war/data/local
3. Make the following edits to the application.properties file:
* set `geth.url` to the RPC endpoint of your ethereum node, i.e. if your geth `rpcport` is 22001 then `geth.url=http\://localhost\:22001`
* ensure `geth.auto.start` is set to `false`
* ensure `geth.auto.stop` is set to `false`
```
geth.url=http\://localhost\:22001
geth.auto.start=false
geth.auto.stop=false
```
4. Run:
```
$ java -jar cakeshop.war
```
5. Open **http://localhost:8080/** in your browser (Firefox/Chrome supported)
6. The dropdown menu on the top right of the page should show "Manage Nodes" if you haven't attached to any yet. Click on that to go to the Manage Nodes page.
7. Click Add Node and input the RPC url of your Quorum node (i.e. http://localhost:22000) and the path to the Tessera P2P Party Info endpoint (i.e. http://localhost:9001/partyinfo).
8. Once added, click on View to attach to the node and return to the main Cakeshop page
### Multi-Instance Setup
Although Cakeshop currently has a one-to-one mapping with the underlying Ethereum-like node that it connects to, it is possible to have multiple Cakeshop instances running on the same machine, each connecting to a different Ethereum-like node. The best way to achieve this is to create separate Cakeshop folders for each node and then attach to each separately. You should also configure the ContractRegistry address as per the below:
@ -105,8 +117,8 @@ Although Cakeshop currently has a one-to-one mapping with the underlying Ethereu
cd node1
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war example
```
2. Assuming you want to attach to an existing node, navigate to /myNetwork/node1/ and edit **application.properties** per the instructions for [attach mode](#attach-mode) as described above
3. In terminal window 2 run:
@ -114,20 +126,20 @@ Although Cakeshop currently has a one-to-one mapping with the underlying Ethereu
cd myNetwork/node2
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war example
```
4. Navigate to myNetwork/node2 and edit **application.properties** per the instructions for [attach mode](#attach-mode) as described above
5. In terminal window 1 run:
```
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war
```
6. In terminal window 2 run:
```
CAKESHOP_SHARED_CONFIG=".." java -Dserver.port=8081 -jar cakeshop.war # Cakeshop will now be available on localhost:8081
```
7. In browser window 1 open http://localhost:8080/
8. In browser window 2 open http://localhost:8081/

View File

@ -2,10 +2,10 @@
With no need for POW/POS in a permissioned network, Quorum instead offers multiple consensus mechanisms that are more appropriate for consortium chains:
* __Raft-based Consensus__: A consensus model for faster blocktimes, transaction finality, and on-demand block creation. See [Raft-based consensus for Ethereum/Quorum](../raft) for more information
* __Raft-based Consensus__: A consensus model for faster blocktimes, transaction finality, and on-demand block creation. See [Raft-based consensus for Ethereum/Quorum](../raft/raft) for more information
* __Istanbul BFT (Byzantine Fault Tolerance) Consensus__: A PBFT-inspired consensus algorithm with transaction finality, by AMIS. See [Istanbul BFT Consensus documentation](https://github.com/ethereum/EIPs/issues/650), the [RPC API](../istanbul-rpc-api), and this [technical web article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff) for more information
* __Istanbul BFT (Byzantine Fault Tolerance) Consensus__: A PBFT-inspired consensus algorithm with immediate transaction finality, by AMIS. See [Istanbul BFT Consensus documentation](../ibft/ibft), the [RPC API](../ibft/istanbul-rpc-api), and this [technical web article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff) for more information
* __Clique POA Consensus__: a default POA consensus algorithm bundled with Go Ethereum. See [Clique POA Consensus Documentation](https://github.com/ethereum/EIPs/issues/225) and a [guide to setup clique json](https://hackernoon.com/hands-on-creating-your-own-local-private-geth-node-beginner-friendly-3d45902cc612) with [puppeth](https://blog.ethereum.org/2017/04/14/geth-1-6-puppeth-master/)

View File

@ -0,0 +1,69 @@
# IBFT parameters
## CLI options
### Block period
`--istanbul.blockperiod 1`
The block period sets how frequently blocks should be minted by the validators. It is also used for validation
of block times by all nodes, so it should not be changed after a value has been decided for the network.
The setting is a positive integer and measures the minimum number of seconds before the next block is considered
valid.
The default value is `1`.
### Request timeout
`--istanbul.requesttimeout 10000`
The request timeout is the timeout after which IBFT will trigger a new round if the previous one did not complete.
This period increases as the timeout is hit more often. This parameter sets the minimum timeout in the case of normal
operation and is measured in milliseconds.
The default value is `10000`.
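For example, the two options can be combined on the command line (an illustrative invocation; all other startup flags omitted):
```
$ geth --istanbul.blockperiod 5 --istanbul.requesttimeout 15000
```
Here consecutive blocks are at least 5 seconds apart, and a round change is triggered if a round takes longer than 15 seconds.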
## Genesis file options
Within the `genesis.json` file, there is an area for IBFT specific configuration, much like a Clique network
configuration.
The options are as follows:
```
{
"config": {
"istanbul": {
"epoch": 30000,
"policy": 0,
"ceil2Nby3Block": 0
},
...
},
...
}
```
### Epoch
The epoch specifies the number of blocks that should pass before pending validator votes are reset. When
`blocknumber % EPOCH == 0`, the votes are reset in order to prevent a single vote from becoming stale. If an existing
proposal is still wanted, it must be resubmitted, along with all its votes.
### Policy
The policy refers to the proposer selection policy, which is either `ROUND_ROBIN` or `STICKY`.
A value of `0` denotes a `ROUND_ROBIN` policy, where the next expected proposer is the next in queue. Once a proposer
has submitted a valid block, they join the back of the queue and must wait their turn again.
A value of `1` denotes a `STICKY` proposer policy, where a single proposer is selected to mint blocks and does so until
such a time as they go offline or are otherwise unreachable.
### ceil2Nby3Block
The `ceil2Nby3Block` sets the block number from which to use an updated formula for calculating the number of faulty
nodes. This was introduced to give existing networks the ability to upgrade at a chosen point in the future, as the
updated formula is incompatible with the existing one. For new networks, it is recommended to set this value to `0` to
use the updated formula immediately.
To update this value, the same process can be followed as other hard-forks.
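For example, an existing network that decides to adopt the updated formula at a (hypothetical) future block `2000000` would set:
```
{
    "config": {
        "istanbul": {
            "epoch": 30000,
            "policy": 0,
            "ceil2Nby3Block": 2000000
        },
        ...
    },
    ...
}
```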

docs/Consensus/ibft/ibft.md Normal file
View File

@ -0,0 +1,184 @@
# IBFT Consensus Overview
## Introduction
Istanbul Byzantine Fault Tolerant (IBFT) consensus is inspired by the Castro-Liskov 99 [paper](http://pmg.csail.mit.edu/papers/osdi99.pdf). IBFT inherits from the original PBFT by using a 3-phase consensus, `PRE-PREPARE`, `PREPARE` and `COMMIT`. The system can tolerate at most `F` faulty nodes in an `N`-validator network, where `N = 3F + 1` (for example, a 7-validator network tolerates 2 faulty validators).
## Implementation
### Terminology
- `Validator`: Block validation participant.
- `Proposer`: A block validation participant that is chosen to propose block in a consensus round.
- `Round`: Consensus round. A round starts with the proposer creating a block proposal and ends with a block commitment or round change.
- `Proposal`: New block generation proposal which is undergoing consensus processing.
- `Sequence`: Sequence number of a proposal. A sequence number should be greater than all previous sequence numbers. Currently each proposed block height is its associated sequence number.
- `Backlog`: The storage to keep future consensus messages.
- `Round state`: Consensus messages of a specific sequence and round, including pre-prepare message, prepare message, and commit message.
- `Consensus proof`: The commitment signatures of a block that can prove the block has gone through the consensus process.
- `Snapshot`: The validator voting state from last epoch.
### Consensus
Istanbul BFT Consensus protocol begins at Round `0` with the validators picking a proposer from among themselves in a round-robin fashion. The proposer will then propose a new block proposal and broadcast it along with the `PRE-PREPARE` message. Upon receiving the `PRE-PREPARE` message from the proposer, other validators validate the incoming proposal, enter the state of `PRE-PREPARED`, and broadcast a `PREPARE` message. This step is to make sure all validators are working on the same sequence and on the same round. When `ceil(2N/3)` of `PREPARE` messages are received by a validator from other validators, the validator switches to the state of `PREPARED` and broadcasts a `COMMIT` message. This step is to inform other validators that it accepts the proposed block and is going to insert the block into the chain. Lastly, validators wait for `ceil(2N/3)` of `COMMIT` messages to enter the `COMMITTED` state and then append the block to the chain.
Blocks in the Istanbul BFT protocol are final, which means that there are no forks and any valid block must be somewhere in the main chain. To prevent a faulty node from generating a totally different chain from the main chain, each validator appends `ceil(2N/3)` of received `COMMIT` signatures to the `extraData` field in the header before inserting it into the chain. Thus all blocks are self-verifiable. However, the dynamic `extraData` would cause an issue in block hash calculation. Since the same block from different validators can have different sets of `COMMIT` signatures, the same block can have different block hashes as well. To solve this, we calculate the block hash by excluding the `COMMIT` signatures part. Therefore, we can still keep the block/block hash consistency as well as put the consensus proof in the block header.
#### Consensus States
Istanbul BFT is a state machine replication algorithm. Each validator maintains a state machine replica in order to reach block consensus. The states in IBFT consensus are:
- `NEW ROUND`: Proposer to send new block proposal. Validators wait for `PRE-PREPARE` message.
- `PRE-PREPARED`: A validator has received `PRE-PREPARE` message and broadcasts `PREPARE` message. Then it waits for `ceil(2N/3)` of `PREPARE` or `COMMIT` messages.
- `PREPARED`: A validator has received `ceil(2N/3)` of `PREPARE` messages and broadcasts `COMMIT` messages. Then it waits for `ceil(2N/3)` of `COMMIT` messages.
- `COMMITTED`: A validator has received `ceil(2N/3)` of `COMMIT` messages and is able to insert the proposed block into the blockchain.
- `FINAL COMMITTED`: A new block is successfully inserted into the blockchain and the validator is ready for the next round.
- `ROUND CHANGE`: A validator is waiting for `ceil(2N/3)` of `ROUND CHANGE` messages on the same proposed round number.
**State Transitions**:
![State Transitions](images/IBFTStateTransition.png)
- `NEW ROUND` -> `PRE-PREPARED`:
- **Proposer** collects transactions from txpool.
- **Proposer** generates a block proposal and broadcasts it to validators. It then enters the `PRE-PREPARED` state.
- Each **validator** enters `PRE-PREPARED` upon receiving the `PRE-PREPARE` message with the following conditions:
- Block proposal is from the valid proposer.
- Block header is valid.
- Block proposal's sequence and round match the **validator**'s state.
- **Validator** broadcasts `PREPARE` message to other validators.
- `PRE-PREPARED` -> `PREPARED`:
- Validator receives `ceil(2N/3)` of valid `PREPARE` messages to enter `PREPARED` state. Valid messages conform to the following conditions:
- Matched sequence and round.
- Matched block hash.
- Messages are from known validators.
- Validator broadcasts `COMMIT` message upon entering `PREPARED` state.
- `PREPARED` -> `COMMITTED`:
- **Validator** receives `ceil(2N/3)` of valid `COMMIT` messages to enter `COMMITTED` state. Valid messages conform to the following conditions:
- Matched sequence and round.
- Matched block hash.
- Messages are from known validators.
- `COMMITTED` -> `FINAL COMMITTED`:
- **Validator** appends `ceil(2N/3)` commitment signatures to `extraData` and tries to insert the block into the blockchain.
- **Validator** enters `FINAL COMMITTED` state when insertion succeeds.
- `FINAL COMMITTED` -> `NEW ROUND`:
- **Validators** pick a new **proposer** and begin a new round timer.
#### Round change flow
- There are three conditions that would trigger `ROUND CHANGE`:
- Round change timer expires.
- Invalid `PREPREPARE` message.
- Block insertion fails.
- When a validator notices that one of the above conditions applies, it broadcasts a `ROUND CHANGE` message along with the proposed round number and waits for `ROUND CHANGE` messages from other validators. The proposed round number is selected based on the following conditions (see the sketch after this list):
- If the validator has received `ROUND CHANGE` messages from its peers, it picks the largest round number which has `F + 1` of `ROUND CHANGE` messages.
- Otherwise, it picks `1 + current round number` as the proposed round number.
- Whenever a validator receives `F + 1` of `ROUND CHANGE` messages on the same proposed round number, it compares the received one with its own. If the received is larger, the validator broadcasts `ROUND CHANGE` message again with the received number.
- Upon receiving `ceil(2N/3)` of `ROUND CHANGE` messages on the same proposed round number, the **validator** exits the round change loop, calculates the new **proposer**, and then enters `NEW ROUND` state.
- A validator also jumps out of the round change loop when it receives verified block(s) through peer synchronization.
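The proposed-round selection above can be sketched as follows (a schematic illustration with hypothetical names, not the actual Quorum implementation):
```
// proposedRound picks the round number to broadcast in a ROUND CHANGE message.
// roundChangeCounts maps a proposed round number to the number of ROUND CHANGE
// messages received for it; f is the fault-tolerance threshold F.
func proposedRound(roundChangeCounts map[uint64]int, currentRound uint64, f int) uint64 {
	// default: 1 + current round number
	proposed := currentRound + 1
	// if any peer-proposed round already has F+1 ROUND CHANGE messages,
	// pick the largest such round
	for round, count := range roundChangeCounts {
		if count >= f+1 && round > proposed {
			proposed = round
		}
	}
	return proposed
}
```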
#### Proposer selection
Currently we support two policies: **round robin** and **sticky proposer**.
- Round robin: Round robin is the default proposer selection policy. In this setting the proposer changes on every block and on every round change.
- Sticky proposer: in a sticky proposer setting, the proposer changes only when a round change happens.
#### Validator list voting
Istanbul BFT uses a similar validator voting mechanism as Clique and copies most of the content from the Clique [EIP](https://github.com/ethereum/EIPs/issues/225). Every epoch transition resets the validator voting, meaning any pending votes for adding/removing a validator are reset.
For all transaction blocks:
- Proposer can cast one vote to propose a change to the validators list.
- Only the latest proposal per target beneficiary is kept from a single validator.
- Votes are tallied live as the chain progresses (concurrent proposals allowed).
- Proposals reaching majority consensus (`VALIDATOR_LIMIT`) come into effect immediately (see the tally sketch after this list).
- Invalid proposals are not to be penalized for client implementation simplicity.
- A proposal coming into effect entails discarding all pending votes for that proposal (both for and against) and starts with a clean slate.
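As an illustration of the `VALIDATOR_LIMIT` rule (a minimal sketch with hypothetical names, not the Quorum implementation):
```
// proposalPassed reports whether a candidate has reached majority consensus.
// votes maps a candidate address to the set of validators voting in favour;
// numValidators is N, the current validator count.
func proposalPassed(votes map[string]map[string]bool, candidate string, numValidators int) bool {
	validatorLimit := numValidators/2 + 1 // VALIDATOR_LIMIT = floor(N/2) + 1
	return len(votes[candidate]) >= validatorLimit
}
```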
#### Future message and backlog
In an asynchronous network environment, one may receive future messages which cannot be processed in the current state. For example, a validator can receive `COMMIT` messages on `NEW ROUND`. We call this kind of message a "future message." When a validator receives a future message, it will put the message into its **backlog** and try to process it later whenever possible.
#### Constants
Istanbul BFT defines the following constants:
- `EPOCH_LENGTH`: Default: 30000 blocks. Number of blocks after which to checkpoint and reset the pending votes.
- `REQUEST_TIMEOUT`: Timeout in milliseconds for each consensus round before firing a round change.
- `BLOCK_PERIOD`: Minimum timestamp difference in seconds between two consecutive blocks.
- `PROPOSER_POLICY`: Proposer selection policy, defaults to round robin.
- `ISTANBUL_DIGEST`: Fixed magic number `0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365` of `mixDigest` in block header for Istanbul block identification.
- `DEFAULT_DIFFICULTY`: Default block difficulty, which is set to `0x0000000000000001`.
- `EXTRA_VANITY`: Fixed number of extra-data prefix bytes reserved for proposer vanity.
- Suggested `32` bytes to retain the current extra-data allowance and/or use.
- `NONCE_AUTH`: Magic nonce number `0xffffffffffffffff` to vote on adding a validator.
- `NONCE_DROP`: Magic nonce number `0x0000000000000000` to vote on removing a validator.
- `UNCLE_HASH`: Always `Keccak256(RLP([]))` as uncles are meaningless outside of PoW.
- `PREPREPARE_MSG_CODE`: Fixed number `0`. Message code for `PREPREPARE` message.
- `PREPARE_MSG_CODE`: Fixed number `1`. Message code for `PREPARE` message.
- `COMMIT_MSG_CODE`: Fixed number `2`. Message code for `COMMIT` message.
- `ROUND_CHANGE_MSG_CODE`: Fixed number `3`. Message code for `ROUND CHANGE` message
- `VALIDATOR_LIMIT`: Number of validators to pass an authorization or de-authorization proposal.
- Must be `floor(N / 2) + 1` to enforce majority consensus on a chain (e.g. `4` in a 7-validator network).
#### Block Header
Istanbul BFT does not add new block header fields. Instead, it follows Clique in repurposing the `ethash` header fields as follows:
- `nonce`: Proposer proposal regarding the account defined by the beneficiary field.
- Should be `NONCE_DROP` to propose deauthorizing beneficiary as an existing validator.
- Should be `NONCE_AUTH` to propose authorizing beneficiary as a new validator.
- **Must** be filled with zeroes, `NONCE_DROP` or `NONCE_AUTH`
- `mixHash`: Fixed magic number `0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365` for Istanbul block identification.
- `ommersHash`: Must be `UNCLE_HASH` as uncles are meaningless outside of PoW.
- `timestamp`: Must be at least the parent timestamp + `BLOCK_PERIOD`
- `difficulty`: Must be filled with `0x0000000000000001`.
- `extraData`: Combined field for signer vanity and RLP encoded Istanbul extra data, where Istanbul extra data contains validator list, proposer seal, and commit seals. Istanbul extra data is defined as follows:
```
type IstanbulExtra struct {
Validators []common.Address //Validator addresses
Seal []byte //Proposer seal 65 bytes
CommittedSeal [][]byte //Committed seal, 65 * len(Validators) bytes
}
```
Thus the `extraData` would be in the form of `EXTRA_VANITY | ISTANBUL_EXTRA` where `|` represents a fixed index that separates the vanity from the Istanbul extra data (not an actual separator character); a decoding sketch follows the list below.
- First `EXTRA_VANITY` bytes (fixed) may contain arbitrary proposer vanity data.
- `ISTANBUL_EXTRA` bytes are the RLP encoded Istanbul extra data calculated from `RLP(IstanbulExtra)`, where `RLP()` is RLP encoding function, and `IstanbulExtra` is the Istanbul extra data.
- `Validators`: The list of validators, which **must** be sorted in ascending order.
- `Seal`: The proposer's signature sealing of the header.
- `CommittedSeal`: The list of commitment signature seals as consensus proof.
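A minimal decoding sketch for this layout, assuming the go-ethereum `rlp` package, the `IstanbulExtra` struct shown above, and an `EXTRA_VANITY` of 32 bytes:
```
import (
	"errors"

	"github.com/ethereum/go-ethereum/rlp"
)

const extraVanity = 32 // EXTRA_VANITY

// splitIstanbulExtra separates the vanity prefix from the RLP-encoded
// Istanbul extra data in a block header's extraData field.
func splitIstanbulExtra(extraData []byte) ([]byte, *IstanbulExtra, error) {
	if len(extraData) < extraVanity {
		return nil, nil, errors.New("extraData shorter than vanity prefix")
	}
	vanity := extraData[:extraVanity]
	extra := new(IstanbulExtra)
	if err := rlp.DecodeBytes(extraData[extraVanity:], extra); err != nil {
		return nil, nil, err
	}
	return vanity, extra, nil
}
```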
#### Block hash, proposer seal and committed seals
The Istanbul block hash calculation is different from the `ethash` block hash calculation due to the following reasons:
1. The proposer needs to put proposer's seal in `extraData` to prove the block is signed by the chosen proposer.
2. The validators need to put `ceil(2N/3)` of committed seals as consensus proof in `extraData` to prove the block has gone through consensus.
The calculation is still similar to the `ethash` block hash calculation, with the exception that we need to deal with `extraData`. We calculate the fields as follows:
##### Proposer seal calculation
By the time of proposer seal calculation, the committed seals are still unknown, so we calculate the seal with those unknowns empty. The calculation is as follows:
- `Proposer seal`: `SignECDSA(Keccak256(RLP(Header)), PrivateKey)`
- `PrivateKey`: Proposer's private key.
- `Header`: Same as `ethash` header only with a different `extraData`.
- `extraData`: `vanity | RLP(IstanbulExtra)`, where in the `IstanbulExtra`, `CommittedSeal` and `Seal` are empty arrays.
##### Block hash calculation
While calculating block hash, we need to exclude committed seals since that data is dynamic between different validators. Therefore, we make `CommittedSeal` an empty array while calculating the hash. The calculation is:
- `Header`: Same as `ethash` header only with a different `extraData`.
- `extraData`: `vanity | RLP(IstanbulExtra)`, where in the `IstanbulExtra`, `CommittedSeal` is an empty array.
##### Consensus proof
Before inserting a block into the blockchain, each validator needs to collect `ceil(2N/3)` of committed seals from other validators to compose a consensus proof. Once it receives enough committed seals, it will fill the `CommittedSeal` in `IstanbulExtra`, recalculate the `extraData`, and then insert the block into the blockchain. **Note** that since committed seals can differ by different sources, we exclude that part while calculating the block hash as in the previous section.
Committed seal calculation:
The committed seal is calculated by each validator signing the concatenation of the block hash and the `COMMIT_MSG_CODE` message code with its private key (see the sketch below). The calculation is as follows:
- `Committed seal`: `SignECDSA(Keccak256(CONCAT(Hash, COMMIT_MSG_CODE)), PrivateKey)`.
- `CONCAT(Hash, COMMIT_MSG_CODE)`: Concatenate block hash and `COMMIT_MSG_CODE` bytes.
- `PrivateKey`: Signing validator's private key.
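A sketch of this calculation using the go-ethereum `crypto` package (`COMMIT_MSG_CODE` is `2`, per the constants section above):
```
import (
	"crypto/ecdsa"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

const commitMsgCode = byte(2) // COMMIT_MSG_CODE

// committedSeal signs CONCAT(Hash, COMMIT_MSG_CODE) with the validator's key.
func committedSeal(blockHash common.Hash, key *ecdsa.PrivateKey) ([]byte, error) {
	msg := append(blockHash.Bytes(), commitMsgCode)
	return crypto.Sign(crypto.Keccak256(msg), key)
}
```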
## Provenance
Istanbul BFT implementation in Quorum is based on [EIP 650](https://github.com/ethereum/EIPs/issues/650). It has been updated since the EIP was opened to resolve safety issues by introducing locking.

Binary file not shown (new image, 47 KiB).

View File

@ -84,3 +84,48 @@ istanbul.propose(address, auth)
#### Parameters
`String` - The address of candidate
`bool` - `true` votes in and `false` votes out
### istanbul.nodeAddress
Retrieves the public address that is used to sign proposals, which is derived from the node's `nodekey`.
```
istanbul.nodeAddress()
```
#### Returns
`string` - The node's public signing address
### istanbul.getSignersFromBlock
Retrieves the public addresses whose seals are included in the block. This means that they participated in the
consensus for this block and attested to its validity.
A block number may be optionally given, or else the current block is assumed.
```
istanbul.getSignersFromBlock(blockNumber)
```
#### Parameters
`Number` - The block number to retrieve
#### Returns
`Object` -
- `number`: `Number` - The retrieved block's number
- `hash`: `String` - The retrieved block's hash
- `author`: `String` - The address of the block proposer
- `committers`: `[]String` - The list of all addresses whose seal appears in this block
### istanbul.getSignersFromBlockByHash
Retrieves the public addresses whose seals are included in the block. This means that they participated in the
consensus for this block and attested to its validity. A block hash must be given, and does NOT default to the current
latest block.
```
istanbul.getSignersFromBlockByHash(blockHash)
```
#### Parameters
`String` - The hash of the block to retrieve
#### Returns
`Object` -
- `number`: `Number` - The retrieved block's number
- `hash`: `String` - The retrieved block's hash
- `author`: `String` - The address of the block proposer
- `committers`: `[]String` - The list of all addresses whose seal appears in this block

View File

@ -0,0 +1,192 @@
# Raft RPC API
# APIs
### raft_cluster
Returns the details of all nodes that are part of the raft cluster
#### Parameters
None
#### Returns
* `hostname`: DNS name or the host IP address
* `nodeActive`: true if the node is active in raft cluster else false
* `nodeId`: enode id of the node
* `p2pPort`: p2p port
* `raftId`: raft id of the node
* `raftPort`: raft port
* `role`: role of the node in the raft quorum. Can be minter/verifier/learner. If there is no leader at the network level, it will be returned as `""`
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_cluster", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":[{"raftId":1,"nodeId":"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef","p2pPort":21000,"raftPort":50401,"hostname":"127.0.0.1","role":"minter","nodeActive":true},{"raftId":3,"nodeId":"579f786d4e2830bbcc02815a27e8a9bacccc9605df4dc6f20bcc1a6eb391e7225fff7cb83e5b4ecd1f3a94d8b733803f2f66b7e871961e7b029e22c155c3a778","p2pPort":21002,"raftPort":50403,"hostname":"127.0.0.1","role":"verifier","nodeActive":true},{"raftId":2,"nodeId":"0ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416","p2pPort":21001,"raftPort":50402,"hostname":"127.0.0.1","role":"verifier","nodeActive":true}]}
```
```javascript tab="geth console"
> raft.cluster
[{
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "0ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416",
p2pPort: 21001,
raftId: 2,
raftPort: 50402,
role: "verifier"
}, {
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "579f786d4e2830bbcc02815a27e8a9bacccc9605df4dc6f20bcc1a6eb391e7225fff7cb83e5b4ecd1f3a94d8b733803f2f66b7e871961e7b029e22c155c3a778",
p2pPort: 21002,
raftId: 3,
raftPort: 50403,
role: "verifier"
}, {
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef",
p2pPort: 21000,
raftId: 1,
raftPort: 50401,
role: "minter"
}]
```
### raft_role
Returns the role of the current node in raft cluster
#### Parameters
None
#### Returns
* `result`: role of the node in the raft cluster. Can be minter/verifier/learner. If there is no leader at the network level, it will be returned as `""`
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_role", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":"verifier"}
```
```javascript tab="geth console"
> raft.role
"minter"
```
### raft_leader
Returns enode id of the leader node
#### Parameters
None
#### Returns
* `result`: enode id of the leader
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_leader", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef"}
```
```javascript tab="geth console"
> raft.leader
"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef"
```
If there is no leader at the network level, the call to the API will result in the following error:
```javascript
> raft.leader
Error: no leader is currently elected
at web3.js:3143:20
at web3.js:6347:15
at get (web3.js:6247:38)
at <unknown>
```
### raft_addPeer
API for adding a new peer to the network.
#### Parameters
* `enodeId`: enode id of the node to be added to the network
#### Returns
* `result`: raft id for the node being added
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_addPeer","params": ["enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405"], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":5}
```
```javascript tab="geth console"
> raft.addPeer("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
5
```
The new node can join the network with the `geth` option `--raftjoinexisting <<raftId>>`
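For example (the raft ID `5` is illustrative; all other startup flags omitted):
```
$ geth --raft --raftjoinexisting 5
```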
If the node being added is already part of the network, the following error is thrown:
```javascript
> raft.addPeer("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
Error: node with this enode has already been added to the cluster: f06c06f1e958cb2edf90d8bfb912de287f9b047b4228436e94b5b78e3ee16171
at web3.js:3143:20
at web3.js:6347:15
at web3.js:5081:36
at <anonymous>:1:1
```
### raft_removePeer
API to remove a node from raft cluster
#### Parameters
* `raftId` : raft id of the node to be removed from the cluster
#### Returns
* `result`: null
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_removePeer","params": [4], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":null}
```
```javascript tab="geth console"
> raft.removePeer(4)
null
```
### raft_addLearner
API to add a new node to the network as a learner node. The learner node syncs with the network and can transact, but will not be part of the raft quorum and hence will not provide block confirmations to the minter node.
#### Parameters
* `enodeId`
#### Returns
* `result`: raft id for the node being added
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_addLearner","params": ["enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405"], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":5}
```
```javascript tab="geth console"
> raft.addLearner("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
5
```
### raft_promoteToPeer
API for promoting a learner node to a peer, making it part of the raft quorum.
#### Parameters
* `raftId`: raft id of the node to be promoted
#### Returns
* `result`: true or false
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_promoteToPeer","params": [4], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":true}
```
```javascript tab="geth console"
> raft.promoteToPeer(4)
true
```

View File

@ -1,20 +1,45 @@
# Raft-based consensus for Ethereum/Quorum
# Raft Consensus Overview
## Introduction
The link attached holds an implementation of a [Raft](https://raft.github.io)-based consensus mechanism (using [etcd](https://github.com/coreos/etcd)'s [Raft implementation](https://github.com/coreos/etcd/tree/master/raft)) as an alternative to Ethereum's default proof-of-work. This is useful for closed-membership/consortium settings where byzantine fault tolerance is not a requirement, and there is a desire for faster blocktimes (on the order of milliseconds instead of seconds) and transaction finality (the absence of forking.) Also, compared with QuorumChain, this consensus mechanism does not "unnecessarily" create empty blocks, and effectively creates blocks "on-demand."
Quorum includes an implementation of a [Raft](https://raft.github.io)-based consensus mechanism (using [etcd](https://github.com/coreos/etcd)'s [Raft implementation](https://github.com/coreos/etcd/tree/master/raft)) as an alternative to Ethereum's default proof-of-work. This is useful for closed-membership/consortium settings where byzantine fault tolerance is not a requirement, and there is a desire for faster blocktimes (on the order of milliseconds instead of seconds) and transaction finality (the absence of forking.) This consensus mechanism does not "unnecessarily" create empty blocks, and effectively creates blocks "on-demand."
When the `geth` binary is passed the `--raft` flag, the node will operate in "raft mode."
## Some implementation basics
Note: Though we use the etcd implementation of the Raft protocol, we speak of "Raft" more broadly to refer to the Raft protocol, and its use to achieve consensus for Quorum/Ethereum.
!!! note
Though we use the etcd implementation of the Raft protocol, we speak of "Raft" more broadly to refer to the Raft protocol, and its use to achieve consensus for Quorum/Ethereum.
Both Raft and Ethereum have their own notion of a "node":
In Raft, a node in normal operation is either a "leader" or a "follower." There is a single leader for the entire cluster, which all log entries must flow through. There's also the concept of a "candidate", but only during leader election. We won't go into more detail about Raft here, because by design these details are opaque to applications built on it.
In Raft, a node in normal operation can be a "leader", "follower" or "learner." There is a single leader for the entire cluster, through which all log entries must flow. There's also the concept of a "candidate", but only during leader election. We won't go into more detail about Raft here, because by design these details are opaque to applications built on it. A Raft network can be started with a set of verifiers, one of which is elected as leader when the network starts. If the leader node dies, re-election is triggered and a new leader is elected by the network. Once the network is up, additional verifier nodes (peers) or learner nodes can be added. A brief summary of each node type is given below:
In vanilla Ethereum, there is no such thing as a "leader" or "follower." It's possible for any node in the network to mine a new block -- which is akin to being the leader for that round.
### Leader
- mints blocks and sends the blocks to the verifier and learner nodes
- takes part in voting during re-election and can become verifier if it does not win majority of votes
- the network triggers re-election if the leader node dies.
- can add/remove learner/verifier and promote learner to verifier
### Verifier
- follows the leader
- applies the blocks minted by the leader
- takes part in voting during re-election and can become leader if it wins majority of votes
- sends confirmation to leader
- can add/remove learner/verifier and promote learner to verifier
### Learner
- follows the leader
- applies the blocks minted by the leader
- cannot take part in voting during re-election
- cannot become a verifier on its own
- it needs to be promoted to be a verifier by a leader or verifier
- it cannot add learner/verifier or promote learner to verifier
- it cannot remove other learner/verifier but it can remove itself
It should be noted that when a node is added or removed as a verifier (peer), it impacts the raft quorum. However, adding a node as a learner does not change the raft quorum. Hence it is recommended that when a new node is added to a long-running network, the node is first added as a learner. Once the learner node syncs fully with the network, it can then be promoted to become a verifier.
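For example, using the raft APIs (the enode URL and returned raft ID are illustrative):
```javascript
// add the new node as a learner first
> raft.addLearner("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
5
// once it has fully synced with the network, promote it to a verifier
> raft.promoteToPeer(5)
true
```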
In vanilla Ethereum, there is no such thing as a "leader", "learner" or "follower." It's possible for any node in the network to mine a new block -- which is akin to being the leader for that round.
In Raft-based consensus, we impose a one-to-one correspondence between Raft and Ethereum nodes: each Ethereum node is also a Raft node, and by convention, the leader of the Raft cluster is the only Ethereum node that should mine (or "mint") new blocks. A minter is responsible for bundling transactions into a block just like an Ethereum miner, but does not present a proof of work.
@ -23,9 +48,11 @@ Ethereum | Raft
minter | leader
verifier | follower
A learner node is a passive node that just syncs blocks and can initiate transactions.
The main reasons we co-locate the leader and minter are (1) convenience, in that Raft ensures there is only one leader at a time, and (2) to avoid a network hop from a node minting blocks to the leader, through which all Raft writes must flow. Our implementation watches Raft leadership changes -- if a node becomes a leader it will start minting, and if a node loses its leadership, it will stop minting.
An observant reader might note that during raft leadership transitions, there could be a small period of time where more than one node might assume that it has minting duties; we detail how correctness is preserved in more detail later in this document.
An observant reader might note that during raft leadership transitions, there could be a small period of time where more than one node might assume that it has minting duties; we detail how correctness is preserved in the [Chain extension, races, and correctness](#chain-extension-races-and-correctness) section.
We use the existing Ethereum p2p transport layer to communicate transactions between nodes, but we communicate blocks only through the Raft transport layer. They are created by the minter and flow from there to the rest of the cluster, always in the same order, via Raft.
@ -33,11 +60,12 @@ When the minter creates a block, unlike in vanilla Ethereum where the block is w
From the point of view of Ethereum, Raft is integrated via an implementation of the [`Service`](https://godoc.org/github.com/jpmorganchase/quorum/node#Service) interface in [`node/service.go`](https://github.com/jpmorganchase/quorum/blob/master/node/service.go): "an individual protocol that can be registered into a node". Other examples of services are [`Ethereum`](https://godoc.org/github.com/jpmorganchase/quorum/eth#Ethereum) and [`Whisper`](https://godoc.org/github.com/jpmorganchase/quorum/whisper/whisperv5#Whisper).
## The lifecycle of a transaction
Let's follow the lifecycle of a typical transaction:
#### on any node (whether minter or verifier):
#### on any node (whether minter, verifier or learner):
1. The transaction is submitted via an RPC call to geth.
2. Using the existing (p2p) transaction propagation mechanism in Ethereum, the transaction is announced to all peers and, because our cluster is currently configured to use "static nodes," every transaction is sent to all peers in the cluster.
@ -45,7 +73,7 @@ Let's follow the lifecycle of a typical transaction:
#### on the minter:
3. It reaches the minter, where it's included in the next block (see `mintNewBlock`) via the transaction pool.
4. Block creation triggers a [`NewMinedBlockEvent`](https://godoc.org/github.com/jpmorganchase/quorum/core#NewMinedBlockEvent), which the Raft protocol manager receives via its subscription `minedBlockSub`. The `minedBroadcastLoop` (in raft/handler.go) puts this new block to the `ProtocolManager.blockProposalC` channel.
4. Block creation triggers a [`NewMinedBlockEvent`](https://godoc.org/github.com/jpmorganchase/quorum/core#NewMinedBlockEvent), which the Raft protocol manager receives via its subscription `minedBlockSub`. The `minedBroadcastLoop` (in `raft/handler.go`) puts this new block to the `ProtocolManager.blockProposalC` channel.
5. `serveLocalProposals` is waiting at the other end of the channel. Its job is to RLP-encode blocks and propose them to Raft. Once it flows through Raft, this block will likely become the new head of the blockchain (on all nodes.)
#### on every node:
@ -57,9 +85,9 @@ Let's follow the lifecycle of a typical transaction:
8. The block is now handled by `applyNewChainHead`. This method checks whether the block extends the chain (i.e. its parent is the current head of the chain; see below). If it does not extend the chain, it is simply ignored as a no-op. If it does extend the chain, the block is validated and then written as the new head of the chain by [`InsertChain`](https://godoc.org/github.com/jpmorganchase/quorum/core#BlockChain.InsertChain).
9. A [`ChainHeadEvent`](https://godoc.org/github.com/jpmorganchase/quorum/core#ChainHeadEvent) is posted to notify listeners that a new block has been accepted. This is relevant to us because:
* It removes the relevant transaction from the transaction pool.
* It removes the relevant transaction from `speculativeChain`'s `proposedTxes` (see below).
* It triggers `requestMinting` in (`minter.go`), telling the node to schedule the minting of a new block if any more transactions are pending.
The transaction is now available on all nodes in the cluster with complete finality. Because Raft guarantees a single ordering of entries stored in its log, and because everything that is committed is guaranteed to remain so, there is no forking of the blockchain built upon Raft.
@ -118,13 +146,13 @@ This default of 50ms is configurable via the `--raftblocktime` flag to geth.
One of the ways our approach differs from vanilla Ethereum is that we introduce a new concept of "speculative minting." This is not strictly required for the core functionality of Raft-based Ethereum consensus, but rather it is an optimization that affords lower latency between blocks (or: faster transaction "finality.")
It takes some time for a block to flow through Raft (consensus) to become the head of the chain. If we synchronously waited for a block to become the new head of the chain before creating the new block, any transactions that we receive would take more time to make it into the chain.
It takes some time for a block to flow through Raft (consensus) and become the head of the chain. If we synchronously waited for a block to become the new head of the chain before creating the new block, any transactions that we receive would take more time to make it into the chain.
In speculative minting we allow the creation of a new block (and its proposal to Raft) before its parent has made it all the way through Raft and into the blockchain.
Since this can happen repeatedly, these blocks (which each have a reference to their parent block) can form a sort of chain. We call this a "speculative chain."
As a speculative chain forms during the course of operation, we keep track of the subset of transactions in the pool that we have already put into blocks (in the speculative chain) that have not yet made it into the blockchain (and whereupon a [`core.ChainHeadEvent`](https://godoc.org/github.com/jpmorganchase/quorum/core#ChainHeadEvent) occurs.) These are called "proposed transactions" (see `speculative_chain.go`).
Per the presence of "races" (as we detail above), it is possible that a block somewhere in the middle of a speculative chain ends up not making into the chain. In this scenario an [`InvalidRaftOrdering`](https://godoc.org/github.com/jpmorganchase/quorum/raft#InvalidRaftOrdering) event will occur, and we clean up the state of the speculative chain accordingly.
@ -135,14 +163,14 @@ There is currently no limit to the length of these speculative chains, but we pl
* `head`: The last-created speculative block. This can be `nil` if the last-created block is already included in the blockchain.
* `proposedTxes`: The set of transactions which have been proposed to Raft in some block, but not yet included in the blockchain.
* `unappliedBlocks`: A queue of blocks which have been proposed to Raft but not yet committed to the blockchain (sketched after this list).
- When minting a new block, we enqueue it at the end of this queue
- `accept` is called to remove the oldest speculative block when it's accepted into the blockchain.
- When an [`InvalidRaftOrdering`](https://godoc.org/github.com/jpmorganchase/quorum/raft#InvalidRaftOrdering) occurs, we unwind the queue by popping the most recent blocks from the "new end" of the queue until we find the invalid block. We must repeatedly remove these "newer" speculative blocks because they are all dependent on a block that we know has not been included in the blockchain.
* `expectedInvalidBlockHashes`: The set of blocks which build on an invalid block, but haven't passed through Raft yet. When these non-extending blocks come back through Raft we remove them from the speculative chain. We use this set as a "guard" against trying to trim the speculative chain when we shouldn't.
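The `unappliedBlocks` queue operations can be sketched as follows (a schematic illustration of the behaviour described above with a hypothetical type name; the real logic lives in `speculative_chain.go`):
```
import "github.com/ethereum/go-ethereum/core/types"

// speculativeChainSketch models only the unappliedBlocks queue.
type speculativeChainSketch struct {
	unappliedBlocks []*types.Block // oldest block first
}

// propose enqueues a newly minted speculative block.
func (c *speculativeChainSketch) propose(b *types.Block) {
	c.unappliedBlocks = append(c.unappliedBlocks, b)
}

// accept removes the oldest speculative block once it is committed to the chain.
func (c *speculativeChainSketch) accept() {
	if len(c.unappliedBlocks) > 0 {
		c.unappliedBlocks = c.unappliedBlocks[1:]
	}
}

// unwind pops blocks from the "new end" of the queue until the invalid block
// (and everything built on top of it) has been removed.
func (c *speculativeChainSketch) unwind(invalid *types.Block) {
	for i := len(c.unappliedBlocks) - 1; i >= 0; i-- {
		dropped := c.unappliedBlocks[i]
		c.unappliedBlocks = c.unappliedBlocks[:i]
		if dropped.Hash() == invalid.Hash() {
			return
		}
	}
}
```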
## The Raft transport layer
We communicate blocks over the HTTP transport layer built in to etcd Raft. It's also (at least theoretically) possible to use p2p protocol built-in to Ethereum as a transport for Raft. In our testing we found the default etcd HTTP transport to be more reliable than the p2p (at least as implemented in geth) under high load.
We communicate blocks over the HTTP transport layer built in to etcd Raft. It's also (at least theoretically) possible to use the p2p protocol built-in to Ethereum as a transport for Raft. In our testing we found the default etcd HTTP transport to be more reliable than the p2p (at least as implemented in geth) under high load.
Quorum listens on port 50400 by default for the raft transport, but this is configurable with the `--raftport` flag.
@ -154,8 +182,12 @@ Currently Raft-based consensus requires that all _initial_ nodes in the cluster
To remove a node from the cluster, attach to a JS console and issue `raft.removePeer(raftId)`, where `raftId` is the number of the node you wish to remove. For initial nodes in the cluster, this number is the 1-indexed position of the node's enode ID in the static peers list. Once a node has been removed from the cluster, the removal is permanent; this raft ID cannot ever re-connect to the cluster in the future, and the party must re-join the cluster with a new raft ID.
To add a node to the cluster, attach to a JS console and issue `raft.addPeer(enodeId)`. Note that like the enode IDs listed in the static peers JSON file, this enode ID should include a `raftport` querystring parameter. This call will allocate and return a raft ID that was not already in use. After `addPeer`, start the new geth node with the flag `--raftjoinexisting RAFTID` in addition to `--raft`.
* To add a verifier node to the cluster, attach to a JS console and issue `raft.addPeer(enodeId)`
* To add a learner node to the cluster, attach to a JS console and issue `raft.addLearner(enodeId)`
* To promote a learner to become a verifier in the cluster, attach to a JS console of a leader/verifier node and issue `raft.promoteToPeer(raftId)`.
Note that like the enode IDs listed in the static peers JSON file, this enode ID should include a `raftport` querystring parameter. This call will allocate and return a raft ID that was not already in use. After `addPeer`, start the new geth node with the flag `--raftjoinexisting RAFTID` in addition to `--raft`.
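As an illustrative sketch, a learner could be added and later promoted from a console session like the one below (the enode URI, the returned raft ID, and the return values are hypothetical):

```bash
$ geth attach /qdata/dd/geth.ipc
# Add a learner node; the call allocates and returns a new raft ID
> raft.addLearner("enode://<node key>@127.0.0.1:21007?discport=0&raftport=50408")
8
# From a leader/verifier node, promote the learner to a verifier
> raft.promoteToPeer(8)
true
```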
## FAQ
Answers to frequently asked questions can be found on the main [Quorum FAQ page](../../FAQ.md).
@ -41,14 +41,27 @@
Unfortunately, that is not possible. Quorum nodes configured with raft will only be able to work correctly with other nodes running raft consensus. This applies to all other supported consensus algorithms.
??? info "Quorum version compatibility table"
| | Adding new node v2.0.x | Adding new node v2.1.x - v2.5.x |
| ----------------------------------- | ---------------------- | ------------------------------- |
| Existing chain consisting of v2.0.x | <span style="color:green;">block sync<br /> public txn<br /> private txn</span> | <span style="color:red;">block sync</span> |
| Existing chain consisting of v2.1.x - v2.5.0 | <span style="color:red;">block sync</span> | <span style="color:green;">block sync<br /> public txn<br /> private txn</span> |
**Note:** While every Quorum v2 client will be able to connect to any other v2 client, the usefulness will be severely degraded. <span style="color:red;">Red color</span> signifies that while connectivity is possible, <span style="color:red;">red colored</span> versions will be unable to send public or private transactions to the rest of the network due to the EIP155 changes in the signer implemented in newer versions.
??? info "Quorum to Geth version mapping"
| Quorum v2.0.x - v2.1.1 | Quorum v2.2.0 - v2.2.1 | Quorum v2.2.2 - v2.5.0 |
| ---------------------- | ---------------------- | ---------------------- |
| Geth v1.7.2 | Geth v1.8.12 | Geth v1.8.18 |
### Tessera FAQ
??? question "What does enabling 'disablePeerDiscovery' mean?"
    It means the node will only communicate with the nodes defined in the configuration file. Up to version 0.10.2, the node still accepts transactions from undiscovered nodes. From version 0.10.3, the node blocks all communication with undiscovered nodes.
??? info "Upgrading to Tessera version 0.10.+ from version 0.9.+ and below"
    Due to a 'database file unable to open' issue when upgrading the H2 DB directly from version 1.4.196 to version 1.4.200, as explained [here](https://github.com/h2database/h2database/issues/2263), our recommended mitigation strategy is to upgrade to version 1.4.199 first before upgrading to version 1.4.200, i.e., first upgrade to Tessera 0.10.0 before upgrading to higher versions.
### Raft FAQ
??? question "Could you have a single- or two-node cluster? More generally, could you have an even number of nodes?"
@ -60,7 +73,7 @@
* It saves one network call communicating the block to the leader.
* It provides a simple way to choose a minter. If we didn't use the Raft leader we'd have to build in "minter election" at a higher level.
Additionally there could even be multiple minters running at the same time, but this would produce contention for which blocks actually extend the chain, reducing the productivity of the cluster (see [Raft: Chain extension, races, and correctness](../Consensus/raft/#chain-extension-races-and-correctness) above).
??? question "I thought there were no forks in a Raft-based blockchain. What's the deal with "speculative minting"?"
"Speculative chains" are not forks in the blockchain. They represent a series ("chain") of blocks that have been sent through Raft, after which each of the blocks may or may not actually end up being included in *the blockchain*.
docs/Features/dns.md
@ -0,0 +1,31 @@
# DNS for Quorum
DNS support in Quorum has two distinct areas, usage in the static nodes file and usage in the
node discovery protocol. You are free to use one and not the other, or to mix them as the use case
requires.
## Static nodes
Static nodes are nodes we keep a reference to even if the node is not alive, so that if the node comes alive,
then we can connect to it. Hostnames are permitted here, and are resolved once at startup. If a static peer goes offline
and its IP address changes, then it is expected that that peer would re-establish the connection in a fully static
network, or have discovery enabled.
## Discovery
DNS is not supported for the discovery protocol. Use a bootnode instead, which can use a DNS name that is repeatedly
resolved.
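As a sketch, a node can point at a bootnode by hostname; the enode public key and hostname below are placeholders:

```bash
# The bootnode's DNS name is re-resolved as needed, so its IP may change
$ geth ... OTHER ARGS ... --bootnodes enode://<bootnode public key>@bootnode.example.com:30301
```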
## Compatibility
For Raft, the whole network must be on Quorum version 2.4.0 for DNS to function properly. DNS must
be explicitly enabled using the `--raftdnsenable` flag for each node once the node has migrated to version 2.4.0 of Quorum.
The network runs fine when some nodes are on version 2.4.0 and some on an older version, as long as this feature is not enabled. For a safe migration the recommended approach is as follows:
* migrate the nodes to version 2.4.0 of `geth` without using the `--raftdnsenable` flag
* once the network is fully migrated, restart the nodes with `--raftdnsenable` to enable the feature
Please note that in a partially migrated network (where some nodes are on version 2.4.0 and others on a lower version) **with the DNS feature enabled** for migrated nodes, `raft.addPeer` should not be invoked with a hostname until the entire network has migrated to version 2.4.0. If invoked, this call will crash all nodes running the older version, and those nodes will have to be restarted with version 2.4.0 of Quorum's `geth`. `raft.addPeer` can still be invoked with an IP address, and the network will work fine.
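For illustration, the two forms of the call differ only in the host portion of the enode URI (the node key and addresses below are placeholders):

```bash
$ geth attach /qdata/dd/geth.ipc
# Safe in a partially migrated network: pass an IP address
> raft.addPeer("enode://<node key>@192.168.1.10:21000?discport=0&raftport=50400")
# Safe only once every node runs 2.4.0 with --raftdnsenable: pass a hostname
> raft.addPeer("enode://<node key>@node7.example.com:21000?discport=0&raftport=50400")
```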
### Note
In a network where all nodes are running Quorum version 2.4.0, with only a few nodes enabled for DNS, we recommend
setting `--verbosity` to 3 or below. We have observed that nodes which are not enabled for DNS fail to restart if
`raft.addPeer` is invoked with a hostname while `--verbosity` is set above 3.
@ -0,0 +1,48 @@
# Backup & Restore of Quorum Nodes
Quorum supports export and import of chain data with built in tooling. This is an effective node backup mechanism
adapted for the specific needs of Quorum such as private transactions, permissioning, and supported consensus
algorithms.
!!! note
    Quorum chain data import and export must be run after the `geth` process is stopped.
### Node Backup (Export)
Backup functionality mimics the original `geth export` command. Quorum export accepts 3 arguments:
1. Export file name (**required**)
2. First block (*optional*)
3. Last block (*optional; first and last block must be provided together when used*)
##### Sample command
`geth export <export file name> --datadir <geth data dir>`
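When the optional block arguments are used, they are passed together after the file name; a hedged sketch, assuming the standard `geth export` positional argument order:

```bash
# Export only blocks 0 through 1000 (first and last block must be given together)
$ geth export <export file name> 0 1000 --datadir <geth data dir>
```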
### Node Restore (Import)
Restore functionality mimics the original `geth import` command but requires the transaction manager environment variable.
Quorum import must run on a new node with an initialized `--datadir`, after `geth init` has been executed. Restore
supports an arbitrary number of import files (at least 1).
!!! warning
    If private transactions are used in the chain data, the Private Transaction Manager process for the original exported
    node must be running on the PTM ipc endpoint during the chain import. Otherwise, nil pointer exceptions will be raised.
##### Sample command
`PRIVATE_CONFIG=<PTM ipc endpoint> geth import <import file names...> --datadir <geth data dir>`
### Special Consensus Considerations
##### IBFT
IBFT block data contains sealer information in the header, so to restore a copy of exported chain data, the new node must
be initialized using an IBFT genesis file with the exact same validator set encoded in the extra data field as the
original exported node's genesis.
##### Raft
Raft backups do not account for the current Raft state. Exported chain data from a Raft cluster can only be used by
new nodes being added to that same cluster.
@ -55,13 +55,16 @@ Let's go through step by step instructions to setup a Quorum node with Raft cons
"config": {
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"chainId": 10,
"eip150Block": 0,
"eip155Block": 0,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip158Block": 0,
"maxCodeSize": 35,
"maxCodeSizeChangeBlock" : 0,
"isQuorum": true
},
"difficulty": "0x0",
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000",
"gasLimit": "0xE0000000",
@ -21,11 +21,14 @@
"config": {
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"chainId": 10,
"eip150Block": 0,
"eip155Block": 0,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip158Block": 0,
"maxCodeSize": 35,
"maxCodeSizeChangeBlock" : 0,
"isQuorum": true
},
"difficulty": "0x0",
@ -0,0 +1,291 @@
# Adding and removing IBFT validators
Over the lifetime of an IBFT network, validators will need to be added and removed as authorities change.
Here we will showcase adding a new validator to an IBFT network, as well as removing an existing one.
## Adding a node to the validator set
Adding a node to the IBFT validator set is relatively easy once a node is part of the network.
It does not matter whether the node is already online or not, as the process to add the new node as a validator only
needs the *existing* validators.
!!! warning
If you are adding multiple validators before they are brought online, make sure you don't go over the BFT limit and cause the chain to stop progressing.
Adding a new validator requires that a majority of existing validators propose the new node to be added. This is
achieved by calling the `propose` RPC method with the value `true`, replacing the address with the one you require:
```bash
$ geth attach /qdata/dd/geth.ipc
> istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);
null
```
This indicates that the current node wishes to add address `0xb131288f355bc27090e542ae0be213c20350b767` as a new
validator.
### Example
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/ibft_validator_set_changes)
repository.
1. The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
2. Bring up the network, which contains 7 nodes, of which 6 are validators.
```bash
$ docker-compose -f ibft-6-validators.yml up
```
We will be adding the 7th node as a validator. You may notice in the logs of node 7 messages along the lines of
`node7_1 | WARN [01-20|10:37:16.034] Block sealing failed err=unauthorized`. This is because
the node was started up with minting enabled, but doesn't have the authority to create blocks, and so throws this
error.
3. Now we need to propose node 7 as a new validator from the existing nodes.
!!! note
Remember, you could do this stage before starting node 7 in your network
We need a majority of existing validators to propose the new node before the changes will take effect.
Let's start with node 1 and see what happens:
```bash
# Propose node 7 from node 1
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
# Wait about 5 seconds, and then run:
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xf814863d809ce3a683ee0a2197b15a8152d2696fc9c4e47cd82d0bd5cdaa3e45",
number: 269,
policy: 0,
tally: {
0xb131288f355bc27090e542ae0be213c20350b767: {
authorize: true,
votes: 1
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 268,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}]
}
```
Let's break this down.
Firstly, we proposed the address `0xb131288f355bc27090e542ae0be213c20350b767` to be added; that is what the `true`
parameter is for. If we had set it to `false`, that means we want to remove an existing validator with that address.
Secondly, we fetched the current snapshot, which gives us an insight into the current running state of the voting.
We can see that the new address has 1 vote under the `tally` section, and that one vote is described under the
`votes` section. So we know our vote was registered!
4. Let's run this from node 2 and see similar results:
```bash
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
# Again, you may have to wait 5 - 10 seconds for the snapshot to show the vote
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0x93efcd458f3b875902a4532bb77d5e7ebb701791ea95486ecd58baf682312d74",
number: 391,
policy: 0,
tally: {
0xb131288f355bc27090e542ae0be213c20350b767: {
authorize: true,
votes: 2
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 388,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}, {
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 390,
validator: "0x6571d97f340c8495b661a823f2c2145ca47d63c2"
}]
}
```
True to form, we have the second vote registered!
5. Ok, let's finally vote on nodes 3 and 4.
```bash
$ docker exec -it addnode_node3_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node4_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
```
6. Now we have a majority of votes, let's check the snapshot again:
```bash
docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xd4234184538297f71f5b7024a2e11f51f06b4f569ebd9e3644abd391b8c66101",
number: 656,
policy: 0,
tally: {},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb131288f355bc27090e542ae0be213c20350b767", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: []
}
```
We can see that the votes have now been wiped clean, ready for a new round. Additionally, the address we were adding,
`0xb131288f355bc27090e542ae0be213c20350b767` now exists within the `validators` list!
Lastly, the `unauthorized` messages that node 7 was giving before have stopped, as it now has the authority to mint
blocks.
## Removing a node from the validator set
Removing a validator is very similar to adding a node, but this time we want to propose nodes with the value `false`,
to indicate we are deauthorising them. It does not matter whether the node is still online or not, as it doesn't
require any input from the node being removed.
!!! warning
    Be aware when removing nodes that cross the BFT boundary, e.g. going from 10 validators to 9, as this may impact the chain's ability to progress if other nodes are offline
Removing a validator requires that a majority of existing validators propose the node to be removed. This is
achieved by calling the `propose` RPC method with the value `false` and the address of the validator you wish to remove:
```bash
$ geth attach /qdata/dd/geth.ipc
> istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", false);
null
```
### Example
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/ibft_validator_set_changes)
repository.
1. The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
2. Bring up the network, which contains 7 nodes, of which 6 are validators.
```bash
# Set the environment variable for docker-compose
$ export COMPOSE_PROJECT_NAME=addnode
# Start the 7 node network, of which 6 are validators
$ docker-compose -f ibft-6-validators.yml up
```
3. Now we need to propose node 6 as the node to remove.
!!! note
We need a majority of existing validators to propose the new node before the changes will take effect.
Let's start with node 1 and see what happens:
```bash
# Propose removing node 6, from node 1
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
# Wait about 5 seconds, and then run:
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xba9f9b72cad90ae8aee39f352b45f21d5ed5535b4479743e3f39b231fd717792",
number: 140,
policy: 0,
tally: {
0x8157d4437104e3b8df4451a85f7b2438ef6699ff: {
authorize: false,
votes: 1
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0x8157d4437104e3b8df4451a85f7b2438ef6699ff",
authorize: false,
block: 136,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}]
}
```
Let's break this down.
Firstly, we proposed the address `0x8157d4437104e3b8df4451a85f7b2438ef6699ff` to be removed; that is what the
`false` parameter is for.
Secondly, we fetched the current snapshot, which gives us an insight into the current running state of the voting.
We can see that the proposed address has 1 vote under the `tally` section, and that one vote is described under the
`votes` section. Here, the `authorize` section is set to `false`, which is in line with our proposal to *remove* the
validator.
4. We need to get a majority, so let's run the proposal on 3 more nodes:
```bash
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node3_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node4_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
```
5. Let's check the snapshot now all the required votes are in:
```bash
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0x25815a32b086926875ea2c44686e4b20effabc731b2b121ebf0e0f395101eea5",
number: 470,
policy: 0,
tally: {},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: []
}
```
The validator has been removed from the `validators` list, and we are left with the other 5 still present. You will
also see in the logs of node 6 a message like
`node6_1 | WARN [01-20|11:35:52.044] Block sealing failed err=unauthorized`. This is because it is still minting
blocks, but realises it does not have the authority to push them to any of the other nodes on the network (you will
also see this message for node 7, which was never authorised but still set up to mine).
## See also
- [Adding a new node to the network](/How-To-Guides/adding_nodes)
@ -0,0 +1,388 @@
# Node addition examples
Below are some scenarios for adding a new node into a network, with a mix of different options such as
consensus algorithm, permissioning and discovery.
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/adding_nodes) repository.
Check out the repository through `git`, or otherwise download all the resources to your local machine, to follow along.
The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
## Non-permissioned IBFT with discovery
An example using IBFT, with no permissioning, and with discovery enabled via a bootnode.
There are no static peers in this network; instead, every node is set to talk to node 1 via the CLI flag
`--bootnodes enode://ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef@172.16.239.11:21000`.
Node 1 will forward the details of all the nodes it knows about (in this case, everyone) and they will then initiate their
own connections.
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f ibft-non-perm-bootnode.yml down
# Bring up 6 nodes
$ docker-compose -f ibft-non-perm-bootnode.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
    * The block creation period is set to 2 seconds, so you may have to wait up to that amount of time for the transaction to be minted.
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. Bring up the last node. This node also has its bootnodes set to node 1, so at startup it will try to establish a
connection to node 1 only. After this, node 1 will share which nodes it knows about, and node 7 can then initiate
connections with those peers.
```bash
# Bring up node 7
$ docker-compose -f ibft-non-perm-bootnode.yml up node7
```
4. Let's check to see if the nodes are in sync. If they are, they will have similar block numbers, which is enough for
this example; there are other ways to tell if nodes are on the same chain, e.g. matching block hashes.
!!! note
    Depending on timing, the second node may have an extra block or two.
```bash
# Fetch the latest block number for node 1
$ docker exec -it addnode_node1_1 geth --exec 'eth.blockNumber' attach /qdata/dd/geth.ipc
45
# Fetch the latest block number for node 7
$ docker exec -it addnode_node7_1 geth --exec 'eth.blockNumber' attach /qdata/dd/geth.ipc
45
```
5. We can check that the transaction and contract we sent earlier now exist on node 7.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
6. To be sure we have two way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
7. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6 node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Non-permissioned RAFT with discovery disabled
This example walks through adding a new node to a RAFT network. This network does not have permissioning for the
Ethereum peer-to-peer layer, and makes its connections solely based on who is listed in the node's `static-nodes.json`
file.
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f raft-non-perm-nodiscover.yml down
# Bring up 6 nodes
$ docker-compose -f raft-non-perm-nodiscover.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. We need to add the new peer to the RAFT network before it joins, otherwise the existing nodes will reject it from
the RAFT communication layer; we also need to know what ID the new node should join with.
```bash
# Add the new node
$ docker exec -it addnode_node1_1 geth --exec 'raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@172.16.239.17:21000?discport=0&raftport=50400")' attach /qdata/dd/geth.ipc
7
```
The return value is the RAFT ID of the new node. When the node joins the network for the first time, it will need
this ID number handy. If it was lost, you can always view the full network, including IDs, by running the
`raft.cluster` command on an existing node.
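For instance, a hypothetical lookup from node 1 in this example network:

```bash
# List the full Raft cluster, including raft IDs, from an existing node
$ docker exec -it addnode_node1_1 geth --exec 'raft.cluster' attach /qdata/dd/geth.ipc
```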
4. Bring up the last node. Here, we pass the newly created ID number as a flag into the startup of node 7. This lets
the node know not to bootstrap a new network from the contents of `static-nodes.json`, but to connect to an existing
node listed there and fetch any bootstrap information.
```bash
# Bring up node 7
$ QUORUM_GETH_ARGS="--raftjoinexisting 7" docker-compose -f raft-non-perm-nodiscover.yml up node7
```
5. Let's check to see if the nodes are in sync. We can do this by checking whether node 7 has the contract we viewed
earlier.
```bash
# Fetch the contracts value on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
6. To be sure we have two way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
7. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6 node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Permissioned RAFT with discovery disabled
This example walks through adding a new node to a RAFT network. This network does have permissioning enabled for the
Ethereum peer-to-peer layer; this means that for any Ethereum tasks, such as syncing the initial blockchain or
propagating transactions, the node must appear in other nodes' `permissioned-nodes.json` files.
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f raft-perm-nodiscover.yml down
# Bring up 6 nodes
$ docker-compose -f raft-perm-nodiscover.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. We need to add the new peer to the RAFT network before it joins, otherwise the existing nodes will reject it from
the RAFT communication layer; we also need to know what ID the new node should join with.
```bash
# Add the new node
$ docker exec -it addnode_node1_1 geth --exec 'raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@172.16.239.17:21000?discport=0&raftport=50400")' attach /qdata/dd/geth.ipc
7
```
The return value is the RAFT ID of the new node. When the node joins the network for the first time, it will need
this ID number handy. If it was lost, you can always view the full network, including IDs, by running the
`raft.cluster` command on an existing node.
4. Bring up the last node. Here, we pass the newly created ID number as a flag into the startup of node 7. This lets
the node know not to bootstrap a new network from the contents of `static-nodes.json`, but to connect to an existing
node listed there and fetch any bootstrap information.
```bash
# Bring up node 7
$ QUORUM_GETH_ARGS="--raftjoinexisting 7" docker-compose -f raft-perm-nodiscover.yml up node7
```
5. Let's check to see if the nodes are in sync. We can do this by checking whether node 7 has the contract we viewed
earlier.
```bash
# Fetch the contracts value on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
0
```
The value here is `0`, not the expected `42`! Node 7 is unable to sync the blockchain because the other peers in the
network are refusing to allow connections from node 7, due to it being missing in the `permissioned-nodes.json` file.
This does not affect the RAFT layer, so if node 7 was already in sync, it could still receive new blocks; this is
okay though, since it would be permissioned on the RAFT side by virtue of being part of the RAFT cluster.
6. Let's update the permissioned nodes list on node 1, which will allow node 7 to connect to it.
```bash
$ docker exec -it addnode_node1_1 cp /extradata/static-nodes-7.json /qdata/dd/permissioned-nodes.json
$
```
7. Node 7 should now be synced up through node 1. Let's see if we can see the contract we made earlier.
!!! note
    Quorum attempts to re-establish connections every 30 seconds, so you may have to wait for the sync to happen.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
8. To be sure we have two way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
9. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6 node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Adding a Private Transaction Manager
This is a simple example of adding a new Tessera instance to an existing network. For simplicity,
the steps to add the Quorum node are omitted, but they are the same as those followed in the IBFT example.
Here, a Tessera node is added without any of the discovery options specified, meaning that the
IP Whitelist isn't used, nor is key discovery disabled.
1. Start up the initial 6 node network.
```bash
# Ensure any old network is removed
$ docker-compose -f tessera-add.yml down
# Bring up 6 nodes
$ docker-compose -f tessera-add.yml up node1 node2 node3 node4 node5 node6
```
2. We can verify that private transactions can be sent by sending one from node 1 to node 6.
We can also see that since node 7 doesn't exist yet, we can't send private transactions to it.
```bash
# Send a private transaction from node 1 to node 6
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-6.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xc8a5de4bb79d4a8c3c1156917968ca9b2965f2514732fc1cff357ec999b9aba4 waiting to be mined...
true
# Success!
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-7.js")' attach /qdata/dd/geth.ipc
err creating contract Error: Non-200 status code: &{Status:404 Not Found StatusCode:404 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Server:[Jetty(9.4.z-SNAPSHOT)] Date:[Thu, 16 Jan 2020 12:44:19 GMT] Content-Type:[text/plain] Content-Length:[73]] Body:0xc028e87d40 ContentLength:73 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0xc000287200 TLS:<nil>}
true
# An expected failure. The script content didn't succeed, but the script itself was run okay, so true was still returned
```
3. Let's first bring up node 7, then we can inspect what is happening and the configuration used.
```bash
# Bring up node 7
$ docker-compose -f tessera-add.yml up node7
$ docker exec -it addnode_node7_1 cat /qdata/tm/tessera-config.json
# ...some output...
```
The last command will output Tessera 7's configuration.
The pieces we are interested in here are the following:
```json
{
"useWhiteList": false,
"peer": [
{
"url": "http://txmanager1:9000"
}
],
...
}
```
We can see that the whitelist is not enabled, discovery is not specified so defaults to enabled,
and we have a single peer to start off with, which is node 1.
This is all that is needed to connect to an existing network. Shortly after starting up, Tessera
will ask node 1 about all its peers, and then will keep a record of them for its own use. From
then on, all the nodes will know about node 7 and can send private transactions to it.
4. Let's try it! Let's send a private transaction from node 1 to the newly added node 7.
```bash
# Sending a transaction from node 1 to node 7
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-7.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x3e3b50768ffdb51979677ddb58f48abdabb82a3fd4f0bac5b3d1ad8014e954e9 waiting to be mined...
true
```
We got a success this time! Tessera 7 has been accepted into the network and can interact with the
other existing nodes.
@ -0,0 +1,227 @@
# Adding nodes to the network
Adding new nodes to an existing network can range from a common occurrence to never happening.
In public blockchains, such as the Ethereum Mainnet, new nodes continuously join and talk to the existing network.
In permissioned blockchains, this may not happen as often, but it is still an important task to achieve as your
network evolves.
When adding new nodes to the network, it is important to understand that the Quorum network and Private Transaction
Manager network are distinct and do not overlap in any way. Therefore, options applicable to one are not applicable to
the other. In some cases, they may have their own options to achieve similar tasks, but must be specified separately.
## Prerequisites
- [Quorum installed](/Getting%20Started/Installing.md)
- [Tessera/Constellation installed](/Getting%20Started/Installing.md) if using private transactions
- A running network (see [Creating a Network From Scratch](/Getting%20Started/Creating-A-Network-From-Scratch))
## Adding Quorum nodes
Adding a new Quorum node is the most common operation, as you can choose to run a Quorum node with or without a Private
Transaction Manager, but rarely will one do the opposite.
### Raft
1. On an *existing* node, add the new peer to the raft network
```
> raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@127.0.0.1:21006?discport=0&raftport=50407")
7
```
So in this example, our new node has a Raft ID of `7`.
2. If you are using permissioning, or discovery for Ethereum p2p, please refer [here](#extra-options).
3. We now need to initialise the new node with the network's genesis configuration.
!!! note
Where you obtain this from will be dependent on the network. You may get it from an existing peer, or a network operator, or elsewhere entirely.
Initialising the new node is exactly the same as for the original nodes.
```bash
$ geth --datadir qdata/dd7 init genesis.json
```
4. Now we can start up the new node and let it sync with the network. The main difference now is the use of the
`--raftjoinexisting` flag, which lets the node know that it is joining an existing network, which is handled
differently internally. The Raft ID obtained in step 1 is passed as a parameter to this flag.
```bash
$ PRIVATE_CONFIG=ignore geth --datadir qdata/dd7 ... OTHER ARGS ... --raft --raftport 50407 --rpcport 22006 --port 21006 --raftjoinexisting 7
```
The new node is now up and running, and will start syncing the blockchain from existing peers. Once this has
completed, it can send new transactions just as any other peer.
### IBFT/Clique
Adding nodes to an IBFT/Clique network is a bit simpler, as the node only needs to configure itself rather than be
pre-allocated on the network (permissioning aside).
1. Initialise the new node with the network's genesis configuration.
!!! note
Where you obtain this from will be dependent on the network. You may get it from an existing peer, or a network operator, or elsewhere entirely.
Initialising the new node is exactly the same as for the original nodes.
```bash
$ geth --datadir qdata/dd7 init genesis.json
```
2. If you are using permissioning or discovery for Ethereum peer-to-peer, please refer [here](#extra-options).
3. Start the new node, pointing either to a `bootnode` or listing an existing peer in the `static-nodes.json` file.
Once a connection is established, the node will start syncing the blockchain, after which transactions can be sent.
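A minimal sketch of such a startup, assuming an existing peer is already listed in the new node's `static-nodes.json` (ports and the elided flags are placeholders):

```bash
# Start the new node; it will connect to the peers in static-nodes.json and begin syncing
$ PRIVATE_CONFIG=ignore geth --datadir qdata/dd7 ... OTHER ARGS ... --port 21006 --rpcport 22006
```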
### Extra options
Some options take effect regardless of the consensus mechanism used.
#### Permissioned nodes
If using the `permissioned-nodes.json` file for permissioning, then you must make sure this file is updated on all
nodes before the new node is able to communicate with existing nodes. You do not need to restart any nodes in
order for the changes to take effect.
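As an illustration, the change is just an edit to the JSON file on each node; in the layout below the first entry is a placeholder and the second reuses the example enode URI from the Raft section above:

```bash
# permissioned-nodes.json on every existing node gains the new node's enode URI
$ cat qdata/dd1/permissioned-nodes.json
[
  "enode://<existing node key>@127.0.0.1:21000?discport=0",
  "enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@127.0.0.1:21006?discport=0&raftport=50407"
]
```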
#### Static node connections
If not using peer-to-peer node discovery (i.e. you have specified `--nodiscover`), then the only connections a node
makes will be to peers defined in the `static-nodes.json` file. When adding a new node, you should make sure you have
peers defined in its `static-nodes.json` file. The more peers you have defined here, the better network connectivity
and fault tolerance you have.
!!! note
    * You do not need to update the existing peers' static nodes for the connection to be established, although it is good practice to do so.
* You do not need to specify every peer in your static nodes file if you do not wish to connect to every peer directly.
#### Peer-to-peer discovery
If you are using discovery, then more options *in addition* to static nodes become available.
- Any nodes that are connected to your peers, which at the start will be the ones defined in the static node list, will
then be visible to you, allowing you to connect to them; this is done automatically.
- You may specify any number of bootnodes, defined by the `--bootnodes` parameter. This takes a comma-separated list
of enode URIs, similar to the `static-nodes.json` file. These act in the same way as static nodes, letting you connect
to them and then find out about other peers, whom you then connect to.
!!! note
If you have discovery disabled, this means you will not try to find other nodes to connect to, but others can still find and connect to you.
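A sketch of the flag with two bootnodes (the keys and addresses are placeholders):

```bash
$ geth ... OTHER ARGS ... \
    --bootnodes enode://<key1>@10.0.0.1:30301,enode://<key2>@10.0.0.2:30301
```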
## Adding Private Transaction Managers
In this tutorial, there will be no focus on the advanced features of adding a new Private Transaction Manager (PTM).
This tutorial uses [Tessera](https://github.com/jpmorganchase/tessera) for any examples.
Adding a new node to the PTM is relatively straightforward, but there are a lot of extra options that can be used,
which is what will be explained here.
### Adding a new PTM node
In a basic setting, adding a new PTM node is as simple as making sure you have one of the existing nodes listed in your
peer list.
In Tessera, this would equate to the following in the configuration file:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
}
]
}
```
From there, Tessera will connect to that peer and discover all the other PTM nodes in the network, connecting to each
of them in turn.
!!! note
You may want to include multiple peers in the peer list in case any of them are offline/unreachable.
### IP whitelisting
The IP Whitelist that Tessera provides allows you to restrict connections much like the `permissioned-nodes.json` file
does for Quorum. Only IP addresses/hostnames listed in your peers list will be allowed to connect to you.
See the [Tessera configuration page](/Privacy/Tessera/Configuration/Configuration%20Overview#whitelist) for details on setting it up.
In order to make sure the new node is accepted into the network:
1. You will need to add the new peer to each of the existing nodes before communication is allowed.
Tessera provides a way to do this without needing to restart an already running node:
```bash
$ java -jar tessera.jar admin -configfile /path/to/existing-node-config.json -addpeer http://newpeer.com:8080
```
2. The new peer can be started, setting the `peers` configuration to mirror the existing network.
e.g. if there are 3 existing nodes in the network, then the new node's configuration will look like this:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
},
{
"url": "http://existingpeer2.com:8080"
},
{
"url": "http://existingpeer3.com:8080"
}
]
}
```
The new node will allow incoming connections from the existing peers, and then existing peers will allow incoming
connections from the new peer!
### Discovery
Tessera discovery is very similar to the IP whitelist. The difference is that the IP whitelist blocks
communications between nodes, whereas disabling discovery only affects which public keys we keep track of.
See the [Tessera configuration page](/Privacy/Tessera/Configuration/Configuration%20Overview#disabling-peer-discovery) for
details on setting it up.
When discovery is disabled, Tessera will only allow keys that are owned by a node in its peer list to be available to
the users. This means that if any keys are found that are owned by a node NOT in our peer list, they are discarded and
private transactions cannot be sent to that public key.
!!! note
This does not affect incoming transactions. Someone not in your peer list can still send transactions to your node, unless you also enable the IP Whitelist option.
In order to make sure the new node is accepted into the network:
1. You will need to add the new peer to each of the existing nodes before they will accept public keys that are linked
to the new peer.
Tessera provides a way to do this without needing to restart an already running node:
```bash
$ java -jar tessera.jar admin -configfile /path/to/existing-node-config.json -addpeer http://newpeer.com:8080
```
2. The new peer can be started, setting the `peers` configuration to mirror the existing network.
e.g. if there are 3 existing nodes in the network, then the new node's configuration will look like this:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
},
{
"url": "http://existingpeer2.com:8080"
},
{
"url": "http://existingpeer3.com:8080"
}
]
}
```
The new node will now record public keys belonging to the existing peers, and then existing peers will record
public keys belonging to the new peer; this allows private transactions to be sent in both directions!
## Examples
For a walkthrough of some examples that put into action the above, check out [this guide](/How-To-Guides/add_node_examples)!
@ -8,7 +8,7 @@ The permissions smart contract design follows the Proxy-Implementation-Storage p
* `PermissionsInterface.sol`: This is the interface contract and holds the interfaces for permissions related actions. It has no business logic and forwards requests to the current implementation contract
* `PermissionsImplementation.sol`: This contract has the business logic for the permissions actions. It can receive requests only from a valid interface as defined in `PermissionsUpgradable.sol` and interacts with all the storage contracts for respective actions.
* `OrgManager.sol`: This contract stores data for organizations and sub organizations. It can receive requests from a valid implementation contract as defined in `PermissionsUpgradable.sol`
* `AccountManager.sol`: This contract receives requests from a valid implementation contract as defined in `PermissionsUpgradable.sol`. It stores the data of all accounts, their linkage to organizations and various roles. The contract also stores the status of an account. The account can be in any of the following statuses - `PendingApproval`, `Active`, `Suspended`, `Blacklisted` or `Revoked`
* `NodeManager.sol`: This contract receives requests from a valid implementation contract as defined in `PermissionsUpgradable.sol`. It stores the data of a node, its linkage to an organization or sub organization, and the status of the node. The node can be in any one of the following statuses - `PendingApproval`, `Approved`, `Deactivated` or `Blacklisted`
* `RoleManager.sol`: This contract receives requests from a valid implementation contract as defined in `PermissionsUpgradable.sol`. It stores data for various roles and the organization to which each is linked. The access at role level can be any one of the following:
- `Readonly` which allows only read operations
@ -849,7 +849,6 @@ The table below indicates the numeric value for various account status.
| Not In List | 0 |
| Pending Approval | 1 |
| Active | 2 |
| Suspended | 4 |
| Blacklisted | 5 |
| Revoked | 6 |
@ -0,0 +1,142 @@
title: Internals - Pluggable Architecture - Quorum
## Background
### Go Plugin
`geth` is written in the Go programming language. [Go 1.8 introduced](https://golang.org/doc/go1.8#plugin) a new plugin architecture
which allows for the creation of plugins (via the `plugin` build mode) and the use of those plugins at runtime (via the `plugin` package).
In order to utilize this architecture, there are strict requirements in developing plugins.
By using the network RPC interface, the plugin is independently built and distributed without having to rebuild `geth`.
Especially with gRPC interfaces, plugins can be written in different languages (see our [examples](../PluginDevelopment/#examples)).
This makes it easy for you to build a prototype feature or even a proprietary plugin for your organization's internal use.
We use HashiCorp's [`go-plugin`](https://github.com/hashicorp/go-plugin) library as it fits our needs
and it has been proven in many plugin-based production systems.
### Why we decided to use plugins
There are a number of benefits:
- Dynamically-linked binaries (which you get when using plugins) are much smaller than statically compiled binaries.
- We value the ability to isolate failures. E.g.: the Quorum client would continue mining/validating even if the security plugin has crashed.
- Easily enables support for open source plugins written in languages other than Go.
## Design
```plantuml
skinparam componentStyle uml2
skinparam shadowing false
skinparam backgroundColor transparent
skinparam rectangle {
roundCorner<<component>> 25
}
file "JSON File" as json
file "TOML File" as toml
note left of toml : Standard Ethereum Config
note right of json : Quorum Plugin Settings
node "geth" <<process>> {
rectangle "CLI Flags" as flags
frame "plugin.Settings" as settings {
storage "Plugin1\nDefinition" as pd1
storage "Plugin2\nDefinition" as pd2
storage "Plugin Central\nConnectivity" as pcc
}
json <-down- flags : "via\n""--plugins"""
toml <-down- flags : "via\n""--config"""
flags -down-> settings : populate
interface """node.Service""" as service
rectangle """plugin.PluginManager""" <<geth service>> as pm
note right of pm
registered and managed
as standard ""geth""
service life cycle
end note
pm -up- service
pm -up- settings
card "arbitrary" <<component>> as arbitrary
interface "internal1" as i1
interface "internal2" as i2
interface "internal3" as i3
package "Plugin Interface 1" {
rectangle "Plugin1" <<template>> as p1
rectangle "Gateway1" <<adapter>> as p1gw1
rectangle "Gateway2" <<adapter>> as p1gw2
interface "grpc service interface1A" as grpcI1A
interface "grpc service interface1B" as grpcI1B
rectangle "GRPC Stub Client1" <<grpc client>> as grpcC1
}
package "Plugin Interface 2" {
rectangle "Plugin2" <<template>> as p2
rectangle "Gateway" <<adapter>> as p2gw
interface "grpc service interface2" as grpcI2
rectangle "GRPC Stub Client2" <<grpc client>> as grpcC2
}
pm -- p1
pm -- p2
arbitrary --( i1
arbitrary --( i2
arbitrary --( i3
p1gw1 -- i1
p1gw2 -- i2
p2gw -- i3
p1 -- p1gw1
p1 -- p1gw2
p2 -- p2gw
grpcC1 --( grpcI1A
grpcC1 --( grpcI1B
grpcC2 --( grpcI2
p1gw1 --> grpcC1 : use
p1gw2 --> grpcC1 : use
p2gw --> grpcC2 : use
}
node "Plugin1" <<process>> {
rectangle "Implementation" <<grpc server>> as impl1
}
node "Plugin2" <<process>> {
rectangle "Implementation" <<grpc server>> as impl2
}
impl1 -up- grpcI1A
impl1 -up- grpcI1B
impl2 -up- grpcI2
```
### Discovery
The Quorum client reads the plugin [settings](../Settings) file to determine which plugins are going to be loaded and searches for installed plugins
(`<name>-<version>.zip` files) in the plugin `baseDir` (defaults to `<datadir>/plugins`). If the required plugin doesn't exist in the path, Quorum will attempt to use the configured `plugin central` to download the plugin.
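As a sketch, an installed plugin under the default `baseDir` would look something like this (the plugin name and version are hypothetical):

```bash
$ ls <datadir>/plugins
helloworld-1.0.0.zip
```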
### PluginManager
The `PluginManager` manages the plugins being used inside `geth`. It reads the [configuration](../Settings) and builds a registry of plugins.
`PluginManager` implements the standard `Service` interface in `geth`, so it is embedded in the standard `geth` service life cycle: it exposes service APIs and is started and stopped with the node.
The `PluginManager` service is registered as early as possible in the node life cycle, ensuring the node fails fast if an issue is encountered while registering the `PluginManager`, without impacting other services.
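For orientation, the `Service` contract the `PluginManager` satisfies looks roughly like the sketch below. The struct's contents and the method bodies are illustrative assumptions, not Quorum's actual implementation:
```go
package plugin

import (
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
)

// PluginManager sketch: the real type also holds the plugin registry
// built from plugin.Settings.
type PluginManager struct{}

// Protocols: the PluginManager contributes no devp2p protocols of its own.
func (pm *PluginManager) Protocols() []p2p.Protocol { return nil }

// APIs would expose the plugin RPC APIs, e.g. admin_reloadPlugin.
func (pm *PluginManager) APIs() []rpc.API { return nil }

// Start is invoked by the node life cycle and launches the plugin processes.
func (pm *PluginManager) Start(server *p2p.Server) error { return nil }

// Stop terminates the running plugin processes.
func (pm *PluginManager) Stop() error { return nil }
```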
### Plugin Reloading
The `PluginManager` exposes an API (`admin_reloadPlugin`) that reloads a plugin by restarting the plugin process.
Any changes made to the plugin config after the initial node start are applied when the plugin is reloaded.
This is demonstrated in the [HelloWorld plugin example](../Overview/#example-helloworld-plugin).
@ -0,0 +1,140 @@
title: Overview - Pluggable Architecture - Quorum
The Quorum client is a modified `geth` client. One of the unique enhancements
is the pluggable architecture which allows adding additional features as plugins to the core `geth`,
providing extensibility, flexibility, and isolation of Quorum features.
## Benefits
This enhancement provides a number of benefits, including:
1. Allowing the implementation of certain components of the Quorum client to be changed at configuration time.
1. Supporting our community to improve the Quorum client with their own innovative implementations of the supported pluggable components.
1. Decoupling new Quorum-specific features from core `geth` thereby simplifying the process of pulling in changes from upstream `geth`.
## How it works
Each plugin exposes an implementation of a specific [plugin interface](https://github.com/jpmorganchase/quorum-plugin-definitions) (see `Pluggable Architecture -> Plugins` for more details).
Each plugin runs as a separate process and communicates with the main Quorum client `geth` process
over a [gRPC](https://grpc.io/) interface.
The plugin implementation must adhere to certain gRPC services defined in a `.proto` file corresponding to the plugin interface.
Plugins can be written in different languages as gRPC provides a mechanism to generate stub code from `.proto` files.
The network communication and RPC are handled automatically by the [high-level plugin library](https://github.com/hashicorp/go-plugin).
## Installing Plugins
Currently plugins must be manually installed into a directory (this defaults to the `plugins` directory inside the `geth` data directory; the default can be overridden by setting `baseDir` in the [plugins settings](../Settings)).
## Using Plugins
The [plugins settings file](../Settings) contains JSON that describes which plugins are to be used.
Then start `geth` with `--plugins` as below:
```bash
geth ... \
--plugins file:///<path>/<to>/plugins.json
```
## Plugin Integrity Verification
Plugin Central Server can be used to download and verify plugin integrity using [PGP](https://en.wikipedia.org/wiki/Pretty_Good_Privacy).
The architecture enables the same verification process to be performed locally via the `--plugins.localverify` and `--plugins.publickey` flags, or
remotely with a custom plugin central; see the [`Settings`](../Settings/) section for more information on how to support a custom plugin central.
If the flag `--plugins.skipverify` is provided at runtime the plugin verification process will be disabled.
!!! warning
Using `--plugins.skipverify` is not advised for production settings and it should be avoided as it introduces security risks.
## Example: `HelloWorld` plugin
The plugin interface is implemented in Go and Java. In this example, the `HelloWorld` plugin exposes a JSON-RPC endpoint
that returns a greeting message in the configured language.
The plugin is [reloadable](../Internals/#plugin-reloading), meaning it can pick up changes from its JSON configuration without a node restart.
### Build plugin distribution file
1. Clone plugin repository
```bash
git clone --recursive https://github.com/jpmorganchase/quorum-plugin-hello-world.git
cd quorum-plugin-hello-world
```
1. Here we will use the Go implementation of the plugin
```bash
quorum-plugin-hello-world cd go
quorum-plugin-hello-world/go make
```
`quorum-plugin-hello-world-1.0.0.zip` is now created in the `build` directory.
Notice that there's a file `hello-world-plugin-config.json`, which is the JSON configuration file for the plugin.
### Start Quorum with plugin support
1. Build Quorum
```bash
git clone https://github.com/jpmorganchase/quorum.git
cd quorum
quorum make geth
```
1. Copy `HelloWorld` plugin distribution file and its JSON configuration `hello-world-plugin-config.json` to `build/bin`
1. Create `geth-plugin-settings.json`
```
quorum cat > build/bin/geth-plugin-settings.json <<EOF
{
"baseDir": "./build/bin",
"providers": {
"helloworld": {
"name":"quorum-plugin-hello-world",
"version":"1.0.0",
"config": "file://./build/bin/hello-world-plugin-config.json"
}
}
}
EOF
```
1. Run `geth` with plugin
```bash
quorum PRIVATE_CONFIG=ignore \
geth \
--nodiscover \
--verbosity 5 \
--networkid 10 \
--raft \
--raftjoinexisting 1 \
--datadir ./build/_workspace/test \
--rpc \
--rpcapi eth,debug,admin,net,web3,plugin@helloworld \
--plugins file://./build/bin/geth-plugin-settings.json \
--plugins.skipverify
```
`ps -ef | grep helloworld` would reveal the `HelloWorld` plugin process
### Test the plugin
1. Call the JSON RPC
```bash
quorum curl -X POST http://localhost:8545 \
-H "Content-type: application/json" \
--data '{"jsonrpc":"2.0","method":"plugin@helloworld_greeting","params":["Quorum Plugin"],"id":1}'
{"jsonrpc":"2.0","id":1,"result":"Hello Quorum Plugin!"}
```
1. Update plugin config to support `es` language
```bash
# update language to "es"
quorum vi build/bin/hello-world-plugin-config.json
```
1. Reload the plugin
```bash
quorum curl -X POST http://localhost:8545 \
-H "Content-type: application/json" \
--data '{"jsonrpc":"2.0","method":"admin_reloadPlugin","params":["helloworld"],"id":1}'
{"jsonrpc":"2.0","id":1,"result":true}
```
1. Call the JSON RPC
```bash
quorum curl -X POST http://localhost:8545 \
-H "Content-type: application/json" \
--data '{"jsonrpc":"2.0","method":"plugin@helloworld_greeting","params":["Quorum Plugin"],"id":1}'
{"jsonrpc":"2.0","id":1,"result":"Hola Quorum Plugin!"}
```
@ -0,0 +1,102 @@
title: Plugin Development - Pluggable Architecture - Quorum
We leverage HashiCorp's [`go-plugin`](https://github.com/hashicorp/go-plugin) to enable our plugin-based architecture using gRPC.
We recommend reading the [`go-plugin` gRPC examples](https://github.com/hashicorp/go-plugin/tree/master/examples/grpc).
Some advanced topics which are not available in the `go-plugin` documentation will be covered here.
## Life Cycle
A plugin is started as a separate process and communicates with the Quorum client host process via gRPC service interfaces,
over a mutually-authenticated TLS connection on the local machine. This is implemented inside the `go-plugin`
library, so usage is simplest when developing plugins in Go. For plugins written in other languages, plugin authors need
an understanding of the following life cycle (see [Advanced topics for non-Go plugins](#advanced-topics-for-non-go-plugins) for more info):
1. `geth` looks for the plugin distribution file after reading the plugin definition from settings
1. `geth` verifies the plugin distribution file integrity
1. `geth` generates a self-signed certificate (aka client certificate)
1. `geth` spawns the plugin with the client certificate
1. The plugin imports the client certificate and generates a self-signed server certificate for its RPC server
1. The plugin includes the RPC server certificate in the handshake
1. `geth` imports the plugin RPC server certificate
1. `geth` and the plugin communicate via RPC over TLS using mutual TLS
Each plugin must implement the [`PluginInitializer`](#plugininitializer) gRPC service interface.
After the plugin process has started and the connection with the Quorum client has been established,
the Quorum client invokes the [`Init()`](#proto.PluginInitialization.Request) gRPC method to initialize the plugin with configuration data
read from the plugin definition's `config` field in the [settings](../Settings/#plugindefinition) file.
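To make the life cycle concrete, below is a minimal sketch of a Go plugin's entry point using `go-plugin`. The map key `"impl"`, the plugin struct, and the commented-out stub registrations are illustrative assumptions; only `plugin.Serve` and its configuration types come from the `go-plugin` library itself:
```go
package main

import (
	"context"

	"github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

// helloWorldPlugin wires gRPC services into go-plugin.
type helloWorldPlugin struct {
	plugin.NetRPCUnsupportedPlugin // this plugin is gRPC-only
}

// GRPCServer registers the plugin's gRPC services, including the mandatory
// PluginInitializer (registration calls shown as comments because the
// generated stub packages are assumptions).
func (p *helloWorldPlugin) GRPCServer(b *plugin.GRPCBroker, s *grpc.Server) error {
	// proto_common.RegisterPluginInitializerServer(s, &initializer{})
	// proto.RegisterPluginGreetingServer(s, &greeter{})
	return nil
}

// GRPCClient is unused on the plugin side; the client lives in geth.
func (p *helloWorldPlugin) GRPCClient(ctx context.Context, b *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
	return nil, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "QUORUM_PLUGIN_MAGIC_COOKIE",
			MagicCookieValue: "<value from the Magic Cookie section below>",
		},
		Plugins: map[string]plugin.Plugin{
			"impl": &helloWorldPlugin{},
		},
		// Serving over gRPC lets geth connect via mutually-authenticated TLS.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```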
## Distribution
### File format
The plugin distribution file must be a ZIP file. The file name format is `<name>-<version>.zip`.
`<name>` and `<version>` must be the same as the values defined in the [`PluginDefinition` object](../Settings/#plugindefinition) in the settings file.
### Metadata
A plugin metadata file `plugin-meta.json` must be included in the distribution ZIP file.
`plugin-meta.json` contains a valid JSON object with a flat structure of key-value pairs.
The JSON object can include any desired information; however, the key-value pairs described below are reserved, and the mandatory ones must be present.
```json
{
"name": string,
"version": string,
"entrypoint": string,
"parameters": array(string),
...
}
```
| Fields | Description |
|:-------------|:-------------------------------------------------------------------|
| `name` | (**Required**) Name of the plugin |
| `version` | (**Required**) Version of the plugin |
| `entrypoint` | (**Required**) Command to execute the plugin process |
| `parameters` | (**Optional**) Command parameters to be passed to the plugin process |
E.g.:
```json
{
"name": "quorum-plugin-helloWorld",
"version": "1.0.0",
"entrypoint": "helloWorldPlugin"
}
```
## Advanced topics for non-Go plugins
Writing non-Go plugins is well-documented in [`go-plugin` Github](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md).
Some additional advanced topics are described here.
### Magic Cookie
Magic Cookie key and value are used as a very basic verification that a plugin is intended to be launched.
This is not a security measure, just a UX feature.
Magic Cookie key and value are injected as an environment variable while executing the plugin process.
```
QUORUM_PLUGIN_MAGIC_COOKIE="CB9F51969613126D93468868990F77A8470EB9177503C5A38D437FEFF7786E0941152E05C06A9A3313391059132A7F9CED86C0783FE63A8B38F01623C8257664"
```
The plugin and the Quorum client's magic cookies are compared. If they are equal then the plugin is loaded. If they are not equal, the plugin should show human-friendly output.
### Mutual TLS Authentication
The Quorum client requires the plugin to authenticate and secure the connection via mutual TLS.
The `PLUGIN_CLIENT_CERT` environment variable is populated with the Quorum client certificate (in PEM format).
A plugin needs to add this certificate to its trusted certificate pool, then
generate a self-signed server certificate and append the base64-encoded value of that certificate (in DER format)
to the [handshake](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md#handshake) message.
<a name="plugininitializer"></a>
{!./PluggableArchitecture/Plugins/init_interface.md!}
## Examples
Please visit [Overview](../Overview/#example-helloworld-plugin) page for a built-in HelloWorld plugin example.
@ -0,0 +1,25 @@
title: helloworld - Plugin Implementation - Quorum
# `helloworld` Plugin
| Version | Language |
|:--------|:---------|
| 1.0.0 | Go |
## Configuration
```json
{
"language": string
}
```
| Fields | Description |
|:-----------|:-------------------------------------------------------------------------|
| `language` | A string indicating the language to greet. Supported values: `en` or `es` |
## Change Log
### v1.0.0
Initial release
@ -0,0 +1,71 @@
<!-- This is auto generated file from running `go generate` in plugin/proto folder. Please do not edit -->
<a name="helloworld.proto"></a>
## helloworld.proto
This plugin interface is to demonstrate a hello world plugin example
### Services
<a name="proto.PluginGreeting"></a>
#### `PluginGreeting`
Greeting remote service saying Hello in English and Spanish
| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ------------|
| Greeting | [PluginHelloWorld.Request](#proto.PluginHelloWorld.Request) | [PluginHelloWorld.Response](#proto.PluginHelloWorld.Response) | |
<!-- end services -->
### Messages
<a name="proto.PluginHelloWorld"></a>
#### `PluginHelloWorld`
A wrapper that logically groups other messages
<a name="proto.PluginHelloWorld.Request"></a>
#### `PluginHelloWorld.Request`
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| msg | [string](#string) | | a message to the plugin |
<a name="proto.PluginHelloWorld.Response"></a>
#### `PluginHelloWorld.Response`
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| msg | [string](#string) | | a response message from the plugin |
<!-- end messages -->
<!-- end enums -->
<!-- end HasExtensions -->
@ -0,0 +1,72 @@
<!-- This is auto generated file from running `go generate` in plugin/proto folder. Please do not edit -->
<a name="init.proto"></a>
## init.proto
It is __mandatory__ that every plugin implements this RPC service.
Via this service, plugins receive a raw configuration sent by `geth`.
It's up to the plugin to interpret and parse the configuration, then perform its initialization
to make sure the plugin is ready to serve.
### Services
<a name="proto_common.PluginInitializer"></a>
#### `PluginInitializer`
`Required`
RPC service to initialize the plugin after plugin process is started successfully
| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ------------|
| Init | [PluginInitialization.Request](#proto_common.PluginInitialization.Request) | [PluginInitialization.Response](#proto_common.PluginInitialization.Response) | |
<!-- end services -->
### Messages
<a name="proto_common.PluginInitialization"></a>
#### `PluginInitialization`
A wrapper message to logically group other messages
<a name="proto_common.PluginInitialization.Request"></a>
#### `PluginInitialization.Request`
Initialization data for the plugin
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| hostIdentity | [string](#string) | | `geth` node identity |
| rawConfiguration | [bytes](#bytes) | | Raw configuration to be processed by the plugin |
<a name="proto_common.PluginInitialization.Response"></a>
#### `PluginInitialization.Response`
<!-- end messages -->
<!-- end enums -->
<!-- end HasExtensions -->
@ -0,0 +1,87 @@
title: Settings - Pluggable Architecture - Quorum
`geth` can load plugins from:
- JSON file which is passed via `--plugins` flag
- Ethereum TOML configuration file which is passed via `--config` flag
```json tab="JSON"
{
"baseDir": string,
"central": object(PluginCentralConfiguration),
"providers": {
<string>: object(PluginDefinition)
}
}
```
```toml tab="TOML"
[Node.Plugins]
BaseDir = string
[Node.Plugins.Central]
.. = .. from object(PluginCentralConfiguration)
[[Node.Plugins.Providers]]
[[Node.Plugins.Providers.<string>]]
.. = .. from object(PluginDefinition)
```
| Fields | Description |
|:------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `baseDir`   | A string indicating the local directory from where plugins are read. If empty, defaults to `<datadir>/plugins`. <br/> To read from an arbitrary environment variable (e.g. `MY_BASE_DIR`), provide the value `env://MY_BASE_DIR` |
| `central` | A configuration of the remote plugin central. See [PluginCentralConfiguration](#plugincentralconfiguration) |
| `providers` | A map of the supported plugin interfaces being used (e.g. `helloworld`), mapped to their respective plugin provider definitions (see [PluginDefinition](#plugindefinition)) |
| `<string>`  | A string constant identifying the plugin interface, e.g. `helloworld` |
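For instance, the `HelloWorld` example's JSON settings expressed in the TOML form above might look like the following sketch (mirroring the schema as written; exact field placement is an assumption):
```toml
[Node.Plugins]
BaseDir = "./build/bin"
[[Node.Plugins.Providers]]
  [[Node.Plugins.Providers.helloworld]]
  Name = "quorum-plugin-hello-world"
  Version = "1.0.0"
  Config = "file://./build/bin/hello-world-plugin-config.json"
```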
## `PluginCentralConfiguration`
[Plugin Integrity Verification](../Overview/#plugin-integrity-verification) uses the Quorum Plugin Central Server by default.
Modify this section to configure your own plugin central for Plugin Integrity Verification:
```json tab="JSON"
{
"baseURL": string,
"certFingerprint": string,
"publicKeyURI": string,
"insecureSkipTLSVerify": bool
}
```
```toml tab="TOML"
BaseURL = string
CertFingerPrint = string
PublicKeyURI = string
InsecureSkipTLSVerify = bool
```
| Fields | Description |
|:------------------------|:--------------------------------------------------------------------------------------------------------------------------|
| `baseURL` | A string indicating the remote plugin central URL (ex.`https://plugins.mycorp.com`) |
| `certFingerprint`       | A string containing the hex representation of the HTTP server public key fingerprint <br/>to be used for certificate pinning  |
| `publicKeyURI` | A string defining the location of the PGP public key <br/>to be used to perform the signature verification |
| `insecureSkipTLSVerify` | If true, **do not** verify the server's certificate chain and host name |
## `PluginDefinition`
Defines the plugin and its configuration
```json tab="JSON"
{
"name": string,
"version": string,
"config": file/string/array/object
}
```
```toml tab="TOML"
Name = string
Version = string
Config = file/string/array/object
```
| Fields | Description |
|:----------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `name` | A string specifying the name of the plugin |
| `version` | A string specifying the version of the plugin |
| `config` | Value can be: <ul><li>uri format: supports the following schemes<ul><li>`file`: location of plugin config file to be read. E.g.: `file:///opt/plugin.cfg`</li><li>`env`: value from an environment variable. E.g.: `env://MY_CONFIG_JSON`<br/>To indicate value is a file location: append `?type=file`. E.g.: `env://MY_CONFIG_FILE?type=file`</li></ul><li>string: an arbitrary JSON string</li><li>array: a valid JSON array E.g.: `["1", "2", "3"]`</li><li>object: a valid JSON object. E.g.: `{"foo" : "bar"}`</li></ul> |
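As an illustration, a `helloworld` provider definition that pulls its configuration from an environment variable pointing at a file (the variable name here is hypothetical) could look like:
```json
"providers": {
  "helloworld": {
    "name": "quorum-plugin-hello-world",
    "version": "1.0.0",
    "config": "env://HELLO_WORLD_CONFIG?type=file"
  }
}
```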
@ -29,6 +29,54 @@ Tessera's database uses JDBC to connect to an external database. Any valid JDBC
}
```
#### Obfuscate database password in config file
Certain entries in the Tessera config file should be obfuscated to prevent attackers from gaining access to critical parts of the application (e.g. the database). The database password can be encrypted using [Jasypt](http://www.jasypt.org) to avoid exposing it as plain text in the configuration file.
To enable this feature, simply replace your plain-text database password with its encrypted value, wrapped inside an `ENC()` function.
```json
"jdbc": {
"username": "sa",
"password": "ENC(ujMeokIQ9UFHSuBYetfRjQTpZASgaua3)",
"url": "jdbc:h2:/qdata/c1/db1",
"autoCreateTables": true
}
```
Being a password-based encryptor, Jasypt requires a secret key (password) and a configured algorithm to encrypt/decrypt this config entry. This password can be loaded into Tessera from the file system or from user input. For file system input, the location of the secret file must be set in the environment variable `TESSERA_CONFIG_SECRET`.
If the database password is not wrapped inside `ENC()`, Tessera simply treats it as a plain-text password; however, this approach is not recommended for production environments.
!!! note
Jasypt encryption is currently only available for the `jdbc.password` field
##### How to encrypt database password
1. Download and unzip [Jasypt](http://www.jasypt.org), then change to the `bin` directory
1. Encrypt the password
``` bash
$ ./encrypt.sh input=dbpassword password=quorum
----ENVIRONMENT-----------------
Runtime: Oracle Corporation Java HotSpot(TM) 64-Bit Server VM 25.171-b11
----ARGUMENTS-------------------
input: dbpassword
password: quorum
----OUTPUT----------------------
rJ70hNidkrpkTwHoVn2sGSp3h3uBWxjb
```
1. Place the wrapped output, `ENC(rJ70hNidkrpkTwHoVn2sGSp3h3uBWxjb)`, in the config json file
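For example (paths illustrative), loading the Jasypt password from the file system at startup might look like:
```bash
# Store the Jasypt encryption password used in the encrypt.sh step above
echo "quorum" > /secure/jasypt-secret.txt

# Tell Tessera where to find it, then start as usual
export TESSERA_CONFIG_SECRET=/secure/jasypt-secret.txt
tessera -configfile /qdata/c1/config.json
```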
---
### Server
@ -135,6 +183,29 @@ Unix Socket:
### TLS/SSL: server sub-config
See [TLS/SSL](../TLS) page.
### CORS: server sub-config
For the ThirdParty server type it may be relevant to configure CORS.
```
{
"app":"ThirdParty",
"enabled": true,
"serverAddress": "http://localhost:9081",
"communicationType" : "REST",
"cors" : {
"allowedMethods" : ["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD"],
"allowedOrigins" : ["http://localhost:63342"],
"allowedHeaders" : ["content-type"],
"allowCredentials" : true
}
},
```
The configurable fields are:
* `allowedMethods` - the list of allowed HTTP methods. If omitted the default list containing `"GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD"` is used.
* `allowedOrigins` - the list of domains from which to accept cross-origin requests (browser enforced). Each entry in the list can contain the "*" (wildcard) character which matches any sequence of characters, e.g. "*localhost" would match "http://localhost" or "https://localhost". There is no default for this field.
* `allowedHeaders` - the list of allowed headers. If omitted the request `Access-Control-Request-Headers` are copied into the response as `Access-Control-Allow-Headers`.
* `allowCredentials` - the value for the `Access-Control-Allow-Credentials` response header. If omitted, the default value `true` is used.
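As a quick sanity check (illustrative only, against the sample ThirdParty server above), a CORS preflight request can be issued with `curl` and the `Access-Control-Allow-*` response headers inspected:
```bash
curl -i -X OPTIONS http://localhost:9081/keys \
  -H "Origin: http://localhost:63342" \
  -H "Access-Control-Request-Method: GET"
# Expect Access-Control-Allow-Origin and Access-Control-Allow-Methods
# headers reflecting the configured values in the response
```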
### InfluxDB Config: server sub-config
Configuration details to allow Tessera to record monitoring data to a running InfluxDB instance.
```
@ -194,4 +265,37 @@ Default configuration for this is `false` as this is BREAKABLE change to lower v
---
### Encryptor - Supporting alternative curves in Tessera
By default Tessera uses the [NaCl (salt)](https://nacl.cr.yp.to/) library to encrypt private payloads (using a particular combination of Curve25519, Salsa20, and Poly1305 under the hood).
Alternative curves/symmetric ciphers can be used by configuring the EC Encryptor (which relies on JCA to perform logic similar to NaCl's).
This feature was introduced in Tessera v0.10.2. Providing no `encryptor` configuration results in the standard pre-v0.10.2 Tessera behaviour.
```
"encryptor": {
"type":"EC",
"properties":{
"symmetricCipher":"AES/GCM/NoPadding",
"ellipticCurve":"secp256r1",
"nonceLength":"24",
"sharedKeyLength":"32"
}
}
```
Field|Default Value|Description
-------------|-------------|-----------
`type`|`NACL`|The encryptor type. Possible values are `EC` or `NACL`.
If `type` is set to `EC`, the following `properties` fields can also be configured:
Field|Default Value|Description
-------------|-------------|-----------
`ellipticCurve`|`secp256r1`|The elliptic curve to use. See [SunEC provider](https://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html#SunEC) for other options. Depending on the JCE provider you are using there may be additional curves available.
`symmetricCipher`|`AES/GCM/NoPadding`|The symmetric cipher to use for encrypting data (GCM is mandatory, as an initialisation vector is supplied during encryption).
`nonceLength`|`24`|The nonce length (used as the initialization vector - IV - for symmetric encryption).
`sharedKeyLength`|`32`|The key length used for symmetric encryption (keep in mind the key derivation operation always produces 32 byte keys - so the encryption algorithm must support it).
---
@ -1,9 +1,31 @@
!!! warning "Change from Tessera v0.10.2+"
The `keys.keyData.passwords` field is no longer supported as of Tessera v0.10.2.
Instead, use `keys.keyData.passwordFile` or utilise the [CLI password prompt](#providing-key-passwords-at-runtime) when starting the node.
Tessera uses cryptographic keys to provide transaction privacy.
You can use existing private/public key pairs as well as use Tessera to generate new key pairs for you. See [Generating & securing keys](../../Tessera%20Services/Keys/Keys) for more info.
```json tab="v0.10.3 onwards"
"keys": {
"passwordFile": "Path",
"keyVaultConfig": [
{
"keyVaultType": "Enumeration: AZURE, HASHICORP, AWS",
"properties": "Map[string]string"
}
],
"keyData": [
{
// The data for a private/public key pair
}
]
}
```
```json tab="v0.10.2"
"keys": {
"passwords": [],
"passwordFile": "Path",
"azureKeyVaultConfig": {
"url": "Url"
@ -16,7 +38,34 @@ You can use existing private/public key pairs as well as use Tessera to generate
},
"keyData": [
{
// The data for a private/public key pair
}
]
}
```
```json tab="v0.10.1 and earlier"
"keys": {
"passwords": [],
"passwordFile": "Path",
"azureKeyVaultConfig": {
"url": "Url"
},
"hashicorpKeyVaultConfig": {
"url": "Url",
"approlePath": "String",
"tlsKeyStorePath": "Path",
"tlsTrustStorePath": "Path"
},
"keyVaultConfig": {
"keyVaultConfigType": "AWS",
"properties": {
"endpoint": "Url"
}
},
"keyData": [
{
// The data for a private/public key pair
}
]
}
@ -25,11 +74,14 @@ You can use existing private/public key pairs as well as use Tessera to generate
## KeyData
Key pairs can be provided in several ways:
### Direct key pairs
!!! warning
    Direct key pairs and unprotected inline key pairs are convenient but are the least secure configuration options available as the private key is exposed in the configuration file. The other options available are more secure and recommended for production environments.
The key pair data is provided in plain text in the configfile:
```json
"keys": {
"keyData": [
{
@ -40,9 +92,14 @@ The key pair data is provided in plain text in the configfile:
}
```
### Inline key pairs
#### Unprotected
!!! warning
    Direct key pairs and unprotected inline key pairs are convenient but are the least secure configuration options available as the private key is exposed in the configuration file. The other options available are more secure and recommended for production environments.
The key pair data is provided in plain text in the configfile. The plain-text private key is provided in a `config` json object:
```json
"keys": {
"keyData": [
{
@ -58,11 +115,12 @@ The public key is provided in plain text. The private key is provided through a
}
```
#### Protected
The public key is provided in plain text. The private key must be password-protected using Argon2. The corresponding encrypted data is provided in the `config` json object.
```json tab="v0.10.2 onwards"
"keys": {
  "passwordFile": "/path/to/pwds.txt",
"keyData": [
{
"config": {
@ -85,9 +143,133 @@ This allows for the use of Argon2 password-secured private keys by including the
}
```
```json tab="v0.10.1 and earlier"
"keys": {
"passwords": ["password"],
"passwordFile": "/path/to/pwds.txt",
"keyData": [
{
"config": {
"data": {
"aopts": {
"variant": "id",
"memory": 1048576,
"iterations": 10,
"parallelism": 4,
},
"snonce": "x3HUNXH6LQldKtEv3q0h0hR4S12Ur9pC",
"asalt": "7Sem2tc6fjEfW3yYUDN/kSslKEW0e1zqKnBCWbZu2Zw=",
"sbox": "d0CmRus0rP0bdc7P7d/wnOyEW14pwFJmcLbdu2W3HmDNRWVJtoNpHrauA/Sr5Vxc"
},
"type": "argon2sbox"
},
"publicKey": "/+UuD63zItL1EbjxkKUljMgG8Z1w0AJ8pNOR4iq2yQc="
}
]
}
```
Passwords must be provided so that Tessera can decrypt and use the private keys. Passwords can be provided in multiple ways:
| | Description |
|--------|--------------|
| File | `"passwordFile": "/path/to/pwds.txt"`<br/>Must contain only one password per line. Empty lines should be used for unlocked keys. Passwords must be provided in the order that key pairs are defined in the config. |
| Direct | `"passwords": ["pwd1", "pwd2", ...]`<br/>Empty strings should be used for unlocked keys. Passwords must be provided in the order that key pairs are defined in the config. Not recommended for production use. |
| CLI | Tessera will prompt on the CLI for the passwords of any encrypted keys that have not had passwords provided in the config. This process only needs to be performed once, when starting the node. |
### Filesystem key pairs
The keys in the pair are stored in files:
```json tab="v0.10.2 onwards"
"keys": {
"passwordFile": "/path/to/pwds.txt",
"keyData": [
{
"privateKeyPath": "/path/to/privateKey.key",
"publicKeyPath": "/path/to/publicKey.pub"
}
]
}
```
```json tab="v0.10.1 and earlier"
"keys": {
"passwords": ["password"],
"passwordFile": "/path/to/pwds.txt",
"keyData": [
{
"privateKeyPath": "/path/to/privateKey.key",
"publicKeyPath": "/path/to/publicKey.pub"
}
]
}
```
The contents of the public key file must contain the public key only, e.g.:
```
/+UuD63zItL1EbjxkKUljMgG8Z1w0AJ8pNOR4iq2yQc=
```
The contents of the private key file must contain the private key in the Inline key pair format, e.g.:
```json
{
"type" : "unlocked",
"data" : {
"bytes" : "DK0HDgMWJKtZVaP31mPhk6TJNACfVzz7VZv2PsQZeKM="
}
}
```
or
```json
{
"data": {
"aopts": {
"variant": "id",
"memory": 1048576,
"iterations": 10,
"parallelism": 4,
},
"snonce": "x3HUNXH6LQldKtEv3q0h0hR4S12Ur9pC",
"asalt": "7Sem2tc6fjEfW3yYUDN/kSslKEW0e1zqKnBCWbZu2Zw=",
"sbox": "d0CmRus0rP0bdc7P7d/wnOyEW14pwFJmcLbdu2W3HmDNRWVJtoNpHrauA/Sr5Vxc"
},
"type": "argon2sbox"
}
```
Passwords must be provided so that Tessera can decrypt and use the private keys. Passwords can be provided in multiple ways:
| | Description |
|--------|--------------|
| File | `"passwordFile": "/path/to/pwds.txt"`<br/>Must contain only one password per line. Empty lines should be used for unlocked keys. Passwords must be provided in the order that key pairs are defined in the config. |
| Direct | `"passwords": ["pwd1", "pwd2", ...]`<br/>Empty strings should be used for unlocked keys. Passwords must be provided in the order that key pairs are defined in the config. Not recommended for production use. |
| CLI | Tessera will prompt on the CLI for the passwords of any encrypted keys that have not had passwords provided in the config. This process only needs to be performed once, when starting the node. |
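For example, a password file for three key pairs where the second pair is unlocked might look like this (passwords illustrative):
```
pwd-for-key1

pwd-for-key3
```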
### Azure Key Vault key pairs
The keys in the pair are stored as secrets in an Azure Key Vault. This requires providing the vault url and the secret IDs for both keys:
```json tab="v0.10.3 onwards"
"keys": {
"keyVaultConfig": {
"keyVaultType": "AZURE",
"properties": {
"url": "https://my-vault.vault.azure.net"
}
},
"keyData": [
{
"azureVaultPrivateKeyId": "Key",
"azureVaultPublicKeyId": "Pub",
"azureVaultPublicKeyVersion": "bvfw05z4cbu11ra2g94e43v9xxewqdq7",
"azureVaultPrivateKeyVersion": "0my1ora2dciijx5jq9gv07sauzs5wjo2"
}
]
}
```
```json tab="v0.10.2 and earlier"
"keys": {
"azureKeyVaultConfig": {
"url": "https://my-vault.vault.azure.net"
@ -105,11 +287,34 @@ The keys in the pair are stored as secrets in an Azure Key Vault. This requires
This example configuration will retrieve the specified versions of the secrets `Key` and `Pub` from the key vault with DNS name `https://my-vault.vault.azure.net`. If no version is specified then the latest version of the secret is retrieved.
!!! info
Environment variables must be set if using an Azure Key Vault, for more information see [Setting up an Azure Key Vault](../../Tessera%20Services/Keys/Setting%20up%20an%20Azure%20Key%20Vault)
#### 4. Hashicorp Vault key pairs
### Hashicorp Vault key pairs
The keys in the pair are stored as a secret in a Hashicorp Vault. Additional configuration can also be provided if the Vault is configured to use TLS and if the AppRole auth method is being used at a different path to the default (`approle`):
```json tab="v0.10.3 onwards"
"keyVaultConfig": {
"keyVaultType": "HASHICORP",
"properties": {
"url": "https://localhost:8200",
"tlsKeyStorePath": "/path/to/keystore.jks",
"tlsTrustStorePath": "/path/to/truststore.jks",
"approlePath": "not-default"
}
},
"keyData": [
{
"hashicorpVaultSecretEngineName": "engine",
"hashicorpVaultSecretName": "secret",
"hashicorpVaultSecretVersion": 1,
"hashicorpVaultPrivateKeyId": "privateKey",
"hashicorpVaultPublicKeyId": "publicKey",
}
]
```
```json tab="v0.10.2 and earlier"
"hashicorpKeyVaultConfig": {
"url": "https://localhost:8200",
"tlsKeyStorePath": "/path/to/keystore.jks",
@ -132,48 +337,84 @@ This example configuration will retrieve version 1 of the secret `engine/secret`
If no `hashicorpVaultSecretVersion` is provided then the latest version for the secret will be retrieved by default.
Tessera requires TLS certificates and keys to be stored in `.jks` Java keystore format. If the `.jks` files are password protected then the following environment variables must be set:
* `HASHICORP_CLIENT_KEYSTORE_PWD`
* `HASHICORP_CLIENT_TRUSTSTORE_PWD`
!!! info
If using a Hashicorp Vault additional environment variables must be set and a version 2 K/V secret engine must be enabled. For more information see [Setting up a Hashicorp Vault](../../Tessera%20Services/Keys/Setting%20up%20a%20Hashicorp%20Vault).
### AWS Secrets Manager key pairs
The keys in the pair are stored as secrets in the _AWS Secrets Manager_. This requires providing the secret IDs for both keys. The endpoint is optional as the _AWS SDK_ can fallback to its inbuilt property retrieval chain (e.g. using the environment variable `AWS_REGION` or `~/.aws/config` file - see [the AWS docs](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html) for similar behaviour explained in the context of credentials):
```json tab="v0.10.3 onwards"
"keys": {
"passwordFile": "/path/to/passwords",
"keyData": [
{
"privateKeyPath": "/path/to/privateKey.key",
"publicKeyPath": "/path/to/publicKey.pub"
}
]
}
```
The contents of the public key file must contain the public key only, e.g.:
```
/+UuD63zItL1EbjxkKUljMgG8Z1w0AJ8pNOR4iq2yQc=
"keyVaultConfig": {
"keyVaultConfigType": "AWS",
"properties": {
"endpoint": "https://secretsmanager.us-west-2.amazonaws.com"
}
},
"keyData": [
{
"awsSecretsManagerPublicKeyId": "secretIdPub",
"awsSecretsManagerPrivateKeyId": "secretIdKey"
}
]
}
```
This example configuration will retrieve the secrets `secretIdPub` and `secretIdKey` from the _AWS Secrets Manager_ using the endpoint `https://secretsmanager.us-west-2.amazonaws.com`.
!!! info
A `Credential should be scoped to a valid region` error when starting means that the region specified in the `endpoint` differs from the region the AWS SDK has retrieved from its [property retrieval chain](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html). This can be resolved by setting the `AWS_REGION` environment variable to the same region as defined in the `endpoint`.
!!! info
Environment variables must be set if using an _AWS Secrets Manager_, for more information see [Setting up an AWS Secrets Manager](../../Tessera%20Services/Keys/Setting%20up%20an%20AWS%20Secrets%20Manager)
## Providing key passwords at runtime
Tessera will start a CLI password prompt if it has incomplete password data for its locked keys. This prompt can be used to provide the required passwords for each key without having to provide them in the configfile itself.
For example:
```bash
tessera -configfile path/to/config.json
Password for key[0] missing or invalid.
Attempt 1 of 2. Enter a password for the key
2019-12-09 13:48:16.159 [main] INFO c.q.t.config.keys.KeyEncryptorImpl - Decrypting private key
2019-12-09 13:48:19.364 [main] INFO c.q.t.config.keys.KeyEncryptorImpl - Decrypted private key
# Tessera startup continues as normal
```
## Multiple Keys
If wished, multiple key pairs can be specified for a Tessera node. In this case, any one of the public keys can be used to address a private transaction to that node. Tessera will sequentially try each key to find one that can decrypt the payload. This can be used, for example, to simplify key rotation.
Note that multiple key pairs can only be set up within the configuration file, not via separate filesystem key files.
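As a sketch (public key values reused from the sample response below; private keys elided), a node holding two inline key pairs might be configured as:
```json
"keys": {
  "keyData": [
    {
      "privateKey": "...",
      "publicKey": "oNspPPgszVUFw0qmGFfWwh1uxVUXgvBxleXORHj07g8="
    },
    {
      "privateKey": "...",
      "publicKey": "ABn6zhBth2qpdrJXp98IvjExV212ALl3j4U//nj4FAI="
    }
  ]
}
```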
## Viewing the keys registered for a node
For Tessera v0.10.2 onwards the ThirdParty API `/keys` endpoint can be used to view the public keys of the key pairs currently in use by your Tessera node.
For Tessera v0.10.1 and earlier, an ADMIN API endpoint `/config/keypairs` exists to allow you to view the public keys of the key pairs currently in use by your Tessera node.
A sample response is:
```json tab="v0.10.2 onwards"
request: thirdpartyhost:port/keys
{
"keys" : [
{
"key" : "oNspPPgszVUFw0qmGFfWwh1uxVUXgvBxleXORHj07g8="
},
{
"key" : "ABn6zhBth2qpdrJXp98IvjExV212ALl3j4U//nj4FAI="
}
]
}
```
```json tab="v0.10.1 and earlier"
request: adminhost:port/config/keypairs
[
{
"publicKey" : "oNspPPgszVUFw0qmGFfWwh1uxVUXgvBxleXORHj07g8="
@ -183,3 +424,5 @@ A sample response for the request `adminhost:port/config/keypairs` is:
}
]
```
The corresponding server must be configured in the node's configuration file, as described in [Configuration Overview](../Configuration%20Overview).
@ -4,8 +4,77 @@ Tessera configuration varies by version as new features are added or changed. Be
| Version |
| ------------- |
| [0.10.3](../Tessera%20v0.10.3%20sample%20settings) |
| [0.10.2](../Tessera%20v0.10.2%20sample%20settings) |
| [0.10](../Tessera%20v0.10.0%20sample%20settings) |
| [0.9](../Tessera%20v0.9%20sample%20settings) |
| [0.8](../Tessera%20v0.8%20sample%20settings) |
| [0.7.3](../Tessera%20v0.7.3%20sample%20settings) |
## Changelist
### 0.10.3
- The `keys.azureKeyVaultConfig` and `keys.hashicorpKeyVaultConfig` fields are now deprecated. Instead, the generic `keys.keyVaultConfig` should be used. See [Keys Config](../Keys) for more info.
### 0.10.2
- The `keys.keyData.passwords` field is no longer supported. Instead, use `keys.keyData.passwordFile` or utilise the [CLI password prompt](../Keys#providing-key-passwords-at-runtime) when starting the node.
- Added configuration to choose alternative curves/symmetric ciphers. If no encryptor configuration is provided it will default to NaCl (see [Supporting alternative curves in Tessera](../Configuration Overview#supporting-alternative-curves-in-tessera) for more details).
e.g.
```json
{
"encryptor": {
"type":"EC",
"properties":{
"symmetricCipher":"AES/GCM/NoPadding",
"ellipticCurve":"secp256r1",
"nonceLength":"24",
"sharedKeyLength":"32"
}
},
...
}
```
### 0.10
- Added feature-toggle for remote key validation. Disabled by default.
```json
{
"features": {
"enableRemoteKeyValidation": false
},
...
}
```
### 0.9
- Collapsed server socket definitions into a single property `serverAddress`, e.g.
```json
{
"serverConfigs": [
{
"serverSocket": {
"type":"INET",
"port": 9001,
"hostName": "http://localhost"
},
...
}
],
...
}
```
becomes
```json
{
"serverConfigs": [
{
"serverAddress": "http://localhost:9001",
...
}
],
...
}
```
### 0.8
- Added modular server configurations
@ -0,0 +1,153 @@
**Changes:**
- The `keys.keyData.passwords` field is no longer supported. Instead, use `keys.keyData.passwordFile` or utilise the [CLI password prompt](../Keys#providing-key-passwords-at-runtime) when starting the node.
- Added configuration to choose alternative curves/symmetric ciphers. If no encryptor configuration is provided it will default to NaCl (see [Supporting alternative curves in Tessera](../Configuration Overview#supporting-alternative-curves-in-tessera) for more details).
e.g.
```
"encryptor": {
"type":"EC",
"properties":{
"symmetricCipher":"AES/GCM/NoPadding",
"ellipticCurve":"secp256r1",
"nonceLength":"24",
"sharedKeyLength":"32"
}
}
```
**Sample:**
```json
{
"useWhiteList": "boolean",
"jdbc": {
"url": "String",
"username": "String",
"password": "String"
},
"serverConfigs": [
{
"app": "ENCLAVE",
// Defines us using a remote enclave, leave out if using built-in enclave
"enabled": true,
"serverAddress": "http://localhost:9081",
//Where to find the remote enclave
"communicationType": "REST"
},
{
"app": "ThirdParty",
"enabled": true,
"serverAddress": "http://localhost:9081",
"bindingAddress": "String - url with port e.g. http://127.0.0.1:9081",
"communicationType": "REST"
},
{
"app": "Q2T",
"enabled": true,
"serverAddress": "unix:/tmp/tm.ipc",
"communicationType": "REST"
},
{
"app": "P2P",
"enabled": true,
"serverAddress": "http://localhost:9001",
"bindingAddress": "String - url with port e.g. http://127.0.0.1:9001",
"sslConfig": {
"tls": "enum STRICT,OFF",
"generateKeyStoreIfNotExisted": "boolean",
"serverKeyStore": "Path",
"serverTlsKeyPath": "Path",
"serverTlsCertificatePath": "Path",
"serverKeyStorePassword": "String",
"serverTrustStore": "Path",
"serverTrustCertificates": [
"Path..."
],
"serverTrustStorePassword": "String",
"serverTrustMode": "Enumeration: CA, TOFU, WHITELIST, CA_OR_TOFU, NONE",
"clientKeyStore": "Path",
"clientTlsKeyPath": "Path",
"clientTlsCertificatePath": "Path",
"clientKeyStorePassword": "String",
"clientTrustStore": "Path",
"clientTrustCertificates": [
"Path..."
],
"clientTrustStorePassword": "String",
"clientTrustMode": "Enumeration: CA, TOFU, WHITELIST, CA_OR_TOFU, NONE",
"knownClientsFile": "Path",
"knownServersFile": "Path"
},
"communicationType": "REST"
}
],
"peer": [
{
"url": "url e.g. http://127.0.0.1:9000/"
}
],
"keys": {
"passwordFile": "Path",
"azureKeyVaultConfig": {
"url": "Azure Key Vault url"
},
"hashicorpKeyVaultConfig": {
"url": "Hashicorp Vault url",
"approlePath": "String (defaults to 'approle' if not set)",
"tlsKeyStorePath": "Path to jks key store",
"tlsTrustStorePath": "Path to jks trust store"
},
"keyData": [
{
"config": {
"data": {
"aopts": {
"variant": "Enum : id,d or i",
"memory": "int",
"iterations": "int",
"parallelism": "int"
},
"bytes": "String",
"snonce": "String",
"asalt": "String",
"sbox": "String",
"password": "String"
},
"type": "Enum: argon2sbox or unlocked. If unlocked is defined then config data is required. "
},
"privateKey": "String",
"privateKeyPath": "Path",
"azureVaultPrivateKeyId": "String",
"azureVaultPrivateKeyVersion": "String",
"publicKey": "String",
"publicKeyPath": "Path",
"azureVaultPublicKeyId": "String",
"azureVaultPublicKeyVersion": "String",
"hashicorpVaultSecretEngineName": "String",
"hashicorpVaultSecretName": "String",
"hashicorpVaultSecretVersion": "Integer (defaults to 0 (latest) if not set)",
"hashicorpVaultPrivateKeyId": "String",
"hashicorpVaultPublicKeyId": "String"
}
]
},
"alwaysSendTo": [
"String..."
],
"unixSocketFile": "Path",
"features": {
"enableRemoteKeyValidation": false
},
"encryptor": {
"type": "Enumeration: NACL, EC",
"properties":{
"symmetricCipher":"String (defaults to AES/GCM/NoPadding if type = EC)",
"ellipticCurve": "String (defaults to secp256r1 if type = EC)",
"nonceLength": "String (defaults to 24 if type = EC)",
"sharedKeyLength": "String (defaults to 32 if type = EC)"
}
}
}
```
@ -0,0 +1,133 @@
**Changes:**
- The `keys.azureKeyVaultConfig` and `keys.hashicorpKeyVaultConfig` fields are now deprecated. Instead, the generic `keys.keyVaultConfig` should be used. See [Keys Config](../Keys) for more info.
**Sample:**
```json
{
"useWhiteList": "boolean",
"jdbc": {
"url": "String",
"username": "String",
"password": "String"
},
"serverConfigs": [
{
"app": "ENCLAVE",
// Defines us using a remote enclave, leave out if using built-in enclave
"enabled": true,
"serverAddress": "http://localhost:9081",
//Where to find the remote enclave
"communicationType": "REST"
},
{
"app": "ThirdParty",
"enabled": true,
"serverAddress": "http://localhost:9081",
"bindingAddress": "String - url with port e.g. http://127.0.0.1:9081",
"communicationType": "REST"
},
{
"app": "Q2T",
"enabled": true,
"serverAddress": "unix:/tmp/tm.ipc",
"communicationType": "REST"
},
{
"app": "P2P",
"enabled": true,
"serverAddress": "http://localhost:9001",
"bindingAddress": "String - url with port e.g. http://127.0.0.1:9001",
"sslConfig": {
"tls": "enum STRICT,OFF",
"generateKeyStoreIfNotExisted": "boolean",
"serverKeyStore": "Path",
"serverTlsKeyPath": "Path",
"serverTlsCertificatePath": "Path",
"serverKeyStorePassword": "String",
"serverTrustStore": "Path",
"serverTrustCertificates": [
"Path..."
],
"serverTrustStorePassword": "String",
"serverTrustMode": "Enumeration: CA, TOFU, WHITELIST, CA_OR_TOFU, NONE",
"clientKeyStore": "Path",
"clientTlsKeyPath": "Path",
"clientTlsCertificatePath": "Path",
"clientKeyStorePassword": "String",
"clientTrustStore": "Path",
"clientTrustCertificates": [
"Path..."
],
"clientTrustStorePassword": "String",
"clientTrustMode": "Enumeration: CA, TOFU, WHITELIST, CA_OR_TOFU, NONE",
"knownClientsFile": "Path",
"knownServersFile": "Path"
},
"communicationType": "REST"
}
],
"peer": [
{
"url": "url e.g. http://127.0.0.1:9000/"
}
],
"keys": {
"passwordFile": "Path",
"keyVaultConfig": {
"keyVaultType": "Enumeration: AZURE, HASHICORP, AWS",
"properties": "Map[string]string"
},
"keyData": [
{
"config": {
"data": {
"aopts": {
"variant": "Enum : id,d or i",
"memory": "int",
"iterations": "int",
"parallelism": "int"
},
"bytes": "String",
"snonce": "String",
"asalt": "String",
"sbox": "String",
"password": "String"
},
"type": "Enum: argon2sbox or unlocked. If unlocked is defined then config data is required. "
},
"privateKey": "String",
"privateKeyPath": "Path",
"azureVaultPrivateKeyId": "String",
"azureVaultPrivateKeyVersion": "String",
"publicKey": "String",
"publicKeyPath": "Path",
"azureVaultPublicKeyId": "String",
"azureVaultPublicKeyVersion": "String",
"hashicorpVaultSecretEngineName": "String",
"hashicorpVaultSecretName": "String",
"hashicorpVaultSecretVersion": "Integer (defaults to 0 (latest) if not set)",
"hashicorpVaultPrivateKeyId": "String",
"hashicorpVaultPublicKeyId": "String"
}
]
},
"alwaysSendTo": [
"String..."
],
"unixSocketFile": "Path",
"features": {
"enableRemoteKeyValidation": false
},
"encryptor": {
"type": "Enumeration: NACL, EC",
"properties":{
"symmetricCipher":"String (defaults to AES/GCM/NoPadding if type = EC)",
"ellipticCurve": "String (defaults to secp256r1 if type = EC)",
"nonceLength": "String (defaults to 24 if type = EC)",
"sharedKeyLength": "String (defaults to 32 if type = EC)"
}
}
}
```
@ -0,0 +1,51 @@
# CLI config overrides (v0.10.2 and earlier)
Standard Tessera CLI options are prefixed with a single hyphen (e.g. `-configfile <PATH>`), whilst the config override options are prefixed with a double hyphen (e.g. `--alwaysSendTo <STRING[]...>`). Use `tessera help` to see a complete list of CLI options.
If a config value is included in both the `configfile` and the CLI, then the CLI value will take precedence. The exceptions to this rule are the `--peer.url <STRING>` and `--alwaysSendTo <STRING[]...>` options. Instead of overriding, these CLI options append to any peer or alwaysSendTo urls in the provided `configfile`. For example, if the following was provided in a `configfile`:
```json
{
...
"peer": [
{
"url": "http://localhost:9001"
}
],
"alwaysSendTo": [
"giizjhZQM6peq52O7icVFxdTmTYinQSUsvyhXzgZqkE="
],
...
}
```
and Tessera was run with the following overrides:
```bash
tessera -configfile path/to/file --peer.url http://localhost:9002 --peer.url http://localhost:9003 --alwaysSendTo /+UuD63zItL1EbjxkKUljMgG8Z1w0AJ8pNOR4iq2yQc= --alwaysSendTo UfNSeSGySeKg11DVNEnqrUtxYRVor4+CvluI8tVv62Y=
```
then Tessera will be started with the following equivalent configuration:
```json
{
...
"peer": [
{
"url": "http://localhost:9001"
},
{
"url": "http://localhost:9002"
},
{
"url": "http://localhost:9003"
}
],
"alwaysSendTo": [
"giizjhZQM6peq52O7icVFxdTmTYinQSUsvyhXzgZqkE=",
"/+UuD63zItL1EbjxkKUljMgG8Z1w0AJ8pNOR4iq2yQc="
"UfNSeSGySeKg11DVNEnqrUtxYRVor4+CvluI8tVv62Y="
],
...
}
```
As demonstrated in this example, in certain cases multiple values can be provided by repeating the CLI option. This is supported for the `peer.url`, `alwaysSendTo`, `server.sslConfig.serverTrustCertificates` and `server.sslConfig.clientTrustCertificates` options.
!!! info
    The only `encryptor` field that can be overridden is `encryptor.type`. All other `encryptor` fields can only be set in the configfile. See [encryptor config](../../../Configuration/Configuration Overview/#encryptor-supporting-alternative-curves-in-tessera) for more details.
@ -0,0 +1,49 @@
# CLI config overrides
The `-o, --override` option is used to define overrides as key/value pairs, where the key is the JSON path of the field to be overridden.
For example, given `configfile.json`:
```json
{
...,
"jdbc" : {
"username" : "sa",
"password" : "",
"url" : "jdbc:h2:/path/to/db1;MODE=Oracle;TRACE_LEVEL_SYSTEM_OUT=0",
"autoCreateTables" : true,
"fetchSize" : 0
},
"peer" : [
{
"url" : "http://127.0.0.1:9001"
}
]
}
```
The command:
```bash
tessera --configfile configfile.json -o jdbc.username=username-override --override peer[1].url=http://peer-override:9001
```
will start Tessera with the following effective config:
```json
{
...,
"jdbc" : {
"username" : "username-override",
"password" : "",
"url" : "jdbc:h2:/path/to/db1;MODE=Oracle;TRACE_LEVEL_SYSTEM_OUT=0",
"autoCreateTables" : true,
"fetchSize" : 0
},
"peer" : [
{
"url" : "http://127.0.0.1:9001"
},
{
"url" : "http://peer-override:9001"
}
]
}
```
@ -1,47 +1,8 @@
CLI options can be used to add to, or override, configuration defined in a `configfile`.
`tessera help` and `tessera <command> help` will show the available list of commands and options.
| Version |
| ------------- |
| [0.10.3 onwards](../Using CLI to override config v0.10.3) |
| [0.10.2 and earlier](../Using CLI to override config pre-v0.10.3) |
@ -1,71 +1,105 @@
## Generating keys
### File-stored keys
Generate a key pair and save in new files `new.pub` and `new.key` (will start an interactive prompt to provide passwords):
```
tessera -keygen -filename new
```
Multiple key pairs can be generated at the same time by providing a comma-separated list of values:
```
tessera -keygen -filename /path/to/key1,/path/to/key2
```
To generate an unlocked key, the following can be used to tell Tessera to not expect any input:
```bash tab="v0.8.x onwards"
tessera -keygen < /dev/null
```
```bash tab="v0.7.x and earlier"
printf "\n\n" | tessera -keygen
```
### Azure Key Vault-stored keys
Generate a key pair as secrets with IDs `Pub` and `Key` and save to an Azure Key Vault with DNS name `<url>`:
```
tessera -keygen -keygenvaulttype AZURE -keygenvaulturl <url>
```
The `-filename` option can be used to specify alternate IDs. Multiple key pairs can be generated at the same time by providing a comma-separated list of values:
```
tessera -keygen -keygenvaulttype AZURE -keygenvaulturl <url> -filename id1,id2
```
!!! warning
If new keys are saved with the same ID as keys that already exist in the vault, the existing keys will be replaced by the newer version. When doing this, make sure to [specify the correct secret version in your Tessera configuration](../../../Configuration/Keys/#azure-key-vault-key-pairs).
!!! note
Environment variables must be set if using an Azure Key Vault, for more information see [Setting up an Azure key vault](../Setting%20up%20an%20Azure%20Key%20Vault)
### Hashicorp Vault-stored keys
Generate a key pair and save to a Hashicorp Vault at the secret path `secretEngine/secretName` with IDs `publicKey` and `privateKey`:
```bash
tessera -keygen -keygenvaulttype HASHICORP -keygenvaulturl <url> \
-keygenvaultsecretengine secretEngine -filename secretName
```
Options exist for configuring TLS and AppRole authentication (by default the AppRole path is set to `approle`):
```bash
tessera -keygen -keygenvaulttype HASHICORP -keygenvaulturl <url> \
-keygenvaultsecretengine <secretEngineName> -filename <secretName> \
-keygenvaultkeystore <JKS file> -keygenvaulttruststore <JKS file> \
-keygenvaultapprole <authpath>
```
The `-filename` option can be used to generate and store multiple key pairs at the same time:
```bash
tessera -keygen -keygenvaulttype HASHICORP -keygenvaulturl <url> \
-keygenvaultsecretengine secretEngine -filename myNode/keypairA,myNode/keypairB
```
!!! warning
Saving a new key pair to an existing secret will overwrite the values stored at that secret. Previous versions of secrets may be retained and retrievable by Tessera, depending on how the K/V secrets engine is configured. When doing this, make sure to [specify the correct secret version in your Tessera configuration](../../../Configuration/Keys/#hashicorp-vault-key-pairs).
!!! note
Environment variables must be set if using a Hashicorp Vault, and a version 2 K/V secret engine must be enabled. For more information see [Setting up a Hashicorp Vault](../Setting%20up%20a%20Hashicorp%20Vault)
### AWS Secrets Manager-stored keys
Generate a key pair and save to an AWS Secrets Manager, with endpoint `<url>`, as secrets with IDs `Pub` and `Key`:
```bash
tessera -keygen -keygenvaulttype AWS -keygenvaulturl <url>
```
The `-filename` option can be used to specify alternate IDs. Multiple key pairs can be generated at the same time by providing a comma-separated list of values:
```bash
tessera -keygen -keygenvaulttype AWS -keygenvaulturl <url> -filename id1,id2
```
!!! note
Environment variables must be set if using an AWS Secrets Manager, for more information see [Setting up an AWS Secrets Manager](../Setting%20up%20an%20AWS%20Secrets%20Manager)
### Updating a configfile with newly generated keys
Any newly generated keys must be added to a Tessera `.json` configfile. Often it is easiest to do this manually.
However, the `-configfile` option of `tessera keygen` can be used to automatically update a configfile after key generation. This is particularly useful when scripting.
```
tessera -keygen -filename key1 -configfile /path/to/config.json --configout /path/to/new.json --pwdout /path/to/new.pwds
```
The above command will prompt for a password and generate the `key1` pair as usual. The Tessera configuration from `/path/to/config.json` will be read, updated, and saved to `/path/to/new.json`. New passwords will be appended to the existing passwords defined in the password file referenced by `/path/to/config.json`, and the combined list will be written to `/path/to/new.pwds`.
If the `--configout` and `--pwdout` options are not provided, the updated `.json` config will be printed to the terminal.
!!! note "Note: Differences between v0.10.3 and earlier versions"
Before Tessera version 0.10.3 the node would start after updating the configfile.
In v0.10.3, this behaviour was removed to ensure clearer distinction of responsibilities between each Tessera command. The same behaviour can be achieved in v0.10.3 onwards by running:
```
tessera keygen ... -output /path/to/new.json
tessera -configfile /path/to/new.json
```
## Securing private keys
Generated private keys can be encrypted with a password, which is prompted for on the console during key generation. The password is not saved anywhere by Tessera, so after generating password-protected keys the password must be added to your configuration; otherwise the keys cannot be decrypted.
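As a minimal sketch (the paths and password value are placeholders; see [Keys](../../../Configuration/Keys) for the full set of options), the password can be supplied in the `keys` section of the configfile:
```json
{
  "keys": {
    "passwords": ["myKeyPassword"],
    "keyData": [
      {
        "privateKeyPath": "/path/to/key1.key",
        "publicKeyPath": "/path/to/key1.pub"
      }
    ]
  }
}
```
Keeping passwords in a separate password file, as referenced elsewhere in this guide, avoids storing them inline in the configfile.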
@ -112,3 +146,8 @@ Password update can be used in multiple ways. Running any of these commands wil
tessera --keys.keyData.privateKeyPath <path to keyfile> --keys.keyData.config.data.aopts.algorithm <algorithm> --keys.keyData.config.data.aopts.iterations <iterations> --keys.keyData.config.data.aopts.memory <memory> --keys.keyData.config.data.aopts.parallelism <parallelism>
```
All options have been overridden here, but only the options you wish to alter from their defaults need to be provided.
## Using alternative curve key types
By default, the `-keygen` and `-updatepassword` commands generate and update [NaCl](https://nacl.cr.yp.to/)-compatible keys.
As of Tessera v0.10.2, the `--encryptor.type=EC` CLI option can be provided to generate/update keys of different types. See [encryptor config](../../../Configuration/Configuration Overview/#encryptor-supporting-alternative-curves-in-tessera) for more details.
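For example, a hypothetical invocation (the filename is a placeholder) generating an EC key pair could be:
```bash
tessera -keygen -filename newECKey --encryptor.type=EC
```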

View File

@ -0,0 +1,29 @@
The private/public key pairs used by Tessera can be [stored](../Keys) in and [retrieved](../../../Configuration/Keys) from a key vault, removing the need to store the keys locally.
This page details how to set up and configure an _AWS Secrets Manager_ for use with Tessera.
The _AWS Secrets Manager_ documentation provides much of the information needed to get started. The information in this section has been taken from the following pages of the _AWS_ documentation:
* [AWS Secrets Manager User Guide](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)
* [AWS SDK for Java Developer Guide](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/welcome.html)
## Creating the AWS Secrets Manager
Once you have set up your AWS profile, you will be able to use AWS Secrets Manager.
## Enabling Tessera to use the _AWS Secrets Manager_
### Environment Variables
If using an _AWS Secrets Manager_, configuration credentials can be provided in many ways as outlined in the [AWS docs - Supplying and Retrieving AWS Credentials](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html).
To use environment variables set the following:
1. `AWS_REGION`: region_to_connect_to (e.g. `us-west-2`)
1. `AWS_ACCESS_KEY_ID`: your_access_key_id
1. `AWS_SECRET_ACCESS_KEY`: your_secret_access_key
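As a minimal sketch, these could be exported in the shell that will launch Tessera (all values are placeholders to be replaced with your own):
```bash
# Placeholder values - substitute your own region and IAM user credentials
export AWS_REGION=us-west-2
export AWS_ACCESS_KEY_ID=<your_access_key_id>
export AWS_SECRET_ACCESS_KEY=<your_secret_access_key>
```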
The `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` for a particular user can be retrieved from the [AWS IAM Management Console](https://console.aws.amazon.com/iam).
### Dependencies
The AWS Secrets Manager dependencies are included in the `tessera-app-<version>-app.jar`. If using the `tessera-simple-<version>-app.jar` then `aws-key-vault-<version>-all.jar` must be added to the classpath.
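As an illustrative sketch only (`<version>` and `<main-class>` are placeholders; consult your Tessera distribution for the actual main class), launching the simple distribution with the AWS jar on the classpath might look like:
```bash
# <version> and <main-class> are placeholders - check your Tessera distribution
java -cp "tessera-simple-<version>-app.jar:aws-key-vault-<version>-all.jar" \
  <main-class> -configfile config.json
```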

View File

@ -2,7 +2,7 @@
All interfaces can be set to run over HTTP, gRPC or HTTP-over-Unix-sockets.
### gRPC for inter-node communication (Deprecated)
We currently have an experimental implementation of gRPC for peer-to-peer node communication. This is not enabled on Quorum yet, but it can be enabled between Tessera nodes by adding a couple of properties to the configuration file as child elements of `serverConfig`.
@ -12,6 +12,9 @@ We currently have an implementation of gRPC for peer node communication as exper
Please note that communication between Quorum and Tessera is still via unix socket. This communication flag provides additional options for Tessera peer-to-peer communication. If gRPC is the option specified, please ensure the peers' URLs are provided with the appropriate ports.
!!! info
gRPC as a protocol for peer-to-peer communication will be removed from Tessera in version 0.10.2
---
### Tessera to Tessera - Public API
@ -41,6 +44,8 @@ The following endpoints are advertised on this interface:
* `/version`
* `/upcheck`
* `/storeraw`
* `/keys`
* `/partyinfo/keys`
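For example (the host and port are placeholders for your node's advertised server address), a node's availability can be checked against this interface with:
```bash
curl http://localhost:8080/upcheck
```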
### Quorum to Tessera - Private API
@ -49,6 +54,7 @@ Quorum uses this API to:
- Send and receive details of private transactions
The following endpoints are advertised on this interface:
- `/version`
- `/upcheck`
- `/sendraw`
@ -60,12 +66,13 @@ The following endpoints are advertised on this interface:
### Admin API
Admins should use this API to:
- Access information about the Tessera node
- Make changes to the configuration of the Tessera node
The following endpoints are advertised on this API:
- `/peers` - Add to, and retrieve from, the Tessera node's peers list
- `/keypairs` - Retrieve all public keys or search for a particular public key in use by the Tessera node
## API Details

View File

@ -0,0 +1,49 @@
# Getting Started with the Quorum Plugin for Remix
1. Go to the [Remix IDE](https://remix.ethereum.org), click on the Plugins tab, scroll down to **Quorum Network**, and Activate.
![quorum_network](./images/quorum_network.png)
2. Accept the permission to allow the plugin to retrieve compilation results. This allows our plugin to use the solidity compiler to get the compiled contract binary to deploy to your Quorum node.
![permission](./images/permission.png)
3. The plugin should now be included in the icons on the left side. Click on the Quorum icon to show the plugin.
![quorum_tab](./images/tab_icon.png)
4. Input the Geth RPC url and hit enter. If you are currently running the quorum-examples 7nodes network, the first node's url is http://localhost:22000
![geth_rpc](./images/geth_rpc.png)
5. If the node is running, the plugin should now say Connected and the rest of the UI will have appeared.
![ui_ready](./images/ui_ready.png)
6. The Quorum plugin uses results from Remix's Solidity compiler, so pull up some contract code and compile it like you normally would in Remix. The plugin will automatically receive the compiled code on each new compilation.
7. Once you have a contract compiled, it will automatically be selected in the Compiled Contracts dropdown. Input any constructor values and deploy.
![deploy](./images/deploy.png)
8. If successful, the contract will show up in a collapsed view under 'Deployed Contracts'. Click the caret to expand.
![contract_collapsed](./images/contract_collapsed.png)
9. From here you can call methods on the contract.
![method_call](./images/method_call.png)
10. To create a private contract, add your Tessera public keys one at a time to the Private For multi-select box. Press enter after inputting each one to save and select.
![private_add](./images/private_add.png)
11. Add as many peers as you want, then deploy the contract again like you did in step 7.
![private_multiple](./images/private_multiple.png)
12. After deploying and expanding the new contract, you should see the public keys that you selected in the widget. Every method call will include the selected keys automatically.
![deployed_private](./images/deployed_private.png)
13. Please open a github issue or reach out to us on our [Slack](https://bit.ly/quorum-slack) with any feedback or questions!

View File

@ -0,0 +1,17 @@
# Quorum Plugin for Remix
The Quorum plugin for Ethereum's Remix IDE adds support for creating and interacting with private contracts on a Quorum network.
![screenshot](./images/quorum-remix.png "screenshot")
## Getting Started
Just go to the [Remix IDE](https://remix.ethereum.org) and activate the **Quorum Network** plugin on the plugins page. For step-by-step instructions, go to the [Getting Started](../Getting%20started) doc.
## Common Issues
**HTTP/HTTPS:**
- Most browsers will not allow you to connect to an HTTP resource from an HTTPS page. Since our plugin is currently loaded over HTTPS, the browser will not let you connect to a Quorum node that does not have an HTTPS URL. **Chrome makes an exception for localhost**, so you should be able to connect to http://localhost:22000, for example. Firefox is currently stricter than Chrome and does not allow these localhost calls. We are tracking this issue in [quorum-remix#8](https://github.com/jpmorganchase/quorum-remix/issues/8), but until it is fixed please use Chrome or another browser that doesn't block these requests.
## Contributing
Quorum Plugin for Remix is built on open source and we invite you to contribute enhancements. Upon review you will be required to complete a Contributor License Agreement (CLA) before we are able to merge. If you have any questions about the contribution process, please feel free to send an email to [info@goquorum.com](mailto:info@goquorum.com).

Binary image files not shown.