Merge branch 'master' into docs/AWSSecretsManagerKeyVault

Commit ca0b625436 by Krish1979, 2020-02-10 12:56:48 +00:00 (committed via GitHub)
14 changed files with 1389 additions and 45 deletions


@ -128,17 +128,6 @@ var AppHelpFlagGroups = []flagGroup{
utils.TxPoolLifetimeFlag,
},
},
{
Name: "ETHASH",
Flags: []cli.Flag{
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
},
},
{
Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{


@ -27,19 +27,19 @@
There are a few ways in which you can run Cakeshop (see the sections below for details on each, as well as the [configuration](https://github.com/jpmorganchase/cakeshop/blob/master/docs/configuration.md#geth) page):
1\. **Default mode**: _Used when you want Cakeshop to start up an Ethereum node._
Running Cakeshop in the Default mode will start up Cakeshop and also start running a regular geth node (on a private/test network).
2\. **'Attach/Unmanaged' mode**: _Used when you want to attach Cakeshop to an already running Ethereum-like node._
Running Cakeshop in 'Attach' a.k.a. 'unmanaged' mode will initialize Cakeshop but not start it, nor start any Ethereum node. Once Cakeshop initialization is complete, you can configure it to use the RPC details of your running node. When you then start Cakeshop, it will attach to your node.
NOTE: if different parties on the network are using Cakeshop to deploy contracts to the network, then they need to ensure they are using the same ContractRegistry address. See details below for setting up the ContractRegistry address in this case.
3\. **Multi-Instance Set Up**: _Used when you want to run Cakeshop on more than one node in your network._
Cakeshop is currently designed such that a given instance of Cakeshop works directly with a single Ethereum-like node; however, you can set up multiple instances of Cakeshop on the same machine (each of which could have been started in 'Default' mode or 'Attach' mode) such that each can talk to a different node.
NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to run Cakeshop on [Quorum](https://github.com/jpmorganchase/quorum) nodes. See below for connecting Cakeshop to the [7nodes](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/7nodes) network from the quorum-examples repo.
@ -50,8 +50,8 @@ NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to r
1. In a terminal window run:
```
$ cd path/to/cakeshop/war
$ java -jar cakeshop.war
```
2. Open **http://localhost:8080/** in your browser (Firefox/Chrome supported)
@ -62,20 +62,30 @@ NOTE: you can use the Attach mode and/or Multi-Instance setup configuration to r
```
$ cd path/to/cakeshop/war
# The 'example' arg below will unpack the war file and set up the cakeshop data folders but will not actually start a node
$ java -jar cakeshop.war example
```
2. Navigate to path/to/cakeshop/war/data/local
3. Make the following edits to the application.properties file:
```
geth.auto.start=false
geth.auto.stop=false
```
4. Run:
```
$ java -jar cakeshop.war
```
5. Open **http://localhost:8080/** in your browser (Firefox/Chrome supported)
6. The dropdown menu on the top right of the page should show "Manage Nodes" if you haven't attached to any yet. Click on that to go to the Manage Nodes page.
7. Click Add Node and input the RPC url of your Quorum node (e.g. http://localhost:22000) and the path to the Tessera P2P Party Info endpoint (e.g. http://localhost:9001/partyinfo).
8. Once added, click on View to attach to the node and return to the main Cakeshop page
### Multi-Instance Setup
@ -107,8 +117,8 @@ Although Cakeshop currently has a one-to-one mapping with the underlying Ethereu
cd node1
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war example
```
2. Assuming you want to attach to an existing node, navigate to /myNetwork/node1/ and edit **application.properties** per the instructions for [attach mode](#attach-mode) as described above
3. In terminal window 2 run:
@ -116,20 +126,20 @@ Although Cakeshop currently has a one-to-one mapping with the underlying Ethereu
cd myNetwork/node2
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war example
```
4. Navigate to myNetwork/node2 and edit **application.properties** per the instructions for [attach mode](#attach-mode) as described above
5. In terminal window 1 run:
```
CAKESHOP_SHARED_CONFIG=".." java -jar ../cakeshop.war
```
6. In terminal window 2 run:
```
CAKESHOP_SHARED_CONFIG=".." java -Dserver.port=8081 -jar cakeshop.war # Cakeshop will now be available on localhost:8081
```
7. In browser window 1 open http://localhost:8080/
8. In browser window 2 open http://localhost:8081/


@ -5,7 +5,7 @@ With no need for POW/POS in a permissioned network, Quorum instead offers multip
* __Raft-based Consensus__: A consensus model for faster blocktimes, transaction finality, and on-demand block creation. See [Raft-based consensus for Ethereum/Quorum](../raft) for more information
* __Istanbul BFT (Byzantine Fault Tolerance) Consensus__: A PBFT-inspired consensus algorithm with immediate transaction finality, by AMIS. See [Istanbul BFT Consensus documentation](../ibft/ibft), the [RPC API](../ibft/istanbul-rpc-api), and this [technical web article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff) for more information
* __Clique POA Consensus__: a default POA consensus algorithm bundled with Go Ethereum. See [Clique POA Consensus Documentation](https://github.com/ethereum/EIPs/issues/225) and a [guide to setup clique json](https://hackernoon.com/hands-on-creating-your-own-local-private-geth-node-beginner-friendly-3d45902cc612) with [puppeth](https://blog.ethereum.org/2017/04/14/geth-1-6-puppeth-master/)

docs/Consensus/ibft/ibft.md (new file, 184 lines)

@ -0,0 +1,184 @@
# IBFT Consensus Overview
## Introduction
Istanbul Byzantine Fault Tolerant (IBFT) consensus is inspired by the Castro-Liskov 99 [paper](http://pmg.csail.mit.edu/papers/osdi99.pdf). IBFT inherits from the original PBFT by using a 3-phase consensus: `PRE-PREPARE`, `PREPARE` and `COMMIT`. The system can tolerate at most `F` faulty nodes in an `N`-validator network, where `N = 3F + 1`.
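To make the arithmetic concrete, here is a minimal Go sketch (an illustration, not Quorum's implementation) of the fault tolerance implied by `N = 3F + 1`, together with the `ceil(2N/3)` message threshold that the consensus phases described below rely on:
```go
package main

import "fmt"

// faultTolerance returns the maximum number of faulty validators F that
// an N-validator IBFT network tolerates (largest F with 3F + 1 <= N),
// and the ceil(2N/3) threshold of matching messages used in each phase.
func faultTolerance(n int) (f, quorum int) {
	f = (n - 1) / 3        // from N = 3F + 1
	quorum = (2*n + 2) / 3 // integer form of ceil(2N/3)
	return f, quorum
}

func main() {
	for _, n := range []int{4, 7, 10} {
		f, q := faultTolerance(n)
		fmt.Printf("N=%d: tolerates F=%d faults, quorum=%d messages\n", n, f, q)
	}
}
```
So a 4-validator network tolerates one fault and needs 3 matching messages per phase; a 7-validator network tolerates two faults and needs 5.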
## Implementation
### Terminology
- `Validator`: Block validation participant.
- `Proposer`: A block validation participant that is chosen to propose a block in a consensus round.
- `Round`: Consensus round. A round starts with the proposer creating a block proposal and ends with a block commitment or round change.
- `Proposal`: New block generation proposal which is undergoing consensus processing.
- `Sequence`: Sequence number of a proposal. A sequence number should be greater than all previous sequence numbers. Currently each proposed block height is its associated sequence number.
- `Backlog`: The storage to keep future consensus messages.
- `Round state`: Consensus messages of a specific sequence and round, including pre-prepare message, prepare message, and commit message.
- `Consensus proof`: The commitment signatures of a block that can prove the block has gone through the consensus process.
- `Snapshot`: The validator voting state from the last epoch.
### Consensus
The Istanbul BFT consensus protocol begins at round `0` with the validators picking a proposer from among themselves in a round-robin fashion. The proposer then creates a new block proposal and broadcasts it along with the `PRE-PREPARE` message. Upon receiving the `PRE-PREPARE` message from the proposer, the other validators validate the incoming proposal, enter the `PRE-PREPARED` state, and broadcast a `PREPARE` message. This step makes sure all validators are working on the same sequence and the same round. When `ceil(2N/3)` of `PREPARE` messages are received by a validator from the other validators, it switches to the `PREPARED` state and broadcasts a `COMMIT` message. This step informs the other validators that it accepts the proposed block and is going to insert it into the chain. Lastly, validators wait for `ceil(2N/3)` of `COMMIT` messages to enter the `COMMITTED` state and then append the block to the chain.
Blocks in the Istanbul BFT protocol are final, which means that there are no forks and any valid block must be somewhere in the main chain. To prevent a faulty node from generating a totally different chain from the main chain, each validator appends `ceil(2N/3)` of the received `COMMIT` signatures to the `extraData` field in the header before inserting the block into the chain, making all blocks self-verifiable. However, the dynamic `extraData` causes an issue for block hash calculation: since the same block from different validators can carry different sets of `COMMIT` signatures, the same block could have different block hashes as well. To solve this, we calculate the block hash excluding the `COMMIT` signatures part. Therefore, we keep the block/block-hash consistency while still putting the consensus proof in the block header.
#### Consensus States
Istanbul BFT is a state machine replication algorithm. Each validator maintains a state machine replica in order to reach block consensus. The states in IBFT consensus are:
- `NEW ROUND`: The proposer sends a new block proposal. Validators wait for the `PRE-PREPARE` message.
- `PRE-PREPARED`: A validator has received `PRE-PREPARE` message and broadcasts `PREPARE` message. Then it waits for `ceil(2N/3)` of `PREPARE` or `COMMIT` messages.
- `PREPARED`: A validator has received `ceil(2N/3)` of `PREPARE` messages and broadcasts `COMMIT` messages. Then it waits for `ceil(2N/3)` of `COMMIT` messages.
- `COMMITTED`: A validator has received `ceil(2N/3)` of `COMMIT` messages and is able to insert the proposed block into the blockchain.
- `FINAL COMMITTED`: A new block is successfully inserted into the blockchain and the validator is ready for the next round.
- `ROUND CHANGE`: A validator is waiting for `ceil(2N/3)` of `ROUND CHANGE` messages on the same proposed round number.
**State Transitions**:
![State Transitions](images/IBFTStateTransition.png)
- `NEW ROUND` -> `PRE-PREPARED`:
- **Proposer** collects transactions from txpool.
- **Proposer** generates a block proposal and broadcasts it to validators. It then enters the `PRE-PREPARED` state.
- Each **validator** enters `PRE-PREPARED` upon receiving the `PRE-PREPARE` message with the following conditions:
- Block proposal is from the valid proposer.
- Block header is valid.
- Block proposal's sequence and round match the **validator**'s state.
- **Validator** broadcasts `PREPARE` message to other validators.
- `PRE-PREPARED` -> `PREPARED`:
- Validator receives `ceil(2N/3)` of valid `PREPARE` messages to enter `PREPARED` state. Valid messages conform to the following conditions:
- Matched sequence and round.
- Matched block hash.
- Messages are from known validators.
- Validator broadcasts `COMMIT` message upon entering `PREPARED` state.
- `PREPARED` -> `COMMITTED`:
- **Validator** receives `ceil(2N/3)` of valid `COMMIT` messages to enter `COMMITTED` state. Valid messages conform to the following conditions:
- Matched sequence and round.
- Matched block hash.
- Messages are from known validators.
- `COMMITTED` -> `FINAL COMMITTED`:
- **Validator** appends `ceil(2N/3)` commitment signatures to `extraData` and tries to insert the block into the blockchain.
- **Validator** enters `FINAL COMMITTED` state when insertion succeeds.
- `FINAL COMMITTED` -> `NEW ROUND`:
- **Validators** pick a new **proposer** and begin a new round timer.
#### Round change flow
- There are three conditions that would trigger `ROUND CHANGE`:
- Round change timer expires.
- Invalid `PREPREPARE` message.
- Block insertion fails.
- When a validator notices that one of the above conditions applies, it broadcasts a `ROUND CHANGE` message along with the proposed round number and waits for `ROUND CHANGE` messages from other validators. The proposed round number is selected based on the following conditions:
- If the validator has received `ROUND CHANGE` messages from its peers, it picks the largest round number which has `F + 1` of `ROUND CHANGE` messages.
- Otherwise, it picks `1 + current round number` as the proposed round number.
- Whenever a validator receives `F + 1` of `ROUND CHANGE` messages on the same proposed round number, it compares the received round number with its own. If the received one is larger, the validator broadcasts the `ROUND CHANGE` message again with the received number.
- Upon receiving `ceil(2N/3)` of `ROUND CHANGE` messages on the same proposed round number, the **validator** exits the round change loop, calculates the new **proposer**, and then enters `NEW ROUND` state.
- A validator also exits the round change loop when it receives verified block(s) through peer synchronization.
#### Proposer selection
Currently we support two policies, **round robin** and **sticky proposer** (a sketch of both follows the list):
- Round robin: Round robin is the default proposer selection policy. In this setting, the proposer changes on every block and on every round change.
- Sticky proposer: In a sticky proposer setting, the proposer changes only when a round change happens.
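The following Go sketch (hypothetical, not the actual Quorum selection code) captures the difference: round robin advances the proposer on every block and on every round change, while sticky advances it only on a round change.
```go
package main

import "fmt"

// selectProposer is a hypothetical sketch of the two policies described
// above. validators is the sorted validator list, lastIdx the index of
// the previous proposer, and round the current consensus round.
func selectProposer(validators []string, lastIdx, round int, sticky bool) string {
	offset := round // a round change always moves the proposer along
	if !sticky {
		offset++ // round robin also advances on every new block
	}
	return validators[(lastIdx+offset)%len(validators)]
}

func main() {
	v := []string{"A", "B", "C", "D"}
	// New block at round 0: round robin moves to the next validator,
	// sticky keeps the same proposer.
	fmt.Println(selectProposer(v, 0, 0, false)) // B
	fmt.Println(selectProposer(v, 0, 0, true))  // A
	// After one round change (round 1), both policies move on.
	fmt.Println(selectProposer(v, 0, 1, true)) // B
}
```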
#### Validator list voting
Istanbul BFT uses a validator voting mechanism similar to Clique's and copies most of the content from the Clique [EIP](https://github.com/ethereum/EIPs/issues/225). Every epoch transition resets the validator voting, meaning any pending votes for adding/removing a validator are discarded.
For all transaction blocks (a vote-tallying sketch follows this list):
- Proposer can cast one vote to propose a change to the validators list.
- Only the latest proposal per target beneficiary is kept from a single validator.
- Votes are tallied live as the chain progresses (concurrent proposals allowed).
- Proposals reaching majority consensus (`VALIDATOR_LIMIT` votes) come into effect immediately.
- Invalid proposals are not to be penalized for client implementation simplicity.
- A proposal coming into effect entails discarding all pending votes for that proposal (both for and against) and starts with a clean slate.
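To illustrate these rules, here is a Go sketch of a live tally (the types and names are invented for illustration, not taken from Quorum's code): only each validator's latest vote per candidate counts, and a proposal takes effect once it collects `VALIDATOR_LIMIT = floor(N/2) + 1` matching votes, at which point its pending votes are discarded.
```go
package main

import "fmt"

// Tally tracks live votes keyed by candidate, then by validator, so
// only each validator's latest proposal per candidate is kept.
type Tally struct {
	numValidators int
	votes         map[string]map[string]bool // candidate -> validator -> authorize
}

func NewTally(n int) *Tally {
	return &Tally{numValidators: n, votes: map[string]map[string]bool{}}
}

// Cast records validator's latest vote on candidate and reports whether
// the proposal reached VALIDATOR_LIMIT = floor(N/2) + 1 and took effect.
func (t *Tally) Cast(validator, candidate string, authorize bool) bool {
	if t.votes[candidate] == nil {
		t.votes[candidate] = map[string]bool{}
	}
	t.votes[candidate][validator] = authorize // latest vote wins
	count := 0
	for _, a := range t.votes[candidate] {
		if a == authorize {
			count++
		}
	}
	if count >= t.numValidators/2+1 {
		delete(t.votes, candidate) // effect applied: discard pending votes
		return true
	}
	return false
}

func main() {
	t := NewTally(4) // majority is 3
	fmt.Println(t.Cast("v1", "0xb131...", true)) // false
	fmt.Println(t.Cast("v2", "0xb131...", true)) // false
	fmt.Println(t.Cast("v3", "0xb131...", true)) // true: proposal passes
}
```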
#### Future message and backlog
In an asynchronous network environment, one may receive future messages which cannot be processed in the current state. For example, a validator can receive `COMMIT` messages while in `NEW ROUND`. We call this kind of message a "future message". When a validator receives a future message, it puts the message into its **backlog** and tries to process it later whenever possible.
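A minimal Go sketch of this idea (names and structure invented for illustration): messages ahead of the validator's current sequence and round are parked in the backlog and replayed once the validator advances to the matching view.
```go
package main

import "fmt"

// view identifies a consensus message's position.
type view struct{ sequence, round uint64 }

// backlog parks future messages and replays them once the validator
// reaches the matching sequence and round.
type backlog struct {
	current view
	pending map[view][]string
}

func (b *backlog) receive(v view, msg string) {
	if v == b.current {
		fmt.Println("process now:", msg)
		return
	}
	b.pending[v] = append(b.pending[v], msg) // future message: park it
}

// advance moves to the next view and replays any parked messages.
func (b *backlog) advance(v view) {
	b.current = v
	for _, msg := range b.pending[v] {
		fmt.Println("process from backlog:", msg)
	}
	delete(b.pending, v)
}

func main() {
	b := &backlog{current: view{1, 0}, pending: map[view][]string{}}
	b.receive(view{2, 0}, "COMMIT") // arrives early, parked
	b.advance(view{2, 0})           // replayed here
}
```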
#### Constants
Istanbul BFT defines the following constants:
- `EPOCH_LENGTH`: Default: 30000 blocks. Number of blocks after which to checkpoint and reset the pending votes.
- `REQUEST_TIMEOUT`: Timeout, in milliseconds, for each consensus round before firing a round change.
- `BLOCK_PERIOD`: Minimum timestamp difference in seconds between two consecutive blocks.
- `PROPOSER_POLICY`: Proposer selection policy, defaults to round robin.
- `ISTANBUL_DIGEST`: Fixed magic number `0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365` of `mixDigest` in block header for Istanbul block identification.
- `DEFAULT_DIFFICULTY`: Default block difficulty, which is set to `0x0000000000000001`.
- `EXTRA_VANITY`: Fixed number of extra-data prefix bytes reserved for proposer vanity.
- Suggested `32` bytes to retain the current extra-data allowance and/or use.
- `NONCE_AUTH`: Magic nonce number `0xffffffffffffffff` to vote on adding a validator.
- `NONCE_DROP`: Magic nonce number `0x0000000000000000` to vote on removing a validator.
- `UNCLE_HASH`: Always `Keccak256(RLP([]))` as uncles are meaningless outside of PoW.
- `PREPREPARE_MSG_CODE`: Fixed number `0`. Message code for `PREPREPARE` message.
- `PREPARE_MSG_CODE`: Fixed number `1`. Message code for `PREPARE` message.
- `COMMIT_MSG_CODE`: Fixed number `2`. Message code for `COMMIT` message.
- `ROUND_CHANGE_MSG_CODE`: Fixed number `3`. Message code for `ROUND CHANGE` message.
- `VALIDATOR_LIMIT`: Number of validators to pass an authorization or de-authorization proposal.
- Must be `floor(N / 2) + 1` to enforce majority consensus on a chain.
#### Block Header
Istanbul BFT does not add new block header fields. Instead, it follows Clique in repurposing the `ethash` header fields as follows:
- `nonce`: Proposer proposal regarding the account defined by the beneficiary field.
- Should be `NONCE_DROP` to propose deauthorizing beneficiary as an existing validator.
- Should be `NONCE_AUTH` to propose authorizing beneficiary as a new validator.
- **Must** be filled with zeroes, `NONCE_DROP` or `NONCE_AUTH`.
- `mixHash`: Fixed magic number `0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365` for Istanbul block identification.
- `ommersHash`: Must be `UNCLE_HASH` as uncles are meaningless outside of PoW.
- `timestamp`: Must be at least the parent timestamp + `BLOCK_PERIOD`.
- `difficulty`: Must be filled with `0x0000000000000001`.
- `extraData`: Combined field for signer vanity and RLP encoded Istanbul extra data, where Istanbul extra data contains validator list, proposer seal, and commit seals. Istanbul extra data is defined as follows:
```
type IstanbulExtra struct {
Validators []common.Address //Validator addresses
Seal []byte //Proposer seal 65 bytes
CommittedSeal [][]byte //Committed seal, 65 * len(Validators) bytes
}
```
Thus the `extraData` is of the form `EXTRA_VANITY | ISTANBUL_EXTRA`, where `|` represents a fixed index separating the vanity and the Istanbul extra data (not an actual separator character). A sketch of this composition appears after the list below.
- First `EXTRA_VANITY` bytes (fixed) may contain arbitrary proposer vanity data.
- `ISTANBUL_EXTRA` bytes are the RLP encoded Istanbul extra data calculated from `RLP(IstanbulExtra)`, where `RLP()` is the RLP encoding function and `IstanbulExtra` is the Istanbul extra data.
- `Validators`: The list of validators, which **must** be sorted in ascending order.
- `Seal`: The proposer's signature sealing of the header.
- `CommittedSeal`: The list of commitment signature seals as consensus proof.
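Under the layout just described, a minimal Go sketch of composing `extraData` might look like the following; it reuses the `IstanbulExtra` struct from above and go-ethereum's `rlp` package, but is an illustration rather than Quorum's actual header-assembly code.
```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// IstanbulExtra mirrors the struct shown above.
type IstanbulExtra struct {
	Validators    []common.Address // validator addresses
	Seal          []byte           // proposer seal, 65 bytes
	CommittedSeal [][]byte         // committed seals, 65 * len(Validators) bytes
}

const extraVanity = 32 // EXTRA_VANITY: bytes reserved for proposer vanity

// composeExtraData builds the header extraData field: the first
// EXTRA_VANITY bytes of vanity, followed by RLP(IstanbulExtra).
func composeExtraData(vanity []byte, extra *IstanbulExtra) ([]byte, error) {
	prefix := make([]byte, extraVanity)
	copy(prefix, vanity) // truncate or zero-pad vanity to 32 bytes
	payload, err := rlp.EncodeToBytes(extra)
	if err != nil {
		return nil, err
	}
	return append(prefix, payload...), nil
}

func main() {
	extra := &IstanbulExtra{
		Validators:    []common.Address{common.HexToAddress("0xb131288f355bc27090e542ae0be213c20350b767")},
		Seal:          []byte{},
		CommittedSeal: [][]byte{},
	}
	data, _ := composeExtraData([]byte("vanity"), extra)
	fmt.Printf("extraData: %d bytes (%d vanity + %d RLP)\n", len(data), extraVanity, len(data)-extraVanity)
}
```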
#### Block hash, proposer seal and committed seals
The Istanbul block hash calculation is different from the `ethash` block hash calculation due to the following reasons:
1. The proposer needs to put its seal in `extraData` to prove the block is signed by the chosen proposer.
2. The validators need to put `ceil(2N/3)` of committed seals as consensus proof in `extraData` to prove the block has gone through consensus.
The calculation is still similar to the `ethash` block hash calculation, with the exception that we need to deal with `extraData`. We calculate the fields as follows:
##### Proposer seal calculation
By the time of proposer seal calculation, the committed seals are still unknown, so we calculate the seal with those unknowns empty. The calculation is as follows:
- `Proposer seal`: `SignECDSA(Keccak256(RLP(Header)), PrivateKey)`
- `PrivateKey`: Proposer's private key.
- `Header`: Same as `ethash` header only with a different `extraData`.
- `extraData`: `vanity | RLP(IstanbulExtra)`, where in the `IstanbulExtra`, `CommittedSeal` and `Seal` are empty arrays.
##### Block hash calculation
While calculating block hash, we need to exclude committed seals since that data is dynamic between different validators. Therefore, we make `CommittedSeal` an empty array while calculating the hash. The calculation is:
- `Header`: Same as `ethash` header only with a different `extraData`.
- `extraData`: `vanity | RLP(IstanbulExtra)`, where in the `IstanbulExtra`, `CommittedSeal` is an empty array.
##### Consensus proof
Before inserting a block into the blockchain, each validator needs to collect `ceil(2N/3)` of committed seals from other validators to compose a consensus proof. Once it receives enough committed seals, it will fill the `CommittedSeal` in `IstanbulExtra`, recalculate the `extraData`, and then insert the block into the blockchain. **Note** that since committed seals can differ by different sources, we exclude that part while calculating the block hash as in the previous section.
Committed seal calculation:
The committed seal is calculated by each validator signing the concatenation of the block hash and the `COMMIT_MSG_CODE` message code with its private key. The calculation is as follows (a sketch follows the list):
- `Committed seal`: `SignECDSA(Keccak256(CONCAT(Hash, COMMIT_MSG_CODE)), PrivateKey)`.
- `CONCAT(Hash, COMMIT_MSG_CODE)`: Concatenate block hash and `COMMIT_MSG_CODE` bytes.
- `PrivateKey`: Signing validator's private key.
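As a minimal sketch of this calculation, using go-ethereum's `crypto` package (the hex key below is a well-known throwaway test value, not a real validator key):
```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

const commitMsgCode = byte(2) // COMMIT_MSG_CODE

// committedSeal signs Keccak256(CONCAT(blockHash, COMMIT_MSG_CODE))
// with the validator's private key, as described above.
func committedSeal(blockHash common.Hash, hexKey string) ([]byte, error) {
	key, err := crypto.HexToECDSA(hexKey)
	if err != nil {
		return nil, err
	}
	payload := append(blockHash.Bytes(), commitMsgCode)
	return crypto.Sign(crypto.Keccak256(payload), key)
}

func main() {
	seal, err := committedSeal(
		common.HexToHash("0xf814863d809ce3a683ee0a2197b15a8152d2696fc9c4e47cd82d0bd5cdaa3e45"),
		"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	if err != nil {
		panic(err)
	}
	fmt.Printf("committed seal: %d bytes\n", len(seal)) // 65-byte [R || S || V] signature
}
```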
## Provenance
The Istanbul BFT implementation in Quorum is based on [EIP 650](https://github.com/ethereum/EIPs/issues/650). It has been updated since the EIP was opened, to resolve safety issues by introducing locking.

Binary file not shown (new image, 47 KiB)


@ -0,0 +1,192 @@
# Raft RPC API
## APIs
### raft_cluster
Returns the details of all nodes that are part of the raft cluster
#### Parameters
None
#### Returns
* `hostName`: DNS name or the host IP address
* `nodeActive`: `true` if the node is active in the raft cluster, else `false`
* `nodeId`: enode id of the node
* `p2pPort`: p2p port
* `raftId`: raft id of the node
* `raftPort`: raft port
* `role`: role of the node in the raft quorum. Can be `minter`, `verifier`, or `learner`. If there is no leader at the network level, the role is returned as `""`
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_cluster", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":[{"raftId":1,"nodeId":"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef","p2pPort":21000,"raftPort":50401,"hostname":"127.0.0.1","role":"minter","nodeActive":true},{"raftId":3,"nodeId":"579f786d4e2830bbcc02815a27e8a9bacccc9605df4dc6f20bcc1a6eb391e7225fff7cb83e5b4ecd1f3a94d8b733803f2f66b7e871961e7b029e22c155c3a778","p2pPort":21002,"raftPort":50403,"hostname":"127.0.0.1","role":"verifier","nodeActive":true},{"raftId":2,"nodeId":"0ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416","p2pPort":21001,"raftPort":50402,"hostname":"127.0.0.1","role":"verifier","nodeActive":true}]}
```
```javascript tab="geth console"
> raft.cluster
[{
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "0ba6b9f606a43a95edc6247cdb1c1e105145817be7bcafd6b2c0ba15d58145f0dc1a194f70ba73cd6f4cdd6864edc7687f311254c7555cc32e4d45aeb1b80416",
p2pPort: 21001,
raftId: 2,
raftPort: 50402,
role: "verifier"
}, {
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "579f786d4e2830bbcc02815a27e8a9bacccc9605df4dc6f20bcc1a6eb391e7225fff7cb83e5b4ecd1f3a94d8b733803f2f66b7e871961e7b029e22c155c3a778",
p2pPort: 21002,
raftId: 3,
raftPort: 50403,
role: "verifier"
}, {
hostname: "127.0.0.1",
nodeActive: true,
nodeId: "ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef",
p2pPort: 21000,
raftId: 1,
raftPort: 50401,
role: "minter"
}]
```
### raft_role
Returns the role of the current node in the raft cluster
#### Parameters
None
#### Returns
* `result`: role of the node in the raft cluster. Can be `minter`, `verifier`, or `learner`. If there is no leader at the network level, the role is returned as `""`
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_role", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":"verifier"}
```
```javascript tab="geth console"
> raft.role
"minter"
```
### raft_leader
Returns the enode id of the leader node
#### Parameters
None
#### Returns
* `result`: enode id of the leader
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_leader", "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef"}
```
```javascript tab="geth console"
> raft.leader
"ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef"
```
If there is no leader at the network level, the API call results in the following error:
```javascript
> raft.leader
Error: no leader is currently elected
at web3.js:3143:20
at web3.js:6347:15
at get (web3.js:6247:38)
at <unknown>
```
### raft_addPeer
API for adding a new peer to the network.
#### Parameters
* `enodeId`: enode id of the node to be added to the network
#### Returns
* `result`: raft id for the node being added
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_addPeer","params": ["enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405"], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":5}
```
```javascript tab="geth console"
> raft.addPeer("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
5
```
The new node can then join the network with the `geth` option `--raftjoinexisting <raftId>`
If the node being added is already part of the network, the following error is thrown:
```javascript
> raft.addPeer("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
Error: node with this enode has already been added to the cluster: f06c06f1e958cb2edf90d8bfb912de287f9b047b4228436e94b5b78e3ee16171
at web3.js:3143:20
at web3.js:6347:15
at web3.js:5081:36
at <anonymous>:1:1
```
### raft_removePeer
API to remove a node from the raft cluster
#### Parameters
* `raftId` : raft id of the node to be removed from the cluster
#### Returns
* `result`: null
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_removePeer","params": [4], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":null}
```
```javascript tab="geth console"
> raft.removePeer(4)
null
```
### raft_addLearner
API to add a new node to the network as a learner node. The learner node syncs with the network and can transact, but it will not be part of the raft quorum and hence will not provide block confirmations to the minter node.
#### Parameters
* `enodeId`: enode id of the node to be added as a learner
#### Returns
* `result`: raft id for the node being added
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_addLearner","params": ["enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405"], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":5}
```
```javascript tab="geth console"
> raft.addLearner("enode://3701f007bfa4cb26512d7df18e6bbd202e8484a6e11d387af6e482b525fa25542d46ff9c99db87bd419b980c24a086117a397f6d8f88e74351b41693880ea0cb@127.0.0.1:21004?discport=0&raftport=50405")
5
```
### raft_promoteToPeer
API for promoting a learner node to a peer, making it part of the raft quorum.
#### Parameters
* `raftId`: raft id of the node to be promoted
#### Returns
* `result`: true or false
#### Examples
```jshelllanguage tab="JSON RPC"
// Request
curl -X POST http://127.0.0.1:22001 --data '{"jsonrpc":"2.0","method":"raft_promoteToPeer","params": [4], "id":10}' --header "Content-Type: application/json"
// Response
{"jsonrpc":"2.0","id":10,"result":true}
```
```javascript tab="geth console"
> raft.promoteToPeer(4)
true
```


@ -1,4 +1,4 @@
# Raft Consensus Overview
## Introduction
@ -190,4 +190,4 @@ Note that like the enode IDs listed in the static peers JSON file, this enode ID
## FAQ
Answers to frequently asked questions can be found on the main [Quorum FAQ page](../../FAQ.md).


@ -0,0 +1,291 @@
# Adding and removing IBFT validators
Over the lifetime of an IBFT network, validators will need to be added and removed as authorities change.
Here we will showcase adding a new validator to an IBFT network, as well as removing an existing one.
## Adding a node to the validator set
Adding a node to the IBFT validator set is relatively easy once a node is part of the network.
It does not matter whether the node is already online or not, as the process to add the new node as a validator only
needs the *existing* validators.
!!! warning
If you are adding multiple validators before they are brought online, make sure you don't go over the BFT limit and cause the chain to stop progressing.
Adding a new validator requires that a majority of existing validators propose the new node to be added. This is
achieved by calling the `propose` RPC method with the value `true` and replacing the address with your required one:
```bash
$ geth attach /qdata/dd/geth.ipc
> istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);
null
```
This indicates that the current node wishes to add address `0xb131288f355bc27090e542ae0be213c20350b767` as a new
validator.
### Example
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/ibft_validator_set_changes)
repository.
1. The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
2. Bring up the network, which contains 7 nodes, of which 6 are validators.
```bash
$ docker-compose -f ibft-6-validators.yml up
```
We will be adding the 7th node as a validator. You may notice in the logs of node 7 messages along the lines of
`node7_1 | WARN [01-20|10:37:16.034] Block sealing failed err=unauthorized`. This is because
the node was started up with minting enabled, but doesn't have the authority to create blocks, and so throws this
error.
3. Now we need to propose node 7 as a new validator from the existing nodes.
!!! note
Remember, you could do this stage before starting node 7 in your network.
We need a majority of existing validators to propose the new node before the changes will take effect.
Let's start with node 1 and see what happens:
```bash
# Propose node 7 from node 1
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
# Wait about 5 seconds, and then run:
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xf814863d809ce3a683ee0a2197b15a8152d2696fc9c4e47cd82d0bd5cdaa3e45",
number: 269,
policy: 0,
tally: {
0xb131288f355bc27090e542ae0be213c20350b767: {
authorize: true,
votes: 1
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 268,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}]
}
```
Let's break this down.
Firstly, we proposed the address `0xb131288f355bc27090e542ae0be213c20350b767` to be added; that is what the `true`
parameter is for. If we had set it to `false`, that would mean we wanted to remove an existing validator with that address.
Secondly, we fetched the current snapshot, which gives us an insight into the current running state of the voting.
We can see that the new address has 1 vote under the `tally` section, and that one vote is described under the
`votes` section. So we know our vote was registered!
4. Let's run this from node 2 and see similar results:
```bash
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
# Again, you may have to wait 5 - 10 seconds for the snapshot to show the vote
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0x93efcd458f3b875902a4532bb77d5e7ebb701791ea95486ecd58baf682312d74",
number: 391,
policy: 0,
tally: {
0xb131288f355bc27090e542ae0be213c20350b767: {
authorize: true,
votes: 2
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 388,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}, {
address: "0xb131288f355bc27090e542ae0be213c20350b767",
authorize: true,
block: 390,
validator: "0x6571d97f340c8495b661a823f2c2145ca47d63c2"
}]
}
```
True to form, we have the second vote registered!
5. OK, let's cast the final votes from nodes 3 and 4.
```bash
$ docker exec -it addnode_node3_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node4_1 geth --exec 'istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", true);' attach /qdata/dd/geth.ipc
null
```
6. Now that we have a majority of votes, let's check the snapshot again:
```bash
docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xd4234184538297f71f5b7024a2e11f51f06b4f569ebd9e3644abd391b8c66101",
number: 656,
policy: 0,
tally: {},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb131288f355bc27090e542ae0be213c20350b767", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: []
}
```
We can see that the votes have now been wiped clean, ready for a new round. Additionally, the address we were adding,
`0xb131288f355bc27090e542ae0be213c20350b767`, now exists within the `validators` list!
Lastly, the `unauthorized` messages that node 7 was giving before have stopped, as it now has the authority to mint
blocks.
## Removing a node from the validator set
Removing a validator is very similar to adding a node, but this time we want to propose nodes with the value `false`,
to indicate we are deauthorising them. It does not matter whether the node is still online or not, as it doesn't
require any input from the node being removed.
!!! warning
Be aware when removing nodes that this can cross the BFT boundary, e.g. going from 10 validators to 9, as this may impact the chain's ability to progress if other nodes are offline (see the sketch below)
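To see the boundary concretely: the number of tolerable faulty nodes is `F = floor((N - 1) / 3)`, so going from 10 validators to 9 drops the tolerance from 3 faulty nodes to 2. A small Go sketch:
```go
package main

import "fmt"

// Print the fault tolerance F = floor((N - 1) / 3) as validators are
// removed, showing the drop at the 10 -> 9 boundary.
func main() {
	for n := 10; n >= 7; n-- {
		fmt.Printf("N=%d validators -> tolerates F=%d faulty nodes\n", n, (n-1)/3)
	}
}
```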
Removing a validator requires that a majority of existing validators propose the node to be removed. This is
achieved by calling the `propose` RPC method with the value `false` and replacing the address with your required one:
```bash
$ geth attach /qdata/dd/geth.ipc
> istanbul.propose("0xb131288f355bc27090e542ae0be213c20350b767", false);
null
```
### Example
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/ibft_validator_set_changes)
repository.
1. The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
2. Bring up the network, which contains 7 nodes, of which 6 are validators.
```bash
# Set the environment variable for docker-compose
$ export COMPOSE_PROJECT_NAME=addnode
# Start the 7 node network, of which 6 are validators
$ docker-compose -f ibft-6-validators.yml up
```
3. Now we need to propose node 6 as the node to remove.
!!! note
We need a majority of existing validators to propose the new node before the changes will take effect.
Let's start with node 1 and see what happens:
```bash
# Propose removal of node 6 from node 1
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
# Wait about 5 seconds, and then run:
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0xba9f9b72cad90ae8aee39f352b45f21d5ed5535b4479743e3f39b231fd717792",
number: 140,
policy: 0,
tally: {
0x8157d4437104e3b8df4451a85f7b2438ef6699ff: {
authorize: false,
votes: 1
}
},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0x8157d4437104e3b8df4451a85f7b2438ef6699ff", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: [{
address: "0x8157d4437104e3b8df4451a85f7b2438ef6699ff",
authorize: false,
block: 136,
validator: "0xd8dba507e85f116b1f7e231ca8525fc9008a6966"
}]
}
```
Let's break this down.
Firstly, we proposed the address `0x8157d4437104e3b8df4451a85f7b2438ef6699ff` to be removed; that is what the
`false` parameter is for.
Secondly, we fetched the current snapshot, which gives us an insight into the current running state of the voting.
We can see that the proposed address has 1 vote under the `tally` section, and that one vote is described under the
`votes` section. Here, the `authorize` section is set to `false`, which is in line with our proposal to *remove* the
validator.
4. We need to get a majority, so let's run the proposal on 3 more nodes:
```bash
$ docker exec -it addnode_node2_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node3_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
$ docker exec -it addnode_node4_1 geth --exec 'istanbul.propose("0x8157d4437104e3b8df4451a85f7b2438ef6699ff", false);' attach /qdata/dd/geth.ipc
null
```
5. Let's check the snapshot now all the required votes are in:
```bash
$ docker exec -it addnode_node1_1 geth --exec 'istanbul.getSnapshot();' attach /qdata/dd/geth.ipc
{
epoch: 30000,
hash: "0x25815a32b086926875ea2c44686e4b20effabc731b2b121ebf0e0f395101eea5",
number: 470,
policy: 0,
tally: {},
validators: ["0x6571d97f340c8495b661a823f2c2145ca47d63c2", "0xb912de287f9b047b4228436e94b5b78e3ee16171", "0xd8dba507e85f116b1f7e231ca8525fc9008a6966", "0xe36cbeb565b061217930767886474e3cde903ac5", "0xf512a992f3fb749857d758ffda1330e590fa915e"],
votes: []
}
```
The validator has been removed from the `validators` list, and we are left with the other 5 still present. You will
also see in the logs of node 6 a message like
`node6_1 | WARN [01-20|11:35:52.044] Block sealing failed err=unauthorized`. This is because it is still minting
blocks, but realises it does not have the authority to push them to any of the other nodes on the network (you will
also see this message for node 7, which was never authorised but still set up to mine).
## See also
- [Adding a new node to the network](/How-To-Guides/adding_nodes)


@ -0,0 +1,388 @@
# Node addition examples
Below are some scenarios for adding a new node into a network, with a mix of different options such as
consensus algorithm, permissioning and discovery.
You can find the resources required to run the examples in the
[quorum-examples](https://github.com/jpmorganchase/quorum-examples/tree/master/examples/adding_nodes) repository.
Check out the repository through `git`, or otherwise download all the resources to your local machine, to follow along.
The examples use `docker-compose` for the container definitions. If you are following along by copying the commands
described, then it is important to set the project name for Docker Compose, or to remember to change the prefix for
your directory. See [Docker documentation](https://docs.docker.com/compose/reference/envvars/#compose_project_name)
for more details.
To set the project name, run the following:
```bash
$ export COMPOSE_PROJECT_NAME=addnode
```
## Non-permissioned IBFT with discovery
An example using IBFT, with no permissioning and with discovery enabled via a bootnode.
There are no static peers in this network; instead, every node is set to talk to node 1 via the CLI flag
`--bootnodes enode://ac6b1096ca56b9f6d004b779ae3728bf83f8e22453404cc3cef16a3d9b96608bc67c4b30db88e0a5a6c6390213f7acbe1153ff6d23ce57380104288ae19373ef@172.16.239.11:21000`.
Node 1 will forward the details of all the nodes it knows about (in this case, everyone) and they will then initiate their
own connections.
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f ibft-non-perm-bootnode.yml down
# Bring up 6 nodes
$ docker-compose -f ibft-non-perm-bootnode.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
* The block creation period is set to 2 seconds, so you may have to wait up to that amount of time for the transaction to be minted.
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. Bring up the last node. This node also has its bootnodes set to be node 1, so at startup it will try to establish a
connection to node 1 only. After this, node 1 will share which nodes it knows about, and node 7 can then initiate
connections with those peers.
```bash
# Bring up node 7
$ docker-compose -f ibft-non-perm-bootnode.yml up node7
```
4. Let's check to see if the nodes are in sync. If they are, they will have similar block numbers, which is enough for
this example; there are other ways to tell if nodes are on the same chain, e.g. matching block hashes.
!!! note
Depending on timing, the second node may have an extra block or two.
```bash
# Fetch the latest block number for node 1
$ docker exec -it addnode_node1_1 geth --exec 'eth.blockNumber' attach /qdata/dd/geth.ipc
45
# Fetch the latest block number for node 7
$ docker exec -it addnode_node7_1 geth --exec 'eth.blockNumber' attach /qdata/dd/geth.ipc
45
```
5. We can check that the transaction and contract we sent earlier now exist on node 7.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
6. To be sure we have two way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
7. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6 node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Non-permissioned RAFT with discovery disabled
This example walks through adding a new node to a RAFT network. This network does not have permissioning for the
Ethereum peer-to-peer layer, and makes its connections solely based on who is listed in the node's `static-nodes.json`
file.
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f raft-non-perm-nodiscover.yml down
# Bring up 6 nodes
$ docker-compose -f raft-non-perm-nodiscover.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. We need to add the new peer to the RAFT network before it joins, otherwise the existing nodes will reject it from
the RAFT communication layer; we also need to know what ID the new node should join with.
```bash
# Add the new node
$ docker exec -it addnode_node1_1 geth --exec 'raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@172.16.239.17:21000?discport=0&raftport=50400")' attach /qdata/dd/geth.ipc
7
```
The return value is the RAFT ID of the new node. When the node joins the network for the first time, it will need
this ID number handy. If it was lost, you can always view the full network, including IDs, by running the
`raft.cluster` command on an existing node.
4. Bring up the last node. Here, we pass the newly created ID number as a flag into the startup of node 7. This lets
the node know not to bootstrap a new network from the contents of `static-nodes.json`, but to connect to an existing
node there and fetch any bootstrap information.
```bash
# Bring up node 7
$ QUORUM_GETH_ARGS="--raftjoinexisting 7" docker-compose -f raft-non-perm-nodiscover.yml up node7
```
5. Let's check to see if the nodes are in sync. We can do this by checking whether node 7 has the contract we viewed earlier.
```bash
# Fetch the contracts value on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
6. To be sure we have two way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
7. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6 node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Permissioned RAFT with discovery disabled
This example walks through adding a new node to a RAFT network. This network does have permissioning enabled for the
Ethereum peer-to-peer layer; this means that for any Ethereum tasks, such as syncing the initial blockchain or
propagating transactions, the node must appear in other nodes' `permissioned-nodes.json` files.
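For reference, `permissioned-nodes.json` uses the same format as `static-nodes.json`: a JSON array of enode URLs. A minimal sketch of an entry permitting node 7 (reusing the enode URL from step 3 below; a real file lists every permitted node) looks like:
```json
[
  "enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@172.16.239.17:21000?discport=0&raftport=50400"
]
```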
1. Bring up an initial network of 6 nodes.
```bash
# Ensure any old network is removed
$ docker-compose -f raft-perm-nodiscover.yml down
# Bring up 6 nodes
$ docker-compose -f raft-perm-nodiscover.yml up node1 node2 node3 node4 node5 node6
```
2. Send in a public transaction and check it is minted.
!!! note
* The transaction hashes will likely be different, but the contract addresses will be the same for your network.
```bash
# Send in the transaction
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909 waiting to be mined...
true
# Retrieve the value of the contract
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
We created a transaction, in this case with hash `0xd1bf0c15546802e5a121f79d0d8e6f0fa45d4961ef8ab9598885d28084cfa909`,
and then retrieved its value, which was set to be `42`.
3. We need to add the new peer to the RAFT network before it joins, otherwise the existing nodes will reject it from
the RAFT communication layer; we also need to know what ID the new node should join with.
```bash
# Add the new node
$ docker exec -it addnode_node1_1 geth --exec 'raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@172.16.239.17:21000?discport=0&raftport=50400")' attach /qdata/dd/geth.ipc
7
```
The return value is the RAFT ID of the new node. When the node joins the network for the first time, it will need
this ID number handy. If it was lost, you can always view the full network, including IDs, by running the
`raft.cluster` command on an existing node.
4. Bring up the last node. Here, we pass the newly created ID number as a flag into the startup of node 7. This lets
the node know not to bootstrap a new network from the contents of `static-nodes.json`, but to connect to an existing
node there and fetch any bootstrap information.
```bash
# Bring up node 7
$ QUORUM_GETH_ARGS="--raftjoinexisting 7" docker-compose -f raft-perm-nodiscover.yml up node7
```
5. Let's check to see if the nodes are in sync. We can do this by checking whether node 7 has the contract we viewed earlier.
```bash
# Fetch the contracts value on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
0
```
The value here is `0`, not the expected `42`! Node 7 is unable to sync the blockchain because the other peers in the
network are refusing to allow connections from node 7, due to it being missing in the `permissioned-nodes.json` file.
This does not affect the RAFT layer, so if node 7 were already in sync, it could still receive new blocks; this is
okay though, since it would be permissioned on the RAFT side by virtue of being part of the RAFT cluster.
6. Let's update the permissioned nodes list on node 1, which will allow node 7 to connect to it.
```bash
$ docker exec -it addnode_node1_1 cp /extradata/static-nodes-7.json /qdata/dd/permissioned-nodes.json
```
7. Node 7 should now be synced up through node 1. Let's see if we can see the contract we made earlier.
!!! note
Quorum attempts to re-establish connections every 30 seconds, so you may have to wait for the sync to happen.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1932c48b2bf8102ba33b4a6b545c32236e342f34"); private.get();' attach /qdata/dd/geth.ipc
42
```
8. To be sure we have two-way communication, let's send a transaction from node 7 to the network.
```bash
$ docker exec -it addnode_node7_1 geth --exec 'loadScript("/examples/public-contract.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x84cefc3aab8ce5797dc73c70db604e5c8830fc7c2cf215876eb34fff533e2725 waiting to be mined...
true
```
9. Finally, we can check if the transaction was minted and the contract executed on each node.
```bash
# Check on node 1
$ docker exec -it addnode_node1_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
# Check on node 7
$ docker exec -it addnode_node7_1 geth --exec 'var private = eth.contract([{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"retVal","type":"uint256"}],"payable":false,"type":"function"}]).at("0x1349f3e1b8d71effb47b840594ff27da7e603d17"); private.get();' attach /qdata/dd/geth.ipc
42
```
And that's it. We deployed a working 6-node network, and then added a 7th node afterwards; this 7th node was able to
read existing public data, as well as deploy its own transactions and contracts for others to see!
## Adding a Private Transaction Manager
This is a simple example of adding a new Tessera instance to an existing network. For simplicity,
the steps to add the Quorum node are omitted; they are the same as those followed in the IBFT example.
Here, a Tessera node is added without any of the discovery options specified, meaning that the
IP Whitelist isn't used, nor is key discovery disabled.
1. Start up the initial 6 node network.
```bash
# Ensure any old network is removed
$ docker-compose -f tessera-add.yml down
# Bring up 6 nodes
$ docker-compose -f tessera-add.yml up node1 node2 node3 node4 node5 node6
```
2. We can verify that private transactions can be sent by sending one from node 1 to node 6.
We can also see that since node 7 doesn't exist yet, we can't send private transactions to it.
```bash
# Send a private transaction from node 1 to node 6
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-6.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0xc8a5de4bb79d4a8c3c1156917968ca9b2965f2514732fc1cff357ec999b9aba4 waiting to be mined...
true
# Success!
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-7.js")' attach /qdata/dd/geth.ipc
err creating contract Error: Non-200 status code: &{Status:404 Not Found StatusCode:404 Proto:HTTP/1.1 ProtoMajor:1 ProtoMinor:1 Header:map[Server:[Jetty(9.4.z-SNAPSHOT)] Date:[Thu, 16 Jan 2020 12:44:19 GMT] Content-Type:[text/plain] Content-Length:[73]] Body:0xc028e87d40 ContentLength:73 TransferEncoding:[] Close:false Uncompressed:false Trailer:map[] Request:0xc000287200 TLS:<nil>}
true
# An expected failure. The script content didn't succeed, but the script itself was run okay, so true was still returned
```
3. Let's first bring up node 7, then we can inspect what is happening and the configuration used.
```bash
# Bring up node 7
$ docker-compose -f tessera-add.yml up node7
$ docker exec -it addnode_node7_1 cat /qdata/tm/tessera-config.json
# ...some output...
```
The last command will output Tessera 7's configuration.
The pieces we are interested in here are the following:
```json
{
"useWhiteList": false,
"peer": [
{
"url": "http://txmanager1:9000"
}
],
...
}
```
We can see that the whitelist is not enabled, discovery is not specified so defaults to enabled,
and we have a single peer to start off with, which is node 1.
This is all that is needed to connect to an existing network. Shortly after starting up, Tessera
will ask node 1 about all its peers, and will then keep a record of them for its own use. From
then on, all the nodes will know about node 7 and can send private transactions to it.
4. Let's try it by sending a private transaction from node 1 to the newly added node 7.
```bash
# Sending a transaction from node 1 to node 7
$ docker exec -it addnode_node1_1 geth --exec 'loadScript("/examples/private-contract-7.js")' attach /qdata/dd/geth.ipc
Contract transaction send: TransactionHash: 0x3e3b50768ffdb51979677ddb58f48abdabb82a3fd4f0bac5b3d1ad8014e954e9 waiting to be mined...
true
```
We got a success this time! Tessera 7 has been accepted into the network and can interact with the
other existing nodes.
View File
@ -0,0 +1,227 @@
# Adding nodes to the network
Adding new nodes to an existing network can range from a common occurrence to something that never happens.
In public blockchains, such as the Ethereum Mainnet, new nodes continuously join and talk to the existing network.
In permissioned blockchains, this may not happen as often, but it is still an important task to achieve as your network
evolves.
When adding new nodes to the network, it is important to understand that the Quorum network and the Private Transaction
Manager network are distinct and do not overlap in any way. Therefore, options applicable to one are not applicable to
the other. In some cases, they may have their own options to achieve similar tasks, but these must be specified separately.
## Prerequisites
- [Quorum installed](/Getting%20Started/Installing.md)
- [Tessera/Constellation installed](/Getting%20Started/Installing.md) if using private transactions
- A running network (see [Creating a Network From Scratch](/Getting%20Started/Creating-A-Network-From-Scratch))
## Adding Quorum nodes
Adding a new Quorum node is the most common operation: you can choose to run a Quorum node with or without a Private
Transaction Manager, but rarely will one run a Private Transaction Manager on its own.
### Raft
1. On an *existing* node, add the new peer to the raft network
```
> raft.addPeer("enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@127.0.0.1:21006?discport=0&raftport=50407")
7
```
So in this example, our new node has a Raft ID of `7`.
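For reference, the enode URI passed to `raft.addPeer` has the following shape (the `raftport` query parameter is required for Raft networks, and `discport=0` is typically used when discovery is disabled):
```
enode://<node public key>@<ip>:<p2p port>?discport=0&raftport=<raft port>
```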
2. If you are using permissioning, or discovery for Ethereum p2p, please refer to the [Extra options](#extra-options) section below.
3. We now need to initialise the new node with the network's genesis configuration.
!!! note
Where you obtain this from will be dependent on the network. You may get it from an existing peer, or a network operator, or elsewhere entirely.
Initialising the new node is exactly the same as for the original nodes.
```bash
$ geth --datadir qdata/dd7 init genesis.json
```
4. Now we can start up the new node and let it sync with the network. The main difference now is the use of the
`--raftjoinexisting` flag, which tells the node that it is joining an existing network; this is handled
differently internally. The Raft ID obtained in step 1 is passed as a parameter to this flag.
```bash
$ PRIVATE_CONFIG=ignore geth --datadir qdata/dd7 ... OTHER ARGS ... --raft --raftport 50407 --rpcport 22006 --port 21006 --raftjoinexisting 7
```
The new node is now up and running, and will start syncing the blockchain from existing peers. Once this has
completed, it can send new transactions just as any other peer.
### IBFT/Clique
Adding nodes to an IBFT/Clique network is a bit simpler, as the node only needs to configure itself rather than being
pre-allocated on the network (permissioning aside).
1. Initialise the new node with the network's genesis configuration.
!!! note
Where you obtain this from will be dependent on the network. You may get it from an existing peer, or a network operator, or elsewhere entirely.
Initialising the new node is exactly the same as for the original nodes.
```bash
$ geth --datadir qdata/dd7 init genesis.json
```
2. If you are using permissioning or discovery for Ethereum peer-to-peer, please refer to the [Extra options](#extra-options) section below.
3. Start the new node, pointing either to a `bootnode` or listing an existing peer in the `static-nodes.json` file.
Once a connection is established, the node will start syncing the blockchain, after which transactions can be sent.
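As a rough sketch (the ports and datadir here mirror the Raft example above and are assumptions, not prescribed values), starting the new node might look like:
```bash
# Sketch only: replace ports, datadir and OTHER ARGS to match your own network
$ PRIVATE_CONFIG=ignore geth --datadir qdata/dd7 ... OTHER ARGS ... --rpcport 22006 --port 21006
```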
### Extra options
Some options take effect regardless of the consensus mechanism used.
#### Permissioned nodes
If using the `permissioned-nodes.json` file for permissioning, then you must make sure this file is updated on all
nodes before the new node is able to communicate with existing nodes. You do not need to restart any nodes in
order for the changes to take effect.
#### Static node connections
If not using peer-to-peer node discovery (i.e. you have specified `--nodiscover`), then the only connections a node
will make are to the peers defined in its `static-nodes.json` file. When adding a new node, you should make sure you have
peers defined in its `static-nodes.json` file. The more peers you define here, the better network connectivity
and fault tolerance you have.
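A minimal `static-nodes.json` might look like the following sketch (the first entry reuses the enode from the Raft example above; the second is a placeholder, and the `raftport` parameter is only relevant on Raft networks):
```json
[
  "enode://239c1f044a2b03b6c4713109af036b775c5418fe4ca63b04b1ce00124af00ddab7cc088fc46020cdc783b6207efe624551be4c06a994993d8d70f684688fb7cf@127.0.0.1:21006?discport=0&raftport=50407",
  "enode://<another peer's public key>@127.0.0.1:21001?discport=0&raftport=50402"
]
```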
!!! note
* You do not need to update the existing peers' static nodes files for the connection to be established, although it is good practice to do so.
* You do not need to specify every peer in your static nodes file if you do not wish to connect to every peer directly.
#### Peer-to-peer discovery
If you are using discovery, then more options *in addition* to static nodes become available.
- Any nodes that are connected to your peers, which at the start will be the ones defined in the static node list, will
then be visible to you, allowing you to connect to them; this is done automatically.
- You may specify any number of bootnodes, defined by the `--bootnodes` parameter. This takes a comma-separated list
of enode URIs, similar to the `static-nodes.json` file. These act in the same way as static nodes, letting you connect
to them and then find out about other peers, which you can then connect to.
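For example (a sketch: the enode and port are placeholders), supplying a bootnode at startup looks like:
```bash
$ geth --datadir qdata/dd7 ... OTHER ARGS ... --bootnodes enode://<bootnode public key>@127.0.0.1:21000
```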
!!! note
If you have discovery disabled, this means you will not try to find other nodes to connect to, but others can still find and connect to you.
## Adding Private Transaction Managers
This tutorial does not focus on the advanced features of adding a new Private Transaction Manager (PTM), and uses
[Tessera](https://github.com/jpmorganchase/tessera) for all examples.
Adding a new node to the PTM network is relatively straightforward, but there are a number of extra options that can
be used, which are explained here.
### Adding a new PTM node
In a basic setting, adding a new PTM node is as simple as making sure you have one of the existing nodes listed in your
peer list.
In Tessera, this would equate to the following in the configuration file:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
}
]
}
```
From there, Tessera will connect to that peer and discover all the other PTM nodes in the network, connecting to each
of them in turn.
!!! note
You may want to include multiple peers in the peer list in case any of them are offline/unreachable.
### IP whitelisting
The IP Whitelist that Tessera provides allows you to restrict connections, much like the `permissioned-nodes.json` file
does for Quorum. Only IP addresses/hostnames listed in your peers list will be allowed to connect to you.
See the [Tessera configuration page](/Privacy/Tessera/Configuration/Configuration%20Overview#whitelist) for details on setting it up.
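For reference, turning the whitelist on is a single top-level flag in the Tessera configuration, as seen in the sample configuration earlier in this guide:
```json
{
  "useWhiteList": true,
  ...
}
```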
In order to make sure the new node is accepted into the network:
1. You will need to add the new peer to each of the existing nodes before communication is allowed.
Tessera provides a way to do this without needing to restart an already running node:
```bash
$ java -jar tessera.jar admin -configfile /path/to/existing-node-config.json -addpeer http://newpeer.com:8080
```
2. The new peer can be started, setting the `peers` configuration to mirror the existing network.
e.g. if there are 3 existing nodes in the network, then the new node's configuration will look like this:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
},
{
"url": "http://existingpeer2.com:8080"
},
{
"url": "http://existingpeer3.com:8080"
}
]
}
```
The new node will allow incoming connections from the existing peers, and then existing peers will allow incoming
connections from the new peer!
### Discovery
Tessera discovery is very similar to the IP whitelist. The difference is that the IP whitelist blocks
communications between nodes, whereas disabling discovery only affects which public keys are kept track of.
See the [Tessera configuration page](/Privacy/Tessera/Configuration/Configuration%20Overview#disabling-peer-discovery) for
details on setting it up.
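As a sketch (the exact key name and placement are described on the linked configuration page; the snippet below is an assumption based on a default Tessera setup), disabling discovery looks like this in the configuration file:
```json
{
  "disablePeerDiscovery": true,
  ...
}
```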
When discovery is disabled, Tessera will only allow keys that are owned by a node in its peer list to be available to
the users. This means that if any keys are found that are owned by a node NOT in our peer list, they are discarded and
private transactions cannot be sent to that public key.
!!! note
This does not affect incoming transactions. Someone not in your peer list can still send transactions to your node, unless you also enable the IP Whitelist option.
In order to make sure the new node is accepted into the network:
1. You will need to add the new peer to each of the existing nodes before they will accept public keys that are linked
to the new peer.
Tessera provides a way to do this without needing to restart an already running node:
```bash
$ java -jar tessera.jar admin -configfile /path/to/existing-node-config.json -addpeer http://newpeer.com:8080
```
2. The new peer can be started, setting the `peers` configuration to mirror the existing network.
e.g. if there are 3 existing nodes in the network, then the new node's configuration will look like this:
```json
{
"peers": [
{
"url": "http://existingpeer1.com:8080"
},
{
"url": "http://existingpeer2.com:8080"
},
{
"url": "http://existingpeer3.com:8080"
}
]
}
```
The new node will now record public keys belonging to the existing peers, and the existing peers will record
public keys belonging to the new peer; this allows private transactions to be sent in both directions!
## Examples
For a walkthrough of some examples that put into action the above, check out [this guide](/How-To-Guides/add_node_examples)!
View File
@ -14,7 +14,7 @@ The following parameters are of interest to be collected and analyzed:
!!! success "Ensure all activities of Quorum hosts are being logged to centralized log system"
!!! success "Centralized log system most be able to provide query capabilites over the following parameters:"
!!! success "Centralized log system must be able to provide query capabilites over the following parameters:"
- Ethereum accounts on the network
- Active ledger, transaction manager nodes in the network
- Public and Private transaction rates per account in the network.
View File
@ -20,8 +20,11 @@ nav:
- Quorum API: Getting Started/api.md
- Consensus:
- Consensus: Consensus/Consensus.md
- Raft: Consensus/raft.md
- Raft:
    - Consensus/raft/raft.md
    - Consensus/raft/raft-rpc-api.md
- Istanbul BFT:
    - Overview: Consensus/ibft/ibft.md
    - Consensus/ibft/istanbul-rpc-api.md
    - Consensus/ibft/ibft-parameters.md
- Transaction Processing: Transaction Processing/Transaction Processing.md
@ -81,6 +84,9 @@ nav:
- Getting Started: RemixPlugin/Getting started.md
- Quorum Features:
- DNS: Features/dns.md
- How-To Guides:
    - Adding new nodes: How-To-Guides/adding_nodes.md
    - Adding IBFT validators: How-To-Guides/add_ibft_validator.md
- Product Roadmap: roadmap.md
- FAQ: FAQ.md
View File
@ -1,5 +1,10 @@
package raft
import (
	"errors"

	"github.com/coreos/etcd/pkg/types"
)
type RaftNodeInfo struct {
	ClusterSize int    `json:"clusterSize"`
	Role        string `json:"role"`
@ -19,22 +24,49 @@ func NewPublicRaftAPI(raftService *RaftService) *PublicRaftAPI {
}
func (s *PublicRaftAPI) Role() string {
	if err := s.checkIfNodeInCluster(); err != nil {
		return ""
	}
	_, err := s.raftService.raftProtocolManager.LeaderAddress()
	if err != nil {
		return ""
	}
	return s.raftService.raftProtocolManager.NodeInfo().Role
}
// helper function to check if self node is part of cluster
func (s *PublicRaftAPI) checkIfNodeInCluster() error {
	if s.raftService.raftProtocolManager.IsIDRemoved(uint64(s.raftService.raftProtocolManager.raftId)) {
		return errors.New("node not part of raft cluster. operations not allowed")
	}
	return nil
}
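// AddPeer proposes the node identified by enodeId as a new full (voting) peer
// of the raft cluster and returns the raft ID assigned to it.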
func (s *PublicRaftAPI) AddPeer(enodeId string) (uint16, error) {
	if err := s.checkIfNodeInCluster(); err != nil {
		return 0, err
	}
	return s.raftService.raftProtocolManager.ProposeNewPeer(enodeId, false)
}
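// AddLearner proposes the node identified by enodeId as a learner (non-voting
// member) of the raft cluster and returns the raft ID assigned to it.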
func (s *PublicRaftAPI) AddLearner(enodeId string) (uint16, error) {
	if err := s.checkIfNodeInCluster(); err != nil {
		return 0, err
	}
	return s.raftService.raftProtocolManager.ProposeNewPeer(enodeId, true)
}
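// PromoteToPeer proposes promoting the learner with the given raft ID to a
// full (voting) peer of the cluster.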
func (s *PublicRaftAPI) PromoteToPeer(raftId uint16) (bool, error) {
	if err := s.checkIfNodeInCluster(); err != nil {
		return false, err
	}
	return s.raftService.raftProtocolManager.PromoteToPeer(raftId)
}
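// RemovePeer proposes removing the node with the given raft ID from the cluster.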
func (s *PublicRaftAPI) RemovePeer(raftId uint16) error {
	if err := s.checkIfNodeInCluster(); err != nil {
		return err
	}
	return s.raftService.raftProtocolManager.ProposePeerRemoval(raftId)
}
@ -48,33 +80,57 @@ func (s *PublicRaftAPI) Leader() (string, error) {
}
func (s *PublicRaftAPI) Cluster() ([]ClusterInfo, error) {
	// check if the node has already been removed from cluster
	// if yes return nil
	if err := s.checkIfNodeInCluster(); err != nil {
		return []ClusterInfo{}, nil
	}
	nodeInfo := s.raftService.raftProtocolManager.NodeInfo()
	if nodeInfo.Role == "" {
		return []ClusterInfo{}, nil
	}
	noLeader := false
	leaderAddr, err := s.raftService.raftProtocolManager.LeaderAddress()
	if err != nil {
		if err == errNoLeaderElected && s.Role() == "" {
			noLeader = true
			if s.raftService.raftProtocolManager.NodeInfo().Role == "" {
				return []ClusterInfo{}, nil
			}
		return []ClusterInfo{}, err
	}
	peerAddresses := append(nodeInfo.PeerAddresses, nodeInfo.Address)
	clustInfo := make([]ClusterInfo, len(peerAddresses))
	for i, a := range peerAddresses {
		role := ""
		if a.RaftId == leaderAddr.RaftId {
			role = "minter"
		} else if s.raftService.raftProtocolManager.isLearner(a.RaftId) {
			role = "learner"
		} else if s.raftService.raftProtocolManager.isVerifier(a.RaftId) {
			role = "verifier"
		if !noLeader {
			if a.RaftId == leaderAddr.RaftId {
				role = "minter"
			} else if s.raftService.raftProtocolManager.isLearner(a.RaftId) {
				role = "learner"
			} else if s.raftService.raftProtocolManager.isVerifier(a.RaftId) {
				role = "verifier"
			}
		}
		clustInfo[i] = ClusterInfo{*a, role}
		clustInfo[i] = ClusterInfo{*a, role, s.checkIfNodeIsActive(a.RaftId)}
	}
	return clustInfo, nil
}
// checkIfNodeIsActive checks if the raft node is active
// if the raft node is active ActiveSince returns non-zero time
func (s *PublicRaftAPI) checkIfNodeIsActive(raftId uint16) bool {
	if raftId == s.raftService.raftProtocolManager.raftId {
		return true
	}
	activeSince := s.raftService.raftProtocolManager.transport.ActiveSince(types.ID(raftId))
	if activeSince.IsZero() {
		return false
	}
	return true
}

func (s *PublicRaftAPI) GetRaftId(enodeId string) (uint16, error) {
	return s.raftService.raftProtocolManager.FetchRaftId(enodeId)
}
View File
@ -29,7 +29,8 @@ type Address struct {
type ClusterInfo struct {
	Address
	Role string `json:"role"`
	Role       string `json:"role"`
	NodeActive bool   `json:"nodeActive"`
}
func newAddress(raftId uint16, raftPort int, node *enode.Node, withHostname bool) *Address {