Merge branch 'master' into feature/permissioning-1.8.18-suborgs-docs

This commit is contained in:
Samer Falah 2019-05-22 09:24:54 -04:00 committed by GitHub
commit 321f7b27f8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 424 additions and 84 deletions

View File

@ -74,20 +74,30 @@ matrix:
os: linux
dist: xenial
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
go run build/ci.go install
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
QUORUM_IGNORE_TEST_PACKAGES=github.com/ethereum/go-ethereum/swarm go run build/ci.go test -coverage $TEST_PACKAGES
else
go run build/ci.go test -coverage $TEST_PACKAGES
fi
- if: tag IS blank
os: osx
osx_image: xcode9.2 # so we don't have to deal with Kernel Extension Consent UI which is never possible in CI
script:
- brew update
- brew install caskroom/cask/brew-cask
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- |
brew update
brew install caskroom/cask/brew-cask
brew cask install osxfuse
go run build/ci.go install
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
QUORUM_IGNORE_TEST_PACKAGES=github.com/ethereum/go-ethereum/swarm go run build/ci.go test -coverage $TEST_PACKAGES
else
go run build/ci.go test -coverage $TEST_PACKAGES
fi
- if: tag IS present
os: linux

View File

@ -329,6 +329,7 @@ func doTest(cmdline []string) {
packages = flag.CommandLine.Args()
}
packages = build.ExpandPackagesNoVendor(packages)
packages = build.IgnorePackages(packages)
// Run the actual tests.
// Test a single package at a time. CI builders are slow

View File

@ -166,7 +166,7 @@ func (c *core) IsProposer() bool {
}
func (c *core) IsCurrentProposal(blockHash common.Hash) bool {
return c.current.pendingRequest != nil && c.current.pendingRequest.Proposal.Hash() == blockHash
return c.current != nil && c.current.pendingRequest != nil && c.current.pendingRequest.Proposal.Hash() == blockHash
}
func (c *core) commit() {

40
docs/FAQ.md Normal file
View File

@ -0,0 +1,40 @@
??? question "I've run into an issue with Quorum, where do I get support?"
There are two places Quorum engineering team monitors on an on-going basis: issues in this and related repositories and on Quorum Slack. Quorum Slack is the best place to query the community and get immediate help. Auto-inviter is available [here](https://clh7rniov2.execute-api.us-east-1.amazonaws.com/Express/).
??? question "How does Quorum achieve Transaction Privacy?"
Quorum achieves Transaction Privacy by:
1. Enabling transaction Senders to create a private transaction by marking who is privy to that transaction via the `privateFor` parameter
2. Replacing the payload of a private transaction with a hash of the encrypted payload, such that the original payload is not visible to participants who are not privy to the transaction
3. Storing encrypted private data off-chain in a separate component called the Transaction Manager (provided by [Constellation](https://github.com/jpmorganchase/constellation) or [Tessera](https://github.com/jpmorganchase/tessera)). The Transaction Manager distributes the encrypted data to other parties that are privy to the transaction and returns the decrypted payload to those parties
Please see the [Transaction Processing](../Transaction%20Processing/Transaction%20Processing) page for more info.
??? question "How does Quorum achieve consensus on Private Transactions?"
In standard Ethereum, all nodes process all transactions and so each node has the same state root. In Quorum, nodes process all 'public' transactions (which might include reference data or market data contracts for example) but only process the private transactions that they are party to.
Quorum nodes maintain two Patricia Merkle Tries: one for private state and one for public state. As a result, block validation includes a **state** check on the new-to-Quorum `public state root`. Block validation also includes a check of the `global Transaction hash`, which is a hash of **all** Transactions in a block - private and public. This means that each node is able to validate that it has the same set of Transactions as other nodes. Since the EVM is provably deterministic through the synchronized public state root, and that the Private Transaction inputs are known to be in sync across nodes (global Transaction Hash), private state synchronization across nodes can be implied. In addition, Quorum provides an API call, `eth_storageRoot`, that returns the private state hash for a given transaction at a given block height, that can optionally be called at the application layer to specifically perform an off-chain state validation with a counterparty.
Please see the [Quorum Consensus](../Consensus/Consensus) and [Transaction Processing](../Transaction%20Processing/Transaction%20Processing) pages for more info.
??? question "Are there any restrictions on the transaction size for private transactions (since they are encrypted)?"
The only restriction is the gas limit on the transaction. Constellation/Tessera does not have a size limit (although maybe it should be possible to set one). If anything, performing large transactions as private transactions will improve performance because most of the network only sees hash digests. In terms of performance of transferring large data blobs between geographically distributed nodes, it would be equivalent performance to PGP encrypting the file and transferring it over http/https, so very fast. If you are doing sequential transactions then of course you will have to wait for those transfers, but there is no special overhead by the payload being large if you are doing separate/concurrent transactions, subject to network bandwidth limits. Constellation/Tessera does everything in parallel.
??? question "Should I include originating node in private transaction?"
No, you should not. In Quorum, including the originating node in the `privateFor` param will result in an error. If you would like to create a private contract that is visible to the originating node only please use this format: `privateFor: []` per https://github.com/jpmorganchase/quorum/pull/165
??? question "Is it possible to run a Quorum node without Transaction Manager?"
It is possible to run a node without a corresponding Transaction Manager. To do this, instead of pointing to a matching Tessera/Constellation node's socket, the configuration should be set to `PRIVATE_CONFIG=ignore ...`. A node running in such a configuration will not broadcast matching private keys (please ensure that there is no transaction manager running for it) and will be unable to participate in any private transactions.
??? info "Known Raft consensus node misconfiguration"
Please see https://github.com/jpmorganchase/quorum/issues/410
??? question "Is there an official docker image for Quorum/Constellation/Tessera?"
Yes! The [official docker containers](https://hub.docker.com/u/quorumengineering/):
`quorumengineering/quorum:latest`
`quorumengineering/constellation:latest`
`quorumengineering/tessera:latest`
??? question "Can I mix Quorum nodes with different consensus configuration?"
Unfortunately, that is not possible. Quorum nodes configured with raft will only be able to work correctly with other nodes running raft consensus. This applies to all other supported consensus algorithms.

View File

@ -17,12 +17,17 @@ Your node is now operational and you may attach to it with `geth attach new-node
2. Retrieve the current chain's `genesis.json` and `static-nodes.json`. `static-nodes.json` should be placed into new nodes data dir
3. Initialize new node with `geth --datadir new-node-2 init genesis.json`
4. Edit `static-nodes.json` and add new entry for the new node you are configuring (should be last)
5. Start your node and send into background with `PRIVATE_CONFIG=ignore nohup geth --datadir new-node-2 --nodiscover --verbosity 5 --networkid 31337 --raft --raftport 50005 --rpc --rpcaddr 0.0.0.0 --rpcport 22005 --rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,raft --emitcheckpoints --port 21005 2>>node.log &`
6. Connect to an already running node of the chain and execute `raft.addPeer('enode://new-nodes-enode-address-from-step-6-of-the-above@127.0.0.1:21005?discport=0&raftport=50005')`
5. Connect to an already running node of the chain and execute `raft.addPeer('enode://new-nodes-enode-address-from-step-6-of-the-above@127.0.0.1:21001?discport=0&raftport=50001')`
6. Start your node and send into background with `PRIVATE_CONFIG=ignore nohup geth --datadir new-node-2 --nodiscover --verbosity 5 --networkid 31337 --raft --raftport 50001 --raftjoinexisting RAFT_ID --rpc --rpcaddr 0.0.0.0 --rpcport 22001 --rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,raft --emitcheckpoints --port 21001 2>>node.log &`, where `RAFT_ID` is the response of the `raft.addPeer()` command in step 5.
7. Optional: share new `static-nodes.json` with all other chain participants
Your additional node is now operational and is part of the same chain as the previously set up node.
### Removing node
1. Connect to an already running node of the chain and execute `raft.cluster` and get the `RAFT_ID` corresponding to the node that needs to be removed
2. Run `raft.removePeer(RAFT_ID)`
3. Stop the `geth` process corresponding to the node that was removed.
## Quorum with Istanbul BFT consensus
@ -42,6 +47,40 @@ Your node is now operational and you may attach to it with `geth attach data/get
Please note that istanbul-tools may be used to generate X number of nodes, more information is available in the [docs](https://github.com/jpmorganchase/istanbul-tools).
### Adding additional validator
1. Create a working directory for the new node that needs to be added
2. Change into the working directory for the new node and run `istanbul setup --num 1 --verbose --quorum --save`. This will generate the validator details including Address, NodeInfo and genesis.json
3. Copy the address of the validator and run `istanbul.propose(<address>, true)` from more than half the number of current validators.
4. Verify that the new validator has been added to the list of validators by running `istanbul.getValidators()`
5. Build Quorum as described in the [getting set up](../Setup%20Overview%20%26%20Quickstart) section. Ensure that PATH contains geth
6. Copy `static-nodes.json` and genesis.json from the existing chain. `static-nodes.json` should be placed into new nodes data dir
7. Edit `static-nodes.json` and add the new validator's node info to the end of the file. The new validator's node info can be obtained from the output of the `istanbul setup --num 1 --verbose --quorum --save` command that was run in step 2. Update the IP address and port of the node info to match the IP address of the validator and port you want to use.
8. Copy the nodekey that was generated by `istanbul setup` command to the `geth` directory inside the working directory
9. Generate one or more accounts for this node using `geth --datadir new-node-1 account new` and take down the account address.
10. Initialize new node with `geth --datadir new-node-1 init genesis.json`
11. Start the node and send into background with `PRIVATE_CONFIG=ignore nohup geth --datadir data --permissioned --nodiscover --istanbul.blockperiod 5 --syncmode full --mine --minerthreads 1 --verbosity 5 --networkid 10 --rpc --rpcaddr 0.0.0.0 --rpcport YOUR_NODES_RPC_PORT_NUMBER --rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul --emitcheckpoints --port YOUR_NODES_PORT_NUMBER 2>>node.log &`, remember to replace `YOUR_NODES_RPC_PORT_NUMBER` and `YOUR_NODES_PORT_NUMBER` with your node's designated port numbers. `YOUR_NODES_PORT_NUMBER` must match the port number for this node decided on in part 7
### Removing validator
1. Attach to a running validator and run `istanbul.getValidators()` and identify the address of the validator that needs to be removed
2. Run `istanbul.propose(<address>, false)` by passing the address of the validator that needs to be removed from more than half current validators
3. Verify that the validator has been removed by running `istanbul.getValidators()`
4. Stop the `geth` process corresponding to the validator that was removed.
### Adding non-validator node
1. Create a working directory for the new node that needs to be added
2. Change into the working directory for the new node and run `istanbul setup --num 1 --verbose --quorum --save`. This will generate the node details including Address, NodeInfo and genesis.json
3. Build Quorum as described in the [getting set up](../Setup%20Overview%20%26%20Quickstart) section. Ensure that PATH contains geth
4. Copy `static-nodes.json` and genesis.json from the existing chain. `static-nodes.json` should be placed into new nodes data dir
5. Edit `static-nodes.json` and add the new node info to the end of the file. The new node info can be obtained from the output of the `istanbul setup --num 1 --verbose --quorum --save` command that was run in step 2. Update the IP address and port of the node info to match the IP address of the validator and port you want to use.
6. Copy the nodekey that was generated by `istanbul setup` command to the `geth` directory inside the working directory
7. Generate one or more accounts for this node using `geth --datadir new-node-1 account new` and take down the account address.
8. Initialize new node with `geth --datadir new-node-1 init genesis.json`
9. Start the node and send into background with `PRIVATE_CONFIG=ignore nohup geth --datadir data --permissioned --nodiscover --istanbul.blockperiod 5 --syncmode full --verbosity 5 --networkid 10 --rpc --rpcaddr 0.0.0.0 --rpcport YOUR_NODES_RPC_PORT_NUMBER --rpcapi admin,db,eth,debug,net,shh,txpool,personal,web3,quorum,istanbul --emitcheckpoints --port YOUR_NODES_PORT_NUMBER 2>>node.log &`, remember to replace `YOUR_NODES_RPC_PORT_NUMBER` and `YOUR_NODES_PORT_NUMBER` with your node's designated port numbers. `YOUR_NODES_PORT_NUMBER` must match the port number for this node decided on in step 5
### Removing non-validator node
1. Stop the `geth` process corresponding to the node that needs to be removed.
## Adding privacy transaction manager
### Tessera

View File

@ -54,8 +54,9 @@ issues with the version of curl bundled with Vagrant.
### Setting up Docker
1. Install Docker (https://www.docker.com/get-started)
* If your Docker distribution does not contain `docker-compose`, follow [this](https://docs.docker.com/compose/install/) to install Docker Compose
* Make sure your Docker daemon has at least 4G memory
- If your Docker distribution does not contain `docker-compose`, follow [this](https://docs.docker.com/compose/install/) to install Docker Compose
- Make sure your Docker daemon has at least 4G memory
- Requires Docker Engine 18.02.0+ and Docker Compose 1.21+
1. Download and run `docker-compose`
```sh
git clone https://github.com/jpmorganchase/quorum-examples
@ -88,13 +89,14 @@ issues with the version of curl bundled with Vagrant.
#### Troubleshooting Docker
1. Docker is frozen
* Check if your Docker daemon is allocated enough memory (minimum 4G)
- Check if your Docker daemon is allocated enough memory (minimum 4G)
1. Tessera is crashed due to missing file/directory
* This is due to the location of `quorum-examples` folder is not shared
* Please refer to Docker documentation for more details:
* [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/troubleshoot/#shared-drives)
* [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/#file-sharing)
* [Docker Machine](https://docs.docker.com/machine/overview/): this depends on what Docker machine provider is used. Please refer to its documentation on how to configure shared folders/drives
- This is because the location of the `quorum-examples` folder is not shared
- Please refer to Docker documentation for more details:
- [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/troubleshoot/#shared-drives)
- [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/#file-sharing)
- [Docker Machine](https://docs.docker.com/machine/overview/): this depends on what Docker machine provider is used. Please refer to its documentation on how to configure shared folders/drives
1. If you run Docker inside Docker, make sure to run the container with `--privileged`
### Setting up locally

View File

@ -1,10 +1,236 @@
## Using Splunk
Tessera logs can be interpreted by Splunk to allow for monitoring and analysis. The general steps to set up Splunk monitoring for a network of Tessera nodes are:
Tessera can be used with InfluxDB and Prometheus time-series databases to record API usage metrics. The data recorded can be visualised either by creating a custom dashboard or by using an existing dashboarding tool such as Grafana.
1. If one does not already exist, set up a central Splunk instance (a Receiver) on a separate host.
1. Configure the Tessera hosts to forward their logging info to the Receiver by:
1. Providing Logback configuration to Tessera as a CLI arg on start-up to specify the format of the logging output (e.g. save to a file).
This is achieved by providing an XML/Groovy config file defining the logging-level and Logback Appenders to use, for example:
In addition, Tessera logs can be searched, analyzed and monitored using Splunk. Splunk can be set up in such a way that the logs for multiple Tessera nodes in a network are accessible from a single centralized Splunk instance.
## API Metrics
Tessera can record the following usage metrics for each endpoint of its API:
* Average Response Time
* Max Response Time
* Min Response Time
* Request Count
* Requests Per Second
These metrics can be stored in an InfluxDB or Prometheus time-series database for further analysis.
* [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/) should be used when it is preferred for metrics to be "pushed" from Tessera to the DB (i.e. Tessera starts a service which periodically writes the latest metrics to the DB by calling the DBs API)
* [Prometheus](https://prometheus.io/) should be used when it is preferred for metrics to be "pulled" from Tessera by the DB (i.e. Tessera exposes a `/metrics` API endpoint which the DB periodically calls to fetch the latest metrics)
Both databases integrate well with the open source dashboard editor [Grafana](https://grafana.com/) to allow for easy creation of dashboards to visualise the data being captured from Tessera.
### Using InfluxDB
See the [InfluxDB documentation](https://docs.influxdata.com/influxdb) for details on how to set up an InfluxDB database ready for use with Tessera. A summary of the steps is as follows:
1. [Install InfluxDB](https://docs.influxdata.com/influxdb/v1.7/introduction/installation/)
1. Start the InfluxDB server
```bash
influxd -config /path/to/influx.conf
```
For local development/testing the default configuration file (Linux: `/etc/influxdb/influxdb.conf`, macOS: `/usr/local/etc/influxdb.conf`), should be sufficient. For further configuration options see [Configuring InfluxDB](https://docs.influxdata.com/influxdb/v1.7/administration/config/)
1. Connect to the InfluxDB server using the [`influx` CLI](https://docs.influxdata.com/influxdb/v1.7/tools/shell/) and create a new DB. If using the default config, this is simply:
```bash
influx
> CREATE DATABASE myDb
```
1. To view data stored in the database use the [Influx Query Language](https://docs.influxdata.com/influxdb/v1.7/query_language/)
```bash
influx
> USE myDb
> SHOW MEASUREMENTS
> SELECT * FROM <measurement>
```
!!! info
The InfluxDB HTTP API can be called directly as an alternative to using the `influx` CLI
Each Tessera server type (i.e. `P2P`, `Q2T`, `ADMIN`, `THIRDPARTY`, `ENCLAVE`) can be configured to store API metrics in an InfluxDB. These servers can be configured to store metrics to the same DB or separate ones. Not all servers need to be configured to store metrics.
To configure a server to use an InfluxDB, add `influxConfig` to the server config. For example:
```json
"serverConfigs": [
{
"app":"Q2T",
"enabled": true,
"serverAddress":"unix:/path/to/tm.ipc",
"communicationType" : "REST",
"influxConfig": {
"serverAddress": "https://localhost:8086", // InfluxDB server address
"dbName": "myDb", // InfluxDB DB name (DB must already exist)
"pushIntervalInSecs": 15, // How frequently Tessera will push new metrics to the DB
"sslConfig": { // Config required if InfluxDB server is using TLS
"tls": "STRICT",
"sslConfigType": "CLIENT_ONLY",
"clientTrustMode": "CA",
"clientTrustStore": "/path/to/truststore.jks",
"clientTrustStorePassword": "password",
"clientKeyStore": "path/to/truststore.jks",
"clientKeyStorePassword": "password"
}
}
},
{
"app":"P2P",
"enabled": true,
"serverAddress":"http://localhost:9001",
"communicationType" : "REST",
"influxConfig": {
"serverAddress": "http://localhost:8087",
"dbName": "anotherDb",
"pushIntervalInSecs": 15
}
}
]
```
#### InfluxDB TLS Configuration
InfluxDB supports 1-way TLS. This allows clients to validate the identity of the InfluxDB server and provides data encryption.
See [Enabling HTTPS with InfluxDB](https://docs.influxdata.com/influxdb/v1.7/administration/https_setup/) for details on how to secure an InfluxDB server with TLS. A summary of the steps is as follows:
1. Obtain a CA/self-signed certificate and key (either as separate `.crt` and `.key` files or as a combined `.pem` file)
1. Enable HTTPS in `influx.conf`:
``` bash
# Determines whether HTTPS is enabled.
https-enabled = true
# The SSL certificate to use when HTTPS is enabled.
https-certificate = "/path/to/certAndKey.pem"
# Use a separate private key location.
https-private-key = "/path/to/certAndKey.pem"
```
1. Restart the InfluxDB server to apply the config changes
To allow Tessera to communicate with a TLS-secured InfluxDB, `sslConfig` must be provided. To configure Tessera as the client in 1-way TLS:
```json
"sslConfig": {
"tls": "STRICT",
"sslConfigType": "CLIENT_ONLY",
"clientTrustMode": "CA",
"clientTrustStore": "/path/to/truststore.jks",
"clientTrustStorePassword": "password",
"clientKeyStore": "path/to/truststore.jks",
"clientKeyStorePassword": "password",
"environmentVariablePrefix": "INFLUX"
}
```
where `truststore.jks` is a Java KeyStore format file containing the trusted certificates for the Tessera client (e.g. the certificate of the CA used to create the InfluxDB certificate).
If securing the keystore with a password this password should be provided. Passwords can be provided either in the config (e.g. `clientTrustStorePassword`) or as environment variables (using `environmentVariablePrefix` and setting `<PREFIX>_TESSERA_CLIENT_TRUSTSTORE_PWD`). The [TLS Config](../../Configuration/TLS) documentation explains this in more detail.
As Tessera expects 2-way TLS, a `.jks` file for the `clientKeyStore` must also be provided. This will not be used so can simply be set as the truststore.
### Using Prometheus
The [Prometheus documentation](https://prometheus.io/docs/introduction/overview/) provides all the information needed to get Prometheus setup and ready to integrate with Tessera. The [Prometheus First Steps](https://prometheus.io/docs/introduction/first_steps/) is a good starting point. A summary of the steps to store Tessera metrics in a Prometheus DB are as follows:
1. Install Prometheus
1. Create a `prometheus.yml` configuration file to provide Prometheus with the necessary information to pull metrics from Tessera. A simple Prometheus config for use with the [7nodes example network](../../../../Getting Started/7Nodes) is:
```yaml
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: tessera-7nodes
static_configs:
- targets: ['localhost:9001', 'localhost:9002', 'localhost:9003', 'localhost:9004', 'localhost:9005', 'localhost:9006', 'localhost:9007']
```
1. Start Tessera. As Tessera always exposes the `/metrics` endpoint no additional configuration of Tessera is required
1. Start Prometheus
```bash
prometheus --config.file=prometheus.yml
```
1. To view data stored in the database, access the Prometheus UI (by default `localhost:9090`, this address can be changed in `prometheus.yml`) and use the [Prometheus Query Language](https://prometheus.io/docs/prometheus/latest/querying/basics/)
### Creating a Grafana dashboard
Grafana can be used to create dashboards from data stored in InfluxDB or Prometheus databases. See the [Grafana documentation](http://docs.grafana.org/) and [Grafana Getting Started](https://grafana.com/docs/guides/getting_started/) for details on how to set up a Grafana instance and integrate it with databases. A summary of the steps is as follows:
1. [Install and start Grafana](https://grafana.com/docs/) as described for your OS (if using the default config, Grafana will start on port `3000` and require login/password `admin/admin` to access the dashboard)
1. Create a Data Source to provide the necessary details to connect to the database
1. Create a new Dashboard
1. Add panels to the dashboard. Panels are the graphs, tables, statistics etc. that make up a dashboard. The New Panel wizard allows the components of the panel to be configured:
* Queries: Details the query to use retrieve data from the datasource, see the following links for info on using the Query Editor for [InfluxDB](https://grafana.com/docs/features/datasources/influxdb/) and [Prometheus](https://grafana.com/docs/features/datasources/prometheus/)
* Visualization: How to present the data queried, including panel type, axis headings etc.
#### Example dashboard
[![example-grafana-dashboard.png](../../../../images/tessera/monitoring/example-grafana-dashboard.png)](../../../../images/tessera/monitoring/example-grafana-dashboard.png)
To create this dashboard, a [7nodes example network](../../../../Getting Started/7Nodes) was started, with each Tessera node configured to store its `P2P` and `Q2T` metrics to the same InfluxDB. Several runs of the Quorum Acceptance Tests were run against this network to simulate network activity.
As can be seen in the top-right corner, the dashboard was set to only show data collected in the past 15 mins.
To create a dashboard similar to this:
1. Create an InfluxDB datasource within Grafana:
1. Hover over the cog icon in the left sidebar
1. Data Sources
1. Add data source
1. Select the type of DB to connect to (e.g. InfluxDB or Prometheus)
1. Fill out the form to provide all necessary DB connection information, e.g.:
[![grafana-influxdb-datasource.png](../../../../images/tessera/monitoring/grafana-influxdb-datasource.png)](../../../../images/tessera/monitoring/grafana-influxdb-datasource.png)
1. Create a new dashboard
1. Hover over the plus icon in the left sidebar
1. Dashboard
1. Add Query to configure the first panel
1. Add Panel in the top-right to add additional panels
[![grafana-new-dashboard.png](../../../../images/tessera/monitoring/grafana-new-dashboard.png)](../../../../images/tessera/monitoring/grafana-new-dashboard.png)
!!! note
For each of the following examples, additional options such as titles, axis labels and formatting can be configured by navigating the menus in the left-hand sidebar
[![grafana-panel-sidebar.png](../../../../images/tessera/monitoring/grafana-panel-sidebar.png)](../../../../images/tessera/monitoring/grafana-panel-sidebar.png)
1. Create *sendRaw requests* panel
1. Select the correct datasource from the *Queries to* dropdown list
1. Construct the query as shown in the below image. This retrieves the data for the `sendraw` API from the InfluxDB, finds the sum of the `RequestCount` for this data (i.e. the total number of requests) and groups by `instance` (i.e. each Tessera node). `time($_interval)` automatically scales the graph resolution for the time range and graph width.
[![grafana-send-raw-query.png](../../../../images/tessera/monitoring/grafana-send-raw-query.png)](../../../../images/tessera/monitoring/grafana-send-raw-query.png)
This panel shows the number of private payloads sent to Tessera using the `sendraw` API over time.
1. Create *receiveRaw requests* panel
1. Select the correct datasource from the *Queries to* dropdown list
1. Construct the query as shown in the below image. This retrieves the data for the `receiveraw` API from the InfluxDB, finds the sum of the `RequestCount` for this data (i.e. the total number of requests) and groups by `instance` (i.e. each Tessera node). `time($_interval)` automatically scales the graph resolution for the time range and graph width.
[![grafana-receive-raw-query.png](../../../../images/tessera/monitoring/grafana-receive-raw-query.png)](../../../../images/tessera/monitoring/grafana-receive-raw-query.png)
This panel shows the number of private payloads retrieved from Tessera using the `receiveraw` API over time.
1. Create *partyinfo request rate (Tessera network health)* panel
1. Select the correct datasource from the *Queries to* dropdown list
1. Construct the query as shown in the below image. This retrieves the data for the `partyinfo` API from the InfluxDB, finds the non-negative derivative of the `RequestCount` for this data and groups by `instance` (i.e. each Tessera node). `non_negative_derivative(1s)` calculates the per second change in `RequestCount` and ignores negative values that will occur if a node is stopped and restarted.
[![grafana-partyinfo-rate.png](../../../../images/tessera/monitoring/grafana-partyinfo-rate.png)](../../../../images/tessera/monitoring/grafana-partyinfo-rate.png)
This panel shows the rate of POST requests per second to `partyinfo`. For this network of 7 healthy nodes, this rate fluctuates between 5.5 and 6.5 requests/sec. At approx 09:37 node 1 was killed and the partyinfo rate across all nodes immediately drops. This is because they are no longer receiving requests to their `partyinfo` API from node 1. At 09:41 node 1 is restarted and the rates return to their original values.
This metric can be used as an indirect method of monitoring the health of the network. Using some of the more advanced InfluxDB query options available in Grafana and the other metrics measurements available it may be possible to make this result more explicit.
[Alerts and rules](https://grafana.com/docs/alerting/notifications/) can be configured to determine when a node has disconnected and send notifications to pre-configured channels (e.g. Slack, email, etc.).
1. Create *sendRaw rate* panel
1. Select the correct datasource from the *Queries to* dropdown list
1. Construct the query as shown in the below image. This retrieves the data for the `sendraw` API from the InfluxDB, finds the sum of the `RequestRate` for this data and groups by `instance` (i.e. each Tessera node). `time($_interval)` automatically scales the graph resolution for the time range and graph width.
[![grafana-sendraw-rate-query.png](../../../../images/tessera/monitoring/grafana-sendraw-rate-query.png)](../../../../images/tessera/monitoring/grafana-sendraw-rate-query.png)
The POST `sendraw` API is used by Quorum whenever a private transaction is sent using the `eth_sendTransaction` or `personal_sendTransaction` API. This panel gives a good indication of the private tx throughput in Quorum. Note that if the `sendraw` API is called by another process, the count will not be a true representation of Quorum traffic.
## Monitoring a Tessera network with Splunk
Splunk can be used to search, analyze and monitor the logs of Tessera nodes.
To consolidate the logs from multiple Tessera nodes in a network requires setting up Splunk and Splunk Universal Forwarders. The following pages from the Splunk documentation are a good starting point for understanding how to achieve this:
* [Consolidate data from multiple hosts](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Consolidatedatafrommultiplehosts)
* [Set up the Universal Forwarder](http://docs.splunk.com/Documentation/Splunk/7.1.2/Forwarding/EnableforwardingonaSplunkEnterpriseinstance#Set_up_the_universal_forwarder)
* [Configure the Universal Forwarder](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Configuretheuniversalforwarder)
* [Enable a receiver](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Enableareceiver)
The general steps to consolidate the logs for a Tessera network in Splunk are:
1. Set up a central Splunk instance if one does not already exist. Typically this will be on a separate host to the hosts running the Tessera nodes. This is known as the *Receiver*.
1. Configure the Tessera hosts to forward their node's logs to the *Receiver* by:
1. Configuring the format and output location of the node's logs. This is achieved by configuring logback (the logging framework used by Tessera) at node start-up.
The following example XML configures logback to save Tessera's logs to a file. See the [Logback documentation](https://logback.qos.ch/manual/configuration.html#syntax) for more information on configuring logback:
``` xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
@ -18,65 +244,20 @@ Tessera logs can be interpreted by Splunk to allow for monitoring and analysis.
<logger name="org.glassfish.jersey.internal.inject.Providers" level="ERROR" />
<logger name="org.hibernate.validator.internal.util.Version" level="ERROR" />
<logger name="org.hibernate.validator.internal.engine.ConfigurationImpl" level="ERROR" />
<root level="INFO">
<appender-ref ref="FILE"/>
</root>
</configuration>
```
To start Tessera with an XML configuration file:
``` bash
java -Dlogback.configurationFile=/path/to/logback-config.xml -jar /path/to/tessera-app-<version>-app.jar -configfile /path/to/config.json
```
Further information can be found in the [Logback documentation](https://logback.qos.ch/manual/configuration.html#syntax).
1. Set up Splunk *Universal Forwarders* on each Tessera host. These are lightweight Splunk clients that will be used to collect and forward log data for their node to the *Receiver*
1. Set up the Splunk *Receiver* to listen and receive logging data from the *Universal Forwarders*
Further information about setting up Splunk and Universal Forwarders can be found in the Splunk documentation. The following pages are a good starting point:
* [Consolidate data from multiple hosts](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Consolidatedatafrommultiplehosts)
* [Set up the Universal Forwarder](http://docs.splunk.com/Documentation/Splunk/7.1.2/Forwarding/EnableforwardingonaSplunkEnterpriseinstance#Set_up_the_universal_forwarder)
* [Configure the Universal Forwarder](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Configuretheuniversalforwarder)
* [Enable a receiver](http://docs.splunk.com/Documentation/Forwarder/7.1.2/Forwarder/Enableareceiver)
## Jersey Web Server Metrics
Simple Jersey web server metrics for a Tessera node can be monitored if desired. Tessera can store this performance data in a time-series database. Two open-source database options are available for use, depending on your particular use-case:
* [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/): For 'push'-style data transmission
* [Prometheus](https://prometheus.io/): For 'pull'-style data transmission
To set up monitoring requires the installation and configuration of one of these database offerings. Both databases integrate well with the open source metrics dashboard editor [Grafana](https://grafana.com/) to allow for easy creation of dashboards to visualise the data being captured from Tessera.
### Using InfluxDB
The [InfluxDB documentation](https://docs.influxdata.com/influxdb) provides all the information needed to get InfluxDB setup and ready to integrate with Tessera. A summary of the steps is as follows:
1. Download and install InfluxDB
1. Create an InfluxDB database
1. Add configuration details to the `server` section of your Tessera config file to allow Tessera to post metrics data to the InfluxDB host. An example configuration using InfluxDB's default hostName and port is (truncated for clarity):
```json
"server": {
"influxConfig": {
"port": 8086,
"hostName": "http://localhost",
"dbName": "tessera_demo",
"pushIntervalInSecs": 60
}
}
```
With `influxConfig` provided, Tessera will collect metrics data and push it to the InfluxDB service periodically based on the value set for `pushIntervalInSecs`.
1. You can use the `influx` CLI to query the database and view the data that is being stored
### Using Prometheus
The [Prometheus documentation](https://prometheus.io/docs/) provides all the information needed to get Prometheus setup and ready to integrate with Tessera. A summary of the steps is as follows:
1. Download and install Prometheus
1. Configure `prometheus.yml` to give the Prometheus instance the necessary information to pull metrics from each of the Tessera nodes. As Prometheus is pull-based, no additional config needs to be added to Tessera
1. Go to `localhost:9090` (or whatever host and port have been defined for the Prometheus instance) to see the Prometheus UI and view the data that is being stored
### Creating dashboards with Grafana
Once Tessera usage data is being stored in either InfluxDB or Prometheus, Grafana can be used to easily create dashboards to visualise that data. The [Grafana documentation](http://docs.grafana.org/) provides all the information needed to set up a Grafana instance and integrate it with both of these time-series databases. A summary of the steps is as follows:
1. Download and install Grafana
1. Create a new Data Source to connect Grafana with your database of choice
1. Create a new Dashboard
1. Create charts and other elements for the dashboard

Binary file not shown.

After

Width:  |  Height:  |  Size: 408 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 164 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 346 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 400 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 430 KiB

View File

@ -177,3 +177,27 @@ func ExpandPackagesNoVendor(patterns []string) []string {
}
return patterns
}
// Read QUORUM_IGNORE_TEST_PACKAGES env and remove from packages
func IgnorePackages(packages []string) []string {
ignore := os.Getenv("QUORUM_IGNORE_TEST_PACKAGES")
if ignore == "" {
return packages
}
ret := make([]string, 0, len(packages))
ignorePackages := strings.Split(ignore, ",")
for _, p := range packages {
mustInclude := true
for _, ig := range ignorePackages {
if strings.Index(p, strings.TrimSpace(ig)) == 0 {
mustInclude = false
break
}
}
if mustInclude {
ret = append(ret, p)
}
}
return ret
}

View File

@ -0,0 +1,40 @@
package build
import (
"os"
"testing"
testifyassert "github.com/stretchr/testify/assert"
)
// TestIgnorePackages_whenTypical verifies that packages pass through untouched
// when QUORUM_IGNORE_TEST_PACKAGES is not set.
func TestIgnorePackages_whenTypical(t *testing.T) {
	assert := testifyassert.New(t)
	// Clear the env var explicitly so this test does not depend on state
	// left behind by another test running earlier in the same process.
	assert.NoError(os.Unsetenv("QUORUM_IGNORE_TEST_PACKAGES"))
	arbitraryPackages := []string{"abc", "xyz/abc"}
	actual := IgnorePackages(arbitraryPackages)
	assert.Equal(arbitraryPackages, actual)
}
// TestIgnorePackages_whenIgnoreOnePackage verifies that a single configured
// prefix removes exactly the matching package.
func TestIgnorePackages_whenIgnoreOnePackage(t *testing.T) {
	assert := testifyassert.New(t)
	arbitraryPackages := []string{"abc", "xyz/abc"}
	assert.NoError(os.Setenv("QUORUM_IGNORE_TEST_PACKAGES", "abc"))
	// Restore the environment so later tests don't observe this value.
	defer os.Unsetenv("QUORUM_IGNORE_TEST_PACKAGES")
	actual := IgnorePackages(arbitraryPackages)
	assert.Equal([]string{arbitraryPackages[1]}, actual)
}
// TestIgnorePackages_whenIgnorePackages verifies that multiple comma-separated
// prefixes (with surrounding whitespace) remove all matching packages.
func TestIgnorePackages_whenIgnorePackages(t *testing.T) {
	assert := testifyassert.New(t)
	arbitraryPackages := []string{"abc", "xyz/abc/opq"}
	assert.NoError(os.Setenv("QUORUM_IGNORE_TEST_PACKAGES", "abc, xyz/abc"))
	// Restore the environment so later tests don't observe this value.
	defer os.Unsetenv("QUORUM_IGNORE_TEST_PACKAGES")
	actual := IgnorePackages(arbitraryPackages)
	assert.Len(actual, 0)
}

View File

@ -399,7 +399,7 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
data := []byte(*args.Data)
if len(data) > 0 {
log.Info("sending private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
data, err := private.P.Send(data, args.PrivateFrom, args.PrivateFor)
data, err = private.P.Send(data, args.PrivateFrom, args.PrivateFor)
log.Info("sent private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
if err != nil {
return common.Hash{}, err

View File

@ -605,7 +605,9 @@ func (w *worker) resultLoop() {
allReceipts := mergeReceipts(work.receipts, work.privateReceipts)
// Commit block and state to database.
w.mu.Lock()
stat, err := w.chain.WriteBlockWithState(block, allReceipts, work.state, nil)
w.mu.Unlock()
if err != nil {
log.Error("Failed writWriteBlockAndStating block to chain", "err", err)
continue

View File

@ -58,6 +58,7 @@ nav:
- How it works: Privacy/Constellation/How constellation works.md
- Sample Configuration: Privacy/Constellation/Sample Configuration.md
- Running Constellation: Privacy/Constellation/Installation & Running.md
- FAQ: FAQ.md
theme:
name: 'material'

View File

@ -67,7 +67,7 @@ var keys = []string{
"7184c1701569e3a4c4d2ddce691edd983b81e42e09196d332e1ae2f1e062cff4",
}
const NumNodes = 16 // must not exceed the number of keys (32)
const NumNodes = 3 // must not exceed the number of keys (32)
type TestData struct {
counter [NumNodes]int

View File

@ -525,7 +525,7 @@ func TestExpiry(t *testing.T) {
}
// wait till expired or timeout
for j := 0; j < 20; j++ {
for j := 0; j < 50; j++ {
time.Sleep(100 * time.Millisecond)
if len(w.Envelopes()) == 0 {
expired = true