Merge remote-tracking branch 'origin/master'

Peter Fox 2018-09-20 10:36:14 +01:00
commit ffcb3f4635
19 changed files with 382 additions and 233 deletions

View File

@ -1,46 +1,23 @@
# simplified version of the upstream travis configuration
language: go
go_import_path: github.com/ethereum/go-ethereum
sudo: false
matrix:
include:
- os: linux
dist: trusty
sudo: required
go: 1.7.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage
- os: linux
dist: trusty
sudo: required
go: 1.8.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage
# These are the latest Go versions.
- os: linux
dist: trusty
sudo: required
go: 1.9.x
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage -misspell
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: osx
osx_image: xcode9.2 # so we don't have to deal with the Kernel Extension Consent UI, which cannot be automated in CI
go: 1.9.x
sudo: required
script:
@ -48,149 +25,4 @@ matrix:
- brew install caskroom/cask/brew-cask
- brew cask install osxfuse
- go run build/ci.go install
- go run build/ci.go test -coverage -misspell
# This builder does the Ubuntu PPA and Linux Azure uploads
- os: linux
dist: trusty
sudo: required
go: 1.9.x
env:
- ubuntu-ppa
- azure-linux
addons:
apt:
packages:
- devscripts
- debhelper
- dput
- gcc-multilib
- fakeroot
script:
# Build for the primary platforms that Trusty can manage
- go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
- go run build/ci.go install
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go install -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# Switch over GCC to cross compilation (breaks 386, hence why do it here only)
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
- sudo ln -s /usr/include/asm-generic /usr/include/asm
- GOARM=5 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=6 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=7 CC=arm-linux-gnueabihf-gcc go run build/ci.go install -arch arm
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- CC=aarch64-linux-gnu-gcc go run build/ci.go install -arch arm64
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the Linux Azure MIPS xgo uploads
- os: linux
dist: trusty
sudo: required
services:
- docker
go: 1.9.x
env:
- azure-linux-mips
script:
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads
- os: linux
dist: precise # Needed for the android tools
addons:
apt:
packages:
- oracle-java8-installer
- oracle-java8-set-default
language: android
android:
components:
- platform-tools
- tools
- android-15
- android-19
- android-24
env:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
- mv android-ndk-r14b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.9.x
env:
- azure-osx
- azure-ios
- cocoapods-ios
script:
- go run build/ci.go install
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a -x
- gem install cocoapods
- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
- sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi
- xctool -version
- xcrun simctl list
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
# This builder does the Azure archive purges to avoid accumulating junk
- os: linux
dist: trusty
sudo: required
go: 1.9.x
env:
- azure-purge
script:
- go run build/ci.go purge -store gethstore/builds -days 14
install:
- go get golang.org/x/tools/cmd/cover
script:
- go run build/ci.go install
- go run build/ci.go test -coverage
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/e09ccdce1048c5e03445
on_success: change
on_failure: always
- go run build/ci.go test -coverage $TEST_PACKAGES

View File

@ -4,13 +4,15 @@ FROM golang:1.9-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers
ADD . /go-ethereum
RUN cd /go-ethereum && make geth
RUN cd /go-ethereum && make geth bootnode
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
COPY --from=builder /go-ethereum/build/bin/bootnode /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]

View File

@ -16,6 +16,10 @@ geth:
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
bootnode:
build/env.sh go run build/ci.go install ./cmd/bootnode
@echo "Done building bootnode."
swarm:
build/env.sh go run build/ci.go install ./cmd/swarm
@echo "Done building."

View File

@ -34,10 +34,13 @@ Further documentation can be found in the [docs](docs/) folder and on the [wiki]
* [Quorum Wiki](https://github.com/jpmorganchase/quorum/wiki)
* [quorum-examples](https://github.com/jpmorganchase/quorum-examples): Quorum demonstration examples
* [Quorum Community Slack Inviter](https://clh7rniov2.execute-api.us-east-1.amazonaws.com/Express/): Quorum Slack community entry point
* [Constellation](https://github.com/jpmorganchase/constellation): Haskell implementation of peer-to-peer encrypted message exchange for transaction privacy
* [Tessera](https://github.com/jpmorganchase/tessera): Java implementation of peer-to-peer encrypted message exchange for transaction privacy
* [Raft Consensus Documentation](raft/doc.md)
* [Istanbul BFT Consensus Documentation](https://github.com/ethereum/EIPs/issues/650): [RPC API](https://github.com/getamis/go-ethereum/wiki/RPC-API) and [technical article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff)
* Quorum Transaction Managers
* [Constellation](https://github.com/jpmorganchase/constellation): Haskell implementation of peer-to-peer encrypted message exchange for transaction privacy
* [Tessera](https://github.com/jpmorganchase/tessera): Java implementation of peer-to-peer encrypted message exchange for transaction privacy
* Consensus algorithms supported by Quorum
* [Raft Consensus Documentation](raft/doc.md)
* [Istanbul BFT Consensus Documentation](https://github.com/ethereum/EIPs/issues/650): [RPC API](https://github.com/getamis/go-ethereum/wiki/RPC-API) and [technical article](https://medium.com/getamis/istanbul-bft-ibft-c2758b7fe6ff)
* [Clique POA Consensus Documentation](https://github.com/ethereum/EIPs/issues/225) and a [guide to setting up Clique JSON](https://modalduality.org/posts/puppeth/) with [puppeth](https://blog.ethereum.org/2017/04/14/geth-1-6-puppeth-master/)
* [ZSL](https://github.com/jpmorganchase/quorum/wiki/ZSL) wiki page and [documentation](https://github.com/jpmorganchase/zsl-q/blob/master/README.md)
* [quorum-tools](https://github.com/jpmorganchase/quorum-tools): local cluster orchestration and integration testing tool

View File

@ -299,7 +299,7 @@ func TestCacheFind(t *testing.T) {
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
for d := 200 * time.Millisecond; d < 16*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
@ -349,6 +349,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
t.Fatal(err)
@ -362,6 +365,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
t.Fatal(err)
@ -374,6 +380,10 @@ func TestUpdatedKeyfileContents(t *testing.T) {
t.Error(err)
return
}
// needed so that modTime of `file` is different to its current value after ioutil.WriteFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents with crap
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
t.Fatal(err)

View File

@ -88,7 +88,8 @@ func (c *core) storeBacklog(msg *message, src istanbul.Validator) {
c.backlogsMu.Lock()
defer c.backlogsMu.Unlock()
backlog := c.backlogs[src]
logger.Debug("Retrieving backlog queue", "for", src.Address(), "backlogs_size", len(c.backlogs))
backlog := c.backlogs[src.Address()]
if backlog == nil {
backlog = prque.New()
}
@ -107,18 +108,23 @@ func (c *core) storeBacklog(msg *message, src istanbul.Validator) {
backlog.Push(msg, toPriority(msg.Code, p.View))
}
}
c.backlogs[src] = backlog
c.backlogs[src.Address()] = backlog
}
func (c *core) processBacklog() {
c.backlogsMu.Lock()
defer c.backlogsMu.Unlock()
for src, backlog := range c.backlogs {
for srcAddress, backlog := range c.backlogs {
if backlog == nil {
continue
}
_, src := c.valSet.GetByAddress(srcAddress)
if src == nil {
// validator is not available
delete(c.backlogs, srcAddress)
continue
}
logger := c.logger.New("from", src, "state", c.state)
isFuture := false

View File

@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/istanbul"
"github.com/ethereum/go-ethereum/consensus/istanbul/validator"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
@ -168,14 +167,15 @@ func TestCheckMessage(t *testing.T) {
func TestStoreBacklog(t *testing.T) {
c := &core{
logger: log.New("backend", "test", "id", 0),
backlogs: make(map[istanbul.Validator]*prque.Prque),
valSet: newTestValidatorSet(1),
backlogs: make(map[common.Address]*prque.Prque),
backlogsMu: new(sync.Mutex),
}
v := &istanbul.View{
Round: big.NewInt(10),
Sequence: big.NewInt(10),
}
p := validator.New(common.StringToAddress("12345667890"))
p := c.valSet.GetByIndex(0)
// push preprepare msg
preprepare := &istanbul.Preprepare{
View: v,
@ -187,7 +187,7 @@ func TestStoreBacklog(t *testing.T) {
Msg: prepreparePayload,
}
c.storeBacklog(m, p)
msg := c.backlogs[p].PopItem()
msg := c.backlogs[p.Address()].PopItem()
if !reflect.DeepEqual(msg, m) {
t.Errorf("message mismatch: have %v, want %v", msg, m)
}
@ -204,7 +204,7 @@ func TestStoreBacklog(t *testing.T) {
Msg: subjectPayload,
}
c.storeBacklog(m, p)
msg = c.backlogs[p].PopItem()
msg = c.backlogs[p.Address()].PopItem()
if !reflect.DeepEqual(msg, m) {
t.Errorf("message mismatch: have %v, want %v", msg, m)
}
@ -215,7 +215,7 @@ func TestStoreBacklog(t *testing.T) {
Msg: subjectPayload,
}
c.storeBacklog(m, p)
msg = c.backlogs[p].PopItem()
msg = c.backlogs[p.Address()].PopItem()
if !reflect.DeepEqual(msg, m) {
t.Errorf("message mismatch: have %v, want %v", msg, m)
}
@ -226,7 +226,7 @@ func TestStoreBacklog(t *testing.T) {
Msg: subjectPayload,
}
c.storeBacklog(m, p)
msg = c.backlogs[p].PopItem()
msg = c.backlogs[p.Address()].PopItem()
if !reflect.DeepEqual(msg, m) {
t.Errorf("message mismatch: have %v, want %v", msg, m)
}
@ -238,7 +238,8 @@ func TestProcessFutureBacklog(t *testing.T) {
}
c := &core{
logger: log.New("backend", "test", "id", 0),
backlogs: make(map[istanbul.Validator]*prque.Prque),
valSet: newTestValidatorSet(1),
backlogs: make(map[common.Address]*prque.Prque),
backlogsMu: new(sync.Mutex),
backend: backend,
current: newRoundState(&istanbul.View{
@ -254,7 +255,7 @@ func TestProcessFutureBacklog(t *testing.T) {
Round: big.NewInt(10),
Sequence: big.NewInt(10),
}
p := validator.New(common.StringToAddress("12345667890"))
p := c.valSet.GetByIndex(0)
// push a future msg
subject := &istanbul.Subject{
View: v,
@ -329,8 +330,9 @@ func testProcessBacklog(t *testing.T, msg *message) {
}
c := &core{
logger: log.New("backend", "test", "id", 0),
backlogs: make(map[istanbul.Validator]*prque.Prque),
backlogs: make(map[common.Address]*prque.Prque),
backlogsMu: new(sync.Mutex),
valSet: vset,
backend: backend,
state: State(msg.Code),
current: newRoundState(&istanbul.View{

View File

@ -42,7 +42,7 @@ func New(backend istanbul.Backend, config *istanbul.Config) Engine {
handlerWg: new(sync.WaitGroup),
logger: log.New("address", backend.Address()),
backend: backend,
backlogs: make(map[istanbul.Validator]*prque.Prque),
backlogs: make(map[common.Address]*prque.Prque),
backlogsMu: new(sync.Mutex),
pendingRequests: prque.New(),
pendingRequestsMu: new(sync.Mutex),
@ -73,7 +73,7 @@ type core struct {
waitingForRoundChange bool
validateFn func([]byte, []byte) (common.Address, error)
backlogs map[istanbul.Validator]*prque.Prque
backlogs map[common.Address]*prque.Prque
backlogsMu *sync.Mutex
current *roundState

View File

@ -2,13 +2,18 @@ package core
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"math/big"
"net"
"net/http"
"os"
osExec "os/exec"
"path"
"path/filepath"
"strings"
"testing"
"time"
@ -126,6 +131,136 @@ func runConstellation() (*osExec.Cmd, error) {
return constellationCmd, nil
}
func runTessera() (*osExec.Cmd, error) {
tesseraVersion := "0.6"
// make sure JRE is available
if err := osExec.Command("java").Start(); err != nil {
return nil, fmt.Errorf("runTessera: java not available - %s", err.Error())
}
// download binary from github/release
dir, err := ioutil.TempDir("", "tessera")
if err != nil {
return nil, err
}
defer os.RemoveAll(dir)
resp, err := http.Get(fmt.Sprintf("https://github.com/jpmorganchase/tessera/releases/download/tessera-%s/tessera-app-%s-app.jar", tesseraVersion, tesseraVersion))
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
tesseraJar := filepath.Join(dir, "tessera.jar")
if err := ioutil.WriteFile(tesseraJar, data, os.FileMode(0644)); err != nil {
return nil, err
}
// create config.json file
here, err := os.Getwd()
if err != nil {
return nil, err
}
if err = os.MkdirAll(path.Join(dir, "qdata"), 0755); err != nil {
return nil, err
}
tmIPCFile := filepath.Join(dir, "qdata", "tm.ipc")
keyData, err := ioutil.ReadFile(filepath.Join(here, "constellation-test-keys", "tm1.key"))
if err != nil {
return nil, err
}
publicKeyData, err := ioutil.ReadFile(filepath.Join(here, "constellation-test-keys", "tm1.pub"))
if err != nil {
return nil, err
}
tesseraConfigFile := filepath.Join(dir, "config.json")
if err := ioutil.WriteFile(tesseraConfigFile, []byte(fmt.Sprintf(`
{
"useWhiteList": false,
"jdbc": {
"username": "sa",
"password": "",
"url": "jdbc:h2:./qdata/c0/db0;MODE=Oracle;TRACE_LEVEL_SYSTEM_OUT=0"
},
"server": {
"port": 9000,
"hostName": "http://localhost",
"sslConfig": {
"tls": "OFF",
"generateKeyStoreIfNotExisted": true,
"serverKeyStore": "./qdata/c1/server1-keystore",
"serverKeyStorePassword": "quorum",
"serverTrustStore": "./qdata/c1/server-truststore",
"serverTrustStorePassword": "quorum",
"serverTrustMode": "TOFU",
"knownClientsFile": "./qdata/c1/knownClients",
"clientKeyStore": "./c1/client1-keystore",
"clientKeyStorePassword": "quorum",
"clientTrustStore": "./c1/client-truststore",
"clientTrustStorePassword": "quorum",
"clientTrustMode": "TOFU",
"knownServersFile": "./qdata/c1/knownServers"
}
},
"peer": [
{
"url": "http://localhost:9000"
}
],
"keys": {
"passwords": [],
"keyData": [
{
"config": %s,
"publicKey": "%s"
}
]
},
"alwaysSendTo": [],
"unixSocketFile": "%s"
}
`, string(keyData), string(publicKeyData), tmIPCFile)), os.FileMode(0644)); err != nil {
return nil, err
}
cmdStatusChan := make(chan error)
cmd := osExec.Command("java", "-Xms128M", "-Xmx128M", "-jar", tesseraJar, "-configFile", tesseraConfigFile)
// run tessera
go func() {
err := cmd.Start()
cmdStatusChan <- err
}()
// wait for tessera to come up
go func() {
waitingErr := errors.New("waiting")
checkFunc := func() error {
conn, err := net.Dial("unix", tmIPCFile)
if err != nil {
return waitingErr
}
if _, err := conn.Write([]byte("GET /upcheck HTTP/1.0\r\n\r\n")); err != nil {
return waitingErr
}
result, err := ioutil.ReadAll(conn)
if err != nil || string(result) != "I'm up!" {
return waitingErr
}
return nil
}
for {
time.Sleep(3 * time.Second)
if err := checkFunc(); err != nil && err != waitingErr {
cmdStatusChan <- err
}
}
}()
if err := <-cmdStatusChan; err != nil {
return nil, err
}
// wait until tessera is up
return cmd, nil
}
// 600a600055600060006001a1
// [1] PUSH1 0x0a (store value)
// [3] PUSH1 0x00 (store addr)
@ -146,7 +281,13 @@ func TestPrivateTransaction(t *testing.T) {
constellationCmd, err := runConstellation()
if err != nil {
t.Fatal(err)
if strings.Contains(err.Error(), "executable file not found") {
if constellationCmd, err = runTessera(); err != nil {
t.Fatal(err)
}
} else {
t.Fatal(err)
}
}
defer constellationCmd.Process.Kill()

View File

@ -117,6 +117,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
// Create a new environment which holds all relevant information
// about the transaction and calling mechanisms.
vmenv := vm.NewEVM(context, statedb, privateState, config, cfg)
// Apply the transaction to the current state (included in the env)
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
@ -132,9 +133,13 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
}
usedGas.Add(usedGas, gas)
// If this is a private transaction, the public receipt should always
// indicate success.
publicFailed := !(config.IsQuorum && tx.IsPrivate()) && failed
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing whether the root touch-delete accounts.
receipt := types.NewReceipt(root, failed, usedGas)
receipt := types.NewReceipt(root, publicFailed, usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
// if the transaction created a contract, store the creation address in the receipt.

View File

@ -48,3 +48,100 @@ web3.eth.sendTransaction({
}
});
```
***
## JSON RPC Privacy API Reference
__In addition to the JSON-RPC APIs provided by Ethereum, Quorum exposes the two API calls below.__
#### eth_storageRoot
Returns the storage root of a given address (contract or account)
##### Parameters
address, block number (hex, optional)
##### Returns
`String` - the 32-byte storage root hash as a hex string at the latest block height. When a block number is given, it returns the storage root hash at that block height.
##### Example
```js
// Request
curl -X POST http://127.0.0.1:22000 --data '{"jsonrpc": "2.0", "method": "eth_storageRoot", "params":["0x1349f3e1b8d71effb47b840594ff27da7e603d17"], "id": 67}'
// Response
{
"id":67,
"jsonrpc": "2.0",
"result": "0x81d1fa699f807735499cf6f7df860797cf66f6a66b565cfcda3fae3521eb6861"
}
```
```js
// When block number is provided...
// Request
curl -X POST http://127.0.0.1:22000 --data '{"jsonrpc": "2.0", "method": "eth_storageRoot", "params":["0x1349f3e1b8d71effb47b840594ff27da7e603d17","0x1"], "id": 67}'
// Response
{
"id":67,
"jsonrpc": "2.0",
"result": "0x81d1fa699f807735499cf6f7df860797cf66f6a66b565cfcda3fae3521eb6861"
}
// After private state of the contract is changed from '42' to '99'
// Request
curl -X POST http://127.0.0.1:22000 --data '{"jsonrpc": "2.0", "method": "eth_storageRoot", "params":["0x1349f3e1b8d71effb47b840594ff27da7e603d17","0x2"], "id": 67}'
// Response
{
"id":67,
"jsonrpc": "2.0",
"result": "0x0edb0e520c35df37a0d080d5245c9b8f9e1f9d7efab77c067d1e12c0a71299da"
}
```
***
#### eth_getQuorumPayload
Returns the unencrypted payload from Tessera/Constellation
##### Parameters
The transaction payload hash in hex format
##### Returns
`String` - the unencrypted transaction payload in hex format.
##### Example
```js
// Request
curl -X POST http://127.0.0.1:22000 --data '{"jsonrpc":"2.0", "method":"eth_getQuorumPayload", "params":["0x5e902fa2af51b186468df6ffc21fd2c26235f4959bf900fc48c17dc1774d86d046c0e466230225845ddf2cf98f23ede5221c935aac27476e77b16604024bade0"], "id":67}'
// Response
{
"id":67,
"jsonrpc": "2.0",
"result": "0x6060604052341561000f57600080fd5b604051602080610149833981016040528080519060200190919050505b806000819055505b505b610104806100456000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680632a1afcd914605157806360fe47b11460775780636d4ce63c146097575b600080fd5b3415605b57600080fd5b606160bd565b6040518082815260200191505060405180910390f35b3415608157600080fd5b6095600480803590602001909190505060c3565b005b341560a157600080fd5b60a760ce565b6040518082815260200191505060405180910390f35b60005481565b806000819055505b50565b6000805490505b905600a165627a7a72305820d5851baab720bba574474de3d09dbeaabc674a15f4dd93b974908476542c23f00029000000000000000000000000000000000000000000000000000000000000002a"
}
// On a node which is not party to the transaction
// Response
{
"id":67,
"jsonrpc": "2.0",
"result": "0x"
}
```
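The same two calls can also be made programmatically. Below is a minimal Go sketch (not part of this change set) using the go-ethereum `rpc` client, assuming a Quorum node serving HTTP RPC at `http://127.0.0.1:22000` as in the curl examples above; the address and payload hash are the placeholder values from those examples.
```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes a Quorum node exposing HTTP RPC at this endpoint (as in the curl examples above).
	client, err := rpc.Dial("http://127.0.0.1:22000")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// eth_storageRoot: storage root of an address, optionally at a given block number.
	var root string
	if err := client.Call(&root, "eth_storageRoot",
		"0x1349f3e1b8d71effb47b840594ff27da7e603d17", "0x1"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("storage root:", root)

	// eth_getQuorumPayload: unencrypted payload behind a private transaction's payload hash.
	// Nodes that are not a party to the transaction return "0x", as in the last response above.
	var payload string
	if err := client.Call(&payload, "eth_getQuorumPayload",
		"0x5e902fa2af51b186468df6ffc21fd2c26235f4959bf900fc48c17dc1774d86d046c0e466230225845ddf2cf98f23ede5221c935aac27476e77b16604024bade0"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("payload:", payload)
}
```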

View File

@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
defer tester.terminate()
@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
defer tester.terminate()
@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
tester := newTester()
defer tester.terminate()
@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDr
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
t.Parallel()
// Define the disconnection requirement for individual hash fetch errors
tests := []struct {
result error
@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang62(t *testing.T) { testDeliverHeadersHang(t, 62, FullSync) }
func TestDeliverHeadersHang63Full(t *testing.T) { testDeliverHeadersHang(t, 63, FullSync) }
func TestDeliverHeadersHang63Fast(t *testing.T) { testDeliverHeadersHang(t, 63, FastSync) }
func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }
func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }
func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
// We use data-driven subtests to manage this so that it will be parallel on its own
// and not with the other tests, avoiding intermittent failures.
func TestDeliverHeadersHang(t *testing.T) {
testCases := []struct {
protocol int
syncMode SyncMode
}{
{62, FullSync},
{63, FullSync},
{63, FastSync},
{64, FullSync},
{64, FastSync},
{64, LightSync},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
})
}
}
type floodingTestPeer struct {
peer Peer
@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
// Deliver the actual requested headers.
go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
// None of the extra deliveries should block.
timeout := time.After(15 * time.Second)
timeout := time.After(60 * time.Second)
for i := 0; i < cap(deliveriesDone); i++ {
select {
case <-deliveriesDone:
@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
tester.downloader.peers.peers["peer"].peer,
tester,
}
if err := tester.sync("peer", nil, mode); err != nil {
t.Errorf("sync failed: %v", err)
}
@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
// Tests that if fast sync aborts in the critical section, it can restart a few
// times before giving up.
func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
// We use data-driven subtests to manage this so that it will be parallel on its own
// and not with the other tests, avoiding intermittent failures.
func TestFastCriticalRestarts(t *testing.T) {
testCases := []struct {
protocol int
progress bool
}{
{63, false},
{64, false},
{63, true},
{64, true},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
testFastCriticalRestarts(t, tc.protocol, tc.progress)
})
}
}
func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
t.Parallel()
tester := newTester()
defer tester.terminate()
@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
if i == 0 {
time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
if tester.downloader.fsPivotLock == nil {
time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
t.Fatalf("pivot block not locked in after critical section failure")

View File

@ -60,7 +60,7 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
},
{
code: StatusMsg, data: statusData{uint32(protocol), 999, td, currentBlock, genesis},
wantError: errResp(ErrNetworkIdMismatch, "999 (!= 1)"),
wantError: errResp(ErrNetworkIdMismatch, "999 (!= %d)", DefaultConfig.NetworkId),
},
{
code: StatusMsg, data: statusData{uint32(protocol), DefaultConfig.NetworkId, td, currentBlock, common.Hash{3}},

View File

@ -628,7 +628,11 @@ web3._extend({
new web3._extend.Property({
name: 'leader',
getter: 'raft_leader'
})
}),
new web3._extend.Property({
name: 'cluster',
getter: 'raft_cluster'
}),
]
})
`

View File

@ -150,7 +150,7 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
}
if _, ok := engine.(consensus.Istanbul); ok || !config.IsQuorum {
if _, ok := engine.(consensus.Istanbul); ok || !config.IsQuorum || config.Clique != nil {
// Subscribe TxPreEvent for tx pool
worker.txSub = eth.TxPool().SubscribeTxPreEvent(worker.txCh)
// Subscribe events for blockchain

View File

@ -35,5 +35,10 @@ func (s *PublicRaftAPI) Leader() (string, error) {
if nil != err {
return "", err
}
return addr.nodeId.String(), nil
return addr.NodeId.String(), nil
}
func (s *PublicRaftAPI) Cluster() []*Address {
nodeInfo := s.raftService.raftProtocolManager.NodeInfo()
return append(nodeInfo.PeerAddresses, nodeInfo.Address)
}
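As a usage sketch (not part of this change set), the new `raft_cluster` method can be queried over RPC just like `raft_leader`. The snippet assumes a node running the Raft consensus with the `raft` API module exposed over HTTP at a placeholder endpoint, and decodes the cluster members generically instead of redeclaring the exported `Address` fields shown later in this diff.
```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; assumes the raft module is included in the node's HTTP RPC APIs.
	client, err := rpc.Dial("http://127.0.0.1:22000")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// raft_leader returns the node ID of the current Raft leader.
	var leader string
	if err := client.Call(&leader, "raft_leader"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader:", leader)

	// raft_cluster returns the peer addresses plus the local node's own address.
	// json.RawMessage keeps each member as raw JSON (raftId, nodeId, ip, p2pPort, raftPort).
	var cluster []json.RawMessage
	if err := client.Call(&cluster, "raft_cluster"); err != nil {
		log.Fatal(err)
	}
	for _, member := range cluster {
		fmt.Println(string(member))
	}
}
```
From the geth console the same information is available as `raft.cluster` once the web3 extension registered above is loaded.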

View File

@ -624,17 +624,17 @@ func (pm *ProtocolManager) entriesToApply(allEntries []raftpb.Entry) (entriesToA
}
func raftUrl(address *Address) string {
return fmt.Sprintf("http://%s:%d", address.ip, address.raftPort)
return fmt.Sprintf("http://%s:%d", address.Ip, address.RaftPort)
}
func (pm *ProtocolManager) addPeer(address *Address) {
pm.mu.Lock()
defer pm.mu.Unlock()
raftId := address.raftId
raftId := address.RaftId
// Add P2P connection:
p2pNode := discover.NewNode(address.nodeId, address.ip, 0, uint16(address.p2pPort))
p2pNode := discover.NewNode(address.NodeId, address.Ip, 0, uint16(address.P2pPort))
pm.p2pServer.AddPeer(p2pNode)
// Add raft transport connection:

View File

@ -5,28 +5,29 @@ import (
"net"
"fmt"
"log"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/rlp"
"log"
)
// Serializable information about a Peer. Sufficient to build `etcdRaft.Peer`
// or `discover.Node`.
type Address struct {
raftId uint16
nodeId discover.NodeID
ip net.IP
p2pPort uint16
raftPort uint16
RaftId uint16 `json:"raftId"`
NodeId discover.NodeID `json:"nodeId"`
Ip net.IP `json:"ip"`
P2pPort uint16 `json:"p2pPort"`
RaftPort uint16 `json:"raftPort"`
}
func newAddress(raftId uint16, raftPort uint16, node *discover.Node) *Address {
return &Address{
raftId: raftId,
nodeId: node.ID,
ip: node.IP,
p2pPort: node.TCP,
raftPort: raftPort,
RaftId: raftId,
NodeId: node.ID,
Ip: node.IP,
P2pPort: node.TCP,
RaftPort: raftPort,
}
}
@ -37,7 +38,7 @@ type Peer struct {
}
func (addr *Address) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{addr.raftId, addr.nodeId, addr.ip, addr.p2pPort, addr.raftPort})
return rlp.Encode(w, []interface{}{addr.RaftId, addr.NodeId, addr.Ip, addr.P2pPort, addr.RaftPort})
}
func (addr *Address) DecodeRLP(s *rlp.Stream) error {
@ -53,7 +54,7 @@ func (addr *Address) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode(&temp); err != nil {
return err
} else {
addr.raftId, addr.nodeId, addr.ip, addr.p2pPort, addr.raftPort = temp.RaftId, temp.NodeId, temp.Ip, temp.P2pPort, temp.RaftPort
addr.RaftId, addr.NodeId, addr.Ip, addr.P2pPort, addr.RaftPort = temp.RaftId, temp.NodeId, temp.Ip, temp.P2pPort, temp.RaftPort
return nil
}
}

View File

@ -30,7 +30,7 @@ type ByRaftId []Address
func (a ByRaftId) Len() int { return len(a) }
func (a ByRaftId) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRaftId) Less(i, j int) bool { return a[i].raftId < a[j].raftId }
func (a ByRaftId) Less(i, j int) bool { return a[i].RaftId < a[j].RaftId }
func (pm *ProtocolManager) buildSnapshot() *Snapshot {
pm.mu.RLock()
@ -140,17 +140,17 @@ func (pm *ProtocolManager) updateClusterMembership(newConfState raftpb.ConfState
for _, tempAddress := range addresses {
address := tempAddress // Allocate separately on the heap for each iteration.
if address.raftId == pm.raftId {
if address.RaftId == pm.raftId {
// If we're a newcomer to an existing cluster, this is where we learn
// our own Address.
pm.setLocalAddress(&address)
} else {
pm.mu.RLock()
existingPeer := pm.peers[address.raftId]
existingPeer := pm.peers[address.RaftId]
pm.mu.RUnlock()
if existingPeer == nil {
log.Info("adding new raft peer", "raft id", address.raftId)
log.Info("adding new raft peer", "raft id", address.RaftId)
pm.addPeer(&address)
}
}