mirror of https://github.com/poanetwork/quorum.git
debug merge attempt
This commit is contained in:
parent
4b44bea1d7
commit
622e98ad51
|
@ -103,7 +103,7 @@ func (b *SimulatedBackend) Rollback() {
|
||||||
|
|
||||||
func (b *SimulatedBackend) rollback() {
|
func (b *SimulatedBackend) rollback() {
|
||||||
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
|
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {})
|
||||||
statedb, _ := b.blockchain.State()
|
statedb, _, _ := b.blockchain.State()
|
||||||
|
|
||||||
b.pendingBlock = blocks[0]
|
b.pendingBlock = blocks[0]
|
||||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||||
|
@ -265,7 +265,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
|
||||||
|
|
||||||
// callContract implements common code between normal and pending contract calls.
|
// callContract implements common code between normal and pending contract calls.
|
||||||
// state is modified during execution, make sure to copy it if necessary.
|
// state is modified during execution, make sure to copy it if necessary.
|
||||||
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb, privateState *state.StateDB) ([]byte, *big.Int, bool, error) {
|
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, statedb, privateState *state.StateDB) ([]byte, uint64, bool, error) {
|
||||||
// Ensure message is initialized properly.
|
// Ensure message is initialized properly.
|
||||||
if call.GasPrice == nil {
|
if call.GasPrice == nil {
|
||||||
call.GasPrice = big.NewInt(1)
|
call.GasPrice = big.NewInt(1)
|
||||||
|
@ -312,7 +312,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
||||||
}
|
}
|
||||||
block.AddTxWithChain(b.blockchain, tx)
|
block.AddTxWithChain(b.blockchain, tx)
|
||||||
})
|
})
|
||||||
statedb, _ := b.blockchain.State()
|
statedb, _, _ := b.blockchain.State()
|
||||||
|
|
||||||
b.pendingBlock = blocks[0]
|
b.pendingBlock = blocks[0]
|
||||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||||
|
@ -391,7 +391,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
|
||||||
}
|
}
|
||||||
block.OffsetTime(int64(adjustment.Seconds()))
|
block.OffsetTime(int64(adjustment.Seconds()))
|
||||||
})
|
})
|
||||||
statedb, _ := b.blockchain.State()
|
statedb, _, _ := b.blockchain.State()
|
||||||
|
|
||||||
b.pendingBlock = blocks[0]
|
b.pendingBlock = blocks[0]
|
||||||
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
|
||||||
|
|
|
@ -520,105 +520,3 @@ import org.ethereum.geth.internal.*;
|
||||||
}
|
}
|
||||||
{{end}}
|
{{end}}
|
||||||
`
|
`
|
||||||
|
|
||||||
// tmplSourceJava is the Java source template use to generate the contract binding
|
|
||||||
// based on.
|
|
||||||
const tmplSourceJava = `
|
|
||||||
// This file is an automatically generated Java binding. Do not modify as any
|
|
||||||
// change will likely be lost upon the next re-generation!
|
|
||||||
|
|
||||||
package {{.Package}};
|
|
||||||
|
|
||||||
import org.ethereum.geth.*;
|
|
||||||
import org.ethereum.geth.internal.*;
|
|
||||||
|
|
||||||
{{range $contract := .Contracts}}
|
|
||||||
public class {{.Type}} {
|
|
||||||
// ABI is the input ABI used to generate the binding from.
|
|
||||||
public final static String ABI = "{{.InputABI}}";
|
|
||||||
|
|
||||||
{{if .InputBin}}
|
|
||||||
// BYTECODE is the compiled bytecode used for deploying new contracts.
|
|
||||||
public final static byte[] BYTECODE = "{{.InputBin}}".getBytes();
|
|
||||||
|
|
||||||
// deploy deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
|
|
||||||
public static {{.Type}} deploy(TransactOpts auth, EthereumClient client{{range .Constructor.Inputs}}, {{bindtype .Type}} {{.Name}}{{end}}) throws Exception {
|
|
||||||
Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}});
|
|
||||||
{{range $index, $element := .Constructor.Inputs}}
|
|
||||||
args.set({{$index}}, Geth.newInterface()); args.get({{$index}}).set{{namedtype (bindtype .Type) .Type}}({{.Name}});
|
|
||||||
{{end}}
|
|
||||||
return new {{.Type}}(Geth.deployContract(auth, ABI, BYTECODE, client, args));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal constructor used by contract deployment.
|
|
||||||
private {{.Type}}(BoundContract deployment) {
|
|
||||||
this.Address = deployment.getAddress();
|
|
||||||
this.Deployer = deployment.getDeployer();
|
|
||||||
this.Contract = deployment;
|
|
||||||
}
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
// Ethereum address where this contract is located at.
|
|
||||||
public final Address Address;
|
|
||||||
|
|
||||||
// Ethereum transaction in which this contract was deployed (if known!).
|
|
||||||
public final Transaction Deployer;
|
|
||||||
|
|
||||||
// Contract instance bound to a blockchain address.
|
|
||||||
private final BoundContract Contract;
|
|
||||||
|
|
||||||
// Creates a new instance of {{.Type}}, bound to a specific deployed contract.
|
|
||||||
public {{.Type}}(Address address, EthereumClient client) throws Exception {
|
|
||||||
this(Geth.bindContract(address, ABI, client));
|
|
||||||
}
|
|
||||||
|
|
||||||
{{range .Calls}}
|
|
||||||
{{if gt (len .Normalized.Outputs) 1}}
|
|
||||||
// {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}.
|
|
||||||
public class {{capitalise .Normalized.Name}}Results {
|
|
||||||
{{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}};
|
|
||||||
{{end}}
|
|
||||||
}
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.Id}}.
|
|
||||||
//
|
|
||||||
// Solidity: {{.Original.String}}
|
|
||||||
public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else}}{{range .Normalized.Outputs}}{{bindtype .Type}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type}} {{.Name}}{{end}}) throws Exception {
|
|
||||||
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
|
|
||||||
{{range $index, $item := .Normalized.Inputs}}args.set({{$index}}, Geth.newInterface()); args.get({{$index}}).set{{namedtype (bindtype .Type) .Type}}({{.Name}});
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}});
|
|
||||||
{{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type) .Type}}(); results.set({{$index}}, result{{$index}});
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
if (opts == null) {
|
|
||||||
opts = Geth.newCallOpts();
|
|
||||||
}
|
|
||||||
this.Contract.call(opts, results, "{{.Original.Name}}", args);
|
|
||||||
{{if gt (len .Normalized.Outputs) 1}}
|
|
||||||
{{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results();
|
|
||||||
{{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type) .Type}}();
|
|
||||||
{{end}}
|
|
||||||
return result;
|
|
||||||
{{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type) .Type}}();{{end}}
|
|
||||||
{{end}}
|
|
||||||
}
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
{{range .Transacts}}
|
|
||||||
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.Id}}.
|
|
||||||
//
|
|
||||||
// Solidity: {{.Original.String}}
|
|
||||||
public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type}} {{.Name}}{{end}}) throws Exception {
|
|
||||||
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
|
|
||||||
{{range $index, $item := .Normalized.Inputs}}args.set({{$index}}, Geth.newInterface()); args.get({{$index}}).set{{namedtype (bindtype .Type) .Type}}({{.Name}});
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
|
|
||||||
}
|
|
||||||
{{end}}
|
|
||||||
}
|
|
||||||
{{end}}
|
|
||||||
`
|
|
||||||
|
|
|
@ -474,7 +474,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||||
|
|
||||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
|
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
|
||||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.lock.Unlock()
|
f.lock.Unlock()
|
||||||
if err = sendError(conn, err); err != nil {
|
if err = sendError(conn, err); err != nil {
|
||||||
|
|
|
@ -18,18 +18,17 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
cli "gopkg.in/urfave/cli.v1"
|
cli "gopkg.in/urfave/cli.v1"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/contracts/release"
|
|
||||||
"github.com/ethereum/go-ethereum/dashboard"
|
"github.com/ethereum/go-ethereum/dashboard"
|
||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
|
@ -162,6 +161,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||||
|
|
||||||
if ctx.GlobalBool(utils.RaftModeFlag.Name) {
|
if ctx.GlobalBool(utils.RaftModeFlag.Name) {
|
||||||
RegisterRaftService(stack, ctx, cfg, ethChan)
|
RegisterRaftService(stack, ctx, cfg, ethChan)
|
||||||
|
}
|
||||||
|
|
||||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||||
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
|
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
|
||||||
|
@ -184,21 +184,6 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||||
if cfg.Ethstats.URL != "" {
|
if cfg.Ethstats.URL != "" {
|
||||||
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add the release oracle service so it boots along with node.
|
|
||||||
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
|
||||||
config := release.Config{
|
|
||||||
Oracle: relOracle,
|
|
||||||
Major: uint32(params.VersionMajor),
|
|
||||||
Minor: uint32(params.VersionMinor),
|
|
||||||
Patch: uint32(params.VersionPatch),
|
|
||||||
}
|
|
||||||
commit, _ := hex.DecodeString(gitCommit)
|
|
||||||
copy(config.Commit[:], commit)
|
|
||||||
return release.NewReleaseService(ctx, config)
|
|
||||||
}); err != nil {
|
|
||||||
utils.Fatalf("Failed to register the Geth release oracle service: %v", err)
|
|
||||||
}
|
|
||||||
return stack
|
return stack
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -75,7 +75,6 @@ var AppHelpFlagGroups = []flagGroup{
|
||||||
utils.TestnetFlag,
|
utils.TestnetFlag,
|
||||||
utils.RinkebyFlag,
|
utils.RinkebyFlag,
|
||||||
utils.OttomanFlag,
|
utils.OttomanFlag,
|
||||||
utils.DevModeFlag,
|
|
||||||
utils.SyncModeFlag,
|
utils.SyncModeFlag,
|
||||||
utils.GCModeFlag,
|
utils.GCModeFlag,
|
||||||
utils.EthStatsURLFlag,
|
utils.EthStatsURLFlag,
|
||||||
|
@ -313,39 +312,6 @@ func flagCategory(flag cli.Flag) string {
|
||||||
return "MISC"
|
return "MISC"
|
||||||
}
|
}
|
||||||
|
|
||||||
// byCategory sorts an array of flagGroup by Name in the order
|
|
||||||
// defined in AppHelpFlagGroups.
|
|
||||||
type byCategory []flagGroup
|
|
||||||
|
|
||||||
func (a byCategory) Len() int { return len(a) }
|
|
||||||
func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
func (a byCategory) Less(i, j int) bool {
|
|
||||||
iCat, jCat := a[i].Name, a[j].Name
|
|
||||||
iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last
|
|
||||||
|
|
||||||
for i, group := range AppHelpFlagGroups {
|
|
||||||
if iCat == group.Name {
|
|
||||||
iIdx = i
|
|
||||||
}
|
|
||||||
if jCat == group.Name {
|
|
||||||
jIdx = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return iIdx < jIdx
|
|
||||||
}
|
|
||||||
|
|
||||||
func flagCategory(flag cli.Flag) string {
|
|
||||||
for _, category := range AppHelpFlagGroups {
|
|
||||||
for _, flg := range category.Flags {
|
|
||||||
if flg.GetName() == flag.GetName() {
|
|
||||||
return category.Name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "MISC"
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// Override the default app help template
|
// Override the default app help template
|
||||||
cli.AppHelpTemplate = AppHelpTemplate
|
cli.AppHelpTemplate = AppHelpTemplate
|
||||||
|
|
|
@ -27,6 +27,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
|
|
@ -189,27 +189,6 @@ var (
|
||||||
Usage: "Maximum number of LES client peers",
|
Usage: "Maximum number of LES client peers",
|
||||||
Value: eth.DefaultConfig.LightPeers,
|
Value: eth.DefaultConfig.LightPeers,
|
||||||
}
|
}
|
||||||
LightModeFlag = cli.BoolFlag{
|
|
||||||
Name: "light",
|
|
||||||
Usage: "Enable light client mode",
|
|
||||||
}
|
|
||||||
defaultSyncMode = eth.DefaultConfig.SyncMode
|
|
||||||
SyncModeFlag = TextMarshalerFlag{
|
|
||||||
Name: "syncmode",
|
|
||||||
Usage: `Blockchain sync mode ("fast", "full", or "light")`,
|
|
||||||
Value: &defaultSyncMode,
|
|
||||||
}
|
|
||||||
|
|
||||||
LightServFlag = cli.IntFlag{
|
|
||||||
Name: "lightserv",
|
|
||||||
Usage: "Maximum percentage of time allowed for serving LES requests (0-90)",
|
|
||||||
Value: 0,
|
|
||||||
}
|
|
||||||
LightPeersFlag = cli.IntFlag{
|
|
||||||
Name: "lightpeers",
|
|
||||||
Usage: "Maximum number of LES client peers",
|
|
||||||
Value: 20,
|
|
||||||
}
|
|
||||||
LightKDFFlag = cli.BoolFlag{
|
LightKDFFlag = cli.BoolFlag{
|
||||||
Name: "lightkdf",
|
Name: "lightkdf",
|
||||||
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
||||||
|
@ -1170,6 +1149,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||||
case ctx.GlobalBool(OttomanFlag.Name):
|
case ctx.GlobalBool(OttomanFlag.Name):
|
||||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||||
cfg.NetworkId = 5
|
cfg.NetworkId = 5
|
||||||
|
}
|
||||||
cfg.Genesis = core.DefaultOttomanGenesisBlock()
|
cfg.Genesis = core.DefaultOttomanGenesisBlock()
|
||||||
case ctx.GlobalBool(DeveloperFlag.Name):
|
case ctx.GlobalBool(DeveloperFlag.Name):
|
||||||
// Create new developer account or reuse existing one
|
// Create new developer account or reuse existing one
|
||||||
|
|
|
@ -575,10 +575,6 @@ func sendMsg(payload []byte) common.Hash {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("failed to create new message: %s", err)
|
utils.Fatalf("failed to create new message: %s", err)
|
||||||
}
|
}
|
||||||
<<<<<<< variant A
|
|
||||||
>>>>>>> variant B
|
|
||||||
|
|
||||||
======= end
|
|
||||||
envelope, err := msg.Wrap(¶ms)
|
envelope, err := msg.Wrap(¶ms)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("failed to seal message: %v \n", err)
|
fmt.Printf("failed to seal message: %v \n", err)
|
||||||
|
|
|
@ -683,3 +683,8 @@ func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
|
||||||
Public: false,
|
Public: false,
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Protocol implements consensus.Engine.Protocol
|
||||||
|
func (c *Clique) Protocol() consensus.Protocol {
|
||||||
|
return consensus.EthProtocol
|
||||||
|
}
|
||||||
|
|
|
@ -1,47 +0,0 @@
|
||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
// cacheSize calculates and returns the size of the ethash verification cache that
|
|
||||||
// belongs to a certain block number. The cache size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func cacheSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < len(cacheSizes) {
|
|
||||||
return cacheSizes[epoch]
|
|
||||||
}
|
|
||||||
// We don't have a way to verify primes fast before Go 1.8
|
|
||||||
panic("fast prime testing unsupported in Go < 1.8")
|
|
||||||
}
|
|
||||||
|
|
||||||
// datasetSize calculates and returns the size of the ethash mining dataset that
|
|
||||||
// belongs to a certain block number. The dataset size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func datasetSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < len(datasetSizes) {
|
|
||||||
return datasetSizes[epoch]
|
|
||||||
}
|
|
||||||
// We don't have a way to verify primes fast before Go 1.8
|
|
||||||
panic("fast prime testing unsupported in Go < 1.8")
|
|
||||||
}
|
|
|
@ -1,57 +0,0 @@
|
||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
import "math/big"
|
|
||||||
|
|
||||||
// cacheSize calculates and returns the size of the ethash verification cache that
|
|
||||||
// belongs to a certain block number. The cache size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func cacheSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < len(cacheSizes) {
|
|
||||||
return cacheSizes[epoch]
|
|
||||||
}
|
|
||||||
// No known cache size, calculate manually (sanity branch only)
|
|
||||||
size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
|
|
||||||
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
|
||||||
size -= 2 * hashBytes
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
// datasetSize calculates and returns the size of the ethash mining dataset that
|
|
||||||
// belongs to a certain block number. The dataset size grows linearly, however, we
|
|
||||||
// always take the highest prime below the linearly growing threshold in order to
|
|
||||||
// reduce the risk of accidental regularities leading to cyclic behavior.
|
|
||||||
func datasetSize(block uint64) uint64 {
|
|
||||||
// If we have a pre-generated value, use that
|
|
||||||
epoch := int(block / epochLength)
|
|
||||||
if epoch < len(datasetSizes) {
|
|
||||||
return datasetSizes[epoch]
|
|
||||||
}
|
|
||||||
// No known dataset size, calculate manually (sanity branch only)
|
|
||||||
size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
|
|
||||||
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
|
|
||||||
size -= 2 * mixBytes
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
|
@ -1,46 +0,0 @@
|
||||||
// Copyright 2017 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package ethash
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
|
||||||
// hard coded lookup table with the value generated by it.
|
|
||||||
func TestSizeCalculations(t *testing.T) {
|
|
||||||
var tests []uint64
|
|
||||||
|
|
||||||
// Verify all the cache sizes from the lookup table
|
|
||||||
defer func(sizes []uint64) { cacheSizes = sizes }(cacheSizes)
|
|
||||||
tests, cacheSizes = cacheSizes, []uint64{}
|
|
||||||
|
|
||||||
for i, test := range tests {
|
|
||||||
if size := cacheSize(uint64(i*epochLength) + 1); size != test {
|
|
||||||
t.Errorf("cache %d: cache size mismatch: have %d, want %d", i, size, test)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Verify all the dataset sizes from the lookup table
|
|
||||||
defer func(sizes []uint64) { datasetSizes = sizes }(datasetSizes)
|
|
||||||
tests, datasetSizes = datasetSizes, []uint64{}
|
|
||||||
|
|
||||||
for i, test := range tests {
|
|
||||||
if size := datasetSize(uint64(i*epochLength) + 1); size != test {
|
|
||||||
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", i, size, test)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -30,8 +30,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
// Tests whether the dataset size calculator works correctly by cross checking the
|
// Tests whether the dataset size calculator works correctly by cross checking the
|
||||||
// hard coded lookup table with the value generated by it.
|
// hard coded lookup table with the value generated by it.
|
||||||
func TestSizeCalculations(t *testing.T) {
|
func TestSizeCalculations(t *testing.T) {
|
||||||
|
@ -48,7 +46,6 @@ func TestSizeCalculations(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// Tests that verification caches can be correctly generated.
|
// Tests that verification caches can be correctly generated.
|
||||||
func TestCacheGeneration(t *testing.T) {
|
func TestCacheGeneration(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
@ -707,13 +704,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
|
||||||
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
||||||
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
|
||||||
Difficulty: big.NewInt(167925187834220),
|
Difficulty: big.NewInt(167925187834220),
|
||||||
<<<<<<< HEAD
|
|
||||||
GasLimit: big.NewInt(4015682),
|
|
||||||
GasUsed: big.NewInt(0),
|
|
||||||
=======
|
|
||||||
GasLimit: 4015682,
|
GasLimit: 4015682,
|
||||||
GasUsed: 0,
|
GasUsed: 0,
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
Time: big.NewInt(1488928920),
|
Time: big.NewInt(1488928920),
|
||||||
Extra: []byte("www.bw.com"),
|
Extra: []byte("www.bw.com"),
|
||||||
MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
|
MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
|
||||||
|
@ -727,12 +719,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
|
||||||
|
|
||||||
go func(idx int) {
|
go func(idx int) {
|
||||||
defer pend.Done()
|
defer pend.Done()
|
||||||
<<<<<<< HEAD
|
|
||||||
|
|
||||||
ethash := New(cachedir, 0, 1, "", 0, 0)
|
|
||||||
=======
|
|
||||||
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
|
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
|
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
|
||||||
t.Errorf("proc %d: block verification failed: %v", idx, err)
|
t.Errorf("proc %d: block verification failed: %v", idx, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -36,18 +36,12 @@ import (
|
||||||
|
|
||||||
// Ethash proof-of-work protocol constants.
|
// Ethash proof-of-work protocol constants.
|
||||||
var (
|
var (
|
||||||
<<<<<<< HEAD
|
|
||||||
frontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
|
||||||
byzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
|
||||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
|
||||||
|
|
||||||
nanosecond2017Timestamp = mustParseRfc3339("2017-01-01T00:00:00+00:00").UnixNano()
|
|
||||||
=======
|
|
||||||
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||||
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||||
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
|
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
nanosecond2017Timestamp = mustParseRfc3339("2017-01-01T00:00:00+00:00").UnixNano()
|
||||||
)
|
)
|
||||||
|
|
||||||
// Various error messages to mark blocks invalid. These should be private to
|
// Various error messages to mark blocks invalid. These should be private to
|
||||||
|
@ -61,16 +55,11 @@ var (
|
||||||
errDuplicateUncle = errors.New("duplicate uncle")
|
errDuplicateUncle = errors.New("duplicate uncle")
|
||||||
errUncleIsAncestor = errors.New("uncle is ancestor")
|
errUncleIsAncestor = errors.New("uncle is ancestor")
|
||||||
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
errDanglingUncle = errors.New("uncle's parent is not ancestor")
|
||||||
<<<<<<< HEAD
|
|
||||||
errNonceOutOfRange = errors.New("nonce out of range")
|
|
||||||
=======
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
errInvalidDifficulty = errors.New("non-positive difficulty")
|
errInvalidDifficulty = errors.New("non-positive difficulty")
|
||||||
errInvalidMixDigest = errors.New("invalid mix digest")
|
errInvalidMixDigest = errors.New("invalid mix digest")
|
||||||
errInvalidPoW = errors.New("invalid proof-of-work")
|
errInvalidPoW = errors.New("invalid proof-of-work")
|
||||||
)
|
)
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
func mustParseRfc3339(str string) time.Time {
|
func mustParseRfc3339(str string) time.Time {
|
||||||
time, err := time.Parse(time.RFC3339, str)
|
time, err := time.Parse(time.RFC3339, str)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -79,8 +68,6 @@ func mustParseRfc3339(str string) time.Time {
|
||||||
return time
|
return time
|
||||||
}
|
}
|
||||||
|
|
||||||
=======
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// Author implements consensus.Engine, returning the header's coinbase as the
|
// Author implements consensus.Engine, returning the header's coinbase as the
|
||||||
// proof-of-work verified author of the block.
|
// proof-of-work verified author of the block.
|
||||||
func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
|
func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
|
||||||
|
@ -91,11 +78,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
|
||||||
// stock Ethereum ethash engine.
|
// stock Ethereum ethash engine.
|
||||||
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
|
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
|
||||||
// If we're running a full engine faking, accept any input as valid
|
// If we're running a full engine faking, accept any input as valid
|
||||||
<<<<<<< HEAD
|
|
||||||
if ethash.fakeFull {
|
|
||||||
=======
|
|
||||||
if ethash.config.PowMode == ModeFullFake {
|
if ethash.config.PowMode == ModeFullFake {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Short circuit if the header is known, or it's parent not
|
// Short circuit if the header is known, or it's parent not
|
||||||
|
@ -116,11 +99,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
|
||||||
// a results channel to retrieve the async verifications.
|
// a results channel to retrieve the async verifications.
|
||||||
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||||
// If we're running a full engine faking, accept any input as valid
|
// If we're running a full engine faking, accept any input as valid
|
||||||
<<<<<<< HEAD
|
|
||||||
if ethash.fakeFull || len(headers) == 0 {
|
|
||||||
=======
|
|
||||||
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
|
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
abort, results := make(chan struct{}), make(chan error, len(headers))
|
abort, results := make(chan struct{}), make(chan error, len(headers))
|
||||||
for i := 0; i < len(headers); i++ {
|
for i := 0; i < len(headers); i++ {
|
||||||
results <- nil
|
results <- nil
|
||||||
|
@ -200,11 +179,7 @@ func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []
|
||||||
// rules of the stock Ethereum ethash engine.
|
// rules of the stock Ethereum ethash engine.
|
||||||
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
|
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
|
||||||
// If we're running a full engine faking, accept any input as valid
|
// If we're running a full engine faking, accept any input as valid
|
||||||
<<<<<<< HEAD
|
|
||||||
if ethash.fakeFull {
|
|
||||||
=======
|
|
||||||
if ethash.config.PowMode == ModeFullFake {
|
if ethash.config.PowMode == ModeFullFake {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Verify that there are at most 2 uncles included in this block
|
// Verify that there are at most 2 uncles included in this block
|
||||||
|
@ -257,21 +232,14 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo
|
||||||
// See YP section 4.3.4. "Block Header Validity"
|
// See YP section 4.3.4. "Block Header Validity"
|
||||||
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
|
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
|
||||||
// Ensure that the header's extra-data section is of a reasonable size
|
// Ensure that the header's extra-data section is of a reasonable size
|
||||||
<<<<<<< HEAD
|
|
||||||
maximumExtraDataSize := params.GetMaximumExtraDataSize(chain.Config().IsQuorum)
|
|
||||||
if uint64(len(header.Extra)) > maximumExtraDataSize {
|
|
||||||
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), maximumExtraDataSize)
|
|
||||||
=======
|
|
||||||
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
|
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
|
||||||
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
|
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
// Verify the header's timestamp
|
// Verify the header's timestamp
|
||||||
if uncle {
|
if uncle {
|
||||||
if header.Time.Cmp(math.MaxBig256) > 0 {
|
if header.Time.Cmp(math.MaxBig256) > 0 {
|
||||||
return errLargeBlockTime
|
return errLargeBlockTime
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
} else if !chain.Config().IsQuorum {
|
} else if !chain.Config().IsQuorum {
|
||||||
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
|
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
|
||||||
return consensus.ErrFutureBlock
|
return consensus.ErrFutureBlock
|
||||||
|
@ -291,47 +259,18 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||||
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
|
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
|
||||||
return consensus.ErrFutureBlock
|
return consensus.ErrFutureBlock
|
||||||
}
|
}
|
||||||
=======
|
|
||||||
} else {
|
|
||||||
if header.Time.Cmp(big.NewInt(time.Now().Add(allowedFutureBlockTime).Unix())) > 0 {
|
|
||||||
return consensus.ErrFutureBlock
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if header.Time.Cmp(parent.Time) <= 0 {
|
if header.Time.Cmp(parent.Time) <= 0 {
|
||||||
return errZeroBlockTime
|
return errZeroBlockTime
|
||||||
}
|
}
|
||||||
// Verify the block's difficulty based in it's timestamp and parent's difficulty
|
// Verify the block's difficulty based in it's timestamp and parent's difficulty
|
||||||
<<<<<<< HEAD
|
|
||||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
|
||||||
=======
|
|
||||||
expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
|
expected := ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
|
||||||
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
if expected.Cmp(header.Difficulty) != 0 {
|
if expected.Cmp(header.Difficulty) != 0 {
|
||||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
||||||
}
|
}
|
||||||
// Verify that the gas limit is <= 2^63-1
|
// Verify that the gas limit is <= 2^63-1
|
||||||
<<<<<<< HEAD
|
|
||||||
if header.GasLimit.Cmp(math.MaxBig63) > 0 {
|
|
||||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, math.MaxBig63)
|
|
||||||
}
|
|
||||||
// Verify that the gasUsed is <= gasLimit
|
|
||||||
if header.GasUsed.Cmp(header.GasLimit) > 0 {
|
|
||||||
return fmt.Errorf("invalid gasUsed: have %v, gasLimit %v", header.GasUsed, header.GasLimit)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify that the gas limit remains within allowed bounds
|
|
||||||
diff := new(big.Int).Set(parent.GasLimit)
|
|
||||||
diff = diff.Sub(diff, header.GasLimit)
|
|
||||||
diff.Abs(diff)
|
|
||||||
|
|
||||||
limit := new(big.Int).Set(parent.GasLimit)
|
|
||||||
limit = limit.Div(limit, params.GasLimitBoundDivisor)
|
|
||||||
|
|
||||||
if diff.Cmp(limit) >= 0 || header.GasLimit.Cmp(params.MinGasLimit) < 0 {
|
|
||||||
return fmt.Errorf("invalid gas limit: have %v, want %v += %v", header.GasLimit, parent.GasLimit, limit)
|
|
||||||
=======
|
|
||||||
cap := uint64(0x7fffffffffffffff)
|
cap := uint64(0x7fffffffffffffff)
|
||||||
if header.GasLimit > cap {
|
if header.GasLimit > cap {
|
||||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
||||||
|
@ -350,7 +289,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||||
|
|
||||||
if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit {
|
if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit {
|
||||||
return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit)
|
return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit)
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
// Verify that the block number is parent's +1
|
// Verify that the block number is parent's +1
|
||||||
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
|
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
|
||||||
|
@ -375,9 +313,6 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||||
// the difficulty that a new block should have when created at time
|
// the difficulty that a new block should have when created at time
|
||||||
// given the parent block's time and difficulty.
|
// given the parent block's time and difficulty.
|
||||||
<<<<<<< HEAD
|
|
||||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
|
||||||
=======
|
|
||||||
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
||||||
return CalcDifficulty(chain.Config(), time, parent)
|
return CalcDifficulty(chain.Config(), time, parent)
|
||||||
}
|
}
|
||||||
|
@ -385,7 +320,6 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
|
||||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||||
// the difficulty that a new block should have when created at time
|
// the difficulty that a new block should have when created at time
|
||||||
// given the parent block's time and difficulty.
|
// given the parent block's time and difficulty.
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||||
next := new(big.Int).Add(parent.Number, big1)
|
next := new(big.Int).Add(parent.Number, big1)
|
||||||
switch {
|
switch {
|
||||||
|
@ -438,11 +372,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
|
||||||
if x.Cmp(bigMinus99) < 0 {
|
if x.Cmp(bigMinus99) < 0 {
|
||||||
x.Set(bigMinus99)
|
x.Set(bigMinus99)
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
|
||||||
=======
|
|
||||||
// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
|
// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||||
x.Mul(y, x)
|
x.Mul(y, x)
|
||||||
x.Add(parent.Difficulty, x)
|
x.Add(parent.Difficulty, x)
|
||||||
|
@ -451,11 +381,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
|
||||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||||
x.Set(params.MinimumDifficulty)
|
x.Set(params.MinimumDifficulty)
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
// calculate a fake block numer for the ice-age delay:
|
|
||||||
=======
|
|
||||||
// calculate a fake block number for the ice-age delay:
|
// calculate a fake block number for the ice-age delay:
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// https://github.com/ethereum/EIPs/pull/669
|
// https://github.com/ethereum/EIPs/pull/669
|
||||||
// fake_block_number = min(0, block.number - 3_000_000
|
// fake_block_number = min(0, block.number - 3_000_000
|
||||||
fakeBlockNumber := new(big.Int)
|
fakeBlockNumber := new(big.Int)
|
||||||
|
@ -480,11 +406,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
|
||||||
// the difficulty that a new block should have when created at time given the
|
// the difficulty that a new block should have when created at time given the
|
||||||
// parent block's time and difficulty. The calculation uses the Homestead rules.
|
// parent block's time and difficulty. The calculation uses the Homestead rules.
|
||||||
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
|
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
|
||||||
<<<<<<< HEAD
|
|
||||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
|
|
||||||
=======
|
|
||||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
|
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// algorithm:
|
// algorithm:
|
||||||
// diff = (parent_diff +
|
// diff = (parent_diff +
|
||||||
// (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
// (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
||||||
|
@ -565,15 +487,10 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
|
||||||
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
|
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
|
||||||
// the PoW difficulty requirements.
|
// the PoW difficulty requirements.
|
||||||
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
|
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
|
||||||
<<<<<<< HEAD
|
|
||||||
isQuorum := chain != nil && chain.Config().IsQuorum
|
isQuorum := chain != nil && chain.Config().IsQuorum
|
||||||
|
|
||||||
// If we're running a fake PoW, accept any seal as valid
|
|
||||||
if ethash.fakeMode {
|
|
||||||
=======
|
|
||||||
// If we're running a fake PoW, accept any seal as valid
|
// If we're running a fake PoW, accept any seal as valid
|
||||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
time.Sleep(ethash.fakeDelay)
|
time.Sleep(ethash.fakeDelay)
|
||||||
if ethash.fakeFail == header.Number.Uint64() {
|
if ethash.fakeFail == header.Number.Uint64() {
|
||||||
return errInvalidPoW
|
return errInvalidPoW
|
||||||
|
@ -584,30 +501,11 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
|
||||||
if ethash.shared != nil {
|
if ethash.shared != nil {
|
||||||
return ethash.shared.VerifySeal(chain, header)
|
return ethash.shared.VerifySeal(chain, header)
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
// Sanity check that the block number is below the lookup table size (60M blocks)
|
|
||||||
number := header.Number.Uint64()
|
|
||||||
if number/epochLength >= uint64(len(cacheSizes)) {
|
|
||||||
// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
|
|
||||||
return errNonceOutOfRange
|
|
||||||
}
|
|
||||||
=======
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// Ensure that we have a valid difficulty for the block
|
// Ensure that we have a valid difficulty for the block
|
||||||
if header.Difficulty.Sign() <= 0 {
|
if header.Difficulty.Sign() <= 0 {
|
||||||
return errInvalidDifficulty
|
return errInvalidDifficulty
|
||||||
}
|
}
|
||||||
// Recompute the digest and PoW value and verify against the header
|
// Recompute the digest and PoW value and verify against the header
|
||||||
<<<<<<< HEAD
|
|
||||||
cache := ethash.cache(number)
|
|
||||||
|
|
||||||
size := datasetSize(number)
|
|
||||||
if ethash.tester {
|
|
||||||
size = 32 * 1024
|
|
||||||
}
|
|
||||||
digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
|
|
||||||
if !isQuorum && !bytes.Equal(header.MixDigest[:], digest) {
|
|
||||||
=======
|
|
||||||
number := header.Number.Uint64()
|
number := header.Number.Uint64()
|
||||||
|
|
||||||
cache := ethash.cache(number)
|
cache := ethash.cache(number)
|
||||||
|
@ -620,19 +518,14 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
|
||||||
// until after the call to hashimotoLight so it's not unmapped while being used.
|
// until after the call to hashimotoLight so it's not unmapped while being used.
|
||||||
runtime.KeepAlive(cache)
|
runtime.KeepAlive(cache)
|
||||||
|
|
||||||
if !bytes.Equal(header.MixDigest[:], digest) {
|
if !!isQuorum && bytes.Equal(header.MixDigest[:], digest) {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
return errInvalidMixDigest
|
return errInvalidMixDigest
|
||||||
}
|
}
|
||||||
target := new(big.Int).Div(maxUint256, header.Difficulty)
|
target := new(big.Int).Div(maxUint256, header.Difficulty)
|
||||||
if new(big.Int).SetBytes(result).Cmp(target) > 0 {
|
if new(big.Int).SetBytes(result).Cmp(target) > 0 {
|
||||||
<<<<<<< HEAD
|
|
||||||
if !isQuorum {
|
if !isQuorum {
|
||||||
return errInvalidPoW
|
return errInvalidPoW
|
||||||
}
|
}
|
||||||
=======
|
|
||||||
return errInvalidPoW
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -644,12 +537,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
|
||||||
if parent == nil {
|
if parent == nil {
|
||||||
return consensus.ErrUnknownAncestor
|
return consensus.ErrUnknownAncestor
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
|
||||||
|
|
||||||
=======
|
|
||||||
header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
|
header.Difficulty = ethash.CalcDifficulty(chain, header.Time.Uint64(), parent)
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -657,11 +545,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
|
||||||
// setting the final state and assembling the block.
|
// setting the final state and assembling the block.
|
||||||
func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
|
func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
|
||||||
// Accumulate any block and uncle rewards and commit the final state root
|
// Accumulate any block and uncle rewards and commit the final state root
|
||||||
<<<<<<< HEAD
|
|
||||||
AccumulateRewards(chain.Config(), state, header, uncles)
|
AccumulateRewards(chain.Config(), state, header, uncles)
|
||||||
=======
|
|
||||||
accumulateRewards(chain.Config(), state, header, uncles)
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
|
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
|
||||||
|
|
||||||
// Header seems complete, assemble into a block and return
|
// Header seems complete, assemble into a block and return
|
||||||
|
@ -677,20 +561,11 @@ var (
|
||||||
// AccumulateRewards credits the coinbase of the given block with the mining
|
// AccumulateRewards credits the coinbase of the given block with the mining
|
||||||
// reward. The total reward consists of the static block reward and rewards for
|
// reward. The total reward consists of the static block reward and rewards for
|
||||||
// included uncles. The coinbase of each uncle block is also rewarded.
|
// included uncles. The coinbase of each uncle block is also rewarded.
|
||||||
<<<<<<< HEAD
|
|
||||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
|
||||||
func AccumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
func AccumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
||||||
// Select the correct block reward based on chain progression
|
|
||||||
blockReward := frontierBlockReward
|
|
||||||
if config.IsByzantium(header.Number) {
|
|
||||||
blockReward = byzantiumBlockReward
|
|
||||||
=======
|
|
||||||
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
|
||||||
// Select the correct block reward based on chain progression
|
// Select the correct block reward based on chain progression
|
||||||
blockReward := FrontierBlockReward
|
blockReward := FrontierBlockReward
|
||||||
if config.IsByzantium(header.Number) {
|
if config.IsByzantium(header.Number) {
|
||||||
blockReward = ByzantiumBlockReward
|
blockReward = ByzantiumBlockReward
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
// Accumulate the rewards for the miner and any included uncles
|
// Accumulate the rewards for the miner and any included uncles
|
||||||
reward := new(big.Int).Set(blockReward)
|
reward := new(big.Int).Set(blockReward)
|
||||||
|
|
|
@ -71,10 +71,7 @@ func TestCalcDifficulty(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
config := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
|
config := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
for name, test := range tests {
|
for name, test := range tests {
|
||||||
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
|
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
|
||||||
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
|
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
|
||||||
|
|
|
@ -26,10 +26,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
"runtime"
|
"runtime"
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
@ -38,14 +35,9 @@ import (
|
||||||
mmap "github.com/edsrzf/mmap-go"
|
mmap "github.com/edsrzf/mmap-go"
|
||||||
"github.com/ethereum/go-ethereum/consensus"
|
"github.com/ethereum/go-ethereum/consensus"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
<<<<<<< HEAD
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
metrics "github.com/rcrowley/go-metrics"
|
|
||||||
=======
|
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
"github.com/hashicorp/golang-lru/simplelru"
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
|
||||||
|
@ -55,11 +47,7 @@ var (
|
||||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||||
|
|
||||||
// sharedEthash is a full instance that can be shared between multiple users.
|
// sharedEthash is a full instance that can be shared between multiple users.
|
||||||
<<<<<<< HEAD
|
|
||||||
sharedEthash = New("", 3, 0, "", 1, 0)
|
|
||||||
=======
|
|
||||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
|
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
||||||
// algorithmRevision is the data structure version used for file naming.
|
// algorithmRevision is the data structure version used for file naming.
|
||||||
algorithmRevision = 23
|
algorithmRevision = 23
|
||||||
|
@ -156,19 +144,6 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
|
||||||
return memoryMap(path)
|
return memoryMap(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
|
|
||||||
type cache struct {
|
|
||||||
epoch uint64 // Epoch for which this cache is relevant
|
|
||||||
|
|
||||||
dump *os.File // File descriptor of the memory mapped cache
|
|
||||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
|
||||||
|
|
||||||
cache []uint32 // The actual cache data content (may be memory mapped)
|
|
||||||
used time.Time // Timestamp of the last use for smarter eviction
|
|
||||||
once sync.Once // Ensures the cache is generated only once
|
|
||||||
lock sync.Mutex // Ensures thread safety for updating the usage time
|
|
||||||
=======
|
|
||||||
// lru tracks caches or datasets by their last use time, keeping at most N of them.
|
// lru tracks caches or datasets by their last use time, keeping at most N of them.
|
||||||
type lru struct {
|
type lru struct {
|
||||||
what string
|
what string
|
||||||
|
@ -234,31 +209,17 @@ type cache struct {
|
||||||
// interface to be usable in an LRU cache.
|
// interface to be usable in an LRU cache.
|
||||||
func newCache(epoch uint64) interface{} {
|
func newCache(epoch uint64) interface{} {
|
||||||
return &cache{epoch: epoch}
|
return &cache{epoch: epoch}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// generate ensures that the cache content is generated before use.
|
// generate ensures that the cache content is generated before use.
|
||||||
func (c *cache) generate(dir string, limit int, test bool) {
|
func (c *cache) generate(dir string, limit int, test bool) {
|
||||||
c.once.Do(func() {
|
c.once.Do(func() {
|
||||||
<<<<<<< HEAD
|
|
||||||
// If we have a testing cache, generate and return
|
|
||||||
if test {
|
|
||||||
c.cache = make([]uint32, 1024/4)
|
|
||||||
generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// If we don't store anything on disk, generate and return
|
|
||||||
size := cacheSize(c.epoch*epochLength + 1)
|
|
||||||
seed := seedHash(c.epoch*epochLength + 1)
|
|
||||||
|
|
||||||
=======
|
|
||||||
size := cacheSize(c.epoch*epochLength + 1)
|
size := cacheSize(c.epoch*epochLength + 1)
|
||||||
seed := seedHash(c.epoch*epochLength + 1)
|
seed := seedHash(c.epoch*epochLength + 1)
|
||||||
if test {
|
if test {
|
||||||
size = 1024
|
size = 1024
|
||||||
}
|
}
|
||||||
// If we don't store anything on disk, generate and return.
|
// If we don't store anything on disk, generate and return.
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
if dir == "" {
|
if dir == "" {
|
||||||
c.cache = make([]uint32, size/4)
|
c.cache = make([]uint32, size/4)
|
||||||
generateCache(c.cache, c.epoch, seed)
|
generateCache(c.cache, c.epoch, seed)
|
||||||
|
@ -272,13 +233,10 @@ func (c *cache) generate(dir string, limit int, test bool) {
|
||||||
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||||
logger := log.New("epoch", c.epoch)
|
logger := log.New("epoch", c.epoch)
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
||||||
// cache becomes unused.
|
// cache becomes unused.
|
||||||
runtime.SetFinalizer(c, (*cache).finalizer)
|
runtime.SetFinalizer(c, (*cache).finalizer)
|
||||||
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// Try to load the file from disk and memory map it
|
// Try to load the file from disk and memory map it
|
||||||
var err error
|
var err error
|
||||||
c.dump, c.mmap, c.cache, err = memoryMap(path)
|
c.dump, c.mmap, c.cache, err = memoryMap(path)
|
||||||
|
@ -305,40 +263,17 @@ func (c *cache) generate(dir string, limit int, test bool) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
// release closes any file handlers and memory maps open.
|
|
||||||
func (c *cache) release() {
|
|
||||||
if c.mmap != nil {
|
|
||||||
c.mmap.Unmap()
|
|
||||||
c.mmap = nil
|
|
||||||
}
|
|
||||||
if c.dump != nil {
|
|
||||||
c.dump.Close()
|
|
||||||
c.dump = nil
|
|
||||||
=======
|
|
||||||
// finalizer unmaps the memory and closes the file.
|
// finalizer unmaps the memory and closes the file.
|
||||||
func (c *cache) finalizer() {
|
func (c *cache) finalizer() {
|
||||||
if c.mmap != nil {
|
if c.mmap != nil {
|
||||||
c.mmap.Unmap()
|
c.mmap.Unmap()
|
||||||
c.dump.Close()
|
c.dump.Close()
|
||||||
c.mmap, c.dump = nil, nil
|
c.mmap, c.dump = nil, nil
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
|
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
|
||||||
type dataset struct {
|
type dataset struct {
|
||||||
<<<<<<< HEAD
|
|
||||||
epoch uint64 // Epoch for which this cache is relevant
|
|
||||||
|
|
||||||
dump *os.File // File descriptor of the memory mapped cache
|
|
||||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
|
||||||
|
|
||||||
dataset []uint32 // The actual cache data content
|
|
||||||
used time.Time // Timestamp of the last use for smarter eviction
|
|
||||||
once sync.Once // Ensures the cache is generated only once
|
|
||||||
lock sync.Mutex // Ensures thread safety for updating the usage time
|
|
||||||
=======
|
|
||||||
epoch uint64 // Epoch for which this cache is relevant
|
epoch uint64 // Epoch for which this cache is relevant
|
||||||
dump *os.File // File descriptor of the memory mapped cache
|
dump *os.File // File descriptor of the memory mapped cache
|
||||||
mmap mmap.MMap // Memory map itself to unmap before releasing
|
mmap mmap.MMap // Memory map itself to unmap before releasing
|
||||||
|
@ -350,29 +285,11 @@ type dataset struct {
|
||||||
// interface to be usable in an LRU cache.
|
// interface to be usable in an LRU cache.
|
||||||
func newDataset(epoch uint64) interface{} {
|
func newDataset(epoch uint64) interface{} {
|
||||||
return &dataset{epoch: epoch}
|
return &dataset{epoch: epoch}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// generate ensures that the dataset content is generated before use.
|
// generate ensures that the dataset content is generated before use.
|
||||||
func (d *dataset) generate(dir string, limit int, test bool) {
|
func (d *dataset) generate(dir string, limit int, test bool) {
|
||||||
d.once.Do(func() {
|
d.once.Do(func() {
|
||||||
<<<<<<< HEAD
|
|
||||||
// If we have a testing dataset, generate and return
|
|
||||||
if test {
|
|
||||||
cache := make([]uint32, 1024/4)
|
|
||||||
generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))
|
|
||||||
|
|
||||||
d.dataset = make([]uint32, 32*1024/4)
|
|
||||||
generateDataset(d.dataset, d.epoch, cache)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// If we don't store anything on disk, generate and return
|
|
||||||
csize := cacheSize(d.epoch*epochLength + 1)
|
|
||||||
dsize := datasetSize(d.epoch*epochLength + 1)
|
|
||||||
seed := seedHash(d.epoch*epochLength + 1)
|
|
||||||
|
|
||||||
=======
|
|
||||||
csize := cacheSize(d.epoch*epochLength + 1)
|
csize := cacheSize(d.epoch*epochLength + 1)
|
||||||
dsize := datasetSize(d.epoch*epochLength + 1)
|
dsize := datasetSize(d.epoch*epochLength + 1)
|
||||||
seed := seedHash(d.epoch*epochLength + 1)
|
seed := seedHash(d.epoch*epochLength + 1)
|
||||||
|
@ -381,7 +298,6 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
||||||
dsize = 32 * 1024
|
dsize = 32 * 1024
|
||||||
}
|
}
|
||||||
// If we don't store anything on disk, generate and return
|
// If we don't store anything on disk, generate and return
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
if dir == "" {
|
if dir == "" {
|
||||||
cache := make([]uint32, csize/4)
|
cache := make([]uint32, csize/4)
|
||||||
generateCache(cache, d.epoch, seed)
|
generateCache(cache, d.epoch, seed)
|
||||||
|
@ -397,13 +313,10 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
||||||
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
|
||||||
logger := log.New("epoch", d.epoch)
|
logger := log.New("epoch", d.epoch)
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
// We're about to mmap the file, ensure that the mapping is cleaned up when the
|
||||||
// cache becomes unused.
|
// cache becomes unused.
|
||||||
runtime.SetFinalizer(d, (*dataset).finalizer)
|
runtime.SetFinalizer(d, (*dataset).finalizer)
|
||||||
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
// Try to load the file from disk and memory map it
|
// Try to load the file from disk and memory map it
|
||||||
var err error
|
var err error
|
||||||
d.dump, d.mmap, d.dataset, err = memoryMap(path)
|
d.dump, d.mmap, d.dataset, err = memoryMap(path)
|
||||||
|
@ -433,24 +346,12 @@ func (d *dataset) generate(dir string, limit int, test bool) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
// release closes any file handlers and memory maps open.
|
|
||||||
func (d *dataset) release() {
|
|
||||||
if d.mmap != nil {
|
|
||||||
d.mmap.Unmap()
|
|
||||||
d.mmap = nil
|
|
||||||
}
|
|
||||||
if d.dump != nil {
|
|
||||||
d.dump.Close()
|
|
||||||
d.dump = nil
|
|
||||||
=======
|
|
||||||
// finalizer closes any file handlers and memory maps open.
|
// finalizer closes any file handlers and memory maps open.
|
||||||
func (d *dataset) finalizer() {
|
func (d *dataset) finalizer() {
|
||||||
if d.mmap != nil {
|
if d.mmap != nil {
|
||||||
d.mmap.Unmap()
|
d.mmap.Unmap()
|
||||||
d.dump.Close()
|
d.dump.Close()
|
||||||
d.mmap, d.dump = nil, nil
|
d.mmap, d.dump = nil, nil
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -458,19 +359,12 @@ func (d *dataset) finalizer() {
|
||||||
func MakeCache(block uint64, dir string) {
|
func MakeCache(block uint64, dir string) {
|
||||||
c := cache{epoch: block / epochLength}
|
c := cache{epoch: block / epochLength}
|
||||||
c.generate(dir, math.MaxInt32, false)
|
c.generate(dir, math.MaxInt32, false)
|
||||||
<<<<<<< HEAD
|
|
||||||
c.release()
|
|
||||||
=======
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
|
||||||
func MakeDataset(block uint64, dir string) {
|
func MakeDataset(block uint64, dir string) {
|
||||||
d := dataset{epoch: block / epochLength}
|
d := dataset{epoch: block / epochLength}
|
||||||
d.generate(dir, math.MaxInt32, false)
|
d.generate(dir, math.MaxInt32, false)
|
||||||
<<<<<<< HEAD
|
|
||||||
d.release()
|
|
||||||
=======
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mode defines the type and amount of PoW verification an ethash engine makes.
|
// Mode defines the type and amount of PoW verification an ethash engine makes.
|
||||||
|
@ -493,30 +387,15 @@ type Config struct {
|
||||||
DatasetsInMem int
|
DatasetsInMem int
|
||||||
DatasetsOnDisk int
|
DatasetsOnDisk int
|
||||||
PowMode Mode
|
PowMode Mode
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ethash is a consensus engine based on proot-of-work implementing the ethash
|
// Ethash is a consensus engine based on proot-of-work implementing the ethash
|
||||||
// algorithm.
|
// algorithm.
|
||||||
type Ethash struct {
|
type Ethash struct {
|
||||||
<<<<<<< HEAD
|
|
||||||
cachedir string // Data directory to store the verification caches
|
|
||||||
cachesinmem int // Number of caches to keep in memory
|
|
||||||
cachesondisk int // Number of caches to keep on disk
|
|
||||||
dagdir string // Data directory to store full mining datasets
|
|
||||||
dagsinmem int // Number of mining datasets to keep in memory
|
|
||||||
dagsondisk int // Number of mining datasets to keep on disk
|
|
||||||
|
|
||||||
caches map[uint64]*cache // In memory caches to avoid regenerating too often
|
|
||||||
fcache *cache // Pre-generated cache for the estimated future epoch
|
|
||||||
datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
|
|
||||||
fdataset *dataset // Pre-generated dataset for the estimated future epoch
|
|
||||||
=======
|
|
||||||
config Config
|
config Config
|
||||||
|
|
||||||
caches *lru // In memory caches to avoid regenerating too often
|
caches *lru // In memory caches to avoid regenerating too often
|
||||||
datasets *lru // In memory datasets to avoid regenerating too often
|
datasets *lru // In memory datasets to avoid regenerating too often
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
||||||
// Mining related fields
|
// Mining related fields
|
||||||
rand *rand.Rand // Properly seeded random source for nonces
|
rand *rand.Rand // Properly seeded random source for nonces
|
||||||
|
@ -525,14 +404,7 @@ type Ethash struct {
|
||||||
hashrate metrics.Meter // Meter tracking the average hashrate
|
hashrate metrics.Meter // Meter tracking the average hashrate
|
||||||
|
|
||||||
// The fields below are hooks for testing
|
// The fields below are hooks for testing
|
||||||
<<<<<<< HEAD
|
|
||||||
tester bool // Flag whether to use a smaller test dataset
|
|
||||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
||||||
fakeMode bool // Flag whether to disable PoW checking
|
|
||||||
fakeFull bool // Flag whether to disable all consensus rules
|
|
||||||
=======
|
|
||||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
||||||
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
||||||
|
|
||||||
|
@ -540,30 +412,6 @@ type Ethash struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a full sized ethash PoW scheme.
|
// New creates a full sized ethash PoW scheme.
|
||||||
<<<<<<< HEAD
|
|
||||||
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
|
|
||||||
if cachesinmem <= 0 {
|
|
||||||
log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
|
|
||||||
cachesinmem = 1
|
|
||||||
}
|
|
||||||
if cachedir != "" && cachesondisk > 0 {
|
|
||||||
log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
|
|
||||||
}
|
|
||||||
if dagdir != "" && dagsondisk > 0 {
|
|
||||||
log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
|
|
||||||
}
|
|
||||||
return &Ethash{
|
|
||||||
cachedir: cachedir,
|
|
||||||
cachesinmem: cachesinmem,
|
|
||||||
cachesondisk: cachesondisk,
|
|
||||||
dagdir: dagdir,
|
|
||||||
dagsinmem: dagsinmem,
|
|
||||||
dagsondisk: dagsondisk,
|
|
||||||
caches: make(map[uint64]*cache),
|
|
||||||
datasets: make(map[uint64]*dataset),
|
|
||||||
update: make(chan struct{}),
|
|
||||||
hashrate: metrics.NewMeter(),
|
|
||||||
=======
|
|
||||||
func New(config Config) *Ethash {
|
func New(config Config) *Ethash {
|
||||||
if config.CachesInMem <= 0 {
|
if config.CachesInMem <= 0 {
|
||||||
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||||
|
@ -581,86 +429,58 @@ func New(config Config) *Ethash {
|
||||||
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
|
||||||
update: make(chan struct{}),
|
update: make(chan struct{}),
|
||||||
hashrate: metrics.NewMeter(),
|
hashrate: metrics.NewMeter(),
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTester creates a small sized ethash PoW scheme useful only for testing
|
// NewTester creates a small sized ethash PoW scheme useful only for testing
|
||||||
// purposes.
|
// purposes.
|
||||||
func NewTester() *Ethash {
|
func NewTester() *Ethash {
|
||||||
<<<<<<< HEAD
|
|
||||||
return &Ethash{
|
|
||||||
cachesinmem: 1,
|
|
||||||
caches: make(map[uint64]*cache),
|
|
||||||
datasets: make(map[uint64]*dataset),
|
|
||||||
tester: true,
|
|
||||||
update: make(chan struct{}),
|
|
||||||
hashrate: metrics.NewMeter(),
|
|
||||||
}
|
|
||||||
=======
|
|
||||||
return New(Config{CachesInMem: 1, PowMode: ModeTest})
|
return New(Config{CachesInMem: 1, PowMode: ModeTest})
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
|
// NewFaker creates a ethash consensus engine with a fake PoW scheme that accepts
|
||||||
// all blocks' seal as valid, though they still have to conform to the Ethereum
|
// all blocks' seal as valid, though they still have to conform to the Ethereum
|
||||||
// consensus rules.
|
// consensus rules.
|
||||||
func NewFaker() *Ethash {
|
func NewFaker() *Ethash {
|
||||||
<<<<<<< HEAD
|
|
||||||
return &Ethash{fakeMode: true}
|
|
||||||
=======
|
|
||||||
return &Ethash{
|
return &Ethash{
|
||||||
config: Config{
|
config: Config{
|
||||||
PowMode: ModeFake,
|
PowMode: ModeFake,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
|
// NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
|
||||||
// accepts all blocks as valid apart from the single one specified, though they
|
// accepts all blocks as valid apart from the single one specified, though they
|
||||||
// still have to conform to the Ethereum consensus rules.
|
// still have to conform to the Ethereum consensus rules.
|
||||||
func NewFakeFailer(fail uint64) *Ethash {
|
func NewFakeFailer(fail uint64) *Ethash {
|
||||||
<<<<<<< HEAD
|
|
||||||
return &Ethash{fakeMode: true, fakeFail: fail}
|
|
||||||
=======
|
|
||||||
return &Ethash{
|
return &Ethash{
|
||||||
config: Config{
|
config: Config{
|
||||||
PowMode: ModeFake,
|
PowMode: ModeFake,
|
||||||
},
|
},
|
||||||
fakeFail: fail,
|
fakeFail: fail,
|
||||||
}
|
}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
|
// NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
|
||||||
// accepts all blocks as valid, but delays verifications by some time, though
|
// accepts all blocks as valid, but delays verifications by some time, though
|
||||||
// they still have to conform to the Ethereum consensus rules.
|
// they still have to conform to the Ethereum consensus rules.
|
||||||
func NewFakeDelayer(delay time.Duration) *Ethash {
|
func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||||
<<<<<<< HEAD
|
|
||||||
return &Ethash{fakeMode: true, fakeDelay: delay}
|
|
||||||
=======
|
|
||||||
return &Ethash{
|
return &Ethash{
|
||||||
config: Config{
|
config: Config{
|
||||||
PowMode: ModeFake,
|
PowMode: ModeFake,
|
||||||
},
|
},
|
||||||
fakeDelay: delay,
|
fakeDelay: delay,
|
||||||
}
|
}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
|
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
|
||||||
// accepts all blocks as valid, without checking any consensus rules whatsoever.
|
// accepts all blocks as valid, without checking any consensus rules whatsoever.
|
||||||
func NewFullFaker() *Ethash {
|
func NewFullFaker() *Ethash {
|
||||||
<<<<<<< HEAD
|
|
||||||
return &Ethash{fakeMode: true, fakeFull: true}
|
|
||||||
=======
|
|
||||||
return &Ethash{
|
return &Ethash{
|
||||||
config: Config{
|
config: Config{
|
||||||
PowMode: ModeFullFake,
|
PowMode: ModeFullFake,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewShared creates a full sized ethash PoW shared between all requesters running
|
// NewShared creates a full sized ethash PoW shared between all requesters running
|
||||||
|
@ -672,65 +492,6 @@ func NewShared() *Ethash {
|
||||||
// cache tries to retrieve a verification cache for the specified block number
|
// cache tries to retrieve a verification cache for the specified block number
|
||||||
// by first checking against a list of in-memory caches, then against caches
|
// by first checking against a list of in-memory caches, then against caches
|
||||||
// stored on disk, and finally generating one if none can be found.
|
// stored on disk, and finally generating one if none can be found.
|
||||||
<<<<<<< HEAD
|
|
||||||
func (ethash *Ethash) cache(block uint64) []uint32 {
|
|
||||||
epoch := block / epochLength
|
|
||||||
|
|
||||||
// If we have a PoW for that epoch, use that
|
|
||||||
ethash.lock.Lock()
|
|
||||||
|
|
||||||
current, future := ethash.caches[epoch], (*cache)(nil)
|
|
||||||
if current == nil {
|
|
||||||
// No in-memory cache, evict the oldest if the cache limit was reached
|
|
||||||
for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
|
|
||||||
var evict *cache
|
|
||||||
for _, cache := range ethash.caches {
|
|
||||||
if evict == nil || evict.used.After(cache.used) {
|
|
||||||
evict = cache
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(ethash.caches, evict.epoch)
|
|
||||||
evict.release()
|
|
||||||
|
|
||||||
log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
|
|
||||||
}
|
|
||||||
// If we have the new cache pre-generated, use that, otherwise create a new one
|
|
||||||
if ethash.fcache != nil && ethash.fcache.epoch == epoch {
|
|
||||||
log.Trace("Using pre-generated cache", "epoch", epoch)
|
|
||||||
current, ethash.fcache = ethash.fcache, nil
|
|
||||||
} else {
|
|
||||||
log.Trace("Requiring new ethash cache", "epoch", epoch)
|
|
||||||
current = &cache{epoch: epoch}
|
|
||||||
}
|
|
||||||
ethash.caches[epoch] = current
|
|
||||||
|
|
||||||
// If we just used up the future cache, or need a refresh, regenerate
|
|
||||||
if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
|
|
||||||
if ethash.fcache != nil {
|
|
||||||
ethash.fcache.release()
|
|
||||||
}
|
|
||||||
log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
|
|
||||||
future = &cache{epoch: epoch + 1}
|
|
||||||
ethash.fcache = future
|
|
||||||
}
|
|
||||||
// New current cache, set its initial timestamp
|
|
||||||
current.used = time.Now()
|
|
||||||
}
|
|
||||||
ethash.lock.Unlock()
|
|
||||||
|
|
||||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
|
||||||
current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
|
|
||||||
|
|
||||||
current.lock.Lock()
|
|
||||||
current.used = time.Now()
|
|
||||||
current.lock.Unlock()
|
|
||||||
|
|
||||||
// If we exhausted the future cache, now's a good time to regenerate it
|
|
||||||
if future != nil {
|
|
||||||
go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
|
|
||||||
}
|
|
||||||
return current.cache
|
|
||||||
=======
|
|
||||||
func (ethash *Ethash) cache(block uint64) *cache {
|
func (ethash *Ethash) cache(block uint64) *cache {
|
||||||
epoch := block / epochLength
|
epoch := block / epochLength
|
||||||
currentI, futureI := ethash.caches.get(epoch)
|
currentI, futureI := ethash.caches.get(epoch)
|
||||||
|
@ -745,72 +506,11 @@ func (ethash *Ethash) cache(block uint64) *cache {
|
||||||
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
||||||
}
|
}
|
||||||
return current
|
return current
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// dataset tries to retrieve a mining dataset for the specified block number
|
// dataset tries to retrieve a mining dataset for the specified block number
|
||||||
// by first checking against a list of in-memory datasets, then against DAGs
|
// by first checking against a list of in-memory datasets, then against DAGs
|
||||||
// stored on disk, and finally generating one if none can be found.
|
// stored on disk, and finally generating one if none can be found.
|
||||||
<<<<<<< HEAD
|
|
||||||
func (ethash *Ethash) dataset(block uint64) []uint32 {
|
|
||||||
epoch := block / epochLength
|
|
||||||
|
|
||||||
// If we have a PoW for that epoch, use that
|
|
||||||
ethash.lock.Lock()
|
|
||||||
|
|
||||||
current, future := ethash.datasets[epoch], (*dataset)(nil)
|
|
||||||
if current == nil {
|
|
||||||
// No in-memory dataset, evict the oldest if the dataset limit was reached
|
|
||||||
for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
|
|
||||||
var evict *dataset
|
|
||||||
for _, dataset := range ethash.datasets {
|
|
||||||
if evict == nil || evict.used.After(dataset.used) {
|
|
||||||
evict = dataset
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(ethash.datasets, evict.epoch)
|
|
||||||
evict.release()
|
|
||||||
|
|
||||||
log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
|
|
||||||
}
|
|
||||||
// If we have the new cache pre-generated, use that, otherwise create a new one
|
|
||||||
if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
|
|
||||||
log.Trace("Using pre-generated dataset", "epoch", epoch)
|
|
||||||
current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
|
|
||||||
ethash.fdataset = nil
|
|
||||||
} else {
|
|
||||||
log.Trace("Requiring new ethash dataset", "epoch", epoch)
|
|
||||||
current = &dataset{epoch: epoch}
|
|
||||||
}
|
|
||||||
ethash.datasets[epoch] = current
|
|
||||||
|
|
||||||
// If we just used up the future dataset, or need a refresh, regenerate
|
|
||||||
if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
|
|
||||||
if ethash.fdataset != nil {
|
|
||||||
ethash.fdataset.release()
|
|
||||||
}
|
|
||||||
log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
|
|
||||||
future = &dataset{epoch: epoch + 1}
|
|
||||||
ethash.fdataset = future
|
|
||||||
}
|
|
||||||
// New current dataset, set its initial timestamp
|
|
||||||
current.used = time.Now()
|
|
||||||
}
|
|
||||||
ethash.lock.Unlock()
|
|
||||||
|
|
||||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
|
||||||
current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
|
|
||||||
|
|
||||||
current.lock.Lock()
|
|
||||||
current.used = time.Now()
|
|
||||||
current.lock.Unlock()
|
|
||||||
|
|
||||||
// If we exhausted the future dataset, now's a good time to regenerate it
|
|
||||||
if future != nil {
|
|
||||||
go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
|
|
||||||
}
|
|
||||||
return current.dataset
|
|
||||||
=======
|
|
||||||
func (ethash *Ethash) dataset(block uint64) *dataset {
|
func (ethash *Ethash) dataset(block uint64) *dataset {
|
||||||
epoch := block / epochLength
|
epoch := block / epochLength
|
||||||
currentI, futureI := ethash.datasets.get(epoch)
|
currentI, futureI := ethash.datasets.get(epoch)
|
||||||
|
@ -826,7 +526,6 @@ func (ethash *Ethash) dataset(block uint64) *dataset {
|
||||||
}
|
}
|
||||||
|
|
||||||
return current
|
return current
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Threads returns the number of mining threads currently enabled. This doesn't
|
// Threads returns the number of mining threads currently enabled. This doesn't
|
||||||
|
@ -877,11 +576,8 @@ func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
|
||||||
func SeedHash(block uint64) []byte {
|
func SeedHash(block uint64) []byte {
|
||||||
return seedHash(block)
|
return seedHash(block)
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
|
|
||||||
// Protocol implements consensus.Engine.Protocol
|
// Protocol implements consensus.Engine.Protocol
|
||||||
func (ethash *Ethash) Protocol() consensus.Protocol {
|
func (ethash *Ethash) Protocol() consensus.Protocol {
|
||||||
return consensus.EthProtocol
|
return consensus.EthProtocol
|
||||||
}
|
}
|
||||||
=======
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
|
@ -17,15 +17,11 @@
|
||||||
package ethash
|
package ethash
|
||||||
|
|
||||||
import (
|
import (
|
||||||
<<<<<<< HEAD
|
|
||||||
"math/big"
|
|
||||||
=======
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
@ -46,8 +42,6 @@ func TestTestMode(t *testing.T) {
|
||||||
t.Fatalf("unexpected verification error: %v", err)
|
t.Fatalf("unexpected verification error: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
|
|
||||||
// This test checks that cache lru logic doesn't crash under load.
|
// This test checks that cache lru logic doesn't crash under load.
|
||||||
// It reproduces https://github.com/ethereum/go-ethereum/issues/14943
|
// It reproduces https://github.com/ethereum/go-ethereum/issues/14943
|
||||||
|
@ -83,4 +77,3 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) {
|
||||||
e.VerifySeal(nil, head)
|
e.VerifySeal(nil, head)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
|
@ -34,11 +34,7 @@ import (
|
||||||
// the block's difficulty requirements.
|
// the block's difficulty requirements.
|
||||||
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
|
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
|
||||||
// If we're running a fake PoW, simply return a 0 nonce immediately
|
// If we're running a fake PoW, simply return a 0 nonce immediately
|
||||||
<<<<<<< HEAD
|
|
||||||
if ethash.fakeMode {
|
|
||||||
=======
|
|
||||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
header := block.Header()
|
header := block.Header()
|
||||||
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
|
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
|
||||||
return block.WithSeal(header), nil
|
return block.WithSeal(header), nil
|
||||||
|
@ -101,16 +97,9 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop
|
||||||
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
|
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
|
||||||
// Extract some data from the header
|
// Extract some data from the header
|
||||||
var (
|
var (
|
||||||
<<<<<<< HEAD
|
|
||||||
header = block.Header()
|
header = block.Header()
|
||||||
hash = header.HashNoNonce().Bytes()
|
hash = header.HashNoNonce().Bytes()
|
||||||
target = new(big.Int).Div(maxUint256, header.Difficulty)
|
target = new(big.Int).Div(maxUint256, header.Difficulty)
|
||||||
|
|
||||||
=======
|
|
||||||
header = block.Header()
|
|
||||||
hash = header.HashNoNonce().Bytes()
|
|
||||||
target = new(big.Int).Div(maxUint256, header.Difficulty)
|
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
number = header.Number.Uint64()
|
number = header.Number.Uint64()
|
||||||
dataset = ethash.dataset(number)
|
dataset = ethash.dataset(number)
|
||||||
)
|
)
|
||||||
|
@ -121,21 +110,14 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
|
||||||
)
|
)
|
||||||
logger := log.New("miner", id)
|
logger := log.New("miner", id)
|
||||||
logger.Trace("Started ethash search for new nonces", "seed", seed)
|
logger.Trace("Started ethash search for new nonces", "seed", seed)
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
search:
|
search:
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-abort:
|
case <-abort:
|
||||||
// Mining terminated, update stats and abort
|
// Mining terminated, update stats and abort
|
||||||
logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
|
logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
|
||||||
ethash.hashrate.Mark(attempts)
|
ethash.hashrate.Mark(attempts)
|
||||||
<<<<<<< HEAD
|
|
||||||
return
|
|
||||||
=======
|
|
||||||
break search
|
break search
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
// We don't have to update hash rate on every nonce, so update after after 2^X nonces
|
// We don't have to update hash rate on every nonce, so update after after 2^X nonces
|
||||||
|
@ -145,11 +127,7 @@ search:
|
||||||
attempts = 0
|
attempts = 0
|
||||||
}
|
}
|
||||||
// Compute the PoW value of this nonce
|
// Compute the PoW value of this nonce
|
||||||
<<<<<<< HEAD
|
|
||||||
digest, result := hashimotoFull(dataset, hash, nonce)
|
|
||||||
=======
|
|
||||||
digest, result := hashimotoFull(dataset.dataset, hash, nonce)
|
digest, result := hashimotoFull(dataset.dataset, hash, nonce)
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
|
if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
|
||||||
// Correct nonce found, create a new header with it
|
// Correct nonce found, create a new header with it
|
||||||
header = types.CopyHeader(header)
|
header = types.CopyHeader(header)
|
||||||
|
@ -163,19 +141,12 @@ search:
|
||||||
case <-abort:
|
case <-abort:
|
||||||
logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
|
logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
return
|
|
||||||
=======
|
|
||||||
break search
|
break search
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
nonce++
|
nonce++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
<<<<<<< HEAD
|
|
||||||
=======
|
|
||||||
// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
|
// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
|
||||||
// during sealing so it's not unmapped while being read.
|
// during sealing so it's not unmapped while being read.
|
||||||
runtime.KeepAlive(dataset)
|
runtime.KeepAlive(dataset)
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -100,6 +100,11 @@ type backend struct {
|
||||||
knownMessages *lru.ARCCache // the cache of self messages
|
knownMessages *lru.ARCCache // the cache of self messages
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// zekun: HACK
|
||||||
|
func (sb *backend) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
|
||||||
|
return new(big.Int)
|
||||||
|
}
|
||||||
|
|
||||||
// Address implements istanbul.Backend.Address
|
// Address implements istanbul.Backend.Address
|
||||||
func (sb *backend) Address() common.Address {
|
func (sb *backend) Address() common.Address {
|
||||||
return sb.address
|
return sb.address
|
||||||
|
|
|
@ -29,12 +29,12 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
metrics "github.com/ethereum/go-ethereum/metrics"
|
metrics "github.com/ethereum/go-ethereum/metrics"
|
||||||
goMetrics "github.com/rcrowley/go-metrics"
|
|
||||||
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
|
||||||
)
|
)
|
||||||
|
|
||||||
// New creates an Istanbul consensus core
|
// New creates an Istanbul consensus core
|
||||||
func New(backend istanbul.Backend, config *istanbul.Config) Engine {
|
func New(backend istanbul.Backend, config *istanbul.Config) Engine {
|
||||||
|
r := metrics.NewRegistry()
|
||||||
c := &core{
|
c := &core{
|
||||||
config: config,
|
config: config,
|
||||||
address: backend.Address(),
|
address: backend.Address(),
|
||||||
|
@ -47,10 +47,15 @@ func New(backend istanbul.Backend, config *istanbul.Config) Engine {
|
||||||
pendingRequests: prque.New(),
|
pendingRequests: prque.New(),
|
||||||
pendingRequestsMu: new(sync.Mutex),
|
pendingRequestsMu: new(sync.Mutex),
|
||||||
consensusTimestamp: time.Time{},
|
consensusTimestamp: time.Time{},
|
||||||
roundMeter: metrics.NewMeter("consensus/istanbul/core/round"),
|
roundMeter: metrics.NewMeter(),
|
||||||
sequenceMeter: metrics.NewMeter("consensus/istanbul/core/sequence"),
|
sequenceMeter: metrics.NewMeter(),
|
||||||
consensusTimer: metrics.NewTimer("consensus/istanbul/core/consensus"),
|
consensusTimer: metrics.NewTimer(),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
r.Register("consensus/istanbul/core/round", c.roundMeter)
|
||||||
|
r.Register("consensus/istanbul/core/sequence", c.sequenceMeter)
|
||||||
|
r.Register("consensus/istanbul/core/consensus", c.consensusTimer)
|
||||||
|
|
||||||
c.validateFn = c.checkValidatorSignature
|
c.validateFn = c.checkValidatorSignature
|
||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
@ -87,11 +92,11 @@ type core struct {
|
||||||
|
|
||||||
consensusTimestamp time.Time
|
consensusTimestamp time.Time
|
||||||
// the meter to record the round change rate
|
// the meter to record the round change rate
|
||||||
roundMeter goMetrics.Meter
|
roundMeter metrics.Meter
|
||||||
// the meter to record the sequence update rate
|
// the meter to record the sequence update rate
|
||||||
sequenceMeter goMetrics.Meter
|
sequenceMeter metrics.Meter
|
||||||
// the timer to record consensus duration (from accepting a preprepare to final committed stage)
|
// the timer to record consensus duration (from accepting a preprepare to final committed stage)
|
||||||
consensusTimer goMetrics.Timer
|
consensusTimer metrics.Timer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *core) finalizeMessage(msg *message) ([]byte, error) {
|
func (c *core) finalizeMessage(msg *message) ([]byte, error) {
|
||||||
|
|
|
@ -1,8 +1,4 @@
|
||||||
<<<<<<< HEAD
|
|
||||||
FROM alpine:3.5
|
|
||||||
=======
|
|
||||||
FROM alpine:3.7
|
FROM alpine:3.7
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||||
|
|
|
@ -1,8 +1,4 @@
|
||||||
<<<<<<< HEAD
|
|
||||||
FROM alpine:3.5
|
|
||||||
=======
|
|
||||||
FROM alpine:3.7
|
FROM alpine:3.7
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||||
|
|
|
@ -1,14 +1,5 @@
|
||||||
FROM ubuntu:xenial
|
FROM ubuntu:xenial
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
RUN \
|
|
||||||
apt-get update && apt-get upgrade -q -y && \
|
|
||||||
apt-get install -y --no-install-recommends golang git make gcc libc-dev ca-certificates && \
|
|
||||||
git clone --depth 1 https://github.com/ethereum/go-ethereum && \
|
|
||||||
(cd go-ethereum && make geth) && \
|
|
||||||
cp go-ethereum/build/bin/geth /geth && \
|
|
||||||
apt-get remove -y golang git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
|
||||||
=======
|
|
||||||
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
|
@ -18,7 +9,6 @@ RUN \
|
||||||
(cd go-ethereum && make geth) && \
|
(cd go-ethereum && make geth) && \
|
||||||
cp go-ethereum/build/bin/geth /geth && \
|
cp go-ethereum/build/bin/geth /geth && \
|
||||||
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
rm -rf /go-ethereum
|
rm -rf /go-ethereum
|
||||||
|
|
||||||
EXPOSE 8545
|
EXPOSE 8545
|
||||||
|
|
|
@ -1,16 +1,8 @@
|
||||||
<<<<<<< HEAD
|
|
||||||
FROM alpine:3.5
|
|
||||||
|
|
||||||
RUN \
|
|
||||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
|
||||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
|
||||||
=======
|
|
||||||
FROM alpine:3.7
|
FROM alpine:3.7
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||||
git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
|
git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
(cd go-ethereum && make geth) && \
|
(cd go-ethereum && make geth) && \
|
||||||
cp go-ethereum/build/bin/geth /geth && \
|
cp go-ethereum/build/bin/geth /geth && \
|
||||||
apk del go git make gcc musl-dev linux-headers && \
|
apk del go git make gcc musl-dev linux-headers && \
|
||||||
|
|
|
@ -1,14 +1,5 @@
|
||||||
FROM ubuntu:xenial
|
FROM ubuntu:xenial
|
||||||
|
|
||||||
<<<<<<< HEAD
|
|
||||||
RUN \
|
|
||||||
apt-get update && apt-get upgrade -q -y && \
|
|
||||||
apt-get install -y --no-install-recommends golang git make gcc libc-dev ca-certificates && \
|
|
||||||
git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
|
|
||||||
(cd go-ethereum && make geth) && \
|
|
||||||
cp go-ethereum/build/bin/geth /geth && \
|
|
||||||
apt-get remove -y golang git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
|
||||||
=======
|
|
||||||
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
ENV PATH=/usr/lib/go-1.9/bin:$PATH
|
||||||
|
|
||||||
RUN \
|
RUN \
|
||||||
|
@ -18,7 +9,6 @@ RUN \
|
||||||
(cd go-ethereum && make geth) && \
|
(cd go-ethereum && make geth) && \
|
||||||
cp go-ethereum/build/bin/geth /geth && \
|
cp go-ethereum/build/bin/geth /geth && \
|
||||||
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
|
||||||
>>>>>>> core/release/1.8
|
|
||||||
rm -rf /go-ethereum
|
rm -rf /go-ethereum
|
||||||
|
|
||||||
EXPOSE 8545
|
EXPOSE 8545
|
||||||
|
|
|
@ -350,9 +350,9 @@ func (bc *BlockChain) GasLimit() uint64 {
|
||||||
defer bc.mu.RUnlock()
|
defer bc.mu.RUnlock()
|
||||||
|
|
||||||
if bc.Config().IsQuorum {
|
if bc.Config().IsQuorum {
|
||||||
return math.MaxBig256 // HACK(joel) a very large number
|
return math.MaxBig256.Uint64() // HACK(joel) a very large number
|
||||||
} else {
|
} else {
|
||||||
return bc.currentBlock.GasLimit()
|
return bc.CurrentBlock().GasLimit()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1186,6 +1186,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
||||||
} else {
|
} else {
|
||||||
parent = chain[i-1]
|
parent = chain[i-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// alias state.New because we introduce a variable named state on the next line
|
||||||
|
stateNew := state.New
|
||||||
|
|
||||||
state, err := state.New(parent.Root(), bc.stateCache)
|
state, err := state.New(parent.Root(), bc.stateCache)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return i, events, coalescedLogs, err
|
return i, events, coalescedLogs, err
|
||||||
|
@ -1214,10 +1218,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
|
||||||
|
|
||||||
// Quorum
|
// Quorum
|
||||||
// Write private state changes to database
|
// Write private state changes to database
|
||||||
if privateStateRoot, err = privateState.CommitTo(bc.chainDb, bc.config.IsEIP158(block.Number())); err != nil {
|
if privateStateRoot, err = privateState.Commit(bc.Config().IsEIP158(block.Number())); err != nil {
|
||||||
return i, events, coalescedLogs, err
|
return i, events, coalescedLogs, err
|
||||||
}
|
}
|
||||||
if err := WritePrivateStateRoot(bc.chainDb, block.Root(), privateStateRoot); err != nil {
|
if err := WritePrivateStateRoot(bc.db, block.Root(), privateStateRoot); err != nil {
|
||||||
return i, events, coalescedLogs, err
|
return i, events, coalescedLogs, err
|
||||||
}
|
}
|
||||||
// /Quorum
|
// /Quorum
|
||||||
|
|
|
@ -44,10 +44,10 @@ func (cg *callHelper) MakeCall(private bool, key *ecdsa.PrivateKey, to common.Ad
|
||||||
cg.header.Number = new(big.Int)
|
cg.header.Number = new(big.Int)
|
||||||
cg.header.Time = new(big.Int).SetUint64(43)
|
cg.header.Time = new(big.Int).SetUint64(43)
|
||||||
cg.header.Difficulty = new(big.Int).SetUint64(1000488)
|
cg.header.Difficulty = new(big.Int).SetUint64(1000488)
|
||||||
cg.header.GasLimit = new(big.Int).SetUint64(4700000)
|
cg.header.GasLimit = 4700000
|
||||||
|
|
||||||
signer := types.MakeSigner(params.QuorumTestChainConfig, cg.header.Number)
|
signer := types.MakeSigner(params.QuorumTestChainConfig, cg.header.Number)
|
||||||
tx, err := types.SignTx(types.NewTransaction(cg.TxNonce(from), to, new(big.Int), big.NewInt(1000000), new(big.Int), input), signer, key)
|
tx, err := types.SignTx(types.NewTransaction(cg.TxNonce(from), to, new(big.Int), 1000000, new(big.Int), input), signer, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -65,7 +65,7 @@ func (cg *callHelper) MakeCall(private bool, key *ecdsa.PrivateKey, to common.Ad
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(joel): can we just pass nil instead of bc?
|
// TODO(joel): can we just pass nil instead of bc?
|
||||||
bc, _ := NewBlockChain(cg.db, params.QuorumTestChainConfig, ethash.NewFaker(), vm.Config{})
|
bc, _ := NewBlockChain(cg.db, nil, params.QuorumTestChainConfig, ethash.NewFaker(), vm.Config{})
|
||||||
context := NewEVMContext(msg, &cg.header, bc, &from)
|
context := NewEVMContext(msg, &cg.header, bc, &from)
|
||||||
vmenv := vm.NewEVM(context, publicState, privateState, params.QuorumTestChainConfig, vm.Config{})
|
vmenv := vm.NewEVM(context, publicState, privateState, params.QuorumTestChainConfig, vm.Config{})
|
||||||
_, _, _, err = ApplyMessage(vmenv, msg, cg.gp)
|
_, _, _, err = ApplyMessage(vmenv, msg, cg.gp)
|
||||||
|
@ -77,7 +77,7 @@ func (cg *callHelper) MakeCall(private bool, key *ecdsa.PrivateKey, to common.Ad
|
||||||
|
|
||||||
// MakeCallHelper returns a new callHelper
|
// MakeCallHelper returns a new callHelper
|
||||||
func MakeCallHelper() *callHelper {
|
func MakeCallHelper() *callHelper {
|
||||||
memdb, _ := ethdb.NewMemDatabase()
|
memdb := ethdb.NewMemDatabase()
|
||||||
db := state.NewDatabase(memdb)
|
db := state.NewDatabase(memdb)
|
||||||
|
|
||||||
publicState, err := state.New(common.Hash{}, db)
|
publicState, err := state.New(common.Hash{}, db)
|
||||||
|
@ -91,7 +91,7 @@ func MakeCallHelper() *callHelper {
|
||||||
cg := &callHelper{
|
cg := &callHelper{
|
||||||
db: memdb,
|
db: memdb,
|
||||||
nonces: make(map[common.Address]uint64),
|
nonces: make(map[common.Address]uint64),
|
||||||
gp: new(GasPool).AddGas(big.NewInt(5000000)),
|
gp: new(GasPool).AddGas(5000000),
|
||||||
PublicState: publicState,
|
PublicState: publicState,
|
||||||
PrivateState: privateState,
|
PrivateState: privateState,
|
||||||
}
|
}
|
||||||
|
|
|
@ -98,7 +98,7 @@ func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
|
||||||
b.SetCoinbase(common.Address{})
|
b.SetCoinbase(common.Address{})
|
||||||
}
|
}
|
||||||
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
|
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
|
||||||
receipt, _, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.statedb, &b.header, tx, b.header.GasUsed, vm.Config{})
|
receipt, _, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,663 @@
|
||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DatabaseReader wraps the Get method of a backing data store.
|
||||||
|
type DatabaseReader interface {
|
||||||
|
Get(key []byte) (value []byte, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DatabaseDeleter wraps the Delete method of a backing data store.
|
||||||
|
type DatabaseDeleter interface {
|
||||||
|
Delete(key []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
headHeaderKey = []byte("LastHeader")
|
||||||
|
headBlockKey = []byte("LastBlock")
|
||||||
|
headFastKey = []byte("LastFast")
|
||||||
|
|
||||||
|
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
|
||||||
|
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
|
||||||
|
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
|
||||||
|
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
|
||||||
|
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
|
||||||
|
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
|
||||||
|
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
|
||||||
|
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
|
||||||
|
bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
|
||||||
|
|
||||||
|
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
|
||||||
|
configPrefix = []byte("ethereum-config-") // config prefix for the db
|
||||||
|
|
||||||
|
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
|
||||||
|
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
|
||||||
|
|
||||||
|
// used by old db, now only used for conversion
|
||||||
|
oldReceiptsPrefix = []byte("receipts-")
|
||||||
|
oldTxMetaSuffix = []byte{0x01}
|
||||||
|
|
||||||
|
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
|
||||||
|
|
||||||
|
preimageCounter = metrics.NewCounter()
|
||||||
|
preimageHitCounter = metrics.NewCounter()
|
||||||
|
|
||||||
|
privateRootPrefix = []byte("P")
|
||||||
|
privateblockReceiptsPrefix = []byte("Pr") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
|
||||||
|
privateReceiptPrefix = []byte("Prs")
|
||||||
|
privateBloomPrefix = []byte("Pb")
|
||||||
|
)
|
||||||
|
|
||||||
|
// txLookupEntry is a positional metadata to help looking up the data content of
|
||||||
|
// a transaction or receipt given only its hash.
|
||||||
|
type txLookupEntry struct {
|
||||||
|
BlockHash common.Hash
|
||||||
|
BlockIndex uint64
|
||||||
|
Index uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeBlockNumber encodes a block number as big endian uint64
|
||||||
|
func encodeBlockNumber(number uint64) []byte {
|
||||||
|
enc := make([]byte, 8)
|
||||||
|
binary.BigEndian.PutUint64(enc, number)
|
||||||
|
return enc
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCanonicalHash retrieves a hash assigned to a canonical block number.
|
||||||
|
func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash {
|
||||||
|
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
return common.BytesToHash(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// missingNumber is returned by GetBlockNumber if no header with the
|
||||||
|
// given block hash has been stored in the database
|
||||||
|
const missingNumber = uint64(0xffffffffffffffff)
|
||||||
|
|
||||||
|
// GetBlockNumber returns the block number assigned to a block hash
|
||||||
|
// if the corresponding header is present in the database
|
||||||
|
func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 {
|
||||||
|
data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
|
||||||
|
if len(data) != 8 {
|
||||||
|
return missingNumber
|
||||||
|
}
|
||||||
|
return binary.BigEndian.Uint64(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeadHeaderHash retrieves the hash of the current canonical head block's
|
||||||
|
// header. The difference between this and GetHeadBlockHash is that whereas the
|
||||||
|
// last block hash is only updated upon a full block import, the last header
|
||||||
|
// hash is updated already at header import, allowing head tracking for the
|
||||||
|
// light synchronization mechanism.
|
||||||
|
func GetHeadHeaderHash(db DatabaseReader) common.Hash {
|
||||||
|
data, _ := db.Get(headHeaderKey)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
return common.BytesToHash(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeadBlockHash retrieves the hash of the current canonical head block.
|
||||||
|
func GetHeadBlockHash(db DatabaseReader) common.Hash {
|
||||||
|
data, _ := db.Get(headBlockKey)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
return common.BytesToHash(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
|
||||||
|
// fast synchronization. The difference between this and GetHeadBlockHash is that
|
||||||
|
// whereas the last block hash is only updated upon a full block import, the last
|
||||||
|
// fast hash is updated when importing pre-processed blocks.
|
||||||
|
func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
|
||||||
|
data, _ := db.Get(headFastKey)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
return common.BytesToHash(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
|
||||||
|
// if the header's not found.
|
||||||
|
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
|
data, _ := db.Get(headerKey(hash, number))
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeader retrieves the block header corresponding to the hash, nil if none
|
||||||
|
// found.
|
||||||
|
func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
|
||||||
|
data := GetHeaderRLP(db, hash, number)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
header := new(types.Header)
|
||||||
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
||||||
|
log.Error("Invalid block header RLP", "hash", hash, "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return header
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
|
||||||
|
func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
|
data, _ := db.Get(blockBodyKey(hash, number))
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
func headerKey(hash common.Hash, number uint64) []byte {
|
||||||
|
return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func blockBodyKey(hash common.Hash, number uint64) []byte {
|
||||||
|
return append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBody retrieves the block body (transactons, uncles) corresponding to the
|
||||||
|
// hash, nil if none found.
|
||||||
|
func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
|
||||||
|
data := GetBodyRLP(db, hash, number)
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
body := new(types.Body)
|
||||||
|
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
||||||
|
log.Error("Invalid block body RLP", "hash", hash, "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if
|
||||||
|
// none found.
|
||||||
|
func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
|
||||||
|
data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
td := new(big.Int)
|
||||||
|
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
||||||
|
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return td
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlock retrieves an entire block corresponding to the hash, assembling it
|
||||||
|
// back from the stored header and body. If either the header or body could not
|
||||||
|
// be retrieved nil is returned.
|
||||||
|
//
|
||||||
|
// Note, due to concurrent download of header and block body the header and thus
|
||||||
|
// canonical hash can be stored in the database but the body data not (yet).
|
||||||
|
func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
|
||||||
|
// Retrieve the block header and body contents
|
||||||
|
header := GetHeader(db, hash, number)
|
||||||
|
if header == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
body := GetBody(db, hash, number)
|
||||||
|
if body == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Reassemble the block and return
|
||||||
|
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockReceipts retrieves the receipts generated by the transactions included
|
||||||
|
// in a block given by its hash.
|
||||||
|
func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
|
||||||
|
data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
storageReceipts := []*types.ReceiptForStorage{}
|
||||||
|
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
||||||
|
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
receipts := make(types.Receipts, len(storageReceipts))
|
||||||
|
for i, receipt := range storageReceipts {
|
||||||
|
receipts[i] = (*types.Receipt)(receipt)
|
||||||
|
}
|
||||||
|
return receipts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTxLookupEntry retrieves the positional metadata associated with a transaction
|
||||||
|
// hash to allow retrieving the transaction or receipt by hash.
|
||||||
|
func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
|
||||||
|
// Load the positional metadata from disk and bail if it fails
|
||||||
|
data, _ := db.Get(append(lookupPrefix, hash.Bytes()...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
// Parse and return the contents of the lookup entry
|
||||||
|
var entry txLookupEntry
|
||||||
|
if err := rlp.DecodeBytes(data, &entry); err != nil {
|
||||||
|
log.Error("Invalid lookup entry RLP", "hash", hash, "err", err)
|
||||||
|
return common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
return entry.BlockHash, entry.BlockIndex, entry.Index
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTransaction retrieves a specific transaction from the database, along with
|
||||||
|
// its added positional metadata.
|
||||||
|
func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
|
||||||
|
// Retrieve the lookup metadata and resolve the transaction from the body
|
||||||
|
blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash)
|
||||||
|
|
||||||
|
if blockHash != (common.Hash{}) {
|
||||||
|
body := GetBody(db, blockHash, blockNumber)
|
||||||
|
if body == nil || len(body.Transactions) <= int(txIndex) {
|
||||||
|
log.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
return body.Transactions[txIndex], blockHash, blockNumber, txIndex
|
||||||
|
}
|
||||||
|
// Old transaction representation, load the transaction and it's metadata separately
|
||||||
|
data, _ := db.Get(hash.Bytes())
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
var tx types.Transaction
|
||||||
|
if err := rlp.DecodeBytes(data, &tx); err != nil {
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
// Retrieve the blockchain positional metadata
|
||||||
|
data, _ = db.Get(append(hash.Bytes(), oldTxMetaSuffix...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
var entry txLookupEntry
|
||||||
|
if err := rlp.DecodeBytes(data, &entry); err != nil {
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
return &tx, entry.BlockHash, entry.BlockIndex, entry.Index
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReceipt retrieves a specific transaction receipt from the database, along with
|
||||||
|
// its added positional metadata.
|
||||||
|
func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
|
||||||
|
// Retrieve the lookup metadata and resolve the receipt from the receipts
|
||||||
|
blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash)
|
||||||
|
|
||||||
|
if blockHash != (common.Hash{}) {
|
||||||
|
receipts := GetBlockReceipts(db, blockHash, blockNumber)
|
||||||
|
if len(receipts) <= int(receiptIndex) {
|
||||||
|
log.Error("Receipt refereced missing", "number", blockNumber, "hash", blockHash, "index", receiptIndex)
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
|
||||||
|
}
|
||||||
|
// Old receipt representation, load the receipt and set an unknown metadata
|
||||||
|
data, _ := db.Get(append(oldReceiptsPrefix, hash[:]...))
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil, common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
var receipt types.ReceiptForStorage
|
||||||
|
err := rlp.DecodeBytes(data, &receipt)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Invalid receipt RLP", "hash", hash, "err", err)
|
||||||
|
}
|
||||||
|
return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBloomBits retrieves the compressed bloom bit vector belonging to the given
|
||||||
|
// section and bit index from the.
|
||||||
|
func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) []byte {
|
||||||
|
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
|
||||||
|
|
||||||
|
binary.BigEndian.PutUint16(key[1:], uint16(bit))
|
||||||
|
binary.BigEndian.PutUint64(key[3:], section)
|
||||||
|
|
||||||
|
bits, _ := db.Get(key)
|
||||||
|
return bits
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteCanonicalHash stores the canonical hash for the given block number.
|
||||||
|
func WriteCanonicalHash(db ethdb.Putter, hash common.Hash, number uint64) error {
|
||||||
|
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
|
||||||
|
if err := db.Put(key, hash.Bytes()); err != nil {
|
||||||
|
log.Crit("Failed to store number to hash mapping", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteHeadHeaderHash stores the head header's hash.
|
||||||
|
func WriteHeadHeaderHash(db ethdb.Putter, hash common.Hash) error {
|
||||||
|
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
||||||
|
log.Crit("Failed to store last header's hash", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteHeadBlockHash stores the head block's hash.
|
||||||
|
func WriteHeadBlockHash(db ethdb.Putter, hash common.Hash) error {
|
||||||
|
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
||||||
|
log.Crit("Failed to store last block's hash", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteHeadFastBlockHash stores the fast head block's hash.
|
||||||
|
func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error {
|
||||||
|
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
|
||||||
|
log.Crit("Failed to store last fast block's hash", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteHeader serializes a block header into the database.
|
||||||
|
func WriteHeader(db ethdb.Putter, header *types.Header) error {
|
||||||
|
data, err := rlp.EncodeToBytes(header)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
hash := header.Hash().Bytes()
|
||||||
|
num := header.Number.Uint64()
|
||||||
|
encNum := encodeBlockNumber(num)
|
||||||
|
key := append(blockHashPrefix, hash...)
|
||||||
|
if err := db.Put(key, encNum); err != nil {
|
||||||
|
log.Crit("Failed to store hash to number mapping", "err", err)
|
||||||
|
}
|
||||||
|
key = append(append(headerPrefix, encNum...), hash...)
|
||||||
|
if err := db.Put(key, data); err != nil {
|
||||||
|
log.Crit("Failed to store header", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBody serializes the body of a block into the database.
|
||||||
|
func WriteBody(db ethdb.Putter, hash common.Hash, number uint64, body *types.Body) error {
|
||||||
|
data, err := rlp.EncodeToBytes(body)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return WriteBodyRLP(db, hash, number, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBodyRLP writes a serialized body of a block into the database.
|
||||||
|
func WriteBodyRLP(db ethdb.Putter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
|
||||||
|
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
|
if err := db.Put(key, rlp); err != nil {
|
||||||
|
log.Crit("Failed to store block body", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTd serializes the total difficulty of a block into the database.
|
||||||
|
func WriteTd(db ethdb.Putter, hash common.Hash, number uint64, td *big.Int) error {
|
||||||
|
data, err := rlp.EncodeToBytes(td)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
|
||||||
|
if err := db.Put(key, data); err != nil {
|
||||||
|
log.Crit("Failed to store block total difficulty", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBlock serializes a block into the database, header and body separately.
|
||||||
|
func WriteBlock(db ethdb.Putter, block *types.Block) error {
|
||||||
|
// Store the body first to retain database consistency
|
||||||
|
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Store the header too, signaling full block ownership
|
||||||
|
if err := WriteHeader(db, block.Header()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBlockReceipts stores all the transaction receipts belonging to a block
|
||||||
|
// as a single receipt slice. This is used during chain reorganisations for
|
||||||
|
// rescheduling dropped transactions.
|
||||||
|
func WriteBlockReceipts(db ethdb.Putter, hash common.Hash, number uint64, receipts types.Receipts) error {
|
||||||
|
// Convert the receipts into their storage form and serialize them
|
||||||
|
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
|
||||||
|
for i, receipt := range receipts {
|
||||||
|
storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
|
||||||
|
}
|
||||||
|
bytes, err := rlp.EncodeToBytes(storageReceipts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Store the flattened receipt slice
|
||||||
|
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
|
||||||
|
if err := db.Put(key, bytes); err != nil {
|
||||||
|
log.Crit("Failed to store block receipts", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTxLookupEntries stores a positional metadata for every transaction from
|
||||||
|
// a block, enabling hash based transaction and receipt lookups.
|
||||||
|
func WriteTxLookupEntries(db ethdb.Putter, block *types.Block) error {
|
||||||
|
// Iterate over each transaction and encode its metadata
|
||||||
|
for i, tx := range block.Transactions() {
|
||||||
|
entry := txLookupEntry{
|
||||||
|
BlockHash: block.Hash(),
|
||||||
|
BlockIndex: block.NumberU64(),
|
||||||
|
Index: uint64(i),
|
||||||
|
}
|
||||||
|
data, err := rlp.EncodeToBytes(entry)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := db.Put(append(lookupPrefix, tx.Hash().Bytes()...), data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBloomBits writes the compressed bloom bits vector belonging to the given
|
||||||
|
// section and bit index.
|
||||||
|
func WriteBloomBits(db ethdb.Putter, bit uint, section uint64, head common.Hash, bits []byte) {
|
||||||
|
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
|
||||||
|
|
||||||
|
binary.BigEndian.PutUint16(key[1:], uint16(bit))
|
||||||
|
binary.BigEndian.PutUint64(key[3:], section)
|
||||||
|
|
||||||
|
if err := db.Put(key, bits); err != nil {
|
||||||
|
log.Crit("Failed to store bloom bits", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteCanonicalHash removes the number to hash canonical mapping.
|
||||||
|
func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
|
||||||
|
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteHeader removes all block header data associated with a hash.
|
||||||
|
func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||||
|
db.Delete(append(blockHashPrefix, hash.Bytes()...))
|
||||||
|
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBody removes all block body data associated with a hash.
|
||||||
|
func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||||
|
db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTd removes all block total difficulty data associated with a hash.
|
||||||
|
func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||||
|
db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBlock removes all block data associated with a hash.
|
||||||
|
func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||||
|
DeleteBlockReceipts(db, hash, number)
|
||||||
|
DeleteHeader(db, hash, number)
|
||||||
|
DeleteBody(db, hash, number)
|
||||||
|
DeleteTd(db, hash, number)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteBlockReceipts removes all receipt data associated with a block hash.
|
||||||
|
func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
|
||||||
|
db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteTxLookupEntry removes all transaction data associated with a hash.
|
||||||
|
func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) {
|
||||||
|
db.Delete(append(lookupPrefix, hash.Bytes()...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreimageTable returns a Database instance with the key prefix for preimage entries.
|
||||||
|
func PreimageTable(db ethdb.Database) ethdb.Database {
|
||||||
|
return ethdb.NewTable(db, preimagePrefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WritePreimages writes the provided set of preimages to the database. `number` is the
|
||||||
|
// current block number, and is used for debug messages only.
|
||||||
|
func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash][]byte) error {
|
||||||
|
table := PreimageTable(db)
|
||||||
|
batch := table.NewBatch()
|
||||||
|
hitCount := 0
|
||||||
|
for hash, preimage := range preimages {
|
||||||
|
if _, err := table.Get(hash.Bytes()); err != nil {
|
||||||
|
batch.Put(hash.Bytes(), preimage)
|
||||||
|
hitCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
preimageCounter.Inc(int64(len(preimages)))
|
||||||
|
preimageHitCounter.Inc(int64(hitCount))
|
||||||
|
if hitCount > 0 {
|
||||||
|
if err := batch.Write(); err != nil {
|
||||||
|
return fmt.Errorf("preimage write fail for block %d: %v", number, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBlockChainVersion reads the version number from db.
|
||||||
|
func GetBlockChainVersion(db DatabaseReader) int {
|
||||||
|
var vsn uint
|
||||||
|
enc, _ := db.Get([]byte("BlockchainVersion"))
|
||||||
|
rlp.DecodeBytes(enc, &vsn)
|
||||||
|
return int(vsn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBlockChainVersion writes vsn as the version number to db.
|
||||||
|
func WriteBlockChainVersion(db ethdb.Putter, vsn int) {
|
||||||
|
enc, _ := rlp.EncodeToBytes(uint(vsn))
|
||||||
|
db.Put([]byte("BlockchainVersion"), enc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteChainConfig writes the chain config settings to the database.
|
||||||
|
func WriteChainConfig(db ethdb.Putter, hash common.Hash, cfg *params.ChainConfig) error {
|
||||||
|
// short circuit and ignore if nil config. GetChainConfig
|
||||||
|
// will return a default.
|
||||||
|
if cfg == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonChainConfig, err := json.Marshal(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return db.Put(append(configPrefix, hash[:]...), jsonChainConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChainConfig will fetch the network settings based on the given hash.
|
||||||
|
func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) {
|
||||||
|
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
|
||||||
|
if len(jsonChainConfig) == 0 {
|
||||||
|
return nil, ErrChainConfigNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
var config params.ChainConfig
|
||||||
|
if err := json.Unmarshal(jsonChainConfig, &config); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &config, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindCommonAncestor returns the last common ancestor of two block headers
|
||||||
|
func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header {
|
||||||
|
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
|
||||||
|
a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
|
||||||
|
if a == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for an := a.Number.Uint64(); an < b.Number.Uint64(); {
|
||||||
|
b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
|
||||||
|
if b == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for a.Hash() != b.Hash() {
|
||||||
|
a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
|
||||||
|
if a == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
b = GetHeader(db, b.ParentHash, b.Number.Uint64()-1)
|
||||||
|
if b == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetPrivateStateRoot(db ethdb.Database, blockRoot common.Hash) common.Hash {
|
||||||
|
root, _ := db.Get(append(privateRootPrefix, blockRoot[:]...))
|
||||||
|
return common.BytesToHash(root)
|
||||||
|
}
|
||||||
|
|
||||||
|
func WritePrivateStateRoot(db ethdb.Database, blockRoot, root common.Hash) error {
|
||||||
|
return db.Put(append(privateRootPrefix, blockRoot[:]...), root[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// WritePrivateBlockBloom creates a bloom filter for the given receipts and saves it to the database
|
||||||
|
// with the number given as identifier (i.e. block number).
|
||||||
|
func WritePrivateBlockBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
|
||||||
|
rbloom := types.CreateBloom(receipts)
|
||||||
|
return db.Put(append(privateBloomPrefix, encodeBlockNumber(number)...), rbloom[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPrivateBlockBloom retrieves the private bloom associated with the given number.
|
||||||
|
func GetPrivateBlockBloom(db ethdb.Database, number uint64) (bloom types.Bloom) {
|
||||||
|
data, _ := db.Get(append(privateBloomPrefix, encodeBlockNumber(number)...))
|
||||||
|
if len(data) > 0 {
|
||||||
|
bloom = types.BytesToBloom(data)
|
||||||
|
}
|
||||||
|
return bloom
|
||||||
|
}
|
|
@ -0,0 +1,388 @@
|
||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math/big"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Tests block header storage and retrieval operations.
|
||||||
|
func TestHeaderStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
// Create a test header to move around the database and make sure it's really new
|
||||||
|
header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
|
||||||
|
if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent header returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Write and verify the header in the database
|
||||||
|
if err := WriteHeader(db, header); err != nil {
|
||||||
|
t.Fatalf("Failed to write header into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
||||||
|
t.Fatalf("Stored header not found")
|
||||||
|
} else if entry.Hash() != header.Hash() {
|
||||||
|
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
|
||||||
|
}
|
||||||
|
if entry := GetHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
|
||||||
|
t.Fatalf("Stored header RLP not found")
|
||||||
|
} else {
|
||||||
|
hasher := sha3.NewKeccak256()
|
||||||
|
hasher.Write(entry)
|
||||||
|
|
||||||
|
if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
|
||||||
|
t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete the header and verify the execution
|
||||||
|
DeleteHeader(db, header.Hash(), header.Number.Uint64())
|
||||||
|
if entry := GetHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
|
||||||
|
t.Fatalf("Deleted header returned: %v", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests block body storage and retrieval operations.
|
||||||
|
func TestBodyStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
// Create a test body to move around the database and make sure it's really new
|
||||||
|
body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
|
||||||
|
|
||||||
|
hasher := sha3.NewKeccak256()
|
||||||
|
rlp.Encode(hasher, body)
|
||||||
|
hash := common.BytesToHash(hasher.Sum(nil))
|
||||||
|
|
||||||
|
if entry := GetBody(db, hash, 0); entry != nil {
|
||||||
|
t.Fatalf("Non existent body returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Write and verify the body in the database
|
||||||
|
if err := WriteBody(db, hash, 0, body); err != nil {
|
||||||
|
t.Fatalf("Failed to write body into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetBody(db, hash, 0); entry == nil {
|
||||||
|
t.Fatalf("Stored body not found")
|
||||||
|
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
|
||||||
|
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
|
||||||
|
}
|
||||||
|
if entry := GetBodyRLP(db, hash, 0); entry == nil {
|
||||||
|
t.Fatalf("Stored body RLP not found")
|
||||||
|
} else {
|
||||||
|
hasher := sha3.NewKeccak256()
|
||||||
|
hasher.Write(entry)
|
||||||
|
|
||||||
|
if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
|
||||||
|
t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete the body and verify the execution
|
||||||
|
DeleteBody(db, hash, 0)
|
||||||
|
if entry := GetBody(db, hash, 0); entry != nil {
|
||||||
|
t.Fatalf("Deleted body returned: %v", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests block storage and retrieval operations.
|
||||||
|
func TestBlockStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
// Create a test block to move around the database and make sure it's really new
|
||||||
|
block := types.NewBlockWithHeader(&types.Header{
|
||||||
|
Extra: []byte("test block"),
|
||||||
|
UncleHash: types.EmptyUncleHash,
|
||||||
|
TxHash: types.EmptyRootHash,
|
||||||
|
ReceiptHash: types.EmptyRootHash,
|
||||||
|
})
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent block returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent header returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent body returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Write and verify the block in the database
|
||||||
|
if err := WriteBlock(db, block); err != nil {
|
||||||
|
t.Fatalf("Failed to write block into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
|
||||||
|
t.Fatalf("Stored block not found")
|
||||||
|
} else if entry.Hash() != block.Hash() {
|
||||||
|
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
|
||||||
|
}
|
||||||
|
if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry == nil {
|
||||||
|
t.Fatalf("Stored header not found")
|
||||||
|
} else if entry.Hash() != block.Header().Hash() {
|
||||||
|
t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
|
||||||
|
}
|
||||||
|
if entry := GetBody(db, block.Hash(), block.NumberU64()); entry == nil {
|
||||||
|
t.Fatalf("Stored body not found")
|
||||||
|
} else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
|
||||||
|
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
|
||||||
|
}
|
||||||
|
// Delete the block and verify the execution
|
||||||
|
DeleteBlock(db, block.Hash(), block.NumberU64())
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Deleted block returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetHeader(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Deleted header returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetBody(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Deleted body returned: %v", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that partial block contents don't get reassembled into full blocks.
|
||||||
|
func TestPartialBlockStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
block := types.NewBlockWithHeader(&types.Header{
|
||||||
|
Extra: []byte("test block"),
|
||||||
|
UncleHash: types.EmptyUncleHash,
|
||||||
|
TxHash: types.EmptyRootHash,
|
||||||
|
ReceiptHash: types.EmptyRootHash,
|
||||||
|
})
|
||||||
|
// Store a header and check that it's not recognized as a block
|
||||||
|
if err := WriteHeader(db, block.Header()); err != nil {
|
||||||
|
t.Fatalf("Failed to write header into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent block returned: %v", entry)
|
||||||
|
}
|
||||||
|
DeleteHeader(db, block.Hash(), block.NumberU64())
|
||||||
|
|
||||||
|
// Store a body and check that it's not recognized as a block
|
||||||
|
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
||||||
|
t.Fatalf("Failed to write body into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry != nil {
|
||||||
|
t.Fatalf("Non existent block returned: %v", entry)
|
||||||
|
}
|
||||||
|
DeleteBody(db, block.Hash(), block.NumberU64())
|
||||||
|
|
||||||
|
// Store a header and a body separately and check reassembly
|
||||||
|
if err := WriteHeader(db, block.Header()); err != nil {
|
||||||
|
t.Fatalf("Failed to write header into database: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
||||||
|
t.Fatalf("Failed to write body into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetBlock(db, block.Hash(), block.NumberU64()); entry == nil {
|
||||||
|
t.Fatalf("Stored block not found")
|
||||||
|
} else if entry.Hash() != block.Hash() {
|
||||||
|
t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests block total difficulty storage and retrieval operations.
|
||||||
|
func TestTdStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
// Create a test TD to move around the database and make sure it's really new
|
||||||
|
hash, td := common.Hash{}, big.NewInt(314)
|
||||||
|
if entry := GetTd(db, hash, 0); entry != nil {
|
||||||
|
t.Fatalf("Non existent TD returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Write and verify the TD in the database
|
||||||
|
if err := WriteTd(db, hash, 0, td); err != nil {
|
||||||
|
t.Fatalf("Failed to write TD into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetTd(db, hash, 0); entry == nil {
|
||||||
|
t.Fatalf("Stored TD not found")
|
||||||
|
} else if entry.Cmp(td) != 0 {
|
||||||
|
t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
|
||||||
|
}
|
||||||
|
// Delete the TD and verify the execution
|
||||||
|
DeleteTd(db, hash, 0)
|
||||||
|
if entry := GetTd(db, hash, 0); entry != nil {
|
||||||
|
t.Fatalf("Deleted TD returned: %v", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that canonical numbers can be mapped to hashes and retrieved.
|
||||||
|
func TestCanonicalMappingStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
// Create a test canonical number and assinged hash to move around
|
||||||
|
hash, number := common.Hash{0: 0xff}, uint64(314)
|
||||||
|
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
|
||||||
|
t.Fatalf("Non existent canonical mapping returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Write and verify the TD in the database
|
||||||
|
if err := WriteCanonicalHash(db, hash, number); err != nil {
|
||||||
|
t.Fatalf("Failed to write canonical mapping into database: %v", err)
|
||||||
|
}
|
||||||
|
if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
|
||||||
|
t.Fatalf("Stored canonical mapping not found")
|
||||||
|
} else if entry != hash {
|
||||||
|
t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
|
||||||
|
}
|
||||||
|
// Delete the TD and verify the execution
|
||||||
|
DeleteCanonicalHash(db, number)
|
||||||
|
if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
|
||||||
|
t.Fatalf("Deleted canonical mapping returned: %v", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that head headers and head blocks can be assigned, individually.
|
||||||
|
func TestHeadStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
|
||||||
|
blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
|
||||||
|
blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
|
||||||
|
|
||||||
|
// Check that no head entries are in a pristine database
|
||||||
|
if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
|
||||||
|
t.Fatalf("Non head header entry returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
|
||||||
|
t.Fatalf("Non head block entry returned: %v", entry)
|
||||||
|
}
|
||||||
|
if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
|
||||||
|
t.Fatalf("Non fast head block entry returned: %v", entry)
|
||||||
|
}
|
||||||
|
// Assign separate entries for the head header and block
|
||||||
|
if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
|
||||||
|
t.Fatalf("Failed to write head header hash: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
|
||||||
|
t.Fatalf("Failed to write head block hash: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
|
||||||
|
t.Fatalf("Failed to write fast head block hash: %v", err)
|
||||||
|
}
|
||||||
|
// Check that both heads are present, and different (i.e. two heads maintained)
|
||||||
|
if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
|
||||||
|
t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
|
||||||
|
}
|
||||||
|
if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
|
||||||
|
t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
|
||||||
|
}
|
||||||
|
if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
|
||||||
|
t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that positional lookup metadata can be stored and retrieved.
|
||||||
|
func TestLookupStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), big.NewInt(1111), big.NewInt(11111), []byte{0x11, 0x11, 0x11})
|
||||||
|
tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), big.NewInt(2222), big.NewInt(22222), []byte{0x22, 0x22, 0x22})
|
||||||
|
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), big.NewInt(3333), big.NewInt(33333), []byte{0x33, 0x33, 0x33})
|
||||||
|
txs := []*types.Transaction{tx1, tx2, tx3}
|
||||||
|
|
||||||
|
block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil)
|
||||||
|
|
||||||
|
// Check that no transactions entries are in a pristine database
|
||||||
|
for i, tx := range txs {
|
||||||
|
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
|
||||||
|
t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Insert all the transactions into the database, and verify contents
|
||||||
|
if err := WriteBlock(db, block); err != nil {
|
||||||
|
t.Fatalf("failed to write block contents: %v", err)
|
||||||
|
}
|
||||||
|
if err := WriteTxLookupEntries(db, block); err != nil {
|
||||||
|
t.Fatalf("failed to write transactions: %v", err)
|
||||||
|
}
|
||||||
|
for i, tx := range txs {
|
||||||
|
if txn, hash, number, index := GetTransaction(db, tx.Hash()); txn == nil {
|
||||||
|
t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
|
||||||
|
} else {
|
||||||
|
if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
|
||||||
|
t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
|
||||||
|
}
|
||||||
|
if tx.String() != txn.String() {
|
||||||
|
t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete the transactions and check purge
|
||||||
|
for i, tx := range txs {
|
||||||
|
DeleteTxLookupEntry(db, tx.Hash())
|
||||||
|
if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
|
||||||
|
t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that receipts associated with a single block can be stored and retrieved.
|
||||||
|
func TestBlockReceiptStorage(t *testing.T) {
|
||||||
|
db, _ := ethdb.NewMemDatabase()
|
||||||
|
|
||||||
|
receipt1 := &types.Receipt{
|
||||||
|
Status: types.ReceiptStatusFailed,
|
||||||
|
CumulativeGasUsed: big.NewInt(1),
|
||||||
|
Logs: []*types.Log{
|
||||||
|
{Address: common.BytesToAddress([]byte{0x11})},
|
||||||
|
{Address: common.BytesToAddress([]byte{0x01, 0x11})},
|
||||||
|
},
|
||||||
|
TxHash: common.BytesToHash([]byte{0x11, 0x11}),
|
||||||
|
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
|
||||||
|
GasUsed: big.NewInt(111111),
|
||||||
|
}
|
||||||
|
receipt2 := &types.Receipt{
|
||||||
|
PostState: common.Hash{2}.Bytes(),
|
||||||
|
CumulativeGasUsed: big.NewInt(2),
|
||||||
|
Logs: []*types.Log{
|
||||||
|
{Address: common.BytesToAddress([]byte{0x22})},
|
||||||
|
{Address: common.BytesToAddress([]byte{0x02, 0x22})},
|
||||||
|
},
|
||||||
|
TxHash: common.BytesToHash([]byte{0x22, 0x22}),
|
||||||
|
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
|
||||||
|
GasUsed: big.NewInt(222222),
|
||||||
|
}
|
||||||
|
receipts := []*types.Receipt{receipt1, receipt2}
|
||||||
|
|
||||||
|
// Check that no receipt entries are in a pristine database
|
||||||
|
hash := common.BytesToHash([]byte{0x03, 0x14})
|
||||||
|
if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 {
|
||||||
|
t.Fatalf("non existent receipts returned: %v", rs)
|
||||||
|
}
|
||||||
|
// Insert the receipt slice into the database and check presence
|
||||||
|
if err := WriteBlockReceipts(db, hash, 0, receipts); err != nil {
|
||||||
|
t.Fatalf("failed to write block receipts: %v", err)
|
||||||
|
}
|
||||||
|
if rs := GetBlockReceipts(db, hash, 0); len(rs) == 0 {
|
||||||
|
t.Fatalf("no receipts returned")
|
||||||
|
} else {
|
||||||
|
for i := 0; i < len(receipts); i++ {
|
||||||
|
rlpHave, _ := rlp.EncodeToBytes(rs[i])
|
||||||
|
rlpWant, _ := rlp.EncodeToBytes(receipts[i])
|
||||||
|
|
||||||
|
if !bytes.Equal(rlpHave, rlpWant) {
|
||||||
|
t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Delete the receipt slice and check purge
|
||||||
|
DeleteBlockReceipts(db, hash, 0)
|
||||||
|
if rs := GetBlockReceipts(db, hash, 0); len(rs) != 0 {
|
||||||
|
t.Fatalf("deleted receipts returned: %v", rs)
|
||||||
|
}
|
||||||
|
}
|
|
@ -175,28 +175,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
|
||||||
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
|
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
root := statedb.IntermediateRoot(false)
|
|
||||||
head := &types.Header{
|
|
||||||
Number: new(big.Int).SetUint64(g.Number),
|
|
||||||
Nonce: types.EncodeNonce(g.Nonce),
|
|
||||||
Time: new(big.Int).SetUint64(g.Timestamp),
|
|
||||||
ParentHash: g.ParentHash,
|
|
||||||
Extra: g.ExtraData,
|
|
||||||
GasLimit: new(big.Int).SetUint64(g.GasLimit),
|
|
||||||
GasUsed: new(big.Int).SetUint64(g.GasUsed),
|
|
||||||
Difficulty: g.Difficulty,
|
|
||||||
MixDigest: g.Mixhash,
|
|
||||||
Coinbase: g.Coinbase,
|
|
||||||
Root: root,
|
|
||||||
}
|
|
||||||
if g.GasLimit == 0 {
|
|
||||||
head.GasLimit = params.GenesisGasLimit
|
|
||||||
}
|
|
||||||
if g.Difficulty == nil {
|
|
||||||
head.Difficulty = params.GenesisDifficulty
|
|
||||||
}
|
|
||||||
return types.NewBlock(head, nil, nil, nil), statedb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the existing chain configuration.
|
// Get the existing chain configuration.
|
||||||
newcfg := genesis.configOrDefault(stored)
|
newcfg := genesis.configOrDefault(stored)
|
||||||
|
|
File diff suppressed because one or more lines are too long
|
@ -94,11 +94,6 @@ func (s *stateObject) empty() bool {
|
||||||
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
|
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// empty returns whether the account is considered empty.
|
|
||||||
func (s *stateObject) empty() bool {
|
|
||||||
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Account is the Ethereum consensus representation of accounts.
|
// Account is the Ethereum consensus representation of accounts.
|
||||||
// These objects are stored in the main account trie.
|
// These objects are stored in the main account trie.
|
||||||
type Account struct {
|
type Account struct {
|
||||||
|
|
|
@ -103,12 +103,12 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.IsQuorum && tx.GasPrice() != nil && tx.GasPrice().Cmp(common.Big0) > 0 {
|
if config.IsQuorum && tx.GasPrice() != nil && tx.GasPrice().Cmp(common.Big0) > 0 {
|
||||||
return nil, nil, nil, ErrInvalidGasPrice
|
return nil, nil, 0, ErrInvalidGasPrice
|
||||||
}
|
}
|
||||||
|
|
||||||
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
|
msg, err := tx.AsMessage(types.MakeSigner(config, header.Number))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, err
|
return nil, nil, 0, err
|
||||||
}
|
}
|
||||||
// Create a new context to be used in the EVM environment
|
// Create a new context to be used in the EVM environment
|
||||||
context := NewEVMContext(msg, header, bc, author)
|
context := NewEVMContext(msg, header, bc, author)
|
||||||
|
@ -118,7 +118,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
|
||||||
// Apply the transaction to the current state (included in the env)
|
// Apply the transaction to the current state (included in the env)
|
||||||
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
|
_, gas, failed, err := ApplyMessage(vmenv, msg, gp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, err
|
return nil, nil, 0, err
|
||||||
}
|
}
|
||||||
// Update the state with pending changes
|
// Update the state with pending changes
|
||||||
var root []byte
|
var root []byte
|
||||||
|
|
|
@ -22,7 +22,6 @@ import (
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
@ -139,6 +138,7 @@ func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition
|
||||||
|
|
||||||
func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, uint64, bool, error) {
|
func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, uint64, bool, error) {
|
||||||
return NewStateTransition(evm, msg, gp).TransitionDb()
|
return NewStateTransition(evm, msg, gp).TransitionDb()
|
||||||
|
}
|
||||||
|
|
||||||
// to returns the recipient of the message.
|
// to returns the recipient of the message.
|
||||||
func (st *StateTransition) to() common.Address {
|
func (st *StateTransition) to() common.Address {
|
||||||
|
@ -217,8 +217,6 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo
|
||||||
} else {
|
} else {
|
||||||
data = st.data
|
data = st.data
|
||||||
}
|
}
|
||||||
st.refundGas()
|
|
||||||
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice))
|
|
||||||
|
|
||||||
// Pay intrinsic gas
|
// Pay intrinsic gas
|
||||||
gas, err := IntrinsicGas(st.data, contractCreation, homestead)
|
gas, err := IntrinsicGas(st.data, contractCreation, homestead)
|
||||||
|
|
|
@ -234,7 +234,6 @@ func newTxList(strict bool) *txList {
|
||||||
strict: strict,
|
strict: strict,
|
||||||
txs: newTxSortedMap(),
|
txs: newTxSortedMap(),
|
||||||
costcap: new(big.Int),
|
costcap: new(big.Int),
|
||||||
gascap: new(big.Int),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -854,7 +854,6 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
|
||||||
status[i] = TxStatusQueued
|
status[i] = TxStatusQueued
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pool.promoteExecutables(addrs)
|
|
||||||
}
|
}
|
||||||
return status
|
return status
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,6 +19,7 @@ package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sort"
|
"sort"
|
||||||
|
@ -169,6 +170,10 @@ type Block struct {
|
||||||
ReceivedFrom interface{}
|
ReceivedFrom interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *Block) String() string {
|
||||||
|
return fmt.Sprintf("{Header: %v}", b.header)
|
||||||
|
}
|
||||||
|
|
||||||
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
|
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
|
||||||
// code solely to facilitate upgrading the database from the old format to the
|
// code solely to facilitate upgrading the database from the old format to the
|
||||||
// new, after which it should be deleted. Do not use!
|
// new, after which it should be deleted. Do not use!
|
||||||
|
|
|
@ -1,434 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package bn256 implements a particular bilinear group at the 128-bit security level.
|
|
||||||
//
|
|
||||||
// Bilinear groups are the basis of many of the new cryptographic protocols
|
|
||||||
// that have been proposed over the past decade. They consist of a triplet of
|
|
||||||
// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ
|
|
||||||
// (where gₓ is a generator of the respective group). That function is called
|
|
||||||
// a pairing function.
|
|
||||||
//
|
|
||||||
// This package specifically implements the Optimal Ate pairing over a 256-bit
|
|
||||||
// Barreto-Naehrig curve as described in
|
|
||||||
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
|
|
||||||
// with the implementation described in that paper.
|
|
||||||
package bn256
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BUG(agl): this implementation is not constant time.
|
|
||||||
// TODO(agl): keep GF(p²) elements in Mongomery form.
|
|
||||||
|
|
||||||
// G1 is an abstract cyclic group. The zero value is suitable for use as the
|
|
||||||
// output of an operation, but cannot be used as an input.
|
|
||||||
type G1 struct {
|
|
||||||
p *curvePoint
|
|
||||||
}
|
|
||||||
|
|
||||||
// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
|
|
||||||
func RandomG1(r io.Reader) (*big.Int, *G1, error) {
|
|
||||||
var k *big.Int
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for {
|
|
||||||
k, err = rand.Int(r, Order)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if k.Sign() > 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return k, new(G1).ScalarBaseMult(k), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *G1) String() string {
|
|
||||||
return "bn256.G1" + g.p.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurvePoints returns p's curve points in big integer
|
|
||||||
func (e *G1) CurvePoints() (*big.Int, *big.Int, *big.Int, *big.Int) {
|
|
||||||
return e.p.x, e.p.y, e.p.z, e.p.t
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScalarBaseMult sets e to g*k where g is the generator of the group and
|
|
||||||
// then returns e.
|
|
||||||
func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newCurvePoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Mul(curveGen, k, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScalarMult sets e to a*k and then returns e.
|
|
||||||
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newCurvePoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Mul(a.p, k, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add sets e to a+b and then returns e.
|
|
||||||
// BUG(agl): this function is not complete: a==b fails.
|
|
||||||
func (e *G1) Add(a, b *G1) *G1 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newCurvePoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Add(a.p, b.p, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Neg sets e to -a and then returns e.
|
|
||||||
func (e *G1) Neg(a *G1) *G1 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newCurvePoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Negative(a.p)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal converts n to a byte slice.
|
|
||||||
func (n *G1) Marshal() []byte {
|
|
||||||
n.p.MakeAffine(nil)
|
|
||||||
|
|
||||||
xBytes := new(big.Int).Mod(n.p.x, P).Bytes()
|
|
||||||
yBytes := new(big.Int).Mod(n.p.y, P).Bytes()
|
|
||||||
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
ret := make([]byte, numBytes*2)
|
|
||||||
copy(ret[1*numBytes-len(xBytes):], xBytes)
|
|
||||||
copy(ret[2*numBytes-len(yBytes):], yBytes)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
|
||||||
// a group element and then returns e.
|
|
||||||
func (e *G1) Unmarshal(m []byte) (*G1, bool) {
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
if len(m) != 2*numBytes {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newCurvePoint(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
|
|
||||||
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
|
|
||||||
|
|
||||||
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
|
|
||||||
// This is the point at infinity.
|
|
||||||
e.p.y.SetInt64(1)
|
|
||||||
e.p.z.SetInt64(0)
|
|
||||||
e.p.t.SetInt64(0)
|
|
||||||
} else {
|
|
||||||
e.p.z.SetInt64(1)
|
|
||||||
e.p.t.SetInt64(1)
|
|
||||||
|
|
||||||
if !e.p.IsOnCurve() {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return e, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
|
||||||
// output of an operation, but cannot be used as an input.
|
|
||||||
type G2 struct {
|
|
||||||
p *twistPoint
|
|
||||||
}
|
|
||||||
|
|
||||||
// RandomG1 returns x and g₂ˣ where x is a random, non-zero number read from r.
|
|
||||||
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
|
|
||||||
var k *big.Int
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for {
|
|
||||||
k, err = rand.Int(r, Order)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if k.Sign() > 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return k, new(G2).ScalarBaseMult(k), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *G2) String() string {
|
|
||||||
return "bn256.G2" + g.p.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurvePoints returns the curve points of p which includes the real
|
|
||||||
// and imaginary parts of the curve point.
|
|
||||||
func (e *G2) CurvePoints() (*gfP2, *gfP2, *gfP2, *gfP2) {
|
|
||||||
return e.p.x, e.p.y, e.p.z, e.p.t
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScalarBaseMult sets e to g*k where g is the generator of the group and
|
|
||||||
// then returns out.
|
|
||||||
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newTwistPoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Mul(twistGen, k, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScalarMult sets e to a*k and then returns e.
|
|
||||||
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newTwistPoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Mul(a.p, k, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add sets e to a+b and then returns e.
|
|
||||||
// BUG(agl): this function is not complete: a==b fails.
|
|
||||||
func (e *G2) Add(a, b *G2) *G2 {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newTwistPoint(nil)
|
|
||||||
}
|
|
||||||
e.p.Add(a.p, b.p, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal converts n into a byte slice.
|
|
||||||
func (n *G2) Marshal() []byte {
|
|
||||||
n.p.MakeAffine(nil)
|
|
||||||
|
|
||||||
xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes()
|
|
||||||
xyBytes := new(big.Int).Mod(n.p.x.y, P).Bytes()
|
|
||||||
yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes()
|
|
||||||
yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes()
|
|
||||||
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
ret := make([]byte, numBytes*4)
|
|
||||||
copy(ret[1*numBytes-len(xxBytes):], xxBytes)
|
|
||||||
copy(ret[2*numBytes-len(xyBytes):], xyBytes)
|
|
||||||
copy(ret[3*numBytes-len(yxBytes):], yxBytes)
|
|
||||||
copy(ret[4*numBytes-len(yyBytes):], yyBytes)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
|
||||||
// a group element and then returns e.
|
|
||||||
func (e *G2) Unmarshal(m []byte) (*G2, bool) {
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
if len(m) != 4*numBytes {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newTwistPoint(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
|
||||||
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
|
||||||
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
|
||||||
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
|
||||||
|
|
||||||
if e.p.x.x.Sign() == 0 &&
|
|
||||||
e.p.x.y.Sign() == 0 &&
|
|
||||||
e.p.y.x.Sign() == 0 &&
|
|
||||||
e.p.y.y.Sign() == 0 {
|
|
||||||
// This is the point at infinity.
|
|
||||||
e.p.y.SetOne()
|
|
||||||
e.p.z.SetZero()
|
|
||||||
e.p.t.SetZero()
|
|
||||||
} else {
|
|
||||||
e.p.z.SetOne()
|
|
||||||
e.p.t.SetOne()
|
|
||||||
|
|
||||||
if !e.p.IsOnCurve() {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return e, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// GT is an abstract cyclic group. The zero value is suitable for use as the
|
|
||||||
// output of an operation, but cannot be used as an input.
|
|
||||||
type GT struct {
|
|
||||||
p *gfP12
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GT) String() string {
|
|
||||||
return "bn256.GT" + g.p.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScalarMult sets e to a*k and then returns e.
|
|
||||||
func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newGFp12(nil)
|
|
||||||
}
|
|
||||||
e.p.Exp(a.p, k, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add sets e to a+b and then returns e.
|
|
||||||
func (e *GT) Add(a, b *GT) *GT {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newGFp12(nil)
|
|
||||||
}
|
|
||||||
e.p.Mul(a.p, b.p, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Neg sets e to -a and then returns e.
|
|
||||||
func (e *GT) Neg(a *GT) *GT {
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newGFp12(nil)
|
|
||||||
}
|
|
||||||
e.p.Invert(a.p, new(bnPool))
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal converts n into a byte slice.
|
|
||||||
func (n *GT) Marshal() []byte {
|
|
||||||
n.p.Minimal()
|
|
||||||
|
|
||||||
xxxBytes := n.p.x.x.x.Bytes()
|
|
||||||
xxyBytes := n.p.x.x.y.Bytes()
|
|
||||||
xyxBytes := n.p.x.y.x.Bytes()
|
|
||||||
xyyBytes := n.p.x.y.y.Bytes()
|
|
||||||
xzxBytes := n.p.x.z.x.Bytes()
|
|
||||||
xzyBytes := n.p.x.z.y.Bytes()
|
|
||||||
yxxBytes := n.p.y.x.x.Bytes()
|
|
||||||
yxyBytes := n.p.y.x.y.Bytes()
|
|
||||||
yyxBytes := n.p.y.y.x.Bytes()
|
|
||||||
yyyBytes := n.p.y.y.y.Bytes()
|
|
||||||
yzxBytes := n.p.y.z.x.Bytes()
|
|
||||||
yzyBytes := n.p.y.z.y.Bytes()
|
|
||||||
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
ret := make([]byte, numBytes*12)
|
|
||||||
copy(ret[1*numBytes-len(xxxBytes):], xxxBytes)
|
|
||||||
copy(ret[2*numBytes-len(xxyBytes):], xxyBytes)
|
|
||||||
copy(ret[3*numBytes-len(xyxBytes):], xyxBytes)
|
|
||||||
copy(ret[4*numBytes-len(xyyBytes):], xyyBytes)
|
|
||||||
copy(ret[5*numBytes-len(xzxBytes):], xzxBytes)
|
|
||||||
copy(ret[6*numBytes-len(xzyBytes):], xzyBytes)
|
|
||||||
copy(ret[7*numBytes-len(yxxBytes):], yxxBytes)
|
|
||||||
copy(ret[8*numBytes-len(yxyBytes):], yxyBytes)
|
|
||||||
copy(ret[9*numBytes-len(yyxBytes):], yyxBytes)
|
|
||||||
copy(ret[10*numBytes-len(yyyBytes):], yyyBytes)
|
|
||||||
copy(ret[11*numBytes-len(yzxBytes):], yzxBytes)
|
|
||||||
copy(ret[12*numBytes-len(yzyBytes):], yzyBytes)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
|
||||||
// a group element and then returns e.
|
|
||||||
func (e *GT) Unmarshal(m []byte) (*GT, bool) {
|
|
||||||
// Each value is a 256-bit number.
|
|
||||||
const numBytes = 256 / 8
|
|
||||||
|
|
||||||
if len(m) != 12*numBytes {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.p == nil {
|
|
||||||
e.p = newGFp12(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
|
||||||
e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
|
||||||
e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
|
||||||
e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
|
||||||
e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes])
|
|
||||||
e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes])
|
|
||||||
e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes])
|
|
||||||
e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes])
|
|
||||||
e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes])
|
|
||||||
e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes])
|
|
||||||
e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes])
|
|
||||||
e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes])
|
|
||||||
|
|
||||||
return e, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pair calculates an Optimal Ate pairing.
|
|
||||||
func Pair(g1 *G1, g2 *G2) *GT {
|
|
||||||
return >{optimalAte(g2.p, g1.p, new(bnPool))}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PairingCheck calculates the Optimal Ate pairing for a set of points.
|
|
||||||
func PairingCheck(a []*G1, b []*G2) bool {
|
|
||||||
pool := new(bnPool)
|
|
||||||
|
|
||||||
acc := newGFp12(pool)
|
|
||||||
acc.SetOne()
|
|
||||||
|
|
||||||
for i := 0; i < len(a); i++ {
|
|
||||||
if a[i].p.IsInfinity() || b[i].p.IsInfinity() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
acc.Mul(acc, miller(b[i].p, a[i].p, pool), pool)
|
|
||||||
}
|
|
||||||
ret := finalExponentiation(acc, pool)
|
|
||||||
acc.Put(pool)
|
|
||||||
|
|
||||||
return ret.IsOne()
|
|
||||||
}
|
|
||||||
|
|
||||||
// bnPool implements a tiny cache of *big.Int objects that's used to reduce the
|
|
||||||
// number of allocations made during processing.
|
|
||||||
type bnPool struct {
|
|
||||||
bns []*big.Int
|
|
||||||
count int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pool *bnPool) Get() *big.Int {
|
|
||||||
if pool == nil {
|
|
||||||
return new(big.Int)
|
|
||||||
}
|
|
||||||
|
|
||||||
pool.count++
|
|
||||||
l := len(pool.bns)
|
|
||||||
if l == 0 {
|
|
||||||
return new(big.Int)
|
|
||||||
}
|
|
||||||
|
|
||||||
bn := pool.bns[l-1]
|
|
||||||
pool.bns = pool.bns[:l-1]
|
|
||||||
return bn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pool *bnPool) Put(bn *big.Int) {
|
|
||||||
if pool == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pool.bns = append(pool.bns, bn)
|
|
||||||
pool.count--
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pool *bnPool) Count() int {
|
|
||||||
return pool.count
|
|
||||||
}
|
|
|
@ -1,304 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bn256
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"math/big"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestGFp2Invert(t *testing.T) {
|
|
||||||
pool := new(bnPool)
|
|
||||||
|
|
||||||
a := newGFp2(pool)
|
|
||||||
a.x.SetString("23423492374", 10)
|
|
||||||
a.y.SetString("12934872398472394827398470", 10)
|
|
||||||
|
|
||||||
inv := newGFp2(pool)
|
|
||||||
inv.Invert(a, pool)
|
|
||||||
|
|
||||||
b := newGFp2(pool).Mul(inv, a, pool)
|
|
||||||
if b.x.Int64() != 0 || b.y.Int64() != 1 {
|
|
||||||
t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
inv.Put(pool)
|
|
||||||
|
|
||||||
if c := pool.Count(); c > 0 {
|
|
||||||
t.Errorf("Pool count non-zero: %d\n", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isZero(n *big.Int) bool {
|
|
||||||
return new(big.Int).Mod(n, P).Int64() == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func isOne(n *big.Int) bool {
|
|
||||||
return new(big.Int).Mod(n, P).Int64() == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGFp6Invert(t *testing.T) {
|
|
||||||
pool := new(bnPool)
|
|
||||||
|
|
||||||
a := newGFp6(pool)
|
|
||||||
a.x.x.SetString("239487238491", 10)
|
|
||||||
a.x.y.SetString("2356249827341", 10)
|
|
||||||
a.y.x.SetString("082659782", 10)
|
|
||||||
a.y.y.SetString("182703523765", 10)
|
|
||||||
a.z.x.SetString("978236549263", 10)
|
|
||||||
a.z.y.SetString("64893242", 10)
|
|
||||||
|
|
||||||
inv := newGFp6(pool)
|
|
||||||
inv.Invert(a, pool)
|
|
||||||
|
|
||||||
b := newGFp6(pool).Mul(inv, a, pool)
|
|
||||||
if !isZero(b.x.x) ||
|
|
||||||
!isZero(b.x.y) ||
|
|
||||||
!isZero(b.y.x) ||
|
|
||||||
!isZero(b.y.y) ||
|
|
||||||
!isZero(b.z.x) ||
|
|
||||||
!isOne(b.z.y) {
|
|
||||||
t.Fatalf("bad result for a^-1*a: %s", b)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
inv.Put(pool)
|
|
||||||
|
|
||||||
if c := pool.Count(); c > 0 {
|
|
||||||
t.Errorf("Pool count non-zero: %d\n", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGFp12Invert(t *testing.T) {
|
|
||||||
pool := new(bnPool)
|
|
||||||
|
|
||||||
a := newGFp12(pool)
|
|
||||||
a.x.x.x.SetString("239846234862342323958623", 10)
|
|
||||||
a.x.x.y.SetString("2359862352529835623", 10)
|
|
||||||
a.x.y.x.SetString("928836523", 10)
|
|
||||||
a.x.y.y.SetString("9856234", 10)
|
|
||||||
a.x.z.x.SetString("235635286", 10)
|
|
||||||
a.x.z.y.SetString("5628392833", 10)
|
|
||||||
a.y.x.x.SetString("252936598265329856238956532167968", 10)
|
|
||||||
a.y.x.y.SetString("23596239865236954178968", 10)
|
|
||||||
a.y.y.x.SetString("95421692834", 10)
|
|
||||||
a.y.y.y.SetString("236548", 10)
|
|
||||||
a.y.z.x.SetString("924523", 10)
|
|
||||||
a.y.z.y.SetString("12954623", 10)
|
|
||||||
|
|
||||||
inv := newGFp12(pool)
|
|
||||||
inv.Invert(a, pool)
|
|
||||||
|
|
||||||
b := newGFp12(pool).Mul(inv, a, pool)
|
|
||||||
if !isZero(b.x.x.x) ||
|
|
||||||
!isZero(b.x.x.y) ||
|
|
||||||
!isZero(b.x.y.x) ||
|
|
||||||
!isZero(b.x.y.y) ||
|
|
||||||
!isZero(b.x.z.x) ||
|
|
||||||
!isZero(b.x.z.y) ||
|
|
||||||
!isZero(b.y.x.x) ||
|
|
||||||
!isZero(b.y.x.y) ||
|
|
||||||
!isZero(b.y.y.x) ||
|
|
||||||
!isZero(b.y.y.y) ||
|
|
||||||
!isZero(b.y.z.x) ||
|
|
||||||
!isOne(b.y.z.y) {
|
|
||||||
t.Fatalf("bad result for a^-1*a: %s", b)
|
|
||||||
}
|
|
||||||
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
inv.Put(pool)
|
|
||||||
|
|
||||||
if c := pool.Count(); c > 0 {
|
|
||||||
t.Errorf("Pool count non-zero: %d\n", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCurveImpl(t *testing.T) {
|
|
||||||
pool := new(bnPool)
|
|
||||||
|
|
||||||
g := &curvePoint{
|
|
||||||
pool.Get().SetInt64(1),
|
|
||||||
pool.Get().SetInt64(-2),
|
|
||||||
pool.Get().SetInt64(1),
|
|
||||||
pool.Get().SetInt64(0),
|
|
||||||
}
|
|
||||||
|
|
||||||
x := pool.Get().SetInt64(32498273234)
|
|
||||||
X := newCurvePoint(pool).Mul(g, x, pool)
|
|
||||||
|
|
||||||
y := pool.Get().SetInt64(98732423523)
|
|
||||||
Y := newCurvePoint(pool).Mul(g, y, pool)
|
|
||||||
|
|
||||||
s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool)
|
|
||||||
s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool)
|
|
||||||
|
|
||||||
if s1.x.Cmp(s2.x) != 0 ||
|
|
||||||
s2.x.Cmp(s1.x) != 0 {
|
|
||||||
t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y)
|
|
||||||
}
|
|
||||||
|
|
||||||
pool.Put(x)
|
|
||||||
X.Put(pool)
|
|
||||||
pool.Put(y)
|
|
||||||
Y.Put(pool)
|
|
||||||
s1.Put(pool)
|
|
||||||
s2.Put(pool)
|
|
||||||
g.Put(pool)
|
|
||||||
|
|
||||||
if c := pool.Count(); c > 0 {
|
|
||||||
t.Errorf("Pool count non-zero: %d\n", c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOrderG1(t *testing.T) {
|
|
||||||
g := new(G1).ScalarBaseMult(Order)
|
|
||||||
if !g.p.IsInfinity() {
|
|
||||||
t.Error("G1 has incorrect order")
|
|
||||||
}
|
|
||||||
|
|
||||||
one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
|
||||||
g.Add(g, one)
|
|
||||||
g.p.MakeAffine(nil)
|
|
||||||
if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 {
|
|
||||||
t.Errorf("1+0 != 1 in G1")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOrderG2(t *testing.T) {
|
|
||||||
g := new(G2).ScalarBaseMult(Order)
|
|
||||||
if !g.p.IsInfinity() {
|
|
||||||
t.Error("G2 has incorrect order")
|
|
||||||
}
|
|
||||||
|
|
||||||
one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
|
||||||
g.Add(g, one)
|
|
||||||
g.p.MakeAffine(nil)
|
|
||||||
if g.p.x.x.Cmp(one.p.x.x) != 0 ||
|
|
||||||
g.p.x.y.Cmp(one.p.x.y) != 0 ||
|
|
||||||
g.p.y.x.Cmp(one.p.y.x) != 0 ||
|
|
||||||
g.p.y.y.Cmp(one.p.y.y) != 0 {
|
|
||||||
t.Errorf("1+0 != 1 in G2")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOrderGT(t *testing.T) {
|
|
||||||
gt := Pair(&G1{curveGen}, &G2{twistGen})
|
|
||||||
g := new(GT).ScalarMult(gt, Order)
|
|
||||||
if !g.p.IsOne() {
|
|
||||||
t.Error("GT has incorrect order")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBilinearity(t *testing.T) {
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
a, p1, _ := RandomG1(rand.Reader)
|
|
||||||
b, p2, _ := RandomG2(rand.Reader)
|
|
||||||
e1 := Pair(p1, p2)
|
|
||||||
|
|
||||||
e2 := Pair(&G1{curveGen}, &G2{twistGen})
|
|
||||||
e2.ScalarMult(e2, a)
|
|
||||||
e2.ScalarMult(e2, b)
|
|
||||||
|
|
||||||
minusE2 := new(GT).Neg(e2)
|
|
||||||
e1.Add(e1, minusE2)
|
|
||||||
|
|
||||||
if !e1.p.IsOne() {
|
|
||||||
t.Fatalf("bad pairing result: %s", e1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestG1Marshal(t *testing.T) {
|
|
||||||
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
|
||||||
form := g.Marshal()
|
|
||||||
_, ok := new(G1).Unmarshal(form)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to unmarshal")
|
|
||||||
}
|
|
||||||
|
|
||||||
g.ScalarBaseMult(Order)
|
|
||||||
form = g.Marshal()
|
|
||||||
g2, ok := new(G1).Unmarshal(form)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to unmarshal ∞")
|
|
||||||
}
|
|
||||||
if !g2.p.IsInfinity() {
|
|
||||||
t.Fatalf("∞ unmarshaled incorrectly")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestG2Marshal(t *testing.T) {
|
|
||||||
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
|
||||||
form := g.Marshal()
|
|
||||||
_, ok := new(G2).Unmarshal(form)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to unmarshal")
|
|
||||||
}
|
|
||||||
|
|
||||||
g.ScalarBaseMult(Order)
|
|
||||||
form = g.Marshal()
|
|
||||||
g2, ok := new(G2).Unmarshal(form)
|
|
||||||
if !ok {
|
|
||||||
t.Fatalf("failed to unmarshal ∞")
|
|
||||||
}
|
|
||||||
if !g2.p.IsInfinity() {
|
|
||||||
t.Fatalf("∞ unmarshaled incorrectly")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestG1Identity(t *testing.T) {
|
|
||||||
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0))
|
|
||||||
if !g.p.IsInfinity() {
|
|
||||||
t.Error("failure")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestG2Identity(t *testing.T) {
|
|
||||||
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0))
|
|
||||||
if !g.p.IsInfinity() {
|
|
||||||
t.Error("failure")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTripartiteDiffieHellman(t *testing.T) {
|
|
||||||
a, _ := rand.Int(rand.Reader, Order)
|
|
||||||
b, _ := rand.Int(rand.Reader, Order)
|
|
||||||
c, _ := rand.Int(rand.Reader, Order)
|
|
||||||
|
|
||||||
pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
|
|
||||||
qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
|
|
||||||
pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
|
|
||||||
qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
|
|
||||||
pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
|
|
||||||
qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
|
|
||||||
|
|
||||||
k1 := Pair(pb, qc)
|
|
||||||
k1.ScalarMult(k1, a)
|
|
||||||
k1Bytes := k1.Marshal()
|
|
||||||
|
|
||||||
k2 := Pair(pc, qa)
|
|
||||||
k2.ScalarMult(k2, b)
|
|
||||||
k2Bytes := k2.Marshal()
|
|
||||||
|
|
||||||
k3 := Pair(pa, qb)
|
|
||||||
k3.ScalarMult(k3, c)
|
|
||||||
k3Bytes := k3.Marshal()
|
|
||||||
|
|
||||||
if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
|
|
||||||
t.Errorf("keys didn't agree")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkPairing(b *testing.B) {
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
Pair(&G1{curveGen}, &G2{twistGen})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,44 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bn256
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
func bigFromBase10(s string) *big.Int {
|
|
||||||
n, _ := new(big.Int).SetString(s, 10)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// u is the BN parameter that determines the prime: 1868033³.
|
|
||||||
var u = bigFromBase10("4965661367192848881")
|
|
||||||
|
|
||||||
// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
|
|
||||||
var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583")
|
|
||||||
|
|
||||||
// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
|
|
||||||
var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617")
|
|
||||||
|
|
||||||
// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9.
|
|
||||||
var xiToPMinus1Over6 = &gfP2{bigFromBase10("16469823323077808223889137241176536799009286646108169935659301613961712198316"), bigFromBase10("8376118865763821496583973867626364092589906065868298776909617916018768340080")}
|
|
||||||
|
|
||||||
// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+9.
|
|
||||||
var xiToPMinus1Over3 = &gfP2{bigFromBase10("10307601595873709700152284273816112264069230130616436755625194854815875713954"), bigFromBase10("21575463638280843010398324269430826099269044274347216827212613867836435027261")}
|
|
||||||
|
|
||||||
// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+9.
|
|
||||||
var xiToPMinus1Over2 = &gfP2{bigFromBase10("3505843767911556378687030309984248845540243509899259641013678093033130930403"), bigFromBase10("2821565182194536844548159561693502659359617185244120367078079554186484126554")}
|
|
||||||
|
|
||||||
// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+9.
|
|
||||||
var xiToPSquaredMinus1Over3 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556616")
|
|
||||||
|
|
||||||
// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+9 (a cubic root of unity, mod p).
|
|
||||||
var xiTo2PSquaredMinus2Over3 = bigFromBase10("2203960485148121921418603742825762020974279258880205651966")
|
|
||||||
|
|
||||||
// xiToPSquaredMinus1Over6 is ξ^((1p²-1)/6) where ξ = i+9 (a cubic root of -1, mod p).
|
|
||||||
var xiToPSquaredMinus1Over6 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556617")
|
|
||||||
|
|
||||||
// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+9.
|
|
||||||
var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19937756971775647987995932169929341994314640652964949448313374472400716661030"), bigFromBase10("2581911344467009335267311115468803099551665605076196740867805258568234346338")}
|
|
|
@ -1,278 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

import (
	"math/big"
)

// curvePoint implements the elliptic curve y² = x³ + 3 over GF(p). Points
// are held in Jacobian projective form [x:y:z], with the cached value
// t = z² maintained whenever the point is valid. G₁ is the group of points
// of this curve over GF(p).
type curvePoint struct {
	x, y, z, t *big.Int
}

// curveB is the constant term of the curve equation.
var curveB = new(big.Int).SetInt64(3)

// curveGen is the generator of G₁.
var curveGen = &curvePoint{
	new(big.Int).SetInt64(1),
	new(big.Int).SetInt64(-2),
	new(big.Int).SetInt64(1),
	new(big.Int).SetInt64(1),
}

// newCurvePoint allocates an uninitialized point whose coordinate
// integers are drawn from pool.
func newCurvePoint(pool *bnPool) *curvePoint {
	return &curvePoint{
		x: pool.Get(),
		y: pool.Get(),
		z: pool.Get(),
		t: pool.Get(),
	}
}

// String renders c as "(x, y)". Note that it normalizes c to affine form
// as a side effect.
func (c *curvePoint) String() string {
	c.MakeAffine(new(bnPool))
	return "(" + c.x.String() + ", " + c.y.String() + ")"
}

// Put returns c's coordinate integers to pool for reuse.
func (c *curvePoint) Put(pool *bnPool) {
	pool.Put(c.x)
	pool.Put(c.y)
	pool.Put(c.z)
	pool.Put(c.t)
}

// Set copies a into c.
func (c *curvePoint) Set(a *curvePoint) {
	c.x.Set(a.x)
	c.y.Set(a.y)
	c.z.Set(a.z)
	c.t.Set(a.t)
}

// IsOnCurve returns true iff c is on the curve where c must be in affine form.
func (c *curvePoint) IsOnCurve() bool {
	lhs := new(big.Int).Mul(c.y, c.y)
	rhs := new(big.Int).Mul(c.x, c.x)
	rhs.Mul(rhs, c.x)
	lhs.Sub(lhs, rhs)
	lhs.Sub(lhs, curveB)
	if lhs.Sign() < 0 || lhs.Cmp(P) >= 0 {
		lhs.Mod(lhs, P)
	}
	return lhs.Sign() == 0
}

// SetInfinity marks c as the point at infinity (z == 0).
func (c *curvePoint) SetInfinity() {
	c.z.SetInt64(0)
}

// IsInfinity reports whether c is the point at infinity.
func (c *curvePoint) IsInfinity() bool {
	return c.z.Sign() == 0
}

// Add sets c = a + b. See the mixed addition algorithm from
// http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) {
	if a.IsInfinity() {
		c.Set(b)
		return
	}
	if b.IsInfinity() {
		c.Set(a)
		return
	}

	// Normalize the points: a = [x1:y1:z1] and b = [x2:y2:z2] become
	// [u1:s1:z1·z2] and [u2:s2:z1·z2], where u1 = x1·z2², s1 = y1·z2³,
	// u2 = x2·z1² and s2 = y2·z1³.
	zz1 := pool.Get().Mul(a.z, a.z)
	zz1.Mod(zz1, P)
	zz2 := pool.Get().Mul(b.z, b.z)
	zz2.Mod(zz2, P)
	u1 := pool.Get().Mul(a.x, zz2)
	u1.Mod(u1, P)
	u2 := pool.Get().Mul(b.x, zz1)
	u2.Mod(u2, P)

	tmp := pool.Get().Mul(b.z, zz2)
	tmp.Mod(tmp, P)
	s1 := pool.Get().Mul(a.y, tmp)
	s1.Mod(s1, P)

	tmp.Mul(a.z, zz1)
	tmp.Mod(tmp, P)
	s2 := pool.Get().Mul(b.y, tmp)
	s2.Mod(s2, P)

	// Compute x = (2h)²(s²-u1-u2) where s = (s2-s1)/(u2-u1) is the slope
	// of the line through (u1,s1) and (u2,s2). The extra factor
	// 2h = 2(u2-u1) comes from the value of z chosen below. This is also:
	//   4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
	//                          = r² - j - 2v
	// in the notation below.
	h := pool.Get().Sub(u2, u1)
	xEqual := h.Sign() == 0

	tmp.Add(h, h)
	// i4 = 4h²
	i4 := pool.Get().Mul(tmp, tmp)
	i4.Mod(i4, P)
	// j = 4h³
	j := pool.Get().Mul(h, i4)
	j.Mod(j, P)

	tmp.Sub(s2, s1)
	yEqual := tmp.Sign() == 0
	if xEqual && yEqual {
		// Same point: the addition formula degenerates, fall back to
		// doubling instead.
		c.Double(a, pool)
		return
	}
	r := pool.Get().Add(tmp, tmp)

	v := pool.Get().Mul(u1, i4)
	v.Mod(v, P)

	// t4 = 4(s2-s1)²
	t4 := pool.Get().Mul(r, r)
	t4.Mod(t4, P)
	tmp.Add(v, v)
	t6 := pool.Get().Sub(t4, j)
	c.x.Sub(t6, tmp)

	// Set y = -(2h)³(s1 + s·(x/4h²-u1)). This is also
	//   y = -2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
	tmp.Sub(v, c.x) // t7
	t4.Mul(s1, j)   // t8
	t4.Mod(t4, P)
	t6.Add(t4, t4) // t9
	t4.Mul(r, tmp) // t10
	t4.Mod(t4, P)
	c.y.Sub(t4, t6)

	// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
	tmp.Add(a.z, b.z) // t11
	t4.Mul(tmp, tmp)  // t12
	t4.Mod(t4, P)
	tmp.Sub(t4, zz1) // t13
	t4.Sub(tmp, zz2) // t14
	c.z.Mul(t4, h)
	c.z.Mod(c.z, P)

	pool.Put(zz1)
	pool.Put(zz2)
	pool.Put(u1)
	pool.Put(u2)
	pool.Put(tmp)
	pool.Put(s1)
	pool.Put(s2)
	pool.Put(h)
	pool.Put(i4)
	pool.Put(j)
	pool.Put(r)
	pool.Put(v)
	pool.Put(t4)
	pool.Put(t6)
}

// Double sets c = 2a. See
// http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
func (c *curvePoint) Double(a *curvePoint, pool *bnPool) {
	xx := pool.Get().Mul(a.x, a.x)
	xx.Mod(xx, P)
	yy := pool.Get().Mul(a.y, a.y)
	yy.Mod(yy, P)
	y4 := pool.Get().Mul(yy, yy)
	y4.Mod(y4, P)

	tmp := pool.Get().Add(a.x, yy)
	tmp2 := pool.Get().Mul(tmp, tmp)
	tmp2.Mod(tmp2, P)
	tmp.Sub(tmp2, xx)
	tmp2.Sub(tmp, y4)
	d := pool.Get().Add(tmp2, tmp2)
	tmp.Add(xx, xx)
	e := pool.Get().Add(tmp, xx)
	f := pool.Get().Mul(e, e)
	f.Mod(f, P)

	tmp.Add(d, d)
	c.x.Sub(f, tmp)

	tmp.Add(y4, y4)
	tmp2.Add(tmp, tmp)
	tmp.Add(tmp2, tmp2)
	c.y.Sub(d, c.x)
	tmp2.Mul(e, c.y)
	tmp2.Mod(tmp2, P)
	c.y.Sub(tmp2, tmp)

	tmp.Mul(a.y, a.z)
	tmp.Mod(tmp, P)
	c.z.Add(tmp, tmp)

	pool.Put(xx)
	pool.Put(yy)
	pool.Put(y4)
	pool.Put(tmp)
	pool.Put(tmp2)
	pool.Put(d)
	pool.Put(e)
	pool.Put(f)
}

// Mul sets c = scalar·a via a plain double-and-add ladder and returns c.
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint {
	acc := newCurvePoint(pool)
	acc.SetInfinity()
	dbl := newCurvePoint(pool)

	for i := scalar.BitLen(); i >= 0; i-- {
		dbl.Double(acc, pool)
		if scalar.Bit(i) != 0 {
			acc.Add(dbl, a, pool)
		} else {
			acc.Set(dbl)
		}
	}

	c.Set(acc)
	acc.Put(pool)
	dbl.Put(pool)
	return c
}

// MakeAffine converts c to affine form (z = 1, t = 1) and returns c.
// NOTE(review): c should not be the point at infinity here — ModInverse of
// zero has no meaningful result; confirm callers guard against it.
func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
	// Fast path: already affine.
	if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
		return c
	}

	zInv := pool.Get().ModInverse(c.z, P)
	scratch := pool.Get().Mul(c.y, zInv)
	scratch.Mod(scratch, P)
	zInv2 := pool.Get().Mul(zInv, zInv)
	zInv2.Mod(zInv2, P)
	c.y.Mul(scratch, zInv2) // y · z⁻³
	c.y.Mod(c.y, P)
	scratch.Mul(c.x, zInv2) // x · z⁻²
	scratch.Mod(scratch, P)
	c.x.Set(scratch)
	c.z.SetInt64(1)
	c.t.SetInt64(1)

	pool.Put(zInv)
	pool.Put(scratch)
	pool.Put(zInv2)

	return c
}

// Negative sets c = -a.
func (c *curvePoint) Negative(a *curvePoint) {
	c.x.Set(a.x)
	c.y.Neg(a.y)
	c.z.Set(a.z)
	c.t.SetInt64(0)
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

import (
	"crypto/rand"
)

// ExamplePair demonstrates the one-round tripartite Diffie-Hellman key
// exchange from "A One Round Protocol for Tripartite Diffie-Hellman",
// A. Joux. http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf
func ExamplePair() {
	// Each of the three parties, a, b and c, picks a secret scalar.
	a, _ := rand.Int(rand.Reader, Order)
	b, _ := rand.Int(rand.Reader, Order)
	c, _ := rand.Int(rand.Reader, Order)

	// Every party publishes g₁ and g₂ raised to its secret.
	pa, qa := new(G1).ScalarBaseMult(a), new(G2).ScalarBaseMult(a)
	pb, qb := new(G1).ScalarBaseMult(b), new(G2).ScalarBaseMult(b)
	pc, qc := new(G1).ScalarBaseMult(c), new(G2).ScalarBaseMult(c)

	// After exchanging public values, each party pairs the other two
	// parties' values and raises the result to its own secret.
	k1 := Pair(pb, qc)
	k1.ScalarMult(k1, a)

	k2 := Pair(pc, qa)
	k2.ScalarMult(k2, b)

	k3 := Pair(pa, qb)
	k3.ScalarMult(k3, c)

	// k1, k2 and k3 will all be equal.
}
|
|
|
@ -1,200 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.

import (
	"math/big"
)

// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
// where ω²=τ.
type gfP12 struct {
	x, y *gfP6 // value is xω + y
}

// newGFp12 allocates a gfP12 whose coefficients come from pool.
func newGFp12(pool *bnPool) *gfP12 {
	return &gfP12{newGFp6(pool), newGFp6(pool)}
}

func (e *gfP12) String() string {
	return "(" + e.x.String() + "," + e.y.String() + ")"
}

// Put returns e's storage to pool for reuse.
func (e *gfP12) Put(pool *bnPool) {
	e.x.Put(pool)
	e.y.Put(pool)
}

// Set copies a into e and returns e.
func (e *gfP12) Set(a *gfP12) *gfP12 {
	e.x.Set(a.x)
	e.y.Set(a.y)
	return e
}

// SetZero sets e to the additive identity and returns e.
func (e *gfP12) SetZero() *gfP12 {
	e.x.SetZero()
	e.y.SetZero()
	return e
}

// SetOne sets e to the multiplicative identity and returns e.
func (e *gfP12) SetOne() *gfP12 {
	e.x.SetZero()
	e.y.SetOne()
	return e
}

// Minimal reduces e's coefficients into canonical range mod p.
func (e *gfP12) Minimal() {
	e.x.Minimal()
	e.y.Minimal()
}

// IsZero reports whether e is zero (after reduction).
func (e *gfP12) IsZero() bool {
	e.Minimal()
	return e.x.IsZero() && e.y.IsZero()
}

// IsOne reports whether e is one (after reduction).
func (e *gfP12) IsOne() bool {
	e.Minimal()
	return e.x.IsZero() && e.y.IsOne()
}

// Conjugate sets e to the conjugate of a (negating the ω coefficient) and
// returns e.
//
// Fix: this previously returned a rather than the receiver, unlike every
// other setter in this file, so a caller chaining the result got the input
// back instead of the computed conjugate.
func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
	e.x.Negative(a.x)
	e.y.Set(a.y)
	return e
}

// Negative sets e = -a and returns e.
func (e *gfP12) Negative(a *gfP12) *gfP12 {
	e.x.Negative(a.x)
	e.y.Negative(a.y)
	return e
}

// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 {
	e.x.Frobenius(a.x, pool)
	e.y.Frobenius(a.y, pool)
	e.x.MulScalar(e.x, xiToPMinus1Over6, pool)
	return e
}

// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 {
	e.x.FrobeniusP2(a.x)
	e.x.MulGFP(e.x, xiToPSquaredMinus1Over6)
	e.y.FrobeniusP2(a.y)
	return e
}

// Add sets e = a + b coefficient-wise and returns e.
func (e *gfP12) Add(a, b *gfP12) *gfP12 {
	e.x.Add(a.x, b.x)
	e.y.Add(a.y, b.y)
	return e
}

// Sub sets e = a - b coefficient-wise and returns e.
func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
	e.x.Sub(a.x, b.x)
	e.y.Sub(a.y, b.y)
	return e
}

// Mul sets e = a·b and returns e. Schoolbook over the quadratic extension:
// (aₓω+a_y)(bₓω+b_y) = (aₓb_y + bₓa_y)ω + (a_y·b_y + aₓ·bₓ·τ).
func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
	tx := newGFp6(pool)
	tx.Mul(a.x, b.y, pool)
	t := newGFp6(pool)
	t.Mul(b.x, a.y, pool)
	tx.Add(tx, t)

	ty := newGFp6(pool)
	ty.Mul(a.y, b.y, pool)
	t.Mul(a.x, b.x, pool)
	t.MulTau(t, pool)
	e.y.Add(ty, t)
	e.x.Set(tx)

	tx.Put(pool)
	ty.Put(pool)
	t.Put(pool)
	return e
}

// MulScalar sets e = a scaled coefficient-wise by b, and returns e.
//
// Fix: the original multiplied e's own coefficients and ignored the a
// parameter entirely, so it was only correct when invoked as
// e.MulScalar(e, ...) — which happens to be the only way this package
// calls it (see Invert). Reading from a preserves that behavior exactly
// while making the method correct for distinct receivers as well.
func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
	e.x.Mul(a.x, b, pool)
	e.y.Mul(a.y, b, pool)
	return e
}

// Exp sets c = a^power by square-and-multiply and returns c.
func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 {
	sum := newGFp12(pool)
	sum.SetOne()
	t := newGFp12(pool)

	for i := power.BitLen() - 1; i >= 0; i-- {
		t.Square(sum, pool)
		if power.Bit(i) != 0 {
			sum.Mul(t, a, pool)
		} else {
			sum.Set(t)
		}
	}

	c.Set(sum)

	sum.Put(pool)
	t.Put(pool)

	return c
}

// Square sets e = a² using complex squaring and returns e.
func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 {
	// Complex squaring algorithm
	v0 := newGFp6(pool)
	v0.Mul(a.x, a.y, pool)

	t := newGFp6(pool)
	t.MulTau(a.x, pool)
	t.Add(a.y, t)
	ty := newGFp6(pool)
	ty.Add(a.x, a.y)
	ty.Mul(ty, t, pool)
	ty.Sub(ty, v0)
	t.MulTau(v0, pool)
	ty.Sub(ty, t)

	e.y.Set(ty)
	e.x.Double(v0)

	v0.Put(pool)
	t.Put(pool)
	ty.Put(pool)

	return e
}

// Invert sets e = a⁻¹ and returns e.
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 {
	t1 := newGFp6(pool)
	t2 := newGFp6(pool)

	t1.Square(a.x, pool)
	t2.Square(a.y, pool)
	t1.MulTau(t1, pool)
	t1.Sub(t2, t1)
	t2.Invert(t1, pool)

	e.x.Negative(a.x)
	e.y.Set(a.y)
	e.MulScalar(e, t2, pool)

	t1.Put(pool)
	t2.Put(pool)

	return e
}
|
|
|
@ -1,227 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.

import (
	"math/big"
)

// gfP2 implements a field of size p² as a quadratic extension of the base
// field where i²=-1.
type gfP2 struct {
	x, y *big.Int // value is xi+y.
}

// newGFp2 allocates a gfP2 whose limbs are drawn from pool.
func newGFp2(pool *bnPool) *gfP2 {
	return &gfP2{x: pool.Get(), y: pool.Get()}
}

// String renders e as "(x,y)" with both coefficients reduced mod p.
func (e *gfP2) String() string {
	xr := new(big.Int).Mod(e.x, P)
	yr := new(big.Int).Mod(e.y, P)
	return "(" + xr.String() + "," + yr.String() + ")"
}

// Put returns e's limbs to pool for reuse.
func (e *gfP2) Put(pool *bnPool) {
	pool.Put(e.x)
	pool.Put(e.y)
}

// Set copies a into e and returns e.
func (e *gfP2) Set(a *gfP2) *gfP2 {
	e.x.Set(a.x)
	e.y.Set(a.y)
	return e
}

// SetZero sets e = 0 and returns e.
func (e *gfP2) SetZero() *gfP2 {
	e.x.SetInt64(0)
	e.y.SetInt64(0)
	return e
}

// SetOne sets e = 1 and returns e.
func (e *gfP2) SetOne() *gfP2 {
	e.x.SetInt64(0)
	e.y.SetInt64(1)
	return e
}

// Minimal reduces both coefficients into [0, p) if they are out of range.
func (e *gfP2) Minimal() {
	if e.x.Sign() < 0 || e.x.Cmp(P) >= 0 {
		e.x.Mod(e.x, P)
	}
	if e.y.Sign() < 0 || e.y.Cmp(P) >= 0 {
		e.y.Mod(e.y, P)
	}
}

// IsZero reports whether e is zero. Assumes e is already minimal.
func (e *gfP2) IsZero() bool {
	return e.x.Sign() == 0 && e.y.Sign() == 0
}

// IsOne reports whether e is one. Assumes e is already minimal.
func (e *gfP2) IsOne() bool {
	if e.x.Sign() != 0 {
		return false
	}
	words := e.y.Bits()
	return len(words) == 1 && words[0] == 1
}

// Conjugate sets e to the conjugate of a (negating the i coefficient) and
// returns e.
func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
	e.y.Set(a.y)
	e.x.Neg(a.x)
	return e
}

// Negative sets e = -a and returns e.
func (e *gfP2) Negative(a *gfP2) *gfP2 {
	e.x.Neg(a.x)
	e.y.Neg(a.y)
	return e
}

// Add sets e = a + b and returns e.
func (e *gfP2) Add(a, b *gfP2) *gfP2 {
	e.x.Add(a.x, b.x)
	e.y.Add(a.y, b.y)
	return e
}

// Sub sets e = a - b and returns e.
func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
	e.x.Sub(a.x, b.x)
	e.y.Sub(a.y, b.y)
	return e
}

// Double sets e = 2a and returns e.
func (e *gfP2) Double(a *gfP2) *gfP2 {
	e.x.Lsh(a.x, 1)
	e.y.Lsh(a.y, 1)
	return e
}

// Exp sets c = a^power via square-and-multiply and returns c.
func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 {
	acc := newGFp2(pool)
	acc.SetOne()
	sq := newGFp2(pool)

	for i := power.BitLen() - 1; i >= 0; i-- {
		sq.Square(acc, pool)
		if power.Bit(i) != 0 {
			acc.Mul(sq, a, pool)
		} else {
			acc.Set(sq)
		}
	}

	c.Set(acc)

	acc.Put(pool)
	sq.Put(pool)

	return c
}

// Mul sets e = a·b and returns e.
// See "Multiplication and Squaring in Pairing-Friendly Fields",
// http://eprint.iacr.org/2006/471.pdf
func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 {
	// (aₓi+a_y)(bₓi+b_y) = (aₓb_y + bₓa_y)i + (a_y·b_y - aₓ·bₓ)
	tx := pool.Get().Mul(a.x, b.y)
	scratch := pool.Get().Mul(b.x, a.y)
	tx.Add(tx, scratch)
	tx.Mod(tx, P)

	ty := pool.Get().Mul(a.y, b.y)
	scratch.Mul(a.x, b.x)
	ty.Sub(ty, scratch)
	e.y.Mod(ty, P)
	e.x.Set(tx)

	pool.Put(tx)
	pool.Put(ty)
	pool.Put(scratch)

	return e
}

// MulScalar scales both coefficients of a by b and returns e.
func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 {
	e.x.Mul(a.x, b)
	e.y.Mul(a.y, b)
	return e
}

// MulXi sets e=ξa where ξ=i+9 and then returns e.
func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 {
	// (xi+y)(i+9) = (9x+y)i + (9y-x), computed as 8x+x+y and 8y+y-x.
	tx := pool.Get().Lsh(a.x, 3)
	tx.Add(tx, a.x)
	tx.Add(tx, a.y)

	ty := pool.Get().Lsh(a.y, 3)
	ty.Add(ty, a.y)
	ty.Sub(ty, a.x)

	e.x.Set(tx)
	e.y.Set(ty)

	pool.Put(tx)
	pool.Put(ty)

	return e
}

// Square sets e = a² and returns e.
func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 {
	// Complex squaring algorithm:
	// (xi+y)² = (x+y)(y-x) + 2xy·i
	t1 := pool.Get().Sub(a.y, a.x)
	t2 := pool.Get().Add(a.x, a.y)
	ty := pool.Get().Mul(t1, t2)
	ty.Mod(ty, P)

	t1.Mul(a.x, a.y)
	t1.Lsh(t1, 1)

	e.x.Mod(t1, P)
	e.y.Set(ty)

	pool.Put(t1)
	pool.Put(t2)
	pool.Put(ty)

	return e
}

// Invert sets e = a⁻¹ and returns e.
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 {
	// norm = x² + y²; the inverse is the conjugate divided by the norm.
	norm := pool.Get()
	norm.Mul(a.y, a.y)
	xx := pool.Get()
	xx.Mul(a.x, a.x)
	norm.Add(norm, xx)

	inv := pool.Get()
	inv.ModInverse(norm, P)

	e.x.Neg(a.x)
	e.x.Mul(e.x, inv)
	e.x.Mod(e.x, P)

	e.y.Mul(a.y, inv)
	e.y.Mod(e.y, P)

	pool.Put(norm)
	pool.Put(xx)
	pool.Put(inv)

	return e
}

// Real returns the x coefficient of e.
// NOTE(review): with the "value is xi+y" representation, x is the i
// coefficient, so the Real/Imag names look swapped — confirm against
// callers before renaming; behavior is kept as-is.
func (e *gfP2) Real() *big.Int {
	return e.x
}

// Imag returns the y coefficient of e. See the note on Real.
func (e *gfP2) Imag() *big.Int {
	return e.y
}
|
|
|
@ -1,296 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields, Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.

import (
	"math/big"
)

// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
// and ξ=i+9.
type gfP6 struct {
	x, y, z *gfP2 // value is xτ² + yτ + z
}

// newGFp6 allocates a gfP6 whose coefficients come from pool.
func newGFp6(pool *bnPool) *gfP6 {
	return &gfP6{
		x: newGFp2(pool),
		y: newGFp2(pool),
		z: newGFp2(pool),
	}
}

func (e *gfP6) String() string {
	return "(" + e.x.String() + "," + e.y.String() + "," + e.z.String() + ")"
}

// Put returns e's storage to pool for reuse.
func (e *gfP6) Put(pool *bnPool) {
	e.x.Put(pool)
	e.y.Put(pool)
	e.z.Put(pool)
}

// Set copies a into e and returns e.
func (e *gfP6) Set(a *gfP6) *gfP6 {
	e.x.Set(a.x)
	e.y.Set(a.y)
	e.z.Set(a.z)
	return e
}

// SetZero sets e = 0 and returns e.
func (e *gfP6) SetZero() *gfP6 {
	e.x.SetZero()
	e.y.SetZero()
	e.z.SetZero()
	return e
}

// SetOne sets e = 1 and returns e.
func (e *gfP6) SetOne() *gfP6 {
	e.x.SetZero()
	e.y.SetZero()
	e.z.SetOne()
	return e
}

// Minimal reduces all coefficients into canonical range mod p.
func (e *gfP6) Minimal() {
	e.x.Minimal()
	e.y.Minimal()
	e.z.Minimal()
}

// IsZero reports whether e is zero.
func (e *gfP6) IsZero() bool {
	return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
}

// IsOne reports whether e is one.
func (e *gfP6) IsOne() bool {
	return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
}

// Negative sets e = -a and returns e.
func (e *gfP6) Negative(a *gfP6) *gfP6 {
	e.x.Negative(a.x)
	e.y.Negative(a.y)
	e.z.Negative(a.z)
	return e
}

// Frobenius raises a to the power p and stores the result in e.
func (e *gfP6) Frobenius(a *gfP6, pool *bnPool) *gfP6 {
	e.x.Conjugate(a.x)
	e.y.Conjugate(a.y)
	e.z.Conjugate(a.z)

	e.x.Mul(e.x, xiTo2PMinus2Over3, pool)
	e.y.Mul(e.y, xiToPMinus1Over3, pool)
	return e
}

// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
	// τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
	e.x.MulScalar(a.x, xiTo2PSquaredMinus2Over3)
	// τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
	e.y.MulScalar(a.y, xiToPSquaredMinus1Over3)
	e.z.Set(a.z)
	return e
}

// Add sets e = a + b coefficient-wise and returns e.
func (e *gfP6) Add(a, b *gfP6) *gfP6 {
	e.x.Add(a.x, b.x)
	e.y.Add(a.y, b.y)
	e.z.Add(a.z, b.z)
	return e
}

// Sub sets e = a - b coefficient-wise and returns e.
func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
	e.x.Sub(a.x, b.x)
	e.y.Sub(a.y, b.y)
	e.z.Sub(a.z, b.z)
	return e
}

// Double sets e = 2a and returns e.
func (e *gfP6) Double(a *gfP6) *gfP6 {
	e.x.Double(a.x)
	e.y.Double(a.y)
	e.z.Double(a.z)
	return e
}

// Mul sets e = a·b and returns e.
// "Multiplication and Squaring on Pairing-Friendly Fields", Section 4,
// Karatsuba method. http://eprint.iacr.org/2006/471.pdf
func (e *gfP6) Mul(a, b *gfP6, pool *bnPool) *gfP6 {
	v0 := newGFp2(pool)
	v0.Mul(a.z, b.z, pool)
	v1 := newGFp2(pool)
	v1.Mul(a.y, b.y, pool)
	v2 := newGFp2(pool)
	v2.Mul(a.x, b.x, pool)

	t0 := newGFp2(pool)
	t0.Add(a.x, a.y)
	t1 := newGFp2(pool)
	t1.Add(b.x, b.y)
	tz := newGFp2(pool)
	tz.Mul(t0, t1, pool)

	tz.Sub(tz, v1)
	tz.Sub(tz, v2)
	tz.MulXi(tz, pool)
	tz.Add(tz, v0)

	t0.Add(a.y, a.z)
	t1.Add(b.y, b.z)
	ty := newGFp2(pool)
	ty.Mul(t0, t1, pool)
	ty.Sub(ty, v0)
	ty.Sub(ty, v1)
	t0.MulXi(v2, pool)
	ty.Add(ty, t0)

	t0.Add(a.x, a.z)
	t1.Add(b.x, b.z)
	tx := newGFp2(pool)
	tx.Mul(t0, t1, pool)
	tx.Sub(tx, v0)
	tx.Add(tx, v1)
	tx.Sub(tx, v2)

	e.x.Set(tx)
	e.y.Set(ty)
	e.z.Set(tz)

	t0.Put(pool)
	t1.Put(pool)
	tx.Put(pool)
	ty.Put(pool)
	tz.Put(pool)
	v0.Put(pool)
	v1.Put(pool)
	v2.Put(pool)
	return e
}

// MulScalar multiplies every coefficient of a by the gfP2 element b.
func (e *gfP6) MulScalar(a *gfP6, b *gfP2, pool *bnPool) *gfP6 {
	e.x.Mul(a.x, b, pool)
	e.y.Mul(a.y, b, pool)
	e.z.Mul(a.z, b, pool)
	return e
}

// MulGFP multiplies every coefficient of a by the base-field element b.
func (e *gfP6) MulGFP(a *gfP6, b *big.Int) *gfP6 {
	e.x.MulScalar(a.x, b)
	e.y.MulScalar(a.y, b)
	e.z.MulScalar(a.z, b)
	return e
}

// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
func (e *gfP6) MulTau(a *gfP6, pool *bnPool) {
	// Buffer the rotated coefficients so that e may alias a.
	tz := newGFp2(pool)
	tz.MulXi(a.x, pool)
	ty := newGFp2(pool)
	ty.Set(a.y)
	e.y.Set(a.z)
	e.x.Set(ty)
	e.z.Set(tz)
	tz.Put(pool)
	ty.Put(pool)
}

// Square sets e = a² (Chung-Hasan style squaring over the cubic
// extension) and returns e.
func (e *gfP6) Square(a *gfP6, pool *bnPool) *gfP6 {
	v0 := newGFp2(pool).Square(a.z, pool)
	v1 := newGFp2(pool).Square(a.y, pool)
	v2 := newGFp2(pool).Square(a.x, pool)

	c0 := newGFp2(pool).Add(a.x, a.y)
	c0.Square(c0, pool)
	c0.Sub(c0, v1)
	c0.Sub(c0, v2)
	c0.MulXi(c0, pool)
	c0.Add(c0, v0)

	c1 := newGFp2(pool).Add(a.y, a.z)
	c1.Square(c1, pool)
	c1.Sub(c1, v0)
	c1.Sub(c1, v1)
	xiV2 := newGFp2(pool).MulXi(v2, pool)
	c1.Add(c1, xiV2)

	c2 := newGFp2(pool).Add(a.x, a.z)
	c2.Square(c2, pool)
	c2.Sub(c2, v0)
	c2.Add(c2, v1)
	c2.Sub(c2, v2)

	e.x.Set(c2)
	e.y.Set(c1)
	e.z.Set(c0)

	v0.Put(pool)
	v1.Put(pool)
	v2.Put(pool)
	c0.Put(pool)
	c1.Put(pool)
	c2.Put(pool)
	xiV2.Put(pool)

	return e
}

// Invert sets e = a⁻¹ and returns e.
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
//
// Here we can give a short explanation of how it works: let j be a cubic root of
// unity in GF(p²) so that 1+j+j²=0.
// Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = (xτ² + yτ + z)(Cτ²+Bτ+A)
// = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
//
// On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
//
// So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
func (e *gfP6) Invert(a *gfP6, pool *bnPool) *gfP6 {
	t1 := newGFp2(pool)

	A := newGFp2(pool)
	A.Square(a.z, pool)
	t1.Mul(a.x, a.y, pool)
	t1.MulXi(t1, pool)
	A.Sub(A, t1)

	B := newGFp2(pool)
	B.Square(a.x, pool)
	B.MulXi(B, pool)
	t1.Mul(a.y, a.z, pool)
	B.Sub(B, t1)

	C_ := newGFp2(pool)
	C_.Square(a.y, pool)
	t1.Mul(a.x, a.z, pool)
	C_.Sub(C_, t1)

	F := newGFp2(pool)
	F.Mul(C_, a.y, pool)
	F.MulXi(F, pool)
	t1.Mul(A, a.z, pool)
	F.Add(F, t1)
	t1.Mul(B, a.x, pool)
	t1.MulXi(t1, pool)
	F.Add(F, t1)

	F.Invert(F, pool)

	e.x.Mul(C_, F, pool)
	e.y.Mul(B, F, pool)
	e.z.Mul(A, F, pool)

	t1.Put(pool)
	A.Put(pool)
	B.Put(pool)
	C_.Put(pool)
	F.Put(pool)

	return e
}
|
|
|
@ -1,71 +0,0 @@
|
||||||
package bn256

import (
	"crypto/rand"
	"testing"
)

// TestRandomG2Marshal exercises RandomG2 and logs the marshaled points.
func TestRandomG2Marshal(t *testing.T) {
	for i := 0; i < 10; i++ {
		n, g2, err := RandomG2(rand.Reader)
		if err != nil {
			t.Error(err)
			continue
		}
		t.Logf("%d: %x\n", n, g2.Marshal())
	}
}

// TestPairings checks bilinearity, non-degeneracy and the group order of
// the pairing using small scalar multiples of the generators.
func TestPairings(t *testing.T) {
	a1 := new(G1).ScalarBaseMult(bigFromBase10("1"))
	a2 := new(G1).ScalarBaseMult(bigFromBase10("2"))
	a37 := new(G1).ScalarBaseMult(bigFromBase10("37"))
	an1 := new(G1).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))

	b0 := new(G2).ScalarBaseMult(bigFromBase10("0"))
	b1 := new(G2).ScalarBaseMult(bigFromBase10("1"))
	b2 := new(G2).ScalarBaseMult(bigFromBase10("2"))
	b27 := new(G2).ScalarBaseMult(bigFromBase10("27"))
	b999 := new(G2).ScalarBaseMult(bigFromBase10("999"))
	bn1 := new(G2).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))

	p1 := Pair(a1, b1)
	pn1 := Pair(a1, bn1)
	np1 := Pair(an1, b1)
	if pn1.String() != np1.String() {
		t.Error("Pairing mismatch: e(a, -b) != e(-a, b)")
	}
	if !PairingCheck([]*G1{a1, an1}, []*G2{b1, b1}) {
		t.Error("MultiAte check gave false negative!")
	}
	p0 := new(GT).Add(p1, pn1)
	p0_2 := Pair(a1, b0)
	if p0.String() != p0_2.String() {
		t.Error("Pairing mismatch: e(a, b) * e(a, -b) != 1")
	}
	p0_3 := new(GT).ScalarMult(p1, bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617"))
	if p0.String() != p0_3.String() {
		t.Error("Pairing mismatch: e(a, b) has wrong order")
	}
	p2 := Pair(a2, b1)
	p2_2 := Pair(a1, b2)
	p2_3 := new(GT).ScalarMult(p1, bigFromBase10("2"))
	if p2.String() != p2_2.String() {
		t.Error("Pairing mismatch: e(a, b * 2) != e(a * 2, b)")
	}
	if p2.String() != p2_3.String() {
		t.Error("Pairing mismatch: e(a, b * 2) != e(a, b) ** 2")
	}
	if p2.String() == p1.String() {
		t.Error("Pairing is degenerate!")
	}
	if PairingCheck([]*G1{a1, a1}, []*G2{b1, b1}) {
		t.Error("MultiAte check gave false positive!")
	}
	p999 := Pair(a37, b27)
	p999_2 := Pair(a1, b999)
	if p999.String() != p999_2.String() {
		t.Error("Pairing mismatch: e(a * 37, b * 27) != e(a, b * 999)")
	}
}
|
|
|
@ -1,397 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bn256
|
|
||||||
|
|
||||||
func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
|
|
||||||
// See the mixed addition algorithm from "Faster Computation of the
|
|
||||||
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
|
|
||||||
|
|
||||||
B := newGFp2(pool).Mul(p.x, r.t, pool)
|
|
||||||
|
|
||||||
D := newGFp2(pool).Add(p.y, r.z)
|
|
||||||
D.Square(D, pool)
|
|
||||||
D.Sub(D, r2)
|
|
||||||
D.Sub(D, r.t)
|
|
||||||
D.Mul(D, r.t, pool)
|
|
||||||
|
|
||||||
H := newGFp2(pool).Sub(B, r.x)
|
|
||||||
I := newGFp2(pool).Square(H, pool)
|
|
||||||
|
|
||||||
E := newGFp2(pool).Add(I, I)
|
|
||||||
E.Add(E, E)
|
|
||||||
|
|
||||||
J := newGFp2(pool).Mul(H, E, pool)
|
|
||||||
|
|
||||||
L1 := newGFp2(pool).Sub(D, r.y)
|
|
||||||
L1.Sub(L1, r.y)
|
|
||||||
|
|
||||||
V := newGFp2(pool).Mul(r.x, E, pool)
|
|
||||||
|
|
||||||
rOut = newTwistPoint(pool)
|
|
||||||
rOut.x.Square(L1, pool)
|
|
||||||
rOut.x.Sub(rOut.x, J)
|
|
||||||
rOut.x.Sub(rOut.x, V)
|
|
||||||
rOut.x.Sub(rOut.x, V)
|
|
||||||
|
|
||||||
rOut.z.Add(r.z, H)
|
|
||||||
rOut.z.Square(rOut.z, pool)
|
|
||||||
rOut.z.Sub(rOut.z, r.t)
|
|
||||||
rOut.z.Sub(rOut.z, I)
|
|
||||||
|
|
||||||
t := newGFp2(pool).Sub(V, rOut.x)
|
|
||||||
t.Mul(t, L1, pool)
|
|
||||||
t2 := newGFp2(pool).Mul(r.y, J, pool)
|
|
||||||
t2.Add(t2, t2)
|
|
||||||
rOut.y.Sub(t, t2)
|
|
||||||
|
|
||||||
rOut.t.Square(rOut.z, pool)
|
|
||||||
|
|
||||||
t.Add(p.y, rOut.z)
|
|
||||||
t.Square(t, pool)
|
|
||||||
t.Sub(t, r2)
|
|
||||||
t.Sub(t, rOut.t)
|
|
||||||
|
|
||||||
t2.Mul(L1, p.x, pool)
|
|
||||||
t2.Add(t2, t2)
|
|
||||||
a = newGFp2(pool)
|
|
||||||
a.Sub(t2, t)
|
|
||||||
|
|
||||||
c = newGFp2(pool)
|
|
||||||
c.MulScalar(rOut.z, q.y)
|
|
||||||
c.Add(c, c)
|
|
||||||
|
|
||||||
b = newGFp2(pool)
|
|
||||||
b.SetZero()
|
|
||||||
b.Sub(b, L1)
|
|
||||||
b.MulScalar(b, q.x)
|
|
||||||
b.Add(b, b)
|
|
||||||
|
|
||||||
B.Put(pool)
|
|
||||||
D.Put(pool)
|
|
||||||
H.Put(pool)
|
|
||||||
I.Put(pool)
|
|
||||||
E.Put(pool)
|
|
||||||
J.Put(pool)
|
|
||||||
L1.Put(pool)
|
|
||||||
V.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
t2.Put(pool)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func lineFunctionDouble(r *twistPoint, q *curvePoint, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
|
|
||||||
// See the doubling algorithm for a=0 from "Faster Computation of the
|
|
||||||
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
|
|
||||||
|
|
||||||
A := newGFp2(pool).Square(r.x, pool)
|
|
||||||
B := newGFp2(pool).Square(r.y, pool)
|
|
||||||
C_ := newGFp2(pool).Square(B, pool)
|
|
||||||
|
|
||||||
D := newGFp2(pool).Add(r.x, B)
|
|
||||||
D.Square(D, pool)
|
|
||||||
D.Sub(D, A)
|
|
||||||
D.Sub(D, C_)
|
|
||||||
D.Add(D, D)
|
|
||||||
|
|
||||||
E := newGFp2(pool).Add(A, A)
|
|
||||||
E.Add(E, A)
|
|
||||||
|
|
||||||
G := newGFp2(pool).Square(E, pool)
|
|
||||||
|
|
||||||
rOut = newTwistPoint(pool)
|
|
||||||
rOut.x.Sub(G, D)
|
|
||||||
rOut.x.Sub(rOut.x, D)
|
|
||||||
|
|
||||||
rOut.z.Add(r.y, r.z)
|
|
||||||
rOut.z.Square(rOut.z, pool)
|
|
||||||
rOut.z.Sub(rOut.z, B)
|
|
||||||
rOut.z.Sub(rOut.z, r.t)
|
|
||||||
|
|
||||||
rOut.y.Sub(D, rOut.x)
|
|
||||||
rOut.y.Mul(rOut.y, E, pool)
|
|
||||||
t := newGFp2(pool).Add(C_, C_)
|
|
||||||
t.Add(t, t)
|
|
||||||
t.Add(t, t)
|
|
||||||
rOut.y.Sub(rOut.y, t)
|
|
||||||
|
|
||||||
rOut.t.Square(rOut.z, pool)
|
|
||||||
|
|
||||||
t.Mul(E, r.t, pool)
|
|
||||||
t.Add(t, t)
|
|
||||||
b = newGFp2(pool)
|
|
||||||
b.SetZero()
|
|
||||||
b.Sub(b, t)
|
|
||||||
b.MulScalar(b, q.x)
|
|
||||||
|
|
||||||
a = newGFp2(pool)
|
|
||||||
a.Add(r.x, E)
|
|
||||||
a.Square(a, pool)
|
|
||||||
a.Sub(a, A)
|
|
||||||
a.Sub(a, G)
|
|
||||||
t.Add(B, B)
|
|
||||||
t.Add(t, t)
|
|
||||||
a.Sub(a, t)
|
|
||||||
|
|
||||||
c = newGFp2(pool)
|
|
||||||
c.Mul(rOut.z, r.t, pool)
|
|
||||||
c.Add(c, c)
|
|
||||||
c.MulScalar(c, q.y)
|
|
||||||
|
|
||||||
A.Put(pool)
|
|
||||||
B.Put(pool)
|
|
||||||
C_.Put(pool)
|
|
||||||
D.Put(pool)
|
|
||||||
E.Put(pool)
|
|
||||||
G.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func mulLine(ret *gfP12, a, b, c *gfP2, pool *bnPool) {
|
|
||||||
a2 := newGFp6(pool)
|
|
||||||
a2.x.SetZero()
|
|
||||||
a2.y.Set(a)
|
|
||||||
a2.z.Set(b)
|
|
||||||
a2.Mul(a2, ret.x, pool)
|
|
||||||
t3 := newGFp6(pool).MulScalar(ret.y, c, pool)
|
|
||||||
|
|
||||||
t := newGFp2(pool)
|
|
||||||
t.Add(b, c)
|
|
||||||
t2 := newGFp6(pool)
|
|
||||||
t2.x.SetZero()
|
|
||||||
t2.y.Set(a)
|
|
||||||
t2.z.Set(t)
|
|
||||||
ret.x.Add(ret.x, ret.y)
|
|
||||||
|
|
||||||
ret.y.Set(t3)
|
|
||||||
|
|
||||||
ret.x.Mul(ret.x, t2, pool)
|
|
||||||
ret.x.Sub(ret.x, a2)
|
|
||||||
ret.x.Sub(ret.x, ret.y)
|
|
||||||
a2.MulTau(a2, pool)
|
|
||||||
ret.y.Add(ret.y, a2)
|
|
||||||
|
|
||||||
a2.Put(pool)
|
|
||||||
t3.Put(pool)
|
|
||||||
t2.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// sixuPlus2NAF is 6u+2 in non-adjacent form.
|
|
||||||
var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0,
|
|
||||||
0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1,
|
|
||||||
1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1,
|
|
||||||
1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1}
|
|
||||||
|
|
||||||
// miller implements the Miller loop for calculating the Optimal Ate pairing.
|
|
||||||
// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
|
|
||||||
func miller(q *twistPoint, p *curvePoint, pool *bnPool) *gfP12 {
|
|
||||||
ret := newGFp12(pool)
|
|
||||||
ret.SetOne()
|
|
||||||
|
|
||||||
aAffine := newTwistPoint(pool)
|
|
||||||
aAffine.Set(q)
|
|
||||||
aAffine.MakeAffine(pool)
|
|
||||||
|
|
||||||
bAffine := newCurvePoint(pool)
|
|
||||||
bAffine.Set(p)
|
|
||||||
bAffine.MakeAffine(pool)
|
|
||||||
|
|
||||||
minusA := newTwistPoint(pool)
|
|
||||||
minusA.Negative(aAffine, pool)
|
|
||||||
|
|
||||||
r := newTwistPoint(pool)
|
|
||||||
r.Set(aAffine)
|
|
||||||
|
|
||||||
r2 := newGFp2(pool)
|
|
||||||
r2.Square(aAffine.y, pool)
|
|
||||||
|
|
||||||
for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
|
|
||||||
a, b, c, newR := lineFunctionDouble(r, bAffine, pool)
|
|
||||||
if i != len(sixuPlus2NAF)-1 {
|
|
||||||
ret.Square(ret, pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
mulLine(ret, a, b, c, pool)
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
c.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
r = newR
|
|
||||||
|
|
||||||
switch sixuPlus2NAF[i-1] {
|
|
||||||
case 1:
|
|
||||||
a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2, pool)
|
|
||||||
case -1:
|
|
||||||
a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2, pool)
|
|
||||||
default:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
mulLine(ret, a, b, c, pool)
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
c.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
r = newR
|
|
||||||
}
|
|
||||||
|
|
||||||
// In order to calculate Q1 we have to convert q from the sextic twist
|
|
||||||
// to the full GF(p^12) group, apply the Frobenius there, and convert
|
|
||||||
// back.
|
|
||||||
//
|
|
||||||
// The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
|
|
||||||
// x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
|
|
||||||
// where x̄ is the conjugate of x. If we are going to apply the inverse
|
|
||||||
// isomorphism we need a value with a single coefficient of ω² so we
|
|
||||||
// rewrite this as x̄ω^(2p-2)ω². ξ⁶ = ω and, due to the construction of
|
|
||||||
// p, 2p-2 is a multiple of six. Therefore we can rewrite as
|
|
||||||
// x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
|
|
||||||
// ω².
|
|
||||||
//
|
|
||||||
// A similar argument can be made for the y value.
|
|
||||||
|
|
||||||
q1 := newTwistPoint(pool)
|
|
||||||
q1.x.Conjugate(aAffine.x)
|
|
||||||
q1.x.Mul(q1.x, xiToPMinus1Over3, pool)
|
|
||||||
q1.y.Conjugate(aAffine.y)
|
|
||||||
q1.y.Mul(q1.y, xiToPMinus1Over2, pool)
|
|
||||||
q1.z.SetOne()
|
|
||||||
q1.t.SetOne()
|
|
||||||
|
|
||||||
// For Q2 we are applying the p² Frobenius. The two conjugations cancel
|
|
||||||
// out and we are left only with the factors from the isomorphism. In
|
|
||||||
// the case of x, we end up with a pure number which is why
|
|
||||||
// xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We
|
|
||||||
// ignore this to end up with -Q2.
|
|
||||||
|
|
||||||
minusQ2 := newTwistPoint(pool)
|
|
||||||
minusQ2.x.MulScalar(aAffine.x, xiToPSquaredMinus1Over3)
|
|
||||||
minusQ2.y.Set(aAffine.y)
|
|
||||||
minusQ2.z.SetOne()
|
|
||||||
minusQ2.t.SetOne()
|
|
||||||
|
|
||||||
r2.Square(q1.y, pool)
|
|
||||||
a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2, pool)
|
|
||||||
mulLine(ret, a, b, c, pool)
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
c.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
r = newR
|
|
||||||
|
|
||||||
r2.Square(minusQ2.y, pool)
|
|
||||||
a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2, pool)
|
|
||||||
mulLine(ret, a, b, c, pool)
|
|
||||||
a.Put(pool)
|
|
||||||
b.Put(pool)
|
|
||||||
c.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
r = newR
|
|
||||||
|
|
||||||
aAffine.Put(pool)
|
|
||||||
bAffine.Put(pool)
|
|
||||||
minusA.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
r2.Put(pool)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
|
|
||||||
// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
|
|
||||||
// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
|
|
||||||
func finalExponentiation(in *gfP12, pool *bnPool) *gfP12 {
|
|
||||||
t1 := newGFp12(pool)
|
|
||||||
|
|
||||||
// This is the p^6-Frobenius
|
|
||||||
t1.x.Negative(in.x)
|
|
||||||
t1.y.Set(in.y)
|
|
||||||
|
|
||||||
inv := newGFp12(pool)
|
|
||||||
inv.Invert(in, pool)
|
|
||||||
t1.Mul(t1, inv, pool)
|
|
||||||
|
|
||||||
t2 := newGFp12(pool).FrobeniusP2(t1, pool)
|
|
||||||
t1.Mul(t1, t2, pool)
|
|
||||||
|
|
||||||
fp := newGFp12(pool).Frobenius(t1, pool)
|
|
||||||
fp2 := newGFp12(pool).FrobeniusP2(t1, pool)
|
|
||||||
fp3 := newGFp12(pool).Frobenius(fp2, pool)
|
|
||||||
|
|
||||||
fu, fu2, fu3 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
|
|
||||||
fu.Exp(t1, u, pool)
|
|
||||||
fu2.Exp(fu, u, pool)
|
|
||||||
fu3.Exp(fu2, u, pool)
|
|
||||||
|
|
||||||
y3 := newGFp12(pool).Frobenius(fu, pool)
|
|
||||||
fu2p := newGFp12(pool).Frobenius(fu2, pool)
|
|
||||||
fu3p := newGFp12(pool).Frobenius(fu3, pool)
|
|
||||||
y2 := newGFp12(pool).FrobeniusP2(fu2, pool)
|
|
||||||
|
|
||||||
y0 := newGFp12(pool)
|
|
||||||
y0.Mul(fp, fp2, pool)
|
|
||||||
y0.Mul(y0, fp3, pool)
|
|
||||||
|
|
||||||
y1, y4, y5 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
|
|
||||||
y1.Conjugate(t1)
|
|
||||||
y5.Conjugate(fu2)
|
|
||||||
y3.Conjugate(y3)
|
|
||||||
y4.Mul(fu, fu2p, pool)
|
|
||||||
y4.Conjugate(y4)
|
|
||||||
|
|
||||||
y6 := newGFp12(pool)
|
|
||||||
y6.Mul(fu3, fu3p, pool)
|
|
||||||
y6.Conjugate(y6)
|
|
||||||
|
|
||||||
t0 := newGFp12(pool)
|
|
||||||
t0.Square(y6, pool)
|
|
||||||
t0.Mul(t0, y4, pool)
|
|
||||||
t0.Mul(t0, y5, pool)
|
|
||||||
t1.Mul(y3, y5, pool)
|
|
||||||
t1.Mul(t1, t0, pool)
|
|
||||||
t0.Mul(t0, y2, pool)
|
|
||||||
t1.Square(t1, pool)
|
|
||||||
t1.Mul(t1, t0, pool)
|
|
||||||
t1.Square(t1, pool)
|
|
||||||
t0.Mul(t1, y1, pool)
|
|
||||||
t1.Mul(t1, y0, pool)
|
|
||||||
t0.Square(t0, pool)
|
|
||||||
t0.Mul(t0, t1, pool)
|
|
||||||
|
|
||||||
inv.Put(pool)
|
|
||||||
t1.Put(pool)
|
|
||||||
t2.Put(pool)
|
|
||||||
fp.Put(pool)
|
|
||||||
fp2.Put(pool)
|
|
||||||
fp3.Put(pool)
|
|
||||||
fu.Put(pool)
|
|
||||||
fu2.Put(pool)
|
|
||||||
fu3.Put(pool)
|
|
||||||
fu2p.Put(pool)
|
|
||||||
fu3p.Put(pool)
|
|
||||||
y0.Put(pool)
|
|
||||||
y1.Put(pool)
|
|
||||||
y2.Put(pool)
|
|
||||||
y3.Put(pool)
|
|
||||||
y4.Put(pool)
|
|
||||||
y5.Put(pool)
|
|
||||||
y6.Put(pool)
|
|
||||||
|
|
||||||
return t0
|
|
||||||
}
|
|
||||||
|
|
||||||
func optimalAte(a *twistPoint, b *curvePoint, pool *bnPool) *gfP12 {
|
|
||||||
e := miller(a, b, pool)
|
|
||||||
ret := finalExponentiation(e, pool)
|
|
||||||
e.Put(pool)
|
|
||||||
|
|
||||||
if a.IsInfinity() || b.IsInfinity() {
|
|
||||||
ret.SetOne()
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
|
@ -1,249 +0,0 @@
|
||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package bn256
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
|
|
||||||
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
|
|
||||||
// n-torsion points of this curve over GF(p²) (where n = Order)
|
|
||||||
type twistPoint struct {
|
|
||||||
x, y, z, t *gfP2
|
|
||||||
}
|
|
||||||
|
|
||||||
var twistB = &gfP2{
|
|
||||||
bigFromBase10("266929791119991161246907387137283842545076965332900288569378510910307636690"),
|
|
||||||
bigFromBase10("19485874751759354771024239261021720505790618469301721065564631296452457478373"),
|
|
||||||
}
|
|
||||||
|
|
||||||
// twistGen is the generator of group G₂.
|
|
||||||
var twistGen = &twistPoint{
|
|
||||||
&gfP2{
|
|
||||||
bigFromBase10("11559732032986387107991004021392285783925812861821192530917403151452391805634"),
|
|
||||||
bigFromBase10("10857046999023057135944570762232829481370756359578518086990519993285655852781"),
|
|
||||||
},
|
|
||||||
&gfP2{
|
|
||||||
bigFromBase10("4082367875863433681332203403145435568316851327593401208105741076214120093531"),
|
|
||||||
bigFromBase10("8495653923123431417604973247489272438418190587263600148770280649306958101930"),
|
|
||||||
},
|
|
||||||
&gfP2{
|
|
||||||
bigFromBase10("0"),
|
|
||||||
bigFromBase10("1"),
|
|
||||||
},
|
|
||||||
&gfP2{
|
|
||||||
bigFromBase10("0"),
|
|
||||||
bigFromBase10("1"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTwistPoint(pool *bnPool) *twistPoint {
|
|
||||||
return &twistPoint{
|
|
||||||
newGFp2(pool),
|
|
||||||
newGFp2(pool),
|
|
||||||
newGFp2(pool),
|
|
||||||
newGFp2(pool),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) String() string {
|
|
||||||
return "(" + c.x.String() + ", " + c.y.String() + ", " + c.z.String() + ")"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Put(pool *bnPool) {
|
|
||||||
c.x.Put(pool)
|
|
||||||
c.y.Put(pool)
|
|
||||||
c.z.Put(pool)
|
|
||||||
c.t.Put(pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Set(a *twistPoint) {
|
|
||||||
c.x.Set(a.x)
|
|
||||||
c.y.Set(a.y)
|
|
||||||
c.z.Set(a.z)
|
|
||||||
c.t.Set(a.t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsOnCurve returns true iff c is on the curve where c must be in affine form.
|
|
||||||
func (c *twistPoint) IsOnCurve() bool {
|
|
||||||
pool := new(bnPool)
|
|
||||||
yy := newGFp2(pool).Square(c.y, pool)
|
|
||||||
xxx := newGFp2(pool).Square(c.x, pool)
|
|
||||||
xxx.Mul(xxx, c.x, pool)
|
|
||||||
yy.Sub(yy, xxx)
|
|
||||||
yy.Sub(yy, twistB)
|
|
||||||
yy.Minimal()
|
|
||||||
return yy.x.Sign() == 0 && yy.y.Sign() == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) SetInfinity() {
|
|
||||||
c.z.SetZero()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) IsInfinity() bool {
|
|
||||||
return c.z.IsZero()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) {
|
|
||||||
// For additional comments, see the same function in curve.go.
|
|
||||||
|
|
||||||
if a.IsInfinity() {
|
|
||||||
c.Set(b)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if b.IsInfinity() {
|
|
||||||
c.Set(a)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
|
|
||||||
z1z1 := newGFp2(pool).Square(a.z, pool)
|
|
||||||
z2z2 := newGFp2(pool).Square(b.z, pool)
|
|
||||||
u1 := newGFp2(pool).Mul(a.x, z2z2, pool)
|
|
||||||
u2 := newGFp2(pool).Mul(b.x, z1z1, pool)
|
|
||||||
|
|
||||||
t := newGFp2(pool).Mul(b.z, z2z2, pool)
|
|
||||||
s1 := newGFp2(pool).Mul(a.y, t, pool)
|
|
||||||
|
|
||||||
t.Mul(a.z, z1z1, pool)
|
|
||||||
s2 := newGFp2(pool).Mul(b.y, t, pool)
|
|
||||||
|
|
||||||
h := newGFp2(pool).Sub(u2, u1)
|
|
||||||
xEqual := h.IsZero()
|
|
||||||
|
|
||||||
t.Add(h, h)
|
|
||||||
i := newGFp2(pool).Square(t, pool)
|
|
||||||
j := newGFp2(pool).Mul(h, i, pool)
|
|
||||||
|
|
||||||
t.Sub(s2, s1)
|
|
||||||
yEqual := t.IsZero()
|
|
||||||
if xEqual && yEqual {
|
|
||||||
c.Double(a, pool)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r := newGFp2(pool).Add(t, t)
|
|
||||||
|
|
||||||
v := newGFp2(pool).Mul(u1, i, pool)
|
|
||||||
|
|
||||||
t4 := newGFp2(pool).Square(r, pool)
|
|
||||||
t.Add(v, v)
|
|
||||||
t6 := newGFp2(pool).Sub(t4, j)
|
|
||||||
c.x.Sub(t6, t)
|
|
||||||
|
|
||||||
t.Sub(v, c.x) // t7
|
|
||||||
t4.Mul(s1, j, pool) // t8
|
|
||||||
t6.Add(t4, t4) // t9
|
|
||||||
t4.Mul(r, t, pool) // t10
|
|
||||||
c.y.Sub(t4, t6)
|
|
||||||
|
|
||||||
t.Add(a.z, b.z) // t11
|
|
||||||
t4.Square(t, pool) // t12
|
|
||||||
t.Sub(t4, z1z1) // t13
|
|
||||||
t4.Sub(t, z2z2) // t14
|
|
||||||
c.z.Mul(t4, h, pool)
|
|
||||||
|
|
||||||
z1z1.Put(pool)
|
|
||||||
z2z2.Put(pool)
|
|
||||||
u1.Put(pool)
|
|
||||||
u2.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
s1.Put(pool)
|
|
||||||
s2.Put(pool)
|
|
||||||
h.Put(pool)
|
|
||||||
i.Put(pool)
|
|
||||||
j.Put(pool)
|
|
||||||
r.Put(pool)
|
|
||||||
v.Put(pool)
|
|
||||||
t4.Put(pool)
|
|
||||||
t6.Put(pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Double(a *twistPoint, pool *bnPool) {
|
|
||||||
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
|
|
||||||
A := newGFp2(pool).Square(a.x, pool)
|
|
||||||
B := newGFp2(pool).Square(a.y, pool)
|
|
||||||
C_ := newGFp2(pool).Square(B, pool)
|
|
||||||
|
|
||||||
t := newGFp2(pool).Add(a.x, B)
|
|
||||||
t2 := newGFp2(pool).Square(t, pool)
|
|
||||||
t.Sub(t2, A)
|
|
||||||
t2.Sub(t, C_)
|
|
||||||
d := newGFp2(pool).Add(t2, t2)
|
|
||||||
t.Add(A, A)
|
|
||||||
e := newGFp2(pool).Add(t, A)
|
|
||||||
f := newGFp2(pool).Square(e, pool)
|
|
||||||
|
|
||||||
t.Add(d, d)
|
|
||||||
c.x.Sub(f, t)
|
|
||||||
|
|
||||||
t.Add(C_, C_)
|
|
||||||
t2.Add(t, t)
|
|
||||||
t.Add(t2, t2)
|
|
||||||
c.y.Sub(d, c.x)
|
|
||||||
t2.Mul(e, c.y, pool)
|
|
||||||
c.y.Sub(t2, t)
|
|
||||||
|
|
||||||
t.Mul(a.y, a.z, pool)
|
|
||||||
c.z.Add(t, t)
|
|
||||||
|
|
||||||
A.Put(pool)
|
|
||||||
B.Put(pool)
|
|
||||||
C_.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
t2.Put(pool)
|
|
||||||
d.Put(pool)
|
|
||||||
e.Put(pool)
|
|
||||||
f.Put(pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint {
|
|
||||||
sum := newTwistPoint(pool)
|
|
||||||
sum.SetInfinity()
|
|
||||||
t := newTwistPoint(pool)
|
|
||||||
|
|
||||||
for i := scalar.BitLen(); i >= 0; i-- {
|
|
||||||
t.Double(sum, pool)
|
|
||||||
if scalar.Bit(i) != 0 {
|
|
||||||
sum.Add(t, a, pool)
|
|
||||||
} else {
|
|
||||||
sum.Set(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Set(sum)
|
|
||||||
sum.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
|
|
||||||
if c.z.IsOne() {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
zInv := newGFp2(pool).Invert(c.z, pool)
|
|
||||||
t := newGFp2(pool).Mul(c.y, zInv, pool)
|
|
||||||
zInv2 := newGFp2(pool).Square(zInv, pool)
|
|
||||||
c.y.Mul(t, zInv2, pool)
|
|
||||||
t.Mul(c.x, zInv2, pool)
|
|
||||||
c.x.Set(t)
|
|
||||||
c.z.SetOne()
|
|
||||||
c.t.SetOne()
|
|
||||||
|
|
||||||
zInv.Put(pool)
|
|
||||||
t.Put(pool)
|
|
||||||
zInv2.Put(pool)
|
|
||||||
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) {
|
|
||||||
c.x.Set(a.x)
|
|
||||||
c.y.SetZero()
|
|
||||||
c.y.Sub(c.y, a.y)
|
|
||||||
c.z.Set(a.z)
|
|
||||||
c.t.SetZero()
|
|
||||||
}
|
|
|
@ -188,7 +188,7 @@ func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
// Frontier: allow s to be in full N range
|
// Frontier: allow s to be in full N range
|
||||||
return r.Cmp(secp256k1_N) < 0 && s.Cmp(secp256k1_N) < 0 && (v == 0 || v == 1 || v == 10 || v == 11)
|
return r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0 && (v == 0 || v == 1 || v == 10 || v == 11)
|
||||||
}
|
}
|
||||||
|
|
||||||
func PubkeyToAddress(p ecdsa.PublicKey) common.Address {
|
func PubkeyToAddress(p ecdsa.PublicKey) common.Address {
|
||||||
|
|
213
eth/api.go
213
eth/api.go
|
@ -17,17 +17,14 @@
|
||||||
package eth
|
package eth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
"compress/gzip"
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
@ -45,8 +42,6 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultTraceTimeout = 5 * time.Second
|
|
||||||
|
|
||||||
// PublicEthereumAPI provides an API to access Ethereum full node-related
|
// PublicEthereumAPI provides an API to access Ethereum full node-related
|
||||||
// information.
|
// information.
|
||||||
type PublicEthereumAPI struct {
|
type PublicEthereumAPI struct {
|
||||||
|
@ -409,7 +404,7 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeRes
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return StorageRangeResult{}, err
|
return StorageRangeResult{}, err
|
||||||
}
|
}
|
||||||
e := storageEntry{Value: common.BytesToHash(it.Value)}
|
e := storageEntry{Value: common.BytesToHash(content)}
|
||||||
if preimage := st.GetKey(it.Key); preimage != nil {
|
if preimage := st.GetKey(it.Key); preimage != nil {
|
||||||
preimage := common.BytesToHash(preimage)
|
preimage := common.BytesToHash(preimage)
|
||||||
e.Key = &preimage
|
e.Key = &preimage
|
||||||
|
@ -421,7 +416,7 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeRes
|
||||||
next := common.BytesToHash(it.Key)
|
next := common.BytesToHash(it.Key)
|
||||||
result.NextKey = &next
|
result.NextKey = &next
|
||||||
}
|
}
|
||||||
return result
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetModifiedAccountsByumber returns all accounts that have changed between the
|
// GetModifiedAccountsByumber returns all accounts that have changed between the
|
||||||
|
@ -490,7 +485,7 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
|
||||||
}
|
}
|
||||||
newTrie, err := trie.NewSecure(endBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0)
|
newTrie, err := trie.NewSecure(endBlock.Root(), trie.NewDatabase(api.eth.chainDb), 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, vm.Context{}, nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
|
diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
|
||||||
|
@ -522,108 +517,6 @@ type TraceArgs struct {
|
||||||
Timeout *string
|
Timeout *string
|
||||||
}
|
}
|
||||||
|
|
||||||
// TraceBlock processes the given block'api RLP but does not import the block in to
|
|
||||||
// the chain.
|
|
||||||
func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) BlockTraceResult {
|
|
||||||
var block types.Block
|
|
||||||
err := rlp.Decode(bytes.NewReader(blockRlp), &block)
|
|
||||||
if err != nil {
|
|
||||||
return BlockTraceResult{Error: fmt.Sprintf("could not decode block: %v", err)}
|
|
||||||
}
|
|
||||||
|
|
||||||
validated, logs, err := api.traceBlock(&block, config)
|
|
||||||
return BlockTraceResult{
|
|
||||||
Validated: validated,
|
|
||||||
StructLogs: ethapi.FormatLogs(logs),
|
|
||||||
Error: formatError(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TraceBlockFromFile loads the block'api RLP from the given file name and attempts to
|
|
||||||
// process it but does not import the block in to the chain.
|
|
||||||
func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.LogConfig) BlockTraceResult {
|
|
||||||
blockRlp, err := ioutil.ReadFile(file)
|
|
||||||
if err != nil {
|
|
||||||
return BlockTraceResult{Error: fmt.Sprintf("could not read file: %v", err)}
|
|
||||||
}
|
|
||||||
return api.TraceBlock(blockRlp, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TraceBlockByNumber processes the block by canonical block number.
|
|
||||||
func (api *PrivateDebugAPI) TraceBlockByNumber(blockNr rpc.BlockNumber, config *vm.LogConfig) BlockTraceResult {
|
|
||||||
// Fetch the block that we aim to reprocess
|
|
||||||
var block *types.Block
|
|
||||||
switch blockNr {
|
|
||||||
case rpc.PendingBlockNumber:
|
|
||||||
// Pending block is only known by the miner
|
|
||||||
block = api.eth.miner.PendingBlock()
|
|
||||||
case rpc.LatestBlockNumber:
|
|
||||||
block = api.eth.blockchain.CurrentBlock()
|
|
||||||
default:
|
|
||||||
block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
|
|
||||||
}
|
|
||||||
|
|
||||||
if block == nil {
|
|
||||||
return BlockTraceResult{Error: fmt.Sprintf("block #%d not found", blockNr)}
|
|
||||||
}
|
|
||||||
|
|
||||||
validated, logs, err := api.traceBlock(block, config)
|
|
||||||
return BlockTraceResult{
|
|
||||||
Validated: validated,
|
|
||||||
StructLogs: ethapi.FormatLogs(logs),
|
|
||||||
Error: formatError(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TraceBlockByHash processes the block by hash.
|
|
||||||
func (api *PrivateDebugAPI) TraceBlockByHash(hash common.Hash, config *vm.LogConfig) BlockTraceResult {
|
|
||||||
// Fetch the block that we aim to reprocess
|
|
||||||
block := api.eth.BlockChain().GetBlockByHash(hash)
|
|
||||||
if block == nil {
|
|
||||||
return BlockTraceResult{Error: fmt.Sprintf("block #%x not found", hash)}
|
|
||||||
}
|
|
||||||
|
|
||||||
validated, logs, err := api.traceBlock(block, config)
|
|
||||||
return BlockTraceResult{
|
|
||||||
Validated: validated,
|
|
||||||
StructLogs: ethapi.FormatLogs(logs),
|
|
||||||
Error: formatError(err),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// traceBlock processes the given block but does not save the state.
|
|
||||||
func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConfig) (bool, []vm.StructLog, error) {
|
|
||||||
// Validate and reprocess the block
|
|
||||||
var (
|
|
||||||
blockchain = api.eth.BlockChain()
|
|
||||||
validator = blockchain.Validator()
|
|
||||||
processor = blockchain.Processor()
|
|
||||||
)
|
|
||||||
|
|
||||||
structLogger := vm.NewStructLogger(logConfig)
|
|
||||||
|
|
||||||
config := vm.Config{
|
|
||||||
Debug: true,
|
|
||||||
Tracer: structLogger,
|
|
||||||
}
|
|
||||||
if err := api.eth.engine.VerifyHeader(blockchain, block.Header(), true); err != nil {
|
|
||||||
return false, structLogger.StructLogs(), err
|
|
||||||
}
|
|
||||||
statedb, privateStateDb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
|
|
||||||
if err != nil {
|
|
||||||
return false, structLogger.StructLogs(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
receipts, _, _, usedGas, err := processor.Process(block, statedb, privateStateDb, config)
|
|
||||||
if err != nil {
|
|
||||||
return false, structLogger.StructLogs(), err
|
|
||||||
}
|
|
||||||
if err := validator.ValidateState(block, blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1), statedb, receipts, usedGas); err != nil {
|
|
||||||
return false, structLogger.StructLogs(), err
|
|
||||||
}
|
|
||||||
return true, structLogger.StructLogs(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatError formats a Go error into either an empty string or the data content
|
// formatError formats a Go error into either an empty string or the data content
|
||||||
// of the error itself.
|
// of the error itself.
|
||||||
func formatError(err error) string {
|
func formatError(err error) string {
|
||||||
|
@ -638,103 +531,3 @@ type timeoutError struct{}
|
||||||
func (t *timeoutError) Error() string {
|
func (t *timeoutError) Error() string {
|
||||||
return "Execution time exceeded"
|
return "Execution time exceeded"
|
||||||
}
|
}
|
||||||
|
|
||||||
// TraceTransaction returns the structured logs created during the execution of EVM
|
|
||||||
// and returns them as a JSON object.
|
|
||||||
func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.Hash, config *TraceArgs) (interface{}, error) {
|
|
||||||
var tracer vm.Tracer
|
|
||||||
if config != nil && config.Tracer != nil {
|
|
||||||
timeout := defaultTraceTimeout
|
|
||||||
if config.Timeout != nil {
|
|
||||||
var err error
|
|
||||||
if timeout, err = time.ParseDuration(*config.Timeout); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
if tracer, err = ethapi.NewJavascriptTracer(*config.Tracer); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle timeouts and RPC cancellations
|
|
||||||
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
|
|
||||||
go func() {
|
|
||||||
<-deadlineCtx.Done()
|
|
||||||
tracer.(*ethapi.JavascriptTracer).Stop(&timeoutError{})
|
|
||||||
}()
|
|
||||||
defer cancel()
|
|
||||||
} else if config == nil {
|
|
||||||
tracer = vm.NewStructLogger(nil)
|
|
||||||
} else {
|
|
||||||
tracer = vm.NewStructLogger(config.LogConfig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve the tx from the chain and the containing block
|
|
||||||
tx, blockHash, _, txIndex := core.GetTransaction(api.eth.ChainDb(), txHash)
|
|
||||||
if tx == nil {
|
|
||||||
return nil, fmt.Errorf("transaction %x not found", txHash)
|
|
||||||
}
|
|
||||||
msg, context, statedb, privateStateDb, err := api.computeTxEnv(blockHash, int(txIndex))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the transaction with tracing enabled.
|
|
||||||
vmenv := vm.NewEVM(context, statedb, privateStateDb, api.config, vm.Config{Debug: true, Tracer: tracer})
|
|
||||||
ret, gas, failed, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas()))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("tracing failed: %v", err)
|
|
||||||
}
|
|
||||||
switch tracer := tracer.(type) {
|
|
||||||
case *vm.StructLogger:
|
|
||||||
return ðapi.ExecutionResult{
|
|
||||||
Gas: gas,
|
|
||||||
Failed: failed,
|
|
||||||
ReturnValue: fmt.Sprintf("%x", ret),
|
|
||||||
StructLogs: ethapi.FormatLogs(tracer.StructLogs()),
|
|
||||||
}, nil
|
|
||||||
case *ethapi.JavascriptTracer:
|
|
||||||
return tracer.GetResult()
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("bad tracer type %T", tracer))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// computeTxEnv returns the execution environment of a certain transaction.
|
|
||||||
func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int) (core.Message, vm.Context, *state.StateDB, *state.StateDB, error) {
|
|
||||||
// Create the parent state.
|
|
||||||
block := api.eth.BlockChain().GetBlockByHash(blockHash)
|
|
||||||
if block == nil {
|
|
||||||
return nil, vm.Context{}, nil, nil, fmt.Errorf("block %x not found", blockHash)
|
|
||||||
}
|
|
||||||
parent := api.eth.BlockChain().GetBlock(block.ParentHash(), block.NumberU64()-1)
|
|
||||||
if parent == nil {
|
|
||||||
return nil, vm.Context{}, nil, nil, fmt.Errorf("block parent %x not found", block.ParentHash())
|
|
||||||
}
|
|
||||||
statedb, privateStateDb, err := api.eth.BlockChain().StateAt(parent.Root())
|
|
||||||
if err != nil {
|
|
||||||
return nil, vm.Context{}, nil, nil, err
|
|
||||||
}
|
|
||||||
txs := block.Transactions()
|
|
||||||
|
|
||||||
// Recompute transactions up to the target index.
|
|
||||||
signer := types.MakeSigner(api.config, block.Number())
|
|
||||||
for idx, tx := range txs {
|
|
||||||
// Assemble the transaction call message
|
|
||||||
msg, _ := tx.AsMessage(signer)
|
|
||||||
context := core.NewEVMContext(msg, block.Header(), api.eth.BlockChain(), nil)
|
|
||||||
if idx == txIndex {
|
|
||||||
return msg, context, statedb, privateStateDb, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
vmenv := vm.NewEVM(context, statedb, privateStateDb, api.config, vm.Config{})
|
|
||||||
gp := new(core.GasPool).AddGas(tx.Gas())
|
|
||||||
_, _, _, err := core.ApplyMessage(vmenv, msg, gp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, vm.Context{}, nil, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
|
|
||||||
}
|
|
||||||
statedb.DeleteSuicides()
|
|
||||||
}
|
|
||||||
return nil, vm.Context{}, nil, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
|
|
||||||
}
|
|
||||||
|
|
|
@ -70,6 +70,7 @@ type txTraceResult struct {
|
||||||
// being traced.
|
// being traced.
|
||||||
type blockTraceTask struct {
|
type blockTraceTask struct {
|
||||||
statedb *state.StateDB // Intermediate state prepped for tracing
|
statedb *state.StateDB // Intermediate state prepped for tracing
|
||||||
|
privateStateDb *state.StateDB // Quorum
|
||||||
block *types.Block // Block to trace the transactions from
|
block *types.Block // Block to trace the transactions from
|
||||||
rootref common.Hash // Trie root reference held for this task
|
rootref common.Hash // Trie root reference held for this task
|
||||||
results []*txTraceResult // Trace results procudes by the task
|
results []*txTraceResult // Trace results procudes by the task
|
||||||
|
@ -87,6 +88,7 @@ type blockTraceResult struct {
|
||||||
// is being traced.
|
// is being traced.
|
||||||
type txTraceTask struct {
|
type txTraceTask struct {
|
||||||
statedb *state.StateDB // Intermediate state prepped for tracing
|
statedb *state.StateDB // Intermediate state prepped for tracing
|
||||||
|
privateStateDb *state.StateDB
|
||||||
index int // Transaction offset in the block
|
index int // Transaction offset in the block
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -143,7 +145,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
||||||
return nil, fmt.Errorf("parent block #%d not found", number-1)
|
return nil, fmt.Errorf("parent block #%d not found", number-1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
statedb, err := state.New(start.Root(), database)
|
statedb, privateStateDb, err := api.eth.blockchain.StateAt(start.Root())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If the starting state is missing, allow some number of blocks to be reexecuted
|
// If the starting state is missing, allow some number of blocks to be reexecuted
|
||||||
reexec := defaultTraceReexec
|
reexec := defaultTraceReexec
|
||||||
|
@ -156,7 +158,8 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
||||||
if start == nil {
|
if start == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if statedb, err = state.New(start.Root(), database); err == nil {
|
statedb, privateStateDb, err = api.eth.blockchain.StateAt(start.Root())
|
||||||
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -196,13 +199,14 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
||||||
msg, _ := tx.AsMessage(signer)
|
msg, _ := tx.AsMessage(signer)
|
||||||
vmctx := core.NewEVMContext(msg, task.block.Header(), api.eth.blockchain, nil)
|
vmctx := core.NewEVMContext(msg, task.block.Header(), api.eth.blockchain, nil)
|
||||||
|
|
||||||
res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config)
|
res, err := api.traceTx(ctx, msg, vmctx, task.statedb, task.privateStateDb, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
task.results[i] = &txTraceResult{Error: err.Error()}
|
task.results[i] = &txTraceResult{Error: err.Error()}
|
||||||
log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
|
log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
task.statedb.Finalise(true)
|
task.statedb.Finalise(true)
|
||||||
|
task.privateStateDb.Finalise(true)
|
||||||
task.results[i] = &txTraceResult{Result: res}
|
task.results[i] = &txTraceResult{Result: res}
|
||||||
}
|
}
|
||||||
// Stream the result back to the user or abort on teardown
|
// Stream the result back to the user or abort on teardown
|
||||||
|
@ -268,14 +272,14 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
||||||
txs := block.Transactions()
|
txs := block.Transactions()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: block, rootref: proot, results: make([]*txTraceResult, len(txs))}:
|
case tasks <- &blockTraceTask{statedb: statedb.Copy(), privateStateDb: privateStateDb.Copy(), block: block, rootref: proot, results: make([]*txTraceResult, len(txs))}:
|
||||||
case <-notifier.Closed():
|
case <-notifier.Closed():
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
traced += uint64(len(txs))
|
traced += uint64(len(txs))
|
||||||
}
|
}
|
||||||
// Generate the next state snapshot fast without tracing
|
// Generate the next state snapshot fast without tracing
|
||||||
_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
|
_, _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, privateStateDb, vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
failed = err
|
failed = err
|
||||||
break
|
break
|
||||||
|
@ -290,6 +294,16 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
|
||||||
failed = err
|
failed = err
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
privateStateRoot, err := privateStateDb.Commit(true)
|
||||||
|
if err != nil {
|
||||||
|
failed = err
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err := privateStateDb.Reset(privateStateRoot); err != nil {
|
||||||
|
failed = err
|
||||||
|
break
|
||||||
|
}
|
||||||
// Reference the trie twice, once for us, once for the trancer
|
// Reference the trie twice, once for us, once for the trancer
|
||||||
database.TrieDB().Reference(root, common.Hash{})
|
database.TrieDB().Reference(root, common.Hash{})
|
||||||
if number >= origin {
|
if number >= origin {
|
||||||
|
@ -399,7 +413,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
|
||||||
if config != nil && config.Reexec != nil {
|
if config != nil && config.Reexec != nil {
|
||||||
reexec = *config.Reexec
|
reexec = *config.Reexec
|
||||||
}
|
}
|
||||||
statedb, err := api.computeStateDB(parent, reexec)
|
statedb, privateStateDb, err := api.computeStateDB(parent, reexec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -427,7 +441,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
|
||||||
msg, _ := txs[task.index].AsMessage(signer)
|
msg, _ := txs[task.index].AsMessage(signer)
|
||||||
vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
||||||
|
|
||||||
res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config)
|
res, err := api.traceTx(ctx, msg, vmctx, task.statedb, task.privateStateDb, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
results[task.index] = &txTraceResult{Error: err.Error()}
|
results[task.index] = &txTraceResult{Error: err.Error()}
|
||||||
continue
|
continue
|
||||||
|
@ -440,19 +454,24 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
|
||||||
var failed error
|
var failed error
|
||||||
for i, tx := range txs {
|
for i, tx := range txs {
|
||||||
// Send the trace task over for execution
|
// Send the trace task over for execution
|
||||||
jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
|
jobs <- &txTraceTask{
|
||||||
|
statedb: statedb.Copy(),
|
||||||
|
privateStateDb: privateStateDb.Copy(),
|
||||||
|
index: i,
|
||||||
|
}
|
||||||
|
|
||||||
// Generate the next state snapshot fast without tracing
|
// Generate the next state snapshot fast without tracing
|
||||||
msg, _ := tx.AsMessage(signer)
|
msg, _ := tx.AsMessage(signer)
|
||||||
vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
||||||
|
|
||||||
vmenv := vm.NewEVM(vmctx, statedb, api.config, vm.Config{})
|
vmenv := vm.NewEVM(vmctx, statedb, privateStateDb, api.config, vm.Config{})
|
||||||
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
|
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
|
||||||
failed = err
|
failed = err
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// Finalize the state so any modifications are written to the trie
|
// Finalize the state so any modifications are written to the trie
|
||||||
statedb.Finalise(true)
|
statedb.Finalise(true)
|
||||||
|
privateStateDb.Finalise(true)
|
||||||
}
|
}
|
||||||
close(jobs)
|
close(jobs)
|
||||||
pend.Wait()
|
pend.Wait()
|
||||||
|
@ -467,11 +486,11 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
|
||||||
// computeStateDB retrieves the state database associated with a certain block.
|
// computeStateDB retrieves the state database associated with a certain block.
|
||||||
// If no state is locally available for the given block, a number of blocks are
|
// If no state is locally available for the given block, a number of blocks are
|
||||||
// attempted to be reexecuted to generate the desired state.
|
// attempted to be reexecuted to generate the desired state.
|
||||||
func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*state.StateDB, error) {
|
func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*state.StateDB, *state.StateDB, error) {
|
||||||
// If we have the state fully available, use that
|
// If we have the state fully available, use that
|
||||||
statedb, err := api.eth.blockchain.StateAt(block.Root())
|
statedb, privateStateDb, err := api.eth.blockchain.StateAt(block.Root())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return statedb, nil
|
return statedb, privateStateDb, nil
|
||||||
}
|
}
|
||||||
// Otherwise try to reexec blocks until we find a state or reach our limit
|
// Otherwise try to reexec blocks until we find a state or reach our limit
|
||||||
origin := block.NumberU64()
|
origin := block.NumberU64()
|
||||||
|
@ -482,16 +501,17 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
|
||||||
if block == nil {
|
if block == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if statedb, err = state.New(block.Root(), database); err == nil {
|
statedb, privateStateDb, err = api.eth.blockchain.StateAt(block.Root())
|
||||||
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
switch err.(type) {
|
switch err.(type) {
|
||||||
case *trie.MissingNodeError:
|
case *trie.MissingNodeError:
|
||||||
return nil, errors.New("required historical state unavailable")
|
return nil, nil, errors.New("required historical state unavailable")
|
||||||
default:
|
default:
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// State was available at historical point, regenerate
|
// State was available at historical point, regenerate
|
||||||
|
@ -508,26 +528,33 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
|
||||||
}
|
}
|
||||||
// Retrieve the next block to regenerate and process it
|
// Retrieve the next block to regenerate and process it
|
||||||
if block = api.eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil {
|
if block = api.eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil {
|
||||||
return nil, fmt.Errorf("block #%d not found", block.NumberU64()+1)
|
return nil, nil, fmt.Errorf("block #%d not found", block.NumberU64()+1)
|
||||||
}
|
}
|
||||||
_, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{})
|
_, _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, privateStateDb, vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
// Finalize the state so any modifications are written to the trie
|
// Finalize the state so any modifications are written to the trie
|
||||||
root, err := statedb.Commit(true)
|
root, err := statedb.Commit(true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if err := statedb.Reset(root); err != nil {
|
if err := statedb.Reset(root); err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
privateStateRoot, err := privateStateDb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if err := privateStateDb.Reset(privateStateRoot); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
database.TrieDB().Reference(root, common.Hash{})
|
database.TrieDB().Reference(root, common.Hash{})
|
||||||
database.TrieDB().Dereference(proot, common.Hash{})
|
database.TrieDB().Dereference(proot, common.Hash{})
|
||||||
proot = root
|
proot = root
|
||||||
}
|
}
|
||||||
log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "size", database.TrieDB().Size())
|
log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "size", database.TrieDB().Size())
|
||||||
return statedb, nil
|
return statedb, privateStateDb, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// TraceTransaction returns the structured logs created during the execution of EVM
|
// TraceTransaction returns the structured logs created during the execution of EVM
|
||||||
|
@ -542,18 +569,18 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Ha
|
||||||
if config != nil && config.Reexec != nil {
|
if config != nil && config.Reexec != nil {
|
||||||
reexec = *config.Reexec
|
reexec = *config.Reexec
|
||||||
}
|
}
|
||||||
msg, vmctx, statedb, err := api.computeTxEnv(blockHash, int(index), reexec)
|
msg, vmctx, statedb, privateStateDb, err := api.computeTxEnv(blockHash, int(index), reexec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Trace the transaction and return
|
// Trace the transaction and return
|
||||||
return api.traceTx(ctx, msg, vmctx, statedb, config)
|
return api.traceTx(ctx, msg, vmctx, statedb, privateStateDb, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
// traceTx configures a new tracer according to the provided configuration, and
|
// traceTx configures a new tracer according to the provided configuration, and
|
||||||
// executes the given message in the provided environment. The return value will
|
// executes the given message in the provided environment. The return value will
|
||||||
// be tracer dependent.
|
// be tracer dependent.
|
||||||
func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, vmctx vm.Context, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
|
func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, vmctx vm.Context, statedb *state.StateDB, privateStateDb *state.StateDB, config *TraceConfig) (interface{}, error) {
|
||||||
// Assemble the structured logger or the JavaScript tracer
|
// Assemble the structured logger or the JavaScript tracer
|
||||||
var (
|
var (
|
||||||
tracer vm.Tracer
|
tracer vm.Tracer
|
||||||
|
@ -587,7 +614,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v
|
||||||
tracer = vm.NewStructLogger(config.LogConfig)
|
tracer = vm.NewStructLogger(config.LogConfig)
|
||||||
}
|
}
|
||||||
// Run the transaction with tracing enabled.
|
// Run the transaction with tracing enabled.
|
||||||
vmenv := vm.NewEVM(vmctx, statedb, api.config, vm.Config{Debug: true, Tracer: tracer})
|
vmenv := vm.NewEVM(vmctx, statedb, privateStateDb, api.config, vm.Config{Debug: true, Tracer: tracer})
|
||||||
|
|
||||||
ret, gas, failed, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
|
ret, gas, failed, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -612,19 +639,19 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v
|
||||||
}
|
}
|
||||||
|
|
||||||
// computeTxEnv returns the execution environment of a certain transaction.
|
// computeTxEnv returns the execution environment of a certain transaction.
|
||||||
func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, reexec uint64) (core.Message, vm.Context, *state.StateDB, error) {
|
func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, reexec uint64) (core.Message, vm.Context, *state.StateDB, *state.StateDB, error) {
|
||||||
// Create the parent state database
|
// Create the parent state database
|
||||||
block := api.eth.blockchain.GetBlockByHash(blockHash)
|
block := api.eth.blockchain.GetBlockByHash(blockHash)
|
||||||
if block == nil {
|
if block == nil {
|
||||||
return nil, vm.Context{}, nil, fmt.Errorf("block %x not found", blockHash)
|
return nil, vm.Context{}, nil, nil, fmt.Errorf("block %x not found", blockHash)
|
||||||
}
|
}
|
||||||
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
|
||||||
if parent == nil {
|
if parent == nil {
|
||||||
return nil, vm.Context{}, nil, fmt.Errorf("parent %x not found", block.ParentHash())
|
return nil, vm.Context{}, nil, nil, fmt.Errorf("parent %x not found", block.ParentHash())
|
||||||
}
|
}
|
||||||
statedb, err := api.computeStateDB(parent, reexec)
|
statedb, privateStateDb, err := api.computeStateDB(parent, reexec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, vm.Context{}, nil, err
|
return nil, vm.Context{}, nil, nil, err
|
||||||
}
|
}
|
||||||
// Recompute transactions up to the target index.
|
// Recompute transactions up to the target index.
|
||||||
signer := types.MakeSigner(api.config, block.Number())
|
signer := types.MakeSigner(api.config, block.Number())
|
||||||
|
@ -634,15 +661,15 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, ree
|
||||||
msg, _ := tx.AsMessage(signer)
|
msg, _ := tx.AsMessage(signer)
|
||||||
context := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
context := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil)
|
||||||
if idx == txIndex {
|
if idx == txIndex {
|
||||||
return msg, context, statedb, nil
|
return msg, context, statedb, privateStateDb, nil
|
||||||
}
|
}
|
||||||
// Not yet the searched for transaction, execute on top of the current state
|
// Not yet the searched for transaction, execute on top of the current state
|
||||||
vmenv := vm.NewEVM(context, statedb, api.config, vm.Config{})
|
vmenv := vm.NewEVM(context, statedb, privateStateDb, api.config, vm.Config{})
|
||||||
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
|
if _, _, _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
|
||||||
return nil, vm.Context{}, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
|
return nil, vm.Context{}, nil, nil, fmt.Errorf("tx %x failed: %v", tx.Hash(), err)
|
||||||
}
|
}
|
||||||
// Ensure any modifications are committed to the state
|
// Ensure any modifications are committed to the state
|
||||||
statedb.Finalise(true)
|
statedb.Finalise(true)
|
||||||
}
|
}
|
||||||
return nil, vm.Context{}, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
|
return nil, vm.Context{}, nil, nil, fmt.Errorf("tx index %d out of range for block %x", txIndex, blockHash)
|
||||||
}
|
}
|
||||||
|
|
|
@ -132,7 +132,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
||||||
chainConfig: chainConfig,
|
chainConfig: chainConfig,
|
||||||
eventMux: ctx.EventMux,
|
eventMux: ctx.EventMux,
|
||||||
accountManager: ctx.AccountManager,
|
accountManager: ctx.AccountManager,
|
||||||
engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb),
|
||||||
shutdownChan: make(chan bool),
|
shutdownChan: make(chan bool),
|
||||||
networkId: config.NetworkId,
|
networkId: config.NetworkId,
|
||||||
gasPrice: config.GasPrice,
|
gasPrice: config.GasPrice,
|
||||||
|
@ -222,7 +222,7 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
|
// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
|
||||||
func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
|
func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
|
||||||
// If proof-of-authority is requested, set it up
|
// If proof-of-authority is requested, set it up
|
||||||
if chainConfig.Clique != nil {
|
if chainConfig.Clique != nil {
|
||||||
return clique.New(chainConfig.Clique, db)
|
return clique.New(chainConfig.Clique, db)
|
||||||
|
@ -238,13 +238,13 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
|
||||||
|
|
||||||
// Otherwise assume proof-of-work
|
// Otherwise assume proof-of-work
|
||||||
switch {
|
switch {
|
||||||
case config.PowMode == ethash.ModeFake:
|
case config.PowMode == ModeFake:
|
||||||
log.Warn("Ethash used in fake mode")
|
log.Warn("Ethash used in fake mode")
|
||||||
return ethash.NewFaker()
|
return ethash.NewFaker()
|
||||||
case config.PowMode == ethash.ModeTest:
|
case config.PowMode == ModeTest:
|
||||||
log.Warn("Ethash used in test mode")
|
log.Warn("Ethash used in test mode")
|
||||||
return ethash.NewTester()
|
return ethash.NewTester()
|
||||||
case config.PowMode == ethash.ModeShared:
|
case config.PowMode == ModeShared:
|
||||||
log.Warn("Ethash used in shared mode")
|
log.Warn("Ethash used in shared mode")
|
||||||
return ethash.NewShared()
|
return ethash.NewShared()
|
||||||
default:
|
default:
|
||||||
|
@ -343,7 +343,7 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
|
||||||
// set in js console via admin interface or wrapper from cli flags
|
// set in js console via admin interface or wrapper from cli flags
|
||||||
func (s *Ethereum) SetEtherbase(etherbase common.Address) {
|
func (s *Ethereum) SetEtherbase(etherbase common.Address) {
|
||||||
s.lock.Lock()
|
s.lock.Lock()
|
||||||
if _, ok := self.engine.(consensus.Istanbul); ok {
|
if _, ok := s.engine.(consensus.Istanbul); ok {
|
||||||
log.Error("Cannot set etherbase in Istanbul consensus")
|
log.Error("Cannot set etherbase in Istanbul consensus")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -122,8 +122,19 @@ type Config struct {
|
||||||
|
|
||||||
// Miscellaneous options
|
// Miscellaneous options
|
||||||
DocRoot string `toml:"-"`
|
DocRoot string `toml:"-"`
|
||||||
|
PowMode Mode
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Mode uint
|
||||||
|
|
||||||
|
const (
|
||||||
|
ModeNormal Mode = iota
|
||||||
|
ModeShared
|
||||||
|
ModeTest
|
||||||
|
ModeFake
|
||||||
|
ModeFullFake
|
||||||
|
)
|
||||||
|
|
||||||
type configMarshaling struct {
|
type configMarshaling struct {
|
||||||
ExtraData hexutil.Bytes
|
ExtraData hexutil.Bytes
|
||||||
}
|
}
|
||||||
|
|
|
@ -1685,7 +1685,33 @@ func (d *Downloader) syncWithPeerUntil(p *peerConnection, hash common.Hash, td *
|
||||||
d.syncStatsChainHeight = remoteHeight
|
d.syncStatsChainHeight = remoteHeight
|
||||||
d.syncStatsLock.Unlock()
|
d.syncStatsLock.Unlock()
|
||||||
|
|
||||||
d.queue.Prepare(localHeight+1, d.mode, uint64(0), remoteHeader)
|
// zekun: HACK
|
||||||
|
latest, err := d.fetchHeight(p)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
height := latest.Number.Uint64()
|
||||||
|
origin, err := d.findAncestor(p, height)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pivot := uint64(0)
|
||||||
|
if d.mode == FastSync {
|
||||||
|
if height <= uint64(fsMinFullBlocks) {
|
||||||
|
origin = 0
|
||||||
|
} else {
|
||||||
|
pivot = height - uint64(fsMinFullBlocks)
|
||||||
|
if pivot <= origin {
|
||||||
|
origin = pivot - 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.committed = 1
|
||||||
|
if d.mode == FastSync && pivot != 0 {
|
||||||
|
d.committed = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
d.queue.Prepare(localHeight+1, d.mode)
|
||||||
if d.syncInitHook != nil {
|
if d.syncInitHook != nil {
|
||||||
d.syncInitHook(localHeight, remoteHeight)
|
d.syncInitHook(localHeight, remoteHeight)
|
||||||
}
|
}
|
||||||
|
@ -1693,7 +1719,7 @@ func (d *Downloader) syncWithPeerUntil(p *peerConnection, hash common.Hash, td *
|
||||||
func() error { return d.fetchBoundedHeaders(p, localHeight+1, remoteHeight) },
|
func() error { return d.fetchBoundedHeaders(p, localHeight+1, remoteHeight) },
|
||||||
func() error { return d.fetchBodies(localHeight + 1) },
|
func() error { return d.fetchBodies(localHeight + 1) },
|
||||||
func() error { return d.fetchReceipts(localHeight + 1) }, // Receipts are only retrieved during fast sync
|
func() error { return d.fetchReceipts(localHeight + 1) }, // Receipts are only retrieved during fast sync
|
||||||
func() error { return d.processHeaders(localHeight+1, td) },
|
func() error { return d.processHeaders(localHeight+1, pivot, td) },
|
||||||
}
|
}
|
||||||
return d.spawnSync(fetchers)
|
return d.spawnSync(fetchers)
|
||||||
}
|
}
|
||||||
|
|
|
@ -183,6 +183,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
|
||||||
var logs []*types.Log
|
var logs []*types.Log
|
||||||
|
|
||||||
for ; f.begin <= int64(end); f.begin++ {
|
for ; f.begin <= int64(end); f.begin++ {
|
||||||
|
blockNumber := rpc.BlockNumber(f.begin)
|
||||||
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
||||||
if header == nil || err != nil {
|
if header == nil || err != nil {
|
||||||
return logs, err
|
return logs, err
|
||||||
|
|
|
@ -390,8 +390,6 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
|
||||||
newh = oldh
|
newh = oldh
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logs := filterLogs(unfiltered, nil, nil, addresses, topics)
|
|
||||||
return logs
|
|
||||||
}
|
}
|
||||||
// roll back old blocks
|
// roll back old blocks
|
||||||
for _, h := range oldHeaders {
|
for _, h := range oldHeaders {
|
||||||
|
|
|
@ -218,9 +218,6 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||||
// more reliably update peers or the local TD state.
|
// more reliably update peers or the local TD state.
|
||||||
go pm.BroadcastBlock(head, false)
|
go pm.BroadcastBlock(head, false)
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
|
atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
|
||||||
if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
|
if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
|
||||||
// We've completed a sync cycle, notify all peers of new state. This path is
|
// We've completed a sync cycle, notify all peers of new state. This path is
|
||||||
|
|
|
@ -25,7 +25,6 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"bytes"
|
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -377,11 +376,27 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
|
||||||
s.nonceLock.LockAddr(args.From)
|
s.nonceLock.LockAddr(args.From)
|
||||||
defer s.nonceLock.UnlockAddr(args.From)
|
defer s.nonceLock.UnlockAddr(args.From)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
isPrivate := args.PrivateFor != nil
|
||||||
|
|
||||||
|
if isPrivate {
|
||||||
|
data := []byte(*args.Data)
|
||||||
|
log.Info("sending private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
||||||
|
data, err := private.P.Send(data, args.PrivateFrom, args.PrivateFor)
|
||||||
|
log.Info("sent private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, err
|
||||||
|
}
|
||||||
|
// zekun: HACK
|
||||||
|
d := hexutil.Bytes(data)
|
||||||
|
args.Data = &d
|
||||||
|
}
|
||||||
|
|
||||||
signed, err := s.signTransaction(ctx, args, passwd)
|
signed, err := s.signTransaction(ctx, args, passwd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return common.Hash{}, err
|
return common.Hash{}, err
|
||||||
}
|
}
|
||||||
return submitTransaction(ctx, s.b, signed)
|
return submitTransaction(ctx, s.b, signed, isPrivate)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SignTransaction will create a transaction from the given arguments and
|
// SignTransaction will create a transaction from the given arguments and
|
||||||
|
@ -400,35 +415,15 @@ func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs
|
||||||
if args.Nonce == nil {
|
if args.Nonce == nil {
|
||||||
return nil, fmt.Errorf("nonce not specified")
|
return nil, fmt.Errorf("nonce not specified")
|
||||||
}
|
}
|
||||||
|
signed, err := s.signTransaction(ctx, args, passwd)
|
||||||
data := []byte(args.Data)
|
|
||||||
isPrivate := args.PrivateFor != nil
|
|
||||||
if isPrivate {
|
|
||||||
log.Info("sending private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
|
||||||
data, err = private.P.Send(data, args.PrivateFrom, args.PrivateFor)
|
|
||||||
log.Info("sent private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
|
||||||
if err != nil {
|
|
||||||
return common.Hash{}, err
|
|
||||||
}
|
|
||||||
args.Data = data
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set some sanity defaults and terminate on failure
|
|
||||||
if err := args.setDefaults(ctx, s.b); err != nil {
|
|
||||||
return common.Hash{}, err
|
|
||||||
}
|
|
||||||
// Assemble the transaction and sign with the wallet
|
|
||||||
tx := args.toTransaction()
|
|
||||||
|
|
||||||
var chainID *big.Int
|
|
||||||
if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) {
|
|
||||||
chainID = config.ChainId
|
|
||||||
}
|
|
||||||
signed, err := wallet.SignTxWithPassphrase(account, passwd, tx, chainID)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return submitTransaction(ctx, s.b, signed, isPrivate)
|
data, err := rlp.EncodeToBytes(signed)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SignTransactionResult{data, signed}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// signHash is a helper function that calculates a hash for the given message that can be
|
// signHash is a helper function that calculates a hash for the given message that can be
|
||||||
|
@ -529,7 +524,7 @@ func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Add
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
b := state.GetBalance(address)
|
b := state.GetBalance(address)
|
||||||
return b, state.Error()
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBlockByNumber returns the requested block. When blockNr is -1 the chain head is returned. When fullTx is true all
|
// GetBlockByNumber returns the requested block. When blockNr is -1 the chain head is returned. When fullTx is true all
|
||||||
|
@ -616,7 +611,7 @@ func (s *PublicBlockChainAPI) GetCode(ctx context.Context, address common.Addres
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
code := state.GetCode(address)
|
code := state.GetCode(address)
|
||||||
return code, state.Error()
|
return code, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStorageAt returns the storage from the state at the given address, key and
|
// GetStorageAt returns the storage from the state at the given address, key and
|
||||||
|
@ -628,7 +623,7 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
res := state.GetState(address, common.HexToHash(key))
|
res := state.GetState(address, common.HexToHash(key))
|
||||||
return res[:], state.Error()
|
return res[:], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallArgs represents the arguments for a call.
|
// CallArgs represents the arguments for a call.
|
||||||
|
@ -1031,7 +1026,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionCount(ctx context.Context, addr
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
nonce := state.GetNonce(address)
|
nonce := state.GetNonce(address)
|
||||||
return (*hexutil.Uint64)(&nonce), state.Error()
|
return (*hexutil.Uint64)(&nonce), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTransactionByHash returns the transaction for the given hash
|
// GetTransactionByHash returns the transaction for the given hash
|
||||||
|
@ -1248,17 +1243,19 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen
|
||||||
defer s.nonceLock.UnlockAddr(args.From)
|
defer s.nonceLock.UnlockAddr(args.From)
|
||||||
}
|
}
|
||||||
|
|
||||||
data := []byte(args.Data)
|
|
||||||
isPrivate := args.PrivateFor != nil
|
isPrivate := args.PrivateFor != nil
|
||||||
|
|
||||||
if isPrivate {
|
if isPrivate {
|
||||||
|
data := []byte(*args.Data)
|
||||||
log.Info("sending private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
log.Info("sending private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
||||||
data, err = private.P.Send(data, args.PrivateFrom, args.PrivateFor)
|
data, err = private.P.Send(data, args.PrivateFrom, args.PrivateFor)
|
||||||
log.Info("sent private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
log.Info("sent private tx", "data", fmt.Sprintf("%x", data), "privatefrom", args.PrivateFrom, "privatefor", args.PrivateFor)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return common.Hash{}, err
|
return common.Hash{}, err
|
||||||
}
|
}
|
||||||
args.Data = data
|
// zekun: HACK
|
||||||
|
d := hexutil.Bytes(data)
|
||||||
|
args.Data = &d
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set some sanity defaults and terminate on failure
|
// Set some sanity defaults and terminate on failure
|
||||||
|
@ -1583,7 +1580,7 @@ func (a *Async) send(ctx context.Context, s *PublicTransactionPoolAPI, asyncArgs
|
||||||
res.Error = err.Error()
|
res.Error = err.Error()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
b, err := private.P.Send([]byte(args.Data), args.PrivateFrom, args.PrivateFor)
|
b, err := private.P.Send([]byte(*args.Data), args.PrivateFrom, args.PrivateFor)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info("Error running Private.P.Send", "err", err)
|
log.Info("Error running Private.P.Send", "err", err)
|
||||||
res.Error = err.Error()
|
res.Error = err.Error()
|
||||||
|
@ -1607,9 +1604,9 @@ func (a *Async) save(ctx context.Context, s *PublicTransactionPoolAPI, args Send
|
||||||
}
|
}
|
||||||
var tx *types.Transaction
|
var tx *types.Transaction
|
||||||
if args.To == nil {
|
if args.To == nil {
|
||||||
tx = types.NewContractCreation((uint64)(*args.Nonce), (*big.Int)(args.Value), (*big.Int)(args.Gas), (*big.Int)(args.GasPrice), data)
|
tx = types.NewContractCreation((uint64)(*args.Nonce), (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), data)
|
||||||
} else {
|
} else {
|
||||||
tx = types.NewTransaction((uint64)(*args.Nonce), *args.To, (*big.Int)(args.Value), (*big.Int)(args.Gas), (*big.Int)(args.GasPrice), data)
|
tx = types.NewTransaction((uint64)(*args.Nonce), *args.To, (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), data)
|
||||||
}
|
}
|
||||||
|
|
||||||
signed, err := s.sign(args.From, tx)
|
signed, err := s.sign(args.From, tx)
|
||||||
|
|
|
@ -72,7 +72,7 @@ func (b *LesApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumb
|
||||||
return b.GetBlock(ctx, header.Hash())
|
return b.GetBlock(ctx, header.Hash())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
|
func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (vm.MinimalApiState, *types.Header, error) {
|
||||||
header, err := b.HeaderByNumber(ctx, blockNr)
|
header, err := b.HeaderByNumber(ctx, blockNr)
|
||||||
if header == nil || err != nil {
|
if header == nil || err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
|
@ -102,10 +102,11 @@ func (b *LesApiBackend) GetTd(hash common.Hash) *big.Int {
|
||||||
return b.eth.blockchain.GetTdByHash(hash)
|
return b.eth.blockchain.GetTdByHash(hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
|
func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, apiState vm.MinimalApiState, header *types.Header, vmCfg vm.Config) (*vm.EVM, func() error, error) {
|
||||||
state.SetBalance(msg.From(), math.MaxBig256)
|
statedb := apiState.(*state.StateDB)
|
||||||
|
statedb.SetBalance(msg.From(), math.MaxBig256)
|
||||||
context := core.NewEVMContext(msg, header, b.eth.blockchain, nil)
|
context := core.NewEVMContext(msg, header, b.eth.blockchain, nil)
|
||||||
return vm.NewEVM(context, state, b.eth.chainConfig, vmCfg), state.Error, nil
|
return vm.NewEVM(context, statedb, statedb, b.eth.chainConfig, vmCfg), statedb.Error, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
|
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
|
||||||
|
|
|
@ -102,7 +102,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
||||||
peers: peers,
|
peers: peers,
|
||||||
reqDist: newRequestDistributor(peers, quitSync),
|
reqDist: newRequestDistributor(peers, quitSync),
|
||||||
accountManager: ctx.AccountManager,
|
accountManager: ctx.AccountManager,
|
||||||
engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
engine: eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
|
||||||
shutdownChan: make(chan bool),
|
shutdownChan: make(chan bool),
|
||||||
networkId: config.NetworkId,
|
networkId: config.NetworkId,
|
||||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||||
|
|
|
@ -78,7 +78,7 @@ type BlockChain interface {
|
||||||
GetHeaderByHash(hash common.Hash) *types.Header
|
GetHeaderByHash(hash common.Hash) *types.Header
|
||||||
CurrentHeader() *types.Header
|
CurrentHeader() *types.Header
|
||||||
GetTd(hash common.Hash, number uint64) *big.Int
|
GetTd(hash common.Hash, number uint64) *big.Int
|
||||||
State() (*state.StateDB, error)
|
State() (*state.StateDB, *state.StateDB, error)
|
||||||
InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
|
InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
|
||||||
Rollback(chain []common.Hash)
|
Rollback(chain []common.Hash)
|
||||||
GetHeaderByNumber(number uint64) *types.Header
|
GetHeaderByNumber(number uint64) *types.Header
|
||||||
|
@ -584,7 +584,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||||
// Retrieve the requested state entry, stopping if enough was found
|
// Retrieve the requested state entry, stopping if enough was found
|
||||||
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
||||||
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
||||||
statedb, err := pm.blockchain.State()
|
statedb, _, err := pm.blockchain.State()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -715,7 +715,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||||
// Retrieve the requested state entry, stopping if enough was found
|
// Retrieve the requested state entry, stopping if enough was found
|
||||||
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
||||||
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
||||||
statedb, err := pm.blockchain.State()
|
statedb, _, err := pm.blockchain.State()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -775,7 +775,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
|
||||||
|
|
||||||
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
if number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number != nil {
|
||||||
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
if header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header != nil {
|
||||||
statedb, _ = pm.blockchain.State()
|
statedb, _, _ = pm.blockchain.State()
|
||||||
root = header.Root
|
root = header.Root
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -209,8 +209,8 @@ func (bc *LightChain) Genesis() *types.Block {
|
||||||
}
|
}
|
||||||
|
|
||||||
// State returns a new mutable state based on the current HEAD block.
|
// State returns a new mutable state based on the current HEAD block.
|
||||||
func (bc *LightChain) State() (*state.StateDB, error) {
|
func (bc *LightChain) State() (*state.StateDB, *state.StateDB, error) {
|
||||||
return nil, errors.New("not implemented, needs client/server interface split")
|
return nil, nil, errors.New("not implemented, needs client/server interface split")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetBody retrieves a block body (transactions and uncles) from the database
|
// GetBody retrieves a block body (transactions and uncles) from the database
|
||||||
|
|
|
@ -163,7 +163,7 @@ func (self *Miner) SetExtra(extra []byte) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pending returns the currently pending block and associated state.
|
// Pending returns the currently pending block and associated state.
|
||||||
func (self *Miner) Pending() (*types.Block, *state.StateDB) {
|
func (self *Miner) Pending() (*types.Block, *state.StateDB, *state.StateDB) {
|
||||||
return self.worker.pending()
|
return self.worker.pending()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -77,8 +77,12 @@ type Work struct {
|
||||||
header *types.Header
|
header *types.Header
|
||||||
txs []*types.Transaction
|
txs []*types.Transaction
|
||||||
receipts []*types.Receipt
|
receipts []*types.Receipt
|
||||||
|
privateReceipts []*types.Receipt
|
||||||
|
|
||||||
createdAt time.Time
|
createdAt time.Time
|
||||||
|
|
||||||
|
// Leave this publicState named state, add privateState which most code paths can just ignore
|
||||||
|
privateState *state.StateDB
|
||||||
}
|
}
|
||||||
|
|
||||||
type Result struct {
|
type Result struct {
|
||||||
|
@ -174,17 +178,18 @@ func (self *worker) setExtra(extra []byte) {
|
||||||
self.extra = extra
|
self.extra = extra
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *worker) pending() (*types.Block, *state.StateDB) {
|
func (self *worker) pending() (*types.Block, *state.StateDB, *state.StateDB) {
|
||||||
if atomic.LoadInt32(&self.mining) == 0 {
|
if atomic.LoadInt32(&self.mining) == 0 {
|
||||||
// return a snapshot to avoid contention on currentMu mutex
|
// return a snapshot to avoid contention on currentMu mutex
|
||||||
self.snapshotMu.RLock()
|
self.snapshotMu.RLock()
|
||||||
defer self.snapshotMu.RUnlock()
|
defer self.snapshotMu.RUnlock()
|
||||||
return self.snapshotBlock, self.snapshotState.Copy()
|
return self.snapshotBlock, self.snapshotState.Copy(), self.current.privateState.Copy()
|
||||||
}
|
}
|
||||||
|
|
||||||
self.currentMu.Lock()
|
self.currentMu.Lock()
|
||||||
defer self.currentMu.Unlock()
|
defer self.currentMu.Unlock()
|
||||||
return self.current.Block, self.current.state.Copy()
|
return self.current.Block, self.current.state.Copy(), self.current.privateState.Copy()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *worker) pendingBlock() *types.Block {
|
func (self *worker) pendingBlock() *types.Block {
|
||||||
|
@ -357,19 +362,20 @@ func (self *worker) push(work *Work) {
|
||||||
|
|
||||||
// makeCurrent creates a new environment for the current cycle.
|
// makeCurrent creates a new environment for the current cycle.
|
||||||
func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error {
|
func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error {
|
||||||
state, err := self.chain.StateAt(parent.Root())
|
publicState, privateState, err := self.chain.StateAt(parent.Root())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
work := &Work{
|
work := &Work{
|
||||||
config: self.config,
|
config: self.config,
|
||||||
signer: types.NewEIP155Signer(self.config.ChainId),
|
signer: types.MakeSigner(self.config, header.Number),
|
||||||
state: state,
|
state: publicState,
|
||||||
ancestors: set.New(),
|
ancestors: set.New(),
|
||||||
family: set.New(),
|
family: set.New(),
|
||||||
uncles: set.New(),
|
uncles: set.New(),
|
||||||
header: header,
|
header: header,
|
||||||
createdAt: time.Now(),
|
createdAt: time.Now(),
|
||||||
|
privateState: privateState,
|
||||||
}
|
}
|
||||||
|
|
||||||
// when 08 is processed ancestors contain 07 (quick block)
|
// when 08 is processed ancestors contain 07 (quick block)
|
||||||
|
@ -606,14 +612,22 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
|
||||||
|
|
||||||
func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, coinbase common.Address, gp *core.GasPool) (error, []*types.Log) {
|
func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, coinbase common.Address, gp *core.GasPool) (error, []*types.Log) {
|
||||||
snap := env.state.Snapshot()
|
snap := env.state.Snapshot()
|
||||||
|
privateSnap := env.privateState.Snapshot()
|
||||||
|
|
||||||
receipt, _, err := core.ApplyTransaction(env.config, bc, &coinbase, gp, env.state, env.header, tx, &env.header.GasUsed, vm.Config{})
|
receipt, privateReceipt, _, err := core.ApplyTransaction(env.config, bc, &coinbase, gp, env.state, env.privateState, env.header, tx, &env.header.GasUsed, vm.Config{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
env.state.RevertToSnapshot(snap)
|
env.state.RevertToSnapshot(snap)
|
||||||
|
env.privateState.RevertToSnapshot(privateSnap)
|
||||||
return err, nil
|
return err, nil
|
||||||
}
|
}
|
||||||
env.txs = append(env.txs, tx)
|
env.txs = append(env.txs, tx)
|
||||||
env.receipts = append(env.receipts, receipt)
|
env.receipts = append(env.receipts, receipt)
|
||||||
|
|
||||||
return nil, receipt.Logs
|
logs := receipt.Logs
|
||||||
|
if privateReceipt != nil {
|
||||||
|
logs = append(receipt.Logs, privateReceipt.Logs...)
|
||||||
|
env.privateReceipts = append(env.privateReceipts, privateReceipt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, logs
|
||||||
}
|
}
|
||||||
|
|
|
@ -120,7 +120,7 @@ func (ks *KeyStore) SignTx(account *Account, tx *Transaction, chainID *BigInt) (
|
||||||
if chainID == nil { // Null passed from mobile app
|
if chainID == nil { // Null passed from mobile app
|
||||||
chainID = new(BigInt)
|
chainID = new(BigInt)
|
||||||
}
|
}
|
||||||
signed, err := ks.keystore.SignTx(account.account, tx.tx, chainID.bigint)
|
signed, err := ks.keystore.SignTx(account.account, tx.tx, chainID.bigint, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -310,67 +310,3 @@ func (ev *msgEventer) Close() error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// msgEventer wraps a MsgReadWriter and sends events whenever a message is sent
|
|
||||||
// or received
|
|
||||||
type msgEventer struct {
|
|
||||||
MsgReadWriter
|
|
||||||
|
|
||||||
feed *event.Feed
|
|
||||||
peerID discover.NodeID
|
|
||||||
Protocol string
|
|
||||||
}
|
|
||||||
|
|
||||||
// newMsgEventer returns a msgEventer which sends message events to the given
|
|
||||||
// feed
|
|
||||||
func newMsgEventer(rw MsgReadWriter, feed *event.Feed, peerID discover.NodeID, proto string) *msgEventer {
|
|
||||||
return &msgEventer{
|
|
||||||
MsgReadWriter: rw,
|
|
||||||
feed: feed,
|
|
||||||
peerID: peerID,
|
|
||||||
Protocol: proto,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadMsg reads a message from the underlying MsgReadWriter and emits a
|
|
||||||
// "message received" event
|
|
||||||
func (self *msgEventer) ReadMsg() (Msg, error) {
|
|
||||||
msg, err := self.MsgReadWriter.ReadMsg()
|
|
||||||
if err != nil {
|
|
||||||
return msg, err
|
|
||||||
}
|
|
||||||
self.feed.Send(&PeerEvent{
|
|
||||||
Type: PeerEventTypeMsgRecv,
|
|
||||||
Peer: self.peerID,
|
|
||||||
Protocol: self.Protocol,
|
|
||||||
MsgCode: &msg.Code,
|
|
||||||
MsgSize: &msg.Size,
|
|
||||||
})
|
|
||||||
return msg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteMsg writes a message to the underlying MsgReadWriter and emits a
|
|
||||||
// "message sent" event
|
|
||||||
func (self *msgEventer) WriteMsg(msg Msg) error {
|
|
||||||
err := self.MsgReadWriter.WriteMsg(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
self.feed.Send(&PeerEvent{
|
|
||||||
Type: PeerEventTypeMsgSend,
|
|
||||||
Peer: self.peerID,
|
|
||||||
Protocol: self.Protocol,
|
|
||||||
MsgCode: &msg.Code,
|
|
||||||
MsgSize: &msg.Size,
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the underlying MsgReadWriter if it implements the io.Closer
|
|
||||||
// interface
|
|
||||||
func (self *msgEventer) Close() error {
|
|
||||||
if v, ok := self.MsgReadWriter.(io.Closer); ok {
|
|
||||||
return v.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -847,7 +847,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) e
|
||||||
}
|
}
|
||||||
|
|
||||||
if !isNodePermissioned(node, currentNode, srv.DataDir, direction) {
|
if !isNodePermissioned(node, currentNode, srv.DataDir, direction) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Trace("Node Permissioning is Disabled.")
|
log.Trace("Node Permissioning is Disabled.")
|
||||||
|
@ -900,13 +900,6 @@ func truncateName(s string) string {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func truncateName(s string) string {
|
|
||||||
if len(s) > 20 {
|
|
||||||
return s[:20] + "..."
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkpoint sends the conn to run, which performs the
|
// checkpoint sends the conn to run, which performs the
|
||||||
// post-handshake checks for the stage (posthandshake, addpeer).
|
// post-handshake checks for the stage (posthandshake, addpeer).
|
||||||
func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
|
func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
|
||||||
|
|
|
@ -102,19 +102,19 @@ var (
|
||||||
//
|
//
|
||||||
// This configuration is intentionally not using keyed fields to force anyone
|
// This configuration is intentionally not using keyed fields to force anyone
|
||||||
// adding flags to the config to also have to set these fields.
|
// adding flags to the config to also have to set these fields.
|
||||||
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
|
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil, false}
|
||||||
|
|
||||||
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
|
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
|
||||||
// and accepted by the Ethereum core developers into the Clique consensus.
|
// and accepted by the Ethereum core developers into the Clique consensus.
|
||||||
//
|
//
|
||||||
// This configuration is intentionally not using keyed fields to force anyone
|
// This configuration is intentionally not using keyed fields to force anyone
|
||||||
// adding flags to the config to also have to set these fields.
|
// adding flags to the config to also have to set these fields.
|
||||||
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
|
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil, false}
|
||||||
|
|
||||||
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
|
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), new(EthashConfig), nil, nil, false}
|
||||||
TestRules = TestChainConfig.Rules(new(big.Int))
|
TestRules = TestChainConfig.Rules(new(big.Int))
|
||||||
|
|
||||||
QuorumTestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, nil, common.Hash{}, nil, nil, nil, new(EthashConfig), nil, nil, true}
|
QuorumTestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, nil, common.Hash{}, nil, nil, nil, nil, new(EthashConfig), nil, nil, true}
|
||||||
)
|
)
|
||||||
|
|
||||||
// ChainConfig is the core config which determines the blockchain settings.
|
// ChainConfig is the core config which determines the blockchain settings.
|
||||||
|
|
|
@ -87,17 +87,6 @@ var (
|
||||||
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
|
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
GasLimitBoundDivisor = big.NewInt(4096) // The bound divisor of the gas limit, used in update calculations.
|
|
||||||
MinGasLimit = big.NewInt(700000000) // Minimum the gas limit may ever be.
|
|
||||||
GenesisGasLimit = big.NewInt(800000000) // Gas limit of the Genesis block.
|
|
||||||
TargetGasLimit = new(big.Int).Set(GenesisGasLimit) // The artificial target
|
|
||||||
DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations.
|
|
||||||
GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block.
|
|
||||||
MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
|
|
||||||
DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
|
|
||||||
)
|
|
||||||
|
|
||||||
func GetMaximumExtraDataSize(isQuorum bool) uint64 {
|
func GetMaximumExtraDataSize(isQuorum bool) uint64 {
|
||||||
if isQuorum {
|
if isQuorum {
|
||||||
return QuorumMaximumExtraDataSize
|
return QuorumMaximumExtraDataSize
|
||||||
|
|
|
@ -252,7 +252,7 @@ func (minter *minter) createWork() *work {
|
||||||
Number: parentNumber.Add(parentNumber, common.Big1),
|
Number: parentNumber.Add(parentNumber, common.Big1),
|
||||||
Difficulty: ethash.CalcDifficulty(minter.config, uint64(tstamp), parent.Header()),
|
Difficulty: ethash.CalcDifficulty(minter.config, uint64(tstamp), parent.Header()),
|
||||||
GasLimit: core.CalcGasLimit(parent),
|
GasLimit: core.CalcGasLimit(parent),
|
||||||
GasUsed: new(big.Int),
|
GasUsed: 0,
|
||||||
Coinbase: minter.coinbase,
|
Coinbase: minter.coinbase,
|
||||||
Time: big.NewInt(tstamp),
|
Time: big.NewInt(tstamp),
|
||||||
}
|
}
|
||||||
|
@ -335,10 +335,10 @@ func (minter *minter) mintNewBlock() {
|
||||||
log.Info("Generated next block", "block num", block.Number(), "num txes", txCount)
|
log.Info("Generated next block", "block num", block.Number(), "num txes", txCount)
|
||||||
|
|
||||||
deleteEmptyObjects := minter.chain.Config().IsEIP158(block.Number())
|
deleteEmptyObjects := minter.chain.Config().IsEIP158(block.Number())
|
||||||
if _, err := work.publicState.CommitTo(minter.chainDb, deleteEmptyObjects); err != nil {
|
if _, err := work.publicState.Commit(deleteEmptyObjects); err != nil {
|
||||||
panic(fmt.Sprint("error committing public state: ", err))
|
panic(fmt.Sprint("error committing public state: ", err))
|
||||||
}
|
}
|
||||||
if _, privStateErr := work.privateState.CommitTo(minter.chainDb, deleteEmptyObjects); privStateErr != nil {
|
if _, privStateErr := work.privateState.Commit(deleteEmptyObjects); privStateErr != nil {
|
||||||
panic(fmt.Sprint("error committing private state: ", privStateErr))
|
panic(fmt.Sprint("error committing private state: ", privStateErr))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -397,7 +397,7 @@ func (env *work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, g
|
||||||
|
|
||||||
var author *common.Address
|
var author *common.Address
|
||||||
var vmConf vm.Config
|
var vmConf vm.Config
|
||||||
publicReceipt, privateReceipt, _, err := core.ApplyTransaction(env.config, bc, author, gp, env.publicState, env.privateState, env.header, tx, env.header.GasUsed, vmConf)
|
publicReceipt, privateReceipt, _, err := core.ApplyTransaction(env.config, bc, author, gp, env.publicState, env.privateState, env.header, tx, &env.header.GasUsed, vmConf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
env.publicState.RevertToSnapshot(publicSnapshot)
|
env.publicState.RevertToSnapshot(publicSnapshot)
|
||||||
env.privateState.RevertToSnapshot(privateSnapshot)
|
env.privateState.RevertToSnapshot(privateSnapshot)
|
||||||
|
|
|
@ -124,7 +124,7 @@ func (t *BlockTest) Run() error {
|
||||||
if common.Hash(t.json.BestBlock) != cmlast {
|
if common.Hash(t.json.BestBlock) != cmlast {
|
||||||
return fmt.Errorf("last block hash validation mismatch: want: %x, have: %x", t.json.BestBlock, cmlast)
|
return fmt.Errorf("last block hash validation mismatch: want: %x, have: %x", t.json.BestBlock, cmlast)
|
||||||
}
|
}
|
||||||
newDB, err := chain.State()
|
newDB, _, err := chain.State()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -135,7 +135,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD
|
||||||
}
|
}
|
||||||
context := core.NewEVMContext(msg, block.Header(), nil, &t.json.Env.Coinbase)
|
context := core.NewEVMContext(msg, block.Header(), nil, &t.json.Env.Coinbase)
|
||||||
context.GetHash = vmTestBlockHash
|
context.GetHash = vmTestBlockHash
|
||||||
evm := vm.NewEVM(context, statedb, config, vmconfig)
|
evm := vm.NewEVM(context, statedb, statedb, config, vmconfig)
|
||||||
|
|
||||||
gaspool := new(core.GasPool)
|
gaspool := new(core.GasPool)
|
||||||
gaspool.AddGas(block.GasLimit())
|
gaspool.AddGas(block.GasLimit())
|
||||||
|
|
|
@ -143,7 +143,7 @@ func (t *VMTest) newEVM(statedb *state.StateDB, vmconfig vm.Config) *vm.EVM {
|
||||||
GasPrice: t.json.Exec.GasPrice,
|
GasPrice: t.json.Exec.GasPrice,
|
||||||
}
|
}
|
||||||
vmconfig.NoRecursion = true
|
vmconfig.NoRecursion = true
|
||||||
return vm.NewEVM(context, statedb, params.MainnetChainConfig, vmconfig)
|
return vm.NewEVM(context, statedb, statedb, params.MainnetChainConfig, vmconfig)
|
||||||
}
|
}
|
||||||
|
|
||||||
func vmTestBlockHash(n uint64) common.Hash {
|
func vmTestBlockHash(n uint64) common.Hash {
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
*.[68]
|
||||||
|
*.a
|
||||||
|
*.out
|
||||||
|
*.swp
|
||||||
|
_obj
|
||||||
|
_testmain.go
|
||||||
|
cmd/metrics-bench/metrics-bench
|
||||||
|
cmd/metrics-example/metrics-example
|
||||||
|
cmd/never-read/never-read
|
|
@ -0,0 +1,14 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.2
|
||||||
|
- 1.3
|
||||||
|
- 1.4
|
||||||
|
- 1.5
|
||||||
|
|
||||||
|
script:
|
||||||
|
- ./validate.sh
|
||||||
|
|
||||||
|
# this should give us faster builds according to
|
||||||
|
# http://docs.travis-ci.com/user/migrating-from-legacy/
|
||||||
|
sudo: false
|
|
@ -0,0 +1,29 @@
|
||||||
|
Copyright 2012 Richard Crowley. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following
|
||||||
|
disclaimer in the documentation and/or other materials provided
|
||||||
|
with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
|
||||||
|
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
|
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||||
|
THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
The views and conclusions contained in the software and documentation
|
||||||
|
are those of the authors and should not be interpreted as representing
|
||||||
|
official policies, either expressed or implied, of Richard Crowley.
|
|
@ -0,0 +1,153 @@
|
||||||
|
go-metrics
|
||||||
|
==========
|
||||||
|
|
||||||
|
![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
|
||||||
|
|
||||||
|
Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
|
||||||
|
|
||||||
|
Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
Create and update metrics:
|
||||||
|
|
||||||
|
```go
|
||||||
|
c := metrics.NewCounter()
|
||||||
|
metrics.Register("foo", c)
|
||||||
|
c.Inc(47)
|
||||||
|
|
||||||
|
g := metrics.NewGauge()
|
||||||
|
metrics.Register("bar", g)
|
||||||
|
g.Update(47)
|
||||||
|
|
||||||
|
r := NewRegistry()
|
||||||
|
g := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
|
||||||
|
|
||||||
|
s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
|
||||||
|
h := metrics.NewHistogram(s)
|
||||||
|
metrics.Register("baz", h)
|
||||||
|
h.Update(47)
|
||||||
|
|
||||||
|
m := metrics.NewMeter()
|
||||||
|
metrics.Register("quux", m)
|
||||||
|
m.Mark(47)
|
||||||
|
|
||||||
|
t := metrics.NewTimer()
|
||||||
|
metrics.Register("bang", t)
|
||||||
|
t.Time(func() {})
|
||||||
|
t.Update(47)
|
||||||
|
```
|
||||||
|
|
||||||
|
Register() is not threadsafe. For threadsafe metric registration use
|
||||||
|
GetOrRegister:
|
||||||
|
|
||||||
|
```
|
||||||
|
t := metrics.GetOrRegisterTimer("account.create.latency", nil)
|
||||||
|
t.Time(func() {})
|
||||||
|
t.Update(47)
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically log every metric in human-readable form to standard error:
|
||||||
|
|
||||||
|
```go
|
||||||
|
go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically log every metric in slightly-more-parseable form to syslog:
|
||||||
|
|
||||||
|
```go
|
||||||
|
w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
|
||||||
|
go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
import "github.com/cyberdelia/go-metrics-graphite"
|
||||||
|
|
||||||
|
addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
|
||||||
|
go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically emit every metric into InfluxDB:
|
||||||
|
|
||||||
|
**NOTE:** this has been pulled out of the library due to constant fluctuations
|
||||||
|
in the InfluxDB API. In fact, all client libraries are on their way out. see
|
||||||
|
issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
|
||||||
|
[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/vrischmann/go-metrics-influxdb"
|
||||||
|
|
||||||
|
go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
|
||||||
|
Host: "127.0.0.1:8086",
|
||||||
|
Database: "metrics",
|
||||||
|
Username: "test",
|
||||||
|
Password: "test",
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
|
||||||
|
|
||||||
|
**Note**: the client included with this repository under the `librato` package
|
||||||
|
has been deprecated and moved to the repository linked above.
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/mihasya/go-metrics-librato"
|
||||||
|
|
||||||
|
go librato.Librato(metrics.DefaultRegistry,
|
||||||
|
10e9, // interval
|
||||||
|
"example@example.com", // account owner email address
|
||||||
|
"token", // Librato API token
|
||||||
|
"hostname", // source
|
||||||
|
[]float64{0.95}, // percentiles to send
|
||||||
|
time.Millisecond, // time unit
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Periodically emit every metric to StatHat:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/rcrowley/go-metrics/stathat"
|
||||||
|
|
||||||
|
go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
|
||||||
|
```
|
||||||
|
|
||||||
|
Maintain all metrics along with expvars at `/debug/metrics`:
|
||||||
|
|
||||||
|
This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
|
||||||
|
but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
|
||||||
|
as well as all your go-metrics.
|
||||||
|
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/rcrowley/go-metrics/exp"
|
||||||
|
|
||||||
|
exp.Exp(metrics.DefaultRegistry)
|
||||||
|
```
|
||||||
|
|
||||||
|
Installation
|
||||||
|
------------
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get github.com/rcrowley/go-metrics
|
||||||
|
```
|
||||||
|
|
||||||
|
StatHat support additionally requires their Go client:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get github.com/stathat/go
|
||||||
|
```
|
||||||
|
|
||||||
|
Publishing Metrics
|
||||||
|
------------------
|
||||||
|
|
||||||
|
Clients are available for the following destinations:
|
||||||
|
|
||||||
|
* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
|
||||||
|
* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
|
||||||
|
* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
|
||||||
|
* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
|
||||||
|
* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
|
|
@ -0,0 +1,112 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "sync/atomic"
|
||||||
|
|
||||||
|
// Counters hold an int64 value that can be incremented and decremented.
|
||||||
|
type Counter interface {
|
||||||
|
Clear()
|
||||||
|
Count() int64
|
||||||
|
Dec(int64)
|
||||||
|
Inc(int64)
|
||||||
|
Snapshot() Counter
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrRegisterCounter returns an existing Counter or constructs and registers
|
||||||
|
// a new StandardCounter.
|
||||||
|
func GetOrRegisterCounter(name string, r Registry) Counter {
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
return r.GetOrRegister(name, NewCounter).(Counter)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCounter constructs a new StandardCounter.
|
||||||
|
func NewCounter() Counter {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilCounter{}
|
||||||
|
}
|
||||||
|
return &StandardCounter{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRegisteredCounter constructs and registers a new StandardCounter.
|
||||||
|
func NewRegisteredCounter(name string, r Registry) Counter {
|
||||||
|
c := NewCounter()
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
r.Register(name, c)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// CounterSnapshot is a read-only copy of another Counter.
|
||||||
|
type CounterSnapshot int64
|
||||||
|
|
||||||
|
// Clear panics.
|
||||||
|
func (CounterSnapshot) Clear() {
|
||||||
|
panic("Clear called on a CounterSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count at the time the snapshot was taken.
|
||||||
|
func (c CounterSnapshot) Count() int64 { return int64(c) }
|
||||||
|
|
||||||
|
// Dec panics.
|
||||||
|
func (CounterSnapshot) Dec(int64) {
|
||||||
|
panic("Dec called on a CounterSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inc panics.
|
||||||
|
func (CounterSnapshot) Inc(int64) {
|
||||||
|
panic("Inc called on a CounterSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (c CounterSnapshot) Snapshot() Counter { return c }
|
||||||
|
|
||||||
|
// NilCounter is a no-op Counter.
|
||||||
|
type NilCounter struct{}
|
||||||
|
|
||||||
|
// Clear is a no-op.
|
||||||
|
func (NilCounter) Clear() {}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilCounter) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Dec is a no-op.
|
||||||
|
func (NilCounter) Dec(i int64) {}
|
||||||
|
|
||||||
|
// Inc is a no-op.
|
||||||
|
func (NilCounter) Inc(i int64) {}
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilCounter) Snapshot() Counter { return NilCounter{} }
|
||||||
|
|
||||||
|
// StandardCounter is the standard implementation of a Counter and uses the
|
||||||
|
// sync/atomic package to manage a single int64 value.
|
||||||
|
type StandardCounter struct {
|
||||||
|
count int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear sets the counter to zero.
|
||||||
|
func (c *StandardCounter) Clear() {
|
||||||
|
atomic.StoreInt64(&c.count, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the current count.
|
||||||
|
func (c *StandardCounter) Count() int64 {
|
||||||
|
return atomic.LoadInt64(&c.count)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dec decrements the counter by the given amount.
|
||||||
|
func (c *StandardCounter) Dec(i int64) {
|
||||||
|
atomic.AddInt64(&c.count, -i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inc increments the counter by the given amount.
|
||||||
|
func (c *StandardCounter) Inc(i int64) {
|
||||||
|
atomic.AddInt64(&c.count, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the counter.
|
||||||
|
func (c *StandardCounter) Snapshot() Counter {
|
||||||
|
return CounterSnapshot(c.Count())
|
||||||
|
}
|
|
@ -0,0 +1,76 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime/debug"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
debugMetrics struct {
|
||||||
|
GCStats struct {
|
||||||
|
LastGC Gauge
|
||||||
|
NumGC Gauge
|
||||||
|
Pause Histogram
|
||||||
|
//PauseQuantiles Histogram
|
||||||
|
PauseTotal Gauge
|
||||||
|
}
|
||||||
|
ReadGCStats Timer
|
||||||
|
}
|
||||||
|
gcStats debug.GCStats
|
||||||
|
)
|
||||||
|
|
||||||
|
// Capture new values for the Go garbage collector statistics exported in
|
||||||
|
// debug.GCStats. This is designed to be called as a goroutine.
|
||||||
|
func CaptureDebugGCStats(r Registry, d time.Duration) {
|
||||||
|
for _ = range time.Tick(d) {
|
||||||
|
CaptureDebugGCStatsOnce(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture new values for the Go garbage collector statistics exported in
|
||||||
|
// debug.GCStats. This is designed to be called in a background goroutine.
|
||||||
|
// Giving a registry which has not been given to RegisterDebugGCStats will
|
||||||
|
// panic.
|
||||||
|
//
|
||||||
|
// Be careful (but much less so) with this because debug.ReadGCStats calls
|
||||||
|
// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
|
||||||
|
// operation, isn't something you want to be doing all the time.
|
||||||
|
func CaptureDebugGCStatsOnce(r Registry) {
|
||||||
|
lastGC := gcStats.LastGC
|
||||||
|
t := time.Now()
|
||||||
|
debug.ReadGCStats(&gcStats)
|
||||||
|
debugMetrics.ReadGCStats.UpdateSince(t)
|
||||||
|
|
||||||
|
debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
|
||||||
|
debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
|
||||||
|
if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
|
||||||
|
debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
|
||||||
|
}
|
||||||
|
//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
|
||||||
|
debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register metrics for the Go garbage collector statistics exported in
|
||||||
|
// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
|
||||||
|
// i.e. debug.GCStats.PauseTotal.
|
||||||
|
func RegisterDebugGCStats(r Registry) {
|
||||||
|
debugMetrics.GCStats.LastGC = NewGauge()
|
||||||
|
debugMetrics.GCStats.NumGC = NewGauge()
|
||||||
|
debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
|
||||||
|
//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
|
||||||
|
debugMetrics.GCStats.PauseTotal = NewGauge()
|
||||||
|
debugMetrics.ReadGCStats = NewTimer()
|
||||||
|
|
||||||
|
r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
|
||||||
|
r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
|
||||||
|
r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
|
||||||
|
//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
|
||||||
|
r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
|
||||||
|
r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate an initial slice for gcStats.Pause to avoid allocations during
|
||||||
|
// normal operation.
|
||||||
|
func init() {
|
||||||
|
gcStats.Pause = make([]time.Duration, 11)
|
||||||
|
}
|
|
@ -0,0 +1,118 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EWMAs continuously calculate an exponentially-weighted moving average
|
||||||
|
// based on an outside source of clock ticks.
|
||||||
|
type EWMA interface {
|
||||||
|
Rate() float64
|
||||||
|
Snapshot() EWMA
|
||||||
|
Tick()
|
||||||
|
Update(int64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA constructs a new EWMA with the given alpha.
|
||||||
|
func NewEWMA(alpha float64) EWMA {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilEWMA{}
|
||||||
|
}
|
||||||
|
return &StandardEWMA{alpha: alpha}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA1 constructs a new EWMA for a one-minute moving average.
|
||||||
|
func NewEWMA1() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/1))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA5 constructs a new EWMA for a five-minute moving average.
|
||||||
|
func NewEWMA5() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/5))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
|
||||||
|
func NewEWMA15() EWMA {
|
||||||
|
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EWMASnapshot is a read-only copy of another EWMA.
|
||||||
|
type EWMASnapshot float64
|
||||||
|
|
||||||
|
// Rate returns the rate of events per second at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (a EWMASnapshot) Rate() float64 { return float64(a) }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (a EWMASnapshot) Snapshot() EWMA { return a }
|
||||||
|
|
||||||
|
// Tick panics.
|
||||||
|
func (EWMASnapshot) Tick() {
|
||||||
|
panic("Tick called on an EWMASnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (EWMASnapshot) Update(int64) {
|
||||||
|
panic("Update called on an EWMASnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NilEWMA is a no-op EWMA.
|
||||||
|
type NilEWMA struct{}
|
||||||
|
|
||||||
|
// Rate is a no-op.
|
||||||
|
func (NilEWMA) Rate() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
|
||||||
|
|
||||||
|
// Tick is a no-op.
|
||||||
|
func (NilEWMA) Tick() {}
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilEWMA) Update(n int64) {}
|
||||||
|
|
||||||
|
// StandardEWMA is the standard implementation of an EWMA and tracks the number
|
||||||
|
// of uncounted events and processes them on each tick. It uses the
|
||||||
|
// sync/atomic package to manage uncounted events.
|
||||||
|
type StandardEWMA struct {
|
||||||
|
uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
|
||||||
|
alpha float64
|
||||||
|
rate float64
|
||||||
|
init bool
|
||||||
|
mutex sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate returns the moving average rate of events per second.
|
||||||
|
func (a *StandardEWMA) Rate() float64 {
|
||||||
|
a.mutex.Lock()
|
||||||
|
defer a.mutex.Unlock()
|
||||||
|
return a.rate * float64(1e9)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the EWMA.
|
||||||
|
func (a *StandardEWMA) Snapshot() EWMA {
|
||||||
|
return EWMASnapshot(a.Rate())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tick ticks the clock to update the moving average. It assumes it is called
|
||||||
|
// every five seconds.
|
||||||
|
func (a *StandardEWMA) Tick() {
|
||||||
|
count := atomic.LoadInt64(&a.uncounted)
|
||||||
|
atomic.AddInt64(&a.uncounted, -count)
|
||||||
|
instantRate := float64(count) / float64(5e9)
|
||||||
|
a.mutex.Lock()
|
||||||
|
defer a.mutex.Unlock()
|
||||||
|
if a.init {
|
||||||
|
a.rate += a.alpha * (instantRate - a.rate)
|
||||||
|
} else {
|
||||||
|
a.init = true
|
||||||
|
a.rate = instantRate
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update adds n uncounted events.
|
||||||
|
func (a *StandardEWMA) Update(n int64) {
|
||||||
|
atomic.AddInt64(&a.uncounted, n)
|
||||||
|
}
|
|
@ -0,0 +1,156 @@
|
||||||
|
// Hook go-metrics into expvar
|
||||||
|
// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
|
||||||
|
package exp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"expvar"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/rcrowley/go-metrics"
|
||||||
|
)
|
||||||
|
|
||||||
|
type exp struct {
|
||||||
|
expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
|
||||||
|
registry metrics.Registry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// load our variables into expvar
|
||||||
|
exp.syncToExpvar()
|
||||||
|
|
||||||
|
// now just run the official expvar handler code (which is not publicly callable, so pasted inline)
|
||||||
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||||
|
fmt.Fprintf(w, "{\n")
|
||||||
|
first := true
|
||||||
|
expvar.Do(func(kv expvar.KeyValue) {
|
||||||
|
if !first {
|
||||||
|
fmt.Fprintf(w, ",\n")
|
||||||
|
}
|
||||||
|
first = false
|
||||||
|
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
|
||||||
|
})
|
||||||
|
fmt.Fprintf(w, "\n}\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/vars"
|
||||||
|
func Exp(r metrics.Registry) {
|
||||||
|
h := ExpHandler(r)
|
||||||
|
// this would cause a panic:
|
||||||
|
// panic: http: multiple registrations for /debug/vars
|
||||||
|
// http.HandleFunc("/debug/vars", e.expHandler)
|
||||||
|
// haven't found an elegant way, so just use a different endpoint
|
||||||
|
http.Handle("/debug/metrics", h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpHandler will return an expvar powered metrics handler.
|
||||||
|
func ExpHandler(r metrics.Registry) http.Handler {
|
||||||
|
e := exp{sync.Mutex{}, r}
|
||||||
|
return http.HandlerFunc(e.expHandler)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) getInt(name string) *expvar.Int {
|
||||||
|
var v *expvar.Int
|
||||||
|
exp.expvarLock.Lock()
|
||||||
|
p := expvar.Get(name)
|
||||||
|
if p != nil {
|
||||||
|
v = p.(*expvar.Int)
|
||||||
|
} else {
|
||||||
|
v = new(expvar.Int)
|
||||||
|
expvar.Publish(name, v)
|
||||||
|
}
|
||||||
|
exp.expvarLock.Unlock()
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) getFloat(name string) *expvar.Float {
|
||||||
|
var v *expvar.Float
|
||||||
|
exp.expvarLock.Lock()
|
||||||
|
p := expvar.Get(name)
|
||||||
|
if p != nil {
|
||||||
|
v = p.(*expvar.Float)
|
||||||
|
} else {
|
||||||
|
v = new(expvar.Float)
|
||||||
|
expvar.Publish(name, v)
|
||||||
|
}
|
||||||
|
exp.expvarLock.Unlock()
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) publishCounter(name string, metric metrics.Counter) {
|
||||||
|
v := exp.getInt(name)
|
||||||
|
v.Set(metric.Count())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
|
||||||
|
v := exp.getInt(name)
|
||||||
|
v.Set(metric.Value())
|
||||||
|
}
|
||||||
|
func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
|
||||||
|
exp.getFloat(name).Set(metric.Value())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
|
||||||
|
h := metric.Snapshot()
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
exp.getInt(name + ".count").Set(h.Count())
|
||||||
|
exp.getFloat(name + ".min").Set(float64(h.Min()))
|
||||||
|
exp.getFloat(name + ".max").Set(float64(h.Max()))
|
||||||
|
exp.getFloat(name + ".mean").Set(float64(h.Mean()))
|
||||||
|
exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
|
||||||
|
exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
|
||||||
|
exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
|
||||||
|
exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
|
||||||
|
exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
|
||||||
|
exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) publishMeter(name string, metric metrics.Meter) {
|
||||||
|
m := metric.Snapshot()
|
||||||
|
exp.getInt(name + ".count").Set(m.Count())
|
||||||
|
exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
|
||||||
|
exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
|
||||||
|
exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
|
||||||
|
exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) publishTimer(name string, metric metrics.Timer) {
|
||||||
|
t := metric.Snapshot()
|
||||||
|
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
exp.getInt(name + ".count").Set(t.Count())
|
||||||
|
exp.getFloat(name + ".min").Set(float64(t.Min()))
|
||||||
|
exp.getFloat(name + ".max").Set(float64(t.Max()))
|
||||||
|
exp.getFloat(name + ".mean").Set(float64(t.Mean()))
|
||||||
|
exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
|
||||||
|
exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
|
||||||
|
exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
|
||||||
|
exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
|
||||||
|
exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
|
||||||
|
exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
|
||||||
|
exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
|
||||||
|
exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
|
||||||
|
exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
|
||||||
|
exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (exp *exp) syncToExpvar() {
|
||||||
|
exp.registry.Each(func(name string, i interface{}) {
|
||||||
|
switch i.(type) {
|
||||||
|
case metrics.Counter:
|
||||||
|
exp.publishCounter(name, i.(metrics.Counter))
|
||||||
|
case metrics.Gauge:
|
||||||
|
exp.publishGauge(name, i.(metrics.Gauge))
|
||||||
|
case metrics.GaugeFloat64:
|
||||||
|
exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
|
||||||
|
case metrics.Histogram:
|
||||||
|
exp.publishHistogram(name, i.(metrics.Histogram))
|
||||||
|
case metrics.Meter:
|
||||||
|
exp.publishMeter(name, i.(metrics.Meter))
|
||||||
|
case metrics.Timer:
|
||||||
|
exp.publishTimer(name, i.(metrics.Timer))
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,127 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// GaugeFloat64s hold a float64 value that can be set arbitrarily.
|
||||||
|
type GaugeFloat64 interface {
|
||||||
|
Snapshot() GaugeFloat64
|
||||||
|
Update(float64)
|
||||||
|
Value() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
|
||||||
|
// new StandardGaugeFloat64.
|
||||||
|
func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
|
||||||
|
func NewGaugeFloat64() GaugeFloat64 {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilGaugeFloat64{}
|
||||||
|
}
|
||||||
|
return &StandardGaugeFloat64{
|
||||||
|
value: 0.0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
|
||||||
|
func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
|
||||||
|
c := NewGaugeFloat64()
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
r.Register(name, c)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFunctionalGauge constructs a new FunctionalGauge.
|
||||||
|
func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilGaugeFloat64{}
|
||||||
|
}
|
||||||
|
return &FunctionalGaugeFloat64{value: f}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
|
||||||
|
func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
|
||||||
|
c := NewFunctionalGaugeFloat64(f)
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
r.Register(name, c)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
|
||||||
|
type GaugeFloat64Snapshot float64
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (GaugeFloat64Snapshot) Update(float64) {
|
||||||
|
panic("Update called on a GaugeFloat64Snapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the value at the time the snapshot was taken.
|
||||||
|
func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
|
||||||
|
|
||||||
|
// NilGauge is a no-op Gauge.
|
||||||
|
type NilGaugeFloat64 struct{}
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilGaugeFloat64) Update(v float64) {}
|
||||||
|
|
||||||
|
// Value is a no-op.
|
||||||
|
func (NilGaugeFloat64) Value() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
|
||||||
|
// sync.Mutex to manage a single float64 value.
|
||||||
|
type StandardGaugeFloat64 struct {
|
||||||
|
mutex sync.Mutex
|
||||||
|
value float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the gauge.
|
||||||
|
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
|
||||||
|
return GaugeFloat64Snapshot(g.Value())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update updates the gauge's value.
|
||||||
|
func (g *StandardGaugeFloat64) Update(v float64) {
|
||||||
|
g.mutex.Lock()
|
||||||
|
defer g.mutex.Unlock()
|
||||||
|
g.value = v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the gauge's current value.
|
||||||
|
func (g *StandardGaugeFloat64) Value() float64 {
|
||||||
|
g.mutex.Lock()
|
||||||
|
defer g.mutex.Unlock()
|
||||||
|
return g.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// FunctionalGaugeFloat64 returns value from given function
|
||||||
|
type FunctionalGaugeFloat64 struct {
|
||||||
|
value func() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the gauge's current value.
|
||||||
|
func (g FunctionalGaugeFloat64) Value() float64 {
|
||||||
|
return g.value()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (FunctionalGaugeFloat64) Update(float64) {
|
||||||
|
panic("Update called on a FunctionalGaugeFloat64")
|
||||||
|
}
|
|
@ -0,0 +1,113 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GraphiteConfig provides a container with configuration parameters for
|
||||||
|
// the Graphite exporter
|
||||||
|
type GraphiteConfig struct {
|
||||||
|
Addr *net.TCPAddr // Network address to connect to
|
||||||
|
Registry Registry // Registry to be exported
|
||||||
|
FlushInterval time.Duration // Flush interval
|
||||||
|
DurationUnit time.Duration // Time conversion unit for durations
|
||||||
|
Prefix string // Prefix to be prepended to metric names
|
||||||
|
Percentiles []float64 // Percentiles to export from timers and histograms
|
||||||
|
}
|
||||||
|
|
||||||
|
// Graphite is a blocking exporter function which reports metrics in r
|
||||||
|
// to a graphite server located at addr, flushing them every d duration
|
||||||
|
// and prepending metric names with prefix.
|
||||||
|
func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
|
||||||
|
GraphiteWithConfig(GraphiteConfig{
|
||||||
|
Addr: addr,
|
||||||
|
Registry: r,
|
||||||
|
FlushInterval: d,
|
||||||
|
DurationUnit: time.Nanosecond,
|
||||||
|
Prefix: prefix,
|
||||||
|
Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphiteWithConfig is a blocking exporter function just like Graphite,
|
||||||
|
// but it takes a GraphiteConfig instead.
|
||||||
|
func GraphiteWithConfig(c GraphiteConfig) {
|
||||||
|
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
|
||||||
|
for _ = range time.Tick(c.FlushInterval) {
|
||||||
|
if err := graphite(&c); nil != err {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GraphiteOnce performs a single submission to Graphite, returning a
|
||||||
|
// non-nil error on failed connections. This can be used in a loop
|
||||||
|
// similar to GraphiteWithConfig for custom error handling.
|
||||||
|
func GraphiteOnce(c GraphiteConfig) error {
|
||||||
|
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
|
||||||
|
return graphite(&c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func graphite(c *GraphiteConfig) error {
|
||||||
|
now := time.Now().Unix()
|
||||||
|
du := float64(c.DurationUnit)
|
||||||
|
conn, err := net.DialTCP("tcp", nil, c.Addr)
|
||||||
|
if nil != err {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
w := bufio.NewWriter(conn)
|
||||||
|
c.Registry.Each(func(name string, i interface{}) {
|
||||||
|
switch metric := i.(type) {
|
||||||
|
case Counter:
|
||||||
|
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
|
||||||
|
case Gauge:
|
||||||
|
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
|
||||||
|
case GaugeFloat64:
|
||||||
|
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
|
||||||
|
case Histogram:
|
||||||
|
h := metric.Snapshot()
|
||||||
|
ps := h.Percentiles(c.Percentiles)
|
||||||
|
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
|
||||||
|
for psIdx, psKey := range c.Percentiles {
|
||||||
|
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
|
||||||
|
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
|
||||||
|
}
|
||||||
|
case Meter:
|
||||||
|
m := metric.Snapshot()
|
||||||
|
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
|
||||||
|
case Timer:
|
||||||
|
t := metric.Snapshot()
|
||||||
|
ps := t.Percentiles(c.Percentiles)
|
||||||
|
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
|
||||||
|
for psIdx, psKey := range c.Percentiles {
|
||||||
|
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
|
||||||
|
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
|
||||||
|
fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
|
||||||
|
}
|
||||||
|
w.Flush()
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,61 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
// Healthchecks hold an error value describing an arbitrary up/down status.
|
||||||
|
type Healthcheck interface {
|
||||||
|
Check()
|
||||||
|
Error() error
|
||||||
|
Healthy()
|
||||||
|
Unhealthy(error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHealthcheck constructs a new Healthcheck which will use the given
|
||||||
|
// function to update its status.
|
||||||
|
func NewHealthcheck(f func(Healthcheck)) Healthcheck {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilHealthcheck{}
|
||||||
|
}
|
||||||
|
return &StandardHealthcheck{nil, f}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NilHealthcheck is a no-op.
|
||||||
|
type NilHealthcheck struct{}
|
||||||
|
|
||||||
|
// Check is a no-op.
|
||||||
|
func (NilHealthcheck) Check() {}
|
||||||
|
|
||||||
|
// Error is a no-op.
|
||||||
|
func (NilHealthcheck) Error() error { return nil }
|
||||||
|
|
||||||
|
// Healthy is a no-op.
|
||||||
|
func (NilHealthcheck) Healthy() {}
|
||||||
|
|
||||||
|
// Unhealthy is a no-op.
|
||||||
|
func (NilHealthcheck) Unhealthy(error) {}
|
||||||
|
|
||||||
|
// StandardHealthcheck is the standard implementation of a Healthcheck and
|
||||||
|
// stores the status and a function to call to update the status.
|
||||||
|
type StandardHealthcheck struct {
|
||||||
|
err error
|
||||||
|
f func(Healthcheck)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check runs the healthcheck function to update the healthcheck's status.
|
||||||
|
func (h *StandardHealthcheck) Check() {
|
||||||
|
h.f(h)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the healthcheck's status, which will be nil if it is healthy.
|
||||||
|
func (h *StandardHealthcheck) Error() error {
|
||||||
|
return h.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Healthy marks the healthcheck as healthy.
|
||||||
|
func (h *StandardHealthcheck) Healthy() {
|
||||||
|
h.err = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unhealthy marks the healthcheck as unhealthy. The error is stored and
|
||||||
|
// may be retrieved by the Error method.
|
||||||
|
func (h *StandardHealthcheck) Unhealthy(err error) {
|
||||||
|
h.err = err
|
||||||
|
}
|
|
@ -0,0 +1,202 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
// Histograms calculate distribution statistics from a series of int64 values.
|
||||||
|
type Histogram interface {
|
||||||
|
Clear()
|
||||||
|
Count() int64
|
||||||
|
Max() int64
|
||||||
|
Mean() float64
|
||||||
|
Min() int64
|
||||||
|
Percentile(float64) float64
|
||||||
|
Percentiles([]float64) []float64
|
||||||
|
Sample() Sample
|
||||||
|
Snapshot() Histogram
|
||||||
|
StdDev() float64
|
||||||
|
Sum() int64
|
||||||
|
Update(int64)
|
||||||
|
Variance() float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrRegisterHistogram returns an existing Histogram or constructs and
|
||||||
|
// registers a new StandardHistogram.
|
||||||
|
func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHistogram constructs a new StandardHistogram from a Sample.
|
||||||
|
func NewHistogram(s Sample) Histogram {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilHistogram{}
|
||||||
|
}
|
||||||
|
return &StandardHistogram{sample: s}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRegisteredHistogram constructs and registers a new StandardHistogram from
|
||||||
|
// a Sample.
|
||||||
|
func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
|
||||||
|
c := NewHistogram(s)
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
r.Register(name, c)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramSnapshot is a read-only copy of another Histogram.
|
||||||
|
type HistogramSnapshot struct {
|
||||||
|
sample *SampleSnapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear panics.
|
||||||
|
func (*HistogramSnapshot) Clear() {
|
||||||
|
panic("Clear called on a HistogramSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample at the time the snapshot
|
||||||
|
// was taken.
|
||||||
|
func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample at the time the snapshot was
|
||||||
|
// taken.
|
||||||
|
func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of values in the sample at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Percentile(p float64) float64 {
|
||||||
|
return h.sample.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of values in the sample
|
||||||
|
// at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
|
||||||
|
return h.sample.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample returns the Sample underlying the histogram.
|
||||||
|
func (h *HistogramSnapshot) Sample() Sample { return h.sample }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (h *HistogramSnapshot) Snapshot() Histogram { return h }
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
|
||||||
|
|
||||||
|
// Sum returns the sum in the sample at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
|
||||||
|
|
||||||
|
// Update panics.
|
||||||
|
func (*HistogramSnapshot) Update(int64) {
|
||||||
|
panic("Update called on a HistogramSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variance returns the variance of inputs at the time the snapshot was taken.
|
||||||
|
func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
|
||||||
|
|
||||||
|
// NilHistogram is a no-op Histogram.
|
||||||
|
type NilHistogram struct{}
|
||||||
|
|
||||||
|
// Clear is a no-op.
|
||||||
|
func (NilHistogram) Clear() {}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilHistogram) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Max is a no-op.
|
||||||
|
func (NilHistogram) Max() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mean is a no-op.
|
||||||
|
func (NilHistogram) Mean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Min is a no-op.
|
||||||
|
func (NilHistogram) Min() int64 { return 0 }
|
||||||
|
|
||||||
|
// Percentile is a no-op.
|
||||||
|
func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Percentiles is a no-op.
|
||||||
|
func (NilHistogram) Percentiles(ps []float64) []float64 {
|
||||||
|
return make([]float64, len(ps))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample is a no-op.
|
||||||
|
func (NilHistogram) Sample() Sample { return NilSample{} }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
|
||||||
|
|
||||||
|
// StdDev is a no-op.
|
||||||
|
func (NilHistogram) StdDev() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Sum is a no-op.
|
||||||
|
func (NilHistogram) Sum() int64 { return 0 }
|
||||||
|
|
||||||
|
// Update is a no-op.
|
||||||
|
func (NilHistogram) Update(v int64) {}
|
||||||
|
|
||||||
|
// Variance is a no-op.
|
||||||
|
func (NilHistogram) Variance() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// StandardHistogram is the standard implementation of a Histogram and uses a
|
||||||
|
// Sample to bound its memory use.
|
||||||
|
type StandardHistogram struct {
|
||||||
|
sample Sample
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the histogram and its sample.
|
||||||
|
func (h *StandardHistogram) Clear() { h.sample.Clear() }
|
||||||
|
|
||||||
|
// Count returns the number of samples recorded since the histogram was last
|
||||||
|
// cleared.
|
||||||
|
func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
|
||||||
|
|
||||||
|
// Max returns the maximum value in the sample.
|
||||||
|
func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
|
||||||
|
|
||||||
|
// Mean returns the mean of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
|
||||||
|
|
||||||
|
// Min returns the minimum value in the sample.
|
||||||
|
func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
|
||||||
|
|
||||||
|
// Percentile returns an arbitrary percentile of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Percentile(p float64) float64 {
|
||||||
|
return h.sample.Percentile(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Percentiles returns a slice of arbitrary percentiles of the values in the
|
||||||
|
// sample.
|
||||||
|
func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
|
||||||
|
return h.sample.Percentiles(ps)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample returns the Sample underlying the histogram.
|
||||||
|
func (h *StandardHistogram) Sample() Sample { return h.sample }
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the histogram.
|
||||||
|
func (h *StandardHistogram) Snapshot() Histogram {
|
||||||
|
return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StdDev returns the standard deviation of the values in the sample.
|
||||||
|
func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
|
||||||
|
|
||||||
|
// Sum returns the sum in the sample.
|
||||||
|
func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
|
||||||
|
|
||||||
|
// Update samples a new value.
|
||||||
|
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
|
||||||
|
|
||||||
|
// Variance returns the variance of the values in the sample.
|
||||||
|
func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
|
|
@ -0,0 +1,87 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalJSON returns a byte slice containing a JSON representation of all
|
||||||
|
// the metrics in the Registry.
|
||||||
|
func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
|
||||||
|
data := make(map[string]map[string]interface{})
|
||||||
|
r.Each(func(name string, i interface{}) {
|
||||||
|
values := make(map[string]interface{})
|
||||||
|
switch metric := i.(type) {
|
||||||
|
case Counter:
|
||||||
|
values["count"] = metric.Count()
|
||||||
|
case Gauge:
|
||||||
|
values["value"] = metric.Value()
|
||||||
|
case GaugeFloat64:
|
||||||
|
values["value"] = metric.Value()
|
||||||
|
case Healthcheck:
|
||||||
|
values["error"] = nil
|
||||||
|
metric.Check()
|
||||||
|
if err := metric.Error(); nil != err {
|
||||||
|
values["error"] = metric.Error().Error()
|
||||||
|
}
|
||||||
|
case Histogram:
|
||||||
|
h := metric.Snapshot()
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
values["count"] = h.Count()
|
||||||
|
values["min"] = h.Min()
|
||||||
|
values["max"] = h.Max()
|
||||||
|
values["mean"] = h.Mean()
|
||||||
|
values["stddev"] = h.StdDev()
|
||||||
|
values["median"] = ps[0]
|
||||||
|
values["75%"] = ps[1]
|
||||||
|
values["95%"] = ps[2]
|
||||||
|
values["99%"] = ps[3]
|
||||||
|
values["99.9%"] = ps[4]
|
||||||
|
case Meter:
|
||||||
|
m := metric.Snapshot()
|
||||||
|
values["count"] = m.Count()
|
||||||
|
values["1m.rate"] = m.Rate1()
|
||||||
|
values["5m.rate"] = m.Rate5()
|
||||||
|
values["15m.rate"] = m.Rate15()
|
||||||
|
values["mean.rate"] = m.RateMean()
|
||||||
|
case Timer:
|
||||||
|
t := metric.Snapshot()
|
||||||
|
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
values["count"] = t.Count()
|
||||||
|
values["min"] = t.Min()
|
||||||
|
values["max"] = t.Max()
|
||||||
|
values["mean"] = t.Mean()
|
||||||
|
values["stddev"] = t.StdDev()
|
||||||
|
values["median"] = ps[0]
|
||||||
|
values["75%"] = ps[1]
|
||||||
|
values["95%"] = ps[2]
|
||||||
|
values["99%"] = ps[3]
|
||||||
|
values["99.9%"] = ps[4]
|
||||||
|
values["1m.rate"] = t.Rate1()
|
||||||
|
values["5m.rate"] = t.Rate5()
|
||||||
|
values["15m.rate"] = t.Rate15()
|
||||||
|
values["mean.rate"] = t.RateMean()
|
||||||
|
}
|
||||||
|
data[name] = values
|
||||||
|
})
|
||||||
|
return json.Marshal(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteJSON writes metrics from the given registry periodically to the
|
||||||
|
// specified io.Writer as JSON.
|
||||||
|
func WriteJSON(r Registry, d time.Duration, w io.Writer) {
|
||||||
|
for _ = range time.Tick(d) {
|
||||||
|
WriteJSONOnce(r, w)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteJSONOnce writes metrics from the given registry to the specified
|
||||||
|
// io.Writer as JSON.
|
||||||
|
func WriteJSONOnce(r Registry, w io.Writer) {
|
||||||
|
json.NewEncoder(w).Encode(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(p.underlying)
|
||||||
|
}
|
|
@ -0,0 +1,80 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Logger interface {
|
||||||
|
Printf(format string, v ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func Log(r Registry, freq time.Duration, l Logger) {
|
||||||
|
LogScaled(r, freq, time.Nanosecond, l)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output each metric in the given registry periodically using the given
|
||||||
|
// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
|
||||||
|
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
|
||||||
|
du := float64(scale)
|
||||||
|
duSuffix := scale.String()[1:]
|
||||||
|
|
||||||
|
for _ = range time.Tick(freq) {
|
||||||
|
r.Each(func(name string, i interface{}) {
|
||||||
|
switch metric := i.(type) {
|
||||||
|
case Counter:
|
||||||
|
l.Printf("counter %s\n", name)
|
||||||
|
l.Printf(" count: %9d\n", metric.Count())
|
||||||
|
case Gauge:
|
||||||
|
l.Printf("gauge %s\n", name)
|
||||||
|
l.Printf(" value: %9d\n", metric.Value())
|
||||||
|
case GaugeFloat64:
|
||||||
|
l.Printf("gauge %s\n", name)
|
||||||
|
l.Printf(" value: %f\n", metric.Value())
|
||||||
|
case Healthcheck:
|
||||||
|
metric.Check()
|
||||||
|
l.Printf("healthcheck %s\n", name)
|
||||||
|
l.Printf(" error: %v\n", metric.Error())
|
||||||
|
case Histogram:
|
||||||
|
h := metric.Snapshot()
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
l.Printf("histogram %s\n", name)
|
||||||
|
l.Printf(" count: %9d\n", h.Count())
|
||||||
|
l.Printf(" min: %9d\n", h.Min())
|
||||||
|
l.Printf(" max: %9d\n", h.Max())
|
||||||
|
l.Printf(" mean: %12.2f\n", h.Mean())
|
||||||
|
l.Printf(" stddev: %12.2f\n", h.StdDev())
|
||||||
|
l.Printf(" median: %12.2f\n", ps[0])
|
||||||
|
l.Printf(" 75%%: %12.2f\n", ps[1])
|
||||||
|
l.Printf(" 95%%: %12.2f\n", ps[2])
|
||||||
|
l.Printf(" 99%%: %12.2f\n", ps[3])
|
||||||
|
l.Printf(" 99.9%%: %12.2f\n", ps[4])
|
||||||
|
case Meter:
|
||||||
|
m := metric.Snapshot()
|
||||||
|
l.Printf("meter %s\n", name)
|
||||||
|
l.Printf(" count: %9d\n", m.Count())
|
||||||
|
l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
|
||||||
|
l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
|
||||||
|
l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
|
||||||
|
l.Printf(" mean rate: %12.2f\n", m.RateMean())
|
||||||
|
case Timer:
|
||||||
|
t := metric.Snapshot()
|
||||||
|
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
l.Printf("timer %s\n", name)
|
||||||
|
l.Printf(" count: %9d\n", t.Count())
|
||||||
|
l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
|
||||||
|
l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
|
||||||
|
l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
|
||||||
|
l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
|
||||||
|
l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
|
||||||
|
l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
|
||||||
|
l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
|
||||||
|
l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
|
||||||
|
l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
|
||||||
|
l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
|
||||||
|
l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
|
||||||
|
l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
|
||||||
|
l.Printf(" mean rate: %12.2f\n", t.RateMean())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,285 @@
|
||||||
|
Memory usage
|
||||||
|
============
|
||||||
|
|
||||||
|
(Highly unscientific.)
|
||||||
|
|
||||||
|
Command used to gather static memory usage:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
|
||||||
|
```
|
||||||
|
|
||||||
|
Program used to gather baseline memory usage:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
time.Sleep(600e9)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Baseline
|
||||||
|
--------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 42604 kB
|
||||||
|
VmSize: 42604 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 1120 kB
|
||||||
|
VmRSS: 1120 kB
|
||||||
|
VmData: 35460 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1020 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 36 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
Program used to gather metric memory usage (with other metrics being similar):
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"metrics"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Sprintf("foo")
|
||||||
|
metrics.NewRegistry()
|
||||||
|
time.Sleep(600e9)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
1000 counters registered
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 44016 kB
|
||||||
|
VmSize: 44016 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 1928 kB
|
||||||
|
VmRSS: 1928 kB
|
||||||
|
VmData: 36868 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1024 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 40 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**1.412 kB virtual, TODO 0.808 kB resident per counter.**
|
||||||
|
|
||||||
|
100000 counters registered
|
||||||
|
--------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 55024 kB
|
||||||
|
VmSize: 55024 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 12440 kB
|
||||||
|
VmRSS: 12440 kB
|
||||||
|
VmData: 47876 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1024 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 64 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**0.1242 kB virtual, 0.1132 kB resident per counter.**
|
||||||
|
|
||||||
|
1000 gauges registered
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 44012 kB
|
||||||
|
VmSize: 44012 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 1928 kB
|
||||||
|
VmRSS: 1928 kB
|
||||||
|
VmData: 36868 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1020 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 40 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**1.408 kB virtual, 0.808 kB resident per counter.**
|
||||||
|
|
||||||
|
100000 gauges registered
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 55020 kB
|
||||||
|
VmSize: 55020 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 12432 kB
|
||||||
|
VmRSS: 12432 kB
|
||||||
|
VmData: 47876 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1020 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 60 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**0.12416 kB virtual, 0.11312 resident per gauge.**
|
||||||
|
|
||||||
|
1000 histograms with a uniform sample size of 1028
|
||||||
|
--------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 72272 kB
|
||||||
|
VmSize: 72272 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 16204 kB
|
||||||
|
VmRSS: 16204 kB
|
||||||
|
VmData: 65100 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 80 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**29.668 kB virtual, TODO 15.084 resident per histogram.**
|
||||||
|
|
||||||
|
10000 histograms with a uniform sample size of 1028
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 256912 kB
|
||||||
|
VmSize: 256912 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 146204 kB
|
||||||
|
VmRSS: 146204 kB
|
||||||
|
VmData: 249740 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 448 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**21.4308 kB virtual, 14.5084 kB resident per histogram.**
|
||||||
|
|
||||||
|
50000 histograms with a uniform sample size of 1028
|
||||||
|
---------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 908112 kB
|
||||||
|
VmSize: 908112 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 645832 kB
|
||||||
|
VmRSS: 645588 kB
|
||||||
|
VmData: 900940 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 1716 kB
|
||||||
|
VmSwap: 1544 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**17.31016 kB virtual, 12.88936 kB resident per histogram.**
|
||||||
|
|
||||||
|
1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
||||||
|
-------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 62480 kB
|
||||||
|
VmSize: 62480 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 11572 kB
|
||||||
|
VmRSS: 11572 kB
|
||||||
|
VmData: 55308 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 64 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**19.876 kB virtual, 10.452 kB resident per histogram.**
|
||||||
|
|
||||||
|
10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
||||||
|
--------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 153296 kB
|
||||||
|
VmSize: 153296 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 101176 kB
|
||||||
|
VmRSS: 101176 kB
|
||||||
|
VmData: 146124 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 240 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**11.0692 kB virtual, 10.0056 kB resident per histogram.**
|
||||||
|
|
||||||
|
50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
|
||||||
|
--------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 557264 kB
|
||||||
|
VmSize: 557264 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 501056 kB
|
||||||
|
VmRSS: 501056 kB
|
||||||
|
VmData: 550092 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1048 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 1032 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**10.2932 kB virtual, 9.99872 kB resident per histogram.**
|
||||||
|
|
||||||
|
1000 meters
|
||||||
|
-----------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 74504 kB
|
||||||
|
VmSize: 74504 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 24124 kB
|
||||||
|
VmRSS: 24124 kB
|
||||||
|
VmData: 67340 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1040 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 92 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**31.9 kB virtual, 23.004 kB resident per meter.**
|
||||||
|
|
||||||
|
10000 meters
|
||||||
|
------------
|
||||||
|
|
||||||
|
```
|
||||||
|
VmPeak: 278920 kB
|
||||||
|
VmSize: 278920 kB
|
||||||
|
VmLck: 0 kB
|
||||||
|
VmHWM: 227300 kB
|
||||||
|
VmRSS: 227300 kB
|
||||||
|
VmData: 271756 kB
|
||||||
|
VmStk: 136 kB
|
||||||
|
VmExe: 1040 kB
|
||||||
|
VmLib: 1848 kB
|
||||||
|
VmPTE: 488 kB
|
||||||
|
VmSwap: 0 kB
|
||||||
|
```
|
||||||
|
|
||||||
|
**23.6316 kB virtual, 22.618 kB resident per meter.**
|
|
@ -0,0 +1,233 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Meters count events to produce exponentially-weighted moving average rates
|
||||||
|
// at one-, five-, and fifteen-minutes and a mean rate.
|
||||||
|
type Meter interface {
|
||||||
|
Count() int64
|
||||||
|
Mark(int64)
|
||||||
|
Rate1() float64
|
||||||
|
Rate5() float64
|
||||||
|
Rate15() float64
|
||||||
|
RateMean() float64
|
||||||
|
Snapshot() Meter
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrRegisterMeter returns an existing Meter or constructs and registers a
|
||||||
|
// new StandardMeter.
|
||||||
|
func GetOrRegisterMeter(name string, r Registry) Meter {
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
return r.GetOrRegister(name, NewMeter).(Meter)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMeter constructs a new StandardMeter and launches a goroutine.
|
||||||
|
func NewMeter() Meter {
|
||||||
|
if UseNilMetrics {
|
||||||
|
return NilMeter{}
|
||||||
|
}
|
||||||
|
m := newStandardMeter()
|
||||||
|
arbiter.Lock()
|
||||||
|
defer arbiter.Unlock()
|
||||||
|
arbiter.meters = append(arbiter.meters, m)
|
||||||
|
if !arbiter.started {
|
||||||
|
arbiter.started = true
|
||||||
|
go arbiter.tick()
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMeter constructs and registers a new StandardMeter and launches a
|
||||||
|
// goroutine.
|
||||||
|
func NewRegisteredMeter(name string, r Registry) Meter {
|
||||||
|
c := NewMeter()
|
||||||
|
if nil == r {
|
||||||
|
r = DefaultRegistry
|
||||||
|
}
|
||||||
|
r.Register(name, c)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// MeterSnapshot is a read-only copy of another Meter.
|
||||||
|
type MeterSnapshot struct {
|
||||||
|
count int64
|
||||||
|
rate1, rate5, rate15, rateMean float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of events at the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Count() int64 { return m.count }
|
||||||
|
|
||||||
|
// Mark panics.
|
||||||
|
func (*MeterSnapshot) Mark(n int64) {
|
||||||
|
panic("Mark called on a MeterSnapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second at the
|
||||||
|
// time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second at
|
||||||
|
// the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second
|
||||||
|
// at the time the snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second at the time the
|
||||||
|
// snapshot was taken.
|
||||||
|
func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
|
||||||
|
|
||||||
|
// Snapshot returns the snapshot.
|
||||||
|
func (m *MeterSnapshot) Snapshot() Meter { return m }
|
||||||
|
|
||||||
|
// NilMeter is a no-op Meter.
|
||||||
|
type NilMeter struct{}
|
||||||
|
|
||||||
|
// Count is a no-op.
|
||||||
|
func (NilMeter) Count() int64 { return 0 }
|
||||||
|
|
||||||
|
// Mark is a no-op.
|
||||||
|
func (NilMeter) Mark(n int64) {}
|
||||||
|
|
||||||
|
// Rate1 is a no-op.
|
||||||
|
func (NilMeter) Rate1() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate5 is a no-op.
|
||||||
|
func (NilMeter) Rate5() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Rate15is a no-op.
|
||||||
|
func (NilMeter) Rate15() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// RateMean is a no-op.
|
||||||
|
func (NilMeter) RateMean() float64 { return 0.0 }
|
||||||
|
|
||||||
|
// Snapshot is a no-op.
|
||||||
|
func (NilMeter) Snapshot() Meter { return NilMeter{} }
|
||||||
|
|
||||||
|
// StandardMeter is the standard implementation of a Meter.
|
||||||
|
type StandardMeter struct {
|
||||||
|
lock sync.RWMutex
|
||||||
|
snapshot *MeterSnapshot
|
||||||
|
a1, a5, a15 EWMA
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStandardMeter() *StandardMeter {
|
||||||
|
return &StandardMeter{
|
||||||
|
snapshot: &MeterSnapshot{},
|
||||||
|
a1: NewEWMA1(),
|
||||||
|
a5: NewEWMA5(),
|
||||||
|
a15: NewEWMA15(),
|
||||||
|
startTime: time.Now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the number of events recorded.
|
||||||
|
func (m *StandardMeter) Count() int64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
count := m.snapshot.count
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark records the occurance of n events.
|
||||||
|
func (m *StandardMeter) Mark(n int64) {
|
||||||
|
m.lock.Lock()
|
||||||
|
defer m.lock.Unlock()
|
||||||
|
m.snapshot.count += n
|
||||||
|
m.a1.Update(n)
|
||||||
|
m.a5.Update(n)
|
||||||
|
m.a15.Update(n)
|
||||||
|
m.updateSnapshot()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate1 returns the one-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate1() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate1 := m.snapshot.rate1
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate5 returns the five-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate5() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate5 := m.snapshot.rate5
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate5
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rate15 returns the fifteen-minute moving average rate of events per second.
|
||||||
|
func (m *StandardMeter) Rate15() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rate15 := m.snapshot.rate15
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rate15
|
||||||
|
}
|
||||||
|
|
||||||
|
// RateMean returns the meter's mean rate of events per second.
|
||||||
|
func (m *StandardMeter) RateMean() float64 {
|
||||||
|
m.lock.RLock()
|
||||||
|
rateMean := m.snapshot.rateMean
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return rateMean
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a read-only copy of the meter.
|
||||||
|
func (m *StandardMeter) Snapshot() Meter {
|
||||||
|
m.lock.RLock()
|
||||||
|
snapshot := *m.snapshot
|
||||||
|
m.lock.RUnlock()
|
||||||
|
return &snapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StandardMeter) updateSnapshot() {
|
||||||
|
// should run with write lock held on m.lock
|
||||||
|
snapshot := m.snapshot
|
||||||
|
snapshot.rate1 = m.a1.Rate()
|
||||||
|
snapshot.rate5 = m.a5.Rate()
|
||||||
|
snapshot.rate15 = m.a15.Rate()
|
||||||
|
snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StandardMeter) tick() {
|
||||||
|
m.lock.Lock()
|
||||||
|
defer m.lock.Unlock()
|
||||||
|
m.a1.Tick()
|
||||||
|
m.a5.Tick()
|
||||||
|
m.a15.Tick()
|
||||||
|
m.updateSnapshot()
|
||||||
|
}
|
||||||
|
|
||||||
|
type meterArbiter struct {
|
||||||
|
sync.RWMutex
|
||||||
|
started bool
|
||||||
|
meters []*StandardMeter
|
||||||
|
ticker *time.Ticker
|
||||||
|
}
|
||||||
|
|
||||||
|
var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
|
||||||
|
|
||||||
|
// Ticks meters on the scheduled interval
|
||||||
|
func (ma *meterArbiter) tick() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ma.ticker.C:
|
||||||
|
ma.tickMeters()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ma *meterArbiter) tickMeters() {
|
||||||
|
ma.RLock()
|
||||||
|
defer ma.RUnlock()
|
||||||
|
for _, meter := range ma.meters {
|
||||||
|
meter.tick()
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,13 @@
|
||||||
|
// Go port of Coda Hale's Metrics library
|
||||||
|
//
|
||||||
|
// <https://github.com/rcrowley/go-metrics>
|
||||||
|
//
|
||||||
|
// Coda Hale's original work: <https://github.com/codahale/metrics>
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
// UseNilMetrics is checked by the constructor functions for all of the
|
||||||
|
// standard metrics. If it is true, the metric returned is a stub.
|
||||||
|
//
|
||||||
|
// This global kill-switch helps quantify the observer effect and makes
|
||||||
|
// for less cluttered pprof profiles.
|
||||||
|
var UseNilMetrics bool = false
|
|
@ -0,0 +1,119 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var shortHostName string = ""
|
||||||
|
|
||||||
|
// OpenTSDBConfig provides a container with configuration parameters for
|
||||||
|
// the OpenTSDB exporter
|
||||||
|
type OpenTSDBConfig struct {
|
||||||
|
Addr *net.TCPAddr // Network address to connect to
|
||||||
|
Registry Registry // Registry to be exported
|
||||||
|
FlushInterval time.Duration // Flush interval
|
||||||
|
DurationUnit time.Duration // Time conversion unit for durations
|
||||||
|
Prefix string // Prefix to be prepended to metric names
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenTSDB is a blocking exporter function which reports metrics in r
|
||||||
|
// to a TSDB server located at addr, flushing them every d duration
|
||||||
|
// and prepending metric names with prefix.
|
||||||
|
func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
|
||||||
|
OpenTSDBWithConfig(OpenTSDBConfig{
|
||||||
|
Addr: addr,
|
||||||
|
Registry: r,
|
||||||
|
FlushInterval: d,
|
||||||
|
DurationUnit: time.Nanosecond,
|
||||||
|
Prefix: prefix,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
|
||||||
|
// but it takes a OpenTSDBConfig instead.
|
||||||
|
func OpenTSDBWithConfig(c OpenTSDBConfig) {
|
||||||
|
for _ = range time.Tick(c.FlushInterval) {
|
||||||
|
if err := openTSDB(&c); nil != err {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getShortHostname() string {
|
||||||
|
if shortHostName == "" {
|
||||||
|
host, _ := os.Hostname()
|
||||||
|
if index := strings.Index(host, "."); index > 0 {
|
||||||
|
shortHostName = host[:index]
|
||||||
|
} else {
|
||||||
|
shortHostName = host
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return shortHostName
|
||||||
|
}
|
||||||
|
|
||||||
|
func openTSDB(c *OpenTSDBConfig) error {
|
||||||
|
shortHostname := getShortHostname()
|
||||||
|
now := time.Now().Unix()
|
||||||
|
du := float64(c.DurationUnit)
|
||||||
|
conn, err := net.DialTCP("tcp", nil, c.Addr)
|
||||||
|
if nil != err {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
w := bufio.NewWriter(conn)
|
||||||
|
c.Registry.Each(func(name string, i interface{}) {
|
||||||
|
switch metric := i.(type) {
|
||||||
|
case Counter:
|
||||||
|
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
|
||||||
|
case Gauge:
|
||||||
|
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
|
||||||
|
case GaugeFloat64:
|
||||||
|
fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
|
||||||
|
case Histogram:
|
||||||
|
h := metric.Snapshot()
|
||||||
|
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
|
||||||
|
case Meter:
|
||||||
|
m := metric.Snapshot()
|
||||||
|
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
|
||||||
|
case Timer:
|
||||||
|
t := metric.Snapshot()
|
||||||
|
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
|
||||||
|
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
|
||||||
|
fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
|
||||||
|
}
|
||||||
|
w.Flush()
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
0
vendor/github.com/rcrowley/go-metrics/registry.go
generated
vendored
Normal file → Executable file
0
vendor/github.com/rcrowley/go-metrics/registry.go
generated
vendored
Normal file → Executable file
|
@ -0,0 +1,212 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime"
|
||||||
|
"runtime/pprof"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
memStats runtime.MemStats
|
||||||
|
runtimeMetrics struct {
|
||||||
|
MemStats struct {
|
||||||
|
Alloc Gauge
|
||||||
|
BuckHashSys Gauge
|
||||||
|
DebugGC Gauge
|
||||||
|
EnableGC Gauge
|
||||||
|
Frees Gauge
|
||||||
|
HeapAlloc Gauge
|
||||||
|
HeapIdle Gauge
|
||||||
|
HeapInuse Gauge
|
||||||
|
HeapObjects Gauge
|
||||||
|
HeapReleased Gauge
|
||||||
|
HeapSys Gauge
|
||||||
|
LastGC Gauge
|
||||||
|
Lookups Gauge
|
||||||
|
Mallocs Gauge
|
||||||
|
MCacheInuse Gauge
|
||||||
|
MCacheSys Gauge
|
||||||
|
MSpanInuse Gauge
|
||||||
|
MSpanSys Gauge
|
||||||
|
NextGC Gauge
|
||||||
|
NumGC Gauge
|
||||||
|
GCCPUFraction GaugeFloat64
|
||||||
|
PauseNs Histogram
|
||||||
|
PauseTotalNs Gauge
|
||||||
|
StackInuse Gauge
|
||||||
|
StackSys Gauge
|
||||||
|
Sys Gauge
|
||||||
|
TotalAlloc Gauge
|
||||||
|
}
|
||||||
|
NumCgoCall Gauge
|
||||||
|
NumGoroutine Gauge
|
||||||
|
NumThread Gauge
|
||||||
|
ReadMemStats Timer
|
||||||
|
}
|
||||||
|
frees uint64
|
||||||
|
lookups uint64
|
||||||
|
mallocs uint64
|
||||||
|
numGC uint32
|
||||||
|
numCgoCalls int64
|
||||||
|
|
||||||
|
threadCreateProfile = pprof.Lookup("threadcreate")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Capture new values for the Go runtime statistics exported in
|
||||||
|
// runtime.MemStats. This is designed to be called as a goroutine.
|
||||||
|
func CaptureRuntimeMemStats(r Registry, d time.Duration) {
|
||||||
|
for _ = range time.Tick(d) {
|
||||||
|
CaptureRuntimeMemStatsOnce(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture new values for the Go runtime statistics exported in
|
||||||
|
// runtime.MemStats. This is designed to be called in a background
|
||||||
|
// goroutine. Giving a registry which has not been given to
|
||||||
|
// RegisterRuntimeMemStats will panic.
|
||||||
|
//
|
||||||
|
// Be very careful with this because runtime.ReadMemStats calls the C
|
||||||
|
// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
|
||||||
|
// and that last one does what it says on the tin.
|
||||||
|
func CaptureRuntimeMemStatsOnce(r Registry) {
|
||||||
|
t := time.Now()
|
||||||
|
runtime.ReadMemStats(&memStats) // This takes 50-200us.
|
||||||
|
runtimeMetrics.ReadMemStats.UpdateSince(t)
|
||||||
|
|
||||||
|
runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
|
||||||
|
runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
|
||||||
|
if memStats.DebugGC {
|
||||||
|
runtimeMetrics.MemStats.DebugGC.Update(1)
|
||||||
|
} else {
|
||||||
|
runtimeMetrics.MemStats.DebugGC.Update(0)
|
||||||
|
}
|
||||||
|
if memStats.EnableGC {
|
||||||
|
runtimeMetrics.MemStats.EnableGC.Update(1)
|
||||||
|
} else {
|
||||||
|
runtimeMetrics.MemStats.EnableGC.Update(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
|
||||||
|
runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
|
||||||
|
runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
|
||||||
|
runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
|
||||||
|
runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
|
||||||
|
runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
|
||||||
|
runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
|
||||||
|
runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
|
||||||
|
runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
|
||||||
|
runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
|
||||||
|
runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
|
||||||
|
runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
|
||||||
|
runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
|
||||||
|
runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
|
||||||
|
runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
|
||||||
|
runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
|
||||||
|
runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
|
||||||
|
|
||||||
|
// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
|
||||||
|
i := numGC % uint32(len(memStats.PauseNs))
|
||||||
|
ii := memStats.NumGC % uint32(len(memStats.PauseNs))
|
||||||
|
if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
|
||||||
|
for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
|
||||||
|
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if i > ii {
|
||||||
|
for ; i < uint32(len(memStats.PauseNs)); i++ {
|
||||||
|
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
|
||||||
|
}
|
||||||
|
i = 0
|
||||||
|
}
|
||||||
|
for ; i < ii; i++ {
|
||||||
|
runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
frees = memStats.Frees
|
||||||
|
lookups = memStats.Lookups
|
||||||
|
mallocs = memStats.Mallocs
|
||||||
|
numGC = memStats.NumGC
|
||||||
|
|
||||||
|
runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
|
||||||
|
runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
|
||||||
|
runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
|
||||||
|
runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
|
||||||
|
runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
|
||||||
|
|
||||||
|
currentNumCgoCalls := numCgoCall()
|
||||||
|
runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
|
||||||
|
numCgoCalls = currentNumCgoCalls
|
||||||
|
|
||||||
|
runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
|
||||||
|
|
||||||
|
runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register runtimeMetrics for the Go runtime statistics exported in runtime and
|
||||||
|
// specifically runtime.MemStats. The runtimeMetrics are named by their
|
||||||
|
// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
|
||||||
|
func RegisterRuntimeMemStats(r Registry) {
|
||||||
|
runtimeMetrics.MemStats.Alloc = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.BuckHashSys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.DebugGC = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.EnableGC = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.Frees = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapAlloc = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapIdle = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapInuse = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapObjects = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapReleased = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.HeapSys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.LastGC = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.Lookups = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.Mallocs = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.MCacheInuse = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.MCacheSys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.MSpanInuse = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.MSpanSys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.NextGC = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.NumGC = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
|
||||||
|
runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
|
||||||
|
runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.StackInuse = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.StackSys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.Sys = NewGauge()
|
||||||
|
runtimeMetrics.MemStats.TotalAlloc = NewGauge()
|
||||||
|
runtimeMetrics.NumCgoCall = NewGauge()
|
||||||
|
runtimeMetrics.NumGoroutine = NewGauge()
|
||||||
|
runtimeMetrics.NumThread = NewGauge()
|
||||||
|
runtimeMetrics.ReadMemStats = NewTimer()
|
||||||
|
|
||||||
|
r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
|
||||||
|
r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
|
||||||
|
r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
|
||||||
|
r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
|
||||||
|
r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
|
||||||
|
r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
|
||||||
|
r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
|
||||||
|
r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
|
||||||
|
r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
|
||||||
|
r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
|
||||||
|
r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
|
||||||
|
r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
|
||||||
|
r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
|
||||||
|
r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
|
||||||
|
r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
|
||||||
|
r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
|
||||||
|
r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
|
||||||
|
r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
|
||||||
|
r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
|
||||||
|
r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
|
||||||
|
r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
|
||||||
|
r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
|
||||||
|
r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
|
||||||
|
r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
|
||||||
|
r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
|
||||||
|
r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
|
||||||
|
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
|
||||||
|
r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
|
||||||
|
r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
|
||||||
|
r.Register("runtime.NumThread", runtimeMetrics.NumThread)
|
||||||
|
r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
|
||||||
|
}
|
|
@ -0,0 +1,10 @@
|
||||||
|
// +build cgo
|
||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "runtime"
|
||||||
|
|
||||||
|
func numCgoCall() int64 {
|
||||||
|
return runtime.NumCgoCall()
|
||||||
|
}
|
|
@ -0,0 +1,9 @@
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "runtime"
|
||||||
|
|
||||||
|
func gcCPUFraction(memStats *runtime.MemStats) float64 {
|
||||||
|
return memStats.GCCPUFraction
|
||||||
|
}
|
|
@ -0,0 +1,7 @@
|
||||||
|
// +build !cgo appengine
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
func numCgoCall() int64 {
|
||||||
|
return 0
|
||||||
|
}
|
9
vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
generated
vendored
Executable file
9
vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
generated
vendored
Executable file
|
@ -0,0 +1,9 @@
|
||||||
|
// +build !go1.5
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import "runtime"
|
||||||
|
|
||||||
|
func gcCPUFraction(memStats *runtime.MemStats) float64 {
|
||||||
|
return 0
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue