commit b9e34e3b15a00aea7daecbe8755af97b1d938744 Author: StephenButtolph Date: Tue Mar 10 15:20:34 2020 -0400 init repo diff --git a/.ci/after_success.sh b/.ci/after_success.sh new file mode 100755 index 0000000..9dadabf --- /dev/null +++ b/.ci/after_success.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -ev + +bash <(curl -s https://codecov.io/bash) + +docker tag $DOCKERHUB_REPO:$COMMIT $DOCKERHUB_REPO:travis-$TRAVIS_BUILD_NUMBER + +if [ "${TRAVIS_EVENT_TYPE}" == "push" ] && [ "${TRAVIS_BRANCH}" == "platform" ]; then + docker tag $DOCKERHUB_REPO:$COMMIT $DOCKERHUB_REPO:$TRAVIS_BRANCH +fi + +echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin +docker push $DOCKERHUB_REPO diff --git a/.ci/before_install.sh b/.ci/before_install.sh new file mode 100755 index 0000000..1d7d05d --- /dev/null +++ b/.ci/before_install.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -ev + +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +sudo apt-get update +sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce +# hack to address problem with using DOCKER_BUILDKIT=1, inspired by: +# * https://github.com/rootless-containers/usernetes/blob/master/.travis.yml +# +# links discussing the issue: +# * https://github.com/moby/buildkit/issues/606#issuecomment-453959632 +# * https://travis-ci.community/t/docker-builds-are-broken-if-buildkit-is-used-docker-buildkit-1/2994 +# * https://github.com/moby/moby/issues/39120 +sudo docker --version +sudo cat /etc/docker/daemon.json +sudo rm -f /etc/docker/daemon.json +sudo systemctl restart docker diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000..7eb5335 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,18 @@ +codecov: + branch: platform + +coverage: + range: 60..100 + round: down + precision: 5 + status: + project: + default: + threshold: 5% + patch: + default: + 
threshold: 50% + +comment: + layout: "header, diff, changes, sunburst, uncovered" + behavior: default diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..96ae9ff --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,26 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Operating System** +Which OS you used to reveal the bug. + +**Additional context** +Add any other context about the problem here. diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b2daa0e --- /dev/null +++ b/.gitignore @@ -0,0 +1,45 @@ +*.log +*~ +.DS_Store + +awscpu + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.profile + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +*logs/ + +.vscode* + +*.pb* +*.ava + +db* +*cpu[0-9]* +*mem[0-9]* +*lock[0-9]* +*.profile +*.swp +*.aux +*.fdb* +*.fls +*.gz +*.pdf + +.coverage + +bin/ +build/ + +*/mykey/staker.* \ No newline at end of file diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..e1b4760 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,16 @@ +dist: bionic +language: go +services: +- docker +env: + global: + - CODECOV_TOKEN="8c18c993-fc6e-4706-998b-01ddc7987804" + - GECKO_HOME=/go/src/github.com/ava-labs/gecko/ + - COMMIT=${TRAVIS_COMMIT::8} + - DOCKERHUB_REPO=avaplatform/gecko + - secure: 
"L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw=" + - secure: "zfTm7tJBYiPYrli76d4Ep6Lc2TJQ8Xv//+7OoqTA/aIf6YJDHe05f2GFTWAHG2iOIix/yjwHYwnhyIW66eWPb+Ujejnmh4eXlYZFufX9J5jUpDpbFu/+ybOLgE1Tmr0je0ycneSMe/NAaS74nWU1wnP34/cEE4sYL7TJyhwbeEtgz3cbSWwkpdvHFbXCjSOA196jdIYYUwsnqU9yycAG+2WUSk3DHHzzdtMrh/UOH2r1VFyp5US0zmbW90WkWX+o3TIlzZJgTUGQRNnWKq95Mrh1EQotxgL6CJ8NkfY4bVAGAhusPjdjscJsHxfY93WRMH64TzPYYp0zdibatH0ztyhnZPXVKqv+AIIVTEW+xWv5V18kTQAd1uBW103NFacbgXhIGWtbFcN9g1+ws29HROMclYs7ci6+72Qnq0eL55huqSyFx6+InhYwn+LfJmaBcGW4wx1umdp505M0obZ4ghlyn6b0pDYmqsu1XyBC3mjUTFbwlQmWE2Fize4L5o+DdH4ZDc9japF9ntxIMvO+b3nOicr7tplY2AGp61bB89o3dUAFlN5mDaEJotiAuFk5mo244rY1FjSzyGiKkA3M9TkTIbgcbN098hOJoMCYybH7yqiPwNnZiFvUuYjHuC5D1kIYBWuqqO0iVcbIZn0rV2jyzbVFlhFVk2clTZGhkrY=" +before_install: .ci/before_install.sh +install: DOCKER_BUILDKIT=1 docker build --progress plain --ssh default -t $DOCKERHUB_REPO:$COMMIT . 
+script: docker run --rm -v "$PWD:$GECKO_HOME" $DOCKERHUB_REPO:$COMMIT bash "$GECKO_HOME/scripts/build_test.sh" +after_success: .ci/after_success.sh diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..b24a23f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +# syntax=docker/dockerfile:experimental + +FROM golang:1.13.4-buster + +RUN apt-get update && apt-get install -y libssl-dev libuv1-dev curl cmake + +RUN mkdir -p /go/src/github.com/ava-labs + +# Because downloading ethereum takes long it is done separately, so that the docker +# layer, when cached can be re-used +RUN go get -t -v github.com/ava-labs/go-ethereum + +WORKDIR $GOPATH/src/github.com/ava-labs/ +COPY . gecko + +WORKDIR $GOPATH/src/github.com/ava-labs/gecko +RUN ./scripts/build.sh diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..1d314a7 --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, Ava Labs, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..c860d1a --- /dev/null +++ b/README.md @@ -0,0 +1,84 @@ +# gecko + +## Installation + +AVA is an incredibly lightweight protocol, so the minimum computer requirements are quite modest. + +- Hardware: 2 GHz or faster CPU, 3 GB RAM, 250 MB hard disk. +- OS: Ubuntu >= 18.04 or Mac OS X >= Catalina. +- Software: [Go](https://golang.org/doc/install) version >= 1.13.X and set up [`$GOPATH`](https://github.com/golang/go/wiki/SettingGOPATH). +- Network: IPv4 or IPv6 network connection, with an open public port. + +### Native Install + +Ubuntu users need the following libraries: + +* libssl-dev +* libuv1-dev +* cmake +* make +* curl +* g++ + +Install the libraries: + +```sh +sudo apt-get install libssl-dev libuv1-dev cmake make curl g++ +``` + +#### Downloading Gecko Source Code + +Clone the Gecko repository: + +```sh +cd $GOPATH +mkdir -p src/github.com/ava-labs +cd src/github.com/ava-labs +git clone https://github.com/ava-labs/gecko.git +cd gecko +``` + +#### Building the Gecko Executable + +Build Gecko using the build script: + +```sh +./scripts/build.sh +``` + +The Gecko binary, named `ava`, is in the `build` directory. + +### Docker Install + +- Make sure you have docker installed on your machine (so commands like `docker run` etc. are available). +- Build the docker image of latest gecko branch by `scripts/build_image.sh`. 
+- Check the built image by `docker image ls`, you should see some image tagged + `gecko-xxxxxxxx`, where `xxxxxxxx` is the commit id of the Gecko source it was built from. +- Test Gecko by `docker run -ti -p 9651:9651 gecko-xxxxxxxx /gecko/build/ava + --public-ip=127.0.0.1 --snow-sample-size=1 --snow-quorum-size=1 --staking-tls-enabled=false`. (For a production deployment, + you may want to extend the docker image with required credentials for + staking and TLS.) + +## Running Gecko and Creating a Local Test Network + +To create your own local test network, run: + +```sh +./build/ava --public-ip=127.0.0.1 --snow-sample-size=1 --snow-quorum-size=1 --staking-tls-enabled=false +``` + +This launches an AVA network with one node. + +You should see some pretty ASCII art and log messages. +You may see a few warnings. These are OK. + +You can use `Ctrl + C` to kill the node. + +If you want to specify your log level. You should set `--log-level` to one of the following values, in decreasing order of logging. +* `--log-level=verbo` +* `--log-level=debug` +* `--log-level=info` +* `--log-level=warn` +* `--log-level=error` +* `--log-level=fatal` +* `--log-level=off` diff --git a/api/admin/chain.go b/api/admin/chain.go new file mode 100644 index 0000000..16a0456 --- /dev/null +++ b/api/admin/chain.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package admin + +import ( + "net/http" + + "github.com/ava-labs/gecko/ids" +) + +// GetChainAliasesArgs are the arguments for Admin.GetChainAliases API call +type GetChainAliasesArgs struct{ ChainID string } + +// GetChainAliasesReply are the arguments for Admin.GetChainAliases API call +type GetChainAliasesReply struct{ Aliases []string } + +// GetChainAliases returns the aliases of the chain +// whose string representation is [args.ChainID] +func (service *Admin) GetChainAliases(r *http.Request, args *GetChainAliasesArgs, reply *GetChainAliasesReply) error { + ID, err := ids.FromString(args.ChainID) + if err != nil { + return err + } + reply.Aliases = service.chainManager.Aliases(ID) + return nil +} diff --git a/api/admin/networking.go b/api/admin/networking.go new file mode 100644 index 0000000..ef564e6 --- /dev/null +++ b/api/admin/networking.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package admin + +import ( + "sort" + + "github.com/ava-labs/gecko/utils" +) + +// Peerable can return a group of peers +type Peerable interface{ Peers() []utils.IPDesc } + +// Networking provides helper methods for tracking the current network state +type Networking struct{ peers Peerable } + +// Peers returns the current peers +func (n *Networking) Peers() ([]string, error) { + ipDescs := n.peers.Peers() + ips := make([]string, len(ipDescs)) + for i, ipDesc := range ipDescs { + ips[i] = ipDesc.String() + } + sort.Strings(ips) + return ips, nil +} diff --git a/api/admin/performance.go b/api/admin/performance.go new file mode 100644 index 0000000..bf2a460 --- /dev/null +++ b/api/admin/performance.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package admin + +import ( + "errors" + "os" + "runtime" + "runtime/pprof" +) + +var ( + errCPUProfilerRunning = errors.New("cpu profiler already running") + errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist") +) + +// Performance provides helper methods for measuring the current performance of +// the system +type Performance struct{ cpuProfileFile *os.File } + +// StartCPUProfiler starts measuring the cpu utilization of this node +func (p *Performance) StartCPUProfiler(filename string) error { + if p.cpuProfileFile != nil { + return errCPUProfilerRunning + } + + file, err := os.Create(filename) + if err != nil { + return err + } + if err := pprof.StartCPUProfile(file); err != nil { + file.Close() + return err + } + runtime.SetMutexProfileFraction(1) + + p.cpuProfileFile = file + return nil +} + +// StopCPUProfiler stops measuring the cpu utilization of this node +func (p *Performance) StopCPUProfiler() error { + if p.cpuProfileFile == nil { + return errCPUProfilerNotRunning + } + + pprof.StopCPUProfile() + err := p.cpuProfileFile.Close() + p.cpuProfileFile = nil + return err +} + +// MemoryProfile dumps the current memory utilization of this node +func (p *Performance) MemoryProfile(filename string) error { + file, err := os.Create(filename) + if err != nil { + return err + } + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(file); err != nil { + file.Close() + return err + } + return file.Close() +} + +// LockProfile dumps the current lock statistics of this node +func (p *Performance) LockProfile(filename string) error { + file, err := os.Create(filename) + if err != nil { + return err + } + + profile := pprof.Lookup("mutex") + if err := profile.WriteTo(file, 1); err != nil { + file.Close() + return err + } + return file.Close() +} diff --git a/api/admin/service.go b/api/admin/service.go new file mode 100644 index 0000000..c947d90 --- /dev/null +++ b/api/admin/service.go @@ -0,0 +1,209 @@ +// (c) 2019-2020, Ava 
Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package admin + +import ( + "net/http" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/api" + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/logging" + + cjson "github.com/ava-labs/gecko/utils/json" +) + +// Admin is the API service for node admin management +type Admin struct { + networkID uint32 + log logging.Logger + networking Networking + performance Performance + chainManager chains.Manager + httpServer *api.Server +} + +// NewService returns a new admin API service +func NewService(networkID uint32, log logging.Logger, chainManager chains.Manager, peers Peerable, httpServer *api.Server) *common.HTTPHandler { + newServer := rpc.NewServer() + codec := cjson.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&Admin{ + networkID: networkID, + log: log, + chainManager: chainManager, + networking: Networking{ + peers: peers, + }, + httpServer: httpServer, + }, "admin") + return &common.HTTPHandler{Handler: newServer} +} + +// GetNetworkIDArgs are the arguments for calling GetNetworkID +type GetNetworkIDArgs struct{} + +// GetNetworkIDReply are the results from calling GetNetworkID +type GetNetworkIDReply struct { + NetworkID cjson.Uint32 `json:"networkID"` +} + +// GetNetworkID returns the network ID this node is running on +func (service *Admin) GetNetworkID(r *http.Request, args *GetNetworkIDArgs, reply *GetNetworkIDReply) error { + service.log.Debug("Admin: GetNetworkID called") + + reply.NetworkID = cjson.Uint32(service.networkID) + return nil +} + +// GetBlockchainIDArgs are the arguments for calling GetBlockchainID +type GetBlockchainIDArgs struct { + Alias string `json:"alias"` +} + +// GetBlockchainIDReply are the results from calling GetBlockchainID +type GetBlockchainIDReply struct { 
+ BlockchainID string `json:"blockchainID"` +} + +// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied +func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { + service.log.Debug("Admin: GetBlockchainID called") + + bID, err := service.chainManager.Lookup(args.Alias) + reply.BlockchainID = bID.String() + return err +} + +// PeersArgs are the arguments for calling Peers +type PeersArgs struct{} + +// PeersReply are the results from calling Peers +type PeersReply struct { + Peers []string `json:"peers"` +} + +// Peers returns the list of current validators +func (service *Admin) Peers(r *http.Request, args *PeersArgs, reply *PeersReply) error { + service.log.Debug("Admin: Peers called") + + peers, err := service.networking.Peers() + reply.Peers = peers + return err +} + +// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler +type StartCPUProfilerArgs struct { + Filename string `json:"filename"` +} + +// StartCPUProfilerReply are the results from calling StartCPUProfiler +type StartCPUProfilerReply struct { + Success bool `json:"success"` +} + +// StartCPUProfiler starts a cpu profile writing to the specified file +func (service *Admin) StartCPUProfiler(r *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error { + service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename) + reply.Success = true + return service.performance.StartCPUProfiler(args.Filename) +} + +// StopCPUProfilerArgs are the arguments for calling StopCPUProfiler +type StopCPUProfilerArgs struct{} + +// StopCPUProfilerReply are the results from calling StopCPUProfiler +type StopCPUProfilerReply struct { + Success bool `json:"success"` +} + +// StopCPUProfiler stops the cpu profile +func (service *Admin) StopCPUProfiler(r *http.Request, args *StopCPUProfilerArgs, reply *StopCPUProfilerReply) error { + service.log.Debug("Admin: StopCPUProfiler called") + 
reply.Success = true + return service.performance.StopCPUProfiler() +} + +// MemoryProfileArgs are the arguments for calling MemoryProfile +type MemoryProfileArgs struct { + Filename string `json:"filename"` +} + +// MemoryProfileReply are the results from calling MemoryProfile +type MemoryProfileReply struct { + Success bool `json:"success"` +} + +// MemoryProfile runs a memory profile writing to the specified file +func (service *Admin) MemoryProfile(r *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error { + service.log.Debug("Admin: MemoryProfile called with %s", args.Filename) + reply.Success = true + return service.performance.MemoryProfile(args.Filename) +} + +// LockProfileArgs are the arguments for calling LockProfile +type LockProfileArgs struct { + Filename string `json:"filename"` +} + +// LockProfileReply are the results from calling LockProfile +type LockProfileReply struct { + Success bool `json:"success"` +} + +// LockProfile runs a mutex profile writing to the specified file +func (service *Admin) LockProfile(r *http.Request, args *LockProfileArgs, reply *LockProfileReply) error { + service.log.Debug("Admin: LockProfile called with %s", args.Filename) + reply.Success = true + return service.performance.LockProfile(args.Filename) +} + +// AliasArgs are the arguments for calling Alias +type AliasArgs struct { + Endpoint string `json:"endpoint"` + Alias string `json:"alias"` +} + +// AliasReply are the results from calling Alias +type AliasReply struct { + Success bool `json:"success"` +} + +// Alias attempts to alias an HTTP endpoint to a new name +func (service *Admin) Alias(r *http.Request, args *AliasArgs, reply *AliasReply) error { + service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias) + reply.Success = true + return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias) +} + +// AliasChainArgs are the arguments for calling AliasChain +type AliasChainArgs struct { + Chain 
string `json:"chain"` + Alias string `json:"alias"` +} + +// AliasChainReply are the results from calling AliasChain +type AliasChainReply struct { + Success bool `json:"success"` +} + +// AliasChain attempts to alias a chain to a new name +func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, reply *AliasChainReply) error { + service.log.Debug("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias) + + chainID, err := service.chainManager.Lookup(args.Chain) + if err != nil { + return err + } + + if err := service.chainManager.Alias(chainID, args.Alias); err != nil { + return err + } + + reply.Success = true + return service.httpServer.AddAliasesWithReadLock("bc/"+chainID.String(), "bc/"+args.Alias) +} diff --git a/api/ipcs/chainipc.go b/api/ipcs/chainipc.go new file mode 100644 index 0000000..a2bc0b5 --- /dev/null +++ b/api/ipcs/chainipc.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ipcs + +import ( + "nanomsg.org/go/mangos/v2" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" +) + +// ChainIPC a struct which holds IPC socket information +type ChainIPC struct { + log logging.Logger + socket mangos.Socket +} + +// Accept delivers a message to the ChainIPC +func (cipc *ChainIPC) Accept(chainID, containerID ids.ID, container []byte) error { + err := cipc.socket.Send(container) + if err != nil { + cipc.log.Error("%s while trying to send:\n%s", err, formatting.DumpBytes{Bytes: container}) + } + return err +} + +// Stop halts the ChainIPC event loop +func (cipc *ChainIPC) Stop() error { + cipc.log.Info("closing Chain IPC") + return cipc.socket.Close() +} diff --git a/api/ipcs/server.go b/api/ipcs/server.go new file mode 100644 index 0000000..30bcc5d --- /dev/null +++ b/api/ipcs/server.go @@ -0,0 +1,141 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package ipcs + +import ( + "fmt" + "net/http" + + "nanomsg.org/go/mangos/v2/protocol/pub" + + _ "nanomsg.org/go/mangos/v2/transport/ipc" // registers the IPC transport + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/api" + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/triggers" + "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" +) + +const baseURL = "ipc:///tmp/" + +// IPCs maintains the IPCs +type IPCs struct { + log logging.Logger + chainManager chains.Manager + httpServer *api.Server + events *triggers.EventDispatcher + chains map[[32]byte]*ChainIPC +} + +// NewService returns a new IPCs API service +func NewService(log logging.Logger, chainManager chains.Manager, events *triggers.EventDispatcher, httpServer *api.Server) *common.HTTPHandler { + newServer := rpc.NewServer() + codec := json.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&IPCs{ + log: log, + chainManager: chainManager, + httpServer: httpServer, + events: events, + chains: map[[32]byte]*ChainIPC{}, + }, "ipcs") + return &common.HTTPHandler{Handler: newServer} +} + +// PublishBlockchainArgs are the arguments for calling PublishBlockchain +type PublishBlockchainArgs struct { + BlockchainID string `json:"blockchainID"` +} + +// PublishBlockchainReply are the results from calling PublishBlockchain +type PublishBlockchainReply struct { + URL string `json:"url"` +} + +// PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC +func (ipc *IPCs) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { + chainID, err := ipc.chainManager.Lookup(args.BlockchainID) + if err != nil { + 
ipc.log.Error("unknown blockchainID: %s", err) + return err + } + + chainIDKey := chainID.Key() + chainIDStr := chainID.String() + url := baseURL + chainIDStr + ".ipc" + + reply.URL = url + + if _, ok := ipc.chains[chainIDKey]; ok { + ipc.log.Info("returning existing blockchainID %s", chainIDStr) + return nil + } + + sock, err := pub.NewSocket() + if err != nil { + ipc.log.Error("can't get new pub socket: %s", err) + return err + } + + if err = sock.Listen(url); err != nil { + ipc.log.Error("can't listen on pub socket: %s", err) + sock.Close() + return err + } + + chainIPC := &ChainIPC{ + log: ipc.log, + socket: sock, + } + if err := ipc.events.RegisterChain(chainID, "ipc", chainIPC); err != nil { + ipc.log.Error("couldn't register event: %s", err) + sock.Close() + return err + } + + ipc.chains[chainIDKey] = chainIPC + return nil +} + +// UnpublishBlockchainArgs are the arguments for calling UnpublishBlockchain +type UnpublishBlockchainArgs struct { + BlockchainID string `json:"blockchainID"` +} + +// UnpublishBlockchainReply are the results from calling UnpublishBlockchain +type UnpublishBlockchainReply struct { + Success bool `json:"success"` +} + +// UnpublishBlockchain closes publishing of a blockchainID +func (ipc *IPCs) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, reply *UnpublishBlockchainReply) error { + chainID, err := ipc.chainManager.Lookup(args.BlockchainID) + if err != nil { + ipc.log.Error("unknown blockchainID %s: %s", args.BlockchainID, err) + return err + } + + chainIDKey := chainID.Key() + + chain, ok := ipc.chains[chainIDKey] + if !ok { + return fmt.Errorf("blockchainID not publishing: %s", chainID) + } + + errs := wrappers.Errs{} + errs.Add( + chain.Stop(), + ipc.events.DeregisterChain(chainID, "ipc"), + ) + delete(ipc.chains, chainIDKey) + + reply.Success = true + return errs.Err +} diff --git a/api/keystore/blockchain_keystore.go b/api/keystore/blockchain_keystore.go new file mode 100644 index 0000000..704ac90 --- 
/dev/null +++ b/api/keystore/blockchain_keystore.go @@ -0,0 +1,20 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package keystore + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" +) + +// BlockchainKeystore ... +type BlockchainKeystore struct { + blockchainID ids.ID + ks *Keystore +} + +// GetDatabase ... +func (bks *BlockchainKeystore) GetDatabase(username, password string) (database.Database, error) { + return bks.ks.GetDatabase(bks.blockchainID, username, password) +} diff --git a/api/keystore/service.go b/api/keystore/service.go new file mode 100644 index 0000000..604ec5d --- /dev/null +++ b/api/keystore/service.go @@ -0,0 +1,308 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package keystore + +import ( + "errors" + "fmt" + "net/http" + "sync" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/encdb" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/codec" + + jsoncodec "github.com/ava-labs/gecko/utils/json" +) + +var ( + errEmptyUsername = errors.New("username can't be the empty string") +) + +// KeyValuePair ... 
+type KeyValuePair struct { + Key []byte `serialize:"true"` + Value []byte `serialize:"true"` +} + +// UserDB describes the full content of a user +type UserDB struct { + User `serialize:"true"` + Data []KeyValuePair `serialize:"true"` +} + +// Keystore is the RPC interface for keystore management +type Keystore struct { + lock sync.Mutex + log logging.Logger + + codec codec.Codec + + // Key: username + // Value: The user with that name + users map[string]*User + + // Used to persist users and their data + userDB database.Database + bcDB database.Database + // BaseDB + // / \ + // UserDB BlockchainDB + // / | \ + // Usr Usr Usr + // / | \ + // BID BID BID +} + +// Initialize the keystore +func (ks *Keystore) Initialize(log logging.Logger, db database.Database) { + ks.log = log + ks.codec = codec.NewDefault() + ks.users = make(map[string]*User) + ks.userDB = prefixdb.New([]byte("users"), db) + ks.bcDB = prefixdb.New([]byte("bcs"), db) +} + +// CreateHandler returns a new service object that can send requests to thisAPI. 
+func (ks *Keystore) CreateHandler() *common.HTTPHandler { + newServer := rpc.NewServer() + codec := jsoncodec.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(ks, "keystore") + return &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer} +} + +// Get the user whose name is [username] +func (ks *Keystore) getUser(username string) (*User, error) { + // If the user is already in memory, return it + usr, exists := ks.users[username] + if exists { + return usr, nil + } + // The user is not in memory; try the database + usrBytes, err := ks.userDB.Get([]byte(username)) + if err != nil { // Most likely bc user doesn't exist in database + return nil, err + } + + usr = &User{} + return usr, ks.codec.Unmarshal(usrBytes, usr) +} + +// CreateUserArgs are arguments for passing into CreateUser requests +type CreateUserArgs struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// CreateUserReply is the response from calling CreateUser +type CreateUserReply struct { + Success bool `json:"success"` +} + +// CreateUser creates an empty user with the provided username and password +func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *CreateUserReply) error { + ks.lock.Lock() + defer ks.lock.Unlock() + + ks.log.Verbo("CreateUser called with %s", args.Username) + + if args.Username == "" { + return errEmptyUsername + } + if usr, err := ks.getUser(args.Username); err == nil || usr != nil { + return fmt.Errorf("user already exists: %s", args.Username) + } + + usr := &User{} + if err := usr.Initialize(args.Password); err != nil { + return err + } + + usrBytes, err := ks.codec.Marshal(usr) + if err != nil { + return err + } + + if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil { + return err + } + ks.users[args.Username] = usr + reply.Success = true + return nil +} + +// ListUsersArgs are the 
arguments to ListUsers +type ListUsersArgs struct{} + +// ListUsersReply is the reply from ListUsers +type ListUsersReply struct { + Users []string `json:"users"` +} + +// ListUsers lists all the registered usernames +func (ks *Keystore) ListUsers(_ *http.Request, args *ListUsersArgs, reply *ListUsersReply) error { + ks.lock.Lock() + defer ks.lock.Unlock() + + ks.log.Verbo("ListUsers called") + + reply.Users = []string{} + + it := ks.userDB.NewIterator() + defer it.Release() + for it.Next() { + reply.Users = append(reply.Users, string(it.Key())) + } + return it.Error() +} + +// ExportUserArgs are the arguments to ExportUser +type ExportUserArgs struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// ExportUserReply is the reply from ExportUser +type ExportUserReply struct { + User string `json:"user"` +} + +// ExportUser exports a serialized encoding of a user's information complete with encrypted database values +func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *ExportUserReply) error { + ks.lock.Lock() + defer ks.lock.Unlock() + + ks.log.Verbo("ExportUser called for %s", args.Username) + + usr, err := ks.getUser(args.Username) + if err != nil { + return err + } + if !usr.CheckPassword(args.Password) { + return fmt.Errorf("incorrect password for %s", args.Username) + } + + userDB := prefixdb.New([]byte(args.Username), ks.bcDB) + + userData := UserDB{ + User: *usr, + } + + it := userDB.NewIterator() + defer it.Release() + for it.Next() { + userData.Data = append(userData.Data, KeyValuePair{ + Key: it.Key(), + Value: it.Value(), + }) + } + if err := it.Error(); err != nil { + return err + } + + b, err := ks.codec.Marshal(&userData) + if err != nil { + return err + } + cb58 := formatting.CB58{Bytes: b} + reply.User = cb58.String() + return nil +} + +// ImportUserArgs are arguments for ImportUser +type ImportUserArgs struct { + Username string `json:"username"` + Password string `json:"password"` + User 
string `json:"user"`
}

// ImportUserReply is the response for ImportUser
type ImportUserReply struct {
	Success bool `json:"success"`
}

// ImportUser imports a serialized encoding of a user's information complete with encrypted database values, integrity checks the password, and adds it to the database
// NOTE(review): despite the doc above, this function never calls
// usr.CheckPassword — unlike ExportUser. Confirm whether a password check
// is intentionally omitted here.
func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *ImportUserReply) error {
	ks.lock.Lock()
	defer ks.lock.Unlock()

	ks.log.Verbo("ImportUser called for %s", args.Username)

	// Refuse to overwrite an existing user.
	if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
		return fmt.Errorf("user already exists: %s", args.Username)
	}

	// The user payload arrives CB58-encoded.
	cb58 := formatting.CB58{}
	if err := cb58.FromString(args.User); err != nil {
		return err
	}

	userData := UserDB{}
	if err := ks.codec.Unmarshal(cb58.Bytes, &userData); err != nil {
		return err
	}

	usrBytes, err := ks.codec.Marshal(&userData.User)
	if err != nil {
		return err
	}

	// TODO: Should add batching to prevent creating a user without importing
	// the account
	if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
		return err
	}
	ks.users[args.Username] = &userData.User

	userDB := prefixdb.New([]byte(args.Username), ks.bcDB)
	batch := userDB.NewBatch()

	for _, kvp := range userData.Data {
		// FIX: batch.Put's error was previously discarded; a failed stage
		// would have been silently dropped from the import.
		if err := batch.Put(kvp.Key, kvp.Value); err != nil {
			return err
		}
	}

	// FIX: only report success once the batched writes have actually been
	// committed (previously Success was set even if Write failed).
	if err := batch.Write(); err != nil {
		return err
	}
	reply.Success = true
	return nil
}

// NewBlockchainKeyStore returns a keystore view scoped to [blockchainID].
func (ks *Keystore) NewBlockchainKeyStore(blockchainID ids.ID) *BlockchainKeystore {
	return &BlockchainKeystore{
		blockchainID: blockchainID,
		ks:           ks,
	}
}

// GetDatabase ...
+func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database.Database, error) { + ks.lock.Lock() + defer ks.lock.Unlock() + + usr, err := ks.getUser(username) + if err != nil { + return nil, err + } + if !usr.CheckPassword(password) { + return nil, fmt.Errorf("incorrect password for user '%s'", username) + } + + userDB := prefixdb.New([]byte(username), ks.bcDB) + bcDB := prefixdb.NewNested(bID.Bytes(), userDB) + encDB, err := encdb.New([]byte(password), bcDB) + + if err != nil { + return nil, err + } + + return encDB, nil +} diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go new file mode 100644 index 0000000..ab2a096 --- /dev/null +++ b/api/keystore/service_test.go @@ -0,0 +1,202 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package keystore + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +func TestServiceListNoUsers(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + reply := ListUsersReply{} + if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil { + t.Fatal(err) + } + if len(reply.Users) != 0 { + t.Fatalf("No users should have been created yet") + } +} + +func TestServiceCreateUser(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "launch", + }, &reply); err != nil { + t.Fatal(err) + } + if !reply.Success { + t.Fatalf("User should have been created successfully") + } + } + + { + reply := ListUsersReply{} + if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil { + t.Fatal(err) + } + if len(reply.Users) != 1 { + t.Fatalf("One user should have been created") + } + if user := reply.Users[0]; user != "bob" { + t.Fatalf("'bob' should have been a user 
that was created") + } + } +} + +func TestServiceCreateDuplicate(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "launch", + }, &reply); err != nil { + t.Fatal(err) + } + if !reply.Success { + t.Fatalf("User should have been created successfully") + } + } + + { + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "launch!", + }, &reply); err == nil { + t.Fatalf("Should have errored due to the username already existing") + } + } +} + +func TestServiceCreateUserNoName(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Password: "launch", + }, &reply); err == nil { + t.Fatalf("Shouldn't have allowed empty username") + } +} + +func TestServiceUseBlockchainDB(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "launch", + }, &reply); err != nil { + t.Fatal(err) + } + if !reply.Success { + t.Fatalf("User should have been created successfully") + } + } + + { + db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + if err != nil { + t.Fatal(err) + } + if err := db.Put([]byte("hello"), []byte("world")); err != nil { + t.Fatal(err) + } + } + + { + db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + if err != nil { + t.Fatal(err) + } + if val, err := db.Get([]byte("hello")); err != nil { + t.Fatal(err) + } else if !bytes.Equal(val, []byte("world")) { + t.Fatalf("Should have read '%s' from the db", "world") + } + } +} + +func TestServiceExportImport(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + if err := ks.CreateUser(nil, &CreateUserArgs{ + Username: 
"bob", + Password: "launch", + }, &reply); err != nil { + t.Fatal(err) + } + if !reply.Success { + t.Fatalf("User should have been created successfully") + } + } + + { + db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + if err != nil { + t.Fatal(err) + } + if err := db.Put([]byte("hello"), []byte("world")); err != nil { + t.Fatal(err) + } + } + + exportReply := ExportUserReply{} + if err := ks.ExportUser(nil, &ExportUserArgs{ + Username: "bob", + Password: "launch", + }, &exportReply); err != nil { + t.Fatal(err) + } + + newKS := Keystore{} + newKS.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := ImportUserReply{} + if err := newKS.ImportUser(nil, &ImportUserArgs{ + Username: "bob", + Password: "launch", + User: exportReply.User, + }, &reply); err != nil { + t.Fatal(err) + } + if !reply.Success { + t.Fatalf("User should have been imported successfully") + } + } + + { + db, err := newKS.GetDatabase(ids.Empty, "bob", "launch") + if err != nil { + t.Fatal(err) + } + if val, err := db.Get([]byte("hello")); err != nil { + t.Fatal(err) + } else if !bytes.Equal(val, []byte("world")) { + t.Fatalf("Should have read '%s' from the db", "world") + } + } +} diff --git a/api/keystore/user.go b/api/keystore/user.go new file mode 100644 index 0000000..ea1a11c --- /dev/null +++ b/api/keystore/user.go @@ -0,0 +1,35 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package keystore + +import ( + "bytes" + "crypto/rand" + + "golang.org/x/crypto/argon2" +) + +// User describes a user of the keystore +type User struct { + Password [32]byte `serialize:"true"` // The salted, hashed password + Salt [16]byte `serialize:"true"` // The salt +} + +// Initialize ... 
+func (usr *User) Initialize(password string) error { + _, err := rand.Read(usr.Salt[:]) + if err != nil { + return err + } + // pw is the salted, hashed password + pw := argon2.IDKey([]byte(password), usr.Salt[:], 1, 64*1024, 4, 32) + copy(usr.Password[:], pw[:32]) + return nil +} + +// CheckPassword ... +func (usr *User) CheckPassword(password string) bool { + pw := argon2.IDKey([]byte(password), usr.Salt[:], 1, 64*1024, 4, 32) + return bytes.Equal(pw, usr.Password[:]) +} diff --git a/api/keystore/user_test.go b/api/keystore/user_test.go new file mode 100644 index 0000000..079b68e --- /dev/null +++ b/api/keystore/user_test.go @@ -0,0 +1,24 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package keystore + +import ( + "testing" +) + +func TestUser(t *testing.T) { + usr := User{} + if err := usr.Initialize("heytherepal"); err != nil { + t.Fatal(err) + } + if !usr.CheckPassword("heytherepal") { + t.Fatalf("Should have verified the password") + } + if usr.CheckPassword("heytherepal!") { + t.Fatalf("Shouldn't have verified the password") + } + if usr.CheckPassword("") { + t.Fatalf("Shouldn't have verified the password") + } +} diff --git a/api/metrics/service.go b/api/metrics/service.go new file mode 100644 index 0000000..5fa9206 --- /dev/null +++ b/api/metrics/service.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// NewService returns a new prometheus service +func NewService() (*prometheus.Registry, *common.HTTPHandler) { + registerer := prometheus.NewRegistry() + handler := promhttp.InstrumentMetricHandler( + registerer, + promhttp.HandlerFor( + registerer, + promhttp.HandlerOpts{}, + ), + ) + return registerer, &common.HTTPHandler{LockOptions: common.NoLock, Handler: handler} +} diff --git a/api/middleware_handler.go b/api/middleware_handler.go new file mode 100644 index 0000000..fba0563 --- /dev/null +++ b/api/middleware_handler.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package api + +import ( + "net/http" +) + +type middlewareHandler struct { + before, after func() + handler http.Handler +} + +func (mh middlewareHandler) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + if mh.before != nil { + mh.before() + } + if mh.after != nil { + defer mh.after() + } + mh.handler.ServeHTTP(writer, request) +} diff --git a/api/router.go b/api/router.go new file mode 100644 index 0000000..d674cc8 --- /dev/null +++ b/api/router.go @@ -0,0 +1,132 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package api + +import ( + "errors" + "fmt" + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +var ( + errUnknownBaseURL = errors.New("unknown base url") + errUnknownEndpoint = errors.New("unknown endpoint") +) + +type router struct { + lock sync.RWMutex + router *mux.Router + + routeLock sync.Mutex + reservedRoutes map[string]bool // Reserves routes so that there can't be alias that conflict + aliases map[string][]string // Maps a route to a set of reserved routes + routes map[string]map[string]http.Handler // Maps routes to a handler +} + +func newRouter() *router { + return &router{ + router: mux.NewRouter(), + reservedRoutes: make(map[string]bool), + aliases: make(map[string][]string), + routes: make(map[string]map[string]http.Handler), + } +} + +func (r *router) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + r.lock.RLock() + defer r.lock.RUnlock() + + r.router.ServeHTTP(writer, request) +} + +func (r *router) GetHandler(base, endpoint string) (http.Handler, error) { + r.routeLock.Lock() + defer r.routeLock.Unlock() + + urlBase, exists := r.routes[base] + if !exists { + return nil, errUnknownBaseURL + } + handler, exists := urlBase[endpoint] + if !exists { + return nil, errUnknownEndpoint + } + return handler, nil +} + +func (r *router) AddRouter(base, endpoint string, handler http.Handler) error { + r.lock.Lock() + defer r.lock.Unlock() + r.routeLock.Lock() + defer r.routeLock.Unlock() + + return r.addRouter(base, endpoint, handler) +} + +func (r *router) addRouter(base, endpoint string, handler http.Handler) error { + if r.reservedRoutes[base] { + return fmt.Errorf("couldn't route to %s as that route is either aliased or already maps to a handler", base) + } + + return r.forceAddRouter(base, endpoint, handler) +} + +func (r *router) forceAddRouter(base, endpoint string, handler http.Handler) error { + endpoints := r.routes[base] + if endpoints == nil { + endpoints = make(map[string]http.Handler) + } + url := base + endpoint + if _, 
exists := endpoints[endpoint]; exists { + return fmt.Errorf("failed to create endpoint as %s already exists", url) + } + + endpoints[endpoint] = handler + r.routes[base] = endpoints + r.router.Handle(url, handler) + + var err error + if aliases, exists := r.aliases[base]; exists { + for _, alias := range aliases { + if innerErr := r.forceAddRouter(alias, endpoint, handler); err == nil { + err = innerErr + } + } + } + return err +} + +func (r *router) AddAlias(base string, aliases ...string) error { + r.lock.Lock() + defer r.lock.Unlock() + r.routeLock.Lock() + defer r.routeLock.Unlock() + + for _, alias := range aliases { + if r.reservedRoutes[alias] { + return fmt.Errorf("couldn't alias to %s as that route is either already aliased or already maps to a handler", alias) + } + } + + for _, alias := range aliases { + r.reservedRoutes[alias] = true + } + + r.aliases[base] = append(r.aliases[base], aliases...) + + var err error + if endpoints, exists := r.routes[base]; exists { + for endpoint, handler := range endpoints { + for _, alias := range aliases { + if innerErr := r.forceAddRouter(alias, endpoint, handler); err == nil { + err = innerErr + } + } + } + } + return err +} diff --git a/api/router_test.go b/api/router_test.go new file mode 100644 index 0000000..40ebd14 --- /dev/null +++ b/api/router_test.go @@ -0,0 +1,69 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package api + +import ( + "net/http" + "testing" +) + +type testHandler struct{} + +func (*testHandler) ServeHTTP(_ http.ResponseWriter, _ *http.Request) {} + +func TestAliasing(t *testing.T) { + r := newRouter() + + if err := r.AddAlias("1", "2", "3"); err != nil { + t.Fatal(err) + } + if err := r.AddAlias("1", "4"); err != nil { + t.Fatal(err) + } + if err := r.AddAlias("5", "1"); err != nil { + t.Fatal(err) + } + if err := r.AddAlias("3", "6"); err != nil { + t.Fatal(err) + } + if err := r.AddAlias("7", "4"); err == nil { + t.Fatalf("Already reserved %s", "4") + } + + handler1 := &testHandler{} + if err := r.AddRouter("2", "", handler1); err == nil { + t.Fatalf("Already reserved %s", "2") + } + if err := r.AddRouter("5", "", handler1); err != nil { + t.Fatal(err) + } + if handler, exists := r.routes["5"][""]; !exists { + t.Fatalf("Should have added %s", "5") + } else if handler != handler1 { + t.Fatalf("Registered unknown handler") + } + + if err := r.AddAlias("5", "7"); err != nil { + t.Fatal(err) + } + + if handler, exists := r.routes["7"][""]; !exists { + t.Fatalf("Should have added %s", "7") + } else if handler != handler1 { + t.Fatalf("Registered unknown handler") + } +} + +func TestBlock(t *testing.T) { + r := newRouter() + + if err := r.AddAlias("1", "1"); err != nil { + t.Fatal(err) + } + + handler1 := &testHandler{} + if err := r.AddRouter("1", "", handler1); err == nil { + t.Fatalf("Permanently locked %s", "1") + } +} diff --git a/api/server.go b/api/server.go new file mode 100644 index 0000000..ffbdc03 --- /dev/null +++ b/api/server.go @@ -0,0 +1,165 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package api + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sync" + + "github.com/gorilla/handlers" + + "github.com/rs/cors" + + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/logging" +) + +const baseURL = "/ext" + +var ( + errUnknownLockOption = errors.New("invalid lock options") +) + +// Server maintains the HTTP router +type Server struct { + log logging.Logger + factory logging.Factory + router *router + portURL string +} + +// Initialize creates the API server at the provided port +func (s *Server) Initialize(log logging.Logger, factory logging.Factory, port uint16) { + s.log = log + s.factory = factory + s.portURL = fmt.Sprintf(":%d", port) + s.router = newRouter() +} + +// Dispatch starts the API server +func (s *Server) Dispatch() error { + handler := cors.Default().Handler(s.router) + return http.ListenAndServe(s.portURL, handler) +} + +// DispatchTLS starts the API server with the provided TLS certificate +func (s *Server) DispatchTLS(certFile, keyFile string) error { + handler := cors.Default().Handler(s.router) + return http.ListenAndServeTLS(s.portURL, certFile, keyFile, handler) +} + +// RegisterChain registers the API endpoints associated with this chain That +// is, add pairs to server so that http calls can be made to +// the vm +func (s *Server) RegisterChain(ctx *snow.Context, vmIntf interface{}) { + vm, ok := vmIntf.(common.VM) + if !ok { + return + } + + // all subroutes to a chain begin with "bc/" + defaultEndpoint := "bc/" + ctx.ChainID.String() + httpLogger, err := s.factory.MakeChain(ctx.ChainID, "http") + if err != nil { + s.log.Error("Failed to create new http logger: %s", err) + return + } + s.log.Verbo("About to add API endpoints for chain with ID %s", ctx.ChainID) + + // Register each endpoint + for extension, service := range vm.CreateHandlers() { + // Validate that the route being added is valid + // e.g. 
"/foo" and "" are ok but "\n" is not + _, err := url.ParseRequestURI(extension) + if extension != "" && err != nil { + s.log.Warn("could not add route to chain's API handler because route is malformed: %s", extension) + continue + } + s.log.Verbo("adding API endpoint: %s", defaultEndpoint+extension) + if err := s.AddRoute(service, &ctx.Lock, defaultEndpoint, extension, httpLogger); err != nil { + s.log.Error("error adding route: %s", err) + } + } +} + +// AddRoute registers the appropriate endpoint for the vm given an endpoint +func (s *Server) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string, log logging.Logger) error { + url := fmt.Sprintf("%s/%s", baseURL, base) + s.log.Info("adding route %s%s", url, endpoint) + h := handlers.CombinedLoggingHandler(log, handler.Handler) + switch handler.LockOptions { + case common.WriteLock: + return s.router.AddRouter(url, endpoint, middlewareHandler{ + before: lock.Lock, + after: lock.Unlock, + handler: h, + }) + case common.ReadLock: + return s.router.AddRouter(url, endpoint, middlewareHandler{ + before: lock.RLock, + after: lock.RUnlock, + handler: h, + }) + case common.NoLock: + return s.router.AddRouter(url, endpoint, h) + default: + return errUnknownLockOption + } +} + +// AddAliases registers aliases to the server +func (s *Server) AddAliases(endpoint string, aliases ...string) error { + url := fmt.Sprintf("%s/%s", baseURL, endpoint) + endpoints := make([]string, len(aliases)) + for i, alias := range aliases { + endpoints[i] = fmt.Sprintf("%s/%s", baseURL, alias) + } + return s.router.AddAlias(url, endpoints...) +} + +// AddAliasesWithReadLock registers aliases to the server assuming the http read +// lock is currently held. +func (s *Server) AddAliasesWithReadLock(endpoint string, aliases ...string) error { + // This is safe, as the read lock doesn't actually need to be held once the + // http handler is called. 
However, it is unlocked later, so this function + // must end with the lock held. + s.router.lock.RUnlock() + defer s.router.lock.RLock() + + return s.AddAliases(endpoint, aliases...) +} + +// Call ... +func (s *Server) Call( + writer http.ResponseWriter, + method, + base, + endpoint string, + body io.Reader, + headers map[string]string, +) error { + url := fmt.Sprintf("%s/vm/%s", baseURL, base) + + handler, err := s.router.GetHandler(url, endpoint) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", "*", body) + if err != nil { + return err + } + for key, value := range headers { + req.Header.Set(key, value) + } + + handler.ServeHTTP(writer, req) + + return nil +} diff --git a/api/server_test.go b/api/server_test.go new file mode 100644 index 0000000..75e03ad --- /dev/null +++ b/api/server_test.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package api + +import ( + "bytes" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json2" + + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/logging" +) + +type Service struct{ called bool } + +type Args struct{} + +type Reply struct{} + +func (s *Service) Call(_ *http.Request, args *Args, reply *Reply) error { + s.called = true + return nil +} + +func TestCall(t *testing.T) { + s := Server{} + s.Initialize(logging.NoLog{}, logging.NoFactory{}, 8080) + + serv := &Service{} + newServer := rpc.NewServer() + newServer.RegisterCodec(json2.NewCodec(), "application/json") + newServer.RegisterCodec(json2.NewCodec(), "application/json;charset=UTF-8") + newServer.RegisterService(serv, "test") + + if err := s.AddRoute(&common.HTTPHandler{Handler: newServer}, new(sync.RWMutex), "vm/lol", "", logging.NoLog{}); err != nil { + t.Fatal(err) + } + + buf, err := json2.EncodeClientRequest("test.Call", &Args{}) + if err != nil { + t.Fatal(err) + } 
+ + writer := httptest.NewRecorder() + body := bytes.NewBuffer(buf) + headers := map[string]string{ + "Content-Type": "application/json", + } + s.Call(writer, "POST", "lol", "", body, headers) + + if !serv.called { + t.Fatalf("Should have been called") + } +} diff --git a/cache/cache.go b/cache/cache.go new file mode 100644 index 0000000..4fe4c0b --- /dev/null +++ b/cache/cache.go @@ -0,0 +1,41 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package cache + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Cacher acts as a best effort key value store +type Cacher interface { + // Put inserts an element into the cache. If spaced is required, elements will + // be evicted. + Put(key ids.ID, value interface{}) + + // Get returns the entry in the cache with the key specified, if no value + // exists, false is returned. + Get(key ids.ID) (interface{}, bool) + + // Evict removes the specified entry from the cache + Evict(key ids.ID) + + // Flush removes all entries from the cache + Flush() +} + +// Evictable allows the object to be notified when it is evicted +type Evictable interface { + ID() ids.ID + Evict() +} + +// Deduplicator acts as a best effort deduplication service +type Deduplicator interface { + // Deduplicate returns either the provided value, or a previously provided + // value with the same ID that hasn't yet been evicted + Deduplicate(Evictable) Evictable + + // Flush removes all entries from the cache + Flush() +} diff --git a/cache/lru_cache.go b/cache/lru_cache.go new file mode 100644 index 0000000..629d6bd --- /dev/null +++ b/cache/lru_cache.go @@ -0,0 +1,139 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package cache + +import ( + "container/list" + "sync" + + "github.com/ava-labs/gecko/ids" +) + +type entry struct { + Key ids.ID + Value interface{} +} + +// LRU is a key value store with bounded size. 
If the size is attempted to be +// exceeded, then an element is removed from the cache before the insertion is +// done, based on evicting the least recently used value. +type LRU struct { + lock sync.Mutex + entryMap map[[32]byte]*list.Element + entryList *list.List + Size int +} + +// Put implements the cache interface +func (c *LRU) Put(key ids.ID, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + c.put(key, value) +} + +// Get implements the cache interface +func (c *LRU) Get(key ids.ID) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + return c.get(key) +} + +// Evict implements the cache interface +func (c *LRU) Evict(key ids.ID) { + c.lock.Lock() + defer c.lock.Unlock() + + c.evict(key) +} + +// Flush implements the cache interface +func (c *LRU) Flush() { + c.lock.Lock() + defer c.lock.Unlock() + + c.flush() +} + +func (c *LRU) init() { + if c.entryMap == nil { + c.entryMap = make(map[[32]byte]*list.Element) + } + if c.entryList == nil { + c.entryList = list.New() + } + if c.Size <= 0 { + c.Size = 1 + } +} + +func (c *LRU) resize() { + for c.entryList.Len() > c.Size { + e := c.entryList.Front() + c.entryList.Remove(e) + + val := e.Value.(*entry) + delete(c.entryMap, val.Key.Key()) + } +} + +func (c *LRU) put(key ids.ID, value interface{}) { + c.init() + c.resize() + + if e, ok := c.entryMap[key.Key()]; !ok { + if c.entryList.Len() >= c.Size { + e = c.entryList.Front() + c.entryList.MoveToBack(e) + + val := e.Value.(*entry) + delete(c.entryMap, val.Key.Key()) + val.Key = key + val.Value = value + } else { + e = c.entryList.PushBack(&entry{ + Key: key, + Value: value, + }) + } + c.entryMap[key.Key()] = e + } else { + c.entryList.MoveToBack(e) + + val := e.Value.(*entry) + val.Value = value + } +} + +func (c *LRU) get(key ids.ID) (interface{}, bool) { + c.init() + c.resize() + + if e, ok := c.entryMap[key.Key()]; ok { + c.entryList.MoveToBack(e) + + val := e.Value.(*entry) + return val.Value, true + } + return struct{}{}, false 
+} + +func (c *LRU) evict(key ids.ID) { + c.init() + c.resize() + + keyBytes := key.Key() + if e, ok := c.entryMap[keyBytes]; ok { + c.entryList.Remove(e) + delete(c.entryMap, keyBytes) + } +} + +func (c *LRU) flush() { + c.init() + + c.entryMap = make(map[[32]byte]*list.Element) + c.entryList = list.New() +} diff --git a/cache/lru_cache_test.go b/cache/lru_cache_test.go new file mode 100644 index 0000000..d16024b --- /dev/null +++ b/cache/lru_cache_test.go @@ -0,0 +1,171 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package cache + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestLRU(t *testing.T) { + cache := LRU{Size: 1} + + id1 := ids.NewID([32]byte{1}) + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieved value when none exists") + } + + expectedValue1 := 1 + cache.Put(id1, expectedValue1) + if value, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if value != expectedValue1 { + t.Fatalf("Failed to retrieve correct value when one exists") + } + + cache.Put(id1, expectedValue1) + if value, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if value != expectedValue1 { + t.Fatalf("Failed to retrieve correct value when one exists") + } + + cache.Put(id1, expectedValue1) + if value, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if value != expectedValue1 { + t.Fatalf("Failed to retrieve correct value when one exists") + } + + id2 := ids.NewID([32]byte{2}) + + expectedValue2 := 2 + cache.Put(id2, expectedValue2) + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieved value when none exists") + } + if value, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if value != expectedValue2 { + t.Fatalf("Failed to retrieve correct value when one exists") + } +} + +func 
TestLRUEviction(t *testing.T) { + cache := LRU{Size: 2} + + id1 := ids.NewID([32]byte{1}) + id2 := ids.NewID([32]byte{2}) + id3 := ids.NewID([32]byte{3}) + + cache.Put(id1, 1) + cache.Put(id2, 2) + + if val, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 1 { + t.Fatalf("Retrieved wrong value") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } else if _, found := cache.Get(id3); found { + t.Fatalf("Retrieve value when none exists") + } + + cache.Put(id3, 3) + + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieve value when none exists") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } else if val, found := cache.Get(id3); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 3 { + t.Fatalf("Retrieved wrong value") + } + + cache.Get(id2) + cache.Put(id1, 1) + + if val, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 1 { + t.Fatalf("Retrieved wrong value") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } else if _, found := cache.Get(id3); found { + t.Fatalf("Retrieved value when none exists") + } + + cache.Evict(id2) + cache.Put(id3, 3) + + if val, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 1 { + t.Fatalf("Retrieved wrong value") + } else if _, found := cache.Get(id2); found { + t.Fatalf("Retrieved value when none exists") + } else if val, found := cache.Get(id3); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 3 { + t.Fatalf("Retrieved wrong value") + } + + 
cache.Flush() + + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieved value when none exists") + } else if _, found := cache.Get(id2); found { + t.Fatalf("Retrieved value when none exists") + } else if _, found := cache.Get(id3); found { + t.Fatalf("Retrieved value when none exists") + } +} + +func TestLRUResize(t *testing.T) { + cache := LRU{Size: 2} + + id1 := ids.NewID([32]byte{1}) + id2 := ids.NewID([32]byte{2}) + + cache.Put(id1, 1) + cache.Put(id2, 2) + + if val, found := cache.Get(id1); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 1 { + t.Fatalf("Retrieved wrong value") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } + + cache.Size = 1 + + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieve value when none exists") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } + + cache.Size = 0 + + if _, found := cache.Get(id1); found { + t.Fatalf("Retrieve value when none exists") + } else if val, found := cache.Get(id2); !found { + t.Fatalf("Failed to retrieve value when one exists") + } else if val != 2 { + t.Fatalf("Retrieved wrong value") + } +} diff --git a/cache/unique_cache.go b/cache/unique_cache.go new file mode 100644 index 0000000..c72d966 --- /dev/null +++ b/cache/unique_cache.go @@ -0,0 +1,93 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package cache + +import ( + "container/list" + "sync" +) + +// EvictableLRU is an LRU cache that notifies the objects when they are evicted. 
+type EvictableLRU struct { + lock sync.Mutex + entryMap map[[32]byte]*list.Element + entryList *list.List + Size int +} + +// Deduplicate implements the Deduplicator interface +func (c *EvictableLRU) Deduplicate(value Evictable) Evictable { + c.lock.Lock() + defer c.lock.Unlock() + + return c.deduplicate(value) +} + +// Flush implements the Deduplicator interface +func (c *EvictableLRU) Flush() { + c.lock.Lock() + defer c.lock.Unlock() + + c.flush() +} + +func (c *EvictableLRU) init() { + if c.entryMap == nil { + c.entryMap = make(map[[32]byte]*list.Element) + } + if c.entryList == nil { + c.entryList = list.New() + } + if c.Size <= 0 { + c.Size = 1 + } +} + +func (c *EvictableLRU) resize() { + for c.entryList.Len() > c.Size { + e := c.entryList.Front() + c.entryList.Remove(e) + + val := e.Value.(Evictable) + delete(c.entryMap, val.ID().Key()) + val.Evict() + } +} + +func (c *EvictableLRU) deduplicate(value Evictable) Evictable { + c.init() + c.resize() + + key := value.ID().Key() + if e, ok := c.entryMap[key]; !ok { + if c.entryList.Len() >= c.Size { + e = c.entryList.Front() + c.entryList.MoveToBack(e) + + val := e.Value.(Evictable) + delete(c.entryMap, val.ID().Key()) + val.Evict() + + e.Value = value + } else { + e = c.entryList.PushBack(value) + } + c.entryMap[key] = e + } else { + c.entryList.MoveToBack(e) + + val := e.Value.(Evictable) + value = val + } + return value +} + +func (c *EvictableLRU) flush() { + c.init() + + size := c.Size + c.Size = 0 + c.resize() + c.Size = size +} diff --git a/cache/unique_cache_test.go b/cache/unique_cache_test.go new file mode 100644 index 0000000..f61149f --- /dev/null +++ b/cache/unique_cache_test.go @@ -0,0 +1,62 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
// evictable is a test helper that counts how many times it has been evicted.
type evictable struct {
	id      ids.ID
	evicted int
}

func (e *evictable) ID() ids.ID { return e.id }
func (e *evictable) Evict()     { e.evicted++ }

// TestEvictableLRU checks that EvictableLRU evicts the oldest entry when full,
// deduplicates values that share an ID, and evicts everything on Flush.
func TestEvictableLRU(t *testing.T) {
	// Zero value: capacity defaults to 1 on first use (see EvictableLRU.init).
	cache := EvictableLRU{}

	expectedValue1 := &evictable{id: ids.NewID([32]byte{1})}
	if returnedValue := cache.Deduplicate(expectedValue1).(*evictable); returnedValue != expectedValue1 {
		t.Fatalf("Returned unknown value")
	} else if expectedValue1.evicted != 0 {
		t.Fatalf("Value was evicted unexpectedly")
	} else if returnedValue := cache.Deduplicate(expectedValue1).(*evictable); returnedValue != expectedValue1 {
		t.Fatalf("Returned unknown value")
	} else if expectedValue1.evicted != 0 {
		t.Fatalf("Value was evicted unexpectedly")
	}

	// Capacity is 1, so inserting a second value must evict the first.
	expectedValue2 := &evictable{id: ids.NewID([32]byte{2})}
	if returnedValue := cache.Deduplicate(expectedValue2).(*evictable); returnedValue != expectedValue2 {
		t.Fatalf("Returned unknown value")
	} else if expectedValue1.evicted != 1 {
		t.Fatalf("Value should have been evicted")
	} else if expectedValue2.evicted != 0 {
		t.Fatalf("Value was evicted unexpectedly")
	}

	cache.Size = 2

	// expectedValue3 deliberately shares expectedValue2's ID, so Deduplicate
	// must return the already-resident expectedValue2, not store the new value.
	expectedValue3 := &evictable{id: ids.NewID([32]byte{2})}
	if returnedValue := cache.Deduplicate(expectedValue3).(*evictable); returnedValue != expectedValue2 {
		t.Fatalf("Returned unknown value")
	} else if expectedValue1.evicted != 1 {
		t.Fatalf("Value should have been evicted")
	} else if expectedValue2.evicted != 0 {
		t.Fatalf("Value was evicted unexpectedly")
	}

	// Flush evicts every resident value; expectedValue3 was never resident,
	// so it must not be evicted.
	cache.Flush()
	if expectedValue1.evicted != 1 {
		t.Fatalf("Value should have been evicted")
	} else if expectedValue2.evicted != 1 {
		t.Fatalf("Value should have been evicted")
	} else if expectedValue3.evicted != 0 {
		t.Fatalf("Value was evicted unexpectedly")
	}
}
// Awaiter can await connections to be connected
type Awaiter interface {
	// AwaitConnections registers [awaiting] so that its Finish callback runs
	// once enough of the requested peers have connected.
	// NOTE(review): semantics inferred from the usage in chains/manager.go
	// (beacon sets with NumRequired = 75%) — confirm against the
	// networking.AwaitingConnections implementation.
	AwaitConnections(awaiting *networking.AwaitingConnections)
}
// It can:
// * Create a chain
// * Add a registrant. When a chain is created, each registrant calls
//   RegisterChain with the new chain as the argument.
// * Get the aliases associated with a given chain.
// * Get the ID of the chain associated with a given alias.
type Manager interface {
	// Return the router this Manager is using to route consensus messages to chains
	Router() router.Router

	// Create a chain in the future
	// (the request is queued until the manager is unblocked by bootstrapping)
	CreateChain(ChainParameters)

	// Create a chain now
	ForceCreateChain(ChainParameters)

	// Add a registrant [r]. Every time a chain is
	// created, [r].RegisterChain([new chain]) is called
	AddRegistrant(Registrant)

	// Given an alias, return the ID of the chain associated with that alias
	Lookup(string) (ids.ID, error)

	// Given an alias, return the ID of the VM associated with that alias
	LookupVM(string) (ids.ID, error)

	// Return the aliases associated with a chain
	Aliases(ids.ID) []string

	// Add an alias to a chain
	Alias(ids.ID, string) error

	// Shutdown stops the router, halting message delivery to all chains
	Shutdown()
}
// manager is the concrete Manager implementation.
type manager struct {
	// Note: The string representation of a chain's ID is also considered to be an alias of the chain
	// That is, [chainID].String() is an alias for the chain, too
	ids.Aliaser

	log             logging.Logger
	logFactory      logging.Factory
	vmManager       vms.Manager // Manage mappings from vm ID --> vm
	decisionEvents  *triggers.EventDispatcher
	consensusEvents *triggers.EventDispatcher
	db              database.Database
	chainRouter     router.Router         // Routes incoming messages to the appropriate chain
	sender          sender.ExternalSender // Sends consensus messages to other validators
	timeoutManager  *timeout.Manager      // Manages request timeouts when sending messages to other validators
	consensusParams avacon.Parameters     // The consensus parameters (alpha, beta, etc.) for new chains
	validators      validators.Manager    // Validators validating on this chain
	registrants     []Registrant          // Those notified when a chain is created
	nodeID          ids.ShortID           // The ID of this node
	networkID       uint32                // ID of the network this node is connected to
	awaiter         Awaiter               // Waits for required connections before running bootstrapping
	server          *api.Server           // Handles HTTP API calls
	keystore        *keystore.Keystore    // Used to derive each chain's blockchain keystore view

	// unblocked is false until bootstrapping completes; while false,
	// CreateChain queues requests in blockedChains instead of creating them
	// immediately (see unblockChains).
	unblocked     bool
	blockedChains []ChainParameters
}
log.RecoverAndPanic(timeoutManager.Dispatch) + + router.Initialize(log, &timeoutManager) + + m := &manager{ + log: log, + logFactory: logFactory, + vmManager: vmManager, + decisionEvents: decisionEvents, + consensusEvents: consensusEvents, + db: db, + chainRouter: router, + sender: sender, + timeoutManager: &timeoutManager, + consensusParams: consensusParams, + validators: validators, + nodeID: nodeID, + networkID: networkID, + awaiter: awaiter, + server: server, + keystore: keystore, + } + m.Initialize() + return m +} + +// Router that this chain manager is using to route consensus messages to chains +func (m *manager) Router() router.Router { return m.chainRouter } + +// Create a chain +func (m *manager) CreateChain(chain ChainParameters) { + if !m.unblocked { + m.blockedChains = append(m.blockedChains, chain) + } else { + m.ForceCreateChain(chain) + } +} + +// Create a chain +func (m *manager) ForceCreateChain(chain ChainParameters) { + m.log.Info("creating chain:\n"+ + " ID: %s\n"+ + " VMID:%s", + chain.ID, + chain.VMAlias, + ) + + // Assert that there isn't already a chain with an alias in [chain].Aliases + // (Recall that the string repr. of a chain's ID is also an alias for a chain) + if alias, isRepeat := m.isChainWithAlias(chain.ID.String()); isRepeat { + m.log.Error("there is already a chain with alias '%s'. 
Chain not created.", alias) + return + } + + vmID, err := m.vmManager.Lookup(chain.VMAlias) + if err != nil { + m.log.Error("error while looking up VM: %s", err) + return + } + + // Get a factory for the vm we want to use on our chain + vmFactory, err := m.vmManager.GetVMFactory(vmID) + if err != nil { + m.log.Error("error while getting vmFactory: %s", err) + return + } + + // Create the chain + vm := vmFactory.New() + + fxs := make([]*common.Fx, len(chain.FxAliases)) + for i, fxAlias := range chain.FxAliases { + fxID, err := m.vmManager.Lookup(fxAlias) + if err != nil { + m.log.Error("error while looking up Fx: %s", err) + return + } + + // Get a factory for the fx we want to use on our chain + fxFactory, err := m.vmManager.GetVMFactory(fxID) + if err != nil { + m.log.Error("error while getting fxFactory: %s", err) + return + } + + // Create the fx + fxs[i] = &common.Fx{ + ID: fxID, + Fx: fxFactory.New(), + } + } + + // Create the log and context of the chain + chainLog, err := m.logFactory.MakeChain(chain.ID, "") + if err != nil { + m.log.Error("error while creating chain's log %s", err) + return + } + + ctx := &snow.Context{ + NetworkID: m.networkID, + ChainID: chain.ID, + Log: chainLog, + DecisionDispatcher: m.decisionEvents, + ConsensusDispatcher: m.consensusEvents, + NodeID: m.nodeID, + HTTP: m.server, + Keystore: m.keystore.NewBlockchainKeyStore(chain.ID), + BCLookup: m, + } + consensusParams := m.consensusParams + if alias, err := m.PrimaryAlias(ctx.ChainID); err == nil { + consensusParams.Namespace = fmt.Sprintf("gecko_%s", alias) + } else { + consensusParams.Namespace = fmt.Sprintf("gecko_%s", ctx.ChainID) + } + + // The validators of this blockchain + validators, ok := m.validators.GetValidatorSet(ids.Empty) // TODO: Change argument to chain.SubnetID + if !ok { + m.log.Error("couldn't get validator set of subnet with ID %s. 
The subnet may not exist", chain.SubnetID) + return + } + + beacons := validators + if chain.CustomBeacons != nil { + beacons = chain.CustomBeacons + } + + switch vm := vm.(type) { + case avalanche.DAGVM: + err := m.createAvalancheChain( + ctx, + chain.GenesisData, + validators, + beacons, + vm, + fxs, + consensusParams, + ) + if err != nil { + m.log.Error("error while creating new avalanche vm %s", err) + return + } + case smeng.ChainVM: + err := m.createSnowmanChain( + ctx, + chain.GenesisData, + validators, + beacons, + vm, + fxs, + consensusParams.Parameters, + ) + if err != nil { + m.log.Error("error while creating new snowman vm %s", err) + return + } + default: + m.log.Error("the vm should have type avalanche.DAGVM or snowman.ChainVM. Chain not created") + return + } + + // Associate the newly created chain with its default alias + m.log.AssertNoError(m.Alias(chain.ID, chain.ID.String())) + + // Notify those that registered to be notified when a new chain is created + m.notifyRegistrants(ctx, vm) +} + +// Implements Manager.AddRegistrant +func (m *manager) AddRegistrant(r Registrant) { m.registrants = append(m.registrants, r) } + +func (m *manager) unblockChains() { + m.unblocked = true + blocked := m.blockedChains + m.blockedChains = nil + for _, chain := range blocked { + m.ForceCreateChain(chain) + } +} + +// Create a DAG-based blockchain that uses Avalanche +func (m *manager) createAvalancheChain( + ctx *snow.Context, + genesisData []byte, + validators, + beacons validators.Set, + vm avalanche.DAGVM, + fxs []*common.Fx, + consensusParams avacon.Parameters, +) error { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + db := prefixdb.New(ctx.ChainID.Bytes(), m.db) + vmDB := prefixdb.New([]byte("vm"), db) + vertexDB := prefixdb.New([]byte("vertex"), db) + vertexBootstrappingDB := prefixdb.New([]byte("vertex_bootstrapping"), db) + txBootstrappingDB := prefixdb.New([]byte("tx_bootstrapping"), db) + + vtxBlocker, err := queue.New(vertexBootstrappingDB) + if err 
!= nil { + return err + } + txBlocker, err := queue.New(txBootstrappingDB) + if err != nil { + return err + } + + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, defaultChannelSize) + + if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, fxs); err != nil { + return err + } + + // Handles serialization/deserialization of vertices and also the + // persistence of vertices + vtxState := &state.Serializer{} + vtxState.Initialize(ctx, vm, vertexDB) + + // Passes messages from the consensus engine to the network + sender := sender.Sender{} + sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager) + + // The engine handles consensus + engine := avaeng.Transitive{ + Config: avaeng.Config{ + BootstrapConfig: avaeng.BootstrapConfig{ + Config: common.Config{ + Context: ctx, + }, + }, + }, + } + + engine.Initialize(avaeng.Config{ + BootstrapConfig: avaeng.BootstrapConfig{ + Config: common.Config{ + Context: ctx, + Validators: validators, + Beacons: beacons, + Alpha: (beacons.Len() + 1) / 2, + Sender: &sender, + }, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + State: vtxState, + VM: vm, + }, + Params: consensusParams, + Consensus: &avacon.Topological{}, + }) + + // Asynchronously passes messages from the network to the consensus engine + handler := &handler.Handler{} + handler.Initialize(&engine, msgChan, defaultChannelSize) + + // Allows messages to be routed to the new chain + m.chainRouter.AddChain(handler) + go ctx.Log.RecoverAndPanic(handler.Dispatch) + + awaiting := &networking.AwaitingConnections{ + Finish: func() { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + engine.Startup() + }, + } + for _, vdr := range beacons.List() { + awaiting.Requested.Add(vdr.ID()) + } + awaiting.NumRequired = (3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to + m.awaiter.AwaitConnections(awaiting) + + return nil +} 
// Create a linear chain using the Snowman consensus engine
func (m *manager) createSnowmanChain(
	ctx *snow.Context,
	genesisData []byte,
	validators,
	beacons validators.Set,
	vm smeng.ChainVM,
	fxs []*common.Fx,
	consensusParams snowball.Parameters,
) error {
	ctx.Lock.Lock()
	defer ctx.Lock.Unlock()

	// Each chain gets its own key space inside the node's database.
	db := prefixdb.New(ctx.ChainID.Bytes(), m.db)
	vmDB := prefixdb.New([]byte("vm"), db)
	bootstrappingDB := prefixdb.New([]byte("bootstrapping"), db)

	blocked, err := queue.New(bootstrappingDB)
	if err != nil {
		return err
	}

	// The channel through which a VM may send messages to the consensus engine
	// VM uses this channel to notify engine that a block is ready to be made
	msgChan := make(chan common.Message, defaultChannelSize)

	// Initialize the VM
	if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, fxs); err != nil {
		return err
	}

	// Passes messages from the consensus engine to the network
	sender := sender.Sender{}
	sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager)

	// The engine handles consensus
	engine := smeng.Transitive{}
	engine.Initialize(smeng.Config{
		BootstrapConfig: smeng.BootstrapConfig{
			Config: common.Config{
				Context:    ctx,
				Validators: validators,
				Beacons:    beacons,
				Alpha:      (beacons.Len() + 1) / 2, // simple majority of beacons
				Sender:     &sender,
			},
			Blocked: blocked,
			VM:      vm,
			// When this chain finishes bootstrapping, release any chains
			// queued by CreateChain (see unblockChains).
			Bootstrapped: m.unblockChains,
		},
		Params:    consensusParams,
		Consensus: &smcon.Topological{},
	})

	// Asynchronously passes messages from the network to the consensus engine
	handler := &handler.Handler{}
	handler.Initialize(&engine, msgChan, defaultChannelSize)

	// Allow incoming messages to be routed to the new chain
	m.chainRouter.AddChain(handler)
	go ctx.Log.RecoverAndPanic(handler.Dispatch)

	// Start the engine only once enough beacons are connected.
	awaiting := &networking.AwaitingConnections{
		Finish: func() {
			ctx.Lock.Lock()
			defer ctx.Lock.Unlock()

			engine.Startup()
		},
	}
	for _, vdr := range beacons.List() {
		awaiting.Requested.Add(vdr.ID())
	}
	awaiting.NumRequired = (3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to
	m.awaiter.AwaitConnections(awaiting)
	return nil
}

// Shutdown stops all the chains
func (m *manager) Shutdown() { m.chainRouter.Shutdown() }

// LookupVM returns the ID of the VM associated with an alias
func (m *manager) LookupVM(alias string) (ids.ID, error) { return m.vmManager.Lookup(alias) }

// Notify registrants [those who want to know about the creation of chains]
// that the specified chain has been created
func (m *manager) notifyRegistrants(ctx *snow.Context, vm interface{}) {
	for _, registrant := range m.registrants {
		registrant.RegisterChain(ctx, vm)
	}
}

// Returns:
// 1) the alias that already exists, or the empty string if there is none
// 2) true iff there exists a chain such that the chain has an alias in [aliases]
func (m *manager) isChainWithAlias(aliases ...string) (string, bool) {
	for _, alias := range aliases {
		if _, err := m.Lookup(alias); err == nil {
			return alias, true
		}
	}
	return "", false
}
// For ease of implementation, our database's interface matches Ethereum's
// database implementation. This was to allow us to use Geth code as is for the
// EVM chain.

// Batch is a write-only database that commits changes to its host database
// when Write is called. A batch cannot be used concurrently.
type Batch interface {
	KeyValueWriter

	// ValueSize retrieves the amount of data queued up for writing.
	ValueSize() int

	// Write flushes any accumulated data to disk.
	Write() error

	// Reset resets the batch for reuse.
	Reset()

	// Replay replays the batch contents.
	// Operations are applied to [w] in the order they were queued.
	Replay(w KeyValueWriter) error
}

// Batcher wraps the NewBatch method of a backing data store.
type Batcher interface {
	// NewBatch creates a write-only database that buffers changes to its host db
	// until a final write is called.
	NewBatch() Batch
}

// KeyValueReader wraps the Has and Get method of a backing data store.
type KeyValueReader interface {
	// Has retrieves if a key is present in the key-value data store.
	Has(key []byte) (bool, error)

	// Get retrieves the given key if it's present in the key-value data store.
	Get(key []byte) ([]byte, error)
}

// KeyValueWriter wraps the Put method of a backing data store.
type KeyValueWriter interface {
	// Put inserts the given value into the key-value data store.
	Put(key []byte, value []byte) error

	// Delete removes the key from the key-value data store.
	Delete(key []byte) error
}

// Stater wraps the Stat method of a backing data store.
type Stater interface {
	// Stat returns a particular internal stat of the database.
	Stat(property string) (string, error)
}

// Compacter wraps the Compact method of a backing data store.
type Compacter interface {
	// Compact the underlying DB for the given key range.
	// Specifically, deleted and overwritten versions are discarded,
	// and the data is rearranged to reduce the cost of operations
	// needed to access the data. This operation should typically only
	// be invoked by users who understand the underlying implementation.
	//
	// A nil start is treated as a key before all keys in the DB.
	// And a nil limit is treated as a key after all keys in the DB.
	// Therefore if both are nil then it will compact entire DB.
	Compact(start []byte, limit []byte) error
}

// Database contains all the methods required to allow handling different
// key-value data stores backing the database.
// These interfaces intentionally mirror Ethereum's ethdb interfaces so that
// Geth code can be reused (see the header comment above).
type Database interface {
	KeyValueReader
	KeyValueWriter
	Batcher
	Iteratee
	Stater
	Compacter
	io.Closer
}
// Database encrypts all values that are provided.
// Keys are passed through to the backing database unmodified; only values
// are encrypted.
type Database struct {
	lock   sync.RWMutex
	codec  codec.Codec       // serializes {ciphertext, nonce} pairs for storage
	cipher cipher.AEAD       // XChaCha20-Poly1305 AEAD keyed from the password hash
	db     database.Database // backing store; set to nil by Close
}

// New returns a new encrypted database.
// The 32-byte digest of [password] (hashing.ComputeHash256) is used as the
// XChaCha20-Poly1305 key.
func New(password []byte, db database.Database) (*Database, error) {
	h := hashing.ComputeHash256(password)
	aead, err := chacha20poly1305.NewX(h)
	if err != nil {
		return nil, err
	}
	return &Database{
		codec:  codec.NewDefault(),
		cipher: aead,
		db:     db,
	}, nil
}

// Has implements the Database interface
func (db *Database) Has(key []byte) (bool, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.db == nil {
		return false, database.ErrClosed
	}
	return db.db.Has(key)
}

// Get implements the Database interface
func (db *Database) Get(key []byte) ([]byte, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.db == nil {
		return nil, database.ErrClosed
	}
	encVal, err := db.db.Get(key)
	if err != nil {
		return nil, err
	}
	// Stored values are serialized {ciphertext, nonce} pairs; decrypt before
	// returning.
	return db.decrypt(encVal)
}

// Put implements the Database interface
func (db *Database) Put(key, value []byte) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.db == nil {
		return database.ErrClosed
	}

	encValue, err := db.encrypt(value)
	if err != nil {
		return err
	}
	return db.db.Put(key, encValue)
}

// Delete implements the Database interface
func (db *Database) Delete(key []byte) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.db == nil {
		return database.ErrClosed
	}
	return db.db.Delete(key)
}

// NewBatch implements the Database interface
// NOTE(review): unlike every other method here, this does not check whether
// the database has been closed; calling it after Close dereferences a nil
// db.db — confirm and consider guarding.
func (db *Database) NewBatch() database.Batch {
	return &batch{
		Batch: db.db.NewBatch(),
		db:    db,
	}
}

// NewIterator implements the Database interface
func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) }

// NewIteratorWithStart implements the Database interface
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(start, nil)
}

// NewIteratorWithPrefix implements the Database interface
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
	return db.NewIteratorWithStartAndPrefix(nil, prefix)
}

// NewIteratorWithStartAndPrefix implements the Database interface
// On a closed database a no-op iterator carrying ErrClosed is returned.
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.db == nil {
		return &nodb.Iterator{Err: database.ErrClosed}
	}
	return &iterator{
		Iterator: db.db.NewIteratorWithStartAndPrefix(start, prefix),
		db:       db,
	}
}

// Stat implements the Database interface
func (db *Database) Stat(stat string) (string, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	if db.db == nil {
		return "", database.ErrClosed
	}
	return db.db.Stat(stat)
}

// Compact implements the Database interface
func (db *Database) Compact(start, limit []byte) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.db == nil {
		return database.ErrClosed
	}
	return db.db.Compact(start, limit)
}

// Close implements the Database interface
// Closing nils out the backing reference; all subsequent operations return
// ErrClosed.
func (db *Database) Close() error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.db == nil {
		return database.ErrClosed
	}
	db.db = nil
	return nil
}

// keyValue records one queued batch operation in plaintext so it can be
// replayed later (see batch.Replay).
type keyValue struct {
	key    []byte
	value  []byte // plaintext value; nil for deletes
	delete bool
}

// batch wraps the backing database's batch. The embedded Batch holds
// encrypted values, while [writes] keeps a plaintext history for Replay.
type batch struct {
	database.Batch

	db     *Database
	writes []keyValue
}
// Delete queues a key removal; the plaintext record is kept for Replay.
func (b *batch) Delete(key []byte) error {
	b.writes = append(b.writes, keyValue{copyBytes(key), nil, true})
	return b.Batch.Delete(key)
}

// Write commits the queued (already encrypted) operations to the backing
// database, failing if the database has been closed.
func (b *batch) Write() error {
	b.db.lock.Lock()
	defer b.db.lock.Unlock()

	if b.db.db == nil {
		return database.ErrClosed
	}

	return b.Batch.Write()
}

// Reset resets the batch for reuse.
func (b *batch) Reset() {
	b.writes = b.writes[:0]
	b.Batch.Reset()
}

// Replay replays the batch contents.
// Note that the recorded values are plaintext (they were captured before
// encryption in Put), so replaying onto another encrypted Database
// re-encrypts them through its Put.
func (b *batch) Replay(w database.KeyValueWriter) error {
	for _, keyvalue := range b.writes {
		if keyvalue.delete {
			if err := w.Delete(keyvalue.key); err != nil {
				return err
			}
		} else if err := w.Put(keyvalue.key, keyvalue.value); err != nil {
			return err
		}
	}
	return nil
}

// iterator wraps a backing iterator and decrypts each value as the iterator
// advances.
type iterator struct {
	database.Iterator
	db *Database

	val []byte // decrypted value at the current position
	err error  // first decryption failure, if any
}

// Next advances the iterator, eagerly decrypting the new position's value.
// A decryption failure stops iteration; the error is surfaced via Error.
func (it *iterator) Next() bool {
	next := it.Iterator.Next()
	if next {
		encVal := it.Iterator.Value()
		val, err := it.db.decrypt(encVal)
		if err != nil {
			it.err = err
			return false
		}
		it.val = val
	} else {
		it.val = nil
	}
	return next
}

// Error reports a decryption error, if one occurred, in preference to any
// error from the underlying iterator.
func (it *iterator) Error() error {
	if it.err != nil {
		return it.err
	}
	return it.Iterator.Error()
}

// Value returns the decrypted value at the current position.
func (it *iterator) Value() []byte { return it.val }

// copyBytes returns a defensive copy of [bytes].
func copyBytes(bytes []byte) []byte {
	copiedBytes := make([]byte, len(bytes))
	copy(copiedBytes, bytes)
	return copiedBytes
}

// encryptedValue is the stored representation of a value: the AEAD
// ciphertext together with the random nonce used to produce it.
type encryptedValue struct {
	Ciphertext []byte `serialize:"true"`
	Nonce      []byte `serialize:"true"`
}

// encrypt seals [plaintext] under a fresh random XChaCha20-Poly1305 nonce and
// returns the codec-serialized {ciphertext, nonce} pair.
func (db *Database) encrypt(plaintext []byte) ([]byte, error) {
	nonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	ciphertext := db.cipher.Seal(nil, nonce, plaintext, nil)
	return db.codec.Marshal(&encryptedValue{
		Ciphertext: ciphertext,
		Nonce:      nonce,
	})
}
// TestInterface runs the shared database test suite against an encrypted
// database layered over a fresh in-memory backing store per test.
func TestInterface(t *testing.T) {
	pw := "lol totally a secure password"
	for _, test := range database.Tests {
		unencryptedDB := memdb.New()
		db, err := New([]byte(pw), unencryptedDB)
		if err != nil {
			t.Fatal(err)
		}

		test(t, db)
	}
}

// common errors
var (
	// ErrClosed is returned when an operation is attempted on a closed database.
	ErrClosed = errors.New("closed")
	// ErrNotFound is returned when the requested key is absent.
	ErrNotFound = errors.New("not found")
)
// An iterator must be released after use, but it is not necessary to read an
// iterator until exhaustion. An iterator is not safe for concurrent use, but it
// is safe to use multiple iterators concurrently.
// (This interface intentionally mirrors Ethereum's ethdb iterator so Geth
// code can be reused — see the header comment of this file.)
type Iterator interface {
	// Next moves the iterator to the next key/value pair. It returns whether the
	// iterator is exhausted.
	Next() bool

	// Error returns any accumulated error. Exhausting all the key/value pairs
	// is not considered to be an error.
	Error() error

	// Key returns the key of the current key/value pair, or nil if done. The caller
	// should not modify the contents of the returned slice, and its contents may
	// change on the next call to Next.
	Key() []byte

	// Value returns the value of the current key/value pair, or nil if done. The
	// caller should not modify the contents of the returned slice, and its contents
	// may change on the next call to Next.
	Value() []byte

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing error.
	Release()
}
const (
	// minBlockCacheSize is the minimum number of bytes to use for block caching
	// in leveldb.
	minBlockCacheSize = 8 * opt.MiB

	// minWriteBufferSize is the minimum number of bytes to use for buffers in
	// leveldb.
	minWriteBufferSize = 8 * opt.MiB

	// minHandleCap is the minimum number of files descriptors to cap levelDB to
	// use
	minHandleCap = 16
)

// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct{ *leveldb.DB }

// New returns a wrapped LevelDB object.
// Caller-provided sizes below the package minimums are silently raised.
func New(file string, blockCacheSize, writeBufferSize, handleCap int) (*Database, error) {
	// Enforce minimums
	if blockCacheSize < minBlockCacheSize {
		blockCacheSize = minBlockCacheSize
	}
	if writeBufferSize < minWriteBufferSize {
		writeBufferSize = minWriteBufferSize
	}
	if handleCap < minHandleCap {
		handleCap = minHandleCap
	}

	// Open the db and recover any potential corruptions
	db, err := leveldb.OpenFile(file, &opt.Options{
		OpenFilesCacheCapacity: handleCap,
		BlockCacheCapacity:     blockCacheSize,
		// There are two buffers of size WriteBuffer used.
		WriteBuffer: writeBufferSize / 2,
		Filter:      filter.NewBloomFilter(10),
	})
	if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
		// NOTE(review): recovery reopens the database with nil options,
		// discarding the tuned cache/buffer/handle settings above — confirm
		// this is intentional.
		db, err = leveldb.RecoverFile(file, nil)
	}
	if err != nil {
		return nil, err
	}
	return &Database{DB: db}, nil
}

// Has returns if the key is set in the database
// (updateError maps goleveldb errors to this package's database errors; it is
// defined elsewhere in this package.)
func (db *Database) Has(key []byte) (bool, error) {
	has, err := db.DB.Has(key, nil)
	return has, updateError(err)
}

// Get returns the value the key maps to in the database
func (db *Database) Get(key []byte) ([]byte, error) {
	value, err := db.DB.Get(key, nil)
	return value, updateError(err)
}

// Put sets the value of the provided key to the provided value
func (db *Database) Put(key []byte, value []byte) error {
	return updateError(db.DB.Put(key, value, nil))
}

// Delete removes the key from the database
func (db *Database) Delete(key []byte) error { return updateError(db.DB.Delete(key, nil)) }

// NewBatch creates a write/delete-only buffer that is atomically committed to
// the database when write is called
func (db *Database) NewBatch() database.Batch { return &batch{db: db.DB} }

// NewIterator creates a lexicographically ordered iterator over the database
func (db *Database) NewIterator() database.Iterator {
	return &iter{db.DB.NewIterator(new(util.Range), nil)}
}

// NewIteratorWithStart creates a lexicographically ordered iterator over the
// database starting at the provided key
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
	return &iter{db.DB.NewIterator(&util.Range{Start: start}, nil)}
}

// NewIteratorWithPrefix creates a lexicographically ordered iterator over the
// database ignoring keys that do not start with the provided prefix
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
	return &iter{db.DB.NewIterator(util.BytesPrefix(prefix), nil)}
}
starting at start and ignoring keys that do not start with +// the provided prefix +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + iterRange := util.BytesPrefix(prefix) + if bytes.Compare(start, prefix) == 1 { + iterRange.Start = start + } + return &iter{db.DB.NewIterator(iterRange, nil)} +} + +// Stat returns a particular internal stat of the database. +func (db *Database) Stat(property string) (string, error) { + stat, err := db.DB.GetProperty(property) + return stat, updateError(err) +} + +// This comment is basically copy pasted from the underlying levelDB library: + +// Compact the underlying DB for the given key range. +// Specifically, deleted and overwritten versions are discarded, +// and the data is rearranged to reduce the cost of operations +// needed to access the data. This operation should typically only +// be invoked by users who understand the underlying implementation. +// +// A nil start is treated as a key before all keys in the DB. +// And a nil limit is treated as a key after all keys in the DB. +// Therefore if both are nil then it will compact entire DB. +func (db *Database) Compact(start []byte, limit []byte) error { + return updateError(db.DB.CompactRange(util.Range{Start: start, Limit: limit})) +} + +// Close implements the Database interface +func (db *Database) Close() error { return updateError(db.DB.Close()) } + +// batch is a wrapper around a levelDB batch to contain sizes. +type batch struct { + leveldb.Batch + + db *leveldb.DB + size int +} + +// Put the value into the batch for later writing +func (b *batch) Put(key, value []byte) error { + b.Batch.Put(key, value) + b.size += len(value) + return nil +} + +// Delete the key during writing +func (b *batch) Delete(key []byte) error { + b.Batch.Delete(key) + b.size++ + return nil +} + +// ValueSize retrieves the amount of data queued up for writing. 
+func (b *batch) ValueSize() int { return b.size } + +// Write flushes any accumulated data to disk. +func (b *batch) Write() error { return updateError(b.db.Write(&b.Batch, nil)) } + +// Reset resets the batch for reuse. +func (b *batch) Reset() { + b.Batch.Reset() + b.size = 0 +} + +// Replay the batch contents. +func (b *batch) Replay(w database.KeyValueWriter) error { + replay := &replayer{writer: w} + if err := b.Batch.Replay(replay); err != nil { + // Never actually returns an error, because Replay just returns nil + return updateError(err) + } + return updateError(replay.err) +} + +type replayer struct { + writer database.KeyValueWriter + err error +} + +func (r *replayer) Put(key, value []byte) { + if r.err != nil { + return + } + r.err = r.writer.Put(key, value) +} + +func (r *replayer) Delete(key []byte) { + if r.err != nil { + return + } + r.err = r.writer.Delete(key) +} + +type iter struct{ iterator.Iterator } + +func (i *iter) Error() error { return updateError(i.Iterator.Error()) } + +func updateError(err error) error { + switch err { + case leveldb.ErrClosed: + return database.ErrClosed + case leveldb.ErrNotFound: + return database.ErrNotFound + default: + return err + } +} diff --git a/database/leveldb/leveldb_test.go b/database/leveldb/leveldb_test.go new file mode 100644 index 0000000..25a2d9c --- /dev/null +++ b/database/leveldb/leveldb_test.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package leveldb + +import ( + "fmt" + "os" + "testing" + + "github.com/ava-labs/gecko/database" +) + +func TestInterface(t *testing.T) { + for i, test := range database.Tests { + folder := fmt.Sprintf("db%d", i) + + db, err := New(folder, 0, 0, 0) + if err != nil { + t.Fatalf("leveldb.New(%s, 0, 0) errored with %s", folder, err) + } + defer os.RemoveAll(folder) + defer db.Close() + + test(t, db) + } +} diff --git a/database/memdb/memdb.go b/database/memdb/memdb.go new file mode 100644 index 0000000..9f6ba58 --- /dev/null +++ b/database/memdb/memdb.go @@ -0,0 +1,258 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package memdb + +import ( + "sort" + "strings" + "sync" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/nodb" +) + +// DefaultSize is the default initial size of the memory database +const DefaultSize = 1 << 10 + +// Database is an ephemeral key-value store that implements the Database +// interface. +type Database struct { + lock sync.RWMutex + db map[string][]byte +} + +// New returns a map with the Database interface methods implemented. +func New() *Database { return NewWithSize(DefaultSize) } + +// NewWithSize returns a map pre-allocated to the provided size with the +// Database interface methods implemented. 
+func NewWithSize(size int) *Database { return &Database{db: make(map[string][]byte, size)} } + +// Close implements the Database interface +func (db *Database) Close() error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + db.db = nil + return nil +} + +// Has implements the Database interface +func (db *Database) Has(key []byte) (bool, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return false, database.ErrClosed + } + _, ok := db.db[string(key)] + return ok, nil +} + +// Get implements the Database interface +func (db *Database) Get(key []byte) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return nil, database.ErrClosed + } + if entry, ok := db.db[string(key)]; ok { + return copyBytes(entry), nil + } + return nil, database.ErrNotFound +} + +// Put implements the Database interface +func (db *Database) Put(key []byte, value []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + db.db[string(key)] = copyBytes(value) + return nil +} + +// Delete implements the Database interface +func (db *Database) Delete(key []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + delete(db.db, string(key)) + return nil +} + +// NewBatch implements the Database interface +func (db *Database) NewBatch() database.Batch { return &batch{db: db} } + +// NewIterator implements the Database interface +func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) } + +// NewIteratorWithStart implements the Database interface +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +// NewIteratorWithPrefix implements the Database interface +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return 
db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +// NewIteratorWithStartAndPrefix implements the Database interface +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return &nodb.Iterator{Err: database.ErrClosed} + } + + startString := string(start) + prefixString := string(prefix) + keys := make([]string, 0, len(db.db)) + for key := range db.db { + if strings.HasPrefix(key, prefixString) && key >= startString { + keys = append(keys, key) + } + } + sort.Strings(keys) // Keys need to be in sorted order + values := make([][]byte, 0, len(keys)) + for _, key := range keys { + values = append(values, db.db[key]) + } + return &iterator{ + keys: keys, + values: values, + } +} + +// Stat implements the Database interface +func (db *Database) Stat(property string) (string, error) { return "", database.ErrNotFound } + +// Compact implements the Database interface +func (db *Database) Compact(start []byte, limit []byte) error { return nil } + +type keyValue struct { + key []byte + value []byte + delete bool +} + +type batch struct { + db *Database + writes []keyValue + size int +} + +func (b *batch) Put(key, value []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), copyBytes(value), false}) + b.size += len(value) + return nil +} + +func (b *batch) Delete(key []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), nil, true}) + b.size++ + return nil +} + +// ValueSize implements the Batch interface +func (b *batch) ValueSize() int { return b.size } + +// Write implements the Batch interface +func (b *batch) Write() error { + b.db.lock.Lock() + defer b.db.lock.Unlock() + + if b.db.db == nil { + return database.ErrClosed + } + + for _, kv := range b.writes { + key := string(kv.key) + if kv.delete { + delete(b.db.db, key) + } else { + b.db.db[key] = kv.value + } + } + return nil +} + +// Reset implements the Batch interface +func (b 
*batch) Reset() { + b.writes = b.writes[:0] + b.size = 0 +} + +// Replay implements the Batch interface +func (b *batch) Replay(w database.KeyValueWriter) error { + for _, keyvalue := range b.writes { + if keyvalue.delete { + if err := w.Delete(keyvalue.key); err != nil { + return err + } + } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { + return err + } + } + return nil +} + +type iterator struct { + initialized bool + keys []string + values [][]byte +} + +// Next implements the Iterator interface +func (it *iterator) Next() bool { + // If the iterator was not yet initialized, do it now + if !it.initialized { + it.initialized = true + return len(it.keys) > 0 + } + // Iterator already initialize, advance it + if len(it.keys) > 0 { + it.keys = it.keys[1:] + it.values = it.values[1:] + } + return len(it.keys) > 0 +} + +// Error implements the Iterator interface +func (it *iterator) Error() error { return nil } + +// Key implements the Iterator interface +func (it *iterator) Key() []byte { + if len(it.keys) > 0 { + return []byte(it.keys[0]) + } + return nil +} + +// Value implements the Iterator interface +func (it *iterator) Value() []byte { + if len(it.values) > 0 { + return it.values[0] + } + return nil +} + +// Release implements the Iterator interface +func (it *iterator) Release() { it.keys = nil; it.values = nil } + +func copyBytes(bytes []byte) []byte { + copiedBytes := make([]byte, len(bytes)) + copy(copiedBytes, bytes) + return copiedBytes +} diff --git a/database/memdb/memdb_test.go b/database/memdb/memdb_test.go new file mode 100644 index 0000000..0f16fee --- /dev/null +++ b/database/memdb/memdb_test.go @@ -0,0 +1,16 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package memdb + +import ( + "testing" + + "github.com/ava-labs/gecko/database" +) + +func TestInterface(t *testing.T) { + for _, test := range database.Tests { + test(t, New()) + } +} diff --git a/database/mockdb/mockdb.go b/database/mockdb/mockdb.go new file mode 100644 index 0000000..a50d692 --- /dev/null +++ b/database/mockdb/mockdb.go @@ -0,0 +1,137 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package mockdb + +import ( + "errors" + + "github.com/ava-labs/gecko/database" +) + +var errNoFunction = errors.New("user didn't specify what value(s) return") + +// Database implements database.Database. +// This is a mock database meant to be used in tests. +// You specify the database's return value(s) for a given method call by +// assign value to the corresponding member. +// For example, to specify what should happen when Has is called, +// assign a value to OnHas. +// If no value is assigned to the corresponding member, the method returns an error or nil +// If you +type Database struct { + // Executed when Has is called + OnHas func([]byte) (bool, error) + OnGet func([]byte) ([]byte, error) + OnPut func([]byte) error + OnDelete func([]byte) error + OnNewBatch func() database.Batch + OnNewIterator func() database.Iterator + OnNewIteratorWithStart func([]byte) database.Iterator + OnNewIteratorWithPrefix func([]byte) database.Iterator + OnNewIteratorWithStartAndPrefix func([]byte, []byte) database.Iterator + OnStat func() (string, error) + OnCompact func([]byte, []byte) error + OnClose func() error +} + +// Has implements the database.Database interface +func (db *Database) Has(b []byte) (bool, error) { + if db.OnHas == nil { + return false, errNoFunction + } + return db.OnHas(b) +} + +// Get implements the database.Database interface +func (db *Database) Get(b []byte) ([]byte, error) { + if db.OnGet == nil { + return nil, errNoFunction + } + return db.OnGet(b) +} + +// Put implements the 
database.Database interface +func (db *Database) Put(b []byte) error { + if db.OnPut == nil { + return errNoFunction + } + return db.OnPut(b) +} + +// Delete implements the database.Database interface +func (db *Database) Delete(b []byte) error { + if db.OnDelete == nil { + return errNoFunction + } + return db.OnDelete(b) +} + +// NewBatch implements the database.Database interface +func (db *Database) NewBatch() database.Batch { + if db.OnNewBatch == nil { + return nil + } + return db.OnNewBatch() +} + +// NewIterator implements the database.Database interface +func (db *Database) NewIterator() database.Iterator { + if db.OnNewIterator == nil { + return nil + } + return db.OnNewIterator() +} + +// NewIteratorWithStart implements the database.Database interface +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + if db.OnNewIteratorWithStart == nil { + return nil + } + return db.OnNewIteratorWithStart(start) +} + +// NewIteratorWithPrefix implements the database.Database interface +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + if db.OnNewIteratorWithPrefix == nil { + return nil + } + return db.OnNewIteratorWithPrefix(prefix) +} + +// NewIteratorWithStartAndPrefix implements the database.Database interface +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + if db.OnNewIteratorWithStartAndPrefix == nil { + return nil + } + return db.OnNewIteratorWithStartAndPrefix(start, prefix) +} + +// Stat implements the database.Database interface +func (db *Database) Stat() (string, error) { + if db.OnStat == nil { + return "", errNoFunction + } + return db.OnStat() +} + +// Compact implements the database.Database interface +func (db *Database) Compact(start []byte, limit []byte) error { + if db.OnCompact == nil { + return errNoFunction + } + return db.OnCompact(start, limit) +} + +// Close implements the database.Database interface +func (db *Database) Close() error { + if 
db.OnClose == nil { + return errNoFunction + } + return db.OnClose() +} + +// New returns a new mock database +func New() *Database { + return &Database{} +} diff --git a/database/mockdb/mockdb_test.go b/database/mockdb/mockdb_test.go new file mode 100644 index 0000000..73f1814 --- /dev/null +++ b/database/mockdb/mockdb_test.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package mockdb + +import ( + "bytes" + "errors" + "testing" +) + +// Assert that when no members are assigned values, every method returns nil/error +func TestDefaultError(t *testing.T) { + db := New() + + if err := db.Close(); err == nil { + t.Fatal("should have errored") + } + if _, err := db.Has([]byte{}); err == nil { + t.Fatal("should have errored") + } + if _, err := db.Get([]byte{}); err == nil { + t.Fatal("should have errored") + } + if err := db.Put([]byte{}); err == nil { + t.Fatal("should have errored") + } + if err := db.Delete([]byte{}); err == nil { + t.Fatal("should have errored") + } + if batch := db.NewBatch(); batch != nil { + t.Fatal("should have been nil") + } + if iterator := db.NewIterator(); iterator != nil { + t.Fatal("should have errored") + } + if iterator := db.NewIteratorWithPrefix([]byte{}); iterator != nil { + t.Fatal("should have errored") + } + if iterator := db.NewIteratorWithStart([]byte{}); iterator != nil { + t.Fatal("should have errored") + } + if iterator := db.NewIteratorWithStartAndPrefix([]byte{}, []byte{}); iterator != nil { + t.Fatal("should have errored") + } + if err := db.Compact([]byte{}, []byte{}); err == nil { + t.Fatal("should have errored") + } + if _, err := db.Stat(); err == nil { + t.Fatal("should have errored") + } +} + +// Assert that mocking works for Get +func TestGet(t *testing.T) { + db := New() + + // Mock Has() + db.OnHas = func(b []byte) (bool, error) { + if bytes.Equal(b, []byte{1, 2, 3}) { + return true, nil + } + return false, errors.New("") + } + + if has, 
err := db.Has([]byte{1, 2, 3}); err != nil { + t.Fatal("should not have errored") + } else if has != true { + t.Fatal("has should be true") + } + + if _, err := db.Has([]byte{1, 2}); err == nil { + t.Fatal("should have have errored") + } +} diff --git a/database/nodb/nodb.go b/database/nodb/nodb.go new file mode 100644 index 0000000..96b5ef0 --- /dev/null +++ b/database/nodb/nodb.go @@ -0,0 +1,88 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package nodb + +import ( + "github.com/ava-labs/gecko/database" +) + +// Database is a lightning fast key value store with probabilistic operations. +type Database struct{} + +// Has returns false, nil +func (*Database) Has([]byte) (bool, error) { return false, database.ErrClosed } + +// Get returns nil, error +func (*Database) Get([]byte) ([]byte, error) { return nil, database.ErrClosed } + +// Put returns nil +func (*Database) Put(_ []byte, _ []byte) error { return database.ErrClosed } + +// Delete returns nil +func (*Database) Delete([]byte) error { return database.ErrClosed } + +// NewBatch returns a new batch +func (*Database) NewBatch() database.Batch { return &Batch{} } + +// NewIterator returns a new empty iterator +func (*Database) NewIterator() database.Iterator { return &Iterator{} } + +// NewIteratorWithStart returns a new empty iterator +func (*Database) NewIteratorWithStart([]byte) database.Iterator { return &Iterator{} } + +// NewIteratorWithPrefix returns a new empty iterator +func (*Database) NewIteratorWithPrefix([]byte) database.Iterator { return &Iterator{} } + +// NewIteratorWithStartAndPrefix returns a new empty iterator +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + return &Iterator{} +} + +// Stat returns an error +func (*Database) Stat(string) (string, error) { return "", database.ErrClosed } + +// Compact returns nil +func (*Database) Compact(_, _ []byte) error { return database.ErrClosed } + 
+// Close returns nil +func (*Database) Close() error { return database.ErrClosed } + +// Batch does nothing +type Batch struct{} + +// Put returns nil +func (*Batch) Put(_, _ []byte) error { return database.ErrClosed } + +// Delete returns nil +func (*Batch) Delete([]byte) error { return database.ErrClosed } + +// ValueSize returns 0 +func (*Batch) ValueSize() int { return 0 } + +// Write returns nil +func (*Batch) Write() error { return database.ErrClosed } + +// Reset does nothing +func (*Batch) Reset() {} + +// Replay does nothing +func (*Batch) Replay(database.KeyValueWriter) error { return database.ErrClosed } + +// Iterator does nothing +type Iterator struct{ Err error } + +// Next returns false +func (*Iterator) Next() bool { return false } + +// Error returns any errors +func (it *Iterator) Error() error { return it.Err } + +// Key returns nil +func (*Iterator) Key() []byte { return nil } + +// Value returns nil +func (*Iterator) Value() []byte { return nil } + +// Release does nothing +func (*Iterator) Release() {} diff --git a/database/prefixdb/prefixdb.go b/database/prefixdb/prefixdb.go new file mode 100644 index 0000000..5e88318 --- /dev/null +++ b/database/prefixdb/prefixdb.go @@ -0,0 +1,237 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package prefixdb + +import ( + "sync" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/nodb" + "github.com/ava-labs/gecko/utils/hashing" +) + +// Database partitions a database into a sub-database by prefixing all keys with +// a unique value. 
+type Database struct { + lock sync.RWMutex + dbPrefix []byte + db database.Database +} + +// New returns a new prefixed database +func New(prefix []byte, db database.Database) *Database { + if prefixDB, ok := db.(*Database); ok { + simplePrefix := make([]byte, len(prefixDB.dbPrefix)+len(prefix)) + copy(simplePrefix, prefixDB.dbPrefix) + copy(simplePrefix[len(prefixDB.dbPrefix):], prefix) + + return NewNested(simplePrefix, prefixDB.db) + } + return NewNested(prefix, db) +} + +// NewNested returns a new prefixed database without attempting to compress +// prefixes. +func NewNested(prefix []byte, db database.Database) *Database { + return &Database{ + dbPrefix: hashing.ComputeHash256(prefix), + db: db, + } +} + +// Has implements the Database interface +func (db *Database) Has(key []byte) (bool, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return false, database.ErrClosed + } + return db.db.Has(db.prefix(key)) +} + +// Get implements the Database interface +func (db *Database) Get(key []byte) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return nil, database.ErrClosed + } + return db.db.Get(db.prefix(key)) +} + +// Put implements the Database interface +func (db *Database) Put(key, value []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + return db.db.Put(db.prefix(key), value) +} + +// Delete implements the Database interface +func (db *Database) Delete(key []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + return db.db.Delete(db.prefix(key)) +} + +// NewBatch implements the Database interface +func (db *Database) NewBatch() database.Batch { + return &batch{ + Batch: db.db.NewBatch(), + db: db, + } +} + +// NewIterator implements the Database interface +func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) } + +// 
NewIteratorWithStart implements the Database interface +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +// NewIteratorWithPrefix implements the Database interface +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +// NewIteratorWithStartAndPrefix implements the Database interface +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return &nodb.Iterator{Err: database.ErrClosed} + } + return &iterator{ + Iterator: db.db.NewIteratorWithStartAndPrefix(db.prefix(start), db.prefix(prefix)), + db: db, + } +} + +// Stat implements the Database interface +func (db *Database) Stat(stat string) (string, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.db == nil { + return "", database.ErrClosed + } + return db.db.Stat(stat) +} + +// Compact implements the Database interface +func (db *Database) Compact(start, limit []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + return db.db.Compact(db.prefix(start), db.prefix(limit)) +} + +// Close implements the Database interface +func (db *Database) Close() error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.db == nil { + return database.ErrClosed + } + db.db = nil + return nil +} + +func (db *Database) prefix(key []byte) []byte { + prefixedKey := make([]byte, len(db.dbPrefix)+len(key)) + copy(prefixedKey, db.dbPrefix) + copy(prefixedKey[len(db.dbPrefix):], key) + return prefixedKey +} + +type keyValue struct { + key []byte + value []byte + delete bool +} + +type batch struct { + database.Batch + db *Database + writes []keyValue +} + +// Put implements the Batch interface +func (b *batch) Put(key, value []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), 
copyBytes(value), false}) + return b.Batch.Put(b.db.prefix(key), value) +} + +// Delete implements the Batch interface +func (b *batch) Delete(key []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), nil, true}) + return b.Batch.Delete(b.db.prefix(key)) +} + +// Write flushes any accumulated data to the memory database. +func (b *batch) Write() error { + b.db.lock.Lock() + defer b.db.lock.Unlock() + + if b.db.db == nil { + return database.ErrClosed + } + + return b.Batch.Write() +} + +// Reset resets the batch for reuse. +func (b *batch) Reset() { + b.writes = b.writes[:0] + b.Batch.Reset() +} + +// Replay replays the batch contents. +func (b *batch) Replay(w database.KeyValueWriter) error { + for _, keyvalue := range b.writes { + if keyvalue.delete { + if err := w.Delete(keyvalue.key); err != nil { + return err + } + } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { + return err + } + } + return nil +} + +type iterator struct { + database.Iterator + db *Database +} + +// Key calls the inner iterators Key and strips the prefix +func (it *iterator) Key() []byte { + key := it.Iterator.Key() + if prefixLen := len(it.db.dbPrefix); len(key) >= prefixLen { + return key[prefixLen:] + } + return key +} + +func copyBytes(bytes []byte) []byte { + copiedBytes := make([]byte, len(bytes)) + copy(copiedBytes, bytes) + return copiedBytes +} diff --git a/database/prefixdb/prefixdb_test.go b/database/prefixdb/prefixdb_test.go new file mode 100644 index 0000000..4fba21d --- /dev/null +++ b/database/prefixdb/prefixdb_test.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package prefixdb + +import ( + "testing" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/memdb" +) + +func TestInterface(t *testing.T) { + for _, test := range database.Tests { + db := memdb.New() + test(t, New([]byte("hello"), db)) + test(t, New([]byte("world"), db)) + test(t, New([]byte("wor"), New([]byte("ld"), db))) + test(t, New([]byte("ld"), New([]byte("wor"), db))) + test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) + test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + } +} diff --git a/database/test_database.go b/database/test_database.go new file mode 100644 index 0000000..f255d8a --- /dev/null +++ b/database/test_database.go @@ -0,0 +1,551 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package database + +import ( + "bytes" + "testing" +) + +var ( + // Tests is a list of all database tests + Tests = []func(t *testing.T, db Database){ + TestSimpleKeyValue, + TestSimpleKeyValueClosed, + TestBatchPut, + TestBatchDelete, + TestBatchReset, + TestBatchReplay, + TestIterator, + TestIteratorStart, + TestIteratorPrefix, + TestIteratorStartPrefix, + TestIteratorClosed, + TestStatNoPanic, + TestCompactNoPanic, + } +) + +// TestSimpleKeyValue ... +func TestSimpleKeyValue(t *testing.T, db Database) { + key := []byte("hello") + value := []byte("world") + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key) + } else if v, err := db.Get(key); err != ErrNotFound { + t.Fatalf("Expected %s on db.Get for missing key %s. 
Returned 0x%x", ErrNotFound, key, v) + } else if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } + + if err := db.Put(key, value); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key) + } else if v, err := db.Get(key); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) + } + + if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key) + } else if v, err := db.Get(key); err != ErrNotFound { + t.Fatalf("Expected %s on db.Get for missing key %s. Returned 0x%x", ErrNotFound, key, v) + } else if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } +} + +// TestSimpleKeyValueClosed ... +func TestSimpleKeyValueClosed(t *testing.T, db Database) { + key := []byte("hello") + value := []byte("world") + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key) + } else if v, err := db.Get(key); err != ErrNotFound { + t.Fatalf("Expected %s on db.Get for missing key %s. 
Returned 0x%x", ErrNotFound, key, v) + } else if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } + + if err := db.Put(key, value); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key) + } else if v, err := db.Get(key); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) + } + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } + + if _, err := db.Has(key); err != ErrClosed { + t.Fatalf("Expected %s on db.Has after close", ErrClosed) + } else if _, err := db.Get(key); err != ErrClosed { + t.Fatalf("Expected %s on db.Get after close", ErrClosed) + } else if err := db.Put(key, value); err != ErrClosed { + t.Fatalf("Expected %s on db.Put after close", ErrClosed) + } else if err := db.Delete(key); err != ErrClosed { + t.Fatalf("Expected %s on db.Delete after close", ErrClosed) + } else if err := db.Close(); err != ErrClosed { + t.Fatalf("Expected %s on db.Close after close", ErrClosed) + } +} + +// TestBatchPut ... 
+func TestBatchPut(t *testing.T, db Database) { + key := []byte("hello") + value := []byte("world") + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Put(key, value); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if size := batch.ValueSize(); size <= 0 { + t.Fatalf("batch.ValueSize: Returned: %d ; Expected: > 0", size) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key) + } else if v, err := db.Get(key); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) + } + + if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } + + batch = db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Put(key, value); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + db.Close() + + if err := batch.Write(); err != ErrClosed { + t.Fatalf("Expected %s on batch.Write", ErrClosed) + } +} + +// TestBatchDelete ... 
+func TestBatchDelete(t *testing.T, db Database) { + key := []byte("hello") + value := []byte("world") + + if err := db.Put(key, value); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Delete(key); err != nil { + t.Fatalf("Unexpected error on batch.Delete: %s", err) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key) + } else if v, err := db.Get(key); err != ErrNotFound { + t.Fatalf("Expected %s on db.Get for missing key %s. Returned 0x%x", ErrNotFound, key, v) + } else if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } +} + +// TestBatchReset ... +func TestBatchReset(t *testing.T, db Database) { + key := []byte("hello") + value := []byte("world") + + if err := db.Put(key, value); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Delete(key); err != nil { + t.Fatalf("Unexpected error on batch.Delete: %s", err) + } + + batch.Reset() + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key) + } else if v, err := db.Get(key); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) + } +} + +// TestBatchReplay ... 
+func TestBatchReplay(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := batch.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + secondBatch := db.NewBatch() + if secondBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Replay(secondBatch); err != nil { + t.Fatalf("Unexpected error on batch.Replay: %s", err) + } + + if err := secondBatch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key1) + } else if v, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value1, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value1) + } + + thirdBatch := db.NewBatch() + if thirdBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := thirdBatch.Delete(key1); err != nil { + t.Fatalf("Unexpected error on batch.Delete: %s", err) + } else if err := thirdBatch.Delete(key2); err != nil { + t.Fatalf("Unexpected error on batch.Delete: %s", err) + } + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } + + if err := batch.Replay(db); err != ErrClosed { + t.Fatalf("Expected %s on batch.Replay", ErrClosed) + } else if err := thirdBatch.Replay(db); err != ErrClosed { + t.Fatalf("Expected %s on batch.Replay", ErrClosed) + } +} + +// TestIterator ... 
+func TestIterator(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + iterator := db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value2) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } +} + +// TestIteratorStart ... 
+func TestIteratorStart(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + iterator := db.NewIteratorWithStart(key2) + if iterator == nil { + t.Fatalf("db.NewIteratorWithStart returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value2) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } +} + +// TestIteratorPrefix ... 
+func TestIteratorPrefix(t *testing.T, db Database) { + key1 := []byte("hello") + value1 := []byte("world1") + + key2 := []byte("goodbye") + value2 := []byte("world2") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + iterator := db.NewIteratorWithPrefix([]byte("h")) + if iterator == nil { + t.Fatalf("db.NewIteratorWithPrefix returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } +} + +// TestIteratorStartPrefix ... 
+func TestIteratorStartPrefix(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("z") + value2 := []byte("world2") + + key3 := []byte("hello3") + value3 := []byte("world3") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key3, value3); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + iterator := db.NewIteratorWithStartAndPrefix(key1, []byte("h")) + if iterator == nil { + t.Fatalf("db.NewIteratorWithStartAndPrefix returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key3) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key3) + } else if value := iterator.Value(); !bytes.Equal(value, value3) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value3) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } +} + +// TestIteratorClosed ... 
+func TestIteratorClosed(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } + + iterator := db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != ErrClosed { + t.Fatalf("Expected %s on iterator.Error", ErrClosed) + } +} + +// TestStatNoPanic ... +func TestStatNoPanic(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("z") + value2 := []byte("world2") + + key3 := []byte("hello3") + value3 := []byte("world3") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key3, value3); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + db.Stat("") + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } + + db.Stat("") +} + +// TestCompactNoPanic ... 
+func TestCompactNoPanic(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("z") + value2 := []byte("world2") + + key3 := []byte("hello3") + value3 := []byte("world3") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } else if err := db.Put(key3, value3); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + db.Compact(nil, nil) + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } + + db.Compact(nil, nil) +} diff --git a/database/versiondb/versiondb.go b/database/versiondb/versiondb.go new file mode 100644 index 0000000..a475e76 --- /dev/null +++ b/database/versiondb/versiondb.go @@ -0,0 +1,393 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package versiondb + +import ( + "sort" + "strings" + "sync" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/nodb" +) + +// Database implements the Database interface by living on top of another +// database, writing changes to the underlying database only when commit is +// called. 
+type Database struct { + lock sync.RWMutex + mem map[string]valueDelete + db database.Database +} + +type valueDelete struct { + value []byte + delete bool +} + +// New returns a new prefixed database +func New(db database.Database) *Database { + return &Database{ + mem: make(map[string]valueDelete, memdb.DefaultSize), + db: db, + } +} + +// Has implements the database.Database interface +func (db *Database) Has(key []byte) (bool, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.mem == nil { + return false, database.ErrClosed + } + if val, has := db.mem[string(key)]; has { + return !val.delete, nil + } + return db.db.Has(key) +} + +// Get implements the database.Database interface +func (db *Database) Get(key []byte) ([]byte, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.mem == nil { + return nil, database.ErrClosed + } + if val, has := db.mem[string(key)]; has { + if val.delete { + return nil, database.ErrNotFound + } + return copyBytes(val.value), nil + } + return db.db.Get(key) +} + +// Put implements the database.Database interface +func (db *Database) Put(key, value []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + db.mem[string(key)] = valueDelete{value: value} + return nil +} + +// Delete implements the database.Database interface +func (db *Database) Delete(key []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + db.mem[string(key)] = valueDelete{delete: true} + return nil +} + +// NewBatch implements the database.Database interface +func (db *Database) NewBatch() database.Batch { return &batch{db: db} } + +// NewIterator implements the database.Database interface +func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) } + +// NewIteratorWithStart implements the database.Database interface +func (db *Database) NewIteratorWithStart(start []byte) 
database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +// NewIteratorWithPrefix implements the database.Database interface +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +// NewIteratorWithStartAndPrefix implements the database.Database interface +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.mem == nil { + return &nodb.Iterator{Err: database.ErrClosed} + } + + startString := string(start) + prefixString := string(prefix) + keys := make([]string, 0, len(db.mem)) + for key := range db.mem { + if strings.HasPrefix(key, prefixString) && key >= startString { + keys = append(keys, key) + } + } + sort.Strings(keys) // Keys need to be in sorted order + values := make([]valueDelete, 0, len(keys)) + for _, key := range keys { + values = append(values, db.mem[key]) + } + + return &iterator{ + Iterator: db.db.NewIteratorWithStartAndPrefix(start, prefix), + keys: keys, + values: values, + } +} + +// Stat implements the database.Database interface +func (db *Database) Stat(stat string) (string, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.mem == nil { + return "", database.ErrClosed + } + return db.db.Stat(stat) +} + +// Compact implements the database.Database interface +func (db *Database) Compact(start, limit []byte) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + return db.db.Compact(start, limit) +} + +// SetDatabase changes the underlying database to the specified database +func (db *Database) SetDatabase(newDB database.Database) error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + + db.db = newDB + return nil +} + +// GetDatabase returns the underlying database +func (db *Database) GetDatabase() database.Database { + 
db.lock.RLock() + defer db.lock.RUnlock() + + return db.db +} + +// Commit writes all the operations of this database to the underlying database +func (db *Database) Commit() error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + if len(db.mem) == 0 { + return nil + } + + batch := db.db.NewBatch() + for key, value := range db.mem { + if value.delete { + if err := batch.Delete([]byte(key)); err != nil { + return err + } + } else if err := batch.Put([]byte(key), value.value); err != nil { + return err + } + } + if err := batch.Write(); err != nil { + return err + } + + db.mem = make(map[string]valueDelete, memdb.DefaultSize) + return nil +} + +// Close implements the database.Database interface +func (db *Database) Close() error { + db.lock.Lock() + defer db.lock.Unlock() + + if db.mem == nil { + return database.ErrClosed + } + db.mem = nil + db.db = nil + return nil +} + +type keyValue struct { + key []byte + value []byte + delete bool +} + +type batch struct { + db *Database + writes []keyValue + size int +} + +// Put implements the Database interface +func (b *batch) Put(key, value []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), copyBytes(value), false}) + b.size += len(value) + return nil +} + +// Delete implements the Database interface +func (b *batch) Delete(key []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), nil, true}) + b.size++ + return nil +} + +// ValueSize implements the Database interface +func (b *batch) ValueSize() int { return b.size } + +// Write implements the Database interface +func (b *batch) Write() error { + b.db.lock.Lock() + defer b.db.lock.Unlock() + + if b.db.mem == nil { + return database.ErrClosed + } + + for _, kv := range b.writes { + b.db.mem[string(kv.key)] = valueDelete{ + value: kv.value, + delete: kv.delete, + } + } + return nil +} + +// Reset implements the Database interface +func (b *batch) Reset() { + b.writes = b.writes[:0] + 
b.size = 0 +} + +// Replay implements the Database interface +func (b *batch) Replay(w database.KeyValueWriter) error { + for _, kv := range b.writes { + if kv.delete { + if err := w.Delete(kv.key); err != nil { + return err + } + } else if err := w.Put(kv.key, kv.value); err != nil { + return err + } + } + return nil +} + +// iterator walks over both the in memory database and the underlying database +// at the same time. +type iterator struct { + database.Iterator + + key, value []byte + + keys []string + values []valueDelete + + initialized, exhausted bool +} + +// Next moves the iterator to the next key/value pair. It returns whether the +// iterator is exhausted. We must pay careful attention to set the proper values +// based on if the in memory db or the underlying db should be read next +func (it *iterator) Next() bool { + if !it.initialized { + it.exhausted = !it.Iterator.Next() + it.initialized = true + } + + for { + switch { + case it.exhausted && len(it.keys) == 0: + it.key = nil + it.value = nil + return false + case it.exhausted: + nextKey := it.keys[0] + nextValue := it.values[0] + + it.keys = it.keys[1:] + it.values = it.values[1:] + + if !nextValue.delete { + it.key = []byte(nextKey) + it.value = nextValue.value + return true + } + case len(it.keys) == 0: + it.key = it.Iterator.Key() + it.value = it.Iterator.Value() + it.exhausted = !it.Iterator.Next() + return true + default: + memKey := it.keys[0] + memValue := it.values[0] + + dbKey := it.Iterator.Key() + + dbStringKey := string(dbKey) + switch { + case memKey < dbStringKey: + it.keys = it.keys[1:] + it.values = it.values[1:] + + if !memValue.delete { + it.key = []byte(memKey) + it.value = memValue.value + return true + } + case dbStringKey < memKey: + it.key = dbKey + it.value = it.Iterator.Value() + it.exhausted = !it.Iterator.Next() + return true + default: + it.keys = it.keys[1:] + it.values = it.values[1:] + it.exhausted = !it.Iterator.Next() + + if !memValue.delete { + it.key = 
[]byte(memKey) + it.value = memValue.value + return true + } + } + } + } +} + +// Key implements the Iterator interface +func (it *iterator) Key() []byte { return it.key } + +// Value implements the Iterator interface +func (it *iterator) Value() []byte { return it.value } + +// Release implements the Iterator interface +func (it *iterator) Release() { + it.key = nil + it.value = nil + it.keys = nil + it.values = nil + it.Iterator.Release() +} + +func copyBytes(bytes []byte) []byte { + copiedBytes := make([]byte, len(bytes)) + copy(copiedBytes, bytes) + return copiedBytes +} diff --git a/database/versiondb/versiondb_test.go b/database/versiondb/versiondb_test.go new file mode 100644 index 0000000..ab3a9bb --- /dev/null +++ b/database/versiondb/versiondb_test.go @@ -0,0 +1,299 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package versiondb + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/memdb" +) + +func TestInterface(t *testing.T) { + for _, test := range database.Tests { + baseDB := memdb.New() + test(t, New(baseDB)) + } +} + +func TestIterate(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("z") + value2 := []byte("world2") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on db.Commit: %s", err) + } + + iterator := db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + 
t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } + + if err := db.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on database.Put: %s", err) + } + + iterator = db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value2) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } + + if err := db.Delete(key1); err != nil { + 
t.Fatalf("Unexpected error on database.Delete: %s", err) + } + + iterator = db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value2) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } + + if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on database.Commit: %s", err) + } else if err := db.Put(key2, value1); err != nil { + t.Fatalf("Unexpected error on database.Put: %s", err) + } + + iterator = db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + 
t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } + + if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on database.Commit: %s", err) + } else if err := db.Put(key1, value2); err != nil { + t.Fatalf("Unexpected error on database.Put: %s", err) + } + + iterator = db.NewIterator() + if iterator == nil { + t.Fatalf("db.NewIterator returned nil") + } + defer iterator.Release() + + if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key1) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) + } else if value := iterator.Value(); !bytes.Equal(value, value2) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) + } else if !iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) + } else if key := iterator.Key(); !bytes.Equal(key, key2) { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) + } else if value := iterator.Value(); !bytes.Equal(value, value1) { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if iterator.Next() { + t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) + } else if key := iterator.Key(); key != nil { + t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) + } else if value := iterator.Value(); value != nil { + t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) + } else if err := iterator.Error(); err != nil { + t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) + } +} + +func TestCommit(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on db.Commit: %s", err) + } + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != 
nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on db.Commit: %s", err) + } + + if value, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if value, err := baseDB.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } +} + +func TestCommitClosed(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } else if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } else if err := db.Commit(); err != database.ErrClosed { + t.Fatalf("Expected %s on db.Commit", database.ErrClosed) + } +} + +func TestCommitClosedWrite(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + baseDB.Close() + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } else if err := db.Commit(); err != database.ErrClosed { + t.Fatalf("Expected %s on db.Commit", database.ErrClosed) + } +} + +func TestCommitClosedDelete(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + + baseDB.Close() + + if err := db.Delete(key1); err != nil { + t.Fatalf("Unexpected error on db.Delete: %s", err) + } else if err := db.Commit(); err != database.ErrClosed { + t.Fatalf("Expected %s on db.Commit", database.ErrClosed) + } +} + +func TestSetDatabase(t *testing.T) { + baseDB := memdb.New() + newDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := 
db.SetDatabase(newDB); err != nil { + t.Fatalf("Unexpected error on db.SetDatabase: %s", err) + } + + if db.GetDatabase() != newDB { + t.Fatalf("Unexpected database from db.GetDatabase") + } else if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } else if err := db.Commit(); err != nil { + t.Fatalf("Unexpected error on db.Commit: %s", err) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } else if has, err := newDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, true) + } +} + +func TestSetDatabaseClosed(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + if err := db.Close(); err != nil { + t.Fatalf("Unexpected error on db.Close: %s", err) + } else if err := db.SetDatabase(memdb.New()); err != database.ErrClosed { + t.Fatalf("Expected %s on db.SetDatabase", database.ErrClosed) + } else if db.GetDatabase() != nil { + t.Fatalf("Unexpected database from db.GetDatabase") + } +} diff --git a/genesis/genesis.go b/genesis/genesis.go new file mode 100644 index 0000000..fa34a75 --- /dev/null +++ b/genesis/genesis.go @@ -0,0 +1,513 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +// TODO: Move this to a separate repo and leave only a byte array + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/vms/timestampvm" +) + +// Note that since an AVA network has exactly one Platform Chain, +// and the Platform Chain defines the genesis state of the network +// (who is staking, which chains exist, etc.), defining the genesis +// state of the Platform Chain is the same as defining the genesis +// state of the network. + +// Hardcoded network IDs +const ( + MainnetID uint32 = 1 + TestnetID uint32 = 2 + BorealisID uint32 = 2 + LocalID uint32 = 12345 + + MainnetName = "mainnet" + TestnetName = "testnet" + BorealisName = "borealis" + LocalName = "local" +) + +var ( + validNetworkName = regexp.MustCompile(`network-[0-9]+`) +) + +// Hard coded genesis constants +var ( + // Give special names to the mainnet and testnet + NetworkIDToNetworkName = map[uint32]string{ + MainnetID: MainnetName, + TestnetID: BorealisName, + LocalID: LocalName, + } + NetworkNameToNetworkID = map[string]uint32{ + MainnetName: MainnetID, + TestnetName: TestnetID, + BorealisName: BorealisID, + LocalName: LocalID, + } + Keys = []string{ + "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", + } + Addresses = []string{ + "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV", + } + ParsedAddresses = []ids.ShortID{} + StakerIDs = []string{ + "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", + "MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", + "NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", + "GWPcbFJZFfZreETSoWjPimr846mXEKCtu", + "P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", + } + ParsedStakerIDs = []ids.ShortID{} +) + +func init() { + for _, addrStr := range Addresses { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + panic(err) + } 
+ ParsedAddresses = append(ParsedAddresses, addr) + } + for _, stakerIDStr := range StakerIDs { + stakerID, err := ids.ShortFromString(stakerIDStr) + if err != nil { + panic(err) + } + ParsedStakerIDs = append(ParsedStakerIDs, stakerID) + } +} + +// NetworkName returns a human readable name for the network with +// ID [networkID] +func NetworkName(networkID uint32) string { + if name, exists := NetworkIDToNetworkName[networkID]; exists { + return name + } + return fmt.Sprintf("network-%d", networkID) +} + +// NetworkID returns the ID of the network with name [networkName] +func NetworkID(networkName string) (uint32, error) { + networkName = strings.ToLower(networkName) + if id, exists := NetworkNameToNetworkID[networkName]; exists { + return id, nil + } + + if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + return uint32(id), nil + } + if validNetworkName.MatchString(networkName) { + if id, err := strconv.Atoi(networkName[8:]); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + return uint32(id), nil + } + } + + return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) +} + +// Aliases returns the default aliases based on the network ID +func Aliases(networkID uint32) (generalAliases map[string][]string, chainAliases map[[32]byte][]string, vmAliases map[[32]byte][]string) { + generalAliases = map[string][]string{ + "vm/" + platformvm.ID.String(): []string{"vm/platform"}, + "vm/" + avm.ID.String(): []string{"vm/avm"}, + "vm/" + evm.ID.String(): []string{"vm/evm"}, + "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, + "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, + "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, + "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, + } + chainAliases = map[[32]byte][]string{ + ids.Empty.Key(): 
[]string{"P", "platform"}, + } + vmAliases = map[[32]byte][]string{ + platformvm.ID.Key(): []string{"platform"}, + avm.ID.Key(): []string{"avm"}, + evm.ID.Key(): []string{"evm"}, + spdagvm.ID.Key(): []string{"spdag"}, + spchainvm.ID.Key(): []string{"spchain"}, + timestampvm.ID.Key(): []string{"timestamp"}, + } + + genesisBytes := Genesis(networkID) + genesis := &platformvm.Genesis{} // TODO let's not re-create genesis to do aliasing + platformvm.Codec.Unmarshal(genesisBytes, genesis) // TODO check for error + genesis.Initialize() + + for _, chain := range genesis.Chains { + switch { + case avm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"X", "avm", "bc/X", "bc/avm"} + chainAliases[chain.ID().Key()] = []string{"X", "avm"} + case evm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"C", "evm", "bc/C", "bc/evm"} + chainAliases[chain.ID().Key()] = []string{"C", "evm"} + case spdagvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spdag"} + chainAliases[chain.ID().Key()] = []string{"spdag"} + case spchainvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spchain"} + chainAliases[chain.ID().Key()] = []string{"spchain"} + case timestampvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/timestamp"} + chainAliases[chain.ID().Key()] = []string{"timestamp"} + } + } + return +} + +// Genesis returns the genesis data of the Platform Chain. +// Since the Platform Chain causes the creation of all other +// chains, this function returns the genesis data of the entire network. +// The ID of the new network is [networkID]. 
+func Genesis(networkID uint32) []byte { + if networkID != LocalID { + panic("unknown network ID provided") + } + + return []byte{ + 0x00, 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, + 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, + 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x05, 0xde, 0x31, 0xb4, 0xd8, 0xb2, 0x29, 0x91, + 0xd5, 0x1a, 0xa6, 0xaa, 0x1f, 0xc7, 0x33, 0xf2, + 0x3a, 0x85, 0x1a, 0x8c, 0x94, 0x00, 0x00, 0x12, + 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, + 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, + 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, + 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xaa, 0x18, + 0xd3, 0x99, 0x1c, 0xf6, 0x37, 0xaa, 0x6c, 0x16, + 0x2f, 0x5e, 0x95, 0xcf, 0x16, 0x3f, 0x69, 0xcd, + 0x82, 0x91, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, + 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, + 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, + 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, + 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, + 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0xe9, 0x09, 0x4f, 0x73, 0x69, + 0x80, 0x02, 0xfd, 0x52, 0xc9, 0x08, 0x19, 0xb4, + 0x57, 0xb9, 0xfb, 0xc8, 0x66, 0xab, 0x80, 0x00, + 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, + 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, + 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, + 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, + 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x47, 0x9f, 0x66, 0xc8, 0xbe, 0x89, 0x58, 0x30, + 0x54, 0x7e, 0x70, 0xb4, 0xb2, 0x98, 0xca, 0xfd, + 0x43, 0x3d, 0xba, 0x6e, 0x00, 0x00, 0x12, 0x30, + 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, + 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, + 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0xf2, 0x9b, 0xce, + 0x5f, 0x34, 0xa7, 0x43, 0x01, 0xeb, 0x0d, 0xe7, + 0x16, 0xd5, 0x19, 0x4e, 0x4a, 0x4a, 0xea, 0x5d, + 0x7a, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, + 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, + 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, + 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, + 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x41, 0x56, 0x4d, 0x61, 0x76, 0x6d, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x73, + 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, + 0x66, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x03, 0x41, 0x56, 0x41, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 
0x41, 0x56, 0x41, 0x00, 0x03, 0x41, + 0x56, 0x41, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x9f, 0xdf, 0x42, 0xf6, + 0xe4, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, + 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, + 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x41, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x65, 0x76, + 0x6d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0xc9, 0x7b, 0x22, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, + 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, 0x31, 0x30, + 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, 0x73, 0x74, + 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, 0x6f, + 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, + 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, 0x74, 0x72, + 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, + 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, + 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, + 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, + 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, 0x36, 0x37, + 0x39, 0x39, 
0x61, 0x65, 0x65, 0x62, 0x65, 0x61, + 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, 0x34, 0x36, + 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, 0x63, 0x38, + 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, 0x61, 0x32, + 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, 0x30, 0x39, + 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, 0x64, 0x32, + 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, 0x35, 0x31, + 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x62, 0x79, + 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, 0x6d, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, + 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, + 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, 0x73, 0x62, + 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, + 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x22, 0x2c, + 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x35, 0x66, + 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, 0x2c, 0x22, + 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, + 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, 0x48, 0x61, + 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, 0x22, + 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, + 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, + 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x22, + 0x3a, 0x7b, 0x22, 0x37, 0x35, 0x31, 0x61, 0x30, + 0x62, 0x39, 0x36, 0x65, 0x31, 0x30, 0x34, 0x32, + 0x62, 0x65, 0x65, 0x37, 0x38, 0x39, 0x34, 0x35, + 0x32, 0x65, 0x63, 0x62, 0x32, 0x30, 0x32, 0x35, + 0x33, 0x66, 0x62, 0x61, 0x34, 0x30, 0x64, 0x62, + 0x65, 0x38, 0x35, 0x22, 0x3a, 0x7b, 0x22, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x3a, + 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, 0x32, 0x65, + 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, 0x38, 0x30, + 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, 0x22, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x67, 0x61, + 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x53, 0x69, + 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x41, 0x47, + 0x20, 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x73, 0x70, 0x64, 0x61, 0x67, 0x76, 0x6d, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, + 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, + 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, + 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x20, 0x50, 0x61, 0x79, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x73, 0x70, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x76, 0x6d, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x28, 0x00, 0x00, 0x00, + 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, + 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, + 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x17, 0x53, 0x69, 0x6d, 0x70, + 0x6c, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5d, 0xbb, 0x75, 0x80, + } +} + +// VMGenesis ... 
+func VMGenesis(networkID uint32, vmID ids.ID) *platformvm.CreateChainTx { + genesisBytes := Genesis(networkID) + genesis := platformvm.Genesis{} + platformvm.Codec.Unmarshal(genesisBytes, &genesis) + for _, chain := range genesis.Chains { + if chain.VMID.Equals(vmID) { + return chain + } + } + return nil +} diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go new file mode 100644 index 0000000..7a6c6eb --- /dev/null +++ b/genesis/genesis_test.go @@ -0,0 +1,114 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" +) + +func TestNetworkName(t *testing.T) { + if name := NetworkName(MainnetID); name != MainnetName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, MainnetName) + } + if name := NetworkName(TestnetID); name != BorealisName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, BorealisName) + } + if name := NetworkName(BorealisID); name != BorealisName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, BorealisName) + } + if name := NetworkName(4294967295); name != "network-4294967295" { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295") + } +} + +func TestNetworkID(t *testing.T) { + id, err := NetworkID(MainnetName) + if err != nil { + t.Fatal(err) + } + if id != MainnetID { + t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", MainnetID, id) + } + + id, err = NetworkID(TestnetName) + if err != nil { + t.Fatal(err) + } + if id != TestnetID { + t.Fatalf("Returned wrong network. 
Expected: %d ; Returned %d", TestnetID, id) + } + + id, err = NetworkID(BorealisName) + if err != nil { + t.Fatal(err) + } + if id != TestnetID { + t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id) + } + + id, err = NetworkID("bOrEaLiS") + if err != nil { + t.Fatal(err) + } + if id != TestnetID { + t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id) + } + + id, err = NetworkID("network-4294967295") + if err != nil { + t.Fatal(err) + } + if id != 4294967295 { + t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", 4294967295, id) + } + + id, err = NetworkID("4294967295") + if err != nil { + t.Fatal(err) + } + if id != 4294967295 { + t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", 4294967295, id) + } + + if _, err := NetworkID("network-4294967296"); err == nil { + t.Fatalf("Should have errored due to the network being too large.") + } + + if _, err := NetworkID("4294967296"); err == nil { + t.Fatalf("Should have errored due to the network being too large.") + } + + if _, err := NetworkID("asdcvasdc-252"); err == nil { + t.Fatalf("Should have errored due to the invalid input string.") + } +} + +func TestAliases(t *testing.T) { + generalAliases, _, _ := Aliases(LocalID) + if _, exists := generalAliases["vm/"+platformvm.ID.String()]; !exists { + t.Fatalf("Should have a custom alias from the vm") + } else if _, exists := generalAliases["vm/"+avm.ID.String()]; !exists { + t.Fatalf("Should have a custom alias from the vm") + } else if _, exists := generalAliases["vm/"+evm.ID.String()]; !exists { + t.Fatalf("Should have a custom alias from the vm") + } else if _, exists := generalAliases["vm/"+spdagvm.ID.String()]; !exists { + t.Fatalf("Should have a custom alias from the vm") + } else if _, exists := generalAliases["vm/"+spchainvm.ID.String()]; !exists { + t.Fatalf("Should have a custom alias from the vm") + } +} + +func TestGenesis(t *testing.T) { + genesisBytes := Genesis(LocalID) + genesis := 
platformvm.Genesis{} + if err := platformvm.Codec.Unmarshal(genesisBytes, &genesis); err != nil { + t.Fatal(err) + } +} diff --git a/ids/aliases.go b/ids/aliases.go new file mode 100644 index 0000000..c12e808 --- /dev/null +++ b/ids/aliases.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "fmt" +) + +// Aliaser allows one to give an ID aliases and lookup the aliases given to an +// ID. An ID can have arbitrarily many aliases; two IDs may not have the same +// alias. +type Aliaser struct { + dealias map[string]ID + aliases map[[32]byte][]string +} + +// Initialize the aliaser to have no aliases +func (a *Aliaser) Initialize() { + a.dealias = make(map[string]ID) + a.aliases = make(map[[32]byte][]string) +} + +// Lookup returns the ID associated with alias +func (a *Aliaser) Lookup(alias string) (ID, error) { + if ID, ok := a.dealias[alias]; ok { + return ID, nil + } + return ID{}, fmt.Errorf("there is no ID with alias %s", alias) +} + +// Aliases returns the aliases of an ID +func (a Aliaser) Aliases(id ID) []string { return a.aliases[id.Key()] } + +// PrimaryAlias returns the first alias of [id] +func (a Aliaser) PrimaryAlias(id ID) (string, error) { + aliases, exists := a.aliases[id.Key()] + if !exists || len(aliases) == 0 { + return "", fmt.Errorf("there is no alias for ID %s", id) + } + return aliases[0], nil +} + +// Alias gives [id] the alias [alias] +func (a Aliaser) Alias(id ID, alias string) error { + if _, exists := a.dealias[alias]; exists { + return fmt.Errorf("%s is already used as an alias for an ID", alias) + } + key := id.Key() + + a.dealias[alias] = id + a.aliases[key] = append(a.aliases[key], alias) + return nil +} diff --git a/ids/bag.go b/ids/bag.go new file mode 100644 index 0000000..1022489 --- /dev/null +++ b/ids/bag.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "fmt" + "strings" +) + +// Bag is a multiset of IDs. +// +// A bag has the ability to split and filter on it's bits for ease of use for +// binary voting. +type Bag struct { + counts map[[32]byte]int + size int + + mode ID + modeFreq int + + threshold int + metThreshold Set +} + +func (b *Bag) init() { + if b.counts == nil { + b.counts = make(map[[32]byte]int) + } +} + +// SetThreshold sets the number of times an ID must be added to be contained in +// the threshold set. +func (b *Bag) SetThreshold(threshold int) { + if b.threshold == threshold { + return + } + + b.threshold = threshold + b.metThreshold.Clear() + for vote, count := range b.counts { + if count >= threshold { + b.metThreshold.Add(NewID(vote)) + } + } +} + +// Add increases the number of times each id has been seen by one. +func (b *Bag) Add(ids ...ID) { + for _, id := range ids { + b.AddCount(id, 1) + } +} + +// AddCount increases the nubmer of times the id has been seen by count. +// +// count must be >= 1 +func (b *Bag) AddCount(id ID, count int) { + b.init() + + totalCount := b.counts[*id.ID] + count + b.counts[*id.ID] = totalCount + b.size += count + + if totalCount > b.modeFreq { + b.mode = id + b.modeFreq = totalCount + } + if totalCount >= b.threshold { + b.metThreshold.Add(id) + } +} + +// Count returns the number of times the id has been added. +func (b *Bag) Count(id ID) int { return b.counts[*id.ID] } + +// Len returns the number of times an id has been added. +func (b *Bag) Len() int { return b.size } + +// List returns a list of all ids that have been added. +func (b *Bag) List() []ID { + idList := []ID(nil) + for id := range b.counts { + idList = append(idList, NewID(id)) + } + return idList +} + +// Mode returns the id that has been seen the most and the number of times it +// has been seen. Ties are broken by the first id to be seen the reported number +// of times. 
+func (b *Bag) Mode() (ID, int) { return b.mode, b.modeFreq } + +// Threshold returns the ids that have been seen at least threshold times. +func (b *Bag) Threshold() Set { return b.metThreshold } + +// Filter returns the bag of ids with the same counts as this bag, except all +// the ids in the returned bag must have the same bits in the range [start, end] +// as id. +func (b *Bag) Filter(start, end int, id ID) Bag { + newBag := Bag{} + for vote, count := range b.counts { + voteID := NewID(vote) + if EqualSubset(start, end, id, voteID) { + newBag.AddCount(voteID, count) + } + } + return newBag +} + +// Split returns the bags of ids with the same counts a this bag, except all ids +// in the 0th index have a 0 at bit [index], and all ids in the 1st index have a +// 1 at bit [index]. +func (b *Bag) Split(index uint) [2]Bag { + splitVotes := [2]Bag{} + for vote, count := range b.counts { + voteID := NewID(vote) + bit := voteID.Bit(index) + splitVotes[bit].AddCount(voteID, count) + } + return splitVotes +} + +func (b *Bag) String() string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("Bag: (Size = %d)", b.Len())) + for idBytes, count := range b.counts { + id := NewID(idBytes) + sb.WriteString(fmt.Sprintf("\n ID[%s]: Count = %d", id, count)) + } + + return sb.String() +} diff --git a/ids/bag_test.go b/ids/bag_test.go new file mode 100644 index 0000000..ed35233 --- /dev/null +++ b/ids/bag_test.go @@ -0,0 +1,201 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "testing" +) + +func TestBagAdd(t *testing.T) { + id0 := Empty + id1 := NewID([32]byte{1}) + + bag := Bag{} + + if count := bag.Count(id0); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if count := bag.Count(id1); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if size := bag.Len(); size != 0 { + t.Fatalf("Bag.Len returned %d expected %d", count, 0) + } else if list := bag.List(); list != nil { + t.Fatalf("Bag.List returned %v expected %v", list, nil) + } else if mode, freq := bag.Mode(); !mode.IsZero() { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{}) + } else if freq != 0 { + t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 0) + } else if threshold := bag.Threshold(); threshold.Len() != 0 { + t.Fatalf("Bag.Threshold returned %s expected %s", threshold, Set{}) + } + + bag.Add(id0) + + if count := bag.Count(id0); count != 1 { + t.Fatalf("Bag.Count returned %d expected %d", count, 1) + } else if count := bag.Count(id1); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if size := bag.Len(); size != 1 { + t.Fatalf("Bag.Len returned %d expected %d", count, 1) + } else if list := bag.List(); len(list) != 1 { + t.Fatalf("Bag.List returned %d expected %d", len(list), 1) + } else if mode, freq := bag.Mode(); !mode.Equals(id0) { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id0) + } else if freq != 1 { + t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 1) + } else if threshold := bag.Threshold(); threshold.Len() != 1 { + t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) + } + + bag.Add(id0) + + if count := bag.Count(id0); count != 2 { + t.Fatalf("Bag.Count returned %d expected %d", count, 2) + } else if count := bag.Count(id1); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if size := bag.Len(); size != 2 { + t.Fatalf("Bag.Len returned %d expected %d", count, 2) + } 
else if list := bag.List(); len(list) != 1 { + t.Fatalf("Bag.List returned %d expected %d", len(list), 1) + } else if mode, freq := bag.Mode(); !mode.Equals(id0) { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id0) + } else if freq != 2 { + t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 2) + } else if threshold := bag.Threshold(); threshold.Len() != 1 { + t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) + } + + bag.AddCount(id1, 3) + + if count := bag.Count(id0); count != 2 { + t.Fatalf("Bag.Count returned %d expected %d", count, 2) + } else if count := bag.Count(id1); count != 3 { + t.Fatalf("Bag.Count returned %d expected %d", count, 3) + } else if size := bag.Len(); size != 5 { + t.Fatalf("Bag.Len returned %d expected %d", count, 5) + } else if list := bag.List(); len(list) != 2 { + t.Fatalf("Bag.List returned %d expected %d", len(list), 2) + } else if mode, freq := bag.Mode(); !mode.Equals(id1) { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) + } else if freq != 3 { + t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) + } else if threshold := bag.Threshold(); threshold.Len() != 2 { + t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 2) + } +} + +func TestBagSetThreshold(t *testing.T) { + id0 := Empty + id1 := NewID([32]byte{1}) + + bag := Bag{} + + bag.AddCount(id0, 2) + bag.AddCount(id1, 3) + + bag.SetThreshold(0) + + if count := bag.Count(id0); count != 2 { + t.Fatalf("Bag.Count returned %d expected %d", count, 2) + } else if count := bag.Count(id1); count != 3 { + t.Fatalf("Bag.Count returned %d expected %d", count, 3) + } else if size := bag.Len(); size != 5 { + t.Fatalf("Bag.Len returned %d expected %d", count, 5) + } else if list := bag.List(); len(list) != 2 { + t.Fatalf("Bag.List returned %d expected %d", len(list), 2) + } else if mode, freq := bag.Mode(); !mode.Equals(id1) { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) + } else if freq != 3 { + 
t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) + } else if threshold := bag.Threshold(); threshold.Len() != 2 { + t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 2) + } + + bag.SetThreshold(3) + + if count := bag.Count(id0); count != 2 { + t.Fatalf("Bag.Count returned %d expected %d", count, 2) + } else if count := bag.Count(id1); count != 3 { + t.Fatalf("Bag.Count returned %d expected %d", count, 3) + } else if size := bag.Len(); size != 5 { + t.Fatalf("Bag.Len returned %d expected %d", count, 5) + } else if list := bag.List(); len(list) != 2 { + t.Fatalf("Bag.List returned %d expected %d", len(list), 2) + } else if mode, freq := bag.Mode(); !mode.Equals(id1) { + t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) + } else if freq != 3 { + t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) + } else if threshold := bag.Threshold(); threshold.Len() != 1 { + t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) + } else if !threshold.Contains(id1) { + t.Fatalf("Bag.Threshold doesn't contain %s", id1) + } +} + +func TestBagFilter(t *testing.T) { + id0 := Empty + id1 := NewID([32]byte{1}) + id2 := NewID([32]byte{2}) + + bag := Bag{} + + bag.AddCount(id0, 1) + bag.AddCount(id1, 3) + bag.AddCount(id2, 5) + + even := bag.Filter(0, 1, id0) + + if count := even.Count(id0); count != 1 { + t.Fatalf("Bag.Count returned %d expected %d", count, 1) + } else if count := even.Count(id1); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if count := even.Count(id2); count != 5 { + t.Fatalf("Bag.Count returned %d expected %d", count, 5) + } +} + +func TestBagSplit(t *testing.T) { + id0 := Empty + id1 := NewID([32]byte{1}) + id2 := NewID([32]byte{2}) + + bag := Bag{} + + bag.AddCount(id0, 1) + bag.AddCount(id1, 3) + bag.AddCount(id2, 5) + + bags := bag.Split(0) + + evens := bags[0] + odds := bags[1] + + if count := evens.Count(id0); count != 1 { + t.Fatalf("Bag.Count returned %d expected %d", count, 
1) + } else if count := evens.Count(id1); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if count := evens.Count(id2); count != 5 { + t.Fatalf("Bag.Count returned %d expected %d", count, 5) + } else if count := odds.Count(id0); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } else if count := odds.Count(id1); count != 3 { + t.Fatalf("Bag.Count returned %d expected %d", count, 3) + } else if count := odds.Count(id2); count != 0 { + t.Fatalf("Bag.Count returned %d expected %d", count, 0) + } +} + +func TestBagString(t *testing.T) { + id0 := Empty + + bag := Bag{} + + bag.AddCount(id0, 1337) + + expected := "Bag: (Size = 1337)\n" + + " ID[11111111111111111111111111111111LpoYY]: Count = 1337" + + if bagString := bag.String(); bagString != expected { + t.Fatalf("Bag.String:\nReturned:\n%s\nExpected:\n%s", bagString, expected) + } +} diff --git a/ids/bit_set.go b/ids/bit_set.go new file mode 100644 index 0000000..f686e51 --- /dev/null +++ b/ids/bit_set.go @@ -0,0 +1,39 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "fmt" + "math/bits" +) + +// BitSet is a set that can contain uints in the range [0, 64). All functions +// are O(1). The zero value is the empty set. 
+type BitSet uint64 + +// Add [i] to the set of ints +func (bs *BitSet) Add(i uint) { *bs |= 1 << i } + +// Union adds all the elements in [s] to this set +func (bs *BitSet) Union(s BitSet) { *bs |= s } + +// Intersection takes the intersection of [s] with this set +func (bs *BitSet) Intersection(s BitSet) { *bs &= s } + +// Difference removes all the elements in [s] from this set +func (bs *BitSet) Difference(s BitSet) { *bs &^= s } + +// Remove [i] from the set of ints +func (bs *BitSet) Remove(i uint) { *bs &^= 1 << i } + +// Clear removes all elements from this set +func (bs *BitSet) Clear() { *bs = 0 } + +// Contains returns true if [i] was previously added to this set +func (bs BitSet) Contains(i uint) bool { return bs&(1< stop || stop < 0 { + return true + } + if stop >= NumBits { + return false + } + + id1Bytes := id1.Bytes() + id2Bytes := id2.Bytes() + + startIndex := start / BitsPerByte + stopIndex := stop / BitsPerByte + + // If there is a series of bytes between the first byte and the last byte, they must be equal + if startIndex+1 < stopIndex && !bytes.Equal(id1Bytes[startIndex+1:stopIndex], id2Bytes[startIndex+1:stopIndex]) { + return false + } + + startBit := uint(start % BitsPerByte) // Index in the byte that the first bit is at + stopBit := uint(stop % BitsPerByte) // Index in the byte that the last bit is at + + startMask := -1 << startBit // 111...0... The number of 0s is equal to startBit + stopMask := (1 << (stopBit + 1)) - 1 // 000...1... 
The number of 1s is equal to stopBit+1 + + if startIndex == stopIndex { + // If we are looking at the same byte, both masks need to be applied + mask := startMask & stopMask + + // The index here could be startIndex or stopIndex, as they are equal + b1 := mask & int(id1Bytes[startIndex]) + b2 := mask & int(id2Bytes[startIndex]) + + return b1 == b2 + } + + start1 := startMask & int(id1Bytes[startIndex]) + start2 := startMask & int(id2Bytes[startIndex]) + + stop1 := stopMask & int(id1Bytes[stopIndex]) + stop2 := stopMask & int(id2Bytes[stopIndex]) + + return start1 == start2 && stop1 == stop2 +} + +// FirstDifferenceSubset takes in two indices and two ids and returns the index +// of the first difference between the ids inside bit start to bit end +// (non-inclusive). Bit indices are defined above +func FirstDifferenceSubset(start, stop int, id1, id2 ID) (int, bool) { + stop-- + if start > stop || stop < 0 || stop >= NumBits { + return 0, false + } + + id1Bytes := id1.Bytes() + id2Bytes := id2.Bytes() + + startIndex := start / BitsPerByte + stopIndex := stop / BitsPerByte + + startBit := uint(start % BitsPerByte) // Index in the byte that the first bit is at + stopBit := uint(stop % BitsPerByte) // Index in the byte that the last bit is at + + startMask := -1 << startBit // 111...0... The number of 0s is equal to startBit + stopMask := (1 << (stopBit + 1)) - 1 // 000...1... 
The number of 1s is equal to stopBit+1 + + if startIndex == stopIndex { + // If we are looking at the same byte, both masks need to be applied + mask := startMask & stopMask + + // The index here could be startIndex or stopIndex, as they are equal + b1 := mask & int(id1Bytes[startIndex]) + b2 := mask & int(id2Bytes[startIndex]) + + if b1 == b2 { + return 0, false + } + + bd := b1 ^ b2 + return bits.TrailingZeros8(uint8(bd)) + startIndex*BitsPerByte, true + } + + // Check the first byte, may have some bits masked + start1 := startMask & int(id1Bytes[startIndex]) + start2 := startMask & int(id2Bytes[startIndex]) + + if start1 != start2 { + bd := start1 ^ start2 + return bits.TrailingZeros8(uint8(bd)) + startIndex*BitsPerByte, true + } + + // Check all the interior bits + for i := startIndex + 1; i < stopIndex; i++ { + b1 := int(id1Bytes[i]) + b2 := int(id2Bytes[i]) + if b1 != b2 { + bd := b1 ^ b2 + return bits.TrailingZeros8(uint8(bd)) + i*BitsPerByte, true + } + } + + // Check the last byte, may have some bits masked + stop1 := stopMask & int(id1Bytes[stopIndex]) + stop2 := stopMask & int(id2Bytes[stopIndex]) + + if stop1 != stop2 { + bd := stop1 ^ stop2 + return bits.TrailingZeros8(uint8(bd)) + stopIndex*BitsPerByte, true + } + + // No difference was found + return 0, false +} diff --git a/ids/bits_test.go b/ids/bits_test.go new file mode 100644 index 0000000..412b1c5 --- /dev/null +++ b/ids/bits_test.go @@ -0,0 +1,181 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "fmt" + "math" + "strings" + "testing" + + "github.com/ava-labs/gecko/utils/random" +) + +func flip(b uint8) uint8 { + b = b>>4 | b<<4 + b = (b&0xCC)>>2 | (b&0x33)<<2 + b = (b&0xAA)>>1 | (b&0x55)<<1 + return b +} + +func BitString(id ID) string { + sb := strings.Builder{} + for _, b := range id.Bytes() { + sb.WriteString(fmt.Sprintf("%08b", flip(b))) + } + return sb.String() +} + +func Check(start, stop int, id1, id2 ID) bool { + s1 := BitString(id1) + s2 := BitString(id2) + + shorts1 := s1[start:stop] + shorts2 := s2[start:stop] + + return shorts1 == shorts2 +} + +func TestEqualSubsetEarlyStop(t *testing.T) { + id1 := NewID([32]byte{0xf0, 0x0f}) + id2 := NewID([32]byte{0xf0, 0x1f}) + + if !EqualSubset(0, 12, id1, id2) { + t.Fatalf("Should have passed: %08b %08b == %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } else if EqualSubset(0, 13, id1, id2) { + t.Fatalf("Should not have passed: %08b %08b == %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } +} + +func TestEqualSubsetLateStart(t *testing.T) { + id1 := NewID([32]byte{0x1f, 0xf8}) + id2 := NewID([32]byte{0x10, 0x08}) + + if !EqualSubset(4, 12, id1, id2) { + t.Fatalf("Should have passed: %08b %08b == %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } +} + +func TestEqualSubsetSameByte(t *testing.T) { + id1 := NewID([32]byte{0x18}) + id2 := NewID([32]byte{0xfc}) + + if !EqualSubset(3, 5, id1, id2) { + t.Fatalf("Should have passed: %08b == %08b", id1.Bytes()[0], id2.Bytes()[0]) + } +} + +func TestEqualSubsetBadMiddle(t *testing.T) { + id1 := NewID([32]byte{0x18, 0xe8, 0x55}) + id2 := NewID([32]byte{0x18, 0x8e, 0x55}) + + if EqualSubset(0, 8*3, id1, id2) { + t.Fatalf("Should not have passed: %08b == %08b", id1.Bytes()[1], id2.Bytes()[1]) + } +} + +func TestEqualSubsetAll3Bytes(t *testing.T) { + seed := random.Rand(0, math.MaxInt64) + id1 := NewID([32]byte{}).Prefix(uint64(seed)) + bytes1 := 
id1.Bytes() + + for i := 0; i < BitsPerByte; i++ { + for j := i; j < BitsPerByte; j++ { + for k := j; k < BitsPerByte; k++ { + id2 := NewID([32]byte{uint8(i), uint8(j), uint8(k)}) + bytes2 := id2.Bytes() + + for start := 0; start < BitsPerByte*3; start++ { + for end := start; end <= BitsPerByte*3; end++ { + if EqualSubset(start, end, id1, id2) != Check(start, end, id1, id2) { + t.Fatalf("Subset failed on seed %d:\ns = %d\ne = %d\n%08b %08b %08b == %08b %08b %08b", + seed, start, end, + bytes1[0], bytes1[1], bytes1[2], + bytes2[0], bytes2[1], bytes2[2]) + } + } + } + } + } + } +} + +func TestEqualSubsetOutOfBounds(t *testing.T) { + id1 := NewID([32]byte{0x18, 0xe8, 0x55}) + id2 := NewID([32]byte{0x18, 0x8e, 0x55}) + + if EqualSubset(0, math.MaxInt32, id1, id2) { + t.Fatalf("Should not have passed") + } +} + +func TestFirstDifferenceSubsetEarlyStop(t *testing.T) { + id1 := NewID([32]byte{0xf0, 0x0f}) + id2 := NewID([32]byte{0xf0, 0x1f}) + + if _, found := FirstDifferenceSubset(0, 12, id1, id2); found { + t.Fatalf("Shouldn't have found a difference: %08b %08b == %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } else if index, found := FirstDifferenceSubset(0, 13, id1, id2); !found { + t.Fatalf("Should have found a difference: %08b %08b == %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } else if index != 12 { + t.Fatalf("Found a difference at index %d expected %d: %08b %08b == %08b %08b", index, 12, id1.Bytes()[0], id1.Bytes()[1], id2.Bytes()[0], id2.Bytes()[1]) + } +} + +func TestFirstDifferenceEqualByte4(t *testing.T) { + id1 := NewID([32]byte{0x10}) + id2 := NewID([32]byte{0x00}) + + if _, found := FirstDifferenceSubset(0, 4, id1, id2); found { + t.Fatalf("Shouldn't have found a difference: %08b == %08b", id1.Bytes()[0], id2.Bytes()[0]) + } else if index, found := FirstDifferenceSubset(0, 5, id1, id2); !found { + t.Fatalf("Should have found a difference: %08b == %08b", id1.Bytes()[0], id2.Bytes()[0]) + } 
else if index != 4 { + t.Fatalf("Found a difference at index %d expected %d: %08b == %08b", index, 4, id1.Bytes()[0], id2.Bytes()[0]) + } +} + +func TestFirstDifferenceEqualByte5(t *testing.T) { + id1 := NewID([32]byte{0x20}) + id2 := NewID([32]byte{0x00}) + + if _, found := FirstDifferenceSubset(0, 5, id1, id2); found { + t.Fatalf("Shouldn't have found a difference: %08b == %08b", id1.Bytes()[0], id2.Bytes()[0]) + } else if index, found := FirstDifferenceSubset(0, 6, id1, id2); !found { + t.Fatalf("Should have found a difference: %08b == %08b", id1.Bytes()[0], id2.Bytes()[0]) + } else if index != 5 { + t.Fatalf("Found a difference at index %d expected %d: %08b == %08b", index, 5, id1.Bytes()[0], id2.Bytes()[0]) + } +} + +func TestFirstDifferenceSubsetMiddle(t *testing.T) { + id1 := NewID([32]byte{0xf0, 0x0f, 0x11}) + id2 := NewID([32]byte{0xf0, 0x1f, 0xff}) + + if index, found := FirstDifferenceSubset(0, 24, id1, id2); !found { + t.Fatalf("Should have found a difference: %08b %08b %08b == %08b %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id1.Bytes()[2], id2.Bytes()[0], id2.Bytes()[1], id2.Bytes()[2]) + } else if index != 12 { + t.Fatalf("Found a difference at index %d expected %d: %08b %08b %08b == %08b %08b %08b", index, 12, id1.Bytes()[0], id1.Bytes()[1], id1.Bytes()[2], id2.Bytes()[0], id2.Bytes()[1], id2.Bytes()[2]) + } +} + +func TestFirstDifferenceStartMiddle(t *testing.T) { + id1 := NewID([32]byte{0x1f, 0x0f, 0x11}) + id2 := NewID([32]byte{0x0f, 0x1f, 0xff}) + + if index, found := FirstDifferenceSubset(0, 24, id1, id2); !found { + t.Fatalf("Should have found a difference: %08b %08b %08b == %08b %08b %08b", id1.Bytes()[0], id1.Bytes()[1], id1.Bytes()[2], id2.Bytes()[0], id2.Bytes()[1], id2.Bytes()[2]) + } else if index != 4 { + t.Fatalf("Found a difference at index %d expected %d: %08b %08b %08b == %08b %08b %08b", index, 4, id1.Bytes()[0], id1.Bytes()[1], id1.Bytes()[2], id2.Bytes()[0], id2.Bytes()[1], id2.Bytes()[2]) + } +} + +func 
TestFirstDifferenceVacuous(t *testing.T) { + id1 := NewID([32]byte{0xf0, 0x0f, 0x11}) + id2 := NewID([32]byte{0xf0, 0x1f, 0xff}) + + if _, found := FirstDifferenceSubset(0, 0, id1, id2); found { + t.Fatalf("Shouldn't have found a difference") + } +} diff --git a/ids/id.go b/ids/id.go new file mode 100644 index 0000000..5598dd0 --- /dev/null +++ b/ids/id.go @@ -0,0 +1,155 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "bytes" + "encoding/hex" + "sort" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Empty is a useful all zero value +var Empty = ID{ID: &[32]byte{}} + +// ID wraps a 32 byte hash as an identifier +// Internal field [ID] should never be modified +// from outside ids package +type ID struct { + ID *[32]byte `serialize:"true"` +} + +// NewID creates an identifer from a 32 byte hash +func NewID(id [32]byte) ID { return ID{ID: &id} } + +// ToID attempt to convert a byte slice into an id +func ToID(bytes []byte) (ID, error) { + addrHash, err := hashing.ToHash256(bytes) + return NewID(addrHash), err +} + +// FromString is the inverse of ID.String() +func FromString(idStr string) (ID, error) { + cb58 := formatting.CB58{} + err := cb58.FromString(idStr) + if err != nil { + return ID{}, err + } + return ToID(cb58.Bytes) +} + +// MarshalJSON ... +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsZero() { + return []byte("null"), nil + } + cb58 := formatting.CB58{Bytes: id.ID[:]} + return cb58.MarshalJSON() +} + +// UnmarshalJSON ... 
+func (id *ID) UnmarshalJSON(b []byte) error { + if string(b) == "null" { + return nil + } + cb58 := formatting.CB58{} + if err := cb58.UnmarshalJSON(b); err != nil { + return err + } + newID, err := ToID(cb58.Bytes) + if err != nil { + return err + } + *id = newID + return nil +} + +// IsZero returns true if the value has not been initialized +func (id ID) IsZero() bool { return id.ID == nil } + +// Key returns a 32 byte hash that this id represents. This is useful to allow +// for this id to be used as keys in maps. +func (id ID) Key() [32]byte { return *id.ID } + +// Prefix this id to create a more selective id. This can be used to store +// multiple values under the same key. For example: +// prefix1(id) -> confidence +// prefix2(id) -> vertex +// This will return a new id and not modify the original id. +func (id ID) Prefix(prefixes ...uint64) ID { + packer := wrappers.Packer{ + Bytes: make([]byte, len(prefixes)*wrappers.LongLen+hashing.HashLen), + } + + for _, prefix := range prefixes { + packer.PackLong(prefix) + } + packer.PackFixedBytes(id.Bytes()) + + return NewID(hashing.ComputeHash256Array(packer.Bytes)) +} + +// Equals returns true if the ids have the same byte representation +func (id ID) Equals(oID ID) bool { + return id.ID == oID.ID || + (id.ID != nil && oID.ID != nil && bytes.Equal(id.Bytes(), oID.Bytes())) +} + +// Bytes returns the 32 byte hash as a slice. It is assumed this slice is not +// modified. +func (id ID) Bytes() []byte { return id.ID[:] } + +// Bit returns the bit value at the ith index of the byte array. Returns 0 or 1 +func (id ID) Bit(i uint) int { + byteIndex := i / BitsPerByte + bitIndex := i % BitsPerByte + + bytes := id.Bytes() + b := bytes[byteIndex] + + // b = [7, 6, 5, 4, 3, 2, 1, 0] + + b = b >> bitIndex + + // b = [0, ..., bitIndex + 1, bitIndex] + // 1 = [0, 0, 0, 0, 0, 0, 0, 1] + + b = b & 1 + + // b = [0, 0, 0, 0, 0, 0, 0, bitIndex] + + return int(b) +} + +// Hex returns a hex encoded string of this id. 
+func (id ID) Hex() string { return hex.EncodeToString(id.Bytes()) } + +func (id ID) String() string { + if id.IsZero() { + return "nil" + } + bytes := id.Bytes() + cb58 := formatting.CB58{Bytes: bytes} + return cb58.String() +} + +type sortIDData []ID + +func (ids sortIDData) Less(i, j int) bool { + return bytes.Compare( + ids[i].Bytes(), + ids[j].Bytes()) == -1 +} +func (ids sortIDData) Len() int { return len(ids) } +func (ids sortIDData) Swap(i, j int) { ids[j], ids[i] = ids[i], ids[j] } + +// SortIDs sorts the ids lexicographically +func SortIDs(ids []ID) { sort.Sort(sortIDData(ids)) } + +// IsSortedAndUniqueIDs returns true if the ids are sorted and unique +func IsSortedAndUniqueIDs(ids []ID) bool { return utils.IsSortedAndUnique(sortIDData(ids)) } diff --git a/ids/id_test.go b/ids/id_test.go new file mode 100644 index 0000000..af3efa4 --- /dev/null +++ b/ids/id_test.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "bytes" + "testing" +) + +func TestID(t *testing.T) { + hash := [32]byte{24} + id := NewID(hash) + + if key := id.Key(); !bytes.Equal(hash[:], key[:]) { + t.Fatalf("ID.Key returned wrong bytes") + } + + prefixed := id.Prefix(0) + + if key := id.Key(); !bytes.Equal(hash[:], key[:]) { + t.Fatalf("ID.Prefix mutated the ID") + } + + if nextPrefix := id.Prefix(0); !prefixed.Equals(nextPrefix) { + t.Fatalf("ID.Prefix not consistant") + } + + if b := id.Bytes(); !bytes.Equal(hash[:], b) { + t.Fatalf("ID.Bytes returned wrong bytes") + } + + if str := id.String(); str != "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt" { + t.Fatalf("ID.String returned wrong string: %s", str) + } +} + +func TestIDBit(t *testing.T) { + id0 := NewID([32]byte{1 << 0}) + id1 := NewID([32]byte{1 << 1}) + id2 := NewID([32]byte{1 << 2}) + id3 := NewID([32]byte{1 << 3}) + id4 := NewID([32]byte{1 << 4}) + id5 := NewID([32]byte{1 << 5}) + id6 := NewID([32]byte{1 << 6}) + id7 := NewID([32]byte{1 << 7}) + id8 := NewID([32]byte{0, 1 << 0}) + + if id0.Bit(0) != 1 { + t.Fatalf("Wrong bit") + } else if id1.Bit(1) != 1 { + t.Fatalf("Wrong bit") + } else if id2.Bit(2) != 1 { + t.Fatalf("Wrong bit") + } else if id3.Bit(3) != 1 { + t.Fatalf("Wrong bit") + } else if id4.Bit(4) != 1 { + t.Fatalf("Wrong bit") + } else if id5.Bit(5) != 1 { + t.Fatalf("Wrong bit") + } else if id6.Bit(6) != 1 { + t.Fatalf("Wrong bit") + } else if id7.Bit(7) != 1 { + t.Fatalf("Wrong bit") + } else if id8.Bit(8) != 1 { + t.Fatalf("Wrong bit") + } +} + +func TestFromString(t *testing.T) { + key := [32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'} + id := NewID(key) + idStr := id.String() + id2, err := FromString(idStr) + if err != nil { + t.Fatal(err) + } + if id.Key() != id2.Key() { + t.Fatal("Expected FromString to be inverse of String but it wasn't") + } +} diff --git a/ids/queue.go b/ids/queue.go new file mode 100644 index 0000000..32d3411 --- /dev/null +++ b/ids/queue.go @@ -0,0 +1,52 @@ +// (c) 
2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "container/list" +) + +// QueueSet is a set of IDs stored in fifo order +type QueueSet struct { + idList *list.List +} + +func (qs *QueueSet) init() { + if qs.idList == nil { + qs.idList = list.New() + } +} + +// SetHead ... +func (qs *QueueSet) SetHead(id ID) { + qs.init() + + for qs.idList.Len() > 0 { + element := qs.idList.Front() + head := element.Value.(ID) + if head.Equals(id) { + return + } + qs.idList.Remove(element) + } + + qs.idList.PushFront(id) +} + +// Append ... +func (qs *QueueSet) Append(id ID) { + qs.init() + + qs.idList.PushBack(id) +} + +// GetTail ... +func (qs *QueueSet) GetTail() ID { + qs.init() + + if qs.idList.Len() == 0 { + return ID{} + } + return qs.idList.Back().Value.(ID) +} diff --git a/ids/set.go b/ids/set.go new file mode 100644 index 0000000..9d0b1ec --- /dev/null +++ b/ids/set.go @@ -0,0 +1,107 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "strings" +) + +// Set is a set of IDs +type Set map[[32]byte]bool + +func (ids *Set) init(size int) { + if *ids == nil { + *ids = make(map[[32]byte]bool, size) + } +} + +// Add all the ids to this set, if the id is already in the set, nothing happens +func (ids *Set) Add(idList ...ID) { + ids.init(2 * len(idList)) + for _, id := range idList { + (*ids)[*id.ID] = true + } +} + +// Union adds all the ids from the provided sets to this set. 
+func (ids *Set) Union(set Set) { + ids.init(2 * set.Len()) + for id := range set { + (*ids)[id] = true + } +} + +// Contains returns true if the set contains this id, false otherwise +func (ids *Set) Contains(id ID) bool { + ids.init(1) + return (*ids)[*id.ID] +} + +// Overlaps returns true if the intersection of the set is non-empty +func (ids *Set) Overlaps(big Set) bool { + small := *ids + if small.Len() > big.Len() { + small = big + big = *ids + } + + for _, id := range small.List() { + if big.Contains(id) { + return true + } + } + return false +} + +// Len returns the number of ids in this set +func (ids Set) Len() int { return len(ids) } + +// Remove all the id from this set, if the id isn't in the set, nothing happens +func (ids *Set) Remove(idList ...ID) { + ids.init(1) + for _, id := range idList { + delete(*ids, *id.ID) + } +} + +// Clear empties this set +func (ids *Set) Clear() { *ids = nil } + +// List converts this set into a list +func (ids Set) List() []ID { + idList := []ID(nil) + for id := range ids { + idList = append(idList, NewID(id)) + } + return idList +} + +// Equals returns true if the sets contain the same elements +func (ids Set) Equals(oIDs Set) bool { + if ids.Len() != oIDs.Len() { + return false + } + for key := range oIDs { + if !ids[key] { + return false + } + } + return true +} + +// String returns the string representation of a set +func (ids Set) String() string { + sb := strings.Builder{} + sb.WriteString("{") + first := true + for idBytes := range ids { + if !first { + sb.WriteString(", ") + } + first = false + sb.WriteString(NewID(idBytes).String()) + } + sb.WriteString("}") + return sb.String() +} diff --git a/ids/set_test.go b/ids/set_test.go new file mode 100644 index 0000000..3c7ab15 --- /dev/null +++ b/ids/set_test.go @@ -0,0 +1,57 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "testing" +) + +func TestSet(t *testing.T) { + id1 := NewID([32]byte{1}) + + ids := Set{} + + ids.Add(id1) + if !ids.Contains(id1) { + t.Fatalf("Initial value not set correctly") + } + + ids.Remove(id1) + if ids.Contains(id1) { + t.Fatalf("Value not removed correctly") + } + + ids.Add(id1) + if !ids.Contains(id1) { + t.Fatalf("Initial value not set correctly") + } else if ids.Len() != 1 { + t.Fatalf("Bad set size") + } else if list := ids.List(); len(list) != 1 { + t.Fatalf("Bad list size") + } else if !list[0].Equals(id1) { + t.Fatalf("Set value not correct") + } + + ids.Clear() + if ids.Contains(id1) { + t.Fatalf("Value not removed correctly") + } + + ids.Add(id1) + + ids2 := Set{} + + if ids.Overlaps(ids2) { + t.Fatalf("Empty set shouldn't overlap") + } + + ids2.Union(ids) + if !ids2.Contains(id1) { + t.Fatalf("Value not union added correctly") + } + + if !ids.Overlaps(ids2) { + t.Fatalf("Sets overlap") + } +} diff --git a/ids/short.go b/ids/short.go new file mode 100644 index 0000000..dd5e3f8 --- /dev/null +++ b/ids/short.go @@ -0,0 +1,121 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "bytes" + "encoding/hex" + "sort" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" +) + +// ShortEmpty is a useful all zero value +var ShortEmpty = ShortID{ID: &[20]byte{}} + +// ShortID wraps a 20 byte hash as an identifier +type ShortID struct { + ID *[20]byte `serialize:"true"` +} + +// NewShortID creates an identifer from a 20 byte hash +func NewShortID(id [20]byte) ShortID { return ShortID{ID: &id} } + +// ToShortID attempt to convert a byte slice into an id +func ToShortID(bytes []byte) (ShortID, error) { + addrHash, err := hashing.ToHash160(bytes) + return NewShortID(addrHash), err +} + +// ShortFromString is the inverse of ShortID.String() +func ShortFromString(idStr string) (ShortID, error) { + cb58 := formatting.CB58{} + err := cb58.FromString(idStr) + if err != nil { + return ShortID{}, err + } + return ToShortID(cb58.Bytes) +} + +// MarshalJSON ... +func (id ShortID) MarshalJSON() ([]byte, error) { + if id.IsZero() { + return []byte("null"), nil + } + cb58 := formatting.CB58{Bytes: id.ID[:]} + return cb58.MarshalJSON() +} + +// UnmarshalJSON ... +func (id *ShortID) UnmarshalJSON(b []byte) error { + if string(b) == "null" { + return nil + } + cb58 := formatting.CB58{} + if err := cb58.UnmarshalJSON(b); err != nil { + return err + } + newID, err := ToShortID(cb58.Bytes) + if err != nil { + return err + } + *id = newID + return nil +} + +// IsZero returns true if the value has not been initialized +func (id ShortID) IsZero() bool { return id.ID == nil } + +// LongID returns a 32 byte identifier from this id +func (id ShortID) LongID() ID { + dest := [32]byte{} + copy(dest[:], id.ID[:]) + return NewID(dest) +} + +// Key returns a 20 byte hash that this id represents. This is useful to allow +// for this id to be used as keys in maps. 
+func (id ShortID) Key() [20]byte { return *id.ID } + +// Equals returns true if the ids have the same byte representation +func (id ShortID) Equals(oID ShortID) bool { + return id.ID == oID.ID || + (id.ID != nil && oID.ID != nil && bytes.Equal(id.Bytes(), oID.Bytes())) +} + +// Bytes returns the 20 byte hash as a slice. It is assumed this slice is not +// modified. +func (id ShortID) Bytes() []byte { return id.ID[:] } + +// Hex returns a hex encoded string of this id. +func (id ShortID) Hex() string { return hex.EncodeToString(id.Bytes()) } + +func (id ShortID) String() string { + if id.IsZero() { + return "nil" + } + bytes := id.Bytes() + cb58 := formatting.CB58{Bytes: bytes} + return cb58.String() +} + +type sortShortIDData []ShortID + +func (ids sortShortIDData) Less(i, j int) bool { + return bytes.Compare( + ids[i].Bytes(), + ids[j].Bytes()) == -1 +} +func (ids sortShortIDData) Len() int { return len(ids) } +func (ids sortShortIDData) Swap(i, j int) { ids[j], ids[i] = ids[i], ids[j] } + +// SortShortIDs sorts the ids lexicographically +func SortShortIDs(ids []ShortID) { sort.Sort(sortShortIDData(ids)) } + +// IsSortedAndUniqueShortIDs returns true if the ids are sorted and unique +func IsSortedAndUniqueShortIDs(ids []ShortID) bool { + return utils.IsSortedAndUnique(sortShortIDData(ids)) +} diff --git a/ids/short_set.go b/ids/short_set.go new file mode 100644 index 0000000..690cc3a --- /dev/null +++ b/ids/short_set.go @@ -0,0 +1,102 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import "strings" + +// ShortSet is a set of ShortIDs +type ShortSet map[[20]byte]bool + +func (ids *ShortSet) init(size int) { + if *ids == nil { + *ids = make(map[[20]byte]bool, size) + } +} + +// Add all the ids to this set, if the id is already in the set, nothing happens +func (ids *ShortSet) Add(idList ...ShortID) { + ids.init(2 * len(idList)) + for _, id := range idList { + (*ids)[id.Key()] = true + } +} + +// Union adds all the ids from the provided sets to this set. +func (ids *ShortSet) Union(idSet ShortSet) { + ids.init(2 * idSet.Len()) + for id := range idSet { + (*ids)[id] = true + } +} + +// Contains returns true if the set contains this id, false otherwise +func (ids *ShortSet) Contains(id ShortID) bool { + ids.init(1) + return (*ids)[id.Key()] +} + +// Len returns the number of ids in this set +func (ids ShortSet) Len() int { return len(ids) } + +// Remove all the id from this set, if the id isn't in the set, nothing happens +func (ids *ShortSet) Remove(idList ...ShortID) { + ids.init(1) + for _, id := range idList { + delete(*ids, id.Key()) + } +} + +// Clear empties this set +func (ids *ShortSet) Clear() { *ids = nil } + +// CappedList returns a list of length at most [size]. 
Size should be >= 0 +func (ids ShortSet) CappedList(size int) []ShortID { + idList := make([]ShortID, size)[:0] + for id := range ids { + if size <= 0 { + break + } + size-- + idList = append(idList, NewShortID(id)) + } + return idList +} + +// List converts this set into a list +func (ids ShortSet) List() []ShortID { + idList := make([]ShortID, len(ids))[:0] + for id := range ids { + idList = append(idList, NewShortID(id)) + } + return idList +} + +// Equals returns true if the sets contain the same elements +func (ids ShortSet) Equals(oIDs ShortSet) bool { + if ids.Len() != oIDs.Len() { + return false + } + for key := range oIDs { + if !ids[key] { + return false + } + } + return true +} + +// String returns the string representation of a set +func (ids ShortSet) String() string { + sb := strings.Builder{} + sb.WriteString("{") + first := true + for idBytes := range ids { + if !first { + sb.WriteString(", ") + } + first = false + sb.WriteString(NewShortID(idBytes).String()) + } + sb.WriteString("}") + return sb.String() +} diff --git a/ids/short_set_test.go b/ids/short_set_test.go new file mode 100644 index 0000000..554f5d8 --- /dev/null +++ b/ids/short_set_test.go @@ -0,0 +1,239 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "strings" + "testing" +) + +func TestShortSetContains(t *testing.T) { + set := ShortSet{} + + id0 := NewShortID([20]byte{0}) + id1 := NewShortID([20]byte{1}) + + switch { + case set.Contains(id0): + t.Fatalf("Sets shouldn't contain %s", id0) + case set.Contains(id1): + t.Fatalf("Sets shouldn't contain %s", id1) + } + + set.Add(id0) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case set.Contains(id1): + t.Fatalf("Set shouldn't contain %s", id1) + } + + set.Add(id1) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } + + set.Remove(id0) + + switch { + case set.Contains(id0): + t.Fatalf("Sets shouldn't contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } + + set.Add(id0) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } +} + +func TestShortSetUnion(t *testing.T) { + set := ShortSet{} + unionSet := ShortSet{} + + id0 := NewShortID([20]byte{0}) + id1 := NewShortID([20]byte{1}) + + unionSet.Add(id0) + set.Union(unionSet) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case set.Contains(id1): + t.Fatalf("Set shouldn't contain %s", id1) + } + + unionSet.Add(id1) + set.Union(unionSet) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } + + set.Remove(id0) + + switch { + case set.Contains(id0): + t.Fatalf("Sets shouldn't contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } + + set.Clear() + set.Union(unionSet) + + switch { + case !set.Contains(id0): + t.Fatalf("Set should contain %s", id0) + case !set.Contains(id1): + t.Fatalf("Set should contain %s", id1) + } +} + +func TestShortSetEquals(t *testing.T) 
{ + set := ShortSet{} + otherSet := ShortSet{} + if !set.Equals(otherSet) { + t.Fatal("Empty sets should be equal") + } + if !otherSet.Equals(set) { + t.Fatal("Empty sets should be equal") + } + + set.Add(NewShortID([20]byte{1, 2, 3, 4, 5})) + if set.Equals(otherSet) { + t.Fatal("Sets should be unequal") + } + if otherSet.Equals(set) { + t.Fatal("Sets should be unequal") + } + + otherSet.Add(NewShortID([20]byte{1, 2, 3, 4, 5})) + if !set.Equals(otherSet) { + t.Fatal("sets should be equal") + } + if !otherSet.Equals(set) { + t.Fatal("sets should be equal") + } + + otherSet.Add(NewShortID([20]byte{6, 7, 8, 9, 10})) + if set.Equals(otherSet) { + t.Fatal("Sets should be unequal") + } + if otherSet.Equals(set) { + t.Fatal("Sets should be unequal") + } + + set.Add(NewShortID([20]byte{6, 7, 8, 9, 10})) + if !set.Equals(otherSet) { + t.Fatal("sets should be equal") + } + if !otherSet.Equals(set) { + t.Fatal("sets should be equal") + } + + otherSet.Add(NewShortID([20]byte{11, 12, 13, 14, 15})) + if set.Equals(otherSet) { + t.Fatal("Sets should be unequal") + } + if otherSet.Equals(set) { + t.Fatal("Sets should be unequal") + } + + set.Add(NewShortID([20]byte{11, 12, 13, 14, 16})) + if set.Equals(otherSet) { + t.Fatal("Sets should be unequal") + } + if otherSet.Equals(set) { + t.Fatal("Sets should be unequal") + } +} + +func TestShortSetList(t *testing.T) { + set := ShortSet{} + otherSet := ShortSet{} + + id0 := NewShortID([20]byte{0}) + id1 := NewShortID([20]byte{1}) + + set.Add(id0) + otherSet.Add(set.List()...) + + if !set.Equals(otherSet) { + t.Fatalf("Sets should be equal but are:\n%s\n%s", set, otherSet) + } + + set.Add(id1) + otherSet.Clear() + otherSet.Add(set.List()...) 
+ + if !set.Equals(otherSet) { + t.Fatalf("Sets should be equal but are:\n%s\n%s", set, otherSet) + } +} + +func TestShortSetCappedList(t *testing.T) { + set := ShortSet{} + + id := ShortEmpty + + if list := set.CappedList(0); len(list) != 0 { + t.Fatalf("List should have been empty but was %v", list) + } + + set.Add(id) + + if list := set.CappedList(0); len(list) != 0 { + t.Fatalf("List should have been empty but was %v", list) + } else if list := set.CappedList(1); len(list) != 1 { + t.Fatalf("List should have had length %d but had %d", 1, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) { + t.Fatalf("List should have been %s but was %s", id, returnedID) + } else if list := set.CappedList(2); len(list) != 1 { + t.Fatalf("List should have had length %d but had %d", 1, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) { + t.Fatalf("List should have been %s but was %s", id, returnedID) + } +} + +func TestShortSetString(t *testing.T) { + set := ShortSet{} + + id0 := NewShortID([20]byte{0}) + id1 := NewShortID([20]byte{1}) + + if str := set.String(); str != "{}" { + t.Fatalf("Set should have been %s but was %s", "{}", str) + } + + set.Add(id0) + + if str := set.String(); str != "{111111111111111111116DBWJs}" { + t.Fatalf("Set should have been %s but was %s", "{111111111111111111116DBWJs}", str) + } + + set.Add(id1) + + if str := set.String(); !strings.Contains(str, "111111111111111111116DBWJs") { + t.Fatalf("Set should have contained %s", "111111111111111111116DBWJs") + } else if !strings.Contains(str, "6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt") { + t.Fatalf("Set should have contained %s", "6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt") + } else if count := strings.Count(str, ","); count != 1 { + t.Fatalf("Should only have one %s in %s", ",", str) + } +} diff --git a/ids/unique_bag.go b/ids/unique_bag.go new file mode 100644 index 0000000..6117cb7 --- /dev/null +++ b/ids/unique_bag.go @@ -0,0 +1,94 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +import ( + "fmt" + "strings" +) + +// UniqueBag ... +type UniqueBag map[[32]byte]BitSet + +func (b *UniqueBag) init() { + if *b == nil { + *b = make(map[[32]byte]BitSet) + } +} + +// Add ... +func (b *UniqueBag) Add(setID uint, idSet ...ID) { + bs := BitSet(0) + bs.Add(setID) + + for _, id := range idSet { + b.UnionSet(id, bs) + } +} + +// UnionSet ... +func (b *UniqueBag) UnionSet(id ID, set BitSet) { + b.init() + + key := id.Key() + previousSet := (*b)[key] + previousSet.Union(set) + (*b)[key] = previousSet +} + +// DifferenceSet ... +func (b *UniqueBag) DifferenceSet(id ID, set BitSet) { + b.init() + + key := id.Key() + previousSet := (*b)[key] + previousSet.Difference(set) + (*b)[key] = previousSet +} + +// Difference ... +func (b *UniqueBag) Difference(diff *UniqueBag) { + b.init() + + for key, previousSet := range *b { + if previousSetDiff, exists := (*diff)[key]; exists { + previousSet.Difference(previousSetDiff) + } + (*b)[key] = previousSet + } +} + +// GetSet ... +func (b *UniqueBag) GetSet(id ID) BitSet { return (*b)[*id.ID] } + +// List ... +func (b *UniqueBag) List() []ID { + idList := []ID(nil) + for id := range *b { + idList = append(idList, NewID(id)) + } + return idList +} + +// Bag ... +func (b *UniqueBag) Bag(alpha int) Bag { + bag := Bag{} + bag.SetThreshold(alpha) + for id, bs := range *b { + bag.AddCount(NewID(id), bs.Len()) + } + return bag +} + +func (b *UniqueBag) String() string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("UniqueBag: (Size = %d)", len(*b))) + for idBytes, set := range *b { + id := NewID(idBytes) + sb.WriteString(fmt.Sprintf("\n ID[%s]: Members = %s", id, set)) + } + + return sb.String() +} diff --git a/ids/unique_bag_test.go b/ids/unique_bag_test.go new file mode 100644 index 0000000..79ed041 --- /dev/null +++ b/ids/unique_bag_test.go @@ -0,0 +1,106 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package ids + +import ( + "testing" +) + +func TestUniqueBag(t *testing.T) { + var ub1 UniqueBag + + ub1.init() + + if ub1 == nil { + t.Fatalf("Unique Bag still nil after initialized") + } else if len(ub1.List()) != 0 { + t.Fatalf("Unique Bag should be empty") + } + + id1 := Empty.Prefix(1) + id2 := Empty.Prefix(2) + + ub2 := make(UniqueBag) + ub2.Add(1, id1, id2) + + if !ub2.GetSet(id1).Contains(1) { + t.Fatalf("Set missing element") + } else if !ub2.GetSet(id2).Contains(1) { + t.Fatalf("Set missing element") + } + + var bs1 BitSet + bs1.Add(2) + bs1.Add(4) + + ub3 := make(UniqueBag) + + ub3.UnionSet(id1, bs1) + + bs1.Clear() + bs1 = ub3.GetSet(id1) + if bs1.Len() != 2 { + t.Fatalf("Incorrect length of set") + } else if !bs1.Contains(2) { + t.Fatalf("Set missing element") + } else if !bs1.Contains(4) { + t.Fatalf("Set missing element") + } + + // Difference test + bs1.Clear() + + ub4 := make(UniqueBag) + ub4.Add(1, id1) + ub4.Add(2, id1) + ub4.Add(5, id2) + ub4.Add(8, id2) + + ub5 := make(UniqueBag) + ub5.Add(5, id2) + ub5.Add(5, id1) + + ub4.Difference(&ub5) + + if len(ub5.List()) != 2 { + t.Fatalf("Incorrect number of ids in Unique Bag") + } + + ub4id1 := ub4.GetSet(id1) + if ub4id1.Len() != 2 { + t.Fatalf("Set of Unique Bag has incorrect length") + } else if !ub4id1.Contains(1) { + t.Fatalf("Set of Unique Bag missing element") + } else if !ub4id1.Contains(2) { + t.Fatalf("Set of Unique Bag missing element") + } + + ub4id2 := ub4.GetSet(id2) + if ub4id2.Len() != 1 { + t.Fatalf("Set of Unique Bag has incorrect length") + } else if !ub4id2.Contains(8) { + t.Fatalf("Set of Unique Bag missing element") + } + + // DifferenceSet test + + ub6 := make(UniqueBag) + ub6.Add(1, id1) + ub6.Add(2, id1) + ub6.Add(7, id1) + + diffBitSet := BitSet(0) + diffBitSet.Add(1) + diffBitSet.Add(7) + + ub6.DifferenceSet(id1, diffBitSet) + + ub6id1 := ub6.GetSet(id1) + + if ub6id1.Len() != 1 { + t.Fatalf("Set of Unique Bag missing 
element") + } else if !ub6id1.Contains(2) { + t.Fatalf("Set of Unique Bag missing element") + } +} diff --git a/keys/keys1/genCA.sh b/keys/keys1/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/keys1/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys1/genStaker.sh b/keys/keys1/genStaker.sh new file mode 100755 index 0000000..0a4b836 --- /dev/null +++ b/keys/keys1/genStaker.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys1/rootCA.crt b/keys/keys1/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/keys1/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV +BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU +m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB 
+KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei +73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC +XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU +kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/keys1/rootCA.key b/keys/keys1/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/keys1/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 +6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk +tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu 
+TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc +fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 +wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc +23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD +E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy +7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF 
+SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy ++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys1/rootCA.srl b/keys/keys1/rootCA.srl new file mode 100644 index 0000000..473c41d --- /dev/null +++ b/keys/keys1/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D14A diff --git a/keys/keys1/staker.crt b/keys/keys1/staker.crt new file mode 100644 index 0000000..b97df69 --- /dev/null +++ b/keys/keys1/staker.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFNzCCAx8CCQC687XFxtDRSjANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCTlkxDzANBgNVBAcMBkl0aGFjYTEQMA4GA1UECgwHQXZhbGFi +czEOMAwGA1UECwwFR2Vja28xDDAKBgNVBAMMA2F2YTEiMCAGCSqGSIb3DQEJARYT +c3RlcGhlbkBhdmFsYWJzLm9yZzAgFw0xOTA3MDIxNjEyMTVaGA8zMDE5MDcxMDE2 +MTIxNVowOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQKDAdBdmFs +YWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDKYSRw/W0YpYH/MTQhiFrR0m89l6yTuzLpDtjudr/5RnhIPvtqk7YIGm/m9l29 +xwR4J5r7SZGs+70yBetkbS+h7PwJ2rmWDwbrdyJKvVBhqf8kSn+VU2LePSIcJj19 +3LDyWhV1H4lqNkUkcAR76Fh9qjMvA2p0vJ66+eDLXlph/RYapQx9HgOj/0BmAKMr +YCyo5BhRih+Ougg8aK4G9PQTIA5G2wTWW2QkHxM/QppFjZd/XwQeJ2H6ubWMFc5f +ttf6AzpJvFIDBu/JDCKWiCu5m8t4GL8w2OrIx8Js19lF4YYE2eojCreqgPi64S3o +cqwKsDoySTw6/5iKQ5BUYwUXX3z7EXOqD8SMHefUKeczj4WvAaZLzR27qXm55EgR 
+YQAIX4fhmY7NfSop3Wh0Eo62+JHoM/1g+UgOXlbnWpY95Mgd7/fwDSWLu4IxE0/u +q8VufIbfC4yrY8qlTVfAffI1ldRdvJjPJBPiQ0CNrOl60LVptpkGc9shH7wZ2bP0 +bEnYKTgLAfOzD8Ut71O2AOIa80A1GNFl4Yle/MSNJOcQOSpgtWdREzIUoenAjfuz +M4OeTr4cRg4+VYTAo9KHKriN1DuewNzGd8WjKAVHmcIMjqISLTlzMhdsdm+OmfQ6 +OvyX7v0GTOBbhP09NGcww5A0gCzXN18FS5oxnxe6OG9D0wIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQAqL1TWI1PTMm3JaXkhdTBe8tsk7+FsHAFzTcBVBsB8dkJNGhxb +dlu7XIm+AyGUn0j8siz8qojKbO+rEPV/ImTH5W7Q36rXSdgvNUWpKrKIC5S8PUF5 +T4pH+lpYIlQHnTaKMuqH3nO3I40IhEhPaa2wAwy2kDlz46fJcr6aMzj6Zg43J5UK +Zid+BQsiWAUau5V7CpC7GMCx4YdOZWWsT3dAsug9hvwTe81kK1JoTH0juwPTBH0t +xUgUVIWyuweM1UwYF3n8Hmwq6B46YmujhMDKT+3lgqZt7eZ1XvieLdBRlVQWzOa/ +6QYTkrqwPZioKIStrxVGYjk40qECNodCSCIwRDgbnQubRWrdslxiIyc5blJNuOV+ +jgv5d2EeUpwUjvpZuEV7FqPKGRgiG0jfl6Psms9gYUXd+y3ytG9HeoDNmLTSTBE4 +nCQXX935P2/xOuok6CpiGpP89DX7t8yiwk8LFNnY3rvv50nVy8kerVdnfHTmoMZ9 +/IBgojSIKov4lmPKdgzFfimzhbssVCa4DO/LIhTF7bQbH1ut/Oq7npdOpMjLYIBE +9lagvRVTVFwT/uwrCcXHCb21b/puwV94SNXVwt7BheFTFBdtxJrR4jjr2T5odLkX +6nQcY8V2OT7KOxn0KVc6pl3saJTLmL+H/3CtAao9NtmuUDapKINRSVNyvg== +-----END CERTIFICATE----- diff --git a/keys/keys1/staker.csr b/keys/keys1/staker.csr new file mode 100644 index 0000000..a4d8227 --- /dev/null +++ b/keys/keys1/staker.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK +DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDKYSRw/W0YpYH/MTQhiFrR0m89l6yTuzLpDtjudr/5RnhIPvtqk7YI +Gm/m9l29xwR4J5r7SZGs+70yBetkbS+h7PwJ2rmWDwbrdyJKvVBhqf8kSn+VU2Le +PSIcJj193LDyWhV1H4lqNkUkcAR76Fh9qjMvA2p0vJ66+eDLXlph/RYapQx9HgOj +/0BmAKMrYCyo5BhRih+Ougg8aK4G9PQTIA5G2wTWW2QkHxM/QppFjZd/XwQeJ2H6 +ubWMFc5fttf6AzpJvFIDBu/JDCKWiCu5m8t4GL8w2OrIx8Js19lF4YYE2eojCreq +gPi64S3ocqwKsDoySTw6/5iKQ5BUYwUXX3z7EXOqD8SMHefUKeczj4WvAaZLzR27 +qXm55EgRYQAIX4fhmY7NfSop3Wh0Eo62+JHoM/1g+UgOXlbnWpY95Mgd7/fwDSWL +u4IxE0/uq8VufIbfC4yrY8qlTVfAffI1ldRdvJjPJBPiQ0CNrOl60LVptpkGc9sh 
+H7wZ2bP0bEnYKTgLAfOzD8Ut71O2AOIa80A1GNFl4Yle/MSNJOcQOSpgtWdREzIU +oenAjfuzM4OeTr4cRg4+VYTAo9KHKriN1DuewNzGd8WjKAVHmcIMjqISLTlzMhds +dm+OmfQ6OvyX7v0GTOBbhP09NGcww5A0gCzXN18FS5oxnxe6OG9D0wIDAQABoAAw +DQYJKoZIhvcNAQELBQADggIBAE7VplAZTEGHpYwXZvhlVg0qDsb/7IQj77eNteSU +33Dq6u7QLgS+Ea04Xv5BHnhnBoWRtrNR8WLTw64cuj6p/sqXiQsSNDgxNDPuPG+g +1FFi6wjgtoIJnx/QrITuUyO/MRy1awKLHlGfbY6yXSdLCC9bqLSIRm0tx+E+jo5C +0r5+ZOcLK8ZXWq9uHjmekX0hoN4qzsbQ0J5IeMh9ag+698aqzBSEDljLHg614yiK +FxtpD+23O0XfAdgqFgXRLLg3tt8AkVuys7r/uwHoz9du+nwW2U5nsMIYBXLV2mq3 +1KbpXDTlVwaSoA2LP8dpmvbyTgNbXsjPdS91Rrzd7fcsammcSV0aWPiXmIbTLtn8 +61ZRR0uj+jB68cRjSvegnheifsGyq6alr8OSUMdeWVyiPy2O7N6fUVj+Fmyzl5Ph +fl9UPZTmt/zOZrcSBoWjtZfmQVfw29SfMYwlNKALN4eOT6XwBLDK4uu4UXSoXwi+ +V8evUUfBWcrcXHMTIFhoZbW/b7gjhnv148XWYI0ta8pjt/akzlPLtf4ETPqfECNN +4+p2w9+R5ktzCLeceXQc8eN+ZwjIt31zG48J7Sl1wJB13VR0jPy6zDsyUIswIVfe +7gp7GHg8R0lzDpEYCvU+R7RUWK6xcpjt7+mTHM70csnnOg7uPqnXqOdtVplr0y+R +pmqJ +-----END CERTIFICATE REQUEST----- diff --git a/keys/keys1/staker.key b/keys/keys1/staker.key new file mode 100644 index 0000000..f4747a5 --- /dev/null +++ b/keys/keys1/staker.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAymEkcP1tGKWB/zE0IYha0dJvPZesk7sy6Q7Y7na/+UZ4SD77 +apO2CBpv5vZdvccEeCea+0mRrPu9MgXrZG0voez8Cdq5lg8G63ciSr1QYan/JEp/ +lVNi3j0iHCY9fdyw8loVdR+JajZFJHAEe+hYfaozLwNqdLyeuvngy15aYf0WGqUM +fR4Do/9AZgCjK2AsqOQYUYofjroIPGiuBvT0EyAORtsE1ltkJB8TP0KaRY2Xf18E +Hidh+rm1jBXOX7bX+gM6SbxSAwbvyQwilogruZvLeBi/MNjqyMfCbNfZReGGBNnq +Iwq3qoD4uuEt6HKsCrA6Mkk8Ov+YikOQVGMFF198+xFzqg/EjB3n1CnnM4+FrwGm +S80du6l5ueRIEWEACF+H4ZmOzX0qKd1odBKOtviR6DP9YPlIDl5W51qWPeTIHe/3 +8A0li7uCMRNP7qvFbnyG3wuMq2PKpU1XwH3yNZXUXbyYzyQT4kNAjazpetC1abaZ +BnPbIR+8Gdmz9GxJ2Ck4CwHzsw/FLe9TtgDiGvNANRjRZeGJXvzEjSTnEDkqYLVn +URMyFKHpwI37szODnk6+HEYOPlWEwKPShyq4jdQ7nsDcxnfFoygFR5nCDI6iEi05 +czIXbHZvjpn0Ojr8l+79BkzgW4T9PTRnMMOQNIAs1zdfBUuaMZ8XujhvQ9MCAwEA +AQKCAgEAuUM4Mt8r8bYBTPVj/ZZvXUjAYKfqacqijkrzN0kp8C4cijZtvWC+8KgS 
+7GF36vS3GK9Y5tSwMKS6y4IzvFlfk2H4T6UU41OaSA9lKvonDWCrmjNAnBgbl8pq +4U34WLGgohrpLbDTAJHxtat9z1ghOdiGxnDgEUFiJVP9/u2+25jtlTKmPhstxgEy +mK3YsSp3d5xmzq4cuXF/fJ1vQhsXHDLqHt78jKZZA+AWpIB57VXy67y1bk0rGnTK +xxRnOaOODubJgxqMEQ1WkLs1Jow9Sspd9vDghPzt4SNMzorB8YDESMib17xF6iXq +jFj6x6HB8H7mp4X3RyMYJuo2w6lpzBsEncUYpKhqMabF0I/giI5VdpSDvkCCOFen +nWZLV9Ai/x7tTq/0F+cVM69Mgfe8iYymqlfd6WRZITKfViNHALlG/Pq9yHJsz7Ng +S8BKODt/sj4Q0xLtFDT/DmpP50iq7SiS14obcKcQr8FAjM/sOY/Ulg4M8MA7EugS +pDJwLl6XDoIMMCNwZ1HGsDstzmx5Mf50bS4tbK4iZzcpPX5RBTlVdo9MTSgnFizp +Ii1NjHLuVVCSLb1OjoTgu0cQFiWEBCkC1XuoR8RCY6iWVrUH4Gezni7ckt2mJaNA +pd6/87dFKE3jh5T6jZeJMJg5skTZHSozJDuaj9pMK/JONSD06sECggEBAPq2lEmd +g1hpMIqa7ey1uoLd1zFFzlWrxTJLlu38N69mYDOHrV/zqRGOpZB+1nH7tQJIT/L1 +xLN33mFVqCrN8yUmZ+iUWioaI5JZ1jzCgemVGeBgodwP9MOZfxxrDp17oTdabaEq +7ZaBYnY8xK/4bCxu/B4mFiF3Za8ZTd/+2yev7JM+E3MorWc7rrKm1ApflfxytdhO +JLBiqOcqobI3dgHyzesVb8cT4XCpoRhdrFwort0JI7ryfddd49vMJ3ElRbnN/h4F +f24cWY/sQPq/nfDmec28Z7nVza1D4rszNylYDvzdjF0Q1mL5dFVntWbZA1CNurVw +nTfwuyQ8RF9YnYMCggEBAM6lpNeqaiG9ixKSr65pYOKtByUI3/eTT4vBnrDtYF+8 +ohiKgIymG/vJsSdrynKfwJObEy2dBYhCGF3h9z2nc9KJQD/su7wxCsdmBs7YoDiM +uzNPlRAmI0QAFILPCk48z/lUQk3r/Mzu0YzRv7fI4WSpIGAefVPDqy1uXsATDoDJ +arcEkND5Lib89Lx7r02EevJJTdhTJM8mBdRl6wpNV3xBdwis67uSyunFZYpSiMw7 +WWjIRhzhLIvpgD78UvNvuJi0UGVEjTqnxvuW3Y6sLfIk80KSR24USinT27t//x7z +yzNko75avF2hm1f8Y/EpcHHAax8NAQF5uuV9xBNvv3ECggEAdS/sRjCK2UNpvg/G +0FLtWAgrcsuHM4IzjVvJs3ml6aV3p/5uKqBw0VUUzGKNCAA4TlXQkOcRxzVrS6HH +FiLn2OCHxy24q19Gazz0p7ffE3hu/PMOFRecN+VChd0AmtnTtFTfU2sGXMgjZtLm +uL3siiRiUhFJXOE7NUolnWK5u2Y+tWBZpQVJcCx0busNx7+AEtznZLC583xaKJtD +s1K7JRQB7jU55xrC0G9pbkMysm0NtyFzgwmfipBHVlCpyvg6DCxd8FhvhN9Zea1b +fhkc0SJZorHC5hkqpydJDmlVCk0vzEAeQM4C94ZUOytbnjQnmXp14CNASYqLXteQ +ueRo0wKCAQAG0F10IxFm1WotjZqvZJgmQVBX/0frUPcxg4vpB5rC7WRm7MI6YQvR +LKBjzWEakHv4Igfq3B+fk5ZcGiRd6xSdn5r3wKWcGf3h/1JAJdJ6quFNWtVud+N3 +zYzfl1YeqFCvRwD8ssheNY3BV/U7aStNd2oy4S5+wZf2YopLSRWUV4/mQwdHbMAB +1xt2z5lDNBgdvx8LAArZrcZJb6blaxF0bnAvYAxR3hBEzxZ/DiOmoFpdYyU0tJQU 
+dPmemhFeJ5PtrRxtimohwgCEsT/TAYhuUJuY2VvznEWpxWucbicKbT2JD0t67mEB +sV9+8jqVbCliBtdBadtbohjwkkoR3gBxAoIBAG3cZuNkIWpELEbeICKouSOKN06r +Fs/UXU8roNThPR7vPtjeD1NDMmUHJr1FG4SJrSigdD8qNBg8w/G3nI0Iw7eFskk5 +8mNm21CpDzON36ZO7IDMj5uyBlj2t+Ixl/uJYhYSpuNXyUTMm+rkFJ0vdSV4fjLd +J2m30juYnMiBBJf7dz5M95+T0xicGWyV24zVYYBbSo0NHEGxqeRhikNqZNPkod6f +kfOJZGalh2KaK5RMpZpFFhZ/kW9xRWNJZyCWgkIoYkdilMuISBu3lCrk8rdMpAL0 +wHEcq8xwcgYCS2qk8HwjtmVd3gpB1y9UshMr3qnuH1wMpU5C+nM2oy3vSko= +-----END RSA PRIVATE KEY----- diff --git a/keys/keys2/genCA.sh b/keys/keys2/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/keys2/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys2/genStaker.sh b/keys/keys2/genStaker.sh new file mode 100755 index 0000000..0a4b836 --- /dev/null +++ b/keys/keys2/genStaker.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys2/rootCA.crt b/keys/keys2/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/keys2/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV 
+BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU +m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB +KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei +73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC +XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU +kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/keys2/rootCA.key b/keys/keys2/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/keys2/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 
+6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk +tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu +TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc +fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 +wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc +23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD 
+E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy +7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF +SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy ++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys2/rootCA.srl b/keys/keys2/rootCA.srl new file mode 100644 index 0000000..473c41d --- /dev/null +++ b/keys/keys2/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D14A diff --git a/keys/keys2/staker.crt b/keys/keys2/staker.crt new file mode 100644 index 0000000..a572af1 --- /dev/null +++ b/keys/keys2/staker.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFNzCCAx8CCQC687XFxtDRSjANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCTlkxDzANBgNVBAcMBkl0aGFjYTEQMA4GA1UECgwHQXZhbGFi +czEOMAwGA1UECwwFR2Vja28xDDAKBgNVBAMMA2F2YTEiMCAGCSqGSIb3DQEJARYT +c3RlcGhlbkBhdmFsYWJzLm9yZzAgFw0xOTA3MDIxNjEyMTlaGA8zMDE5MDcxMDE2 +MTIxOVowOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQKDAdBdmFs +YWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC 
+AQDdToR60na6NuR9iSAUMyzPXJNMWVQbLyT5/iZCiJ3BB4YWMBhfxpJWJiWXcM+z +nDgpJuyCEeh5Dp6ZY3Fe7k6Hht6FmFpDjwnjpQmdkEKUg00G+ElPTp/UsmsPL+JA +swPqBZWpMBS3dsXQNunMMtMGlrf5S0l6XX4y7kc/GTxYgveWZ9JtR/m2KNer+wjg +BHqJ4rPqnHB30sDYPZg91Cz1Ak8Bb2w2I108zQVgKK6eIqNKXJJ/4pizSZdU4920 +wMxYBpnfDAchnxei9U/v3QbT7eKUI2fGr+hOWTIWU80+VeOBt8a6P4sS9AQh5/6G +8qwmAqO3YQ9dxN82iu/H3+N+GGa/M0r5rEWrzwIuFhwKvyQcpPRBm2yQnBnhL9G5 +kN6n4OBM0KsgZ3CYlHZSg4eWcNgBt1WCFsQc7vfUFaJnr8QP3pF4V/4Bok7wTO5H +N0A1EYEVYuX53NGnrKVe+Fg9+xMOgXPWkUNqdvpI9ZbV3Z0S5866qF3/vBZrhgCr +Kc5E/vMexBRe8Ki4wKqONVhi9WGUcRHvFEikc+7VrPj0YaG6zVLd+uOAJN81fKOP +Yo4X4sZrMyPYl3OjGtMhfV4KvCaLEr1duOklqO6cCvGQ8iAlLVy3VJyW5GJ0D0Ky +iAir4VNdAJKo1ZgiGivJLWulTfjUifCN9o115AiqJxiqwwIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQCQOdwD7eRIxBvbQHUc+m0TRzEa17BCfck1Y2WwN3TZXDGSkPVE +0uujA8SL3qi8/CTLGRqI9U3gRZJf+tJPBF/P021PEmyaFTS4htxcDxTxuZv2jCo9 ++XhUEyvRWitTmoy1esq3mkotVQHeTmQvwCsQJAhctVA/hRdJwmMPs1B8QxOUI6Bq +SOBHa9CsXIzVOFv8FqE91PZA2ns30sKQYrrnbH99apfF5WglLUoyPwxf2e3AACh7 +beEdk45ivvKwi5Jk8nr85KDHYPlqkr0bd9Ehl8xplaNBdMPeRufqBDlztjcLJ3wo +mnrt95gQMeSoLHY3UNsIRjbj43zImu7q9v/DD9ppQpu26aRDRmBNgLZA9GM5XnbZ +RFi3VxLyqasGcSzaHwz5c7vOBOkOdlqcQzISRvWDxiN1HkAL+hkiQCuMchgORAgM +wzPooa8rfWtLIpOXMpwuVGb/8rGNLEPovoCK9z6c+WZ+zkRo4+3TQkOMY66Xht7r +Ahly3ler+Tyg6a5jXT92WKC/MXBYAy2ZQNoy204kNKevcH7R2cSkxITd3n5EacNy +5MAtCNIk7JweLCh9rLrLUBt+i4n44sP+LVhfWHemngA8CoF4n6eQ0pp0ixZTen0j +4uN0G2Nf+JeGMlqoObLWdIOdH/pbDppXGoZaKKDd7+bA74Fle5Uh7+1e3A== +-----END CERTIFICATE----- diff --git a/keys/keys2/staker.csr b/keys/keys2/staker.csr new file mode 100644 index 0000000..8b7f8c9 --- /dev/null +++ b/keys/keys2/staker.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK +DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDdToR60na6NuR9iSAUMyzPXJNMWVQbLyT5/iZCiJ3BB4YWMBhfxpJW +JiWXcM+znDgpJuyCEeh5Dp6ZY3Fe7k6Hht6FmFpDjwnjpQmdkEKUg00G+ElPTp/U 
+smsPL+JAswPqBZWpMBS3dsXQNunMMtMGlrf5S0l6XX4y7kc/GTxYgveWZ9JtR/m2 +KNer+wjgBHqJ4rPqnHB30sDYPZg91Cz1Ak8Bb2w2I108zQVgKK6eIqNKXJJ/4piz +SZdU4920wMxYBpnfDAchnxei9U/v3QbT7eKUI2fGr+hOWTIWU80+VeOBt8a6P4sS +9AQh5/6G8qwmAqO3YQ9dxN82iu/H3+N+GGa/M0r5rEWrzwIuFhwKvyQcpPRBm2yQ +nBnhL9G5kN6n4OBM0KsgZ3CYlHZSg4eWcNgBt1WCFsQc7vfUFaJnr8QP3pF4V/4B +ok7wTO5HN0A1EYEVYuX53NGnrKVe+Fg9+xMOgXPWkUNqdvpI9ZbV3Z0S5866qF3/ +vBZrhgCrKc5E/vMexBRe8Ki4wKqONVhi9WGUcRHvFEikc+7VrPj0YaG6zVLd+uOA +JN81fKOPYo4X4sZrMyPYl3OjGtMhfV4KvCaLEr1duOklqO6cCvGQ8iAlLVy3VJyW +5GJ0D0KyiAir4VNdAJKo1ZgiGivJLWulTfjUifCN9o115AiqJxiqwwIDAQABoAAw +DQYJKoZIhvcNAQELBQADggIBAM2IHKsQsebxTD50QQXtSNbyRzG/GpMZuZXn/QYO +QGW0ThJwtcmx6cqQvuyBovH5WhB9QUBFjiKkR7Qef7HUsgxU1cJA75gBfb2GMUru +Q+T37xOxtr6S2TcKOq/LvdJaTYmAHmW9V7vwEcrMRa9lWVTEmJIKTuxiUubpXtup +8OB8WLIvDikVtKtegvl6VCaTApCkUfuLhf7DERQ6sGLXWz6dVQcfvbfcXK2fn1Ik +Koxqy1SSz/rPb4u9NEk1yqvJQdpgnbTM3drTPHiIHCA7F6SjMu5tekHtVQkFOd6c +B0geEwyxY97zqnFv5YXiukXEaAnCRAlOuIZXRqtK6GFthTWo33YpB2KaRUtJ7IuP +og4Q/zjDs8DEc/qbbUbhyulExz6uoyRKO4j/gG3ESC6j09j7Eungt1LDhyt8p3wD +pytIIPkTseykO0CcEpEcGbES6d3u4PrFJ75XWxMkNZVK8mC3faxx2kJLfS1+4Fg8 +A0zbcN6qwm1ezGq2vGQcyVKyFVWJQAEAFuSO8sjW6dk3ClfE+MNGUvxTQMe96V14 +jGRICCp9aJrJXA3u0iQaUX0cXmlhegAYk7Ho/Ef3k/PcP8DzZ8Ck839oRHBw4pPv +tKbyiKnOcet7AFGwsiM2t5VLrj4jovhRLEiaXrCaxNe6j4xs63TEb+8uTCzKyktC +4BFq +-----END CERTIFICATE REQUEST----- diff --git a/keys/keys2/staker.key b/keys/keys2/staker.key new file mode 100644 index 0000000..c31bc80 --- /dev/null +++ b/keys/keys2/staker.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEA3U6EetJ2ujbkfYkgFDMsz1yTTFlUGy8k+f4mQoidwQeGFjAY +X8aSViYll3DPs5w4KSbsghHoeQ6emWNxXu5Oh4behZhaQ48J46UJnZBClINNBvhJ +T06f1LJrDy/iQLMD6gWVqTAUt3bF0DbpzDLTBpa3+UtJel1+Mu5HPxk8WIL3lmfS +bUf5tijXq/sI4AR6ieKz6pxwd9LA2D2YPdQs9QJPAW9sNiNdPM0FYCiuniKjSlyS +f+KYs0mXVOPdtMDMWAaZ3wwHIZ8XovVP790G0+3ilCNnxq/oTlkyFlPNPlXjgbfG +uj+LEvQEIef+hvKsJgKjt2EPXcTfNorvx9/jfhhmvzNK+axFq88CLhYcCr8kHKT0 
+QZtskJwZ4S/RuZDep+DgTNCrIGdwmJR2UoOHlnDYAbdVghbEHO731BWiZ6/ED96R +eFf+AaJO8EzuRzdANRGBFWLl+dzRp6ylXvhYPfsTDoFz1pFDanb6SPWW1d2dEufO +uqhd/7wWa4YAqynORP7zHsQUXvCouMCqjjVYYvVhlHER7xRIpHPu1az49GGhus1S +3frjgCTfNXyjj2KOF+LGazMj2JdzoxrTIX1eCrwmixK9XbjpJajunArxkPIgJS1c +t1ScluRidA9CsogIq+FTXQCSqNWYIhoryS1rpU341InwjfaNdeQIqicYqsMCAwEA +AQKCAgANGUOgHWrnlK4re/1JFMpXL6yMPVFMFptCrLdJAtsLfM2D7K7UpGUu8i0R +bJzujZWJYgNno3W2DJZ4j7k7HDHLtcDf+WeGTiYQskkCaXJ3ZdoeSn3UUtwE89aA +XJ4wpCfcJx53mB/xx/bnXwixjGSPJEaZW8pqkrQQgaf35R98Qawz28tJqpPuIza4 +uDALSliSZretcDr77J57bhHfvvo2Oj/A3v5xqeAv5BaoXWAQfg5aLWaCaUAOhJGP +dbk+pJazsxhSalzVsZvtikWD9focex0JFZtj2C+Qy5i6V5VzVhQULnN1vKMXqRfB +hgC7rgtgaJGWHgmRzEBF8y1EEE1fohbo2sqkG4oMz3jBZ4o4MADQcpfK2qchgrnk +OxIS/uU8szdu84iH8s6F/Hl1+87jnq6O9Re0iMSuvyUbjAEe8Cm9P/a5M1X9eyzw +WSXSPZBwKSRoP3wuycbEonTWQnQHdwySY+IvdtgliEDhKrVbZGnks5zmaaIydW/y +LS2S9JRM5Y+Xp0vV3nGlEehCUdrXoQ1Dz/AiHnWHjbxoCFGt0qL6COJziAGfUXKa +cQ5iDd7zc2J3m2Z6c8W8xkPJe+1dmNWfGHrja8DSHtTcDY6Aqd98Vu0niu8PC7bx +Avw++6J2wG7LN89rgR0uP7as9Cx4kHHsOFwp+lKODVe2dw0vAQKCAQEA7moNCjP6 +5PkSkSNPi/jw1Y/FCwBoJEzl3Q5ftwGrkYZFRLBTvCCli2jhexaC0D9+yjsVaL/2 +Vap43/xi55ipniqv8c1WAc5xFh+6hCydS6K9owDxlHN75MGLrmrYjY+3aMdo15Dm +x5bznOLLyMUW4Ak+77MTw12fad/7L0ANXumFFj6ydcS8PHmhJlmz5VegWz5b1KGQ +K//phcuOm349xekt7J5kKRbDEqLOlZv/EIAdCBQM4U3d6P/2vUUy5nKYG0F1xeaC +leVpr1EPoEI+XkTy+jjoaBs7iUHpcD359XQCWLniwf1Yfttk9zJp7m6tR/Geablk +unnH5zyFkwzlQwKCAQEA7aFtNsjL0UEXlyBYjCROoPu6ka/o4QyEaWvMHciXu+Gv +M7TQCF2i9oeQXABIyTNoQr+pNjARboY8p0+9ZfV8QGlvH6awW2MNzD07lg9hwsjY +JOCI64XxZj183GhHgN9/cE4PXBrQCqPLPCKdV66yAR9WNm9Va3Y9Xf/RvcoLiNB1 +FAg5bhbNQMnR38nPJs9+suSqYB8xADKvwmKEdony+WIM/GQyYZiDlXEj8EfWQouM +wAok6Vuhs6cuLiHHzXFR4Y6RCWRb2nf2VrzWopz2Bp02IeHY0UZsZeKnqha9dtUu +ZCIt2MZUELxih9JS+wzCX8BJk3xedi89zOZKRx4MgQKCAQEAxqnUJ9ZckIQDtrEn +zckoVayxUpOKNAVn3SXnGAXqQx8RhUUw4SiLCXnhucFuS709F6LYGisrRwMAKhST +Dc0mOcf0SJcDvgmaLgdOUmkiwS3gu31D0KHScTHeBP6/aGaDPGo9sLLruxDL+sT5 +bljc0N6jdPVR2I+hEIY1NpA3FAmefoTMDFpdSD9Jyz0gLFEyLBXwS2Q9UIy0uGqA 
+cI1nSA0f2XW6nIp9DoBfiEcu6T738g1TFkLeURNJNTn+SgzfNob7bmbAFcvOnun7 +DV1lvwPRPDRDZMycdalYrdDXAnMiqXBrxZ4oKb0DiwCVSLss5TAvAoYbq09jBgpm +e7xZJQKCAQEA3f7l0b1qs5WU3UmJj3rHvhsNY9crvzr7ZKUhLl3cathe3fY4NuiL +OrbQxTI6zURqTZlSEl57mn5roX6cGOlqZ55YAwCtVuLF3B0EUp8SHG+XhXQCVc1v +BK3CvQHqctnY62jxboFaA+abEhXgWi7I+sV0vCvsaBUxJWS9ZAmiFvFvvwQj6tYA +cFta5y9YiBBmc+etx1i8ZUv06Ksyxq7/P707Fnrgmk5p9y2YfnwODWLjXfDcJOnG +udggC1bhmusXrJmMo3KPYRybFNMbzRTHvswV6zdbX77ju5cwPXU7EQ39ZeyMWiyG +EpB7mBmEDicQW3V/Bvq0IMLngElP8PqAgQKCAQEAq4BE1PFN6hQOqe0mcO8g9mqu +zxl2MM0Kb2ABE8fxQ2w4Fy7g42NozDUW13/MN7q1I+AwMhbl4Ib2QImEMTuFaHPY +A3OZlnE9L0oi4FI+kG2eJOB/+5pHSuf/jrZ/4gARK+uc/CDeaIljP/nxw0cX+sF+ +HjX4Ob4/CyEIeIUGdOGs7g9kf+oirXryuDcZxl/2fQOxqva9dhhBLhPXG3otSp0T +D90xC1lSPLIHf+VUiF9bLMtUp4meGcgwpXPVjRV5cblLrP9PxbevlhG2D3vnOK9A +8jWI9P1uNBEAUTSmXV8reMYOyNXJH8YbbT4yiarWnaQM0J0ipWwXGEeWagv/aA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys3/genCA.sh b/keys/keys3/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/keys3/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys3/genStaker.sh b/keys/keys3/genStaker.sh new file mode 100755 index 0000000..0a4b836 --- /dev/null +++ b/keys/keys3/genStaker.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys3/rootCA.crt b/keys/keys3/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/keys3/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- 
+MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV +BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU +m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB +KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei +73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC +XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU 
+kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/keys3/rootCA.key b/keys/keys3/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/keys3/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 +6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk +tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu +TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc +fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 +wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc 
+23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD +E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy +7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF +SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy ++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys3/rootCA.srl b/keys/keys3/rootCA.srl new file mode 100644 index 0000000..473c41d --- /dev/null +++ b/keys/keys3/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D14A diff --git a/keys/keys3/staker.crt b/keys/keys3/staker.crt new file mode 100644 index 0000000..65781b0 --- /dev/null +++ b/keys/keys3/staker.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- 
+MIIFNzCCAx8CCQC687XFxtDRSjANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCTlkxDzANBgNVBAcMBkl0aGFjYTEQMA4GA1UECgwHQXZhbGFi +czEOMAwGA1UECwwFR2Vja28xDDAKBgNVBAMMA2F2YTEiMCAGCSqGSIb3DQEJARYT +c3RlcGhlbkBhdmFsYWJzLm9yZzAgFw0xOTA3MDIxNjEyMjJaGA8zMDE5MDcxMDE2 +MTIyMlowOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQKDAdBdmFs +YWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC8mVDToHbkUF2gRdVfpydZLNKeQ38d6HZFkUM3U1dWLZFSZNvagN8hlQvY/tQu +3A40p19WgKbzWZre3tg1Akw8Jztdz9gl4RMn142IIO3CiwIptkE0JopbZhmG5fAC +2n/MXQtfieI3hzeR04LW4JgLKzf3Nn8xZdlBgJfBmL5qUUnE7O7IbJGGma6gSD3e +wetE6KQZtNtf0xRIv08doZKYwTl6ItkdGK76ufqq098GVwWvA1wSune4+MFgs9N4 +eFJj6Jyt85fiK/cwPx7KRdgYgBzrZQ4EPshRnwWrBTieOOaJvAA2RMxMEYzKRrJA +AsYI1zxtNyqIUaBTcxmaz+NXUGW+wHwITic0Gp/XQm2Lwr/lxIV6OnAlL3CgbSXi +rSnoG+eHQ+vDzBAcRDkTAgv/GUIzlfqT2StTK02uIBgJYzvFTG4plHitccRfy8wx +sh5Z8xG99lmPQQtLsnlQAV+Li06Cb8CH4hUVoiWiVs5QAahqWmv5fpoX0Es26RyU +HXGbjE202pyMMA7jUerUVKMijOoGZtcH6zB4p/dJ0TtToRwOgrA7NCI9AYVtqVXr +XG/udj8ur2r1bTVwIbHsOeTEP3gY0mHRWm2E/bLjt9vbYIRUxR8xWnLkbeBziNTw +g+36jdDF+6gu3cUz/nbSn8YY+Y1jjXuM3lqF8iMaAobhuwIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQAe2kC0HjKZU+dlnU2RlfBpB4QgzzrFE5N9A8F1MlE4vV3AzCg1 +RVdHPvniXzdNhDiiflK0l/cnrFv2X1TzYMrrA677/usHf2Bw0xjm/ipHOt5V+4TN +mZAIA4IPl09gP28IZLc9xSuq4FoHeM8OTxhttOlINhqpG9P5d6bPezW6ZzI3CdPP +CF69xK4GFlj/NQnAoFogid4ojYYNTj/cM4PYQU2KbrlzLyPuUk/CgwefXLMH87/H +e3kPDev80Tjv2Pm5nD937fZfgrEoyolKxiRVcfZVMxR7qhPhizjueD0DAkfQIs7L +YVSyx/qjEv2bBYaim5RQakUeHR1Xu5Xj/k5zr33t979ede50byQrcWm4H5JxnEpD +JxJnFfDOU6o14SKGHSrao5Z4C3dI55DM84WLASnlMI5BK4XtS3notLNzG8dfWWhT +9m0Hcry+wPNDcGr8Mtj1los/0bMDqMHC4jcFW1hrXCUUs9RYzE+N/xoqwCQSgN1P +E73uXTySWj5ovMR5TPF6PhcftLB/OziqO7FverEBpvGGHUAnUT61JtjodjXPbEdj +0VgyMOBY2y53HTXnx3dxeFZkUdRX/VZYy8tMK3MTY+7UIU5cWYnCZAo5LNcc0ukR +S6WS9+6eaQ6XRjhfNUjx9a7FzqapWdtTedpipmBP1Njap3g29iUuVnLQeg== +-----END CERTIFICATE----- diff --git a/keys/keys3/staker.csr b/keys/keys3/staker.csr new file mode 100644 index 0000000..87bcd8d --- /dev/null +++ 
b/keys/keys3/staker.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK +DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQC8mVDToHbkUF2gRdVfpydZLNKeQ38d6HZFkUM3U1dWLZFSZNvagN8h +lQvY/tQu3A40p19WgKbzWZre3tg1Akw8Jztdz9gl4RMn142IIO3CiwIptkE0Jopb +ZhmG5fAC2n/MXQtfieI3hzeR04LW4JgLKzf3Nn8xZdlBgJfBmL5qUUnE7O7IbJGG +ma6gSD3ewetE6KQZtNtf0xRIv08doZKYwTl6ItkdGK76ufqq098GVwWvA1wSune4 ++MFgs9N4eFJj6Jyt85fiK/cwPx7KRdgYgBzrZQ4EPshRnwWrBTieOOaJvAA2RMxM +EYzKRrJAAsYI1zxtNyqIUaBTcxmaz+NXUGW+wHwITic0Gp/XQm2Lwr/lxIV6OnAl +L3CgbSXirSnoG+eHQ+vDzBAcRDkTAgv/GUIzlfqT2StTK02uIBgJYzvFTG4plHit +ccRfy8wxsh5Z8xG99lmPQQtLsnlQAV+Li06Cb8CH4hUVoiWiVs5QAahqWmv5fpoX +0Es26RyUHXGbjE202pyMMA7jUerUVKMijOoGZtcH6zB4p/dJ0TtToRwOgrA7NCI9 +AYVtqVXrXG/udj8ur2r1bTVwIbHsOeTEP3gY0mHRWm2E/bLjt9vbYIRUxR8xWnLk +beBziNTwg+36jdDF+6gu3cUz/nbSn8YY+Y1jjXuM3lqF8iMaAobhuwIDAQABoAAw +DQYJKoZIhvcNAQELBQADggIBAEWU13T1alCni4R36J65TrGfIljW8LGhmWRo5xoV +YW7HxxZ/WTAFSwAv0yHCGq+H/tebRZhvua+c+jP16YBDoAp5neGWW57gLDg+35H5 +guLo73p/qM6hyaUGSfyO9D1nS1QX8R0r70TQYbIrVB4uQeTM2pEYR6NYO7bjPEWr +WwC6RnbtnsNGTeoH+LwiM+uY//VB/tUe1u2y6U8HkIXJo7j4+NqUL1xXmYmC6Rph +PNI3MAZUL40z1VX7fn/Vp7+rc0CBUsFMOLfLmSgL8jsQeKuyVAQKA4xzWQ2qeuGV +Bv24rHbnSxYSu8tMs31LZPn+fsvNWB9iU7MEiTUr+8nAPEAANNaBwaD1EUkzC1WC +OcCUpMgkhVuzfHq+eXWnw3cGVvEZ8A4DzOyl1ZFofxBX7IOOv0lmpDQSeEQlmKPF +LdWI2JJM76BjeXI7l5HbOmRJv1kYFwBq/biDxCSmmNX8uHvAj1EgDNXvr/pRw7rT +6yKOLtk1GSCCrrYQijCXRx2u276+j8MtC5i6FVcUoaSYD+nx2+ApOnZlYR7xsJYw +5ECaeDagjHP472FY/fuhy/UwAIFm5gCcVFs3A2M/Iyn+vsAW5WEdh+fwGiWxfw49 +Y5KRT8u7BD0R5T5UYxYwzYekEzxsoD3bvQGx/4tboSUxkOd7pVymbuGzIsQ18heI +78pG +-----END CERTIFICATE REQUEST----- diff --git a/keys/keys3/staker.key b/keys/keys3/staker.key new file mode 100644 index 0000000..504cdfe --- /dev/null +++ b/keys/keys3/staker.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAvJlQ06B25FBdoEXVX6cnWSzSnkN/Heh2RZFDN1NXVi2RUmTb 
+2oDfIZUL2P7ULtwONKdfVoCm81ma3t7YNQJMPCc7Xc/YJeETJ9eNiCDtwosCKbZB +NCaKW2YZhuXwAtp/zF0LX4niN4c3kdOC1uCYCys39zZ/MWXZQYCXwZi+alFJxOzu +yGyRhpmuoEg93sHrROikGbTbX9MUSL9PHaGSmME5eiLZHRiu+rn6qtPfBlcFrwNc +Erp3uPjBYLPTeHhSY+icrfOX4iv3MD8eykXYGIAc62UOBD7IUZ8FqwU4njjmibwA +NkTMTBGMykayQALGCNc8bTcqiFGgU3MZms/jV1BlvsB8CE4nNBqf10Jti8K/5cSF +ejpwJS9woG0l4q0p6Bvnh0Prw8wQHEQ5EwIL/xlCM5X6k9krUytNriAYCWM7xUxu +KZR4rXHEX8vMMbIeWfMRvfZZj0ELS7J5UAFfi4tOgm/Ah+IVFaIlolbOUAGoalpr ++X6aF9BLNukclB1xm4xNtNqcjDAO41Hq1FSjIozqBmbXB+sweKf3SdE7U6EcDoKw +OzQiPQGFbalV61xv7nY/Lq9q9W01cCGx7DnkxD94GNJh0VpthP2y47fb22CEVMUf +MVpy5G3gc4jU8IPt+o3QxfuoLt3FM/520p/GGPmNY417jN5ahfIjGgKG4bsCAwEA +AQKCAgA+uHIT3yKK7VslqPO7+tfwJSLqNSI6LQvgON30sUezRjY1A4vGD+OkxG+L +O7wO1Wn4As2G9AQRm/QQOGYIwvnda2Kn4S5N8psvPdU4t1K6xwXyH0Vx9Xs/yCWn +IiL+n/GuYicdH7rWoqZNXdz+XvTRig7zrPEB2ZA143EUlhqFOwFgdzc1+j0vWT6k +2UGSKkV2xjOExQvLw2PUiaLjBM++80uNHbe8oG/YvC7rzsg10Iz4VhKxu8eDAV82 +LLegMcucpEgu5XrWYa60Idm4hR/HjhuQASx3JvXxhwQYiwT4QY4Rsi8T3S9gANok +jvxKo2F+oS3cWGNRsGu0NOwH+yjsVyMYazcLOUesAAe85ttXgYr02+Z/uMnxqtOF +gjIHY3X5QZbD4l4gbwx+PLbjsj4KC6r3yZrr51PdLUrBvoqBhqwuCksdaMntWGME +u0V/ooJi4+uzCYzN06jFfAFXa2pWzVB5yKw1d6yYi9U/bPd4xn1phLUMHrC2bvdM +H8P18gAS6rkWn+ageiWRHmkf4uoKgv3PrMjijkBaGpf6xjv6+0Q393jdVIC7wgJV +8W0i1f1Awv68089mHBEarPTv3gz39251WFCPNQhEuSy79Li5zjwOprZXS0MnJXbm +B00IPTIw51KuaoueWzY1pA2IFQ/0sH3fo2JhD0pp1gI0Dde7EQKCAQEA7RVgNelk +3H3ZJsoOfOTFa03lnXuEfTAyxhEECRz64k54pSbEWV73PIeYByZvnsrKRydpYWUP +Cp/mKhAJH4UCf2hzY0GyO7/D6+HEKZdCg6a0DNKckAoFkBfeOlLJLjLVAW2eEVxz +tlFt+/WBE90GCvE5ovXuEhXGaPxCPp5giIN5phSzSD557bwwOyPwNKFZ7Ao77UNK +kz6EzcvQgqb205SRRKGpS8/T/9LcLsUYVkBfYQ/BayjffO+cQF4vH5rB4x/8/T7t +uUa79uY+LeGHgTSFIAui9LEK5ry//2hDJINsItYMks1Qo4Suu23pOuGerjiFTKWl +mOIoFmPmbebAcwKCAQEAy6WaJczPcKQQ/hqglQtxU6VfP49ZjUKkoi+OCmtvKDts +7pJfIkuluhnYGeqFfjnopw5bgZHNFm6Gccc5CwBtN6Wk1KnnOgDIg3kYCrfjtKy/ +BSSV3kLEBvhe9EJA56mFgP7RufMbHTXhXPGLkgE7JBZj2EKxp1qGYYVZesTMFwDM +KEHwzIGcFkyZsd2jptyLYqcfDKzTHmFGcw1mdtLWAUdpv3xrS3GvrCbUMqIodjRd 
+qkrg/d/kQpK7A3oLOWfa6eBQ2BXqaWB1x13bzJ2WlshxJAZ1p1ozKii5BQ9rvwWo +muI5vd7o6A9Xsl8QzluSSSPi+NhjZ64gMBrXciRvmQKCAQB/dB5k3TP71SwITle7 +jMEVDquCHgT7yA2DrWIeBBZb0xPItS6ZXRRM1hhEv8UB+MMFvYpJcarEa3Gw6y38 +Y+UT2XMuyQKoXE9XX+e09DwtylDBE/hW9wxGio5NjHPbAjjAq81uR+Vs/hnCehkK +NKgq+cOid9OkpVAk4Hg8cagzu3qKblZzYCLsS18ibA+WO6e73USaKLLOta1vdUKC ++n92/0eZPc9lkjTGMvVrr0mGFNUxuOaiVTbQU4AMmpV6yBezol6/RjVGhWBHOz/y +KmxOaY2nzJmuMf9KS+5rwAFYf86Ca9AWm4neXlYRLOVVYjWMM5Z1vhdoOSyT3ODj +9ElBAoIBAGCRPaBxF2j9k8U7ESy8CVg10g3MxxVSJcl2rW9JdKNqUoRqykvz/Tlb +afsYF4c8pJMbHs85OTxK2tv3MZiC8kdx99CUZL4/gtW9RWZHvuV9CPPCXoLPvC7l +9fjztd1kqJb7vq3jltbqJtyw+ZMZnFbHez8gmSeXqKNz3XN3AKRjz2vDoREI4OA+ +IJ+UTzcf28TDJNkY1t/QFt0V3KG55psipwWTVTmoRjpnCzabaH5s5IGNElWwpoff +FmlWpR3qnodKxGtDMS4Y/KC2ZDUKAU+s6uG/YmkiP6LdPqckod4qK8KORf1AR8dL +BzXhGJISIDMonkeMLM8MZd0JzWIl3vkCggEAPBkExd2j4VY5s+wQJdiMto5DDoci +kAEIvIkJY9I+Pt2lpinQKAcAAXbvueaJkJpq31f6Y66uok8QnD09bIQCABjjlIve +o7qQ+H8/iqHQX1nbHDzInaDdad3jYtkWUHjHPaKg2/ktyNkFtlSHskvvCEVw5aju +80Q3tRpQG9Pe4ZRjKEzNIpMXfQksFH0KwjwAVKwYJLqZxtNEYok4dpefSIsnH/rX +pwK/pyBrFqxU6PURULUJuLqRlaIRXAU31RmJsVs2JbmI7Cbtj2TmqAOxsLsi5UeJ +cZxcTAuYCNYMu88ktHul8YJdBF3rQKUOnsgW1cx7H6LGbuPZTpg8Sbyltw== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys4/genCA.sh b/keys/keys4/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/keys4/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys4/genStaker.sh b/keys/keys4/genStaker.sh new file mode 100755 index 0000000..0a4b836 --- /dev/null +++ b/keys/keys4/genStaker.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname 
"$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys4/rootCA.crt b/keys/keys4/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/keys4/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV +BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU +m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB +KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei +73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC 
+XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU +kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/keys4/rootCA.key b/keys/keys4/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/keys4/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 +6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk +tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu +TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc +fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 
+wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc +23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD +E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy +7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF +SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy ++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys4/rootCA.srl b/keys/keys4/rootCA.srl 
new file mode 100644 index 0000000..473c41d --- /dev/null +++ b/keys/keys4/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D14A diff --git a/keys/keys4/staker.crt b/keys/keys4/staker.crt new file mode 100644 index 0000000..92128d0 --- /dev/null +++ b/keys/keys4/staker.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFNzCCAx8CCQC687XFxtDRSjANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCTlkxDzANBgNVBAcMBkl0aGFjYTEQMA4GA1UECgwHQXZhbGFi +czEOMAwGA1UECwwFR2Vja28xDDAKBgNVBAMMA2F2YTEiMCAGCSqGSIb3DQEJARYT +c3RlcGhlbkBhdmFsYWJzLm9yZzAgFw0xOTA3MDIxNjEyMjVaGA8zMDE5MDcxMDE2 +MTIyNVowOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQKDAdBdmFs +YWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDZnDoDHE2nj82xDjH0Tb7OXMqQDHz+zbLidt6MSI1XB3vOAIEiPqrtenGnqRbV +Fcm5GZxvxh4YQD8CjKSk1qgZJczs0DPSiGQ8Efl4PGO4xnEbllgL3PURPWp7mEV3 +oh6fxICgQKTBlT671EnFzB5lyJWpumRzvA1vyhBMsY8aO+xdq5LUFltYzBdvpgLX +VaDwHZQ2PQEWtF0d0JO2N0WFFDGNmx6n8pKSeIAVDsTwZCZK+FCeeEyoGfXsINsc +0yCMQslawkfOMqA9yBV3Ji6QmFYKyGYt65MWGNqPA4XrIyliKwCCXwz9mjaWyN7r +Ayw9cWlLMODNmDORWzGRZ5290MEAEIZsqjYHVitRTM/RnNIadToZGO0y5uAkM14c +mTvnsK1CP92qtfSisq75W/I91drThoEtTK78UGOl/5Q1YBR08F+tSUWZWyHeI6UO +BUCGC2bCtmzKMl7vU25lG6mbCR1JuQi6RYpnfMjXH36lV4S7fTvSwwuR03h2F3H1 +eFkWNG2lbFrW0dzDCPg3lXwmFQ65hUcQhctznoBz5C1lF2eW03wuVgxinnuVlJHj +y/GrqmWsASn1PDuVs4k7k6DJfwyHAiA0uxXrGfxYvp7H8j4+2YOmWiWl5xYgrEDj +ur5n8Zx46PHQer2Avq3sbEGEe1MCtXJlj3drd5Him3m+NQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQA40ax0dAMrbWikaJ5s6kjaGkPkYuxHNJbw047Do0hjw+ncXsxc +QDHmWcoHHpgMQCx0+vp8y+oKZ4pnqNfGSuOTo7/l05oQW/NbWw9mHwTiLMeI18/x +Ay+5LpOasw+omqWLbdbbWqL0o/RvtBdK2rkcHzTVzECgGSoxUFfZD+ck2odpH+aR +sQVu86AZVfclN2mjMyFSqMItqRcVw7rqr3Xy6FcgRQPykUnpguCEgcc9c54c1lQ9 +Zpddt4ezY7cTdk86oh7yA8QFchvtE9Zb5dJ5Vu9bdy9ig1kyscPTm+SeyhXRchUo +ql4H/czGBVMHUY41wY2VFz7HitECcTAIpS6QvcxxgYevGNjZZxyZvEA8SYpLMZyb +omk4enDTLd/xK1yF7VFodTDEyq63IAm0NTQZUVvIDfJeuzuNz55uxgdUq2RLpaJe +0bvrt9Obz+f5j2jonb2e0BuucwSdTyFXkUCxMW+piIUGkyrguAhlcHohDLEo2uB/ 
+iQ4fosGqqsl47b+TezT5pSSblkgUjiwz6eDpM4lQpx22MxsHVlxFHrcBNm0Td92v +FixrmllamAZbEz1tB//0bipKaOOZuhANJfrgN8BC6v2ahl4/SBuut09a0Azyxqpp +uCsyTnfNEd1W6c6noaq24s+7W7KKLIekuNn1NunnHqKqriEuH1xlxxPjYA== +-----END CERTIFICATE----- diff --git a/keys/keys4/staker.csr b/keys/keys4/staker.csr new file mode 100644 index 0000000..9d94e57 --- /dev/null +++ b/keys/keys4/staker.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK +DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDZnDoDHE2nj82xDjH0Tb7OXMqQDHz+zbLidt6MSI1XB3vOAIEiPqrt +enGnqRbVFcm5GZxvxh4YQD8CjKSk1qgZJczs0DPSiGQ8Efl4PGO4xnEbllgL3PUR +PWp7mEV3oh6fxICgQKTBlT671EnFzB5lyJWpumRzvA1vyhBMsY8aO+xdq5LUFltY +zBdvpgLXVaDwHZQ2PQEWtF0d0JO2N0WFFDGNmx6n8pKSeIAVDsTwZCZK+FCeeEyo +GfXsINsc0yCMQslawkfOMqA9yBV3Ji6QmFYKyGYt65MWGNqPA4XrIyliKwCCXwz9 +mjaWyN7rAyw9cWlLMODNmDORWzGRZ5290MEAEIZsqjYHVitRTM/RnNIadToZGO0y +5uAkM14cmTvnsK1CP92qtfSisq75W/I91drThoEtTK78UGOl/5Q1YBR08F+tSUWZ +WyHeI6UOBUCGC2bCtmzKMl7vU25lG6mbCR1JuQi6RYpnfMjXH36lV4S7fTvSwwuR +03h2F3H1eFkWNG2lbFrW0dzDCPg3lXwmFQ65hUcQhctznoBz5C1lF2eW03wuVgxi +nnuVlJHjy/GrqmWsASn1PDuVs4k7k6DJfwyHAiA0uxXrGfxYvp7H8j4+2YOmWiWl +5xYgrEDjur5n8Zx46PHQer2Avq3sbEGEe1MCtXJlj3drd5Him3m+NQIDAQABoAAw +DQYJKoZIhvcNAQELBQADggIBAMdZKzx/Qz07D/ISgEe10+XofO5It86g12YJBgGN +4UEnKNk1quJIs0PAwcDNp7G4BpEMuP5xjyf4q976gzAkTg2kcB+LK85eGGSxkxAt +uFQPlFvk85qn4k7wLSx2zkqs47ItvqK5Ev8lLCZ/HfIy+7y57BKqDTvzhXarE3lq +bEZketwQvDcQPN7Ho9gxDMMQDeE2NeDyYhQtCMlX8PnmBRhWZ4CExODMdm8TrTJJ +5HDoj+fXCaSSbXPN25LKYSKOEM4wtzHa91hQK7JGoeHuSS0zFxDwXNKi3sLLtKTH +jsYL/E9bH2NxKPRoHwCJMS0N2jUqnHFyhQUp8VtJlxN0IsPLZGXFapVo4fk2hDpF +OapX0kysLV37KEklVHucQclU5SeTpzoS7gYXqvOJ3Q/IFR6CFAkCHaDggWpB/sVm +OPG6Pt6XXbGNCav9+Am+0q4UD5O1Sbobx+0XJu3VtnuZdn4Mt0uBSL1DZdG9ceig +mGz4bx1kvnzhL1LOAPmxOYqrLCwqJRkRCa+25uRNqBAqWcU48pwoxC3RLyWvy2UN +8Or+TsBzPUldq8yWn3s0/zE8yui6pxzpGUD2TfbUT78O0HJKn5nQjrjVdQZhaA4t 
+KnrZCz7lIrHRXf2Hbsg/9QgHhcpkknc98z0trNQHncp/kxUvrBJyJGrUh1DEkOSe +f9p0 +-----END CERTIFICATE REQUEST----- diff --git a/keys/keys4/staker.key b/keys/keys4/staker.key new file mode 100644 index 0000000..d51233e --- /dev/null +++ b/keys/keys4/staker.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA2Zw6AxxNp4/NsQ4x9E2+zlzKkAx8/s2y4nbejEiNVwd7zgCB +Ij6q7Xpxp6kW1RXJuRmcb8YeGEA/AoykpNaoGSXM7NAz0ohkPBH5eDxjuMZxG5ZY +C9z1ET1qe5hFd6Ien8SAoECkwZU+u9RJxcweZciVqbpkc7wNb8oQTLGPGjvsXauS +1BZbWMwXb6YC11Wg8B2UNj0BFrRdHdCTtjdFhRQxjZsep/KSkniAFQ7E8GQmSvhQ +nnhMqBn17CDbHNMgjELJWsJHzjKgPcgVdyYukJhWCshmLeuTFhjajwOF6yMpYisA +gl8M/Zo2lsje6wMsPXFpSzDgzZgzkVsxkWedvdDBABCGbKo2B1YrUUzP0ZzSGnU6 +GRjtMubgJDNeHJk757CtQj/dqrX0orKu+VvyPdXa04aBLUyu/FBjpf+UNWAUdPBf +rUlFmVsh3iOlDgVAhgtmwrZsyjJe71NuZRupmwkdSbkIukWKZ3zI1x9+pVeEu307 +0sMLkdN4dhdx9XhZFjRtpWxa1tHcwwj4N5V8JhUOuYVHEIXLc56Ac+QtZRdnltN8 +LlYMYp57lZSR48vxq6plrAEp9Tw7lbOJO5OgyX8MhwIgNLsV6xn8WL6ex/I+PtmD +plolpecWIKxA47q+Z/GceOjx0Hq9gL6t7GxBhHtTArVyZY93a3eR4pt5vjUCAwEA +AQKCAgBMoBNZZwz9FMkEMJBsizfF6Ky3Pn6BJqN31Q2WbjG+1HbG2iyeh1ye1L/S +ntrYW5y1ngwU27lbJrxJRIbxOFjmygW32bR1zOsmr9mdef5PYSkQ4sbMHpj44hxt +uvezIZYRAhuc0kZxmAEIGL+Fc9O8WX5Bzs1yZ2R/2bIVn2xZe4JGlZTVM64kvXD/ +MoDLnG5YPsIiuyZ3/TjQt9JblmjXbH3qdBW+Y88y3lWTlKjKUSmeuoOA2bF8e++5 +nvQo2TsbyKSoXcL1G6SLPLo6Q2qgJdQeZeR9BPe9DzFerInqe24mEChUv+2OG1Bf +lgnQzUQ1uoquHF78Zjy6UVdJ8Sd8ufvKC9rz8JYsIynfw0gQC3F8/emm1QSabFvY +tG4+x0K8FgrijjE08RvqgIndx9ftCNoN4u3lXxPrJhKpr2xuXSa4VZbumgN7fqWx +UBC8lmPQi5VZmj3nJfj4datmBTvs1dOLRMdfdtTFz+cAdWNZxX3HOLZUSqMVWgXY +kX0s7IV9GnyUntBktX+IEbWlAttzldyqF9md4avjKXQ+Y4PK/sR1yWsuvtiZdYUL +/QrQHX0CsVv1hRcX0yekA0a8qwaGmxEcndEKv7wF1i626jc2fDR6qI1yp20Xl3Si +kYBSNh7VK210XIhddSuVxW5/gyNnFABDfp1bSdTh5ZJRfNvtQQKCAQEA9Zipnyu8 +JKlLtWrh2iP9pmFBaeUgoF68IVfd6IXdV7nAHSWysiC43SCUmI63xluMYEJFdAZ+ +m/iRc/n6VFKEBW4+Ujk9VW1E1iqHgSABg7ntEsB2MDcYY0qKsN4CYjC2fNYO97zJ +5oju84U3Qn8TWNkMsrUU7crm2oAQd08AizVFqLo1d8aIzRq+tl952S/lhfXKc/P9 
+kfhl+RKjiYC2zbWnGinxc2Nbf5pWwnmtSrceng+ZkgVfSB3HvSckqzENye9YkpVM +GE+KjEdss+QnGQRWM2JPlyoYDmhT6rrasRT6TKsecwo1rRXBi4C1eTZQSnZf24Og +QurS//XzHzbnkQKCAQEA4tQSmaJPZNWYxOHnlivzselfJYyg5JyJVMQWw/7CvTcV +GOpoD4hC25puAniT1U/RyaYaUFZ+Ip2FhK2Qce+uskNgtqFN9phh/DeO1g8CSaIe +6Ebtg8N8GLc0YhdiJtd2XGrktj2XthML7OJPYIidd48tGuQizfijo4Fe1S0rSW56 +B4RHTh/O6a0taNeFbnZQJD52ha9wlnc/PZSCUMb9C0d08dSxdBQV+SVdGrl/IRfC +qHHoC86GYDcmnviD5CFOxpx7AJ/hQAwPFQRCnWGHwDjpcoMOtktyo7pj9MDuzBUb +kr4r1ei8f7PC9dmSYmYzJMQxLfz+Ti2SyyOmdM1CZQKCAQEAsVr4izCLIrJ7MNyp +kt1QzDkJgw5q/ETNeQq5/rPE/xfty16w5//HYDCp/m15+y2bdtwEyd/yyHG9oFIS +W5hnLIDLUpdxWmKZRkvaJP5W+ahnspX4A6OV4gYvl8ALWpsw/X+buX3FE80pOgSm +vkeEUjIUAG3SWlKfWYUH3xDXJLBoyIsIF6HwoqVAufTCynvTNWUlOY0mPaZzBWZX +YPHpkS4wKS3G5nwG1GRBaRlzcjRBUQWU8iUdBLg0yL0ett2qxnwoq1pTZG70b48Y +yePl9CP0mBDTxycnzie7ChS73wt2Ia2lRJBH6OGALlzZMFpvqwZG/P/V2N05WIxl +cNI2cQKCAQEAoys7VhlUU4zzoG2BUp27aDggobpP4yRYBgoo9kTFgaemHY5B3SqA +LckhadWjQsdwekZql3AgvHXkHlVcmxl36fReFgJjOwjTM8QjlAin9KAS67RaF3cA +RidEH2wCxz4nfsPGUvJruCZrZbRGtYKRA/iS0c1a3CAIVw4xUdh0UxaN4epeAO0Q +wzg4ejrPWW7yp5/nUrOpohOWAo5aUBFU5lA4593A6WephthB6X+W3A9jkBigfB3M +vFnwBltvRSRQrr7SHNjmCFSkZNHzuZL3PGe0RxPP+YK8rNrgHKjNHzHv69exYOdS +8eo2TPR+QRqTn9ciKZrctRBDkK3MiCk/oQKCAQAZIZdkOClUPHfSk4x5nBXashKY +gDzeyYHYLwNaBdEKgHNuf6jCltKWoDzZsqrv1Nya/148sTgSTg931bbch+lnHKJd +cXrCQZWBnu2UquisFMeNOvpp0cPt4tIYDZVCRMRrwIlZqIJxb2nAwFvb0fEfLk+4 +gmu+3cCaN/vS3oJA9EFkzjxG0XiLOynyAZb5fY04NmFOIsq3rgT4DeCurHTKtOJ2 +t14oTNq06LD566OnT6plL7vaLtTR/9/qJc007Wjw8QdbTuQALqCjWWg2b7BVkOyR +o9GrhPzSeT6nBHI8EoJv0nxeQWNDX9pZiW/1nsyuAAFJ9ISbDWjz/TwB17UL +-----END RSA PRIVATE KEY----- diff --git a/keys/keys5/genCA.sh b/keys/keys5/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/keys5/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys5/genStaker.sh 
b/keys/keys5/genStaker.sh new file mode 100755 index 0000000..0a4b836 --- /dev/null +++ b/keys/keys5/genStaker.sh @@ -0,0 +1,6 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys5/rootCA.crt b/keys/keys5/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/keys5/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV +BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU +m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB +KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei 
+73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC +XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU +kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/keys5/rootCA.key b/keys/keys5/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/keys5/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 +6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk +tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu +TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc 
+fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 +wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc +23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD +E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy +7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF +SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy 
++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/keys5/rootCA.srl b/keys/keys5/rootCA.srl new file mode 100644 index 0000000..473c41d --- /dev/null +++ b/keys/keys5/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D14A diff --git a/keys/keys5/staker.crt b/keys/keys5/staker.crt new file mode 100644 index 0000000..e50294d --- /dev/null +++ b/keys/keys5/staker.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFNzCCAx8CCQC687XFxtDRSjANBgkqhkiG9w0BAQsFADB/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCTlkxDzANBgNVBAcMBkl0aGFjYTEQMA4GA1UECgwHQXZhbGFi +czEOMAwGA1UECwwFR2Vja28xDDAKBgNVBAMMA2F2YTEiMCAGCSqGSIb3DQEJARYT +c3RlcGhlbkBhdmFsYWJzLm9yZzAgFw0xOTA3MDIxNjEyMjlaGA8zMDE5MDcxMDE2 +MTIyOVowOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQKDAdBdmFs +YWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQDgK5r5vdHtJFEgw7hGE/lzKaHcvwzr32armq0k9tYchJXfT3k1j1lXtBAdcUN3 +gSRKjgzH/vjbn0ea3AiDCUd2Mck/n0KcJZ43S5I7ZjP7rbav296bKCZ1Hr7r5gXY +Fhk+3aUsVfDUqAPBwyP8KeV31ARVA/s+WPeWqs69QXTdyJuBYE5pr40v1Sf+ebUI +nZ37uGY3kiO0Ex/JgcoQsGJzrWD/ztbRCFIvrdNJZd0pGvMlmTKp7XsMR3cpvqk7 +70//MLCdyGW/1IArTSuD1vd7mBX1JyVXKycYN0vIOtbgxPOFutUyqDOeP7o51q4i +PS3dCRgfmn/hWLwy+CtJe0BGKsb4tk0tKxo0se8v9JA8mUtnmzmMt4Y9jijOrCOB +7XwWKmJYEm8N5Ubcy6cp2oL8vQVtzz3PXrkFt+3cFt1jrjdpQYgH4jykkWDeOjEf +y1FCwzsNRudLTvLhfLn86/ZT4cLZ9JI7/WW0IPC8Fc7lhznJ+bIQUeEndaGdgVkx +uEg0MxdrMr0jU0IFoXySRXNRzcDWZShEjBTv7tnFxLmoNU+uJb/KpMH6sRYi3zs8 +5ecaMKNyG+LDmBahUlHx5hKAH49O8855+AMhsg91ONZJldjQX0oZrIKzK5BpsqeT +l4c2Yt/fALiZaeFk1pBEsvVeMOBCIuWE+b4UIEaLAOhxfwIDAQABMA0GCSqGSIb3 +DQEBCwUAA4ICAQB+2VXnqRqfG7H2/K0lgzxT+X9r1u+YDn0EaUGAG71s70Qnqbpn +X7tBmCKLN6XgPL0HrN933nwiYrmfb8S33zZ7kw8GJDvaTamLNyem4/8qTBQmnRwe 
+6rQ7SY2l73Ig87mR0WTi+rTnTTtc66+/jLtFeaj0Ycl9hBZXHKiULSGhsbUbwtkz +iuNlANhoNKXNIABRImUq6OwYhEQN0DwHXj79wkpyDYjKZwHuEZUknc8Pl2oQPBke +mil3tsrvGRkwhisnXX7tqh6rWKVZNJkO68hy7XO9aTXjbcB/7Y1K83ISNEyGPsH/ +pwFyd/j8O4modwh7Ulww1/hwcqnqiEFE3KzxX2pMh7VxeAmX2t5eXFZOlRx1lecM +XRkVu19lYDKQHGSrGxng+BFlSOB96e5kXIbuIXKpPAACoBQ/JZYbtHks9H8OtNYO +P2joqmnQ9wGkE5co1Ii//j2tuoCRCpK86mmbTlyNYvK+1/kkKcsaiiWXNrQsrIDZ +BFs0FwX5g24OP5+brxTlRZE01R6St8lQj4IUwAcIzG8fFmMCWaYavrCZTeYaEiyF +A0X2VA/vZ7x9D5P9Z5OakMhrMW+hJTYrpH1rm6KR7B26iU2kJRxTX7xQ9lrksqfB +7lX+q0iheeYA4cHbGJNWwWgd+FQsK/PTeiyr4rfqututdWA0IxoLRc3XFw== +-----END CERTIFICATE----- diff --git a/keys/keys5/staker.csr b/keys/keys5/staker.csr new file mode 100644 index 0000000..418de02 --- /dev/null +++ b/keys/keys5/staker.csr @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK +DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDgK5r5vdHtJFEgw7hGE/lzKaHcvwzr32armq0k9tYchJXfT3k1j1lX +tBAdcUN3gSRKjgzH/vjbn0ea3AiDCUd2Mck/n0KcJZ43S5I7ZjP7rbav296bKCZ1 +Hr7r5gXYFhk+3aUsVfDUqAPBwyP8KeV31ARVA/s+WPeWqs69QXTdyJuBYE5pr40v +1Sf+ebUInZ37uGY3kiO0Ex/JgcoQsGJzrWD/ztbRCFIvrdNJZd0pGvMlmTKp7XsM +R3cpvqk770//MLCdyGW/1IArTSuD1vd7mBX1JyVXKycYN0vIOtbgxPOFutUyqDOe +P7o51q4iPS3dCRgfmn/hWLwy+CtJe0BGKsb4tk0tKxo0se8v9JA8mUtnmzmMt4Y9 +jijOrCOB7XwWKmJYEm8N5Ubcy6cp2oL8vQVtzz3PXrkFt+3cFt1jrjdpQYgH4jyk +kWDeOjEfy1FCwzsNRudLTvLhfLn86/ZT4cLZ9JI7/WW0IPC8Fc7lhznJ+bIQUeEn +daGdgVkxuEg0MxdrMr0jU0IFoXySRXNRzcDWZShEjBTv7tnFxLmoNU+uJb/KpMH6 +sRYi3zs85ecaMKNyG+LDmBahUlHx5hKAH49O8855+AMhsg91ONZJldjQX0oZrIKz +K5BpsqeTl4c2Yt/fALiZaeFk1pBEsvVeMOBCIuWE+b4UIEaLAOhxfwIDAQABoAAw +DQYJKoZIhvcNAQELBQADggIBAMWzSdk6C53ijuLYOt77BAYpxnxRvKA1tsxJd9K5 ++R+ZGiuTLXWArgClhhwx65OTeqrwkVlh2CZoVTBeEMzvxn6zHO4S20KcWJ1lWU76 +OIrBZrAecBVXB5LmttUkvlMgVlWLQuVpVJiEn1jW6KeABqWKCLz7Au8TzHfr1HQ4 +1ukndu3SsKVwSIy0ZHFpQaXvzA8f0V93ap9R2RVw9BXqCJDe+BtQPvlCwOrvQ7d3 
+gg+3aymbqUx3hrscEvd1ETad7LyFw3QfPcr1j1FwPH+K1/UDrWxIzxmO+HM5Lh8f +269aYceysgv/xa/KpANTxVAM7j1SE1CjjI5e5CQJVZ+gtAqTIv3lLkk0dWQksObN +Z1tTtJkFAUNbGsMadtVeTmx2eBcRi4LEv0DIPyyWUQTWwTYtaMFi8I0bYPk1T/fV +9umR6jqZ0l1qdiuLYOSYUx4iI5SAmCrA/kEINOj0u2gqqkxdOgUVsuKqer4w9Iyt +qOhhOHwctRo+cIhpVwcF2ouJeNrFqoBzOgHKQxBvcDWJM8ra5GCNIvD3MP4Q63hy +b4fkBcYwb1B2ETH9nSDtfW+JLjt70rvf6IxAiXRRiOv4fPzaUlK49NRVgjzx5Iu+ +8Zq4+I+S6qZOROWsOVSpJu44VvNZd5bMB9dEHnkoGxkPjo8pkC/o0uZbxsnZScSL +WGxS +-----END CERTIFICATE REQUEST----- diff --git a/keys/keys5/staker.key b/keys/keys5/staker.key new file mode 100644 index 0000000..82c668b --- /dev/null +++ b/keys/keys5/staker.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA4Cua+b3R7SRRIMO4RhP5cymh3L8M699mq5qtJPbWHISV3095 +NY9ZV7QQHXFDd4EkSo4Mx/74259HmtwIgwlHdjHJP59CnCWeN0uSO2Yz+622r9ve +mygmdR6+6+YF2BYZPt2lLFXw1KgDwcMj/Cnld9QEVQP7Plj3lqrOvUF03cibgWBO +aa+NL9Un/nm1CJ2d+7hmN5IjtBMfyYHKELBic61g/87W0QhSL63TSWXdKRrzJZky +qe17DEd3Kb6pO+9P/zCwnchlv9SAK00rg9b3e5gV9SclVysnGDdLyDrW4MTzhbrV +Mqgznj+6OdauIj0t3QkYH5p/4Vi8MvgrSXtARirG+LZNLSsaNLHvL/SQPJlLZ5s5 +jLeGPY4ozqwjge18FipiWBJvDeVG3MunKdqC/L0Fbc89z165Bbft3BbdY643aUGI +B+I8pJFg3joxH8tRQsM7DUbnS07y4Xy5/Ov2U+HC2fSSO/1ltCDwvBXO5Yc5yfmy +EFHhJ3WhnYFZMbhINDMXazK9I1NCBaF8kkVzUc3A1mUoRIwU7+7ZxcS5qDVPriW/ +yqTB+rEWIt87POXnGjCjchviw5gWoVJR8eYSgB+PTvPOefgDIbIPdTjWSZXY0F9K +GayCsyuQabKnk5eHNmLf3wC4mWnhZNaQRLL1XjDgQiLlhPm+FCBGiwDocX8CAwEA +AQKCAgEApuMPrxmH7Xn6A+BxkYpRTVETNZnt7rQUZXDzse8pm3WBdgxeemdL5iUh +Uin+RjuYXwC9ty606hv8XOeuVo9T6kRKRNk157WBwjy6kwoVbSr4NJgFc5FCgDLx +hAFtHF/nT4wG6ajZcBfdJCU45wPx13G5/+jE5LerKzniS7ctX+d3Daw69CdDfva7 +nZHSGqXs9Xdkcb6UYf1SztuXKTGHOgM7kXXVKy18sg5AnAX/zhhIKBeTRjqMPqn9 +ptBQgVQ6RAtlkTGdvmBfQt1ipfYlrJee0THhdLGlmzufaWOUkSVO/qIHEn1yYD+l +TmXqoYbWXBXnJbAJwCQlh/SFlWDyiWWOxszxdwwT2ybw7OR3a0DEV0MbKJkUexyF +92Lr3qoBSZRFQnXVvBgjQOwnzEFph1ANuGY3odL8JSM1tHniIsCs4WhDPOsbAj+h +kwS51colMk3bNCZ3xeArjMLBVLgT7xLX/7ZYc7/oTEFWik+20TvSEWzdE1N/4gfJ 
+jEU/VqrnNjyev2w9Ak6bEkwZFLS6VZ9rTWTF9jk8C1aXj/RhfaaC33xXBbhn9HuX +lTu/JaLMp0Qc4aClqUYM6LlxIejH5b8fIxCNHJislXJDa6a6aQl85BiQODPFxVT5 +WCpQD4858EuLdX4BRW2fIGRY6DivR6uJRAmxLf+EwAg/rgTzUsECggEBAPSkHX5F +BhRgudF0MnwN+enj4SoXHhRG+DTorxO1Zh2qN9lnXO9nMKMCXVJLIVvGFuiMRSJ0 +VKf1u0UqaBF02MbIvbei7mzkkW0/74m04X37iyMmtnmooQ0GEV84oONwAt3DeeTg +vIpOtq9V26XHGaQDxcRFMFBuD02a2yf3JYkXj74i2scMP4xxMHMkJxGK9FSBOhnp +k/p0hMl3FVGfo5Ns5T1Rl3pMueEF3B5+BvrV1z14IN/0lwuhujrUUYS4Ew+Pk5zC +FSubfIQMqST1jvXXTaGgX0GPffa4lxgaDEATLewvL3Fjy27Xzl57i9ZvTNC4yFad +4okjr/eItHtKVHECggEBAOqUKww/6uiJMNvc2NxLUMxuzB07rqOZKT2VMBkG5Gzk +v81fDtlndD8cwHSqOLKscH/QKXD7WK3FCuvZSvMwCjEB4Pp1zgwJoBexuXvFDDbs +0T77Qiwe+2WmRIiYev5aRG3lnBMM8RDS/QPzEdoxHdzrFURYVl0rv5l/7rwB2Zd6 +xAYHcUpZc4ZaysEgqQCuZQqC7Mrq7qfByUthH28Yicz1978fpE3dx15ceqjU9jBQ +xUUwbeKT/UkQQvmYHdtgwEjhzVQL1OAAWkT6RssMqx2RAdi0SqWPFEhxNPHBpG9B +lKUDBBIM6du916On0Bjghh3WhxQKpTIzveNAiexbXO8CggEBANvJohGyc37VU7wg +18ZqTA/cwostD8IJ7K6kKb7cJy0Zo2l3mqAfJiwdULhBdWvdMPGmK+qDdxcbBy9h +pPOh9avJ5+BWyjwcsabkXRFr53ZnCp7/BcuRO3fW7r6Mwsby+DBCkX2Whuz/QNOP +oHF0yc138jKeMoTgDHGdYa2rNhbPiz24VLOlhmZnvq6DWXJCU7akDw3+swq9qhrS +GN4nPS+TEvUfG6ctzYWj3RmsAhtTCThZd7edKCK0HvsBi2dgdQdy55xbJefynlCI +i2IAF3s4/q7pxQrCntmNB3oI1N6wHH7n+Yi2rqsbyXVLK9vwTKPsj1h6Km8pF8ud +DwEBS5ECggEAMnq2FMnAbE/xgq6wwB85APUq2XOZbj0sYcMz+X7BMym6mKBHGsOn +gVlXlQN4dgKjpu2NrXF5MNPBOOWmulRxLQChgGRPdcmweMjXCGpr6XnmwW3iXIpC +QSqZfueJOCkGpruNbZAQZDVzGyF4iwKc0YiJKA72btBWR9r+7dhcEbvqaP27BGvh +b10kWpEDrVDaD3wDJtuNhe4uuhjpYcffB4s6yBcwDU2XdJfkEWban6UR/oSgcOy1 +yb5FG17/tdDJMCXfQKHXKmkJA+TzzQgp3o/w3MhXc+8pRzmNUiUAlKyBJ01R1+yN +eqsMt3wKTQAr/EnJAagUyovV5gxiYcl7YwKCAQAdOYcZx/l//O0Ftm6wpXGRjakN +IHFcP2i7mUW76Ewnj1iFa9Yv7pgbbBD9S1SMuetfIWcqSjDiUaymnDdA18NVYUYv +lhlUJ6kwdVusejqfcn+75Jf87BvWdIVGrNxPdB7Z/lmbWxFqyZi00R90UGBntaMu +zg/ibrLgatzA9SKgoWXm2bLt6bbXefmOgnZXyw8Qko70Xxtx5eBR1BDAQjDis81n +Lg96sJ3LOn7SXHfxJ3BtXshTJAoBFx6EpmulgNoPWIkJtd7XWYP6Yy22D+kK7OhH +Rq3CiYMtDmZoub/kVBL0MVdSm7hn1TSVTHjFoW6cwQ37iKHjkZVRwX1Kzt0B +-----END RSA PRIVATE 
KEY----- diff --git a/keys/mykey/genCA.sh b/keys/mykey/genCA.sh new file mode 100755 index 0000000..14a0f4c --- /dev/null +++ b/keys/mykey/genCA.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -ex + +openssl genrsa -out `dirname "$0"`/rootCA.key 4096 +openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/mykey/genStaker.sh b/keys/mykey/genStaker.sh new file mode 100755 index 0000000..d955767 --- /dev/null +++ b/keys/mykey/genStaker.sh @@ -0,0 +1,13 @@ +#!/bin/sh +set -ex + +keypath=$GOPATH/src/github.com/ava-labs/gecko/keys/mykey + +if test -f "$keypath/staker.key" || test -f "$keypath/staker.crt"; then + echo "staker.key or staker.crt already exists. Not generating new key/certificiate." + exit 1 +fi + +openssl genrsa -out `dirname "$0"`/staker.key 4096 +openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr +openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/mykey/rootCA.crt b/keys/mykey/rootCA.crt new file mode 100644 index 0000000..da6320a --- /dev/null +++ b/keys/mykey/rootCA.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB +dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN +AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw +MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM +Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV +BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg +jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 +QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU 
+m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 +lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB +KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW +cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 +RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH +bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW +T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB +J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU +KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei +73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E +BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj +FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG +XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY +omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv +Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC +XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 +gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn +3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N +W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s +scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU +kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD +DB8IRfWqBx2nWw== +-----END CERTIFICATE----- diff --git a/keys/mykey/rootCA.key b/keys/mykey/rootCA.key new file mode 100644 index 0000000..fe23a96 --- /dev/null +++ b/keys/mykey/rootCA.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 +6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ +mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h +Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL +AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk 
+tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd +CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu +TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV +Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll +JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt +RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA +AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg +O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 +WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc +fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o +WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y +243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM +Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv +/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF +2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 +wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R +WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 +POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC +T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW +jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc +23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK +XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl +jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ +/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P +rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl +KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD +E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C +cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE +r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu +GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy 
+7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr +RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF +SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor +Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY +KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t +Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM +/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 +YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt +I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy ++jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f +UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER +KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW +MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe +f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== +-----END RSA PRIVATE KEY----- diff --git a/keys/mykey/rootCA.srl b/keys/mykey/rootCA.srl new file mode 100644 index 0000000..de0f603 --- /dev/null +++ b/keys/mykey/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D164 diff --git a/main/constants.go b/main/constants.go new file mode 100644 index 0000000..4c7f23b --- /dev/null +++ b/main/constants.go @@ -0,0 +1,13 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +const ( + gecko = " ___ ________ __ ___\n" + + " / _ \\_/\\ / _____/ ____ ____ | | ______ / _ \\_/\\\n" + + " \\/ \\___/ / \\ ____/ __ \\_/ ___\\| |/ / _ \\ \\/ \\___/\n" + + " \\ \\_\\ \\ ___/\\ \\___| < <_> )\n" + + " \\______ /\\___ >\\___ >__|_ \\____/\n" + + " \\/ \\/ \\/ \\/" +) diff --git a/main/main.go b/main/main.go new file mode 100644 index 0000000..db293b5 --- /dev/null +++ b/main/main.go @@ -0,0 +1,102 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "fmt" + "path" + + "github.com/ava-labs/gecko/node" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/go-ethereum/p2p/nat" +) + +// main is the primary entry point to Ava. This can either create a CLI to an +// existing node or create a new node. +func main() { + // Err is set based on the CLI arguments + if Err != nil { + fmt.Printf("parsing parameters returned with error %s\n", Err) + return + } + + config := Config.LoggingConfig + config.Directory = path.Join(config.Directory, "node") + factory := logging.NewFactory(config) + defer factory.Close() + + log, err := factory.Make() + if err != nil { + fmt.Printf("starting logger failed with: %s\n", err) + return + } + fmt.Println(gecko) + + defer func() { recover() }() + + defer log.Stop() + defer log.StopOnPanic() + defer Config.DB.Close() + + // Track if sybil control is enforced + if !Config.EnableStaking { + log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.") + } + + // Check if transaction signatures should be checked + if !Config.EnableCrypto { + log.Warn("transaction signatures are not being checked") + } + crypto.EnableCrypto = Config.EnableCrypto + + if err := Config.ConsensusParams.Valid(); err != nil { + log.Fatal("consensus parameters are invalid: %s", err) + return + } + + // Track if assertions should be executed + if Config.LoggingConfig.Assertions { + log.Warn("assertions are enabled. 
This may slow down execution") + } + + natChan := make(chan struct{}) + defer close(natChan) + + go nat.Map( + /*nat=*/ Config.Nat, + /*closeChannel=*/ natChan, + /*protocol=*/ "TCP", + /*internetPort=*/ int(Config.StakingIP.Port), + /*localPort=*/ int(Config.StakingIP.Port), + /*name=*/ "Gecko Staking Server", + ) + + go nat.Map( + /*nat=*/ Config.Nat, + /*closeChannel=*/ natChan, + /*protocol=*/ "TCP", + /*internetPort=*/ int(Config.HTTPPort), + /*localPort=*/ int(Config.HTTPPort), + /*name=*/ "Gecko HTTP Server", + ) + + log.Debug("initializing node state") + // MainNode is a global variable in the node.go file + if err := node.MainNode.Initialize(&Config, log, factory); err != nil { + log.Fatal("error initializing node state: %s", err) + return + } + + log.Debug("Starting servers") + if err := node.MainNode.StartConsensusServer(); err != nil { + log.Fatal("problem starting servers: %s", err) + return + } + + defer node.MainNode.Shutdown() + + log.Debug("Dispatching node handlers") + node.MainNode.Dispatch() +} diff --git a/main/params.go b/main/params.go new file mode 100644 index 0000000..88a3722 --- /dev/null +++ b/main/params.go @@ -0,0 +1,206 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "errors" + "flag" + "fmt" + "net" + "path" + "strings" + + "github.com/ava-labs/go-ethereum/p2p/nat" + + "github.com/ava-labs/gecko/database/leveldb" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/node" + "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Results of parsing the CLI +var ( + Config = node.Config{} + Err error +) + +var ( + errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs") +) + +// Parse the CLI arguments +func init() { + errs := &wrappers.Errs{} + defer func() { Err = errs.Err }() + + loggingConfig, err := logging.DefaultConfig() + errs.Add(err) + + // NetworkID: + networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + + // Ava fees: + flag.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") + + // Assertions: + flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + + // Crypto: + flag.BoolVar(&Config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + + // Database: + db := flag.Bool("db-enabled", true, "Turn on persistent storage") + dbDir := flag.String("db-dir", "db", "Database directory for Ava state") + + // IP: + consensusIP := flag.String("public-ip", "", "Public IP of this node") + + // HTTP Server: + httpPort := flag.Uint("http-port", 9650, "Port of the HTTP server") + flag.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs") + flag.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server") + 
flag.StringVar(&Config.HTTPSCertFile, "http-tls-cert-file", "", "TLS certificate file for the HTTPs server") + + // Bootstrapping: + bootstrapIPs := flag.String("bootstrap-ips", "", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") + bootstrapIDs := flag.String("bootstrap-ids", "", "Comma separated list of bootstrap peer ids to connect to. Example: JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + + // Staking: + consensusPort := flag.Uint("staking-port", 9651, "Port of the consensus server") + flag.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") + flag.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "", "TLS private key file for staking connections") + flag.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "", "TLS certificate file for staking connections") + + // Logging: + logsDir := flag.String("log-dir", "", "Logging directory for Ava") + logLevel := flag.String("log-level", "info", "The log level. Should be one of {verbo, debug, info, warn, error, fatal, off}") + logDisplayLevel := flag.String("log-display-level", "", "The log display level. If left blank, will inherit the value of log-level. 
Otherwise, should be one of {verbo, debug, info, warn, error, fatal, off}") + + flag.IntVar(&Config.ConsensusParams.K, "snow-sample-size", 20, "Number of nodes to query for each network poll") + flag.IntVar(&Config.ConsensusParams.Alpha, "snow-quorum-size", 18, "Alpha value to use for required number positive results") + flag.IntVar(&Config.ConsensusParams.BetaVirtuous, "snow-virtuous-commit-threshold", 20, "Beta value to use for virtuous transactions") + flag.IntVar(&Config.ConsensusParams.BetaRogue, "snow-rogue-commit-threshold", 30, "Beta value to use for rogue transactions") + flag.IntVar(&Config.ConsensusParams.Parents, "snow-avalanche-num-parents", 5, "Number of vertexes for reference from each new vertex") + flag.IntVar(&Config.ConsensusParams.BatchSize, "snow-avalanche-batch-size", 30, "Number of operations to batch in each new vertex") + + // Enable/Disable APIs: + flag.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") + flag.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") + flag.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") + flag.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") + + // Throughput Server + throughputPort := flag.Uint("xput-server-port", 9652, "Port of the deprecated throughput test server") + flag.BoolVar(&Config.ThroughputServerEnabled, "xput-server-enabled", false, "If true, throughput test server is created") + + flag.Parse() + + networkID, err := genesis.NetworkID(*networkName) + errs.Add(err) + + if networkID != genesis.LocalID { + errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) + } + + Config.NetworkID = networkID + + // DB: + if *db && err == nil { + // TODO: Add better params here + dbPath := path.Join(*dbDir, genesis.NetworkName(Config.NetworkID)) + db, err := leveldb.New(dbPath, 0, 
0, 0) + Config.DB = db + errs.Add(err) + } else { + Config.DB = memdb.New() + } + + Config.Nat = nat.Any() + + var ip net.IP + // If public IP is not specified, get it using shell command dig + if *consensusIP == "" { + ip, err = Config.Nat.ExternalIP() + errs.Add(fmt.Errorf("%s\nIf you are trying to create a local network, try adding --public-ip=127.0.0.1", err)) + } else { + ip = net.ParseIP(*consensusIP) + } + + if ip == nil { + errs.Add(fmt.Errorf("Invalid IP Address %s", *consensusIP)) + } + Config.StakingIP = utils.IPDesc{ + IP: ip, + Port: uint16(*consensusPort), + } + + // Bootstrapping: + for _, ip := range strings.Split(*bootstrapIPs, ",") { + if ip != "" { + addr, err := utils.ToIPDesc(ip) + errs.Add(err) + Config.BootstrapPeers = append(Config.BootstrapPeers, &node.Peer{ + IP: addr, + }) + } + } + if Config.EnableStaking { + i := 0 + cb58 := formatting.CB58{} + for _, id := range strings.Split(*bootstrapIDs, ",") { + if id != "" { + errs.Add(cb58.FromString(id)) + cert, err := ids.ToShortID(cb58.Bytes) + errs.Add(err) + + if len(Config.BootstrapPeers) <= i { + errs.Add(errBootstrapMismatch) + continue + } + Config.BootstrapPeers[i].ID = cert + i++ + } + } + if len(Config.BootstrapPeers) != i { + errs.Add(fmt.Errorf("More bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i)) + } + } else { + for _, peer := range Config.BootstrapPeers { + peer.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(peer.IP.String()))) + } + } + + // HTTP: + Config.HTTPPort = uint16(*httpPort) + + // Logging: + if *logsDir != "" { + loggingConfig.Directory = *logsDir + } + logFileLevel, err := logging.ToLevel(*logLevel) + errs.Add(err) + loggingConfig.LogLevel = logFileLevel + + if *logDisplayLevel == "" { + *logDisplayLevel = *logLevel + } + displayLevel, err := logging.ToLevel(*logDisplayLevel) + errs.Add(err) + loggingConfig.DisplayLevel = displayLevel + + Config.LoggingConfig = loggingConfig + + // Throughput: + Config.ThroughputPort = 
uint16(*throughputPort)
+
+	// Router used for consensus
+	Config.ConsensusRouter = &router.ChainRouter{}
+}
diff --git a/networking/addrset.go b/networking/addrset.go
new file mode 100644
index 0000000..a03e0df
--- /dev/null
+++ b/networking/addrset.go
@@ -0,0 +1,288 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package networking
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/ava-labs/salticidae-go"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils"
+)
+
+// Connections provides an interface for what a group of connections will
+// support.
+type Connections interface {
+	// Add maps the address to the node ID (and back), replacing any
+	// previous mapping that involves either key.
+	Add(salticidae.NetAddr, ids.ShortID)
+
+	// Lookups in either direction; the boolean reports whether the
+	// mapping exists.
+	GetIP(ids.ShortID) (salticidae.NetAddr, bool)
+	GetID(salticidae.NetAddr) (ids.ShortID, bool)
+
+	ContainsIP(salticidae.NetAddr) bool
+	ContainsID(ids.ShortID) bool
+
+	// Removals drop every mapping involving the given key(s).
+	Remove(salticidae.NetAddr, ids.ShortID)
+	RemoveIP(salticidae.NetAddr)
+	RemoveID(ids.ShortID)
+
+	Peers() []utils.IPDesc
+	IPs() []salticidae.NetAddr
+	IDs() ids.ShortSet
+	Conns() ([]utils.IPDesc, []ids.ShortID)
+	RawConns() ([]salticidae.NetAddr, []ids.ShortID)
+
+	Len() int
+}
+
+// AddrCert implements the Connections interface
+// The zero value is usable: the maps are allocated lazily by init().
+type AddrCert struct {
+	// ip -> id
+	// Keys are the packed (ip, port) value produced by addrToID.
+	ipToID map[uint64]ids.ShortID
+	// id -> ip
+	// Keys are the 20-byte key form of ids.ShortID.
+	idToIP map[[20]byte]salticidae.NetAddr
+	// mux guards both maps; every exported method takes it.
+	mux sync.Mutex
+}
+
+// Add Assumes that addr is garbage collected normally
+func (ac *AddrCert) Add(ip salticidae.NetAddr, id ids.ShortID) {
+	ac.mux.Lock()
+	defer ac.mux.Unlock()
+
+	ac.add(ip, id)
+}
+
+// GetIP returns the ip mapped to the id that is provided if one exists.
+func (ac *AddrCert) GetIP(id ids.ShortID) (salticidae.NetAddr, bool) {
+	ac.mux.Lock()
+	defer ac.mux.Unlock()
+
+	return ac.getIP(id)
+}
+
+// GetID returns the id mapped to the ip that is provided if one exists.
+func (ac *AddrCert) GetID(ip salticidae.NetAddr) (ids.ShortID, bool) { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.getID(ip) +} + +// ContainsIP returns true if the ip is contained in the connection pool +func (ac *AddrCert) ContainsIP(ip salticidae.NetAddr) bool { + _, exists := ac.GetID(ip) + return exists +} + +// ContainsID returns true if the id is contained in the connection pool +func (ac *AddrCert) ContainsID(id ids.ShortID) bool { + _, exists := ac.GetIP(id) + return exists +} + +// Remove ensures that no connection will have any mapping containing [ip] or +// [id]. +func (ac *AddrCert) Remove(ip salticidae.NetAddr, id ids.ShortID) { + ac.mux.Lock() + defer ac.mux.Unlock() + + ac.remove(ip, id) +} + +// RemoveIP ensures that no connection will have a mapping containing [ip] +func (ac *AddrCert) RemoveIP(ip salticidae.NetAddr) { + ac.mux.Lock() + defer ac.mux.Unlock() + + ac.removeIP(ip) +} + +// RemoveID ensures that no connection will have a mapping containing [id] +func (ac *AddrCert) RemoveID(id ids.ShortID) { + ac.mux.Lock() + defer ac.mux.Unlock() + + ac.removeID(id) +} + +// Peers returns the full list of ips contained in this connection pool. +func (ac *AddrCert) Peers() []utils.IPDesc { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.peers() +} + +// IPs returns the full list of ips contained in this connection pool. This can +// be useful for gossiping a node's connections through the network. +func (ac *AddrCert) IPs() []salticidae.NetAddr { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.ips() +} + +// IDs return the set of IDs that are mapping in this connection pool. +func (ac *AddrCert) IDs() ids.ShortSet { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.ids() +} + +// Conns return the set of connections in this connection pool. 
+func (ac *AddrCert) Conns() ([]utils.IPDesc, []ids.ShortID) { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.conns() +} + +// RawConns return the set of connections in this connection pool. +func (ac *AddrCert) RawConns() ([]salticidae.NetAddr, []ids.ShortID) { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.rawConns() +} + +// Len returns the number of elements in the map +func (ac *AddrCert) Len() int { + ac.mux.Lock() + defer ac.mux.Unlock() + + return ac.len() +} + +func (ac *AddrCert) init() { + if ac.ipToID == nil { + ac.ipToID = make(map[uint64]ids.ShortID) + } + if ac.idToIP == nil { + ac.idToIP = make(map[[20]byte]salticidae.NetAddr) + } +} + +func (ac *AddrCert) add(ip salticidae.NetAddr, id ids.ShortID) { + ac.init() + + ac.removeIP(ip) + ac.removeID(id) + + ac.ipToID[addrToID(ip)] = id + ac.idToIP[id.Key()] = ip +} + +func (ac *AddrCert) getIP(id ids.ShortID) (salticidae.NetAddr, bool) { + ac.init() + + ip, exists := ac.idToIP[id.Key()] + return ip, exists +} + +func (ac *AddrCert) getID(ip salticidae.NetAddr) (ids.ShortID, bool) { + ac.init() + + id, exists := ac.ipToID[addrToID(ip)] + return id, exists +} + +func (ac *AddrCert) remove(ip salticidae.NetAddr, id ids.ShortID) { + ac.removeIP(ip) + ac.removeID(id) +} + +func (ac *AddrCert) removeIP(ip salticidae.NetAddr) { + ac.init() + + ipID := addrToID(ip) + if id, exists := ac.ipToID[ipID]; exists { + delete(ac.ipToID, ipID) + delete(ac.idToIP, id.Key()) + } +} + +func (ac *AddrCert) removeID(id ids.ShortID) { + ac.init() + + idKey := id.Key() + if ip, exists := ac.idToIP[idKey]; exists { + delete(ac.ipToID, addrToID(ip)) + delete(ac.idToIP, idKey) + } +} + +func (ac *AddrCert) peers() []utils.IPDesc { + ac.init() + + ips := []utils.IPDesc(nil) + for _, ip := range ac.idToIP { + ips = append(ips, toIPDesc(ip)) + } + return ips +} + +func (ac *AddrCert) ips() []salticidae.NetAddr { + ac.init() + + ips := []salticidae.NetAddr(nil) + for _, ip := range ac.idToIP { + ips = append(ips, ip) + 
} + return ips +} + +func (ac *AddrCert) ids() ids.ShortSet { + ac.init() + + ids := ids.ShortSet{} + for _, id := range ac.ipToID { + ids.Add(id) + } + return ids +} + +func (ac *AddrCert) conns() ([]utils.IPDesc, []ids.ShortID) { + ac.init() + + ipList := []utils.IPDesc(nil) + idList := []ids.ShortID(nil) + for id, ip := range ac.idToIP { + ipList = append(ipList, toIPDesc(ip)) + idList = append(idList, ids.NewShortID(id)) + } + return ipList, idList +} + +func (ac *AddrCert) rawConns() ([]salticidae.NetAddr, []ids.ShortID) { + ac.init() + + ipList := []salticidae.NetAddr(nil) + idList := []ids.ShortID(nil) + for id, ip := range ac.idToIP { + ipList = append(ipList, ip) + idList = append(idList, ids.NewShortID(id)) + } + return ipList, idList +} + +func (ac *AddrCert) len() int { return len(ac.ipToID) } + +func toIPDesc(addr salticidae.NetAddr) utils.IPDesc { + ip, err := ToIPDesc(addr) + HandshakeNet.log.AssertNoError(err) + return ip +} + +// ToIPDesc converts an address to an IP +func ToIPDesc(addr salticidae.NetAddr) (utils.IPDesc, error) { + ip := salticidae.FromBigEndianU32(addr.GetIP()) + port := salticidae.FromBigEndianU16(addr.GetPort()) + return utils.ToIPDesc(fmt.Sprintf("%d.%d.%d.%d:%d", byte(ip>>24), byte(ip>>16), byte(ip>>8), byte(ip), port)) +} + +func addrToID(addr salticidae.NetAddr) uint64 { + return uint64(addr.GetIP()) | (uint64(addr.GetPort()) << 32) +} diff --git a/networking/builder.go b/networking/builder.go new file mode 100644 index 0000000..cbe451b --- /dev/null +++ b/networking/builder.go @@ -0,0 +1,156 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package networking + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils" +) + +// Builder extends a Codec to build messages safely +type Builder struct{ Codec } + +// GetVersion message +func (m Builder) GetVersion() (Msg, error) { return m.Pack(GetVersion, nil) } + +// Version message +func (m Builder) Version(networkID uint32, myTime uint64, myVersion string) (Msg, error) { + return m.Pack(Version, map[Field]interface{}{ + NetworkID: networkID, + MyTime: myTime, + VersionStr: myVersion, + }) +} + +// GetPeerList message +func (m Builder) GetPeerList() (Msg, error) { return m.Pack(GetPeerList, nil) } + +// PeerList message +func (m Builder) PeerList(ipDescs []utils.IPDesc) (Msg, error) { + return m.Pack(PeerList, map[Field]interface{}{Peers: ipDescs}) +} + +// GetAcceptedFrontier message +func (m Builder) GetAcceptedFrontier(chainID ids.ID, requestID uint32) (Msg, error) { + return m.Pack(GetAcceptedFrontier, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + }) +} + +// AcceptedFrontier message +func (m Builder) AcceptedFrontier(chainID ids.ID, requestID uint32, containerIDs ids.Set) (Msg, error) { + containerIDBytes := make([][]byte, containerIDs.Len()) + for i, containerID := range containerIDs.List() { + containerIDBytes[i] = containerID.Bytes() + } + return m.Pack(AcceptedFrontier, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerIDs: containerIDBytes, + }) +} + +// GetAccepted message +func (m Builder) GetAccepted(chainID ids.ID, requestID uint32, containerIDs ids.Set) (Msg, error) { + containerIDBytes := make([][]byte, containerIDs.Len()) + for i, containerID := range containerIDs.List() { + containerIDBytes[i] = containerID.Bytes() + } + return m.Pack(GetAccepted, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerIDs: containerIDBytes, + }) +} + +// Accepted message +func (m Builder) 
Accepted(chainID ids.ID, requestID uint32, containerIDs ids.Set) (Msg, error) { + containerIDBytes := make([][]byte, containerIDs.Len()) + for i, containerID := range containerIDs.List() { + containerIDBytes[i] = containerID.Bytes() + } + return m.Pack(Accepted, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerIDs: containerIDBytes, + }) +} + +// Get message +func (m Builder) Get(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg, error) { + return m.Pack(Get, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerID: containerID.Bytes(), + }) +} + +// Put message +func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) { + return m.Pack(Put, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerID: containerID.Bytes(), + ContainerBytes: container, + }) +} + +// PushQuery message +func (m Builder) PushQuery(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) { + return m.Pack(PushQuery, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerID: containerID.Bytes(), + ContainerBytes: container, + }) +} + +// PullQuery message +func (m Builder) PullQuery(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg, error) { + return m.Pack(PullQuery, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerID: containerID.Bytes(), + }) +} + +// Chits message +func (m Builder) Chits(chainID ids.ID, requestID uint32, containerIDs ids.Set) (Msg, error) { + containerIDBytes := make([][]byte, containerIDs.Len()) + for i, containerID := range containerIDs.List() { + containerIDBytes[i] = containerID.Bytes() + } + return m.Pack(Chits, map[Field]interface{}{ + ChainID: chainID.Bytes(), + RequestID: requestID, + ContainerIDs: containerIDBytes, + }) +} + +// Ping message +func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, 
nil) } + +// Pong message +func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) } + +// Data message +func (m Builder) Data(b []byte) (Msg, error) { return m.Pack(Data, map[Field]interface{}{Bytes: b}) } + +// IssueTx message +func (m Builder) IssueTx(chainID ids.ID, tx []byte) (Msg, error) { + return m.Pack(IssueTx, map[Field]interface{}{ + ChainID: chainID.Bytes(), + Tx: tx, + }) +} + +// DecidedTx message +func (m Builder) DecidedTx(txID ids.ID, status choices.Status) (Msg, error) { + return m.Pack(DecidedTx, map[Field]interface{}{ + TxID: txID.Bytes(), + Status: uint32(status), + }) +} diff --git a/networking/codec.go b/networking/codec.go new file mode 100644 index 0000000..4f104e2 --- /dev/null +++ b/networking/codec.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "errors" + "math" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errBadLength = errors.New("stream has unexpected length") + errMissingField = errors.New("message missing field") + errBadOp = errors.New("input field has invalid operation") +) + +// Codec defines the serialization and deserialization of network messages +type Codec struct{} + +// Pack attempts to pack a map of fields into a message. +func (Codec) Pack(op salticidae.Opcode, fields map[Field]interface{}) (Msg, error) { + message, ok := Messages[op] + if !ok { + return nil, errBadOp + } + + p := wrappers.Packer{MaxSize: math.MaxInt32} + for _, field := range message { + data, ok := fields[field] + if !ok { + return nil, errMissingField + } + field.Packer()(&p, data) + } + + if p.Errored() { // Prevent the datastream from leaking + return nil, p.Err + } + + return &msg{ + op: op, + ds: salticidae.NewDataStreamFromBytes(p.Bytes, false), + fields: fields, + }, nil +} + +// Parse attempts to convert a byte stream into a message. 
+func (Codec) Parse(op salticidae.Opcode, ds salticidae.DataStream) (Msg, error) { + message, ok := Messages[op] + if !ok { + return nil, errBadOp + } + + // TODO: make this work without copy + size := ds.Size() + p := wrappers.Packer{Bytes: make([]byte, size)} + + byteHandle := ds.GetDataInPlace(size) + defer byteHandle.Release() + + copy(p.Bytes, byteHandle.Get()) + + fields := make(map[Field]interface{}, len(message)) + for _, field := range message { + fields[field] = field.Unpacker()(&p) + } + + if p.Offset != size { + return nil, errBadLength + } + + return &msg{ + op: op, + ds: ds, + fields: fields, + }, p.Err +} diff --git a/networking/commands.go b/networking/commands.go new file mode 100644 index 0000000..ffb5cbb --- /dev/null +++ b/networking/commands.go @@ -0,0 +1,187 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Field that may be packed into a message +type Field uint32 + +// Fields that may be packed. These values are not sent over the wire. +const ( + VersionStr Field = iota // Used in handshake + NetworkID // Used in handshake + MyTime // Used in handshake + Peers // Used in handshake + ChainID // Used for dispatching + RequestID // Used for all messages + ContainerID // Used for querying + ContainerBytes // Used for gossiping + ContainerIDs // Used for querying + Bytes // Used as arbitrary data + TxID // Used for throughput tests + Tx // Used for throughput tests + Status // Used for throughput tests +) + +// Packer returns the packer function that can be used to pack this field. 
+func (f Field) Packer() func(*wrappers.Packer, interface{}) { + switch f { + case VersionStr: + return wrappers.TryPackStr + case NetworkID: + return wrappers.TryPackInt + case MyTime: + return wrappers.TryPackLong + case Peers: + return wrappers.TryPackIPList + case ChainID: // TODO: This will be shortened to use a modified varint spec + return wrappers.TryPackHash + case RequestID: + return wrappers.TryPackInt + case ContainerID: + return wrappers.TryPackHash + case ContainerBytes: + return wrappers.TryPackBytes + case ContainerIDs: + return wrappers.TryPackHashes + case Bytes: + return wrappers.TryPackBytes + case TxID: + return wrappers.TryPackHash + case Tx: + return wrappers.TryPackBytes + case Status: + return wrappers.TryPackInt + default: + return nil + } +} + +// Unpacker returns the unpacker function that can be used to unpack this field. +func (f Field) Unpacker() func(*wrappers.Packer) interface{} { + switch f { + case VersionStr: + return wrappers.TryUnpackStr + case NetworkID: + return wrappers.TryUnpackInt + case MyTime: + return wrappers.TryUnpackLong + case Peers: + return wrappers.TryUnpackIPList + case ChainID: // TODO: This will be shortened to use a modified varint spec + return wrappers.TryUnpackHash + case RequestID: + return wrappers.TryUnpackInt + case ContainerID: + return wrappers.TryUnpackHash + case ContainerBytes: + return wrappers.TryUnpackBytes + case ContainerIDs: + return wrappers.TryUnpackHashes + case Bytes: + return wrappers.TryUnpackBytes + case TxID: + return wrappers.TryUnpackHash + case Tx: + return wrappers.TryUnpackBytes + case Status: + return wrappers.TryUnpackInt + default: + return nil + } +} + +func (f Field) String() string { + switch f { + case VersionStr: + return "VersionStr" + case NetworkID: + return "NetworkID" + case MyTime: + return "MyTime" + case Peers: + return "Peers" + case ChainID: + return "ChainID" + case ContainerID: + return "ContainerID" + case ContainerBytes: + return "Container Bytes" + case 
ContainerIDs: + return "Container IDs" + case Bytes: + return "Bytes" + case TxID: + return "TxID" + case Tx: + return "Tx" + case Status: + return "Status" + default: + return "Unknown Field" + } +} + +// Public commands that may be sent between stakers +const ( + // Handshake: + GetVersion salticidae.Opcode = iota + Version + GetPeerList + PeerList + // Bootstrapping: + GetAcceptedFrontier + AcceptedFrontier + GetAccepted + Accepted + // Consensus: + Get + Put + PushQuery + PullQuery + Chits + // Pinging: + Ping + Pong + // Arbitrary data message: + Data + // Throughput test: + IssueTx + DecidedTx +) + +// Defines the messages that can be sent/received with this network +var ( + Messages = map[salticidae.Opcode][]Field{ + // Handshake: + GetVersion: []Field{}, + Version: []Field{NetworkID, MyTime, VersionStr}, + GetPeerList: []Field{}, + PeerList: []Field{Peers}, + // Bootstrapping: + GetAcceptedFrontier: []Field{ChainID, RequestID}, + AcceptedFrontier: []Field{ChainID, RequestID, ContainerIDs}, + GetAccepted: []Field{ChainID, RequestID, ContainerIDs}, + Accepted: []Field{ChainID, RequestID, ContainerIDs}, + // Consensus: + Get: []Field{ChainID, RequestID, ContainerID}, + Put: []Field{ChainID, RequestID, ContainerID, ContainerBytes}, + PushQuery: []Field{ChainID, RequestID, ContainerID, ContainerBytes}, + PullQuery: []Field{ChainID, RequestID, ContainerID}, + Chits: []Field{ChainID, RequestID, ContainerIDs}, + // Pinging: + Ping: []Field{}, + Pong: []Field{}, + // Arbitrary data message: + Data: []Field{Bytes}, + // Throughput test: + IssueTx: []Field{ChainID, Tx}, + DecidedTx: []Field{TxID, Status}, + } +) diff --git a/networking/handshake_handlers.go b/networking/handshake_handlers.go new file mode 100644 index 0000000..83d1348 --- /dev/null +++ b/networking/handshake_handlers.go @@ -0,0 +1,575 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package networking
+
+// #include "salticidae/network.h"
+// bool checkPeerCertificate(msgnetwork_conn_t *, bool, void *);
+// void unknownPeerHandler(netaddr_t *, x509_t *, void *);
+// void peerHandler(peernetwork_conn_t *, bool, void *);
+// void ping(msg_t *, msgnetwork_conn_t *, void *);
+// void pong(msg_t *, msgnetwork_conn_t *, void *);
+// void getVersion(msg_t *, msgnetwork_conn_t *, void *);
+// void version(msg_t *, msgnetwork_conn_t *, void *);
+// void getPeerList(msg_t *, msgnetwork_conn_t *, void *);
+// void peerList(msg_t *, msgnetwork_conn_t *, void *);
+import "C"
+
+// The comment block above is the cgo preamble: it forward-declares the
+// C-linkage callbacks that the //export'd Go functions later in this file
+// implement. The prototypes must stay in sync with those functions.
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+	"unsafe"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/ava-labs/salticidae-go"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/snow/networking"
+	"github.com/ava-labs/gecko/snow/validators"
+	"github.com/ava-labs/gecko/utils"
+	"github.com/ava-labs/gecko/utils/hashing"
+	"github.com/ava-labs/gecko/utils/logging"
+	"github.com/ava-labs/gecko/utils/random"
+	"github.com/ava-labs/gecko/utils/timer"
+)
+
+/*
+Receive a new connection.
+ - Send version message.
+Receive version message.
+ - Validate data
+ - Send peer list
+ - Mark this node as being connected
+*/
+
+/*
+Periodically gossip peerlists.
+ - Only connected stakers should be gossiped.
+ - Gossip to a capped number of peers.
+ - The peers to gossip to should be at least half full of stakers (or all the
+   stakers should be in the set).
+*/
+
+const (
+	// CurrentVersion this avalanche instance is executing.
+	CurrentVersion = "avalanche/0.0.1"
+	// MaxClockDifference allowed between connected nodes.
+	MaxClockDifference = time.Minute
+	// PeerListGossipSpacing is the amount of time to wait between pushing this
+	// node's peer list to other nodes.
+	PeerListGossipSpacing = time.Minute
+	// PeerListGossipSize is the number of peers to gossip each period.
+	PeerListGossipSize = 100
+	// PeerListStakerGossipFraction calculates the fraction of stakers that are
+	// gossiped to. If set to 1, then only stakers will be gossiped to.
+	PeerListStakerGossipFraction = 2
+	// GetVersionTimeout is the amount of time to wait before sending a
+	// getVersion message to a partially connected peer
+	GetVersionTimeout = 2 * time.Second
+)
+
+// Manager is the struct that will be accessed on event calls
+// HandshakeNet is package-level because the C-exported callbacks below have
+// no way to receive a Go receiver.
+var (
+	HandshakeNet = Handshake{}
+)
+
+var (
+	errDSValidators = errors.New("couldn't get validator set of default subnet")
+)
+
+// Handshake handles the authentication of new peers. Only valid stakers
+// will appear connected.
+type Handshake struct {
+	handshakeMetrics
+
+	networkID uint32
+
+	log logging.Logger
+	vdrs validators.Set
+	myAddr salticidae.NetAddr
+	myID ids.ShortID
+	net salticidae.PeerNetwork
+	enableStaking bool // Should only be false for local tests
+
+	clock timer.Clock
+	pending AddrCert // Connections that I haven't gotten version messages from
+	connections AddrCert // Connections that I think are connected
+
+	versionTimeout timer.TimeoutManager
+	peerListGossiper *timer.Repeater
+
+	// awaitingLock guards awaiting, which is mutated from both the version
+	// handler and AwaitConnections.
+	awaitingLock sync.Mutex
+	awaiting []*networking.AwaitingConnections
+}
+
+// Initialize to the c networking library. This should only be done once during
+// node setup.
+func (nm *Handshake) Initialize(
+	log logging.Logger,
+	vdrs validators.Set,
+	myAddr salticidae.NetAddr,
+	myID ids.ShortID,
+	peerNet salticidae.PeerNetwork,
+	registerer prometheus.Registerer,
+	enableStaking bool,
+	networkID uint32,
+) {
+	log.AssertTrue(nm.net == nil, "Should only register network handlers once")
+	nm.log = log
+	nm.vdrs = vdrs
+	nm.myAddr = myAddr
+	nm.myID = myID
+	nm.net = peerNet
+	nm.enableStaking = enableStaking
+	nm.networkID = networkID
+
+	net := peerNet.AsMsgNetwork()
+
+	// Register the C-exported callbacks (defined later in this file) for
+	// connection lifecycle events and for each handshake opcode.
+	net.RegConnHandler(salticidae.MsgNetworkConnCallback(C.checkPeerCertificate), nil)
+	peerNet.RegPeerHandler(salticidae.PeerNetworkPeerCallback(C.peerHandler), nil)
+	peerNet.RegUnknownPeerHandler(salticidae.PeerNetworkUnknownPeerCallback(C.unknownPeerHandler), nil)
+	net.RegHandler(Ping, salticidae.MsgNetworkMsgCallback(C.ping), nil)
+	net.RegHandler(Pong, salticidae.MsgNetworkMsgCallback(C.pong), nil)
+	net.RegHandler(GetVersion, salticidae.MsgNetworkMsgCallback(C.getVersion), nil)
+	net.RegHandler(Version, salticidae.MsgNetworkMsgCallback(C.version), nil)
+	net.RegHandler(GetPeerList, salticidae.MsgNetworkMsgCallback(C.getPeerList), nil)
+	net.RegHandler(PeerList, salticidae.MsgNetworkMsgCallback(C.peerList), nil)
+
+	nm.handshakeMetrics.Initialize(nm.log, registerer)
+
+	// Background dispatchers: version-timeout retries and periodic peer-list
+	// gossip. Both are stopped by Shutdown.
+	nm.versionTimeout.Initialize(GetVersionTimeout)
+	go nm.log.RecoverAndPanic(nm.versionTimeout.Dispatch)
+	nm.peerListGossiper = timer.NewRepeater(nm.gossipPeerList, PeerListGossipSpacing)
+	go nm.log.RecoverAndPanic(nm.peerListGossiper.Dispatch)
+}
+
+// AwaitConnections registers [awaiting] to be finished once enough of the
+// peers it names are connected; already-connected peers are counted first.
+func (nm *Handshake) AwaitConnections(awaiting *networking.AwaitingConnections) {
+	nm.awaitingLock.Lock()
+	defer nm.awaitingLock.Unlock()
+
+	// Count myself and every peer already connected; if that's enough,
+	// finish immediately, otherwise the version handler completes it later.
+	awaiting.Add(nm.myID)
+	for _, cert := range nm.connections.IDs().List() {
+		awaiting.Add(cert)
+	}
+	if awaiting.Ready() {
+		go awaiting.Finish()
+	} else {
+		nm.awaiting = append(nm.awaiting, awaiting)
+	}
+}
+
+// gossipPeerList pushes this node's peer list to a random sample of
+// connections, biased so roughly 1/PeerListStakerGossipFraction of the
+// recipients are stakers.
+func (nm *Handshake) gossipPeerList() {
+	stakers := []ids.ShortID{}
+	nonStakers := []ids.ShortID{}
+	for _, id := range nm.connections.IDs().List() {
+		if nm.vdrs.Contains(id) {
+			stakers = append(stakers, id)
+		} else {
+			nonStakers = append(nonStakers, id)
+		}
+	}
+
+	// Ceiling division, then clamp both counts to what's available.
+	numStakersToSend := (PeerListGossipSize + PeerListStakerGossipFraction - 1) / PeerListStakerGossipFraction
+	if len(stakers) < numStakersToSend {
+		numStakersToSend = len(stakers)
+	}
+	numNonStakersToSend := PeerListGossipSize - numStakersToSend
+	if len(nonStakers) < numNonStakersToSend {
+		numNonStakersToSend = len(nonStakers)
+	}
+
+	// The same sampler is re-targeted at the non-staker list via Replace();
+	// the call order here is load-bearing.
+	idsToSend := []ids.ShortID{}
+	sampler := random.Uniform{N: len(stakers)}
+	for i := 0; i < numStakersToSend; i++ {
+		idsToSend = append(idsToSend, stakers[sampler.Sample()])
+	}
+	sampler.N = len(nonStakers)
+	sampler.Replace()
+	for i := 0; i < numNonStakersToSend; i++ {
+		idsToSend = append(idsToSend, nonStakers[sampler.Sample()])
+	}
+
+	ips := []salticidae.NetAddr{}
+	for _, id := range idsToSend {
+		if ip, exists := nm.connections.GetIP(id); exists {
+			ips = append(ips, ip)
+		}
+	}
+
+	nm.SendPeerList(ips...)
+}
+
+// Connections returns the object that tracks the nodes that are currently
+// connected to this node.
+func (nm *Handshake) Connections() Connections { return &nm.connections }
+
+// Shutdown the network
+func (nm *Handshake) Shutdown() {
+	nm.versionTimeout.Stop()
+	nm.peerListGossiper.Stop()
+}
+
+// SendGetVersion to the requested peer
+func (nm *Handshake) SendGetVersion(addr salticidae.NetAddr) {
+	build := Builder{}
+	gv, err := build.GetVersion()
+	nm.log.AssertNoError(err)
+	nm.send(gv, addr)
+
+	nm.numGetVersionSent.Inc()
+}
+
+// SendVersion to the requested peer
+func (nm *Handshake) SendVersion(addr salticidae.NetAddr) error {
+	build := Builder{}
+	v, err := build.Version(nm.networkID, nm.clock.Unix(), CurrentVersion)
+	if err != nil {
+		return fmt.Errorf("packing Version failed due to %s", err)
+	}
+	nm.send(v, addr)
+	nm.numVersionSent.Inc()
+	return nil
+}
+
+// SendPeerList to the requested peer
+// Only the IPs of connected stakers are shared.
+func (nm *Handshake) SendPeerList(addrs ...salticidae.NetAddr) error {
+	if len(addrs) == 0 {
+		return nil
+	}
+
+	// NOTE(review): "ids" here shadows the ids package within this function.
+	ips, ids := nm.connections.Conns()
+	ipsToSend := []utils.IPDesc(nil)
+	for i, id := range ids {
+		if nm.vdrs.Contains(id) {
+			ipsToSend = append(ipsToSend, ips[i])
+		}
+	}
+
+	if len(ipsToSend) == 0 {
+		nm.log.Debug("No IPs to send to %d peer(s)", len(addrs))
+		return nil
+	}
+
+	nm.log.Verbo("Sending %d ips to %d peer(s)", len(ipsToSend), len(addrs))
+
+	build := Builder{}
+	pl, err := build.PeerList(ipsToSend)
+	if err != nil {
+		return fmt.Errorf("Packing Peerlist failed due to %w", err)
+	}
+	nm.send(pl, addrs...)
+	nm.numPeerlistSent.Add(float64(len(addrs)))
+	return nil
+}
+
+// send transfers ownership of [msg]'s payload into a salticidae message and
+// delivers it to one or many peers.
+func (nm *Handshake) send(msg Msg, addrs ...salticidae.NetAddr) {
+	ds := msg.DataStream()
+	defer ds.Free()
+	ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
+	defer ba.Free()
+	cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false)
+	defer cMsg.Free()
+
+	switch len(addrs) {
+	case 0:
+	case 1:
+		nm.net.SendMsg(cMsg, addrs[0])
+	default:
+		nm.net.MulticastMsgByMove(cMsg, addrs)
+	}
+}
+
+// checkPeerCertificate of a new inbound connection
+// Currently accepts every connected peer without inspecting the certificate.
+//export checkPeerCertificate
+func checkPeerCertificate(_ *C.struct_msgnetwork_conn_t, connected C.bool, _ unsafe.Pointer) C.bool {
+	return connected
+}
+
+// peerHandler notifies a change to the set of connected peers
+// connected is true if a new peer is connected
+// connected is false if a formerly connected peer has disconnected
+//export peerHandler
+func peerHandler(_conn *C.struct_peernetwork_conn_t, connected C.bool, _ unsafe.Pointer) {
+	pConn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
+	addr := pConn.GetPeerAddr(true)
+
+	ip := toIPDesc(addr)
+	if !connected {
+		// Disconnect path: drop the peer from the fake-validator set (non
+		// staking mode), from both connection pools, and from every pending
+		// AwaitingConnections.
+		if !HandshakeNet.enableStaking {
+			cert := toShortID(ip)
+			HandshakeNet.vdrs.Remove(cert)
+		}
+
+		cert := ids.ShortID{}
+		if pendingCert, exists := HandshakeNet.pending.GetID(addr); exists {
+			cert = pendingCert
+		} else if connectedCert, exists := HandshakeNet.connections.GetID(addr); exists {
+			cert = connectedCert
+		} else {
+			return
+		}
+
+		HandshakeNet.pending.RemoveIP(addr)
+		HandshakeNet.connections.RemoveIP(addr)
+
+		HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len()))
+
+		HandshakeNet.log.Warn("Disconnected from %s", ip)
+
+		HandshakeNet.awaitingLock.Lock()
+		defer HandshakeNet.awaitingLock.Unlock()
+
+		for _, awaiting := range HandshakeNet.awaiting {
+			awaiting.Remove(cert)
+		}
+
+		return
+	}
+
+	HandshakeNet.log.Debug("Connected to %s", ip)
+
+	// If we're enforcing staking, use a peer's certificate to uniquely 
identify them
+	// Otherwise, use a hash of their ip to identify them
+	cert := ids.ShortID{}
+	if HandshakeNet.enableStaking {
+		cert = getPeerCert(_conn)
+	} else {
+		cert = toShortID(ip)
+	}
+	HandshakeNet.pending.Add(addr, cert)
+
+	// Poll the peer for its version until it answers (the version handler
+	// removes it from pending) or disconnects; the closure re-arms itself
+	// through the timeout manager.
+	certID := cert.LongID()
+	handler := new(func())
+	*handler = func() {
+		if HandshakeNet.pending.ContainsIP(addr) {
+			HandshakeNet.SendGetVersion(addr)
+			HandshakeNet.versionTimeout.Put(certID, *handler)
+		}
+	}
+	(*handler)()
+}
+
+// unknownPeerHandler notifies of an unknown peer connection attempt
+//export unknownPeerHandler
+func unknownPeerHandler(_addr *C.netaddr_t, _cert *C.x509_t, _ unsafe.Pointer) {
+	addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr))
+	ip := toIPDesc(addr)
+	HandshakeNet.log.Info("Adding peer %s", ip)
+	HandshakeNet.net.AddPeer(addr)
+}
+
+// ping handles the receipt of a ping message; it replies with a pong.
+//export ping
+func ping(_ *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
+	conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
+	addr := conn.GetPeerAddr(false)
+	defer addr.Free()
+	if addr.IsNull() {
+		HandshakeNet.log.Warn("Ping sent from unknown peer")
+		return
+	}
+
+	build := Builder{}
+	pong, err := build.Pong()
+	HandshakeNet.log.AssertNoError(err)
+
+	HandshakeNet.send(pong, addr)
+}
+
+// pong handles the receipt of a pong message; intentionally a no-op.
+//export pong
+func pong(*C.struct_msg_t, *C.struct_msgnetwork_conn_t, unsafe.Pointer) {}
+
+// getVersion handles the receipt of a getVersion message
+//export getVersion
+func getVersion(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
+	HandshakeNet.numGetVersionReceived.Inc()
+
+	conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
+	addr := conn.GetPeerAddr(false)
+	defer addr.Free()
+
+	if addr.IsNull() {
+		HandshakeNet.log.Warn("GetVersion sent from unknown peer")
+		return
+	}
+
+	// NOTE(review): SendVersion's error is dropped here — confirm intended.
+	HandshakeNet.SendVersion(addr)
+}
+
+// version handles the receipt of a version message
+//export version
+func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
+	HandshakeNet.numVersionReceived.Inc()
+
+	msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
+	conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
+	addr := conn.GetPeerAddr(true)
+	if addr.IsNull() {
+		HandshakeNet.log.Warn("Version sent from unknown peer")
+		return
+	}
+
+	cert := ids.ShortID{}
+	if HandshakeNet.enableStaking {
+		cert = getMsgCert(_conn)
+	} else {
+		ip := toIPDesc(addr)
+		cert = toShortID(ip)
+	}
+
+	// Whatever happens below, this peer is no longer awaiting a version.
+	defer HandshakeNet.pending.Remove(addr, cert)
+
+	build := Builder{}
+	pMsg, err := build.Parse(Version, msg.GetPayloadByMove())
+	if err != nil {
+		HandshakeNet.log.Warn("Failed to parse Version message")
+
+		HandshakeNet.net.DelPeer(addr)
+		return
+	}
+
+	// Reject peers on a different network, with a badly skewed clock, or
+	// with an incompatible software version.
+	if networkID := pMsg.Get(NetworkID).(uint32); networkID != HandshakeNet.networkID {
+		HandshakeNet.log.Warn("Peer's network ID doesn't match our networkID: Peer's = %d ; Ours = %d", networkID, HandshakeNet.networkID)
+
+		HandshakeNet.net.DelPeer(addr)
+		return
+	}
+
+	myTime := float64(HandshakeNet.clock.Unix())
+	if peerTime := float64(pMsg.Get(MyTime).(uint64)); math.Abs(peerTime-myTime) > MaxClockDifference.Seconds() {
+		HandshakeNet.log.Warn("Peer's clock is too far out of sync with mine. His = %d, Mine = %d (seconds)", uint64(peerTime), uint64(myTime))
+
+		HandshakeNet.net.DelPeer(addr)
+		return
+	}
+
+	if peerVersion := pMsg.Get(VersionStr).(string); !checkCompatibility(CurrentVersion, peerVersion) {
+		HandshakeNet.log.Warn("Bad version")
+
+		HandshakeNet.net.DelPeer(addr)
+		return
+	}
+
+	HandshakeNet.log.Debug("Finishing handshake with %s", toIPDesc(addr))
+
+	HandshakeNet.SendPeerList(addr)
+	HandshakeNet.connections.Add(addr, cert)
+
+	HandshakeNet.versionTimeout.Remove(cert.LongID())
+
+	if !HandshakeNet.enableStaking {
+		HandshakeNet.vdrs.Add(validators.NewValidator(cert, 1))
+	}
+
+	HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len()))
+
+	HandshakeNet.awaitingLock.Lock()
+	defer HandshakeNet.awaitingLock.Unlock()
+
+	for i := 0; i < len(HandshakeNet.awaiting); i++ {
+		awaiting := HandshakeNet.awaiting[i]
+		awaiting.Add(cert)
+		if !awaiting.Ready() {
+			continue
+		}
+
+		// Swap-delete: move the last entry into slot i, shrink the slice,
+		// and re-examine the same index on the next iteration.
+		newLen := len(HandshakeNet.awaiting) - 1
+		HandshakeNet.awaiting[i] = HandshakeNet.awaiting[newLen]
+		HandshakeNet.awaiting = HandshakeNet.awaiting[:newLen]
+
+		i--
+
+		go awaiting.Finish()
+	}
+}
+
+// getPeerList handles the receipt of a getPeerList message
+//export getPeerList
+func getPeerList(_ *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
+	HandshakeNet.numGetPeerlistReceived.Inc()
+
+	conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
+	addr := conn.GetPeerAddr(false)
+	defer addr.Free()
+	if addr.IsNull() {
+		HandshakeNet.log.Warn("GetPeerList sent from unknown peer")
+		return
+	}
+	HandshakeNet.SendPeerList(addr)
+}
+
+// peerList handles the receipt of a peerList message
+//export peerList
+func peerList(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
+	HandshakeNet.numPeerlistReceived.Inc()
+
+	msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
+	build := Builder{}
+	pMsg, err := build.Parse(PeerList, msg.GetPayloadByMove())
+	if err != nil {
+		HandshakeNet.log.Warn("Failed 
to parse PeerList message due to %s", err) + // TODO: What should we do here? + return + } + + ips := pMsg.Get(Peers).([]utils.IPDesc) + cErr := salticidae.NewError() + for _, ip := range ips { + HandshakeNet.log.Verbo("Trying to adding peer %s", ip) + addr := salticidae.NewNetAddrFromIPPortString(ip.String(), false, &cErr) + if cErr.GetCode() == 0 && !HandshakeNet.myAddr.IsEq(addr) { // Make sure not to connect to myself + ip := toIPDesc(addr) + + if !HandshakeNet.pending.ContainsIP(addr) && !HandshakeNet.connections.ContainsIP(addr) { + HandshakeNet.log.Debug("Adding peer %s", ip) + HandshakeNet.net.AddPeer(addr) + } + } + addr.Free() + } +} + +func getMsgCert(_conn *C.struct_msgnetwork_conn_t) ids.ShortID { + conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn)) + return getCert(conn.GetPeerCert()) +} + +func getPeerCert(_conn *C.struct_peernetwork_conn_t) ids.ShortID { + conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn)) + return getCert(conn.GetPeerCert()) +} + +func getCert(cert salticidae.X509) ids.ShortID { + der := cert.GetDer(false) + defer der.Free() + + certDS := salticidae.NewDataStreamMovedFromByteArray(der, false) + defer certDS.Free() + + certBytes := certDS.GetDataInPlace(certDS.Size()).Get() + certID, err := ids.ToShortID(hashing.PubkeyBytesToAddress(certBytes)) + HandshakeNet.log.AssertNoError(err) + return certID +} + +// checkCompatibility Check to make sure that the peer and I speak the same language. +func checkCompatibility(myVersion string, peerVersion string) bool { + // At the moment, we are all compatible. 
+ return true +} + +func toAddr(ip utils.IPDesc, autoFree bool) salticidae.NetAddr { + err := salticidae.NewError() + addr := salticidae.NewNetAddrFromIPPortString(ip.String(), autoFree, &err) + HandshakeNet.log.AssertTrue(err.GetCode() == 0, "IP Failed parsing") + return addr +} +func toShortID(ip utils.IPDesc) ids.ShortID { + return ids.NewShortID(hashing.ComputeHash160Array([]byte(ip.String()))) +} diff --git a/networking/handshake_metrics.go b/networking/handshake_metrics.go new file mode 100644 index 0000000..485359c --- /dev/null +++ b/networking/handshake_metrics.go @@ -0,0 +1,104 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/utils/logging" +) + +type handshakeMetrics struct { + numPeers prometheus.Gauge + + numGetVersionSent, numGetVersionReceived, + numVersionSent, numVersionReceived, + numGetPeerlistSent, numGetPeerlistReceived, + numPeerlistSent, numPeerlistReceived prometheus.Counter +} + +func (hm *handshakeMetrics) Initialize(log logging.Logger, registerer prometheus.Registerer) { + hm.numPeers = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: "gecko", + Name: "peers", + Help: "Number of network peers", + }) + hm.numGetVersionSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_version_sent", + Help: "Number of get_version messages sent", + }) + hm.numGetVersionReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_version_received", + Help: "Number of get_version messages received", + }) + hm.numVersionSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "version_sent", + Help: "Number of version messages sent", + }) + hm.numVersionReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "version_received", + Help: "Number of 
version messages received", + }) + hm.numGetPeerlistSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_peerlist_sent", + Help: "Number of get_peerlist messages sent", + }) + hm.numGetPeerlistReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_peerlist_received", + Help: "Number of get_peerlist messages received", + }) + hm.numPeerlistSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "peerlist_sent", + Help: "Number of peerlist messages sent", + }) + hm.numPeerlistReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "peerlist_received", + Help: "Number of peerlist messages received", + }) + + if err := registerer.Register(hm.numPeers); err != nil { + log.Error("Failed to register peers statistics due to %s", err) + } + if err := registerer.Register(hm.numGetVersionSent); err != nil { + log.Error("Failed to register get_version_sent statistics due to %s", err) + } + if err := registerer.Register(hm.numGetVersionReceived); err != nil { + log.Error("Failed to register get_version_received statistics due to %s", err) + } + if err := registerer.Register(hm.numVersionSent); err != nil { + log.Error("Failed to register version_sent statistics due to %s", err) + } + if err := registerer.Register(hm.numVersionReceived); err != nil { + log.Error("Failed to register version_received statistics due to %s", err) + } + if err := registerer.Register(hm.numGetPeerlistSent); err != nil { + log.Error("Failed to register get_peerlist_sent statistics due to %s", err) + } + if err := registerer.Register(hm.numGetPeerlistReceived); err != nil { + log.Error("Failed to register get_peerlist_received statistics due to %s", err) + } + if err := registerer.Register(hm.numPeerlistSent); err != nil { + log.Error("Failed to register peerlist_sent statistics due to %s", err) + } + if err := registerer.Register(hm.numPeerlistReceived); err != 
nil { + log.Error("Failed to register peerlist_received statistics due to %s", err) + } +} diff --git a/networking/msg.go b/networking/msg.go new file mode 100644 index 0000000..9ea49b2 --- /dev/null +++ b/networking/msg.go @@ -0,0 +1,30 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "github.com/ava-labs/salticidae-go" +) + +// Msg represents a set of fields that can be serialized into a byte stream +type Msg interface { + Op() salticidae.Opcode + Get(Field) interface{} + DataStream() salticidae.DataStream +} + +type msg struct { + op salticidae.Opcode + ds salticidae.DataStream + fields map[Field]interface{} +} + +// Field returns the value of the specified field in this message +func (msg *msg) Op() salticidae.Opcode { return msg.op } + +// Field returns the value of the specified field in this message +func (msg *msg) Get(field Field) interface{} { return msg.fields[field] } + +// Bytes returns this message in bytes +func (msg *msg) DataStream() salticidae.DataStream { return msg.ds } diff --git a/networking/voting_handlers.go b/networking/voting_handlers.go new file mode 100644 index 0000000..ecc7402 --- /dev/null +++ b/networking/voting_handlers.go @@ -0,0 +1,648 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package networking + +// #include "salticidae/network.h" +// void getAcceptedFrontier(msg_t *, msgnetwork_conn_t *, void *); +// void acceptedFrontier(msg_t *, msgnetwork_conn_t *, void *); +// void getAccepted(msg_t *, msgnetwork_conn_t *, void *); +// void accepted(msg_t *, msgnetwork_conn_t *, void *); +// void get(msg_t *, msgnetwork_conn_t *, void *); +// void put(msg_t *, msgnetwork_conn_t *, void *); +// void pushQuery(msg_t *, msgnetwork_conn_t *, void *); +// void pullQuery(msg_t *, msgnetwork_conn_t *, void *); +// void chits(msg_t *, msgnetwork_conn_t *, void *); +import "C" + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" +) + +var ( + // VotingNet implements the SenderExternal interface. + VotingNet = Voting{} +) + +var ( + errConnectionDropped = errors.New("connection dropped before receiving message") +) + +// Voting implements the SenderExternal interface with a c++ library. +type Voting struct { + votingMetrics + + log logging.Logger + vdrs validators.Set + net salticidae.PeerNetwork + conns Connections + + router router.Router + executor timer.Executor +} + +// Initialize to the c networking library. Should only be called once ever. 
+func (s *Voting) Initialize(log logging.Logger, vdrs validators.Set, peerNet salticidae.PeerNetwork, conns Connections, router router.Router, registerer prometheus.Registerer) { + log.AssertTrue(s.net == nil, "Should only register network handlers once") + log.AssertTrue(s.conns == nil, "Should only set connections once") + log.AssertTrue(s.router == nil, "Should only set the router once") + + s.log = log + s.vdrs = vdrs + s.net = peerNet + s.conns = conns + s.router = router + + s.votingMetrics.Initialize(log, registerer) + + net := peerNet.AsMsgNetwork() + + net.RegHandler(GetAcceptedFrontier, salticidae.MsgNetworkMsgCallback(C.getAcceptedFrontier), nil) + net.RegHandler(AcceptedFrontier, salticidae.MsgNetworkMsgCallback(C.acceptedFrontier), nil) + net.RegHandler(GetAccepted, salticidae.MsgNetworkMsgCallback(C.getAccepted), nil) + net.RegHandler(Accepted, salticidae.MsgNetworkMsgCallback(C.accepted), nil) + net.RegHandler(Get, salticidae.MsgNetworkMsgCallback(C.get), nil) + net.RegHandler(Put, salticidae.MsgNetworkMsgCallback(C.put), nil) + net.RegHandler(PushQuery, salticidae.MsgNetworkMsgCallback(C.pushQuery), nil) + net.RegHandler(PullQuery, salticidae.MsgNetworkMsgCallback(C.pullQuery), nil) + net.RegHandler(Chits, salticidae.MsgNetworkMsgCallback(C.chits), nil) + + s.executor.Initialize() + go log.RecoverAndPanic(s.executor.Dispatch) +} + +// Shutdown threads +func (s *Voting) Shutdown() { s.executor.Stop() } + +// Accept is called after every consensus decision +func (s *Voting) Accept(chainID, containerID ids.ID, container []byte) error { + addrs := []salticidae.NetAddr(nil) + + allAddrs, allIDs := s.conns.RawConns() + for i, id := range allIDs { + if !s.vdrs.Contains(id) { + addrs = append(addrs, allAddrs[i]) + } + } + + build := Builder{} + msg, err := build.Put(chainID, 0, containerID, container) + if err != nil { + return fmt.Errorf("Attempted to pack too large of a Put message.\nContainer length: %d: %w", len(container), err) + } + + 
s.log.Verbo("Sending a Put message to non-validators."+ + "\nNumber of Non-Validators: %d"+ + "\nChain: %s"+ + "\nContainer ID: %s"+ + "\nContainer:\n%s", + len(addrs), + chainID, + containerID, + formatting.DumpBytes{Bytes: container}, + ) + s.send(msg, addrs...) + s.numPutSent.Add(float64(len(addrs))) + return nil +} + +// GetAcceptedFrontier implements the Sender interface. +func (s *Voting) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32) { + addrs := []salticidae.NetAddr(nil) + validatorIDList := validatorIDs.List() + for _, validatorID := range validatorIDList { + vID := validatorID + if addr, exists := s.conns.GetIP(vID); exists { + addrs = append(addrs, addr) + s.log.Verbo("Sending a GetAcceptedFrontier to %s", toIPDesc(addr)) + } else { + s.log.Debug("Attempted to send a GetAcceptedFrontier message to a disconnected validator: %s", vID) + s.executor.Add(func() { s.router.GetAcceptedFrontierFailed(vID, chainID, requestID) }) + } + } + + build := Builder{} + msg, err := build.GetAcceptedFrontier(chainID, requestID) + s.log.AssertNoError(err) + + s.log.Verbo("Sending a GetAcceptedFrontier message."+ + "\nNumber of Validators: %d"+ + "\nChain: %s"+ + "\nRequest ID: %d", + len(addrs), + chainID, + requestID, + ) + s.send(msg, addrs...) + s.numGetAcceptedFrontierSent.Add(float64(len(addrs))) +} + +// AcceptedFrontier implements the Sender interface. 
+func (s *Voting) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + addr, exists := s.conns.GetIP(validatorID) + if !exists { + s.log.Debug("Attempted to send an AcceptedFrontier message to a disconnected validator: %s", validatorID) + return // Validator is not connected + } + + build := Builder{} + msg, err := build.AcceptedFrontier(chainID, requestID, containerIDs) + if err != nil { + s.log.Error("Attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d", containerIDs.Len()) + return // Packing message failed + } + + s.log.Verbo("Sending an AcceptedFrontier message."+ + "\nValidator: %s"+ + "\nDestination: %s"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer IDs: %s", + validatorID, + toIPDesc(addr), + chainID, + requestID, + containerIDs, + ) + s.send(msg, addr) + s.numAcceptedFrontierSent.Inc() +} + +// GetAccepted implements the Sender interface. +func (s *Voting) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + addrs := []salticidae.NetAddr(nil) + validatorIDList := validatorIDs.List() + for _, validatorID := range validatorIDList { + vID := validatorID + if addr, exists := s.conns.GetIP(validatorID); exists { + addrs = append(addrs, addr) + s.log.Verbo("Sending a GetAccepted to %s", toIPDesc(addr)) + } else { + s.log.Debug("Attempted to send a GetAccepted message to a disconnected validator: %s", vID) + s.executor.Add(func() { s.router.GetAcceptedFailed(vID, chainID, requestID) }) + } + } + + build := Builder{} + msg, err := build.GetAccepted(chainID, requestID, containerIDs) + if err != nil { + for _, addr := range addrs { + if validatorID, exists := s.conns.GetID(addr); exists { + s.executor.Add(func() { s.router.GetAcceptedFailed(validatorID, chainID, requestID) }) + } + } + s.log.Debug("Attempted to pack too large of a GetAccepted message.\nNumber of containerIDs: %d", containerIDs.Len()) + return // Packing message 
failed + } + + s.log.Verbo("Sending a GetAccepted message."+ + "\nNumber of Validators: %d"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer IDs:%s", + len(addrs), + chainID, + requestID, + containerIDs, + ) + s.send(msg, addrs...) + s.numGetAcceptedSent.Add(float64(len(addrs))) +} + +// Accepted implements the Sender interface. +func (s *Voting) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + addr, exists := s.conns.GetIP(validatorID) + if !exists { + s.log.Debug("Attempted to send an Accepted message to a disconnected validator: %s", validatorID) + return // Validator is not connected + } + + build := Builder{} + msg, err := build.Accepted(chainID, requestID, containerIDs) + if err != nil { + s.log.Error("Attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d", containerIDs.Len()) + return // Packing message failed + } + + s.log.Verbo("Sending an Accepted message."+ + "\nValidator: %s"+ + "\nDestination: %s"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer IDs: %s", + validatorID, + toIPDesc(addr), + chainID, + requestID, + containerIDs, + ) + s.send(msg, addr) + s.numAcceptedSent.Inc() +} + +// Get implements the Sender interface. 
+func (s *Voting) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) { + addr, exists := s.conns.GetIP(validatorID) + if !exists { + s.log.Debug("Attempted to send a Get message to a disconnected validator: %s", validatorID) + s.executor.Add(func() { s.router.GetFailed(validatorID, chainID, requestID, containerID) }) + return // Validator is not connected + } + + build := Builder{} + msg, err := build.Get(chainID, requestID, containerID) + s.log.AssertNoError(err) + + s.log.Verbo("Sending a Get message."+ + "\nValidator: %s"+ + "\nDestination: %s"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer ID: %s", + validatorID, + toIPDesc(addr), + chainID, + requestID, + containerID, + ) + s.send(msg, addr) + s.numGetSent.Inc() +} + +// Put implements the Sender interface. +func (s *Voting) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) { + addr, exists := s.conns.GetIP(validatorID) + if !exists { + s.log.Debug("Attempted to send a Container message to a disconnected validator: %s", validatorID) + return // Validator is not connected + } + + build := Builder{} + msg, err := build.Put(chainID, requestID, containerID, container) + if err != nil { + s.log.Error("Attempted to pack too large of a Put message.\nContainer length: %d", len(container)) + return // Packing message failed + } + + s.log.Verbo("Sending a Container message."+ + "\nValidator: %s"+ + "\nDestination: %s"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer ID: %s"+ + "\nContainer:\n%s", + validatorID, + toIPDesc(addr), + chainID, + requestID, + containerID, + formatting.DumpBytes{Bytes: container}, + ) + s.send(msg, addr) + s.numPutSent.Inc() +} + +// PushQuery implements the Sender interface. 
+func (s *Voting) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) { + addrs := []salticidae.NetAddr(nil) + validatorIDList := validatorIDs.List() + for _, validatorID := range validatorIDList { + vID := validatorID + if addr, exists := s.conns.GetIP(vID); exists { + addrs = append(addrs, addr) + s.log.Verbo("Sending a PushQuery to %s", toIPDesc(addr)) + } else { + s.log.Debug("Attempted to send a PushQuery message to a disconnected validator: %s", vID) + s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) }) + } + } + + build := Builder{} + msg, err := build.PushQuery(chainID, requestID, containerID, container) + if err != nil { + for _, addr := range addrs { + if validatorID, exists := s.conns.GetID(addr); exists { + s.executor.Add(func() { s.router.QueryFailed(validatorID, chainID, requestID) }) + } + } + s.log.Error("Attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container)) + return // Packing message failed + } + + s.log.Verbo("Sending a PushQuery message."+ + "\nNumber of Validators: %d"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer ID: %s"+ + "\nContainer:\n%s", + len(addrs), + chainID, + requestID, + containerID, + formatting.DumpBytes{Bytes: container}, + ) + s.send(msg, addrs...) + s.numPushQuerySent.Add(float64(len(addrs))) +} + +// PullQuery implements the Sender interface. 
+func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID) { + addrs := []salticidae.NetAddr(nil) + validatorIDList := validatorIDs.List() + for _, validatorID := range validatorIDList { + vID := validatorID + if addr, exists := s.conns.GetIP(vID); exists { + addrs = append(addrs, addr) + s.log.Verbo("Sending a PushQuery to %s", toIPDesc(addr)) + } else { + s.log.Warn("Attempted to send a PushQuery message to a disconnected validator: %s", vID) + s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) }) + } + } + + build := Builder{} + msg, err := build.PullQuery(chainID, requestID, containerID) + s.log.AssertNoError(err) + + s.log.Verbo("Sending a PullQuery message."+ + "\nNumber of Validators: %d"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nContainer ID: %s", + len(addrs), + chainID, + requestID, + containerID, + ) + s.send(msg, addrs...) + s.numPullQuerySent.Add(float64(len(addrs))) +} + +// Chits implements the Sender interface. 
+func (s *Voting) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) { + addr, exists := s.conns.GetIP(validatorID) + if !exists { + s.log.Debug("Attempted to send a Chits message to a disconnected validator: %s", validatorID) + return // Validator is not connected + } + + build := Builder{} + msg, err := build.Chits(chainID, requestID, votes) + if err != nil { + s.log.Error("Attempted to pack too large of a Chits message.\nChits length: %d", votes.Len()) + return // Packing message failed + } + + s.log.Verbo("Sending a Chits message."+ + "\nValidator: %s"+ + "\nDestination: %s"+ + "\nChain: %s"+ + "\nRequest ID: %d"+ + "\nNumber of Chits: %d", + validatorID, + toIPDesc(addr), + chainID, + requestID, + votes.Len(), + ) + s.send(msg, addr) + s.numChitsSent.Inc() +} + +func (s *Voting) send(msg Msg, addrs ...salticidae.NetAddr) { + ds := msg.DataStream() + defer ds.Free() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + defer ba.Free() + cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false) + defer cMsg.Free() + + switch len(addrs) { + case 0: + case 1: + s.net.SendMsg(cMsg, addrs[0]) + default: + s.net.MulticastMsgByMove(cMsg, addrs) + } +} + +// getAcceptedFrontier handles the recept of a getAcceptedFrontier container +// message for a chain +//export getAcceptedFrontier +func getAcceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numGetAcceptedFrontierReceived.Inc() + + validatorID, chainID, requestID, _, err := VotingNet.sanitize(_msg, _conn, GetAcceptedFrontier) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + VotingNet.router.GetAcceptedFrontier(validatorID, chainID, requestID) +} + +// acceptedFrontier handles the recept of an acceptedFrontier message +//export acceptedFrontier +func acceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + 
VotingNet.numAcceptedFrontierReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, AcceptedFrontier) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerIDs := ids.Set{} + for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) { + containerID, err := ids.ToID(containerIDBytes) + if err != nil { + VotingNet.log.Warn("Error parsing ContainerID: %v", containerIDBytes) + return + } + containerIDs.Add(containerID) + } + + VotingNet.router.AcceptedFrontier(validatorID, chainID, requestID, containerIDs) +} + +// getAccepted handles the recept of a getAccepted message +//export getAccepted +func getAccepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numGetAcceptedReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, GetAccepted) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerIDs := ids.Set{} + for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) { + containerID, err := ids.ToID(containerIDBytes) + if err != nil { + VotingNet.log.Warn("Error parsing ContainerID: %v", containerIDBytes) + return + } + containerIDs.Add(containerID) + } + + VotingNet.router.GetAccepted(validatorID, chainID, requestID, containerIDs) +} + +// accepted handles the recept of an accepted message +//export accepted +func accepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numAcceptedReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Accepted) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerIDs := ids.Set{} + for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) { + containerID, err := ids.ToID(containerIDBytes) + if err != nil { + VotingNet.log.Warn("Error 
parsing ContainerID: %v", containerIDBytes) + return + } + containerIDs.Add(containerID) + } + + VotingNet.router.Accepted(validatorID, chainID, requestID, containerIDs) +} + +// get handles the recept of a get container message for a chain +//export get +func get(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numGetReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Get) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte)) + + VotingNet.router.Get(validatorID, chainID, requestID, containerID) +} + +// put handles the receipt of a container message +//export put +func put(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numPutReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Put) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte)) + + containerBytes := msg.Get(ContainerBytes).([]byte) + + VotingNet.router.Put(validatorID, chainID, requestID, containerID, containerBytes) +} + +// pushQuery handles the recept of a pull query message +//export pushQuery +func pushQuery(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numPushQueryReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PushQuery) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte)) + + containerBytes := msg.Get(ContainerBytes).([]byte) + + VotingNet.router.PushQuery(validatorID, chainID, requestID, containerID, containerBytes) +} + +// pullQuery handles the recept of a query message +//export pullQuery +func pullQuery(_msg 
*C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numPullQueryReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PullQuery) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte)) + + VotingNet.router.PullQuery(validatorID, chainID, requestID, containerID) +} + +// chits handles the recept of a chits message +//export chits +func chits(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + VotingNet.numChitsReceived.Inc() + + validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Chits) + if err != nil { + VotingNet.log.Error("Failed to sanitize message due to: %s", err) + return + } + + votes := ids.Set{} + for _, voteBytes := range msg.Get(ContainerIDs).([][]byte) { + vote, err := ids.ToID(voteBytes) + if err != nil { + VotingNet.log.Warn("Error parsing chit: %v", voteBytes) + return + } + votes.Add(vote) + } + + VotingNet.router.Chits(validatorID, chainID, requestID, votes) +} + +func (s *Voting) sanitize(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, op salticidae.Opcode) (ids.ShortID, ids.ID, uint32, Msg, error) { + conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn((*C.peernetwork_conn_t)(_conn))) + addr := conn.GetPeerAddr(false) + defer addr.Free() + if addr.IsNull() { + return ids.ShortID{}, ids.ID{}, 0, nil, errConnectionDropped + } + s.log.Verbo("Receiving message from %s", toIPDesc(addr)) + + validatorID, exists := s.conns.GetID(addr) + if !exists { + return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("message received from an un-registered source: %s", toIPDesc(addr)) + } + + msg := salticidae.MsgFromC(salticidae.CMsg(_msg)) + codec := Codec{} + pMsg, err := codec.Parse(op, msg.GetPayloadByMove()) + if err != nil { + return ids.ShortID{}, ids.ID{}, 0, nil, err // The message couldn't be parsed + } + 
+ chainID, err := ids.ToID(pMsg.Get(ChainID).([]byte)) + s.log.AssertNoError(err) + + requestID := pMsg.Get(RequestID).(uint32) + + return validatorID, chainID, requestID, pMsg, nil +} diff --git a/networking/voting_metrics.go b/networking/voting_metrics.go new file mode 100644 index 0000000..54e54fa --- /dev/null +++ b/networking/voting_metrics.go @@ -0,0 +1,188 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/utils/logging" +) + +type votingMetrics struct { + numGetAcceptedFrontierSent, numGetAcceptedFrontierReceived, + numAcceptedFrontierSent, numAcceptedFrontierReceived, + numGetAcceptedSent, numGetAcceptedReceived, + numAcceptedSent, numAcceptedReceived, + numGetSent, numGetReceived, + numPutSent, numPutReceived, + numPushQuerySent, numPushQueryReceived, + numPullQuerySent, numPullQueryReceived, + numChitsSent, numChitsReceived prometheus.Counter +} + +func (vm *votingMetrics) Initialize(log logging.Logger, registerer prometheus.Registerer) { + vm.numGetAcceptedFrontierSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_accepted_frontier_sent", + Help: "Number of get accepted frontier messages sent", + }) + vm.numGetAcceptedFrontierReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_accepted_frontier_received", + Help: "Number of get accepted frontier messages received", + }) + vm.numAcceptedFrontierSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "accepted_frontier_sent", + Help: "Number of accepted frontier messages sent", + }) + vm.numAcceptedFrontierReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "accepted_frontier_received", + Help: "Number of accepted frontier messages received", + }) + vm.numGetAcceptedSent = 
prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_accepted_sent", + Help: "Number of get accepted messages sent", + }) + vm.numGetAcceptedReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_accepted_received", + Help: "Number of get accepted messages received", + }) + vm.numAcceptedSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "accepted_sent", + Help: "Number of accepted messages sent", + }) + vm.numAcceptedReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "accepted_received", + Help: "Number of accepted messages received", + }) + vm.numGetSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_sent", + Help: "Number of get messages sent", + }) + vm.numGetReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "get_received", + Help: "Number of get messages received", + }) + vm.numPutSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "put_sent", + Help: "Number of put messages sent", + }) + vm.numPutReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "put_received", + Help: "Number of put messages received", + }) + vm.numPushQuerySent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "push_query_sent", + Help: "Number of push query messages sent", + }) + vm.numPushQueryReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "push_query_received", + Help: "Number of push query messages received", + }) + vm.numPullQuerySent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "pull_query_sent", + Help: "Number of pull query messages sent", + }) + vm.numPullQueryReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "pull_query_received", + Help: 
"Number of pull query messages received", + }) + vm.numChitsSent = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "chits_sent", + Help: "Number of chits messages sent", + }) + vm.numChitsReceived = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "gecko", + Name: "chits_received", + Help: "Number of chits messages received", + }) + + if err := registerer.Register(vm.numGetAcceptedFrontierSent); err != nil { + log.Error("Failed to register get_accepted_frontier_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numGetAcceptedFrontierReceived); err != nil { + log.Error("Failed to register get_accepted_frontier_received statistics due to %s", err) + } + if err := registerer.Register(vm.numAcceptedFrontierSent); err != nil { + log.Error("Failed to register accepted_frontier_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numAcceptedFrontierReceived); err != nil { + log.Error("Failed to register accepted_frontier_received statistics due to %s", err) + } + if err := registerer.Register(vm.numGetAcceptedSent); err != nil { + log.Error("Failed to register get_accepted_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numGetAcceptedReceived); err != nil { + log.Error("Failed to register get_accepted_received statistics due to %s", err) + } + if err := registerer.Register(vm.numAcceptedSent); err != nil { + log.Error("Failed to register accepted_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numAcceptedReceived); err != nil { + log.Error("Failed to register accepted_received statistics due to %s", err) + } + if err := registerer.Register(vm.numGetSent); err != nil { + log.Error("Failed to register get_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numGetReceived); err != nil { + log.Error("Failed to register get_received statistics due to %s", err) + } + if err := registerer.Register(vm.numPutSent); err != nil { + 
log.Error("Failed to register put_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numPutReceived); err != nil { + log.Error("Failed to register put_received statistics due to %s", err) + } + if err := registerer.Register(vm.numPushQuerySent); err != nil { + log.Error("Failed to register push_query_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numPushQueryReceived); err != nil { + log.Error("Failed to register push_query_received statistics due to %s", err) + } + if err := registerer.Register(vm.numPullQuerySent); err != nil { + log.Error("Failed to register pull_query_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numPullQueryReceived); err != nil { + log.Error("Failed to register pull_query_received statistics due to %s", err) + } + if err := registerer.Register(vm.numChitsSent); err != nil { + log.Error("Failed to register chits_sent statistics due to %s", err) + } + if err := registerer.Register(vm.numChitsReceived); err != nil { + log.Error("Failed to register chits_received statistics due to %s", err) + } +} diff --git a/networking/xputtest/c_client.go b/networking/xputtest/c_client.go new file mode 100644 index 0000000..78c3b87 --- /dev/null +++ b/networking/xputtest/c_client.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package xputtest + +// #include "salticidae/network.h" +// void issueTx(msg_t *, msgnetwork_conn_t *, void *); +import "C" + +import ( + "unsafe" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" +) + +// CClientHandler is the struct that will be accessed on event calls +var CClientHandler CClient + +// CClient manages a client network using the c networking library +type CClient struct { + issuer *Issuer + net salticidae.MsgNetwork +} + +// Initialize to the c networking library. This should only be called once +// during setup of the node. +func (h *CClient) Initialize(net salticidae.MsgNetwork, issuer *Issuer) { + h.issuer = issuer + h.net = net + + net.RegHandler(networking.IssueTx, salticidae.MsgNetworkMsgCallback(C.issueTx), nil) +} + +func (h *CClient) send(msg networking.Msg, conn salticidae.MsgNetworkConn) { + ds := msg.DataStream() + defer ds.Free() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + defer ba.Free() + cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false) + defer cMsg.Free() + + h.net.SendMsg(cMsg, conn) +} + +// issueTx handles the recept of an IssueTx message +//export issueTx +func issueTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + msg := salticidae.MsgFromC(salticidae.CMsg(_msg)) + + build := networking.Builder{} + pMsg, err := build.Parse(networking.IssueTx, msg.GetPayloadByMove()) + if err != nil { + return + } + + chainID, _ := ids.ToID(pMsg.Get(networking.ChainID).([]byte)) + + txBytes := pMsg.Get(networking.Tx).([]byte) + + txID := ids.NewID(hashing.ComputeHash256Array(txBytes)) + + conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn)).Copy(false) + CClientHandler.issuer.IssueTx(chainID, txBytes, func(status choices.Status) { + build := networking.Builder{} + msg, _ := build.DecidedTx(txID, status) 
+ + CClientHandler.send(msg, conn) + conn.Free() + }) +} diff --git a/networking/xputtest/issuer.go b/networking/xputtest/issuer.go new file mode 100644 index 0000000..933f026 --- /dev/null +++ b/networking/xputtest/issuer.go @@ -0,0 +1,71 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package xputtest + +import ( + "sync" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" +) + +type issuableVM interface { + IssueTx([]byte, func(choices.Status)) (ids.ID, error) +} + +// Issuer manages all the chain transaction flushing. +type Issuer struct { + lock sync.Mutex + vms map[[32]byte]issuableVM + locks map[[32]byte]sync.Locker + + callbacks chan func() +} + +// Initialize this flusher +func (i *Issuer) Initialize() { + i.lock.Lock() + defer i.lock.Unlock() + i.vms = make(map[[32]byte]issuableVM) + i.locks = make(map[[32]byte]sync.Locker) + i.callbacks = make(chan func(), 1000) + + go func() { + for callback := range i.callbacks { + callback() + } + }() +} + +// RegisterChain implements the registrant +func (i *Issuer) RegisterChain(ctx *snow.Context, vm interface{}) { + i.lock.Lock() + defer i.lock.Unlock() + + key := ctx.ChainID.Key() + + switch vm := vm.(type) { + case issuableVM: + i.vms[key] = vm + i.locks[key] = &ctx.Lock + } +} + +// IssueTx issue the transaction to the chain and register the timeout. +func (i *Issuer) IssueTx(chainID ids.ID, tx []byte, finalized func(choices.Status)) { + i.lock.Lock() + defer i.lock.Unlock() + + key := chainID.Key() + if lock, exists := i.locks[key]; exists { + i.callbacks <- func() { + lock.Lock() + defer lock.Unlock() + if vm, exists := i.vms[key]; exists { + vm.IssueTx(tx, finalized) + } + } + } +} diff --git a/node/config.go b/node/config.go new file mode 100644 index 0000000..76c9e48 --- /dev/null +++ b/node/config.go @@ -0,0 +1,71 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package node + +import ( + "github.com/ava-labs/go-ethereum/p2p/nat" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/logging" +) + +// Config contains all of the configurations of an Ava node. +type Config struct { + // protocol to use for opening the network interface + Nat nat.Interface + + // ID of the network this node should connect to + NetworkID uint32 + + // Transaction fee configuration + AvaTxFee uint64 + + // Assertions configuration + EnableAssertions bool + + // Crypto configuration + EnableCrypto bool + + // Database to use for the node + DB database.Database + + // Staking configuration + StakingIP utils.IPDesc + EnableStaking bool + StakingKeyFile string + StakingCertFile string + + // Bootstrapping configuration + BootstrapPeers []*Peer + + // HTTP configuration + HTTPPort uint16 + EnableHTTPS bool + HTTPSKeyFile string + HTTPSCertFile string + + // Enable/Disable APIs + AdminAPIEnabled bool + KeystoreAPIEnabled bool + MetricsAPIEnabled bool + + // Logging configuration + LoggingConfig logging.Config + + // Consensus configuration + ConsensusParams avalanche.Parameters + + // Throughput configuration + ThroughputPort uint16 + ThroughputServerEnabled bool + + // IPCEnabled configuration + IPCEnabled bool + + // Router that is used to handle incoming consensus messages + ConsensusRouter router.Router +} diff --git a/node/node.go b/node/node.go new file mode 100644 index 0000000..7a37690 --- /dev/null +++ b/node/node.go @@ -0,0 +1,540 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package node + +// #include "salticidae/network.h" +// void onTerm(int sig, void *); +// void errorHandler(SalticidaeCError *, bool, void *); +import "C" + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "sync" + "time" + "unsafe" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/api" + "github.com/ava-labs/gecko/api/admin" + "github.com/ava-labs/gecko/api/ipcs" + "github.com/ava-labs/gecko/api/keystore" + "github.com/ava-labs/gecko/api/metrics" + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/networking/xputtest" + "github.com/ava-labs/gecko/snow/triggers" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/secp256k1fx" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/vms/timestampvm" +) + +const ( + defaultChannelSize = 1 + externalRequestTimeout = 2 * time.Second + internalRequestTimeout = 250 * time.Millisecond +) + +// MainNode is the reference for node callbacks +var MainNode = Node{} + +// Node is an instance of an Ava node. 
+type Node struct { + Log logging.Logger + LogFactory logging.Factory + HTTPLog logging.Logger + + // This node's unique ID used when communicating with other nodes + // (in consensus, for example) + ID ids.ShortID + + // Storage for this node + DB database.Database + + // Handles calls to Keystore API + keystoreServer keystore.Keystore + + // Manages creation of blockchains and routing messages to them + chainManager chains.Manager + + // Manages Virtual Machines + vmManager vms.Manager + + // dispatcher for events as they happen in consensus + DecisionDispatcher *triggers.EventDispatcher + ConsensusDispatcher *triggers.EventDispatcher + + // Event loop manager + EC salticidae.EventContext + // Network that manages validator peers + PeerNet salticidae.PeerNetwork + // Network that manages clients + ClientNet salticidae.MsgNetwork // TODO: Remove + + // API that handles new connections + ValidatorAPI *networking.Handshake + // API that handles voting messages + ConsensusAPI *networking.Voting + + // current validators of the network + vdrs validators.Manager + + // APIs that handle client messages + // TODO: Remove + Issuer *xputtest.Issuer + CClientAPI *xputtest.CClient + + // Handles HTTP API calls + APIServer api.Server + + // This node's configuration + Config *Config +} + +/* + ****************************************************************************** + *************************** P2P Networking Section *************************** + ****************************************************************************** + */ + +//export onTerm +func onTerm(C.int, unsafe.Pointer) { + MainNode.Log.Debug("Terminate signal received") + MainNode.EC.Stop() +} + +//export errorHandler +func errorHandler(_err *C.struct_SalticidaeCError, fatal C.bool, _ unsafe.Pointer) { + err := (*salticidae.Error)(unsafe.Pointer(_err)) + if fatal { + MainNode.Log.Fatal("Error during async call: %s", salticidae.StrError(err.GetCode())) + MainNode.EC.Stop() + return + } + 
MainNode.Log.Error("Error during async call: %s", salticidae.StrError(err.GetCode())) +} + +func (n *Node) initNetlib() error { + // Create main event context + n.EC = salticidae.NewEventContext() + + // Set up interrupt signal and terminate signal handlers + evInt := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil) + evInt.Add(salticidae.SIGINT) + evTerm := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil) + evTerm.Add(salticidae.SIGTERM) + + // Create peer network config, may have tls enabled + peerConfig := salticidae.NewPeerNetworkConfig() + if n.Config.EnableStaking { + msgConfig := peerConfig.AsMsgNetworkConfig() + msgConfig.EnableTLS(true) + msgConfig.TLSKeyFile(n.Config.StakingKeyFile) + msgConfig.TLSCertFile(n.Config.StakingCertFile) + } + + // Create the peer network + err := salticidae.NewError() + n.PeerNet = salticidae.NewPeerNetwork(n.EC, peerConfig, &err) + if code := err.GetCode(); code != 0 { + return errors.New(salticidae.StrError(code)) + } + // Add peer network error handling + net := n.PeerNet.AsMsgNetwork() + net.RegErrorHandler(salticidae.MsgNetworkErrorCallback(C.errorHandler), nil) + + if n.Config.ThroughputServerEnabled { + // Create the client network + msgConfig := salticidae.NewMsgNetworkConfig() + n.ClientNet = salticidae.NewMsgNetwork(n.EC, msgConfig, &err) + if code := err.GetCode(); code != 0 { + return errors.New(salticidae.StrError(code)) + } + // Add client network error handling + n.ClientNet.RegErrorHandler(salticidae.MsgNetworkErrorCallback(C.errorHandler), nil) + } + + return nil +} + +func (n *Node) initValidatorNet() error { + // Initialize validator manager and default subnet's validator set + defaultSubnetValidators := validators.NewSet() + if !n.Config.EnableStaking { + defaultSubnetValidators.Add(validators.NewValidator(n.ID, 1)) + } + n.vdrs = validators.NewManager() + n.vdrs.PutValidatorSet(platformvm.DefaultSubnetID, defaultSubnetValidators) + + cErr := 
salticidae.NewError() + serverIP := salticidae.NewNetAddrFromIPPortString(n.Config.StakingIP.String(), true, &cErr) + if code := cErr.GetCode(); code != 0 { + return errors.New(salticidae.StrError(code)) + } + + n.ValidatorAPI = &networking.HandshakeNet + n.ValidatorAPI.Initialize( + /*log=*/ n.Log, + /*validators=*/ defaultSubnetValidators, + /*myIP=*/ serverIP, + /*myID=*/ n.ID, + /*network=*/ n.PeerNet, + /*metrics=*/ n.Config.ConsensusParams.Metrics, + /*enableStaking=*/ n.Config.EnableStaking, + /*networkID=*/ n.Config.NetworkID, + ) + + return nil +} + +func (n *Node) initConsensusNet() { + vdrs, ok := n.vdrs.GetValidatorSet(platformvm.DefaultSubnetID) + n.Log.AssertTrue(ok, "should have initialize the validator set already") + + n.ConsensusAPI = &networking.VotingNet + n.ConsensusAPI.Initialize(n.Log, vdrs, n.PeerNet, n.ValidatorAPI.Connections(), n.chainManager.Router(), n.Config.ConsensusParams.Metrics) + + n.Log.AssertNoError(n.ConsensusDispatcher.Register("gossip", n.ConsensusAPI)) +} + +func (n *Node) initClients() { + n.Issuer = &xputtest.Issuer{} + n.Issuer.Initialize() + + n.CClientAPI = &xputtest.CClientHandler + n.CClientAPI.Initialize(n.ClientNet, n.Issuer) + + n.chainManager.AddRegistrant(n.Issuer) +} + +// StartConsensusServer starts the P2P server this node uses to communicate +// with other nodes +func (n *Node) StartConsensusServer() error { + n.Log.Verbo("starting the consensus server") + + n.PeerNet.AsMsgNetwork().Start() + + err := salticidae.NewError() + + // The IP this node listens on for P2P messaging + serverIP := salticidae.NewNetAddrFromIPPortString(n.Config.StakingIP.String(), true, &err) + if code := err.GetCode(); code != 0 { + return fmt.Errorf("failed to create ip addr: %s", salticidae.StrError(code)) + } + + // Listen for P2P messages + n.PeerNet.Listen(serverIP, &err) + if code := err.GetCode(); code != 0 { + return fmt.Errorf("failed to start consensus server: %s", salticidae.StrError(code)) + } + + // Start a server to 
handle throughput tests if configuration says to. Disabled by default. + if n.Config.ThroughputServerEnabled { + n.ClientNet.Start() + + clientIP := salticidae.NewNetAddrFromIPPortString(fmt.Sprintf("127.0.0.1:%d", n.Config.ThroughputPort), true, &err) + if code := err.GetCode(); code != 0 { + return fmt.Errorf("failed to start xput server: %s", salticidae.StrError(code)) + } + + n.ClientNet.Listen(clientIP, &err) + if code := err.GetCode(); code != 0 { + return fmt.Errorf("failed to listen on xput server: %s", salticidae.StrError(code)) + } + } + + // Add bootstrap nodes to the peer network + for _, peer := range n.Config.BootstrapPeers { + if !peer.IP.Equal(n.Config.StakingIP) { + bootstrapIP := salticidae.NewNetAddrFromIPPortString(peer.IP.String(), true, &err) + if code := err.GetCode(); code != 0 { + return fmt.Errorf("failed to create bootstrap ip addr: %s", salticidae.StrError(code)) + } + n.PeerNet.AddPeer(bootstrapIP) + } else { + n.Log.Error("can't add self as a bootstrapper") + } + } + + return nil +} + +// Dispatch starts the node's servers. +// Returns when the node exits. 
+func (n *Node) Dispatch() { n.EC.Dispatch() } + +/* + ****************************************************************************** + *********************** End P2P Networking Section *************************** + ****************************************************************************** + */ + +func (n *Node) initDatabase() { n.DB = n.Config.DB } + +// Initialize this node's ID +// If staking is disabled, a node's ID is a hash of its IP +// Otherwise, it is a hash of the TLS certificate that this node +// uses for P2P communication +func (n *Node) initNodeID() error { + if !n.Config.EnableStaking { + n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String()))) + n.Log.Info("Set the node's ID to %s", n.ID) + return nil + } + + stakeCert, err := ioutil.ReadFile(n.Config.StakingCertFile) + if err != nil { + return fmt.Errorf("problem reading staking certificate: %w", err) + } + + block, _ := pem.Decode(stakeCert) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return fmt.Errorf("problem parsing staking certificate: %w", err) + } + n.ID, err = ids.ToShortID(hashing.PubkeyBytesToAddress(cert.Raw)) + if err != nil { + return fmt.Errorf("problem deriving staker ID from certificate: %w", err) + } + n.Log.Info("Set node's ID to %s", n.ID) + return nil +} + +// Create the vmManager and register the following vms: +// AVM, EVM, Simple Payments DAG, Simple Payments Chain +// The Platform VM is registered in initStaking because +// its factory needs to reference n.chainManager, which is nil right now +func (n *Node) initVMManager() { + n.vmManager = vms.NewManager(&n.APIServer, n.HTTPLog) + n.vmManager.RegisterVMFactory(avm.ID, &avm.Factory{}) + n.vmManager.RegisterVMFactory(evm.ID, &evm.Factory{}) + n.vmManager.RegisterVMFactory(spdagvm.ID, &spdagvm.Factory{TxFee: n.Config.AvaTxFee}) + n.vmManager.RegisterVMFactory(spchainvm.ID, &spchainvm.Factory{}) + n.vmManager.RegisterVMFactory(secp256k1fx.ID, &secp256k1fx.Factory{}) 
+ n.vmManager.RegisterVMFactory(timestampvm.ID, &timestampvm.Factory{}) +} + +// Create the EventDispatcher used for hooking events +// into the general process flow. +func (n *Node) initEventDispatcher() { + n.DecisionDispatcher = &triggers.EventDispatcher{} + n.DecisionDispatcher.Initialize(n.Log) + + n.ConsensusDispatcher = &triggers.EventDispatcher{} + n.ConsensusDispatcher.Initialize(n.Log) +} + +// Initializes the Platform chain. +// Its genesis data specifies the other chains that should +// be created. +func (n *Node) initChains() { + n.Log.Info("initializing chains") + + vdrs := n.vdrs + if !n.Config.EnableStaking { + defaultSubnetValidators := validators.NewSet() + vdrs = validators.NewManager() + vdrs.PutValidatorSet(platformvm.DefaultSubnetID, defaultSubnetValidators) + } + + n.vmManager.RegisterVMFactory( + /*vmID=*/ platformvm.ID, + /*vmFactory=*/ &platformvm.Factory{ + ChainManager: n.chainManager, + Validators: vdrs, + }, + ) + + beacons := validators.NewSet() + for _, peer := range n.Config.BootstrapPeers { + beacons.Add(validators.NewValidator(peer.ID, 1)) + } + + genesisBytes := genesis.Genesis(n.Config.NetworkID) + + // Create the Platform Chain + n.chainManager.ForceCreateChain(chains.ChainParameters{ + ID: ids.Empty, + GenesisData: genesisBytes, // Specifies other chains to create + VMAlias: platformvm.ID.String(), + CustomBeacons: beacons, + }) +} + +// initAPIServer initializes the server that handles HTTP calls +func (n *Node) initAPIServer() { + n.Log.Info("Initializing API server") + + n.APIServer.Initialize(n.Log, n.LogFactory, n.Config.HTTPPort) + + if n.Config.EnableHTTPS { + n.Log.Debug("Initializing API server with TLS Enabled") + go n.Log.RecoverAndPanic(func() { + if err := n.APIServer.DispatchTLS(n.Config.HTTPSCertFile, n.Config.HTTPSKeyFile); err != nil { + n.Log.Warn("API server initialization failed with %s, attempting to create insecure API server", err) + n.APIServer.Dispatch() + } + }) + } else { + n.Log.Debug("Initializing API 
server with TLS Disabled") + go n.Log.RecoverAndPanic(func() { n.APIServer.Dispatch() }) + } +} + +// Assumes n.DB, n.vdrs all initialized (non-nil) +func (n *Node) initChainManager() { + n.chainManager = chains.New( + n.Log, + n.LogFactory, + n.vmManager, + n.DecisionDispatcher, + n.ConsensusDispatcher, + n.DB, + n.Config.ConsensusRouter, + &networking.VotingNet, + n.Config.ConsensusParams, + n.vdrs, + n.ID, + n.Config.NetworkID, + n.ValidatorAPI, + &n.APIServer, + &n.keystoreServer, + ) + + n.chainManager.AddRegistrant(&n.APIServer) +} + +// initWallet initializes the Wallet service +// Assumes n.APIServer is already set +func (n *Node) initKeystoreAPI() { + n.Log.Info("initializing Keystore API") + keystoreDB := prefixdb.New([]byte("keystore"), n.DB) + n.keystoreServer.Initialize(n.Log, keystoreDB) + keystoreHandler := n.keystoreServer.CreateHandler() + if n.Config.KeystoreAPIEnabled { + n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog) + } +} + +// initMetricsAPI initializes the Metrics API +// Assumes n.APIServer is already set +func (n *Node) initMetricsAPI() { + n.Log.Info("initializing Metrics API") + registry, handler := metrics.NewService() + if n.Config.MetricsAPIEnabled { + n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog) + } + n.Config.ConsensusParams.Metrics = registry +} + +// initAdminAPI initializes the Admin API service +// Assumes n.log, n.chainManager, and n.ValidatorAPI already initialized +func (n *Node) initAdminAPI() { + if n.Config.AdminAPIEnabled { + n.Log.Info("initializing Admin API") + service := admin.NewService(n.Config.NetworkID, n.Log, n.chainManager, n.ValidatorAPI.Connections(), &n.APIServer) + n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog) + } +} + +// initIPCAPI initializes the IPC API service +// Assumes n.log and n.chainManager already initialized +func (n *Node) initIPCAPI() { + if n.Config.IPCEnabled { + n.Log.Info("initializing IPC API") + 
service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer) + n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog) + } +} + +// Give chains and VMs aliases as specified by the genesis information +func (n *Node) initAliases() { + n.Log.Info("initializing aliases") + defaultAliases, chainAliases, vmAliases := genesis.Aliases(n.Config.NetworkID) + for chainIDKey, aliases := range chainAliases { + chainID := ids.NewID(chainIDKey) + for _, alias := range aliases { + n.Log.AssertNoError(n.chainManager.Alias(chainID, alias)) + } + } + for vmIDKey, aliases := range vmAliases { + vmID := ids.NewID(vmIDKey) + for _, alias := range aliases { + n.Log.AssertNoError(n.vmManager.Alias(vmID, alias)) + } + } + for url, aliases := range defaultAliases { + n.APIServer.AddAliases(url, aliases...) + } +} + +// Initialize this node +func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logging.Factory) error { + n.Log = logger + n.LogFactory = logFactory + n.Config = Config + + httpLog, err := logFactory.MakeSubdir("http") + if err != nil { + return fmt.Errorf("problem initializing HTTP logger: %w", err) + } + n.HTTPLog = httpLog + + n.initDatabase() // Set up the node's database + + if err = n.initNodeID(); err != nil { // Derive this node's ID + return fmt.Errorf("problem initializing staker ID: %w", err) + } + + // Start HTTP APIs + n.initAPIServer() // Start the API Server + n.initKeystoreAPI() // Start the Keystore API + n.initMetricsAPI() // Start the Metrics API + + // Start node-to-node consensus server + if err = n.initNetlib(); err != nil { // Set up all networking + return fmt.Errorf("problem initializing networking: %w", err) + } + n.initValidatorNet() // Set up the validator handshake + authentication + n.initVMManager() // Set up the vm manager + n.initEventDispatcher() // Set up the event dispatcher + n.initChainManager() // Set up the chain manager + n.initConsensusNet() // Set up the main consensus network 
+ + // TODO: Remove once API is fully featured for throughput tests + if n.Config.ThroughputServerEnabled { + n.initClients() // Set up the client servers + } + + n.initAdminAPI() // Start the Admin API + n.initIPCAPI() // Start the IPC API + n.initAliases() // Set up aliases + n.initChains() // Start the Platform chain + + return nil +} + +// Shutdown this node +func (n *Node) Shutdown() { + n.Log.Info("shutting down the node") + n.ValidatorAPI.Shutdown() + n.ConsensusAPI.Shutdown() + n.chainManager.Shutdown() +} diff --git a/node/peer.go b/node/peer.go new file mode 100644 index 0000000..627b28f --- /dev/null +++ b/node/peer.go @@ -0,0 +1,17 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package node + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils" +) + +// Peer contains the specification of an Ava node that can be communicated with. +type Peer struct { + // IP of the peer + IP utils.IPDesc + // ID of the peer that can be verified during a handshake + ID ids.ShortID +} diff --git a/scripts/Dockerfile.deploy b/scripts/Dockerfile.deploy new file mode 100644 index 0000000..1f83e6e --- /dev/null +++ b/scripts/Dockerfile.deploy @@ -0,0 +1,11 @@ +# create an image from the local files +FROM golang:1.13.4-buster + +RUN apt-get update && apt-get install -y libssl-dev libuv1-dev curl cmake + +COPY .build_image_gopath $GOPATH/ + +WORKDIR $GOPATH/src/github.com/ava-labs/gecko + +RUN ./scripts/build.sh +RUN ln -sv $GOPATH/src/github.com/ava-labs/gecko/ /gecko diff --git a/scripts/ansible/ansible.cfg b/scripts/ansible/ansible.cfg new file mode 100755 index 0000000..369b749 --- /dev/null +++ b/scripts/ansible/ansible.cfg @@ -0,0 +1,6 @@ +[defaults] +transport = ssh +deprecation_warnings = false + +[ssh_connection] +ssh_args = -o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null diff --git a/scripts/ansible/inventory.yml b/scripts/ansible/inventory.yml new 
file mode 100755 index 0000000..1841bd9 --- /dev/null +++ b/scripts/ansible/inventory.yml @@ -0,0 +1,92 @@ +borealis_bootstrap: + hosts: + bootstrap1: + ansible_host: 3.227.207.132 + staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.key" + staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.crt" + http_tls_enabled: true + http_tls_key_file: "/home/ubuntu/ssl/privkey.pem" + http_tls_cert_file: "/home/ubuntu/ssl/fullchain.pem" + vars: + ansible_connection: ssh + ansible_user: ubuntu + + network_id: "borealis" + api_admin_enabled: true + api_keystore_enabled: true + api_metrics_enabled: true + ava_tx_fee: 0 + assertions_enabled: true + signature_verification_enabled: true + db_enabled: true + db_dir: "/home/ubuntu/db" + http_port: 21000 + http_tls_enabled: false + http_tls_key_file: "" + http_tls_cert_file: "" + bootstrap_ips: "" + bootstrap_ids: "" + staking_port: 21001 + staking_tls_enabled: true + log_dir: "/home/ubuntu/.gecko" + log_level: debug + snow_sample_size: 3 + snow_quorum_size: 2 + snow_virtuous_commit_threshold: 20 + snow_rogue_commit_threshold: 30 + snow_avalanche_num_parents: 5 + snow_avalanche_batch_size: 30 + api_ipcs_enabled: false + xput_server_enabled: false + xput_server_port: 21002 + +borealis_node: + hosts: + node1: + ansible_host: 34.207.133.167 + staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.key" + staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.crt" + node2: + ansible_host: 107.23.241.199 + staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.key" + staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.crt" + node3: + ansible_host: 54.197.215.186 + staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.key" + staking_tls_cert_file: 
"/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.crt" + node4: + ansible_host: 18.234.153.22 + staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.key" + staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.crt" + vars: + ansible_connection: ssh + ansible_user: ubuntu + + network_id: "borealis" + api_admin_enabled: true + api_keystore_enabled: true + api_metrics_enabled: true + ava_tx_fee: 0 + assertions_enabled: true + signature_verification_enabled: true + db_enabled: true + db_dir: "/home/ubuntu/db" + http_port: 21000 + http_tls_enabled: false + http_tls_key_file: "" + http_tls_cert_file: "" + bootstrap_ips: "3.227.207.132:21001" + bootstrap_ids: "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg" + staking_port: 21001 + staking_tls_enabled: true + log_dir: "/home/ubuntu/.gecko" + log_level: debug + snow_sample_size: 3 + snow_quorum_size: 2 + snow_virtuous_commit_threshold: 20 + snow_rogue_commit_threshold: 30 + snow_avalanche_num_parents: 5 + snow_avalanche_batch_size: 30 + api_ipcs_enabled: false + xput_server_enabled: false + xput_server_port: 21002 diff --git a/scripts/ansible/restart_playbook.yml b/scripts/ansible/restart_playbook.yml new file mode 100755 index 0000000..97b8533 --- /dev/null +++ b/scripts/ansible/restart_playbook.yml @@ -0,0 +1,36 @@ + +--- +- name: Update the network + connection: ssh + gather_facts: false + hosts: all + vars: + ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava + repo_folder: ~/go/src/github.com/ava-labs/gecko + repo_name: ava-labs/gecko + tasks: + - name: Kill Node + command: killall ava + ignore_errors: yes + - git: + repo: ssh://git@github.com/{{ repo_name }}.git + dest: "{{ repo_folder }}" + update: yes + - name: Build project + command: ./scripts/build.sh + args: + chdir: "{{ repo_folder }}" + environment: + PATH: /sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/snap/bin + - name: Remove previous database + file: + path: "{{ db_dir }}" + state: 
absent + - name: Remove previous logs + file: + path: "{{ log_dir }}" + state: absent + - name: Start node + shell: "nohup {{ ava_binary }} --network-id={{ network_id }} --api-admin-enabled={{ api_admin_enabled }} --api-keystore-enabled={{ api_keystore_enabled }} --api-metrics-enabled={{ api_metrics_enabled }} --ava-tx-fee={{ ava_tx_fee }} --assertions-enabled={{ assertions_enabled }} --signature-verification-enabled={{ signature_verification_enabled }} --db-enabled={{ db_enabled }} --db-dir={{ db_dir }} --http-port={{ http_port }} --http-tls-enabled={{ http_tls_enabled }} --http-tls-key-file={{ http_tls_key_file }} --http-tls-cert-file={{ http_tls_cert_file }} --bootstrap-ips={{ bootstrap_ips }} --bootstrap-ids={{ bootstrap_ids }} --public-ip={{ ansible_host }} --staking-port={{ staking_port }} --staking-tls-enabled={{ staking_tls_enabled }} --staking-tls-key-file={{ staking_tls_key_file }} --staking-tls-cert-file={{ staking_tls_cert_file }} --log-dir={{ log_dir }} --log-level={{ log_level }} --snow-sample-size={{ snow_sample_size }} --snow-quorum-size={{ snow_quorum_size }} --snow-virtuous-commit-threshold={{ snow_virtuous_commit_threshold }} --snow-rogue-commit-threshold={{ snow_rogue_commit_threshold }} --snow-avalanche-num-parents={{ snow_avalanche_num_parents }} --snow-avalanche-batch-size={{ snow_avalanche_batch_size }} --api-ipcs-enabled={{ api_ipcs_enabled }} --xput-server-enabled={{ xput_server_enabled }} --xput-server-port={{ xput_server_port }} >/dev/null 2>&1 &" + environment: + PATH: /sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/snap/bin diff --git a/scripts/ansible/update_playbook.yml b/scripts/ansible/update_playbook.yml new file mode 100755 index 0000000..ad9d314 --- /dev/null +++ b/scripts/ansible/update_playbook.yml @@ -0,0 +1,28 @@ + +--- +- name: Update the network + connection: ssh + gather_facts: false + hosts: all + vars: + ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava + repo_folder: ~/go/src/github.com/ava-labs/gecko + repo_name: 
ava-labs/gecko + tasks: + - name: Kill Node + command: killall ava + ignore_errors: yes + - git: + repo: ssh://git@github.com/{{ repo_name }}.git + dest: "{{ repo_folder }}" + update: yes + - name: Build project + command: ./scripts/build.sh + args: + chdir: "{{ repo_folder }}" + environment: + PATH: /sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/snap/bin + - name: Start node + shell: "nohup {{ ava_binary }} --network-id={{ network_id }} --api-admin-enabled={{ api_admin_enabled }} --api-keystore-enabled={{ api_keystore_enabled }} --api-metrics-enabled={{ api_metrics_enabled }} --ava-tx-fee={{ ava_tx_fee }} --assertions-enabled={{ assertions_enabled }} --signature-verification-enabled={{ signature_verification_enabled }} --db-enabled={{ db_enabled }} --db-dir={{ db_dir }} --http-port={{ http_port }} --http-tls-enabled={{ http_tls_enabled }} --http-tls-key-file={{ http_tls_key_file }} --http-tls-cert-file={{ http_tls_cert_file }} --bootstrap-ips={{ bootstrap_ips }} --bootstrap-ids={{ bootstrap_ids }} --public-ip={{ ansible_host }} --staking-port={{ staking_port }} --staking-tls-enabled={{ staking_tls_enabled }} --staking-tls-key-file={{ staking_tls_key_file }} --staking-tls-cert-file={{ staking_tls_cert_file }} --log-dir={{ log_dir }} --log-level={{ log_level }} --snow-sample-size={{ snow_sample_size }} --snow-quorum-size={{ snow_quorum_size }} --snow-virtuous-commit-threshold={{ snow_virtuous_commit_threshold }} --snow-rogue-commit-threshold={{ snow_rogue_commit_threshold }} --snow-avalanche-num-parents={{ snow_avalanche_num_parents }} --snow-avalanche-batch-size={{ snow_avalanche_batch_size }} --api-ipcs-enabled={{ api_ipcs_enabled }} --xput-server-enabled={{ xput_server_enabled }} --xput-server-port={{ xput_server_port }} >/dev/null 2>&1 &" + environment: + PATH: /sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/snap/bin diff --git a/scripts/aws/create.py b/scripts/aws/create.py new file mode 100644 index 0000000..ab7a6d7 --- /dev/null +++ b/scripts/aws/create.py @@ 
-0,0 +1,36 @@ +import sys +import boto3 + +ec2 = boto3.client("ec2") + +# Should be called with python3 aws_create.py $numBootstraps $numNodes +numBootstraps = int(sys.argv[1]) +numNodes = int(sys.argv[2]) + +bootstapNode = "Borealis-Bootstrap" +fullNode = "Borealis-Node" + + +def runInstances(num: int, name: str): + if num > 0: + ec2.run_instances( + ImageId="ami-0badd1c10cb7673e9", + InstanceType="c5.large", + MaxCount=num, + MinCount=num, + SubnetId="subnet-0c80cf240e54118c8", + TagSpecifications=[ + {"ResourceType": "instance", "Tags": [{"Key": "Name", "Value": name}]} + ], + SecurityGroupIds=["sg-0d6172e416170b426"], + KeyName="stephen_ava", + ) + + +def main(): + runInstances(numBootstraps, bootstapNode) + runInstances(numNodes, fullNode) + + +if __name__ == "__main__": + main() diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000..6bb31a3 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,20 @@ +#!/bin/bash -e + +# Ted: contact me when you make any changes + +PREFIX="${PREFIX:-$(pwd)/build}" + +SRC_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SRC_DIR/env.sh" + +GECKO_PKG=github.com/ava-labs/gecko +GECKO_PATH="$GOPATH/src/$GECKO_PKG" +if [[ -d "$GECKO_PATH/.git" ]]; then + cd "$GECKO_PATH" + go get -t -v "./..." + cd - +else + go get -t -v "$GECKO_PKG/..." +fi +go build -o "$PREFIX/ava" "$GECKO_PATH/main/"*.go +go build -o "$PREFIX/xputtest" "$GECKO_PATH/xputtest/"*.go diff --git a/scripts/build_image.sh b/scripts/build_image.sh new file mode 100755 index 0000000..1b91410 --- /dev/null +++ b/scripts/build_image.sh @@ -0,0 +1,25 @@ +#!/bin/bash -e +SRC_DIR="$(dirname "${BASH_SOURCE[0]}")" +export GOPATH="$SRC_DIR/.build_image_gopath" +WORKPREFIX="$GOPATH/src/github.com/ava-labs/" +DOCKER="${DOCKER:-docker}" +keep_existing=0 +while getopts 'k' opt +do + case $opt in + (k) keep_existing=1;; + esac +done +if [[ "$keep_existing" != 1 ]]; then + rm -rf "$WORKPREFIX" +fi + +if [[ ! 
-d "$WORKPREFIX" ]]; then + mkdir -p "$WORKPREFIX" + git config --global credential.helper cache + git clone https://github.com/ava-labs/coreth.git "$WORKPREFIX/coreth" + git clone --depth 1 https://github.com/ava-labs/go-ethereum.git "$WORKPREFIX/go-ethereum" + git clone https://github.com/ava-labs/gecko.git "$WORKPREFIX/gecko" +fi +GECKO_COMMIT="$(git --git-dir="$WORKPREFIX/gecko/.git" rev-parse --short HEAD)" +"${DOCKER}" build -t "gecko-$GECKO_COMMIT" "$SRC_DIR" -f "$SRC_DIR/Dockerfile.deploy" diff --git a/scripts/build_test.sh b/scripts/build_test.sh new file mode 100755 index 0000000..f627613 --- /dev/null +++ b/scripts/build_test.sh @@ -0,0 +1,8 @@ +#!/bin/bash -e + +# Ted: contact me when you make any changes + +SRC_DIR="$(dirname "${BASH_SOURCE[0]}")" +source "$SRC_DIR/env.sh" + +go test -race -coverprofile=coverage.out -covermode=atomic ./... diff --git a/scripts/env.sh b/scripts/env.sh new file mode 100644 index 0000000..24141f9 --- /dev/null +++ b/scripts/env.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Ted: contact me when you make any changes + +# resolve the required env for salticidae-go +GOPATH="$(go env GOPATH)" +SALTICIDAE_GO_HOME="$GOPATH/src/github.com/ava-labs/salticidae-go/" + +if [[ -f "$SALTICIDAE_GO_HOME/salticidae/libsalticidae.a" ]]; then + source "$SALTICIDAE_GO_HOME/scripts/env.sh" +else + source /dev/stdin <<<"$(curl -sS https://raw.githubusercontent.com/ava-labs/salticidae-go/master/setup.sh)" +fi diff --git a/snow/choices/decision.go b/snow/choices/decision.go new file mode 100644 index 0000000..a99fd81 --- /dev/null +++ b/snow/choices/decision.go @@ -0,0 +1,40 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package choices + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Decidable represents element that can be decided. +// +// Decidable objects are typically thought of as either transactions, blocks, or +// vertices. 
+type Decidable interface { + // ID returns a unique ID for this element. + // + // Typically, this is implemented by using a cryptographic hash of a + // binary representation of this element. An element should return the same + // IDs upon repeated calls. + ID() ids.ID + + // Accept this element. + // + // This element will be accepted by every correct node in the network. + Accept() + + // Reject this element. + // + // This element will not be accepted by any correct node in the network. + Reject() + + // Status returns this element's current status. + // + // If Accept has been called on an element with this ID, Accepted should be + // returned. Similarly, if Reject has been called on an element with this + // ID, Rejected should be returned. If the contents of this element are + // unknown, then Unknown should be returned. Otherwise, Processing should be + // returned. + Status() Status +} diff --git a/snow/choices/status.go b/snow/choices/status.go new file mode 100644 index 0000000..f214e3f --- /dev/null +++ b/snow/choices/status.go @@ -0,0 +1,110 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package choices + +import ( + "errors" + + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errUnknownStatus = errors.New("unknown status") +) + +// Status ... +type Status uint32 + +// List of possible status values +// [Unknown] Zero value, means the status is not known +// [Processing] means the operation is known, but hasn't been decided yet +// [Rejected] means the operation will never be accepted +// [Accepted] means the operation was accepted +const ( + Unknown Status = iota + Processing + Rejected + Accepted +) + +// MarshalJSON ... +func (s Status) MarshalJSON() ([]byte, error) { + if err := s.Valid(); err != nil { + return nil, err + } + return []byte("\"" + s.String() + "\""), nil +} + +// UnmarshalJSON ... 
+func (s *Status) UnmarshalJSON(b []byte) error { + str := string(b) + if str == "null" { + return nil + } + switch str { + case "\"Unknown\"": + *s = Unknown + case "\"Processing\"": + *s = Processing + case "\"Rejected\"": + *s = Rejected + case "\"Accepted\"": + *s = Accepted + default: + return errUnknownStatus + } + return nil +} + +// Fetched returns true if the status has been set. +func (s Status) Fetched() bool { + switch s { + case Processing: + return true + default: + return s.Decided() + } +} + +// Decided returns true if the status is Rejected or Executed. +func (s Status) Decided() bool { + switch s { + case Rejected, Accepted: + return true + default: + return false + } +} + +// Valid returns nil if the status is a valid status. +func (s Status) Valid() error { + switch s { + case Unknown, Processing, Rejected, Accepted: + return nil + default: + return errUnknownStatus + } +} + +func (s Status) String() string { + switch s { + case Unknown: + return "Unknown" + case Processing: + return "Processing" + case Rejected: + return "Rejected" + case Accepted: + return "Accepted" + default: + return "Invalid status" + } +} + +// Bytes returns the byte repr. of this status +func (s Status) Bytes() []byte { + p := wrappers.Packer{Bytes: make([]byte, 4)} + p.PackInt(uint32(s)) + return p.Bytes +} diff --git a/snow/choices/status_test.go b/snow/choices/status_test.go new file mode 100644 index 0000000..da5c4da --- /dev/null +++ b/snow/choices/status_test.go @@ -0,0 +1,65 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package choices + +import ( + "math" + "testing" +) + +func TestStatusValid(t *testing.T) { + if err := Accepted.Valid(); err != nil { + t.Fatalf("%s failed verification", Accepted) + } else if err := Rejected.Valid(); err != nil { + t.Fatalf("%s failed verification", Rejected) + } else if err := Processing.Valid(); err != nil { + t.Fatalf("%s failed verification", Processing) + } else if err := Unknown.Valid(); err != nil { + t.Fatalf("%s failed verification", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.Valid() == nil { + t.Fatalf("%s passed verification", badStatus) + } +} + +func TestStatusDecided(t *testing.T) { + if !Accepted.Decided() { + t.Fatalf("%s failed decision", Accepted) + } else if !Rejected.Decided() { + t.Fatalf("%s failed decision", Rejected) + } else if Processing.Decided() { + t.Fatalf("%s failed decision", Processing) + } else if Unknown.Decided() { + t.Fatalf("%s failed decision", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.Decided() { + t.Fatalf("%s failed decision", badStatus) + } +} + +func TestStatusFetched(t *testing.T) { + if !Accepted.Fetched() { + t.Fatalf("%s failed issue", Accepted) + } else if !Rejected.Fetched() { + t.Fatalf("%s failed issue", Rejected) + } else if !Processing.Fetched() { + t.Fatalf("%s failed issue", Processing) + } else if Unknown.Fetched() { + t.Fatalf("%s failed issue", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.Fetched() { + t.Fatalf("%s failed issue", badStatus) + } +} + +func TestStatusString(t *testing.T) { + if Accepted.String() != "Accepted" { + t.Fatalf("%s failed printing", Accepted) + } else if Rejected.String() != "Rejected" { + t.Fatalf("%s failed printing", Rejected) + } else if Processing.String() != "Processing" { + t.Fatalf("%s failed printing", Processing) + } else if Unknown.String() != "Unknown" { + t.Fatalf("%s failed printing", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.String() != "Invalid 
status" { + t.Fatalf("%s failed printing", badStatus) + } +} diff --git a/snow/consensus/avalanche/consensus.go b/snow/consensus/avalanche/consensus.go new file mode 100644 index 0000000..07cf5e4 --- /dev/null +++ b/snow/consensus/avalanche/consensus.go @@ -0,0 +1,82 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +// TODO: Implement pruning of accepted decisions. +// To perfectly preserve the protocol, this implementation will need to store +// the hashes of all accepted decisions. It is possible to add a heuristic that +// removes sufficiently old decisions. However, that will need to be analyzed to +// ensure safety. It is doable with a weak syncrony assumption. + +// Consensus represents a general avalanche instance that can be used directly +// to process a series of partially ordered elements. +type Consensus interface { + // Takes in alpha, beta1, beta2, the accepted frontier, the join statuses, + // the mutation statuses, and the consumer statuses. If accept or reject is + // called, the status maps should be immediately updated accordingly. + // Assumes each element in the accepted frontier will return accepted from + // the join status map. + Initialize(*snow.Context, Parameters, []Vertex) + + // Returns the parameters that describe this avalanche instance + Parameters() Parameters + + // Returns true if the transaction is virtuous. + // That is, no transaction has been added that conflicts with it + IsVirtuous(snowstorm.Tx) bool + + // Adds a new decision. Assumes the dependencies have already been added. + // Assumes that mutations don't conflict with themselves. 
+ Add(Vertex) + + // VertexIssued returns true iff Vertex has been added + VertexIssued(Vertex) bool + + // TxIssued returns true if a vertex containing this transanction has been added + TxIssued(snowstorm.Tx) bool + + // Returns the set of transaction IDs that are virtuous but not contained in + // any preferred vertices. + Orphans() ids.Set + + // Returns a set of vertex IDs that were virtuous at the last update. + Virtuous() ids.Set + + // Returns a set of vertex IDs that are preferred + Preferences() ids.Set + + // RecordPoll collects the results of a network poll. If a result has not + // been added, the result is dropped. + RecordPoll(ids.UniqueBag) + + // Quiesce returns true iff all vertices that have been added but not been accepted or rejected are rogue. + // Note, it is possible that after returning quiesce, a new decision may be added such + // that this instance should no longer quiesce. + Quiesce() bool + + // Finalized returns true if all transactions that have been added have been + // finalized. Note, it is possible that after returning finalized, a new + // decision may be added such that this instance is no longer finalized. + Finalized() bool +} + +// Vertex is a collection of multiple transactions tied to other vertices +type Vertex interface { + choices.Decidable + + // Returns the vertices this vertex depends on + Parents() []Vertex + + // Returns a series of state transitions to be performed on acceptance + Txs() []snowstorm.Tx + + Bytes() []byte +} diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go new file mode 100644 index 0000000..f803018 --- /dev/null +++ b/snow/consensus/avalanche/consensus_test.go @@ -0,0 +1,269 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +var ( + Genesis = GenerateID() + offset = uint64(0) +) + +func ParamsTest(t *testing.T, factory Factory) { + avl := factory.New() + + ctx := snow.DefaultContextTest() + params := Parameters{ + Parameters: snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + + numProcessing := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "vtx_processing", + }) + numAccepted := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "vtx_accepted", + }) + numRejected := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "vtx_rejected", + }) + + params.Metrics.Register(numProcessing) + params.Metrics.Register(numAccepted) + params.Metrics.Register(numRejected) + + avl.Initialize(ctx, params, nil) + + if p := avl.Parameters(); p.K != params.K { + t.Fatalf("Wrong K parameter") + } else if p.Alpha != params.Alpha { + t.Fatalf("Wrong Alpha parameter") + } else if p.BetaVirtuous != params.BetaVirtuous { + t.Fatalf("Wrong Beta1 parameter") + } else if p.BetaRogue != params.BetaRogue { + t.Fatalf("Wrong Beta2 parameter") + } else if p.Parents != params.Parents { + t.Fatalf("Wrong Parents parameter") + } +} + +func AddTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: 
GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + if !avl.Finalized() { + t.Fatalf("An empty avalanche instance is not finalized") + } else if !Matches([]ids.ID{vts[0].ID(), vts[1].ID()}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + tx0 := &snowstorm.TestTx{Identifier: GenerateID()} + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + avl.Add(vtx0) + + if avl.Finalized() { + t.Fatalf("A non-empty avalanche instance is finalized") + } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + tx1 := &snowstorm.TestTx{Identifier: GenerateID()} + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + avl.Add(vtx1) + + if avl.Finalized() { + t.Fatalf("A non-empty avalanche instance is finalized") + } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + avl.Add(vtx1) + + if avl.Finalized() { + t.Fatalf("A non-empty avalanche instance is finalized") + } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + avl.Add(vts[0]) + + if avl.Finalized() { + t.Fatalf("A non-empty avalanche instance is finalized") + } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } +} + +func VertexIssuedTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + 
BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + if !avl.VertexIssued(vts[0]) { + t.Fatalf("Genesis Vertex not reported as issued") + } + + tx := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx.Ins.Add(utxos[0]) + + vtx := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx}, + height: 1, + status: choices.Processing, + } + + if avl.VertexIssued(vtx) { + t.Fatalf("Vertex reported as issued") + } + + avl.Add(vtx) + + if !avl.VertexIssued(vtx) { + t.Fatalf("Vertex reported as not issued") + } +} + +func TxIssuedTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Accepted, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + if !avl.TxIssued(tx0) { + t.Fatalf("Genesis Tx not reported as issued") + } else if avl.TxIssued(tx1) { + t.Fatalf("Tx reported as issued") + } + + vtx := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + avl.Add(vtx) + + if !avl.TxIssued(tx1) { + t.Fatalf("Tx reported as not issued") + } +} diff --git a/snow/consensus/avalanche/factory.go b/snow/consensus/avalanche/factory.go new file mode 100644 index 0000000..5f0d2d1 --- /dev/null +++ b/snow/consensus/avalanche/factory.go @@ -0,0 +1,9 
@@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +// Factory returns new instances of Consensus +type Factory interface { + New() Consensus +} diff --git a/snow/consensus/avalanche/ids_test.go b/snow/consensus/avalanche/ids_test.go new file mode 100644 index 0000000..dc9a807 --- /dev/null +++ b/snow/consensus/avalanche/ids_test.go @@ -0,0 +1,40 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" +) + +func GenerateID() ids.ID { + offset++ + return ids.Empty.Prefix(offset) +} + +func Matches(a, b []ids.ID) bool { + if len(a) != len(b) { + return false + } + set := ids.Set{} + set.Add(a...) + for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} +func MatchesShort(a, b []ids.ShortID) bool { + if len(a) != len(b) { + return false + } + set := ids.ShortSet{} + set.Add(a...) + for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} diff --git a/snow/consensus/avalanche/parameters.go b/snow/consensus/avalanche/parameters.go new file mode 100644 index 0000000..53aecd4 --- /dev/null +++ b/snow/consensus/avalanche/parameters.go @@ -0,0 +1,29 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "fmt" + + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +// Parameters the avalanche paramaters include the snowball paramters and the +// optimal number of parents +type Parameters struct { + snowball.Parameters + Parents, BatchSize int +} + +// Valid returns nil if the parameters describe a valid initialization. 
+func (p Parameters) Valid() error { + switch { + case p.Parents <= 1: + return fmt.Errorf("parents = %d: Fails the condition that: 1 < Parents", p.Parents) + case p.BatchSize <= 0: + return fmt.Errorf("batchSize = %d: Fails the condition that: 0 < BatchSize", p.BatchSize) + default: + return p.Parameters.Valid() + } +} diff --git a/snow/consensus/avalanche/parameters_test.go b/snow/consensus/avalanche/parameters_test.go new file mode 100644 index 0000000..aa28f98 --- /dev/null +++ b/snow/consensus/avalanche/parameters_test.go @@ -0,0 +1,61 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "testing" + + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +func TestParametersValid(t *testing.T) { + p := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 2, + BatchSize: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersInvalidParents(t *testing.T) { + p := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 1, + BatchSize: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid parents") + } +} + +func TestParametersInvalidBatchSize(t *testing.T) { + p := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 2, + BatchSize: 0, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid batch size") + } +} diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go new file mode 100644 index 0000000..dca8a19 --- /dev/null +++ b/snow/consensus/avalanche/topological.go @@ -0,0 +1,455 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +// TopologicalFactory implements Factory by returning a topological struct +type TopologicalFactory struct{} + +// New implements Factory +func (TopologicalFactory) New() Consensus { return &Topological{} } + +// TODO: Implement pruning of decisions. +// To perfectly preserve the protocol, this implementation will need to store +// the hashes of all accepted decisions. It is possible to add a heuristic that +// removes sufficiently old decisions. However, that will need to be analyzed to +// ensure safety. It is doable when adding in a weak synchrony assumption. + +// Topological performs the avalanche algorithm by utilizing a topological sort +// of the voting results. Assumes that vertices are inserted in topological +// order. +type Topological struct { + // Context used for logging + ctx *snow.Context + // Threshold for confidence increases + params Parameters + + numProcessing prometheus.Gauge + numAccepted, numRejected prometheus.Counter + + // Maps vtxID -> vtx + nodes map[[32]byte]Vertex + // Tracks the conflict relations + cg snowstorm.Consensus + + // preferred is the frontier of vtxIDs that are strongly preferred + // virtuous is the frontier of vtxIDs that are strongly virtuous + // orphans are the txIDs that are virtuous, but not preferred + preferred, virtuous, orphans ids.Set + // frontier is the set of vts that have no descendents + frontier map[[32]byte]Vertex + // preferenceCache is the cache for strongly preferred checks + // virtuousCache is the cache for strongly virtuous checks + preferenceCache, virtuousCache map[[32]byte]bool +} + +type kahnNode struct { + inDegree int + votes ids.BitSet +} + +// Initialize implements the Avalanche interface +func (ta *Topological) Initialize(ctx 
*snow.Context, params Parameters, frontier []Vertex) { + ctx.Log.AssertDeferredNoError(params.Valid) + + ta.ctx = ctx + ta.params = params + + ta.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "vtx_processing", + Help: "Number of currently processing vertices", + }) + ta.numAccepted = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "vtx_accepted", + Help: "Number of vertices accepted", + }) + ta.numRejected = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "vtx_rejected", + Help: "Number of vertices rejected", + }) + + if err := ta.params.Metrics.Register(ta.numProcessing); err != nil { + ta.ctx.Log.Error("Failed to register vtx_processing statistics due to %s", err) + } + if err := ta.params.Metrics.Register(ta.numAccepted); err != nil { + ta.ctx.Log.Error("Failed to register vtx_accepted statistics due to %s", err) + } + if err := ta.params.Metrics.Register(ta.numRejected); err != nil { + ta.ctx.Log.Error("Failed to register vtx_rejected statistics due to %s", err) + } + + ta.nodes = make(map[[32]byte]Vertex) + + ta.cg = &snowstorm.Directed{} + ta.cg.Initialize(ctx, params.Parameters) + + ta.frontier = make(map[[32]byte]Vertex) + for _, vtx := range frontier { + ta.frontier[vtx.ID().Key()] = vtx + } + ta.updateFrontiers() +} + +// Parameters implements the Avalanche interface +func (ta *Topological) Parameters() Parameters { return ta.params } + +// IsVirtuous implements the Avalanche interface +func (ta *Topological) IsVirtuous(tx snowstorm.Tx) bool { return ta.cg.IsVirtuous(tx) } + +// Add implements the Avalanche interface +func (ta *Topological) Add(vtx Vertex) { + ta.ctx.Log.AssertTrue(vtx != nil, "Attempting to insert nil vertex") + + vtxID := vtx.ID() + key := vtxID.Key() + if vtx.Status().Decided() { + return // Already decided this vertex + } else if _, exists := ta.nodes[key]; exists { + return // Already inserted this 
vertex + } + + ta.ctx.ConsensusDispatcher.Issue(ta.ctx.ChainID, vtxID, vtx.Bytes()) + + for _, tx := range vtx.Txs() { + if !tx.Status().Decided() { + // Add the consumers to the conflict graph. + ta.cg.Add(tx) + } + } + + ta.nodes[key] = vtx // Add this vertex to the set of nodes + ta.numProcessing.Inc() + + ta.update(vtx) // Update the vertex and it's ancestry +} + +// VertexIssued implements the Avalanche interface +func (ta *Topological) VertexIssued(vtx Vertex) bool { + if vtx.Status().Decided() { + return true + } + _, ok := ta.nodes[vtx.ID().Key()] + return ok +} + +// TxIssued implements the Avalanche interface +func (ta *Topological) TxIssued(tx snowstorm.Tx) bool { return ta.cg.Issued(tx) } + +// Orphans implements the Avalanche interface +func (ta *Topological) Orphans() ids.Set { return ta.orphans } + +// Virtuous implements the Avalanche interface +func (ta *Topological) Virtuous() ids.Set { return ta.virtuous } + +// Preferences implements the Avalanche interface +func (ta *Topological) Preferences() ids.Set { return ta.preferred } + +// RecordPoll implements the Avalanche interface +func (ta *Topological) RecordPoll(responses ids.UniqueBag) { + // Set up the topological sort: O(|Live Set|) + kahns, leaves := ta.calculateInDegree(responses) + // Collect the votes for each transaction: O(|Live Set|) + votes := ta.pushVotes(kahns, leaves) + // Update the conflict graph: O(|Transactions|) + ta.ctx.Log.Verbo("Updating consumer confidences based on:\n%s", &votes) + ta.cg.RecordPoll(votes) + // Update the dag: O(|Live Set|) + ta.updateFrontiers() +} + +// Quiesce implements the Avalanche interface +func (ta *Topological) Quiesce() bool { return ta.cg.Quiesce() } + +// Finalized implements the Avalanche interface +func (ta *Topological) Finalized() bool { return ta.cg.Finalized() } + +// Takes in a list of votes and sets up the topological ordering. 
Returns the +// reachable section of the graph annotated with the number of inbound edges and +// the non-transitively applied votes. Also returns the list of leaf nodes. +func (ta *Topological) calculateInDegree( + responses ids.UniqueBag) (map[[32]byte]kahnNode, []ids.ID) { + kahns := make(map[[32]byte]kahnNode) + leaves := ids.Set{} + + for _, vote := range responses.List() { + key := vote.Key() + // If it is not found, then the vote is either for something decided, + // or something we haven't heard of yet. + if vtx := ta.nodes[key]; vtx != nil { + kahn, previouslySeen := kahns[key] + // Add this new vote to the current bag of votes + kahn.votes.Union(responses.GetSet(vote)) + kahns[key] = kahn + + if !previouslySeen { + // If I've never seen this node before, it is currently a leaf. + leaves.Add(vote) + ta.markAncestorInDegrees(kahns, leaves, vtx.Parents()) + } + } + } + + return kahns, leaves.List() +} + +// adds a new in-degree reference for all nodes +func (ta *Topological) markAncestorInDegrees( + kahns map[[32]byte]kahnNode, + leaves ids.Set, + deps []Vertex) (map[[32]byte]kahnNode, ids.Set) { + frontier := []Vertex{} + for _, vtx := range deps { + // The vertex may have been decided, no need to vote in that case + if !vtx.Status().Decided() { + frontier = append(frontier, vtx) + } + } + + for len(frontier) > 0 { + newLen := len(frontier) - 1 + current := frontier[newLen] + frontier = frontier[:newLen] + + currentID := current.ID() + currentKey := currentID.Key() + kahn, alreadySeen := kahns[currentKey] + // I got here through a transitive edge, so increase the in-degree + kahn.inDegree++ + kahns[currentKey] = kahn + + if kahn.inDegree == 1 { + // If I am transitively seeing this node for the first + // time, it is no longer a leaf. 
+ leaves.Remove(currentID) + } + + if !alreadySeen { + // If I am seeing this node for the first time, I need to check its + // parents + for _, depVtx := range current.Parents() { + // No need to traverse to a decided vertex + if !depVtx.Status().Decided() { + frontier = append(frontier, depVtx) + } + } + } + } + return kahns, leaves +} + +// count the number of votes for each operation +func (ta *Topological) pushVotes( + kahnNodes map[[32]byte]kahnNode, + leaves []ids.ID) ids.Bag { + votes := make(ids.UniqueBag) + + for len(leaves) > 0 { + newLeavesSize := len(leaves) - 1 + leaf := leaves[newLeavesSize] + leaves = leaves[:newLeavesSize] + + key := leaf.Key() + kahn := kahnNodes[key] + + if vtx := ta.nodes[key]; vtx != nil { + for _, tx := range vtx.Txs() { + // Give the votes to the consumer + txID := tx.ID() + votes.UnionSet(txID, kahn.votes) + } + + for _, dep := range vtx.Parents() { + depID := dep.ID() + depKey := depID.Key() + if depNode, notPruned := kahnNodes[depKey]; notPruned { + depNode.inDegree-- + // Give the votes to my parents + depNode.votes.Union(kahn.votes) + kahnNodes[depKey] = depNode + + if depNode.inDegree == 0 { + // Only traverse into the leaves + leaves = append(leaves, depID) + } + } + } + } + } + + return votes.Bag(ta.params.Alpha) +} + +// If I've already checked, do nothing +// If I'm decided, cache the preference and return +// At this point, I must be live +// I now try to accept all my consumers +// I now update all my ancestors +// If any of my parents are rejected, reject myself +// If I'm preferred, remove all my ancestors from the preferred frontier, add +// myself to the preferred frontier +// If all my parents are accepted and I'm acceptable, accept myself +func (ta *Topological) update(vtx Vertex) { + vtxID := vtx.ID() + vtxKey := vtxID.Key() + if _, cached := ta.preferenceCache[vtxKey]; cached { + return // This vertex has already been updated + } + + switch vtx.Status() { + case choices.Accepted: + ta.preferred.Add(vtxID) 
// I'm preferred + ta.virtuous.Add(vtxID) // Accepted is defined as virtuous + + ta.frontier[vtxKey] = vtx // I have no descendents yet + + ta.preferenceCache[vtxKey] = true + ta.virtuousCache[vtxKey] = true + return + case choices.Rejected: + // I'm rejected + ta.preferenceCache[vtxKey] = false + ta.virtuousCache[vtxKey] = false + return + } + + acceptable := true // If the batch is accepted, this vertex is acceptable + rejectable := false // If I'm rejectable, I must be rejected + preferred := true + virtuous := true + txs := vtx.Txs() + preferences := ta.cg.Preferences() + virtuousTxs := ta.cg.Virtuous() + + for _, tx := range txs { + txID := tx.ID() + s := tx.Status() + if s == choices.Rejected { + // If I contain a rejected consumer, I am rejectable + rejectable = true + preferred = false + virtuous = false + } + if s != choices.Accepted { + // If I contain a non-accepted consumer, I am not acceptable + acceptable = false + preferred = preferred && preferences.Contains(txID) + virtuous = virtuous && virtuousTxs.Contains(txID) + } + } + + deps := vtx.Parents() + // Update all of my dependencies + for _, dep := range deps { + ta.update(dep) + + depID := dep.ID() + key := depID.Key() + preferred = preferred && ta.preferenceCache[key] + virtuous = virtuous && ta.virtuousCache[key] + } + + // Check my parent statuses + for _, dep := range deps { + if status := dep.Status(); status == choices.Rejected { + vtx.Reject() // My parent is rejected, so I should be rejected + ta.numRejected.Inc() + delete(ta.nodes, vtxKey) + ta.numProcessing.Dec() + + ta.preferenceCache[vtxKey] = false + ta.virtuousCache[vtxKey] = false + return + } else if status != choices.Accepted { + acceptable = false // My parent isn't accepted, so I can't be + } + } + + // Technically, we could also check to see if there are direct conflicts + // between this vertex and a vertex in it's ancestry. If there does exist + // such a conflict, this vertex could also be rejected. 
However, this would + // require a traversal. Therefore, this memory optimization is ignored. + // Also, this will only happen from a byzantine node issuing the vertex. + // Therefore, this is very unlikely to actually be triggered in practice. + + // Remove all my parents from the frontier + for _, dep := range deps { + delete(ta.frontier, dep.ID().Key()) + } + ta.frontier[vtxKey] = vtx // I have no descendents yet + + ta.preferenceCache[vtxKey] = preferred + ta.virtuousCache[vtxKey] = virtuous + + if preferred { + ta.preferred.Add(vtxID) // I'm preferred + for _, dep := range deps { + ta.preferred.Remove(dep.ID()) // My parents aren't part of the frontier + } + + for _, tx := range txs { + if tx.Status() != choices.Accepted { + ta.orphans.Remove(tx.ID()) + } + } + } + + if virtuous { + ta.virtuous.Add(vtxID) // I'm virtuous + for _, dep := range deps { + ta.virtuous.Remove(dep.ID()) // My parents aren't part of the frontier + } + } + + switch { + case acceptable: + // I'm acceptable, why not accept? + ta.ctx.ConsensusDispatcher.Accept(ta.ctx.ChainID, vtxID, vtx.Bytes()) + vtx.Accept() + ta.numAccepted.Inc() + delete(ta.nodes, vtxKey) + ta.numProcessing.Dec() + case rejectable: + // I'm rejectable, why not reject? 
+ vtx.Reject() + + ta.ctx.ConsensusDispatcher.Reject(ta.ctx.ChainID, vtxID, vtx.Bytes()) + + ta.numRejected.Inc() + delete(ta.nodes, vtxKey) + ta.numProcessing.Dec() + } +} + +// Update the frontier sets +func (ta *Topological) updateFrontiers() { + vts := ta.frontier + + ta.preferred.Clear() + ta.virtuous.Clear() + ta.orphans.Clear() + ta.frontier = make(map[[32]byte]Vertex) + ta.preferenceCache = make(map[[32]byte]bool) + ta.virtuousCache = make(map[[32]byte]bool) + + ta.orphans.Union(ta.cg.Virtuous()) // Initially, nothing is preferred + + for _, vtx := range vts { + // Update all the vertices that were in my previous frontier + ta.update(vtx) + } +} diff --git a/snow/consensus/avalanche/topological_test.go b/snow/consensus/avalanche/topological_test.go new file mode 100644 index 0000000..f43ee5b --- /dev/null +++ b/snow/consensus/avalanche/topological_test.go @@ -0,0 +1,753 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "math" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) } + +func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) } + +func TestTopologicalVertexIssued(t *testing.T) { VertexIssuedTest(t, TopologicalFactory{}) } + +func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{}) } + +func TestAvalancheVoting(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: 
GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + ta.Add(vtx0) + ta.Add(vtx1) + + sm := make(ids.UniqueBag) + sm.Add(0, vtx1.id) + sm.Add(1, vtx1.id) + ta.RecordPoll(sm) + + if ta.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + ta.RecordPoll(sm) + + if !ta.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Rejected { + t.Fatalf("Tx should have been rejected") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func TestAvalancheTransitiveVoting(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + 
tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[1]) + + vtx1 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 2, + status: choices.Processing, + } + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx1}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 3, + status: choices.Processing, + } + + ta.Add(vtx0) + ta.Add(vtx1) + ta.Add(vtx2) + + sm1 := make(ids.UniqueBag) + sm1.Add(0, vtx0.id) + sm1.Add(1, vtx2.id) + ta.RecordPoll(sm1) + + if ta.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !Matches([]ids.ID{vtx2.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } + + sm2 := make(ids.UniqueBag) + sm2.Add(0, vtx2.id) + sm2.Add(1, vtx2.id) + ta.RecordPoll(sm2) + + if !ta.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !Matches([]ids.ID{vtx2.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func TestAvalancheSplitVoting(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + 
Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + ta.Add(vtx0) + ta.Add(vtx1) + + sm1 := make(ids.UniqueBag) + sm1.Add(0, vtx0.id) + sm1.Add(1, vtx1.id) + ta.RecordPoll(sm1) + + if !ta.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !Matches([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func TestAvalancheTransitiveRejection(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + 
height: 2, + status: choices.Processing, + } + + ta.Add(vtx0) + ta.Add(vtx1) + ta.Add(vtx2) + + sm := make(ids.UniqueBag) + sm.Add(0, vtx1.id) + sm.Add(1, vtx1.id) + ta.RecordPoll(sm) + + if ta.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } + + ta.RecordPoll(sm) + + if ta.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Rejected { + t.Fatalf("Tx should have been rejected") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } else if tx2.Status() != choices.Processing { + t.Fatalf("Tx should not have been decided") + } + + ta.preferenceCache = make(map[[32]byte]bool) + ta.virtuousCache = make(map[[32]byte]bool) + + ta.update(vtx2) +} + +func TestAvalancheVirtuous(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + if virtuous := ta.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + 
tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + ta.Add(vtx0) + + if virtuous := ta.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vtx0.id) { + t.Fatalf("Wrong virtuous") + } + + ta.Add(vtx1) + + if virtuous := ta.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vtx0.id) { + t.Fatalf("Wrong virtuous") + } + + ta.updateFrontiers() + + if virtuous := ta.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + ta.Add(vtx2) + + if virtuous := ta.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + ta.updateFrontiers() + + if virtuous := ta.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } +} + +func TestAvalancheIsVirtuous(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: 
choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + if virtuous := ta.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + if !ta.IsVirtuous(tx0) { + t.Fatalf("Should be virtuous.") + } else if !ta.IsVirtuous(tx1) { + t.Fatalf("Should be virtuous.") + } + + ta.Add(vtx0) + + if !ta.IsVirtuous(tx0) { + t.Fatalf("Should be virtuous.") + } else if ta.IsVirtuous(tx1) { + t.Fatalf("Should not be virtuous.") + } + + ta.Add(vtx1) + + if ta.IsVirtuous(tx0) { + t.Fatalf("Should not be virtuous.") + } else if ta.IsVirtuous(tx1) { + t.Fatalf("Should not be virtuous.") + } +} + +func TestAvalancheQuiesce(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + 
tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + ta.Add(vtx0) + + if ta.Quiesce() { + t.Fatalf("Shouldn't quiesce") + } + + ta.Add(vtx1) + + if !ta.Quiesce() { + t.Fatalf("Should quiesce") + } + + ta.Add(vtx2) + + if ta.Quiesce() { + t.Fatalf("Shouldn't quiesce") + } + + sm := make(ids.UniqueBag) + sm.Add(0, vtx2.id) + ta.RecordPoll(sm) + + if !ta.Quiesce() { + t.Fatalf("Should quiesce") + } +} + +func TestAvalancheOrphans(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: math.MaxInt32, + BetaRogue: math.MaxInt32, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: 
choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + ta.Add(vtx0) + + if orphans := ta.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } + + ta.Add(vtx1) + + if orphans := ta.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } + + ta.Add(vtx2) + + if orphans := ta.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } + + sm := make(ids.UniqueBag) + sm.Add(0, vtx1.id) + ta.RecordPoll(sm) + + if orphans := ta.Orphans(); orphans.Len() != 1 { + t.Fatalf("Wrong number of orphans") + } else if !orphans.Contains(tx2.ID()) { + t.Fatalf("Wrong orphan") + } +} diff --git a/snow/consensus/avalanche/vertex_test.go b/snow/consensus/avalanche/vertex_test.go new file mode 100644 index 0000000..14b0286 --- /dev/null +++ b/snow/consensus/avalanche/vertex_test.go @@ -0,0 +1,41 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +type Vtx struct { + dependencies []Vertex + id ids.ID + txs []snowstorm.Tx + + height int + status choices.Status + + bytes []byte +} + +func (v *Vtx) ID() ids.ID { return v.id } +func (v *Vtx) ParentIDs() []ids.ID { return nil } +func (v *Vtx) Parents() []Vertex { return v.dependencies } +func (v *Vtx) Txs() []snowstorm.Tx { return v.txs } +func (v *Vtx) Status() choices.Status { return v.status } +func (v *Vtx) Live() {} +func (v *Vtx) Accept() { v.status = choices.Accepted } +func (v *Vtx) Reject() { v.status = choices.Rejected } +func (v *Vtx) Bytes() []byte { return v.bytes } + +type sortVts []*Vtx + +func (sv sortVts) Less(i, j int) bool { return sv[i].height < sv[j].height } +func (sv sortVts) Len() int { return len(sv) } +func (sv sortVts) Swap(i, j int) { sv[j], sv[i] = sv[i], sv[j] } + +func SortVts(vts []*Vtx) { sort.Sort(sortVts(vts)) } diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go new file mode 100644 index 0000000..f755a6b --- /dev/null +++ b/snow/consensus/snowball/binary_snowball.go @@ -0,0 +1,64 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" +) + +// binarySnowball is the implementation of a binary snowball instance +type binarySnowball struct { + // preference is the choice with the largest number of successful polls. 
+ // Ties are broken by switching choice lazily + preference int + + // numSuccessfulPolls tracks the total number of successful network polls of + // the 0 and 1 choices + numSuccessfulPolls [2]int + + // snowflake wraps the binary snowflake logic + snowflake binarySnowflake +} + +// Initialize implements the BinarySnowball interface +func (sb *binarySnowball) Initialize(beta, choice int) { + sb.preference = choice + sb.snowflake.Initialize(beta, choice) +} + +// Preference implements the BinarySnowball interface +func (sb *binarySnowball) Preference() int { + // It is possible, with low probability, that the snowflake preference is + // not equal to the snowball preference when snowflake finalizes. However, + // this case is handled for completion. Therefore, if snowflake is + // finalized, then our finalized snowflake choice should be preferred. + if sb.Finalized() { + return sb.snowflake.Preference() + } + return sb.preference +} + +// RecordSuccessfulPoll implements the BinarySnowball interface +func (sb *binarySnowball) RecordSuccessfulPoll(choice int) { + sb.numSuccessfulPolls[choice]++ + if sb.numSuccessfulPolls[choice] > sb.numSuccessfulPolls[1-choice] { + sb.preference = choice + } + sb.snowflake.RecordSuccessfulPoll(choice) +} + +// RecordUnsuccessfulPoll implements the BinarySnowball interface +func (sb *binarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } + +// Finalized implements the BinarySnowball interface +func (sb *binarySnowball) Finalized() bool { return sb.snowflake.Finalized() } + +func (sb *binarySnowball) String() string { + return fmt.Sprintf( + "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, SF = %s)", + sb.preference, + sb.numSuccessfulPolls[0], + sb.numSuccessfulPolls[1], + &sb.snowflake) +} diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go new file mode 100644 index 0000000..dd37f13 --- /dev/null +++ 
b/snow/consensus/snowball/binary_snowball_test.go @@ -0,0 +1,197 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestBinarySnowball(t *testing.T) { + Red := 0 + Blue := 1 + + beta := 2 + + sb := binarySnowball{} + sb.Initialize(beta, Red) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Didn't finalized correctly") + } +} + +func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { + Red := 0 + Blue := 1 + + beta := 2 + + sb := binarySnowball{} + sb.Initialize(beta, Red) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordUnsuccessfulPoll() + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + if str := sb.String(); str != expected { + t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) + } +} + +func TestBinarySnowballAcceptWeirdColor(t *testing.T) { + Red := 0 + Blue := 1 + + beta := 2 + + sb := binarySnowball{} + sb.Initialize(beta, Red) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + sb.RecordUnsuccessfulPoll() + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + sb.RecordUnsuccessfulPoll() + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + if str := sb.String(); str != expected { + t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) + } +} + +func TestBinarySnowballLockColor(t *testing.T) { + Red := 0 + Blue := 1 + + beta := 1 + + sb := binarySnowball{} + sb.Initialize(beta, Red) + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 0, Confidence = 1, Finalized = true))" + if str := sb.String(); str != expected { + t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) + } +} diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go new file mode 100644 index 0000000..715e3ed --- /dev/null +++ b/snow/consensus/snowball/binary_snowflake.go @@ -0,0 +1,68 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" +) + +// binarySnowflake is the implementation of a binary snowflake instance +type binarySnowflake struct { + // preference is the choice that last had a successful poll. 
Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. + preference int + + // confidence tracks the number of successful polls in a row that have + // returned the preference + confidence int + + // beta is the number of consecutive successful queries required for + // finalization. + beta int + + // finalized prevents the state from changing after the required number of + // consecutive polls has been reached + finalized bool +} + +// Initialize implements the BinarySnowflake interface +func (sf *binarySnowflake) Initialize(beta, choice int) { + sf.beta = beta + sf.preference = choice +} + +// Preference implements the BinarySnowflake interface +func (sf *binarySnowflake) Preference() int { return sf.preference } + +// RecordSuccessfulPoll implements the BinarySnowflake interface +func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { + if sf.Finalized() { + return // This instace is already decided. + } + + if sf.preference == choice { + sf.confidence++ + } else { + // confidence is set to 1 because there has already been 1 successful + // poll, namely this poll. + sf.confidence = 1 + sf.preference = choice + } + + sf.finalized = sf.confidence >= sf.beta +} + +// RecordUnsuccessfulPoll implements the BinarySnowflake interface +func (sf *binarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } + +// Finalized implements the BinarySnowflake interface +func (sf *binarySnowflake) Finalized() bool { return sf.finalized } + +func (sf *binarySnowflake) String() string { + return fmt.Sprintf("SF(Preference = %d, Confidence = %d, Finalized = %v)", + sf.Preference(), + sf.confidence, + sf.Finalized()) +} diff --git a/snow/consensus/snowball/byzantine.go b/snow/consensus/snowball/byzantine.go new file mode 100644 index 0000000..8995d11 --- /dev/null +++ b/snow/consensus/snowball/byzantine.go @@ -0,0 +1,47 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ByzantineFactory implements Factory by returning a byzantine struct +type ByzantineFactory struct{} + +// New implements Factory +func (ByzantineFactory) New() Consensus { return &Byzantine{} } + +// Byzantine is a naive implementation of a multi-choice snowball instance +type Byzantine struct { + // params contains all the configurations of a snowball instance + params Parameters + + // Hardcode the preference + preference ids.ID +} + +// Initialize implements the Consensus interface +func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { + b.preference = choice +} + +// Parameters implements the Consensus interface +func (b *Byzantine) Parameters() Parameters { return b.params } + +// Add implements the Consensus interface +func (b *Byzantine) Add(choice ids.ID) {} + +// Preference implements the Consensus interface +func (b *Byzantine) Preference() ids.ID { return b.preference } + +// RecordPoll implements the Consensus interface +func (b *Byzantine) RecordPoll(votes ids.Bag) {} + +// RecordUnsuccessfulPoll implements the Consensus interface +func (b *Byzantine) RecordUnsuccessfulPoll() {} + +// Finalized implements the Consensus interface +func (b *Byzantine) Finalized() bool { return true } +func (b *Byzantine) String() string { return b.preference.String() } diff --git a/snow/consensus/snowball/consensus.go b/snow/consensus/snowball/consensus.go new file mode 100644 index 0000000..edbf14b --- /dev/null +++ b/snow/consensus/snowball/consensus.go @@ -0,0 +1,124 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// Consensus represents a general snow instance that can be used directly to +// process the results of network queries. 
+type Consensus interface { + fmt.Stringer + + // Takes in alpha, beta1, beta2, and the initial choice + Initialize(params Parameters, initialPreference ids.ID) + + // Returns the parameters that describe this snowball instance + Parameters() Parameters + + // Adds a new choice to vote on + Add(newChoice ids.ID) + + // Returns the currently preferred choice to be finalized + Preference() ids.ID + + // RecordPoll records the results of a network poll. Assumes all choices + // have been previously added. + RecordPoll(votes ids.Bag) + + // RecordUnsuccessfulPoll resets the snowflake counters of this consensus + // instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool +} + +// NnarySnowball augments NnarySnowflake with a counter that tracks the total +// number of positive responses from a network sample. +type NnarySnowball interface{ NnarySnowflake } + +// NnarySnowflake is a snowflake instance deciding between an unbounded number +// of values. After performing a network sample of k nodes, if you have alpha +// votes for one of the choices, you should vote for that choice. Otherwise, you +// should reset. +type NnarySnowflake interface { + fmt.Stringer + + // Takes in beta1, beta2, and the initial choice + Initialize(betaVirtuous, betaRogue int, initialPreference ids.ID) + + // Adds a new possible choice + Add(newChoice ids.ID) + + // Returns the currently preferred choice to be finalized + Preference() ids.ID + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice. Assumes the choice was previously added. + RecordSuccessfulPoll(choice ids.ID) + + // RecordUnsuccessfulPoll resets the snowflake counter of this instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool +} + +// BinarySnowball augments BinarySnowflake with a counter that tracks the total +// number of positive responses from a network sample. 
+type BinarySnowball interface{ BinarySnowflake } + +// BinarySnowflake is a snowball instance deciding between two values +// After performing a network sample of k nodes, if you have alpha votes for +// one of the choices, you should vote for that choice. Otherwise, you should +// reset. +type BinarySnowflake interface { + fmt.Stringer + + // Takes in the beta value, and the initial choice + Initialize(beta, initialPreference int) + + // Returns the currently preferred choice to be finalized + Preference() int + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice + RecordSuccessfulPoll(choice int) + + // RecordUnsuccessfulPoll resets the snowflake counter of this instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool +} + +// UnarySnowball is a snowball instance deciding on one value. After performing +// a network sample of k nodes, if you have alpha votes for the choice, you +// should vote. Otherwise, you should reset. +type UnarySnowball interface { + fmt.Stringer + + // Takes in the beta value + Initialize(beta int) + + // RecordSuccessfulPoll records a successful poll towards finalizing + RecordSuccessfulPoll() + + // RecordUnsuccessfulPoll resets the snowflake counter of this instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool + + // Returns a new binary snowball instance with the agreement parameters + // transferred. Takes in the new beta value and the original choice + Extend(beta, originalPreference int) BinarySnowball + + // Returns a new unary snowball instance with the same state + Clone() UnarySnowball +} diff --git a/snow/consensus/snowball/consensus_performance_test.go b/snow/consensus/snowball/consensus_performance_test.go new file mode 100644 index 0000000..9cfce43 --- /dev/null +++ b/snow/consensus/snowball/consensus_performance_test.go @@ -0,0 +1,59 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestSnowballOptimized(t *testing.T) { + numColors := 10 + numNodes := 100 + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, + } + seed := int64(0) + + nBitwise := Network{} + nBitwise.Initialize(params, numColors) + + nNaive := nBitwise + + rand.Seed(seed) + for i := 0; i < numNodes; i++ { + nBitwise.AddNode(&Tree{}) + } + + rand.Seed(seed) + for i := 0; i < numNodes; i++ { + nNaive.AddNode(&Flat{}) + } + + numRounds := 0 + for !nBitwise.Finalized() && !nBitwise.Disagreement() && !nNaive.Finalized() && !nNaive.Disagreement() { + rand.Seed(int64(numRounds) + seed) + nBitwise.Round() + + rand.Seed(int64(numRounds) + seed) + nNaive.Round() + numRounds++ + } + + if nBitwise.Disagreement() || nNaive.Disagreement() { + t.Fatalf("Network agreed on inconsistent values") + } + + // Although this can theoretically fail with a correct implementation, it + // shouldn't in practice + if !nBitwise.Finalized() { + t.Fatalf("Network agreed on values faster with naive implementation") + } + if !nBitwise.Agreement() { + t.Fatalf("Network agreed on inconsistent values") + } +} diff --git a/snow/consensus/snowball/consensus_reversibility_test.go b/snow/consensus/snowball/consensus_reversibility_test.go new file mode 100644 index 0000000..9863d0e --- /dev/null +++ b/snow/consensus/snowball/consensus_reversibility_test.go @@ -0,0 +1,58 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestSnowballGovernance(t *testing.T) { + numColors := 2 + numNodes := 100 + numByzantine := 10 + numRed := 55 + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, + } + seed := int64(0) + + nBitwise := Network{} + nBitwise.Initialize(params, numColors) + + rand.Seed(seed) + for i := 0; i < numRed; i++ { + nBitwise.AddNodeSpecificColor(&Tree{}, []int{0, 1}) + } + + for _, node := range nBitwise.nodes { + if !node.Preference().Equals(nBitwise.colors[0]) { + t.Fatalf("Wrong preferences") + } + } + + for i := 0; i < numNodes-numByzantine-numRed; i++ { + nBitwise.AddNodeSpecificColor(&Tree{}, []int{1, 0}) + } + + for i := 0; i < numByzantine; i++ { + nBitwise.AddNodeSpecificColor(&Byzantine{}, []int{1, 0}) + } + + for !nBitwise.Finalized() { + nBitwise.Round() + } + + for _, node := range nBitwise.nodes { + if _, ok := node.(*Byzantine); ok { + continue + } + if !node.Preference().Equals(nBitwise.colors[0]) { + t.Fatalf("Wrong preferences") + } + } +} diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go new file mode 100644 index 0000000..67fec3d --- /dev/null +++ b/snow/consensus/snowball/consensus_test.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" +) + +var ( + Red = ids.Empty.Prefix(0) + Blue = ids.Empty.Prefix(1) + Green = ids.Empty.Prefix(2) +) + +func ParamsTest(t *testing.T, factory Factory) { + sb := factory.New() + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + sb.Initialize(params, Red) + + if p := sb.Parameters(); p.K != params.K { + t.Fatalf("Wrong K parameter") + } else if p.Alpha != params.Alpha { + t.Fatalf("Wrong Alpha parameter") + } else if p.BetaVirtuous != params.BetaVirtuous { + t.Fatalf("Wrong Beta1 parameter") + } else if p.BetaRogue != params.BetaRogue { + t.Fatalf("Wrong Beta2 parameter") + } +} diff --git a/snow/consensus/snowball/factory.go b/snow/consensus/snowball/factory.go new file mode 100644 index 0000000..b86f8a4 --- /dev/null +++ b/snow/consensus/snowball/factory.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +// Factory returns new instances of Consensus +type Factory interface { + New() Consensus +} diff --git a/snow/consensus/snowball/flat.go b/snow/consensus/snowball/flat.go new file mode 100644 index 0000000..da4eb1b --- /dev/null +++ b/snow/consensus/snowball/flat.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "github.com/ava-labs/gecko/ids" +) + +// FlatFactory implements Factory by returning a flat struct +type FlatFactory struct{} + +// New implements Factory +func (FlatFactory) New() Consensus { return &Flat{} } + +// Flat is a naive implementation of a multi-choice snowball instance +type Flat struct { + // params contains all the configurations of a snowball instance + params Parameters + + // snowball wraps the n-nary snowball logic + snowball nnarySnowball +} + +// Initialize implements the Consensus interface +func (f *Flat) Initialize(params Parameters, choice ids.ID) { + f.params = params + f.snowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) +} + +// Parameters implements the Consensus interface +func (f *Flat) Parameters() Parameters { return f.params } + +// Add implements the Consensus interface +func (f *Flat) Add(choice ids.ID) { f.snowball.Add(choice) } + +// Preference implements the Consensus interface +func (f *Flat) Preference() ids.ID { return f.snowball.Preference() } + +// RecordPoll implements the Consensus interface +func (f *Flat) RecordPoll(votes ids.Bag) { + if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { + f.snowball.RecordSuccessfulPoll(pollMode) + } else { + f.RecordUnsuccessfulPoll() + } +} + +// RecordUnsuccessfulPoll implements the Consensus interface +func (f *Flat) RecordUnsuccessfulPoll() { f.snowball.RecordUnsuccessfulPoll() } + +// Finalized implements the Consensus interface +func (f *Flat) Finalized() bool { return f.snowball.Finalized() } +func (f *Flat) String() string { return f.snowball.String() } diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go new file mode 100644 index 0000000..1aaa754 --- /dev/null +++ b/snow/consensus/snowball/flat_test.go @@ -0,0 +1,72 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" +) + +func TestFlatParams(t *testing.T) { ParamsTest(t, FlatFactory{}) } + +func TestFlat(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + f := Flat{} + f.Initialize(params, Red) + f.Add(Green) + f.Add(Blue) + + if pref := f.Preference(); !pref.Equals(Red) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if f.Finalized() { + t.Fatalf("Finalized too early") + } + + twoBlue := ids.Bag{} + twoBlue.Add(Blue, Blue) + f.RecordPoll(twoBlue) + + if pref := f.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if f.Finalized() { + t.Fatalf("Finalized too early") + } + + oneRedOneBlue := ids.Bag{} + twoBlue.Add(Red, Blue) + f.RecordPoll(oneRedOneBlue) + + if pref := f.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if f.Finalized() { + t.Fatalf("Finalized too early") + } + + f.RecordPoll(twoBlue) + + if pref := f.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if f.Finalized() { + t.Fatalf("Finalized too early") + } + + f.RecordPoll(twoBlue) + + if pref := f.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !f.Finalized() { + t.Fatalf("Finalized too late") + } + + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + if str := f.String(); str != expected { + t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) + } +} diff --git a/snow/consensus/snowball/network_test.go b/snow/consensus/snowball/network_test.go new file mode 100644 index 0000000..6e5435a --- /dev/null +++ b/snow/consensus/snowball/network_test.go @@ -0,0 +1,102 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/random" +) + +type Network struct { + params Parameters + colors []ids.ID + nodes, running []Consensus +} + +func (n *Network) Initialize(params Parameters, numColors int) { + n.params = params + for i := 0; i < numColors; i++ { + n.colors = append(n.colors, ids.Empty.Prefix(uint64(i))) + } +} + +func (n *Network) AddNode(sb Consensus) { + s := random.Uniform{N: len(n.colors)} + sb.Initialize(n.params, n.colors[s.Sample()]) + for s.CanSample() { + sb.Add(n.colors[s.Sample()]) + } + + n.nodes = append(n.nodes, sb) + if !sb.Finalized() { + n.running = append(n.running, sb) + } +} + +func (n *Network) AddNodeSpecificColor(sb Consensus, indices []int) { + sb.Initialize(n.params, n.colors[indices[0]]) + for _, i := range indices[1:] { + sb.Add(n.colors[i]) + } + + n.nodes = append(n.nodes, sb) + if !sb.Finalized() { + n.running = append(n.running, sb) + } +} + +func (n *Network) Finalized() bool { + return len(n.running) == 0 +} + +func (n *Network) Round() { + if len(n.running) > 0 { + runningInd := random.Rand(0, len(n.running)) + running := n.running[runningInd] + + sampler := random.Uniform{N: len(n.nodes)} + sampledColors := ids.Bag{} + for i := 0; i < n.params.K; i++ { + peer := n.nodes[sampler.Sample()] + sampledColors.Add(peer.Preference()) + } + + running.RecordPoll(sampledColors) + + // If this node has been finalized, remove it from the poller + if running.Finalized() { + newSize := len(n.running) - 1 + n.running[runningInd] = n.running[newSize] + n.running = n.running[:newSize] + } + } +} + +func (n 
*Network) Disagreement() bool { + i := 0 + for ; i < len(n.nodes) && !n.nodes[i].Finalized(); i++ { + } + if i < len(n.nodes) { + pref := n.nodes[i].Preference() + for ; i < len(n.nodes); i++ { + if node := n.nodes[i]; node.Finalized() && !pref.Equals(node.Preference()) { + return true + } + } + } + return false +} + +func (n *Network) Agreement() bool { + if len(n.nodes) == 0 { + return true + } + pref := n.nodes[0].Preference() + for _, node := range n.nodes { + if !pref.Equals(node.Preference()) { + return false + } + } + return true +} diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go new file mode 100644 index 0000000..6821a50 --- /dev/null +++ b/snow/consensus/snowball/nnary_snowball.go @@ -0,0 +1,79 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// nnarySnowball is a naive implementation of a multi-color snowball instance +type nnarySnowball struct { + // preference is the choice with the largest number of successful polls. 
+ // Ties are broken by switching choice lazily + preference ids.ID + + // maxSuccessfulPolls maximum number of successful polls this instance has + // gotten for any choice + maxSuccessfulPolls int + + // numSuccessfulPolls tracks the total number of successful network polls of + // the choices + numSuccessfulPolls map[[32]byte]int + + // snowflake wraps the n-nary snowflake logic + snowflake nnarySnowflake +} + +// Initialize implements the NnarySnowball interface +func (sb *nnarySnowball) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sb.preference = choice + sb.numSuccessfulPolls = make(map[[32]byte]int) + sb.snowflake.Initialize(betaVirtuous, betaRogue, choice) +} + +// Add implements the NnarySnowball interface +func (sb *nnarySnowball) Add(choice ids.ID) { sb.snowflake.Add(choice) } + +// Preference implements the NnarySnowball interface +func (sb *nnarySnowball) Preference() ids.ID { + // It is possible, with low probability, that the snowflake preference is + // not equal to the snowball preference when snowflake finalizes. However, + // this case is handled for completion. Therefore, if snowflake is + // finalized, then our finalized snowflake choice should be preferred. 
+ if sb.Finalized() { + return sb.snowflake.Preference() + } + return sb.preference +} + +// RecordSuccessfulPoll implements the NnarySnowball interface +func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { + if sb.Finalized() { + return + } + + key := choice.Key() + numSuccessfulPolls := sb.numSuccessfulPolls[key] + 1 + sb.numSuccessfulPolls[key] = numSuccessfulPolls + + if numSuccessfulPolls > sb.maxSuccessfulPolls { + sb.preference = choice + sb.maxSuccessfulPolls = numSuccessfulPolls + } + + sb.snowflake.RecordSuccessfulPoll(choice) +} + +// RecordUnsuccessfulPoll implements the NnarySnowball interface +func (sb *nnarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } + +// Finalized implements the NnarySnowball interface +func (sb *nnarySnowball) Finalized() bool { return sb.snowflake.Finalized() } + +func (sb *nnarySnowball) String() string { + return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, SF = %s)", + sb.preference, sb.maxSuccessfulPolls, &sb.snowflake) +} diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go new file mode 100644 index 0000000..655fdc6 --- /dev/null +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -0,0 +1,189 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestNnarySnowball(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sb := nnarySnowball{} + sb.Initialize(betaVirtuous, betaRogue, Red) + sb.Add(Blue) + sb.Add(Green) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestNnarySnowflake(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sf := nnarySnowflake{} + sf.Initialize(betaVirtuous, betaRogue, Red) + sf.Add(Blue) + sf.Add(Green) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sb := nnarySnowball{} + sb.Initialize(betaVirtuous, betaRogue, Red) + sb.Add(Blue) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordUnsuccessfulPoll() + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + if str := sb.String(); str != expected { + t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) + } + + for i := 0; i < 4; i++ { + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if !sb.Finalized() { + t.Fatalf("Finalized too late") + } + } +} + +func TestNarySnowflakeColor(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sb := nnarySnowball{} + sb.Initialize(betaVirtuous, betaRogue, Red) + sb.Add(Blue) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Blue) + + if pref := sb.snowflake.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if pref := sb.snowflake.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } +} diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go new file mode 100644 index 0000000..f9d1069 --- /dev/null +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// nnarySnowflake is the implementation of a snowflake instance with an +// unbounded number of choices +type nnarySnowflake struct { + // betaVirtuous is the number of consecutive successful queries required for + // finalization on a virtuous instance. + betaVirtuous int + + // betaRogue is the number of consecutive successful queries required for + // finalization on a rogue instance. + betaRogue int + + // confidence tracks the number of successful polls in a row that have + // returned the preference + confidence int + + // preference is the choice that last had a successful poll. Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. 
+ preference ids.ID + + // rogue tracks if this instance has multiple choices or only one + rogue bool + + // finalized prevents the state from changing after the required number of + // consecutive polls has been reached + finalized bool +} + +// Initialize implements the NnarySnowflake interface +func (sf *nnarySnowflake) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sf.betaVirtuous = betaVirtuous + sf.betaRogue = betaRogue + sf.preference = choice +} + +// Add implements the NnarySnowflake interface +func (sf *nnarySnowflake) Add(choice ids.ID) { sf.rogue = sf.rogue || !choice.Equals(sf.preference) } + +// Preference implements the NnarySnowflake interface +func (sf *nnarySnowflake) Preference() ids.ID { return sf.preference } + +// RecordSuccessfulPoll implements the NnarySnowflake interface +func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { + if sf.Finalized() { + return + } + + if sf.preference.Equals(choice) { + sf.confidence++ + } else { + sf.confidence = 1 + sf.preference = choice + } + + sf.finalized = (!sf.rogue && sf.confidence >= sf.betaVirtuous) || + sf.confidence >= sf.betaRogue +} + +// RecordUnsuccessfulPoll implements the NnarySnowflake interface +func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } + +// Finalized implements the NnarySnowflake interface +func (sf *nnarySnowflake) Finalized() bool { return sf.finalized } + +func (sf *nnarySnowflake) String() string { + return fmt.Sprintf("SF(Preference = %s, Confidence = %d, Finalized = %v)", + sf.preference, + sf.confidence, + sf.Finalized()) +} diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go new file mode 100644 index 0000000..5e14afa --- /dev/null +++ b/snow/consensus/snowball/parameters.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +// Parameters required for snowball consensus +type Parameters struct { + Namespace string + Metrics prometheus.Registerer + K, Alpha, BetaVirtuous, BetaRogue int +} + +// Valid returns nil if the parameters describe a valid initialization. +func (p Parameters) Valid() error { + switch { + case p.Alpha <= p.K/2: + return fmt.Errorf("K = %d, Alpha = %d: Fails the condition that: K/2 < Alpha", p.K, p.Alpha) + case p.K < p.Alpha: + return fmt.Errorf("K = %d, Alpha = %d: Fails the condition that: Alpha <= K", p.K, p.Alpha) + case p.BetaVirtuous <= 0: + return fmt.Errorf("BetaVirtuous = %d: Fails the condition that: 0 < BetaVirtuous", p.BetaVirtuous) + case p.BetaRogue < p.BetaVirtuous: + return fmt.Errorf("BetaVirtuous = %d, BetaRogue = %d: Fails the condition that: BetaVirtuous <= BetaRogue", p.BetaVirtuous, p.BetaRogue) + default: + return nil + } +} diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go new file mode 100644 index 0000000..de1b666 --- /dev/null +++ b/snow/consensus/snowball/parameters_test.go @@ -0,0 +1,73 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" +) + +func TestParametersValid(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersInvalidK(t *testing.T) { + p := Parameters{ + K: 0, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid k") + } +} + +func TestParametersInvalidAlpha(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 0, + BetaVirtuous: 1, + BetaRogue: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid alpha") + } +} + +func TestParametersInvalidBetaVirtuous(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 0, + BetaRogue: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid beta virtuous") + } +} + +func TestParametersInvalidBetaRogue(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 0, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid beta rogue") + } +} diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go new file mode 100644 index 0000000..ad6554b --- /dev/null +++ b/snow/consensus/snowball/tree.go @@ -0,0 +1,542 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" +) + +// TreeFactory implements Factory by returning a tree struct +type TreeFactory struct{} + +// New implements Factory +func (TreeFactory) New() Consensus { return &Tree{} } + +// Tree implements the snowball interface by using a modified patricia tree. +type Tree struct { + // params contains all the configurations of a snowball instance + params Parameters + + // shouldReset is used as an optimization to prevent needless tree + // traversals. 
If a snowball instance does not get an alpha majority, that + // instance needs to reset by calling RecordUnsuccessfulPoll. Because the + // tree splits votes based on the branch, when an instance doesn't get an + // alpha majority none of the children of this instance can get an alpha + // majority. To avoid calling RecordUnsuccessfulPoll on the full sub-tree of + // a node that didn't get an alpha majority, shouldReset is used to indicate + // that any later traversal into this sub-tree should call + // RecordUnsuccessfulPoll before performing any other action. + shouldReset bool + + // root is the node that represents the first snowball instance in the tree, + // and contains references to all the other snowball instances in the tree. + root node +} + +// Initialize implements the Consensus interface +func (t *Tree) Initialize(params Parameters, choice ids.ID) { + t.params = params + + snowball := &unarySnowball{} + snowball.Initialize(params.BetaVirtuous) + + t.root = &unaryNode{ + tree: t, + preference: choice, + commonPrefix: ids.NumBits, // The initial state has no conflicts + snowball: snowball, + } +} + +// Parameters implements the Consensus interface +func (t *Tree) Parameters() Parameters { return t.params } + +// Add implements the Consensus interface +func (t *Tree) Add(choice ids.ID) { + prefix := t.root.DecidedPrefix() + // Make sure that we haven't already decided against this new id + if ids.EqualSubset(0, prefix, t.Preference(), choice) { + t.root = t.root.Add(choice) + } +} + +// Preference implements the Consensus interface +func (t *Tree) Preference() ids.ID { return t.root.Preference() } + +// RecordPoll implements the Consensus interface +func (t *Tree) RecordPoll(votes ids.Bag) { + // Get the assumed decided prefix of the root node. + decidedPrefix := t.root.DecidedPrefix() + + // If any of the bits differ from the preference in this prefix, the vote is + // for a rejected operation. So, we filter out these invalid votes. 
+ filteredVotes := votes.Filter(0, decidedPrefix, t.Preference()) + + // Now that the votes have been restricted to valid votes, pass them into + // the first snowball instance + t.root = t.root.RecordPoll(filteredVotes, t.shouldReset) + + // Because we just passed the reset into the snowball instance, we should no + // longer reset. + t.shouldReset = false +} + +// RecordUnsuccessfulPoll implements the Consensus interface +func (t *Tree) RecordUnsuccessfulPoll() { t.shouldReset = true } + +// Finalized implements the Consensus interface +func (t *Tree) Finalized() bool { return t.root.Finalized() } + +func (t *Tree) String() string { + builder := strings.Builder{} + + prefixes := []string{""} + nodes := []node{t.root} + + for len(prefixes) > 0 { + newSize := len(prefixes) - 1 + + prefix := prefixes[newSize] + prefixes = prefixes[:newSize] + + node := nodes[newSize] + nodes = nodes[:newSize] + + s, newNodes := node.Printable() + + builder.WriteString(prefix) + builder.WriteString(s) + builder.WriteString("\n") + + newPrefix := prefix + " " + for range newNodes { + prefixes = append(prefixes, newPrefix) + } + nodes = append(nodes, newNodes...) + } + + return strings.TrimSuffix(builder.String(), "\n") +} + +type node interface { + // Preference returns the preferred choice of this sub-tree + Preference() ids.ID + // Return the number of assumed decided bits of this node + DecidedPrefix() int + // Adds a new choice to vote on + Add(newChoice ids.ID) node + // Apply the votes, reset the model if needed + RecordPoll(votes ids.Bag, shouldReset bool) (newChild node) + // Returns true if consensus has been reached on this node + Finalized() bool + + Printable() (string, []node) +} + +// unary is a node with either no children, or a single child. It handles the +// voting on a range of identical, virtuous, snowball instances. 
+type unaryNode struct { + // tree references the tree that contains this node + tree *Tree + + // preference is the choice that is preferred at every branch in this + // sub-tree + preference ids.ID + + // decidedPrefix is the last bit in the prefix that is assumed to be decided + decidedPrefix int // Will be in the range [0, 255) + + // commonPrefix is the last bit in the prefix that this node transitively + // references + commonPrefix int // Will be in the range (decidedPrefix, 256) + + // snowball wraps the snowball logic + snowball UnarySnowball + + // shouldReset is used as an optimization to prevent needless tree + // traversals. It is the continuation of shouldReset in the Tree struct. + shouldReset bool + + // child is the, possibly nil, node that votes on the next bits in the + // decision + child node +} + +func (u *unaryNode) Preference() ids.ID { return u.preference } +func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } + +// This is by far the most complicated function in this algorithm. +// The intuition is that this instance represents a series of consecutive unary +// snowball instances, and this function's purpose is convert one of these unary +// snowball instances into a binary snowball instance. +// There are 5 possible cases. +// 1. None of these instances should be split, we should attempt to split a +// child +// +// For example, attempting to insert the value "00001" in this node: +// +// +-------------------+ <-- This node will not be split +// | | +// | 0 0 0 | +// | | +// +-------------------+ <-- Pass the add to the child +// ^ +// | +// +// Results in: +// +// +-------------------+ +// | | +// | 0 0 0 | +// | | +// +-------------------+ <-- With the modified child +// ^ +// | +// +// 2. 
This instance represents a series of only one unary instance and it must +// be split +// This will return a binary choice, with one child the same as my child, +// and another (possibly nil child) representing a new chain to the end of +// the hash +// +// For example, attempting to insert the value "1" in this tree: +// +// +-------------------+ +// | | +// | 0 | +// | | +// +-------------------+ +// +// Results in: +// +// +-------------------+ +// | | | +// | 0 | 1 | +// | | | +// +-------------------+ +// +// 3. This instance must be split on the first bit +// This will return a binary choice, with one child equal to this instance +// with decidedPrefix increased by one, and another representing a new +// chain to the end of the hash +// +// For example, attempting to insert the value "10" in this tree: +// +// +-------------------+ +// | | +// | 0 0 | +// | | +// +-------------------+ +// +// Results in: +// +// +-------------------+ +// | | | +// | 0 | 1 | +// | | | +// +-------------------+ +// ^ ^ +// / \ +// +-------------------+ +-------------------+ +// | | | | +// | 0 | | 0 | +// | | | | +// +-------------------+ +-------------------+ +// +// 4. This instance must be split on the last bit +// This will modify this unary choice. The commonPrefix is decreased by +// one. The child is set to a binary instance that has a child equal to +// the current child and another child equal to a new unary instance to +// the end of the hash +// +// For example, attempting to insert the value "01" in this tree: +// +// +-------------------+ +// | | +// | 0 0 | +// | | +// +-------------------+ +// +// Results in: +// +// +-------------------+ +// | | +// | 0 | +// | | +// +-------------------+ +// ^ +// | +// +-------------------+ +// | | | +// | 0 | 1 | +// | | | +// +-------------------+ +// +// 5. This instance must be split on an interior bit +// This will modify this unary choice. The commonPrefix is set to the +// interior bit. 
The child is set to a binary instance that has a child +// equal to this unary choice with the decidedPrefix equal to the interior +// bit and another child equal to a new unary instance to the end of the +// hash +// +// For example, attempting to insert the value "010" in this tree: +// +// +-------------------+ +// | | +// | 0 0 0 | +// | | +// +-------------------+ +// +// Results in: +// +// +-------------------+ +// | | +// | 0 | +// | | +// +-------------------+ +// ^ +// | +// +-------------------+ +// | | | +// | 0 | 1 | +// | | | +// +-------------------+ +// ^ ^ +// / \ +// +-------------------+ +-------------------+ +// | | | | +// | 0 | | 0 | +// | | | | +// +-------------------+ +-------------------+ +func (u *unaryNode) Add(newChoice ids.ID) node { + if u.Finalized() { + return u // Only happens if the tree is finalized, or it's a leaf node + } + + if index, found := ids.FirstDifferenceSubset( + u.decidedPrefix, u.commonPrefix, u.preference, newChoice); !found { + // If the first difference doesn't exist, then this node shouldn't be + // split + if u.child != nil && ids.EqualSubset( + u.commonPrefix, u.child.DecidedPrefix(), u.preference, newChoice) { + // If the choice matched my child's prefix, then the add should be + // passed to my child. (Case 1. 
from above)
+			u.child = u.child.Add(newChoice)
+		}
+		// If the choice didn't match my child's prefix, then the choice was
+		// previously rejected and the tree should not be modified
+	} else {
+		// The difference was found, so this node must be split
+
+		bit := u.preference.Bit(uint(index)) // The currently preferred bit
+		b := &binaryNode{
+			tree:        u.tree,
+			bit:         index,
+			snowball:    u.snowball.Extend(u.tree.params.BetaRogue, bit),
+			shouldReset: [2]bool{u.shouldReset, u.shouldReset},
+		}
+		b.preferences[bit] = u.preference
+		b.preferences[1-bit] = newChoice
+
+		newChildSnowball := &unarySnowball{}
+		newChildSnowball.Initialize(u.tree.params.BetaVirtuous)
+		newChild := &unaryNode{
+			tree:          u.tree,
+			preference:    newChoice,
+			decidedPrefix: index + 1, // The new child assumes this branch has decided in its favor
+			commonPrefix:  ids.NumBits, // The new child has no conflicts under this branch
+			snowball:      newChildSnowball,
+		}
+
+		switch {
+		case u.decidedPrefix == u.commonPrefix-1:
+			// This node was only voting over one bit. (Case 2. from above)
+			b.children[bit] = u.child
+			if u.child != nil {
+				b.children[1-bit] = newChild
+			}
+			return b
+		case index == u.decidedPrefix:
+			// This node was split on the first bit. (Case 3. from above)
+			u.decidedPrefix++
+			b.children[bit] = u
+			b.children[1-bit] = newChild
+			return b
+		case index == u.commonPrefix-1:
+			// This node was split on the last bit. (Case 4. from above)
+			u.commonPrefix--
+			b.children[bit] = u.child
+			if u.child != nil {
+				b.children[1-bit] = newChild
+			}
+			u.child = b
+			return u
+		default:
+			// This node was split on an interior bit. (Case 5.
from above) + originalDecidedPrefix := u.decidedPrefix + u.decidedPrefix = index + 1 + b.children[bit] = u + b.children[1-bit] = newChild + return &unaryNode{ + tree: u.tree, + preference: u.preference, + decidedPrefix: originalDecidedPrefix, + commonPrefix: index, + snowball: u.snowball.Clone(), + child: b, + } + } + } + return u // Do nothing, the choice was already rejected +} + +func (u *unaryNode) RecordPoll(votes ids.Bag, reset bool) node { + // This ensures that votes for rejected colors are dropped + votes = votes.Filter(u.decidedPrefix, u.commonPrefix, u.preference) + + // If my parent didn't get enough votes previously, then neither did I + if reset { + u.snowball.RecordUnsuccessfulPoll() + u.shouldReset = true // Make sure my child is also reset correctly + } + + // If I got enough votes this time + if votes.Len() >= u.tree.params.Alpha { + u.snowball.RecordSuccessfulPoll() + + if u.child != nil { + decidedPrefix := u.child.DecidedPrefix() + filteredVotes := votes.Filter(u.commonPrefix, decidedPrefix, u.preference) + // If I'm now decided, return my child + if u.Finalized() { + return u.child.RecordPoll(filteredVotes, u.shouldReset) + } + u.child = u.child.RecordPoll(filteredVotes, u.shouldReset) + // The child's preference may have changed + u.preference = u.child.Preference() + } + // Now that I have passed my votes to my child, I don't need to reset + // them + u.shouldReset = false + } else { + // I didn't get enough votes, I must reset and my child must reset as + // well + u.snowball.RecordUnsuccessfulPoll() + u.shouldReset = true + } + + return u +} + +func (u *unaryNode) Finalized() bool { return u.snowball.Finalized() } + +func (u *unaryNode) Printable() (string, []node) { + s := fmt.Sprintf("%s Bits = [%d, %d)", + u.snowball, u.decidedPrefix, u.commonPrefix) + if u.child == nil { + return s, nil + } + return s, []node{u.child} +} + +// binaryNode is a node with either no children, or two children. 
It handles the +// voting of a single, rogue, snowball instance. +type binaryNode struct { + // tree references the tree that contains this node + tree *Tree + + // preferences are the choices that are preferred at every branch in their + // sub-tree + preferences [2]ids.ID + + // bit is the index in the id of the choice this node is deciding on + bit int // Will be in the range [0, 256) + + // snowball wraps the snowball logic + snowball BinarySnowball + + // shouldReset is used as an optimization to prevent needless tree + // traversals. It is the continuation of shouldReset in the Tree struct. + shouldReset [2]bool + + // children are the, possibly nil, nodes that vote on the next bits in the + // decision + children [2]node +} + +func (b *binaryNode) Preference() ids.ID { return b.preferences[b.snowball.Preference()] } +func (b *binaryNode) DecidedPrefix() int { return b.bit } + +func (b *binaryNode) Add(id ids.ID) node { + bit := id.Bit(uint(b.bit)) + child := b.children[bit] + // If child is nil, then we are running an instance on the last bit. Finding + // two hashes that are equal up to the last bit would be really cool though. 
+ // Regardless, the case is handled + if child != nil && + // + 1 is used because we already explicitly check the p.bit bit + ids.EqualSubset(b.bit+1, child.DecidedPrefix(), b.preferences[bit], id) { + b.children[bit] = child.Add(id) + } + return b +} + +func (b *binaryNode) RecordPoll(votes ids.Bag, reset bool) node { + // The list of votes we are passed is split into votes for bit 0 and votes + // for bit 1 + splitVotes := votes.Split(uint(b.bit)) + + bit := 0 // Because alpha > k/2, only the larger count could be increased + if splitVotes[0].Len() < splitVotes[1].Len() { + bit = 1 + } + + if reset { + b.snowball.RecordUnsuccessfulPoll() + b.shouldReset[bit] = true + // 1-bit isn't set here because it is set below anyway + } + b.shouldReset[1-bit] = true // They didn't get the threshold of votes + + prunedVotes := splitVotes[bit] + // If this bit got alpha votes, it was a successful poll + if prunedVotes.Len() >= b.tree.params.Alpha { + b.snowball.RecordSuccessfulPoll(bit) + + if child := b.children[bit]; child != nil { + // The votes are filtered to ensure that they are votes that should + // count for the child + filteredVotes := prunedVotes.Filter( + b.bit+1, child.DecidedPrefix(), b.preferences[bit]) + + if b.snowball.Finalized() { + // If we are decided here, that means we must have decided due + // to this poll. Therefore, we must have decided on bit. 
+ return child.RecordPoll(filteredVotes, b.shouldReset[bit]) + } + newChild := child.RecordPoll(filteredVotes, b.shouldReset[bit]) + b.children[bit] = newChild + b.preferences[bit] = newChild.Preference() + } + b.shouldReset[bit] = false // We passed the reset down + } else { + b.snowball.RecordUnsuccessfulPoll() + // The winning child didn't get enough votes either + b.shouldReset[bit] = true + } + return b +} + +func (b *binaryNode) Finalized() bool { return b.snowball.Finalized() } + +func (b *binaryNode) Printable() (string, []node) { + s := fmt.Sprintf("%s Bit = %d", b.snowball, b.bit) + if b.children[0] == nil { + return s, nil + } + return s, []node{b.children[1], b.children[0]} +} diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go new file mode 100644 index 0000000..56904e1 --- /dev/null +++ b/snow/consensus/snowball/tree_test.go @@ -0,0 +1,535 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "math/rand" + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" +) + +func TestTreeParams(t *testing.T) { ParamsTest(t, TreeFactory{}) } + +func TestSnowballSingleton(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + tree := Tree{} + tree.Initialize(params, Red) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + oneRed := ids.Bag{} + oneRed.Add(Red) + tree.RecordPoll(oneRed) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + tree.RecordPoll(oneRed) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + tree.RecordPoll(oneRed) + + if !tree.Finalized() { + t.Fatalf("Snowball should be finalized") + } else if !Red.Equals(tree.Preference()) { + t.Fatalf("After only voting red, something else was decided") + } + + tree.Add(Blue) + + oneBlue := ids.Bag{} + oneBlue.Add(Blue) + tree.RecordPoll(oneBlue) + + if !tree.Finalized() { + t.Fatalf("Snowball should be finalized") + } else if !Red.Equals(tree.Preference()) { + t.Fatalf("After only voting red, something else was decided") + } +} + +func TestSnowballRecordUnsuccessfulPoll(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + tree := Tree{} + tree.Initialize(params, Red) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + oneRed := ids.Bag{} + oneRed.Add(Red) + tree.RecordPoll(oneRed) + + tree.RecordUnsuccessfulPoll() + + tree.RecordPoll(oneRed) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + tree.RecordPoll(oneRed) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + + tree.RecordPoll(oneRed) + + if !tree.Finalized() { + t.Fatalf("Snowball should be finalized") + } else if !Red.Equals(tree.Preference()) { + 
t.Fatalf("After only voting red, something else was decided") + } +} + +func TestSnowballBinary(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, Red) + tree.Add(Blue) + + if pref := tree.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + oneBlue := ids.Bag{} + oneBlue.Add(Blue) + tree.RecordPoll(oneBlue) + + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + oneRed := ids.Bag{} + oneRed.Add(Red) + tree.RecordPoll(oneRed) + + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(oneBlue) + + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(oneBlue) + + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) + } else if !tree.Finalized() { + t.Fatalf("Didn't finalized correctly") + } +} + +func TestSnowballLastBinary(t *testing.T) { + zero := ids.Empty + one := ids.NewID([32]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, + }) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(one) + + expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 255)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 255" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + oneBag := ids.Bag{} + oneBag.Add(one) + tree.RecordPoll(oneBag) + + if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", one, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(oneBag) + + if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", one, pref) + } else if !tree.Finalized() { + t.Fatalf("Finalized too late") + } +} + +func TestSnowballTrinary(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, Green) + tree.Add(Red) + tree.Add(Blue) + + // * + // / \ + // R * + // / \ + // G B + + if pref := tree.Preference(); !Green.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Green, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + redBag := ids.Bag{} + redBag.Add(Red) + tree.RecordPoll(redBag) + + if pref := tree.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + blueBag := ids.Bag{} + blueBag.Add(Blue) + tree.RecordPoll(blueBag) + + if pref := tree.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + greenBag := ids.Bag{} + greenBag.Add(Green) + tree.RecordPoll(greenBag) + + // Here is a case where voting for a color makes a different color become + // the preferred color. This is intended behavior. + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(redBag) + + if pref := tree.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Green, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(greenBag) + + if pref := tree.Preference(); !Green.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Green, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } +} + +func TestSnowballCloseTrinary(t *testing.T) { + yellow := ids.NewID([32]byte{0x01}) + cyan := ids.NewID([32]byte{0x02}) + magenta := ids.NewID([32]byte{0x03}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, yellow) + tree.Add(cyan) + tree.Add(magenta) + + if pref := tree.Preference(); !yellow.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", yellow, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + yellowBag := ids.Bag{} + yellowBag.Add(yellow) + tree.RecordPoll(yellowBag) + + if pref := tree.Preference(); !yellow.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", yellow, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + magentaBag := ids.Bag{} + magentaBag.Add(magenta) + tree.RecordPoll(magentaBag) + + if pref := tree.Preference(); !yellow.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", yellow, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + cyanBag := ids.Bag{} + cyanBag.Add(cyan) + tree.RecordPoll(cyanBag) + + if pref := tree.Preference(); !yellow.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", yellow, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + + tree.RecordPoll(cyanBag) + + if pref := tree.Preference(); !yellow.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", yellow, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } +} + +func TestSnowball5Colors(t *testing.T) { + numColors := 5 + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 5, Alpha: 5, BetaVirtuous: 20, BetaRogue: 30, + } + + colors := []ids.ID{} + for i := 0; i < numColors; i++ { + colors = append(colors, ids.Empty.Prefix(uint64(i))) + } + + tree0 := Tree{} + tree0.Initialize(params, colors[4]) + + tree0.Add(colors[0]) + tree0.Add(colors[1]) + tree0.Add(colors[2]) + tree0.Add(colors[3]) + + tree1 := Tree{} + tree1.Initialize(params, colors[3]) + + tree1.Add(colors[0]) + tree1.Add(colors[1]) + tree1.Add(colors[2]) + tree1.Add(colors[4]) + + s1 := tree0.String() + s2 := tree1.String() + if strings.Count(s1, " ") != strings.Count(s2, " ") { + t.Fatalf("Mis-matched initial values:\n\n%s\n\n%s", + s1, s2) + } +} + +func TestSnowballFineGrained(t *testing.T) { + c0000 := ids.NewID([32]byte{0x00}) + c1000 := ids.NewID([32]byte{0x01}) + c1100 := ids.NewID([32]byte{0x03}) + c0010 := ids.NewID([32]byte{0x04}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, c0000) + { + expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.Add(c1100) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.Add(c1000) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.Add(c0010) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 2)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 2\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + c0000Bag := ids.Bag{} + c0000Bag.Add(c0000) + tree.RecordPoll(c0000Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 0\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + c0010Bag := ids.Bag{} + c0010Bag.Add(c0010) + tree.RecordPoll(c0010Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF = SF(Preference = 1, Confidence = 1, Finalized = false)) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(c0010Bag) + { + expected := "SB(NumSuccessfulPolls = 2, Confidence = 2, Finalized = true) Bits = [3, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !c0010.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0010, pref) + } else if !tree.Finalized() { + t.Fatalf("Finalized too late") + } + } +} + +func TestSnowballDoubleAdd(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + tree := Tree{} + tree.Initialize(params, Red) + tree.Add(Red) + + { + expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected %s got %s", expected, str) + } else if pref := tree.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } +} + +func TestSnowballConsistent(t *testing.T) { + numColors := 50 + numNodes := 100 + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, Alpha: 15, BetaVirtuous: 20, BetaRogue: 30, + } + seed := int64(0) + + rand.Seed(seed) + + n := Network{} + n.Initialize(params, numColors) + + for i := 0; i < numNodes; i++ { + n.AddNode(&Tree{}) + } + + for !n.Finalized() && !n.Disagreement() { + n.Round() + } + + if !n.Agreement() { + t.Fatalf("Network agreed on inconsistent values") + } +} diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go new file mode 100644 index 0000000..6d0db07 --- /dev/null +++ b/snow/consensus/snowball/unary_snowball.go @@ -0,0 +1,72 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "fmt" +) + +// unarySnowball is the implementation of a unary snowball instance +type unarySnowball struct { + // beta is the number of consecutive successful queries required for + // finalization. + beta int + + // confidence tracks the number of successful polls in a row that have + // returned the preference + confidence int + + // numSuccessfulPolls tracks the total number of successful network polls + numSuccessfulPolls int + + // finalized prevents the state from changing after the required number of + // consecutive polls has been reached + finalized bool +} + +// Initialize implements the UnarySnowball interface +func (sb *unarySnowball) Initialize(beta int) { sb.beta = beta } + +// RecordSuccessfulPoll implements the UnarySnowball interface +func (sb *unarySnowball) RecordSuccessfulPoll() { + sb.numSuccessfulPolls++ + sb.confidence++ + sb.finalized = sb.finalized || sb.confidence >= sb.beta +} + +// RecordUnsuccessfulPoll implements the UnarySnowball interface +func (sb *unarySnowball) RecordUnsuccessfulPoll() { sb.confidence = 0 } + +// Finalized implements the UnarySnowball interface +func (sb *unarySnowball) Finalized() bool { return sb.finalized } + +// Extend implements the UnarySnowball interface +func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { + bs := &binarySnowball{ + preference: choice, + snowflake: binarySnowflake{ + beta: beta, + preference: choice, + finalized: sb.Finalized(), + }, + } + return bs +} + +// Clone implements the UnarySnowball interface +func (sb *unarySnowball) Clone() UnarySnowball { + return &unarySnowball{ + beta: sb.beta, + numSuccessfulPolls: sb.numSuccessfulPolls, + confidence: sb.confidence, + finalized: sb.Finalized(), + } +} + +func (sb *unarySnowball) String() string { + return fmt.Sprintf("SB(NumSuccessfulPolls = %d, Confidence = %d, Finalized = %v)", + sb.numSuccessfulPolls, + sb.confidence, + sb.Finalized()) +} diff --git 
a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go new file mode 100644 index 0000000..8bf098a --- /dev/null +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedNumSuccessfulPolls, expectedConfidence int, expectedFinalized bool) { + if numSuccessfulPolls := sb.numSuccessfulPolls; numSuccessfulPolls != expectedNumSuccessfulPolls { + t.Fatalf("Wrong numSuccessfulPolls. Expected %d got %d", expectedNumSuccessfulPolls, numSuccessfulPolls) + } else if confidence := sb.confidence; confidence != expectedConfidence { + t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) + } else if finalized := sb.Finalized(); finalized != expectedFinalized { + t.Fatalf("Wrong finalized status. Expected %v got %v", expectedFinalized, finalized) + } +} + +func TestUnarySnowball(t *testing.T) { + beta := 2 + + sb := &unarySnowball{} + sb.Initialize(beta) + + sb.RecordSuccessfulPoll() + UnarySnowballStateTest(t, sb, 1, 1, false) + + sb.RecordUnsuccessfulPoll() + UnarySnowballStateTest(t, sb, 1, 0, false) + + sb.RecordSuccessfulPoll() + UnarySnowballStateTest(t, sb, 2, 1, false) + + sbCloneIntf := sb.Clone() + sbClone, ok := sbCloneIntf.(*unarySnowball) + if !ok { + t.Fatalf("Unexpectedly clone type") + } + + UnarySnowballStateTest(t, sbClone, 2, 1, false) + + binarySnowball := sbClone.Extend(beta, 0) + + binarySnowball.RecordUnsuccessfulPoll() + + binarySnowball.RecordSuccessfulPoll(1) + + if binarySnowball.Finalized() { + t.Fatalf("Should not have finalized") + } + + binarySnowball.RecordSuccessfulPoll(1) + + if binarySnowball.Preference() != 1 { + t.Fatalf("Wrong preference") + } else if !binarySnowball.Finalized() { + t.Fatalf("Should have finalized") + } +} diff --git 
a/snow/consensus/snowman/block.go b/snow/consensus/snowman/block.go new file mode 100644 index 0000000..3b07c11 --- /dev/null +++ b/snow/consensus/snowman/block.go @@ -0,0 +1,41 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/snow/choices" +) + +// Block is a possible decision that dictates the next canonical block. +// +// Blocks are guaranteed to be Verified, Accepted, and Rejected in topological +// order. Specifically, if Verify is called, then the parent has already been +// verified. If Accept is called, then the parent has already been accepted. If +// Reject is called, the parent has already been accepted or rejected. +// +// If the status of the block is Unknown, ID is assumed to be able to be called. +// If the status of the block is Accepted or Rejected; Parent, Verify, Accept, +// and Reject will never be called. +type Block interface { + choices.Decidable + + // Parent returns the block that this block points to. + // + // If the parent block is not known, a Block should be returned with the + // status Unknown. + Parent() Block + + // Verify that the state transition this block would make if accepted is + // valid. If the state transition is invalid, a non-nil error should be + // returned. + // + // It is guaranteed that the Parent has been successfully verified. + Verify() error + + // Bytes returns the binary representation of this block. + // + // This is used for sending blocks to peers. The bytes should be able to be + // parsed into the same block on another node. + Bytes() []byte +} diff --git a/snow/consensus/snowman/block_test.go b/snow/consensus/snowman/block_test.go new file mode 100644 index 0000000..609530b --- /dev/null +++ b/snow/consensus/snowman/block_test.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +type Blk struct { + parent Block + id ids.ID + height int + status choices.Status + bytes []byte +} + +func (b *Blk) Parent() Block { return b.parent } +func (b *Blk) ID() ids.ID { return b.id } +func (b *Blk) Status() choices.Status { return b.status } +func (b *Blk) Accept() { + if b.status.Decided() && b.status != choices.Accepted { + panic("Dis-agreement") + } + b.status = choices.Accepted +} +func (b *Blk) Reject() { + if b.status.Decided() && b.status != choices.Rejected { + panic("Dis-agreement") + } + b.status = choices.Rejected +} +func (b *Blk) Verify() error { return nil } +func (b *Blk) Bytes() []byte { return b.bytes } + +type sortBlks []*Blk + +func (sb sortBlks) Less(i, j int) bool { return sb[i].height < sb[j].height } +func (sb sortBlks) Len() int { return len(sb) } +func (sb sortBlks) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } + +func SortVts(blks []*Blk) { sort.Sort(sortBlks(blks)) } diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go new file mode 100644 index 0000000..022e910 --- /dev/null +++ b/snow/consensus/snowman/consensus.go @@ -0,0 +1,39 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +// Consensus represents a general snowman instance that can be used directly to +// process a series of dependent operations. +type Consensus interface { + // Takes in alpha, beta1, beta2, and an assumed accepted decision. + Initialize(*snow.Context, snowball.Parameters, ids.ID) + + // Returns the parameters that describe this snowman instance + Parameters() snowball.Parameters + + // Adds a new decision. Assumes the dependency has already been added. 
+ Add(Block) + + // Issued returns true if the block has been issued into consensus + Issued(Block) bool + + // Returns the ID of the tail of the strongly preferred sequence of + // decisions. + Preference() ids.ID + + // RecordPoll collects the results of a network poll. Assumes all decisions + // have been previously added. + RecordPoll(ids.Bag) + + // Finalized returns true if all decisions that have been added have been + // finalized. Note, it is possible that after returning finalized, a new + // decision may be added such that this instance is no longer finalized. + Finalized() bool +} diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go new file mode 100644 index 0000000..3d2b30b --- /dev/null +++ b/snow/consensus/snowman/consensus_test.go @@ -0,0 +1,643 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +func ParamsTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID), + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + + numProcessing := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "processing", + }) + numAccepted := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "accepted", + }) + numRejected := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "rejected", + }) + + params.Metrics.Register(numProcessing) + params.Metrics.Register(numAccepted) + params.Metrics.Register(numRejected) + + 
sm.Initialize(ctx, params, Genesis.ID()) + + if p := sm.Parameters(); p.K != params.K { + t.Fatalf("Wrong K parameter") + } else if p.Alpha != params.Alpha { + t.Fatalf("Wrong Alpha parameter") + } else if p.BetaVirtuous != params.BetaVirtuous { + t.Fatalf("Wrong Beta1 parameter") + } else if p.BetaRogue != params.BetaRogue { + t.Fatalf("Wrong Beta2 parameter") + } +} + +func AddTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + if pref := sm.Preference(); !pref.Equals(Genesis.ID()) { + t.Fatalf("Wrong preference. Expected %s, got %s", Genesis.ID(), pref) + } + + dep0 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(1), + } + sm.Add(dep0) + if pref := sm.Preference(); !pref.Equals(dep0.id) { + t.Fatalf("Wrong preference. Expected %s, got %s", dep0.id, pref) + } + + dep1 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(2), + } + sm.Add(dep1) + if pref := sm.Preference(); !pref.Equals(dep0.id) { + t.Fatalf("Wrong preference. Expected %s, got %s", dep0.id, pref) + } + + dep2 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(3), + } + sm.Add(dep2) + if pref := sm.Preference(); !pref.Equals(dep2.id) { + t.Fatalf("Wrong preference. Expected %s, got %s", dep2.id, pref) + } + + dep3 := &Blk{ + parent: &Blk{id: ids.Empty.Prefix(4)}, + id: ids.Empty.Prefix(5), + } + sm.Add(dep3) + if pref := sm.Preference(); !pref.Equals(dep2.id) { + t.Fatalf("Wrong preference. 
Expected %s, got %s", dep2.id, pref) + } +} + +func CollectTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep1 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(2), + } + sm.Add(dep1) + + dep0 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(1), + } + sm.Add(dep0) + + dep2 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(3), + } + sm.Add(dep2) + + dep3 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(4), + } + sm.Add(dep3) + + // Current graph structure: + // G + // / \ + // 0 1 + // / \ + // 2 3 + // Tail = 1 + + dep2_2 := ids.Bag{} + dep2_2.AddCount(dep2.id, 2) + sm.RecordPoll(dep2_2) + + // Current graph structure: + // G + // / \ + // 0 1 + // / \ + // 2 3 + // Tail = 2 + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + dep3_2 := ids.Bag{} + dep3_2.AddCount(dep3.id, 2) + sm.RecordPoll(dep3_2) + + // Current graph structure: + // 0 + // / \ + // 2 3 + // Tail = 2 + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + sm.RecordPoll(dep2_2) + + // Current graph structure: + // 0 + // / \ + // 2 3 + // Tail = 2 + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + sm.RecordPoll(dep2_2) + + // Current graph structure: + // 2 + // Tail = 2 + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + if dep0.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep1.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } else if dep2.Status() != 
choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep3.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } +} + +func CollectNothingTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + // Current graph structure: + // G + // Tail = G + + genesis1 := ids.Bag{} + genesis1.AddCount(Genesis.ID(), 1) + sm.RecordPoll(genesis1) + + // Current graph structure: + // G + // Tail = G + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if !Genesis.ID().Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } +} + +func CollectTransRejectTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep1 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(2), + } + sm.Add(dep1) + + dep0 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(1), + } + sm.Add(dep0) + + dep2 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(3), + } + sm.Add(dep2) + + // Current graph structure: + // G + // / \ + // 0 1 + // / + // 2 + // Tail = 1 + + dep1_1 := ids.Bag{} + dep1_1.AddCount(dep1.id, 1) + sm.RecordPoll(dep1_1) + sm.RecordPoll(dep1_1) + + // Current graph structure: + // 1 + // Tail = 1 + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if !dep1.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + if dep0.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } else if dep1.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep2.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } +} + +func CollectTransResetTest(t *testing.T, factory Factory) { + sm := 
factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep1 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + } + sm.Add(dep1) + + dep0 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + sm.Add(dep0) + + dep2 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(3), + status: choices.Processing, + } + sm.Add(dep2) + + // Current graph structure: + // G + // / \ + // 0 1 + // / + // 2 + // Tail = 1 + + dep1_1 := ids.Bag{} + dep1_1.AddCount(dep1.id, 1) + sm.RecordPoll(dep1_1) + + // Current graph structure: + // G + // / \ + // 0 1 + // / + // 2 + // Tail = 1 + + dep2_1 := ids.Bag{} + dep2_1.AddCount(dep2.id, 1) + sm.RecordPoll(dep2_1) + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if status := dep0.Status(); status != choices.Processing { + t.Fatalf("Shouldn't have accepted yet %s", status) + } + + if !dep1.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + sm.RecordPoll(dep2_1) + sm.RecordPoll(dep2_1) + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if dep0.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep1.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } else if dep2.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } +} + +func CollectTransVoteTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, Alpha: 3, BetaVirtuous: 1, BetaRogue: 1, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep0 := &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(1), + } + sm.Add(dep0) + + dep1 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(2), + } + sm.Add(dep1) + + dep2 := &Blk{ + parent: dep1, + id: ids.Empty.Prefix(3), + } 
+ sm.Add(dep2) + + dep3 := &Blk{ + parent: dep0, + id: ids.Empty.Prefix(4), + } + sm.Add(dep3) + + dep4 := &Blk{ + parent: dep3, + id: ids.Empty.Prefix(5), + } + sm.Add(dep4) + + // Current graph structure: + // G + // / + // 0 + // / \ + // 1 3 + // / \ + // 2 4 + // Tail = 2 + + dep0_2_4_1 := ids.Bag{} + dep0_2_4_1.AddCount(dep0.id, 1) + dep0_2_4_1.AddCount(dep2.id, 1) + dep0_2_4_1.AddCount(dep4.id, 1) + sm.RecordPoll(dep0_2_4_1) + + // Current graph structure: + // 0 + // / \ + // 1 3 + // / \ + // 2 4 + // Tail = 2 + + if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + dep2_3 := ids.Bag{} + dep2_3.AddCount(dep2.id, 3) + sm.RecordPoll(dep2_3) + + // Current graph structure: + // 2 + // Tail = 2 + + if !dep2.id.Equals(sm.Preference()) { + t.Fatalf("Wrong preference listed") + } + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if dep0.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep1.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep2.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if dep3.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } else if dep4.Status() != choices.Rejected { + t.Fatalf("Should have rejected") + } +} + +func DivergedVotingTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep0 := &Blk{ + parent: Genesis, + id: ids.NewID([32]byte{0x0f}), // 0b1111 + } + sm.Add(dep0) + + dep1 := &Blk{ + parent: Genesis, + id: ids.NewID([32]byte{0x08}), // 0b1000 + } + sm.Add(dep1) + + dep0_1 := ids.Bag{} + dep0_1.AddCount(dep0.id, 1) + sm.RecordPoll(dep0_1) + + dep2 := &Blk{ + parent: Genesis, + id: ids.NewID([32]byte{0x01}), // 0b0001 + } + sm.Add(dep2) + + // dep2 is already rejected. 
+ + dep3 := &Blk{ + parent: dep2, + id: ids.Empty.Prefix(3), + } + sm.Add(dep3) + + if dep0.Status() == choices.Accepted { + t.Fatalf("Shouldn't be accepted yet") + } + + // Transitively increases dep2. However, dep2 shares the first bit with + // dep0. Because dep2 is already rejected, this will accept dep0. + dep3_1 := ids.Bag{} + dep3_1.AddCount(dep3.id, 1) + sm.RecordPoll(dep3_1) + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if dep0.Status() != choices.Accepted { + t.Fatalf("Should be accepted") + } +} + +func IssuedTest(t *testing.T, factory Factory) { + sm := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + + sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + + dep0 := &Blk{ + parent: Genesis, + id: ids.NewID([32]byte{0}), + status: choices.Processing, + } + + if sm.Issued(dep0) { + t.Fatalf("Hasn't been issued yet") + } + + sm.Add(dep0) + + if !sm.Issued(dep0) { + t.Fatalf("Has been issued") + } + + dep1 := &Blk{ + parent: Genesis, + id: ids.NewID([32]byte{0x1}), // 0b0001 + status: choices.Accepted, + } + + if !sm.Issued(dep1) { + t.Fatalf("Has accepted status") + } +} + +func MetricsErrorTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID), + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + + numProcessing := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "processing", + }) + numAccepted := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "accepted", + }) + numRejected := prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "rejected", + }) + + if err := params.Metrics.Register(numProcessing); err != nil { + t.Fatal(err) + } + if err := 
params.Metrics.Register(numAccepted); err != nil { + t.Fatal(err) + } + if err := params.Metrics.Register(numRejected); err != nil { + t.Fatal(err) + } + + sm.Initialize(ctx, params, Genesis.ID()) +} + +func ConsistentTest(t *testing.T, factory Factory) { + numColors := 50 + numNodes := 100 + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 15, + BetaVirtuous: 20, + BetaRogue: 30, + } + seed := int64(0) + + rand.Seed(seed) + + n := Network{} + n.Initialize(params, numColors) + + for i := 0; i < numNodes; i++ { + n.AddNode(factory.New()) + } + + for !n.Finalized() { + n.Round() + } + + if !n.Agreement() { + t.Fatalf("Network agreed on inconsistent values") + } +} diff --git a/snow/consensus/snowman/factory.go b/snow/consensus/snowman/factory.go new file mode 100644 index 0000000..dd45e2a --- /dev/null +++ b/snow/consensus/snowman/factory.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +// Factory returns new instances of Consensus +type Factory interface { + New() Consensus +} diff --git a/snow/consensus/snowman/ids_test.go b/snow/consensus/snowman/ids_test.go new file mode 100644 index 0000000..2fe47a1 --- /dev/null +++ b/snow/consensus/snowman/ids_test.go @@ -0,0 +1,43 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +var ( + Genesis = &Blk{ + id: ids.Empty.Prefix(0), + status: choices.Accepted, + } +) + +func Matches(a, b []ids.ID) bool { + if len(a) != len(b) { + return false + } + set := ids.Set{} + set.Add(a...) + for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} +func MatchesShort(a, b []ids.ShortID) bool { + if len(a) != len(b) { + return false + } + set := ids.ShortSet{} + set.Add(a...) 
+ for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} diff --git a/snow/consensus/snowman/network_test.go b/snow/consensus/snowman/network_test.go new file mode 100644 index 0000000..7b27021 --- /dev/null +++ b/snow/consensus/snowman/network_test.go @@ -0,0 +1,114 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "math" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/utils/random" +) + +type Network struct { + params snowball.Parameters + colors []*Blk + nodes, running []Consensus +} + +func (n *Network) shuffleColors() { + s := random.Uniform{N: len(n.colors)} + colors := []*Blk(nil) + for s.CanSample() { + colors = append(colors, n.colors[s.Sample()]) + } + n.colors = colors + SortVts(n.colors) +} + +func (n *Network) Initialize(params snowball.Parameters, numColors int) { + n.params = params + n.colors = append(n.colors, &Blk{ + parent: Genesis, + id: ids.Empty.Prefix(uint64(random.Rand(0, math.MaxInt64))), + status: choices.Processing, + }) + + for i := 1; i < numColors; i++ { + dependency := n.colors[random.Rand(0, len(n.colors))] + n.colors = append(n.colors, &Blk{ + parent: dependency, + id: ids.Empty.Prefix(uint64(random.Rand(0, math.MaxInt64))), + height: dependency.height + 1, + status: choices.Processing, + }) + } +} + +func (n *Network) AddNode(sm Consensus) { + sm.Initialize(snow.DefaultContextTest(), n.params, Genesis.ID()) + + n.shuffleColors() + deps := map[[32]byte]Block{} + for _, blk := range n.colors { + myDep, found := deps[blk.parent.ID().Key()] + if !found { + myDep = blk.parent + } + myVtx := &Blk{ + parent: myDep, + id: blk.id, + height: blk.height, + status: blk.status, + } + sm.Add(myVtx) + deps[myVtx.ID().Key()] = myDep + } + n.nodes = append(n.nodes, sm) + n.running = 
append(n.running, sm) +} + +func (n *Network) Finalized() bool { return len(n.running) == 0 } + +func (n *Network) Round() { + if len(n.running) > 0 { + runningInd := random.Rand(0, len(n.running)) + running := n.running[runningInd] + + sampler := random.Uniform{N: len(n.nodes)} + sampledColors := ids.Bag{} + for i := 0; i < n.params.K; i++ { + peer := n.nodes[sampler.Sample()] + if peer != running { + sampledColors.Add(peer.Preference()) + } else { + i-- // So that we still sample k people + } + } + + running.RecordPoll(sampledColors) + + // If this node has been finalized, remove it from the poller + if running.Finalized() { + newSize := len(n.running) - 1 + n.running[runningInd] = n.running[newSize] + n.running = n.running[:newSize] + } + } +} + +func (n *Network) Agreement() bool { + if len(n.nodes) == 0 { + return true + } + pref := n.nodes[0].Preference() + for _, node := range n.nodes { + if !pref.Equals(node.Preference()) { + return false + } + } + return true +} diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go new file mode 100644 index 0000000..6ad92c8 --- /dev/null +++ b/snow/consensus/snowman/topological.go @@ -0,0 +1,428 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +// TopologicalFactory implements Factory by returning a topological struct +type TopologicalFactory struct{} + +// New implements Factory +func (TopologicalFactory) New() Consensus { return &Topological{} } + +// Topological implements the Snowman interface by using a tree tracking the +// strongly preferred branch. This tree structure amortizes network polls to +// vote on more than just the next position. 
+type Topological struct { + ctx *snow.Context + params snowball.Parameters + + numProcessing prometheus.Gauge + numAccepted, numRejected prometheus.Counter + + head ids.ID + nodes map[[32]byte]node // ParentID -> Snowball instance + tail ids.ID +} + +// Tracks the state of a snowman vertex +type node struct { + ts *Topological + blkID ids.ID + blk Block + + shouldFalter bool + sb snowball.Consensus + children map[[32]byte]Block +} + +// Used to track the kahn topological sort status +type kahnNode struct { + inDegree int + votes ids.Bag +} + +// Used to track which children should receive votes +type votes struct { + id ids.ID + votes ids.Bag +} + +// Initialize implements the Snowman interface +func (ts *Topological) Initialize(ctx *snow.Context, params snowball.Parameters, rootID ids.ID) { + ctx.Log.AssertDeferredNoError(params.Valid) + + ts.ctx = ctx + ts.params = params + + ts.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "processing", + Help: "Number of currently processing blocks", + }) + ts.numAccepted = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "accepted", + Help: "Number of blocks accepted", + }) + ts.numRejected = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "rejected", + Help: "Number of blocks rejected", + }) + + if err := ts.params.Metrics.Register(ts.numProcessing); err != nil { + ts.ctx.Log.Error("Failed to register processing statistics due to %s", err) + } + if err := ts.params.Metrics.Register(ts.numAccepted); err != nil { + ts.ctx.Log.Error("Failed to register accepted statistics due to %s", err) + } + if err := ts.params.Metrics.Register(ts.numRejected); err != nil { + ts.ctx.Log.Error("Failed to register rejected statistics due to %s", err) + } + + ts.head = rootID + ts.nodes = map[[32]byte]node{ + rootID.Key(): node{ + ts: ts, + blkID: rootID, + }, + } + ts.tail = rootID +} + +// Parameters implements 
the Snowman interface +func (ts *Topological) Parameters() snowball.Parameters { return ts.params } + +// Add implements the Snowman interface +func (ts *Topological) Add(blk Block) { + parent := blk.Parent() + parentID := parent.ID() + parentKey := parentID.Key() + + blkID := blk.ID() + + bytes := blk.Bytes() + ts.ctx.DecisionDispatcher.Issue(ts.ctx.ChainID, blkID, bytes) + ts.ctx.ConsensusDispatcher.Issue(ts.ctx.ChainID, blkID, bytes) + + if parent, ok := ts.nodes[parentKey]; ok { + parent.Add(blk) + ts.nodes[parentKey] = parent + + ts.nodes[blkID.Key()] = node{ + ts: ts, + blkID: blkID, + blk: blk, + } + + // If we are extending the tail, this is the new tail + if ts.tail.Equals(parentID) { + ts.tail = blkID + } + + ts.numProcessing.Inc() + } else { + // If the ancestor is missing, this means the ancestor must have already + // been pruned. Therefore, the dependent is transitively rejected. + blk.Reject() + + bytes := blk.Bytes() + ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, blkID, bytes) + ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, blkID, bytes) + + ts.numRejected.Inc() + } +} + +// Issued implements the Snowman interface +func (ts *Topological) Issued(blk Block) bool { + if blk.Status().Decided() { + return true + } + _, ok := ts.nodes[blk.ID().Key()] + return ok +} + +// Preference implements the Snowman interface +func (ts *Topological) Preference() ids.ID { return ts.tail } + +// RecordPoll implements the Snowman interface +// This performs Kahn’s algorithm. +// When a node is removed from the leaf queue, it is checked to see if the +// number of votes is >= alpha. If it is, then it is added to the vote stack. +// Once there are no nodes in the leaf queue. The vote stack is unwound and +// voted on. If a decision is made, then that choice is marked as accepted, and +// all alternative choices are marked as rejected. 
+// The complexity of this function is: +// Runtime = 3 * |live set| + |votes| +// Space = |live set| + |votes| +func (ts *Topological) RecordPoll(votes ids.Bag) { + // Runtime = |live set| + |votes| ; Space = |live set| + |votes| + kahnGraph, leaves := ts.calculateInDegree(votes) + + // Runtime = |live set| ; Space = |live set| + voteStack := ts.pushVotes(kahnGraph, leaves) + + // Runtime = |live set| ; Space = Constant + tail := ts.vote(voteStack) + tn := node{} + for tn = ts.nodes[tail.Key()]; tn.sb != nil; tn = ts.nodes[tail.Key()] { + tail = tn.sb.Preference() + } + + ts.tail = tn.blkID +} + +// Finalized implements the Snowman interface +func (ts *Topological) Finalized() bool { return len(ts.nodes) == 1 } + +// takes in a list of votes and sets up the topological ordering. Returns the +// reachable section of the graph annotated with the number of inbound edges and +// the non-transitively applied votes. Also returns the list of leaf nodes. +func (ts *Topological) calculateInDegree( + votes ids.Bag) (map[[32]byte]kahnNode, []ids.ID) { + kahns := make(map[[32]byte]kahnNode) + leaves := ids.Set{} + + for _, vote := range votes.List() { + voteNode, validVote := ts.nodes[vote.Key()] + // If it is not found, then the vote is either for something rejected, + // or something we haven't heard of yet. + if validVote && voteNode.blk != nil && !voteNode.blk.Status().Decided() { + parentID := voteNode.blk.Parent().ID() + parentKey := parentID.Key() + kahn, previouslySeen := kahns[parentKey] + // Add this new vote to the current bag of votes + kahn.votes.AddCount(vote, votes.Count(vote)) + kahns[parentKey] = kahn + + if !previouslySeen { + // If I've never seen this node before, it is currently a leaf. 
+ leaves.Add(parentID) + + for n, e := ts.nodes[parentKey]; e; n, e = ts.nodes[parentKey] { + if n.blk == nil || n.blk.Status().Decided() { + break // Ensure that we haven't traversed off the tree + } + parentID := n.blk.Parent().ID() + parentKey = parentID.Key() + + kahn := kahns[parentKey] + kahn.inDegree++ + kahns[parentKey] = kahn + + if kahn.inDegree == 1 { + // If I am transitively seeing this node for the first + // time, it is no longer a leaf. + leaves.Remove(parentID) + } else { + // If I have already traversed this branch, stop. + break + } + } + } + } + } + + return kahns, leaves.List() +} + +// convert the tree into a branch of snowball instances with an alpha threshold +func (ts *Topological) pushVotes( + kahnNodes map[[32]byte]kahnNode, leaves []ids.ID) []votes { + voteStack := []votes(nil) + for len(leaves) > 0 { + newLeavesSize := len(leaves) - 1 + leaf := leaves[newLeavesSize] + leaves = leaves[:newLeavesSize] + + leafKey := leaf.Key() + kahn := kahnNodes[leafKey] + + if node, shouldVote := ts.nodes[leafKey]; shouldVote { + if kahn.votes.Len() >= ts.params.Alpha { + voteStack = append(voteStack, votes{ + id: leaf, + votes: kahn.votes, + }) + } + + if node.blk == nil || node.blk.Status().Decided() { + continue // Stop traversing once we pass into the decided frontier + } + + parentID := node.blk.Parent().ID() + parentKey := parentID.Key() + if depNode, notPruned := kahnNodes[parentKey]; notPruned { + // Remove one of the in-bound edges + depNode.inDegree-- + // Push the votes to my parent + depNode.votes.AddCount(leaf, kahn.votes.Len()) + kahnNodes[parentKey] = depNode + + if depNode.inDegree == 0 { + // Once I have no in-bound edges, I'm a leaf + leaves = append(leaves, parentID) + } + } + } + } + return voteStack +} + +func (ts *Topological) vote(voteStack []votes) ids.ID { + if len(voteStack) == 0 { + headKey := ts.head.Key() + headNode := ts.nodes[headKey] + headNode.shouldFalter = true + + ts.ctx.Log.Verbo("No progress was made on this vote 
even though we have %d nodes", len(ts.nodes)) + + ts.nodes[headKey] = headNode + return ts.tail + } + + onTail := true + tail := ts.head + for len(voteStack) > 0 { + newStackSize := len(voteStack) - 1 + voteGroup := voteStack[newStackSize] + voteStack = voteStack[:newStackSize] + + voteParentKey := voteGroup.id.Key() + parentNode, stillExists := ts.nodes[voteParentKey] + if !stillExists { + break + } + + shouldTransFalter := parentNode.shouldFalter + if parentNode.shouldFalter { + parentNode.sb.RecordUnsuccessfulPoll() + parentNode.shouldFalter = false + ts.ctx.Log.Verbo("Reset confidence on %s", parentNode.blkID) + } + parentNode.sb.RecordPoll(voteGroup.votes) + + // Only accept when you are finalized and the head. + if parentNode.sb.Finalized() && ts.head.Equals(voteGroup.id) { + ts.accept(parentNode) + tail = parentNode.sb.Preference() + delete(ts.nodes, voteParentKey) + ts.numProcessing.Dec() + } else { + ts.nodes[voteParentKey] = parentNode + } + + // If this is the last id that got votes, default to the empty id. This + // will cause all my children to be reset below. + nextID := ids.ID{} + if len(voteStack) > 0 { + nextID = voteStack[newStackSize-1].id + } + + onTail = onTail && nextID.Equals(parentNode.sb.Preference()) + if onTail { + tail = nextID + } + + // If there wasn't an alpha threshold on the branch (either on this vote + // or a past transitive vote), I should falter now. + for childIDBytes := range parentNode.children { + if childID := ids.NewID(childIDBytes); shouldTransFalter || !childID.Equals(nextID) { + if childNode, childExists := ts.nodes[childIDBytes]; childExists { + // The existence check is needed in case the current node + // was finalized. However, in this case, we still need to + // check for the next id. + ts.ctx.Log.Verbo("Defering confidence reset on %s with %d children. 
NextID: %s", childID, len(parentNode.children), nextID) + childNode.shouldFalter = true + ts.nodes[childIDBytes] = childNode + } + } + } + } + return tail +} + +func (ts *Topological) accept(n node) { + // Accept the preference, reject all transitive rejections + pref := n.sb.Preference() + + rejects := []ids.ID(nil) + for childIDBytes := range n.children { + if childID := ids.NewID(childIDBytes); !childID.Equals(pref) { + child := n.children[childIDBytes] + child.Reject() + + bytes := child.Bytes() + ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + + ts.numRejected.Inc() + rejects = append(rejects, childID) + } + } + ts.rejectTransitively(rejects...) + + ts.head = pref + child := n.children[pref.Key()] + ts.ctx.Log.Verbo("Accepting block with ID %s", child.ID()) + + bytes := child.Bytes() + ts.ctx.DecisionDispatcher.Accept(ts.ctx.ChainID, child.ID(), bytes) + ts.ctx.ConsensusDispatcher.Accept(ts.ctx.ChainID, child.ID(), bytes) + + child.Accept() + ts.numAccepted.Inc() +} + +// Takes in a list of newly rejected ids and rejects everything that depends on +// them +func (ts *Topological) rejectTransitively(rejected ...ids.ID) { + for len(rejected) > 0 { + newRejectedSize := len(rejected) - 1 + rejectID := rejected[newRejectedSize] + rejected = rejected[:newRejectedSize] + + rejectKey := rejectID.Key() + rejectNode := ts.nodes[rejectKey] + delete(ts.nodes, rejectKey) + ts.numProcessing.Dec() + + for childIDBytes, child := range rejectNode.children { + childID := ids.NewID(childIDBytes) + rejected = append(rejected, childID) + child.Reject() + + bytes := child.Bytes() + ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + + ts.numRejected.Inc() + } + } +} + +func (n *node) Add(child Block) { + childID := child.ID() + if n.sb == nil { + n.sb = &snowball.Tree{} + n.sb.Initialize(n.ts.params, childID) + } 
else { + n.sb.Add(childID) + } + if n.children == nil { + n.children = make(map[[32]byte]Block) + } + n.children[childID.Key()] = child +} diff --git a/snow/consensus/snowman/topological_test.go b/snow/consensus/snowman/topological_test.go new file mode 100644 index 0000000..a99f3b7 --- /dev/null +++ b/snow/consensus/snowman/topological_test.go @@ -0,0 +1,32 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "testing" +) + +func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) } + +func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) } + +func TestTopologicalCollect(t *testing.T) { CollectTest(t, TopologicalFactory{}) } + +func TestTopologicalCollectNothing(t *testing.T) { CollectNothingTest(t, TopologicalFactory{}) } + +func TestTopologicalCollectTransReject(t *testing.T) { CollectTransRejectTest(t, TopologicalFactory{}) } + +func TestTopologicalCollectTransResetTest(t *testing.T) { + CollectTransResetTest(t, TopologicalFactory{}) +} + +func TestTopologicalCollectTransVote(t *testing.T) { CollectTransVoteTest(t, TopologicalFactory{}) } + +func TestTopologicalDivergedVoting(t *testing.T) { DivergedVotingTest(t, TopologicalFactory{}) } + +func TestTopologicalIssuedTest(t *testing.T) { IssuedTest(t, TopologicalFactory{}) } + +func TestTopologicalMetricsError(t *testing.T) { MetricsErrorTest(t, TopologicalFactory{}) } + +func TestTopologicalConsistent(t *testing.T) { ConsistentTest(t, TopologicalFactory{}) } diff --git a/snow/consensus/snowstorm/benchmark_test.go b/snow/consensus/snowstorm/benchmark_test.go new file mode 100644 index 0000000..914167d --- /dev/null +++ b/snow/consensus/snowstorm/benchmark_test.go @@ -0,0 +1,225 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +func Simulate( + numColors, colorsPerConsumer, maxInputConflicts, numNodes int, + params snowball.Parameters, + seed int64, + fact Factory, +) { + net := Network{} + rand.Seed(seed) + net.Initialize( + params, + numColors, + colorsPerConsumer, + maxInputConflicts, + ) + + rand.Seed(seed) + for i := 0; i < numNodes; i++ { + net.AddNode(fact.New()) + } + + numRounds := 0 + for !net.Finalized() && !net.Disagreement() && numRounds < 50 { + rand.Seed(int64(numRounds) + seed) + net.Round() + numRounds++ + } +} + +/* + ****************************************************************************** + ********************************** Virtuous ********************************** + ****************************************************************************** + */ + +func BenchmarkVirtuousDirected(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 25, + /*colorsPerConsumer=*/ 1, + /*maxInputConflicts=*/ 1, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ DirectedFactory{}, + ) + } +} + +func BenchmarkVirtuousInput(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 25, + /*colorsPerConsumer=*/ 1, + /*maxInputConflicts=*/ 1, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ InputFactory{}, + ) + } +} + +/* + ****************************************************************************** + *********************************** Rogue ************************************ + ****************************************************************************** + */ + +func BenchmarkRogueDirected(b *testing.B) { + 
for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 25, + /*colorsPerConsumer=*/ 1, + /*maxInputConflicts=*/ 3, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ DirectedFactory{}, + ) + } +} + +func BenchmarkRogueInput(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 25, + /*colorsPerConsumer=*/ 1, + /*maxInputConflicts=*/ 3, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ InputFactory{}, + ) + } +} + +/* + ****************************************************************************** + ******************************** Many Inputs ********************************* + ****************************************************************************** + */ + +func BenchmarkMultiDirected(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 50, + /*colorsPerConsumer=*/ 10, + /*maxInputConflicts=*/ 1, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ DirectedFactory{}, + ) + } +} + +func BenchmarkMultiInput(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 50, + /*colorsPerConsumer=*/ 10, + /*maxInputConflicts=*/ 1, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ InputFactory{}, + ) + } +} + +/* + ****************************************************************************** + ***************************** Many Rogue Inputs ****************************** + ****************************************************************************** + */ + +func BenchmarkMultiRogueDirected(b 
*testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 50, + /*colorsPerConsumer=*/ 10, + /*maxInputConflicts=*/ 3, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ DirectedFactory{}, + ) + } +} + +func BenchmarkMultiRogueInput(b *testing.B) { + for n := 0; n < b.N; n++ { + Simulate( + /*numColors=*/ 50, + /*colorsPerConsumer=*/ 10, + /*maxInputConflicts=*/ 3, + /*numNodes=*/ 50, + /*params=*/ snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + }, + /*seed=*/ 0, + /*fact=*/ InputFactory{}, + ) + } +} diff --git a/snow/consensus/snowstorm/consensus.go b/snow/consensus/snowstorm/consensus.go new file mode 100644 index 0000000..0e05a12 --- /dev/null +++ b/snow/consensus/snowstorm/consensus.go @@ -0,0 +1,94 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +// Consensus is a snowball instance deciding between an unbounded number of +// non-transitive conflicts. After performing a network sample of k nodes, you +// should call collect with the responses. +type Consensus interface { + fmt.Stringer + + // Takes in the context, alpha, betaVirtuous, and betaRogue + Initialize(*snow.Context, snowball.Parameters) + + // Returns the parameters that describe this snowstorm instance + Parameters() snowball.Parameters + + // Returns true if transaction is virtuous. 
+ // That is, no transaction has been added that conflicts with + IsVirtuous(Tx) bool + + // Adds a new transaction to vote on + Add(Tx) + + // Returns true iff transaction has been added + Issued(Tx) bool + + // Returns the set of virtuous transactions + // that have not yet been accepted or rejected + Virtuous() ids.Set + + // Returns the currently preferred transactions to be finalized + Preferences() ids.Set + + // Returns the set of transactions conflicting with + Conflicts(Tx) ids.Set + + // Collects the results of a network poll. Assumes all transactions + // have been previously added + RecordPoll(ids.Bag) + + // Returns true iff all remaining transactions are rogue. Note, it is + // possible that after returning quiesce, a new decision may be added such + // that this instance should no longer quiesce. + Quiesce() bool + + // Returns true iff all added transactions have been finalized. Note, it is + // possible that after returning finalized, a new decision may be added such + // that this instance is no longer finalized. + Finalized() bool +} + +// Tx consumes state. +type Tx interface { + choices.Decidable + + // Dependencies is a list of transactions upon which this transaction + // depends. Each element of Dependencies must be verified before Verify is + // called on this transaction. + // + // Similarly, each element of Dependencies must be accepted before this + // transaction is accepted. + Dependencies() []Tx + + // InputIDs is a set where each element is the ID of a piece of state that + // will be consumed if this transaction is accepted. + // + // In the context of a UTXO-based payments system, for example, this would + // be the IDs of the UTXOs consumed by this transaction + InputIDs() ids.Set + + // Verify that the state transition this transaction would make if it were + // accepted is valid. If the state transition is invalid, a non-nil error + // should be returned. 
+ // + // It is guaranteed that when Verify is called, all the dependencies of + // this transaction have already been successfully verified. + Verify() error + + // Bytes returns the binary representation of this transaction. + // + // This is used for sending transactions to peers. Another node should be + // able to parse these bytes to the same transaction. + Bytes() []byte +} diff --git a/snow/consensus/snowstorm/consensus_test.go b/snow/consensus/snowstorm/consensus_test.go new file mode 100644 index 0000000..270292c --- /dev/null +++ b/snow/consensus/snowstorm/consensus_test.go @@ -0,0 +1,845 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +var ( + Red = &TestTx{Identifier: ids.Empty.Prefix(0)} + Green = &TestTx{Identifier: ids.Empty.Prefix(1)} + Blue = &TestTx{Identifier: ids.Empty.Prefix(2)} + Alpha = &TestTx{Identifier: ids.Empty.Prefix(3)} +) + +// R - G - B - A + +func init() { + X := ids.Empty.Prefix(4) + Y := ids.Empty.Prefix(5) + Z := ids.Empty.Prefix(6) + + Red.Ins.Add(X) + + Green.Ins.Add(X) + Green.Ins.Add(Y) + + Blue.Ins.Add(Y) + Blue.Ins.Add(Z) + + Alpha.Ins.Add(Z) +} + +func Setup() { + Red.Reset() + Green.Reset() + Blue.Reset() + Alpha.Reset() +} + +func ParamsTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if p := graph.Parameters(); p.K != params.K { + t.Fatalf("Wrong K parameter") + } else if p := graph.Parameters(); p.Alpha != params.Alpha { + t.Fatalf("Wrong Alpha parameter") + } else if p := graph.Parameters(); 
p.BetaVirtuous != params.BetaVirtuous { + t.Fatalf("Wrong Beta1 parameter") + } else if p := graph.Parameters(); p.BetaRogue != params.BetaRogue { + t.Fatalf("Wrong Beta2 parameter") + } +} + +func IssuedTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if issued := graph.Issued(Red); issued { + t.Fatalf("Haven't issued anything yet.") + } + + graph.Add(Red) + + if issued := graph.Issued(Red); !issued { + t.Fatalf("Have already issued.") + } + + Blue.Accept() + + if issued := graph.Issued(Blue); !issued { + t.Fatalf("Have already accepted.") + } +} + +func LeftoverInputTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + graph.Add(Green) + + if prefs := graph.Preferences(); prefs.Len() != 1 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red.ID(), prefs.List()[0]) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + r := ids.Bag{} + r.SetThreshold(2) + r.AddCount(Red.ID(), 2) + graph.RecordPoll(r) + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if !graph.Finalized() { + t.Fatalf("Finalized too late") + } + + if Red.Status() != choices.Accepted { + t.Fatalf("%s should have been accepted", Red.ID()) + } else if Green.Status() != choices.Rejected { + t.Fatalf("%s should have been rejected", Green.ID()) + } +} + +func LowerConfidenceTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + graph.Add(Green) + graph.Add(Blue) + + if prefs := graph.Preferences(); prefs.Len() != 1 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + r := ids.Bag{} + r.SetThreshold(2) + r.AddCount(Red.ID(), 2) + graph.RecordPoll(r) + + if prefs := graph.Preferences(); prefs.Len() != 1 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Blue.ID()) { + t.Fatalf("Wrong preference. 
Expected %s", Blue.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } +} + +func MiddleConfidenceTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + graph.Add(Green) + graph.Add(Alpha) + graph.Add(Blue) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(Alpha.ID()) { + t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + r := ids.Bag{} + r.SetThreshold(2) + r.AddCount(Red.ID(), 2) + graph.RecordPoll(r) + + if prefs := graph.Preferences(); prefs.Len() != 1 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Alpha.ID()) { + t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } +} +func IndependentTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + graph.Add(Alpha) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(Alpha.ID()) { + t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + ra := ids.Bag{} + ra.SetThreshold(2) + ra.AddCount(Red.ID(), 2) + ra.AddCount(Alpha.ID(), 2) + graph.RecordPoll(ra) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(Alpha.ID()) { + t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + graph.RecordPoll(ra) + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if !graph.Finalized() { + t.Fatalf("Finalized too late") + } +} + +func VirtuousTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + + if virtuous := graph.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(Red.ID()) { + t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) + } + + graph.Add(Alpha) + + if virtuous := graph.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(Red.ID()) { + t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) + } else if !virtuous.Contains(Alpha.ID()) { + t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID()) + } + + graph.Add(Green) + + if virtuous := graph.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(Alpha.ID()) { + t.Fatalf("Wrong virtuous. 
Expected %s", Alpha.ID()) + } + + graph.Add(Blue) + + if virtuous := graph.Virtuous(); virtuous.Len() != 0 { + t.Fatalf("Wrong number of virtuous.") + } +} + +func IsVirtuousTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if !graph.IsVirtuous(Red) { + t.Fatalf("Should be virtuous") + } else if !graph.IsVirtuous(Green) { + t.Fatalf("Should be virtuous") + } else if !graph.IsVirtuous(Blue) { + t.Fatalf("Should be virtuous") + } else if !graph.IsVirtuous(Alpha) { + t.Fatalf("Should be virtuous") + } + + graph.Add(Red) + + if !graph.IsVirtuous(Red) { + t.Fatalf("Should be virtuous") + } else if graph.IsVirtuous(Green) { + t.Fatalf("Should not be virtuous") + } else if !graph.IsVirtuous(Blue) { + t.Fatalf("Should be virtuous") + } else if !graph.IsVirtuous(Alpha) { + t.Fatalf("Should be virtuous") + } + + graph.Add(Green) + + if graph.IsVirtuous(Red) { + t.Fatalf("Should not be virtuous") + } else if graph.IsVirtuous(Green) { + t.Fatalf("Should not be virtuous") + } else if graph.IsVirtuous(Blue) { + t.Fatalf("Should not be virtuous") + } +} + +func QuiesceTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if !graph.Quiesce() { + t.Fatalf("Should quiesce") + } + + graph.Add(Red) + + if graph.Quiesce() { + t.Fatalf("Shouldn't quiesce") + } + + graph.Add(Green) + + if !graph.Quiesce() { + t.Fatalf("Should quiesce") + } +} + +func AcceptingDependencyTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + } + purple.Ins.Add(ids.Empty.Prefix(8)) + purple.Deps = 
[]Tx{Red} + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + graph.Add(Red) + graph.Add(Green) + graph.Add(purple) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + g := ids.Bag{} + g.Add(Green.ID()) + + graph.RecordPoll(g) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + rp := ids.Bag{} + rp.Add(Red.ID(), purple.ID()) + + graph.RecordPoll(rp) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. 
Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + r := ids.Bag{} + r.Add(Red.ID()) + + graph.RecordPoll(r) + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if Red.Status() != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted) + } else if Green.Status() != choices.Rejected { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Rejected) + } else if purple.Status() != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Accepted) + } +} + +func RejectingDependencyTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + } + purple.Ins.Add(ids.Empty.Prefix(8)) + purple.Deps = []Tx{Red, Blue} + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + graph.Add(Red) + graph.Add(Green) + graph.Add(Blue) + graph.Add(purple) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. 
%s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if Blue.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + gp := ids.Bag{} + gp.Add(Green.ID(), purple.ID()) + + graph.RecordPoll(gp) + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(purple.ID()) { + t.Fatalf("Wrong preference. Expected %s", purple.ID()) + } else if Red.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Processing) + } else if Green.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Processing) + } else if Blue.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) + } else if purple.Status() != choices.Processing { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) + } + + graph.RecordPoll(gp) + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if Red.Status() != choices.Rejected { + t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Rejected) + } else if Green.Status() != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", Green.ID(), choices.Accepted) + } else if Blue.Status() != choices.Rejected { + t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Rejected) + } else if purple.Status() != choices.Rejected { + t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Rejected) + } +} + +func VacuouslyAcceptedTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + } + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + graph.Add(purple) + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if status := purple.Status(); status != choices.Accepted { + t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Accepted) + } +} + +func ConflictsTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + conflictInputID := ids.Empty.Prefix(0) + + insPurple := ids.Set{} + insPurple.Add(conflictInputID) + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + Ins: insPurple, + } + + insOrange := ids.Set{} + insOrange.Add(conflictInputID) + + orange := &TestTx{ + Identifier: ids.Empty.Prefix(6), + Stat: choices.Processing, + Ins: insPurple, + } + + graph.Add(purple) + + if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { + t.Fatalf("Wrong number of conflicts") + } else if !orangeConflicts.Contains(purple.Identifier) { + t.Fatalf("Conflicts does not contain the right transaction") + } + + graph.Add(orange) + + if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { + t.Fatalf("Wrong number of conflicts") + } else if !orangeConflicts.Contains(purple.Identifier) { + t.Fatalf("Conflicts does not contain the right transaction") + } +} + +func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + 
params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + rogue1 := &TestTx{ + Identifier: ids.Empty.Prefix(0), + Stat: choices.Processing, + } + rogue2 := &TestTx{ + Identifier: ids.Empty.Prefix(1), + Stat: choices.Processing, + } + virtuous := &TestTx{ + Identifier: ids.Empty.Prefix(2), + Deps: []Tx{rogue1}, + Stat: choices.Processing, + } + + input1 := ids.Empty.Prefix(3) + input2 := ids.Empty.Prefix(4) + + rogue1.Ins.Add(input1) + rogue2.Ins.Add(input1) + + virtuous.Ins.Add(input2) + + graph.Add(rogue1) + graph.Add(rogue2) + graph.Add(virtuous) + + votes := ids.Bag{} + votes.Add(rogue1.ID()) + votes.Add(virtuous.ID()) + + graph.RecordPoll(votes) + + if status := rogue1.Status(); status != choices.Processing { + t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) + } else if status := rogue2.Status(); status != choices.Processing { + t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) + } else if status := virtuous.Status(); status != choices.Processing { + t.Fatalf("Virtuous Tx is %s expected %s", status, choices.Processing) + } else if !graph.Quiesce() { + t.Fatalf("Should quiesce as there are no pending virtuous transactions") + } +} + +func StringTest(t *testing.T, factory Factory, prefix string) { + Setup() + + graph := factory.New() + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + graph.Add(Red) + graph.Add(Green) + graph.Add(Blue) + graph.Add(Alpha) + + if prefs := graph.Preferences(); prefs.Len() != 1 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red.ID(), prefs.List()[0]) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + rb := ids.Bag{} + rb.SetThreshold(2) + rb.AddCount(Red.ID(), 2) + rb.AddCount(Blue.ID(), 2) + graph.RecordPoll(rb) + graph.Add(Blue) + + { + expected := prefix + "(\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 1 Bias: 1\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 0\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 0\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 1 Bias: 1\n" + + ")" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(Blue.ID()) { + t.Fatalf("Wrong preference. Expected %s", Blue.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + ga := ids.Bag{} + ga.SetThreshold(2) + ga.AddCount(Green.ID(), 2) + ga.AddCount(Alpha.ID(), 2) + graph.RecordPoll(ga) + + { + expected := prefix + "(\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 1\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 1\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + ")" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. 
Expected %s", Red.ID()) + } else if !prefs.Contains(Blue.ID()) { + t.Fatalf("Wrong preference. Expected %s", Blue.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + empty := ids.Bag{} + graph.RecordPoll(empty) + + { + expected := prefix + "(\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 0 Bias: 1\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 0 Bias: 1\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + ")" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Red.ID()) { + t.Fatalf("Wrong preference. Expected %s", Red.ID()) + } else if !prefs.Contains(Blue.ID()) { + t.Fatalf("Wrong preference. Expected %s", Blue.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + graph.RecordPoll(ga) + + { + expected := prefix + "(\n" + + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + + " Choice[1] = ID: TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES Confidence: 1 Bias: 2\n" + + " Choice[2] = ID: Zda4gsqTjRaX6XVZekVNi3ovMFPHDRQiGbzYuAb7Nwqy1rGBc Confidence: 1 Bias: 2\n" + + " Choice[3] = ID: 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w Confidence: 0 Bias: 1\n" + + ")" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 2 { + t.Fatalf("Wrong number of preferences.") + } else if !prefs.Contains(Green.ID()) { + t.Fatalf("Wrong preference. Expected %s", Green.ID()) + } else if !prefs.Contains(Alpha.ID()) { + t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) + } else if graph.Finalized() { + t.Fatalf("Finalized too early") + } + + graph.RecordPoll(ga) + + { + expected := prefix + "()" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if !graph.Finalized() { + t.Fatalf("Finalized too late") + } + + if Green.Status() != choices.Accepted { + t.Fatalf("%s should have been accepted", Green.ID()) + } else if Alpha.Status() != choices.Accepted { + t.Fatalf("%s should have been accepted", Alpha.ID()) + } else if Red.Status() != choices.Rejected { + t.Fatalf("%s should have been rejected", Red.ID()) + } else if Blue.Status() != choices.Rejected { + t.Fatalf("%s should have been rejected", Blue.ID()) + } + + graph.RecordPoll(rb) + + { + expected := prefix + "()" + if str := graph.String(); str != expected { + t.Fatalf("Expected %s, got %s", expected, str) + } + } + + if prefs := graph.Preferences(); prefs.Len() != 0 { + t.Fatalf("Wrong number of preferences.") + } else if !graph.Finalized() { + t.Fatalf("Finalized too late") + } + + if Green.Status() != choices.Accepted { + t.Fatalf("%s should have been accepted", Green.ID()) + } else if Alpha.Status() != choices.Accepted { + t.Fatalf("%s should have been accepted", Alpha.ID()) + } else if Red.Status() != choices.Rejected { + t.Fatalf("%s should have been rejected", Red.ID()) + } else if Blue.Status() != choices.Rejected { + t.Fatalf("%s should have been rejected", Blue.ID()) + } +} diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go new file mode 100644 index 0000000..375b9b3 --- /dev/null +++ b/snow/consensus/snowstorm/directed.go @@ -0,0 +1,513 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" +) + +// DirectedFactory implements Factory by returning a directed struct +type DirectedFactory struct{} + +// New implements Factory +func (DirectedFactory) New() Consensus { return &Directed{} } + +// Directed is an implementation of a multi-color, non-transitive, snowball +// instance +type Directed struct { + ctx *snow.Context + params snowball.Parameters + + numProcessingVirtuous, numProcessingRogue prometheus.Gauge + numAccepted, numRejected prometheus.Counter + + // Each element of preferences is the ID of a transaction that is preferred. + // That is, each transaction has no out edges + preferences ids.Set + + // Each element of virtuous is the ID of a transaction that is virtuous. 
+ // That is, each transaction that has no incident edges + virtuous ids.Set + + // Each element is in the virtuous set and is still being voted on + virtuousVoting ids.Set + + // Key: UTXO ID + // Value: IDs of transactions that consume the UTXO specified in the key + spends map[[32]byte]ids.Set + + // Key: Transaction ID + // Value: Node that represents this transaction in the conflict graph + nodes map[[32]byte]*flatNode + + // Keep track of whether dependencies have been accepted or rejected + pendingAccept, pendingReject events.Blocker + + // Number of times RecordPoll has been called + currentVote int +} + +type flatNode struct { + bias, confidence, lastVote int + + pendingAccept, accepted, rogue bool + ins, outs ids.Set + + tx Tx +} + +// Initialize implements the Consensus interface +func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) { + ctx.Log.AssertDeferredNoError(params.Valid) + + dg.ctx = ctx + dg.params = params + + dg.numProcessingVirtuous = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "tx_processing_virtuous", + Help: "Number of processing virtuous transactions", + }) + dg.numProcessingRogue = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "tx_processing_rogue", + Help: "Number of processing rogue transactions", + }) + dg.numAccepted = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "tx_accepted", + Help: "Number of transactions accepted", + }) + dg.numRejected = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: params.Namespace, + Name: "tx_rejected", + Help: "Number of transactions rejected", + }) + + if err := dg.params.Metrics.Register(dg.numProcessingVirtuous); err != nil { + dg.ctx.Log.Error("Failed to register tx_processing_virtuous statistics due to %s", err) + } + if err := dg.params.Metrics.Register(dg.numProcessingRogue); err != nil { + dg.ctx.Log.Error("Failed to register 
tx_processing_rogue statistics due to %s", err) + } + if err := dg.params.Metrics.Register(dg.numAccepted); err != nil { + dg.ctx.Log.Error("Failed to register tx_accepted statistics due to %s", err) + } + if err := dg.params.Metrics.Register(dg.numRejected); err != nil { + dg.ctx.Log.Error("Failed to register tx_rejected statistics due to %s", err) + } + + dg.spends = make(map[[32]byte]ids.Set) + dg.nodes = make(map[[32]byte]*flatNode) +} + +// Parameters implements the Snowstorm interface +func (dg *Directed) Parameters() snowball.Parameters { return dg.params } + +// IsVirtuous implements the Consensus interface +func (dg *Directed) IsVirtuous(tx Tx) bool { + id := tx.ID() + if node, exists := dg.nodes[id.Key()]; exists { + return !node.rogue + } + for _, input := range tx.InputIDs().List() { + if _, exists := dg.spends[input.Key()]; exists { + return false + } + } + return true +} + +// Conflicts implements the Consensus interface +func (dg *Directed) Conflicts(tx Tx) ids.Set { + id := tx.ID() + conflicts := ids.Set{} + + if node, exists := dg.nodes[id.Key()]; exists { + conflicts.Union(node.ins) + conflicts.Union(node.outs) + } else { + for _, input := range tx.InputIDs().List() { + if spends, exists := dg.spends[input.Key()]; exists { + conflicts.Union(spends) + } + } + conflicts.Remove(id) + } + + return conflicts +} + +// Add implements the Consensus interface +func (dg *Directed) Add(tx Tx) { + if dg.Issued(tx) { + return // Already inserted + } + + txID := tx.ID() + bytes := tx.Bytes() + + dg.ctx.DecisionDispatcher.Issue(dg.ctx.ChainID, txID, bytes) + inputs := tx.InputIDs() + // If there are no inputs, Tx is vacuously accepted + if inputs.Len() == 0 { + tx.Accept() + dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes) + dg.numAccepted.Inc() + return + } + + id := tx.ID() + fn := &flatNode{tx: tx} + + // Note: Below, for readability, we sometimes say "transaction" when we actually mean + // "the flatNode representing a transaction." 
+ // For each UTXO input to Tx: + // * Get all transactions that consume that UTXO + // * Add edges from Tx to those transactions in the conflict graph + // * Mark those transactions as rogue + for _, inputID := range inputs.List() { + inputKey := inputID.Key() + spends := dg.spends[inputKey] // Transactions spending this UTXO + + // Add edges to conflict graph + fn.outs.Union(spends) + + // Mark transactions conflicting with Tx as rogue + for _, conflictID := range spends.List() { + conflictKey := conflictID.Key() + conflict := dg.nodes[conflictKey] + + if !conflict.rogue { + dg.numProcessingVirtuous.Dec() + dg.numProcessingRogue.Inc() + } + + dg.virtuous.Remove(conflictID) + dg.virtuousVoting.Remove(conflictID) + + conflict.rogue = true + conflict.ins.Add(id) + + dg.nodes[conflictKey] = conflict + } + // Add Tx to list of transactions consuming UTXO whose ID is id + spends.Add(id) + dg.spends[inputKey] = spends + } + fn.rogue = fn.outs.Len() != 0 // Mark this transaction as rogue if it has conflicts + + // Add the node representing Tx to the node set + dg.nodes[id.Key()] = fn + if !fn.rogue { + // I'm not rogue + dg.virtuous.Add(id) + dg.virtuousVoting.Add(id) + + // If I'm not rogue, I must be preferred + dg.preferences.Add(id) + dg.numProcessingVirtuous.Inc() + } else { + dg.numProcessingRogue.Inc() + } + + // Tx can be accepted only if the transactions it depends on are also accepted + // If any transactions that Tx depends on are rejected, reject Tx + toReject := &directedRejector{ + dg: dg, + fn: fn, + } + for _, dependency := range tx.Dependencies() { + if !dependency.Status().Decided() { + toReject.deps.Add(dependency.ID()) + } + } + dg.pendingReject.Register(toReject) +} + +// Issued implements the Consensus interface +func (dg *Directed) Issued(tx Tx) bool { + if tx.Status().Decided() { + return true + } + _, ok := dg.nodes[tx.ID().Key()] + return ok +} + +// Virtuous implements the Consensus interface +func (dg *Directed) Virtuous() ids.Set { return 
dg.virtuous } + +// Preferences implements the Consensus interface +func (dg *Directed) Preferences() ids.Set { return dg.preferences } + +// RecordPoll implements the Consensus interface +func (dg *Directed) RecordPoll(votes ids.Bag) { + dg.currentVote++ + + votes.SetThreshold(dg.params.Alpha) + threshold := votes.Threshold() // Each element is ID of transaction preferred by >= Alpha poll respondents + for _, toInc := range threshold.List() { + incKey := toInc.Key() + fn, exist := dg.nodes[incKey] + if !exist { + // Votes for decided consumers are ignored + continue + } + + if fn.lastVote+1 != dg.currentVote { + fn.confidence = 0 + } + fn.lastVote = dg.currentVote + + dg.ctx.Log.Verbo("Increasing (bias, confidence) of %s from (%d, %d) to (%d, %d)", toInc, fn.bias, fn.confidence, fn.bias+1, fn.confidence+1) + + fn.bias++ + fn.confidence++ + + if !fn.pendingAccept && + ((!fn.rogue && fn.confidence >= dg.params.BetaVirtuous) || + fn.confidence >= dg.params.BetaRogue) { + dg.deferAcceptance(fn) + } + if !fn.accepted { + dg.redirectEdges(fn) + } + } +} + +// Quiesce implements the Consensus interface +func (dg *Directed) Quiesce() bool { + numVirtuous := dg.virtuousVoting.Len() + dg.ctx.Log.Verbo("Conflict graph has %d voting virtuous transactions and %d transactions", numVirtuous, len(dg.nodes)) + return numVirtuous == 0 +} + +// Finalized implements the Consensus interface +func (dg *Directed) Finalized() bool { + numNodes := len(dg.nodes) + dg.ctx.Log.Verbo("Conflict graph has %d pending transactions", numNodes) + return numNodes == 0 +} + +func (dg *Directed) String() string { + nodes := []*flatNode{} + for _, fn := range dg.nodes { + nodes = append(nodes, fn) + } + sortFlatNodes(nodes) + + sb := strings.Builder{} + + sb.WriteString("DG(") + + format := fmt.Sprintf( + "\n Choice[%s] = ID: %%50s Confidence: %s Bias: %%d", + formatting.IntFormat(len(dg.nodes)-1), + formatting.IntFormat(dg.params.BetaRogue-1)) + + for i, fn := range nodes { + confidence := 
fn.confidence + if fn.lastVote != dg.currentVote { + confidence = 0 + } + sb.WriteString(fmt.Sprintf(format, + i, fn.tx.ID(), confidence, fn.bias)) + } + + if len(nodes) > 0 { + sb.WriteString("\n") + } + sb.WriteString(")") + + return sb.String() +} + +func (dg *Directed) deferAcceptance(fn *flatNode) { + fn.pendingAccept = true + + toAccept := &directedAccepter{ + dg: dg, + fn: fn, + } + for _, dependency := range fn.tx.Dependencies() { + if !dependency.Status().Decided() { + toAccept.deps.Add(dependency.ID()) + } + } + + dg.virtuousVoting.Remove(fn.tx.ID()) + dg.pendingAccept.Register(toAccept) +} + +func (dg *Directed) reject(ids ...ids.ID) { + for _, conflict := range ids { + conflictKey := conflict.Key() + conf := dg.nodes[conflictKey] + delete(dg.nodes, conflictKey) + + if conf.rogue { + dg.numProcessingRogue.Dec() + } else { + dg.numProcessingVirtuous.Dec() + } + + dg.preferences.Remove(conflict) + + // remove the edge between this node and all its neighbors + dg.removeConflict(conflict, conf.ins.List()...) + dg.removeConflict(conflict, conf.outs.List()...) 
+ + // Mark it as rejected + conf.tx.Reject() + dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conf.tx.ID(), conf.tx.Bytes()) + dg.numRejected.Inc() + dg.pendingAccept.Abandon(conflict) + dg.pendingReject.Fulfill(conflict) + } +} + +func (dg *Directed) redirectEdges(fn *flatNode) { + for _, conflictID := range fn.outs.List() { + dg.redirectEdge(fn, conflictID) + } +} + +// Set the confidence of all conflicts to 0 +// Change the direction of edges if needed +func (dg *Directed) redirectEdge(fn *flatNode, conflictID ids.ID) { + nodeID := fn.tx.ID() + if conflict := dg.nodes[conflictID.Key()]; fn.bias > conflict.bias { + conflict.confidence = 0 + + // Change the edge direction + conflict.ins.Remove(nodeID) + conflict.outs.Add(nodeID) + dg.preferences.Remove(conflictID) // This consumer now has an out edge + + fn.ins.Add(conflictID) + fn.outs.Remove(conflictID) + if fn.outs.Len() == 0 { + // If I don't have out edges, I'm preferred + dg.preferences.Add(nodeID) + } + } +} + +func (dg *Directed) removeConflict(id ids.ID, ids ...ids.ID) { + for _, neighborID := range ids { + neighborKey := neighborID.Key() + // If the neighbor doesn't exist, they may have already been rejected + if neighbor, exists := dg.nodes[neighborKey]; exists { + neighbor.ins.Remove(id) + neighbor.outs.Remove(id) + + if neighbor.outs.Len() == 0 { + // Make sure to mark the neighbor as preferred if needed + dg.preferences.Add(neighborID) + } + + dg.nodes[neighborKey] = neighbor + } + } +} + +type directedAccepter struct { + dg *Directed + deps ids.Set + rejected bool + fn *flatNode +} + +func (a *directedAccepter) Dependencies() ids.Set { return a.deps } + +func (a *directedAccepter) Fulfill(id ids.ID) { + a.deps.Remove(id) + a.Update() +} + +func (a *directedAccepter) Abandon(id ids.ID) { a.rejected = true } + +func (a *directedAccepter) Update() { + // If I was rejected or I am still waiting on dependencies to finish do nothing. 
+ if a.rejected || a.deps.Len() != 0 { + return + } + + id := a.fn.tx.ID() + delete(a.dg.nodes, id.Key()) + + for _, inputID := range a.fn.tx.InputIDs().List() { + delete(a.dg.spends, inputID.Key()) + } + a.dg.virtuous.Remove(id) + a.dg.preferences.Remove(id) + + // Reject the conflicts + a.dg.reject(a.fn.ins.List()...) + a.dg.reject(a.fn.outs.List()...) // Should normally be empty + + // Mark it as accepted + a.fn.accepted = true + a.fn.tx.Accept() + a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, id, a.fn.tx.Bytes()) + a.dg.numAccepted.Inc() + + if a.fn.rogue { + a.dg.numProcessingRogue.Dec() + } else { + a.dg.numProcessingVirtuous.Dec() + } + + a.dg.pendingAccept.Fulfill(id) + a.dg.pendingReject.Abandon(id) +} + +// directedRejector implements Blockable +type directedRejector struct { + dg *Directed + deps ids.Set + rejected bool // true if the transaction represented by fn has been rejected + fn *flatNode +} + +func (r *directedRejector) Dependencies() ids.Set { return r.deps } + +func (r *directedRejector) Fulfill(id ids.ID) { + if r.rejected { + return + } + r.rejected = true + r.dg.reject(r.fn.tx.ID()) +} + +func (*directedRejector) Abandon(id ids.ID) {} + +func (*directedRejector) Update() {} + +type sortFlatNodeData []*flatNode + +func (fnd sortFlatNodeData) Less(i, j int) bool { + return bytes.Compare( + fnd[i].tx.ID().Bytes(), + fnd[j].tx.ID().Bytes()) == -1 +} +func (fnd sortFlatNodeData) Len() int { return len(fnd) } +func (fnd sortFlatNodeData) Swap(i, j int) { fnd[j], fnd[i] = fnd[i], fnd[j] } + +func sortFlatNodes(nodes []*flatNode) { sort.Sort(sortFlatNodeData(nodes)) } diff --git a/snow/consensus/snowstorm/directed_test.go b/snow/consensus/snowstorm/directed_test.go new file mode 100644 index 0000000..39bc5bf --- /dev/null +++ b/snow/consensus/snowstorm/directed_test.go @@ -0,0 +1,40 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "testing" +) + +func TestDirectedParams(t *testing.T) { ParamsTest(t, DirectedFactory{}) } + +func TestDirectedIssued(t *testing.T) { IssuedTest(t, DirectedFactory{}) } + +func TestDirectedLeftoverInput(t *testing.T) { LeftoverInputTest(t, DirectedFactory{}) } + +func TestDirectedLowerConfidence(t *testing.T) { LowerConfidenceTest(t, DirectedFactory{}) } + +func TestDirectedMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, DirectedFactory{}) } + +func TestDirectedIndependent(t *testing.T) { IndependentTest(t, DirectedFactory{}) } + +func TestDirectedVirtuous(t *testing.T) { VirtuousTest(t, DirectedFactory{}) } + +func TestDirectedIsVirtuous(t *testing.T) { IsVirtuousTest(t, DirectedFactory{}) } + +func TestDirectedConflicts(t *testing.T) { ConflictsTest(t, DirectedFactory{}) } + +func TestDirectedQuiesce(t *testing.T) { QuiesceTest(t, DirectedFactory{}) } + +func TestDirectedAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, DirectedFactory{}) } + +func TestDirectedRejectingDependency(t *testing.T) { RejectingDependencyTest(t, DirectedFactory{}) } + +func TestDirectedVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, DirectedFactory{}) } + +func TestDirectedVirtuousDependsOnRogue(t *testing.T) { + VirtuousDependsOnRogueTest(t, DirectedFactory{}) +} + +func TestDirectedString(t *testing.T) { StringTest(t, DirectedFactory{}, "DG") } diff --git a/snow/consensus/snowstorm/equality_test.go b/snow/consensus/snowstorm/equality_test.go new file mode 100644 index 0000000..8298c48 --- /dev/null +++ b/snow/consensus/snowstorm/equality_test.go @@ -0,0 +1,70 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "math/rand" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +func TestConflictGraphEquality(t *testing.T) { + Setup() + + numColors := 5 + colorsPerConsumer := 2 + maxInputConflicts := 2 + numNodes := 100 + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 20, + Alpha: 11, + BetaVirtuous: 20, + BetaRogue: 30, + } + seed := int64(0) + + nDirected := Network{} + rand.Seed(seed) + nDirected.Initialize(params, numColors, colorsPerConsumer, maxInputConflicts) + + nInput := Network{} + rand.Seed(seed) + nInput.Initialize(params, numColors, colorsPerConsumer, maxInputConflicts) + + rand.Seed(seed) + for i := 0; i < numNodes; i++ { + nDirected.AddNode(&Directed{}) + } + + rand.Seed(seed) + for i := 0; i < numNodes; i++ { + nInput.AddNode(&Input{}) + } + + numRounds := 0 + for !nDirected.Finalized() && !nDirected.Disagreement() && !nInput.Finalized() && !nInput.Disagreement() { + rand.Seed(int64(numRounds) + seed) + nDirected.Round() + + rand.Seed(int64(numRounds) + seed) + nInput.Round() + numRounds++ + } + + if nDirected.Disagreement() || nInput.Disagreement() { + t.Fatalf("Network agreed on inconsistent values") + } + + if !nDirected.Finalized() || + !nInput.Finalized() { + t.Fatalf("Network agreed on values faster with one of the implementations") + } + if !nDirected.Agreement() || !nInput.Agreement() { + t.Fatalf("Network agreed on inconsistent values") + } +} diff --git a/snow/consensus/snowstorm/factory.go b/snow/consensus/snowstorm/factory.go new file mode 100644 index 0000000..839ca5c --- /dev/null +++ b/snow/consensus/snowstorm/factory.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +// Factory returns new instances of Consensus +type Factory interface { + New() Consensus +} diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go new file mode 100644 index 0000000..ec9f767 --- /dev/null +++ b/snow/consensus/snowstorm/input.go @@ -0,0 +1,562 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" +) + +// InputFactory implements Factory by returning an input struct +type InputFactory struct{} + +// New implements Factory +func (InputFactory) New() Consensus { return &Input{} } + +// Input is an implementation of a multi-color, non-transitive, snowball +// instance +type Input struct { + ctx *snow.Context + params snowball.Parameters + + numProcessing prometheus.Gauge + numAccepted, numRejected prometheus.Counter + + // preferences is the set of consumerIDs that have only in edges + // virtuous is the set of consumerIDs that have no edges + preferences, virtuous, virtuousVoting ids.Set + + txs map[[32]byte]txNode // Map consumerID -> consumerNode + inputs map[[32]byte]inputNode // Map inputID -> inputNode + + pendingAccept, pendingReject events.Blocker + + time uint64 + + // Number of times RecordPoll has been called + currentVote int +} + +type txNode struct { + bias int + tx Tx + + timestamp uint64 +} + +type inputNode struct { + bias, confidence, lastVote int + rogue bool + preference ids.ID + color ids.ID + conflicts ids.Set +} + +// Initialize implements the ConflictGraph interface +func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) { + ctx.Log.AssertDeferredNoError(params.Valid) + + 
ig.ctx = ctx + ig.params = params + + namespace := fmt.Sprintf("gecko_%s", ig.ctx.ChainID) + + ig.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "tx_processing", + Help: "Number of processing transactions", + }) + ig.numAccepted = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "tx_accepted", + Help: "Number of transactions accepted", + }) + ig.numRejected = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "tx_rejected", + Help: "Number of transactions rejected", + }) + + if err := ig.params.Metrics.Register(ig.numProcessing); err != nil { + ig.ctx.Log.Error("Failed to register tx_processing statistics due to %s", err) + } + if err := ig.params.Metrics.Register(ig.numAccepted); err != nil { + ig.ctx.Log.Error("Failed to register tx_accepted statistics due to %s", err) + } + if err := ig.params.Metrics.Register(ig.numRejected); err != nil { + ig.ctx.Log.Error("Failed to register tx_rejected statistics due to %s", err) + } + + ig.txs = make(map[[32]byte]txNode) + ig.inputs = make(map[[32]byte]inputNode) +} + +// Parameters implements the Snowstorm interface +func (ig *Input) Parameters() snowball.Parameters { return ig.params } + +// IsVirtuous implements the ConflictGraph interface +func (ig *Input) IsVirtuous(tx Tx) bool { + id := tx.ID() + for _, consumption := range tx.InputIDs().List() { + input := ig.inputs[consumption.Key()] + if input.rogue || + (input.conflicts.Len() > 0 && !input.conflicts.Contains(id)) { + return false + } + } + return true +} + +// Add implements the ConflictGraph interface +func (ig *Input) Add(tx Tx) { + if ig.Issued(tx) { + return // Already inserted + } + + txID := tx.ID() + bytes := tx.Bytes() + + ig.ctx.DecisionDispatcher.Issue(ig.ctx.ChainID, txID, bytes) + inputs := tx.InputIDs() + // If there are no inputs, they are vacuously accepted + if inputs.Len() == 0 { + tx.Accept() + 
ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes) + ig.numAccepted.Inc() + return + } + + id := tx.ID() + cn := txNode{tx: tx} + virtuous := true + // If there are inputs, they must be voted on + for _, consumption := range inputs.List() { + consumptionKey := consumption.Key() + input, exists := ig.inputs[consumptionKey] + input.rogue = exists // If the input exists for a conflict + if exists { + for _, conflictID := range input.conflicts.List() { + ig.virtuous.Remove(conflictID) + ig.virtuousVoting.Remove(conflictID) + } + } else { + input.preference = id // If there isn't a conflict, I'm preferred + } + input.conflicts.Add(id) + ig.inputs[consumptionKey] = input + + virtuous = virtuous && !exists + } + + // Add the node to the set + ig.txs[id.Key()] = cn + if virtuous { + // If I'm preferred in all my conflict sets, I'm preferred. + // Because the preference graph is a DAG, there will always be at least + // one preferred consumer, if there is a consumer + ig.preferences.Add(id) + ig.virtuous.Add(id) + ig.virtuousVoting.Add(id) + } + + ig.numProcessing.Inc() + + toReject := &inputRejector{ + ig: ig, + tn: cn, + } + + for _, dependency := range tx.Dependencies() { + if !dependency.Status().Decided() { + toReject.deps.Add(dependency.ID()) + } + } + ig.pendingReject.Register(toReject) +} + +// Issued implements the ConflictGraph interface +func (ig *Input) Issued(tx Tx) bool { + if tx.Status().Decided() { + return true + } + _, ok := ig.txs[tx.ID().Key()] + return ok +} + +// Virtuous implements the ConflictGraph interface +func (ig *Input) Virtuous() ids.Set { return ig.virtuous } + +// Preferences implements the ConflictGraph interface +func (ig *Input) Preferences() ids.Set { return ig.preferences } + +// Conflicts implements the ConflictGraph interface +func (ig *Input) Conflicts(tx Tx) ids.Set { + id := tx.ID() + conflicts := ids.Set{} + + for _, input := range tx.InputIDs().List() { + inputNode := ig.inputs[input.Key()] + 
conflicts.Union(inputNode.conflicts) + } + + conflicts.Remove(id) + return conflicts +} + +// RecordPoll implements the ConflictGraph interface +func (ig *Input) RecordPoll(votes ids.Bag) { + ig.currentVote++ + + votes.SetThreshold(ig.params.Alpha) + threshold := votes.Threshold() + for _, toInc := range threshold.List() { + incKey := toInc.Key() + tx, exist := ig.txs[incKey] + if !exist { + // Votes for decided consumptions are ignored + continue + } + + tx.bias++ + + // The timestamp is needed to ensure correctness in the case that a + // consumer was rejected from a conflict set, when it was preferred in + // this conflict set, when there is a tie for the second highest + // confidence. + ig.time++ + tx.timestamp = ig.time + + preferred := true + rogue := false + confidence := ig.params.BetaRogue + + consumptions := tx.tx.InputIDs().List() + for _, inputID := range consumptions { + inputKey := inputID.Key() + input := ig.inputs[inputKey] + + // If I did not receive a vote in the last vote, reset my confidence to 0 + if input.lastVote+1 != ig.currentVote { + input.confidence = 0 + } + input.lastVote = ig.currentVote + + // check the snowflake preference + if !toInc.Equals(input.color) { + input.confidence = 0 + } + // update the snowball preference + if tx.bias > input.bias { + // if the previous preference lost it's preference in this + // input, it can't be preferred in all the inputs + ig.preferences.Remove(input.preference) + + input.bias = tx.bias + input.preference = toInc + } + + // update snowflake vars + input.color = toInc + input.confidence++ + + ig.inputs[inputKey] = input + + // track cumulative statistics + preferred = preferred && toInc.Equals(input.preference) + rogue = rogue || input.rogue + if confidence > input.confidence { + confidence = input.confidence + } + } + + // If the node wasn't accepted, but was preferred, make sure it is + // marked as preferred + if preferred { + ig.preferences.Add(toInc) + } + + if (!rogue && confidence >= 
ig.params.BetaVirtuous) || + confidence >= ig.params.BetaRogue { + ig.deferAcceptance(tx) + continue + } + + ig.txs[incKey] = tx + } +} + +func (ig *Input) deferAcceptance(tn txNode) { + toAccept := &inputAccepter{ + ig: ig, + tn: tn, + } + + for _, dependency := range tn.tx.Dependencies() { + if !dependency.Status().Decided() { + toAccept.deps.Add(dependency.ID()) + } + } + + ig.virtuousVoting.Remove(tn.tx.ID()) + ig.pendingAccept.Register(toAccept) +} + +// reject all the ids and remove them from their conflict sets +func (ig *Input) reject(ids ...ids.ID) { + for _, conflict := range ids { + conflictKey := conflict.Key() + cn := ig.txs[conflictKey] + delete(ig.txs, conflictKey) + ig.numProcessing.Dec() + ig.preferences.Remove(conflict) // A rejected value isn't preferred + + // Remove from all conflict sets + ig.removeConflict(conflict, cn.tx.InputIDs().List()...) + + // Mark it as rejected + cn.tx.Reject() + ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, cn.tx.ID(), cn.tx.Bytes()) + ig.numRejected.Inc() + ig.pendingAccept.Abandon(conflict) + ig.pendingReject.Fulfill(conflict) + } +} + +// Remove id from all of its conflict sets +func (ig *Input) removeConflict(id ids.ID, inputIDs ...ids.ID) { + for _, inputID := range inputIDs { + inputKey := inputID.Key() + // if the input doesn't exists, it was already decided + if input, exists := ig.inputs[inputKey]; exists { + input.conflicts.Remove(id) + + // If there is nothing attempting to consume the input, remove it + // from memory + if input.conflicts.Len() == 0 { + delete(ig.inputs, inputKey) + continue + } + + // If I was previously preferred, I must find who should now be + // preferred. 
This shouldn't normally happen, therefore it is okay + // to be fairly slow here + if input.preference.Equals(id) { + newPreference := ids.ID{} + newBias := -1 + newBiasTime := uint64(0) + + // Find the highest bias conflict + for _, spend := range input.conflicts.List() { + tx := ig.txs[spend.Key()] + if tx.bias > newBias || + (tx.bias == newBias && + newBiasTime < tx.timestamp) { + newPreference = spend + newBias = tx.bias + newBiasTime = tx.timestamp + } + } + + // Set the preferences to the highest bias + input.preference = newPreference + input.bias = newBias + + ig.inputs[inputKey] = input + + // We need to check if this node is now preferred + preferenceNode, exist := ig.txs[newPreference.Key()] + if exist { + isPreferred := true + inputIDs := preferenceNode.tx.InputIDs().List() + for _, inputID := range inputIDs { + inputKey := inputID.Key() + input := ig.inputs[inputKey] + + if !newPreference.Equals(input.preference) { + // If this preference isn't the preferred color, it + // isn't preferred. 
Input might not exist, in which + // case this still isn't the preferred color + isPreferred = false + break + } + } + if isPreferred { + // If I'm preferred in all my conflict sets, I'm + // preferred + ig.preferences.Add(newPreference) + } + } + } else { + // If i'm rejecting the non-preference, do nothing + ig.inputs[inputKey] = input + } + } + } +} + +// Quiesce implements the ConflictGraph interface +func (ig *Input) Quiesce() bool { + numVirtuous := ig.virtuousVoting.Len() + ig.ctx.Log.Verbo("Conflict graph has %d voting virtuous transactions and %d transactions", numVirtuous, len(ig.txs)) + return numVirtuous == 0 +} + +// Finalized implements the ConflictGraph interface +func (ig *Input) Finalized() bool { + numTxs := len(ig.txs) + ig.ctx.Log.Verbo("Conflict graph has %d pending transactions", numTxs) + return numTxs == 0 +} + +func (ig *Input) String() string { + nodes := []tempNode{} + for _, tx := range ig.txs { + id := tx.tx.ID() + + confidence := ig.params.BetaRogue + for _, inputID := range tx.tx.InputIDs().List() { + input := ig.inputs[inputID.Key()] + if input.lastVote != ig.currentVote { + confidence = 0 + break + } + + if input.confidence < confidence { + confidence = input.confidence + } + if !id.Equals(input.color) { + confidence = 0 + break + } + } + + nodes = append(nodes, tempNode{ + id: id, + bias: tx.bias, + confidence: confidence, + }) + } + sortTempNodes(nodes) + + sb := strings.Builder{} + + sb.WriteString("IG(") + + format := fmt.Sprintf( + "\n Choice[%s] = ID: %%50s Confidence: %s Bias: %%d", + formatting.IntFormat(len(nodes)-1), + formatting.IntFormat(ig.params.BetaRogue-1)) + + for i, cn := range nodes { + sb.WriteString(fmt.Sprintf(format, i, cn.id, cn.confidence, cn.bias)) + } + + if len(nodes) > 0 { + sb.WriteString("\n") + } + sb.WriteString(")") + + return sb.String() +} + +type inputAccepter struct { + ig *Input + deps ids.Set + rejected bool + tn txNode +} + +func (a *inputAccepter) Dependencies() ids.Set { return a.deps } + 
// Fulfill removes the given dependency and, if it was the last outstanding
// one, attempts to accept the wrapped transaction.
func (a *inputAccepter) Fulfill(id ids.ID) {
	a.deps.Remove(id)
	a.Update()
}

// Abandon marks this accepter as rejected: a dependency was abandoned, so the
// wrapped transaction can no longer be accepted through this blocker.
func (a *inputAccepter) Abandon(id ids.ID) { a.rejected = true }

// Update accepts the wrapped transaction once every dependency has been
// fulfilled and none were abandoned.
func (a *inputAccepter) Update() {
	// Still waiting on dependencies, or a dependency was rejected.
	if a.rejected || a.deps.Len() != 0 {
		return
	}

	id := a.tn.tx.ID()
	delete(a.ig.txs, id.Key())

	// Remove Tx from all of its conflicts
	inputIDs := a.tn.tx.InputIDs()
	a.ig.removeConflict(id, inputIDs.List()...)

	a.ig.virtuous.Remove(id)
	a.ig.preferences.Remove(id)

	// Reject the conflicts. Note: conflicting transactions are rejected
	// before this transaction is marked accepted below.
	conflicts := ids.Set{}
	for inputKey, exists := range inputIDs {
		if exists {
			inputNode := a.ig.inputs[inputKey]
			conflicts.Union(inputNode.conflicts)
		}
	}
	a.ig.reject(conflicts.List()...)

	// Mark it as accepted
	a.tn.tx.Accept()
	a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, id, a.tn.tx.Bytes())
	a.ig.numAccepted.Inc()
	a.ig.numProcessing.Dec()

	// Unblock anything waiting on this transaction's decision.
	a.ig.pendingAccept.Fulfill(id)
	a.ig.pendingReject.Abandon(id)
}

// inputRejector implements Blockable
type inputRejector struct {
	ig       *Input
	deps     ids.Set
	rejected bool // true if the transaction represented by fn has been rejected
	tn       txNode
}

// Dependencies returns the set of IDs this rejector is blocked on.
func (r *inputRejector) Dependencies() ids.Set { return r.deps }

// Fulfill rejects the wrapped transaction the first time it is called; later
// calls are no-ops.
func (r *inputRejector) Fulfill(id ids.ID) {
	if r.rejected {
		return
	}
	r.rejected = true
	r.ig.reject(r.tn.tx.ID())
}

// Abandon is a no-op: an abandoned dependency never triggers rejection.
func (*inputRejector) Abandon(id ids.ID) {}

// Update is a no-op for the rejector.
func (*inputRejector) Update() {}

// tempNode is a snapshot of a transaction's state used only when formatting
// the conflict graph with String.
type tempNode struct {
	id               ids.ID
	bias, confidence int
}

// sortTempNodeData orders tempNodes by the byte representation of their IDs.
type sortTempNodeData []tempNode

func (tnd sortTempNodeData) Less(i, j int) bool {
	return bytes.Compare(tnd[i].id.Bytes(), tnd[j].id.Bytes()) == -1
}
func (tnd sortTempNodeData) Len() int      { return len(tnd) }
func (tnd sortTempNodeData) Swap(i, j int) { tnd[j], tnd[i] = tnd[i], tnd[j] }

// sortTempNodes sorts the nodes in place by ID.
func sortTempNodes(nodes []tempNode) { sort.Sort(sortTempNodeData(nodes)) }
diff --git a/snow/consensus/snowstorm/input_test.go b/snow/consensus/snowstorm/input_test.go
new file mode 100644
index 0000000..46a0033
--- /dev/null +++ b/snow/consensus/snowstorm/input_test.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowstorm + +import ( + "testing" +) + +func TestInputParams(t *testing.T) { ParamsTest(t, InputFactory{}) } + +func TestInputIssued(t *testing.T) { IssuedTest(t, InputFactory{}) } + +func TestInputLeftoverInput(t *testing.T) { LeftoverInputTest(t, InputFactory{}) } + +func TestInputLowerConfidence(t *testing.T) { LowerConfidenceTest(t, InputFactory{}) } + +func TestInputMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, InputFactory{}) } + +func TestInputIndependent(t *testing.T) { IndependentTest(t, InputFactory{}) } + +func TestInputVirtuous(t *testing.T) { VirtuousTest(t, InputFactory{}) } + +func TestInputIsVirtuous(t *testing.T) { IsVirtuousTest(t, InputFactory{}) } + +func TestInputConflicts(t *testing.T) { ConflictsTest(t, InputFactory{}) } + +func TestInputQuiesce(t *testing.T) { QuiesceTest(t, InputFactory{}) } + +func TestInputAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, InputFactory{}) } + +func TestInputRejectingDependency(t *testing.T) { RejectingDependencyTest(t, InputFactory{}) } + +func TestInputVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, InputFactory{}) } + +func TestInputVirtuousDependsOnRogue(t *testing.T) { VirtuousDependsOnRogueTest(t, InputFactory{}) } + +func TestInputString(t *testing.T) { StringTest(t, InputFactory{}, "IG") } diff --git a/snow/consensus/snowstorm/network_test.go b/snow/consensus/snowstorm/network_test.go new file mode 100644 index 0000000..b0081ae --- /dev/null +++ b/snow/consensus/snowstorm/network_test.go @@ -0,0 +1,174 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 

package snowstorm

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/snowball"
	"github.com/ava-labs/gecko/utils/random"
)

// Network simulates a set of nodes running snowstorm consensus over a shared
// pool of conflicting transactions ("consumers").
type Network struct {
	params    snowball.Parameters
	consumers []*TestTx
	// nodeTxs[i] holds node i's private copies of every consumer, keyed by
	// transaction ID.
	nodeTxs []map[[32]byte]*TestTx
	// running is the subset of nodes that have not finalized yet.
	nodes, running []Consensus
}

// shuffleConsumers randomly permutes the consumer list so each node observes
// the transactions in a different order.
func (n *Network) shuffleConsumers() {
	s := random.Uniform{N: len(n.consumers)}
	consumers := []*TestTx(nil)
	for s.CanSample() {
		consumers = append(consumers, n.consumers[s.Sample()])
	}
	n.consumers = consumers
}

// Initialize generates [numColors] candidate input IDs, then builds consumers
// that each spend up to [colorsPerConsumer] of them. An input that has been
// spent [maxInputConflicts] times is swap-removed from the candidate pool so
// no conflict set grows past that bound.
func (n *Network) Initialize(params snowball.Parameters, numColors, colorsPerConsumer, maxInputConflicts int) {
	n.params = params

	idCount := uint64(0)

	colorMap := map[[32]byte]int{}
	colors := []ids.ID{}
	for i := 0; i < numColors; i++ {
		idCount++
		color := ids.Empty.Prefix(idCount)
		colorMap[color.Key()] = i
		colors = append(colors, color)
	}

	count := map[[32]byte]int{}
	for len(colors) > 0 {
		// Sample, without replacement, the inputs this consumer will spend.
		selected := []ids.ID{}
		sampler := random.Uniform{N: len(colors)}
		for i := 0; i < colorsPerConsumer && sampler.CanSample(); i++ {
			selected = append(selected, colors[sampler.Sample()])
		}

		for _, sID := range selected {
			sKey := sID.Key()
			newCount := count[sKey] + 1
			count[sKey] = newCount
			if newCount >= maxInputConflicts {
				// Swap-remove the exhausted input from the pool.
				i := colorMap[sKey]
				e := len(colorMap) - 1

				eID := colors[e]
				eKey := eID.Key()

				colorMap[eKey] = i
				colors[i] = eID

				delete(colorMap, sKey)
				colors = colors[:e]
			}
		}

		idCount++
		tx := &TestTx{Identifier: ids.Empty.Prefix(idCount)}
		tx.Ins.Add(selected...)

		n.consumers = append(n.consumers, tx)
	}
}

// AddNode registers a new consensus instance, gives it a private copy of
// every consumer, and marks it as running.
func (n *Network) AddNode(cg Consensus) {
	cg.Initialize(snow.DefaultContextTest(), n.params)

	n.shuffleConsumers()

	txs := map[[32]byte]*TestTx{}
	for _, tx := range n.consumers {
		// Each node gets its own copy so statuses evolve independently.
		newTx := &TestTx{
			Identifier: tx.ID(),
			Ins:        tx.Ins,
			Stat:       choices.Processing,
		}
		txs[newTx.ID().Key()] = newTx

		cg.Add(newTx)
	}

	n.nodeTxs = append(n.nodeTxs, txs)
	n.nodes = append(n.nodes, cg)
	n.running = append(n.running, cg)
}

// Finalized returns true once every node has finalized.
func (n *Network) Finalized() bool {
	return len(n.running) == 0
}

// Round picks one running node at random, samples K peers' preferences and
// accepted transactions, and records the resulting poll on the chosen node.
func (n *Network) Round() {
	if len(n.running) > 0 {
		runningInd := random.Rand(0, len(n.running))
		running := n.running[runningInd]

		sampler := random.Uniform{N: len(n.nodes)}
		sampledColors := ids.Bag{}
		sampledColors.SetThreshold(n.params.Alpha)
		for i := 0; i < n.params.K; i++ {
			sample := sampler.Sample()
			peer := n.nodes[sample]
			peerTxs := n.nodeTxs[sample]

			if peer != running {
				preferences := peer.Preferences()
				for _, color := range preferences.List() {
					sampledColors.Add(color)
				}
				for _, tx := range peerTxs {
					if tx.Status() == choices.Accepted {
						sampledColors.Add(tx.ID())
					}
				}
			} else {
				// NOTE(review): assumes there is more than one node (and,
				// presumably, K < len(n.nodes)); with a single node this loop
				// would retry forever — confirm callers respect that.
				i-- // So that we still sample k people
			}
		}

		running.RecordPoll(sampledColors)

		// If this node has been finalized, remove it from the poller
		if running.Finalized() {
			newSize := len(n.running) - 1
			n.running[runningInd] = n.running[newSize]
			n.running = n.running[:newSize]
		}
	}
}

// Disagreement returns true if some consumer has been accepted by one node
// and rejected by another — a safety violation.
// NOTE(review): assumes every node holds a copy of every consumer (AddNode
// guarantees this); a missing entry would nil-deref on Status().
func (n *Network) Disagreement() bool {
	for _, color := range n.consumers {
		accepted := false
		rejected := false
		for _, nodeTx := range n.nodeTxs {
			tx := nodeTx[color.ID().Key()]
			accepted = accepted || tx.Status() == choices.Accepted
			rejected = rejected || tx.Status() == choices.Rejected
		}
		if accepted && rejected {
			return true
		}
	}
	return false
}

// Agreement returns true if every node assigned the same status to every
// consumer and there is no accept/reject disagreement.
func (n *Network) Agreement() bool {
	statuses := map[[32]byte]choices.Status{}
	for _, color := range n.consumers {
		for _, nodeTx := range n.nodeTxs {
			key := color.ID().Key()
			tx := nodeTx[key]
			prevStatus, exists := statuses[key]
			if exists && prevStatus != tx.Status() {
				return false
			}
			statuses[key] = tx.Status()
		}
	}
	return !n.Disagreement()
}
diff --git a/snow/consensus/snowstorm/test_tx.go b/snow/consensus/snowstorm/test_tx.go
new file mode 100644
index 0000000..d5fec1b
--- /dev/null
+++ b/snow/consensus/snowstorm/test_tx.go
@@ -0,0 +1,45 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package snowstorm

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
)

// TestTx is a useful test transaction
type TestTx struct {
	Identifier ids.ID         // unique ID of this transaction
	Deps       []Tx           // transactions that must be decided first
	Ins        ids.Set        // input IDs this transaction spends
	Stat       choices.Status // current decision status
	Bits       []byte         // raw byte representation
}

// ID implements the Consumer interface
func (tx *TestTx) ID() ids.ID { return tx.Identifier }

// Dependencies implements the Consumer interface
func (tx *TestTx) Dependencies() []Tx { return tx.Deps }

// InputIDs implements the Consumer interface
func (tx *TestTx) InputIDs() ids.Set { return tx.Ins }

// Status implements the Consumer interface
func (tx *TestTx) Status() choices.Status { return tx.Stat }

// Accept implements the Consumer interface
func (tx *TestTx) Accept() { tx.Stat = choices.Accepted }

// Reject implements the Consumer interface
func (tx *TestTx) Reject() { tx.Stat = choices.Rejected }

// Reset sets the status to pending
func (tx *TestTx) Reset() { tx.Stat = choices.Processing }

// Verify returns nil
func (tx *TestTx) Verify() error { return nil }

// Bytes returns the bits
func (tx *TestTx) Bytes() []byte { return tx.Bits }
diff --git a/snow/consensus/snowstorm/test_tx_test.go b/snow/consensus/snowstorm/test_tx_test.go
new file mode 100644
index 0000000..1297036
--- /dev/null
+++ b/snow/consensus/snowstorm/test_tx_test.go
@@ -0,0 +1,24 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package snowstorm

import (
	"testing"
)

// TestTxVerify checks that the shared Red test transaction verifies cleanly.
func TestTxVerify(t *testing.T) {
	Setup()

	if err := Red.Verify(); err != nil {
		t.Fatal(err)
	}
}

// TestTxBytes checks that a test transaction with no bytes reports nil.
func TestTxBytes(t *testing.T) {
	Setup()

	if Red.Bytes() != nil {
		t.Fatalf("Expected nil bytes")
	}
}
diff --git a/snow/context.go b/snow/context.go
new file mode 100644
index 0000000..ce213c1
--- /dev/null
+++ b/snow/context.go
@@ -0,0 +1,64 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package snow

import (
	"io"
	"net/http"
	"sync"

	"github.com/ava-labs/gecko/database"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/triggers"
	"github.com/ava-labs/gecko/utils/logging"
)

// Callable is an HTTP-style handler that can be invoked with a method, a
// base path, an endpoint, a request body, and headers.
type Callable interface {
	Call(writer http.ResponseWriter, method, base, endpoint string, body io.Reader, headers map[string]string) error
}

// Keystore provides access to a per-user database scoped by
// username/password.
type Keystore interface {
	GetDatabase(username, password string) (database.Database, error)
}

// AliasLookup resolves human-readable aliases to IDs and back.
type AliasLookup interface {
	Lookup(alias string) (ids.ID, error)
	PrimaryAlias(id ids.ID) (string, error)
}

// Context is information about the current execution.
// [NetworkID] is the ID of the network this context exists within.
// [ChainID] is the ID of the chain this context exists within.
// [NodeID] is the ID of this node
type Context struct {
	NetworkID           uint32
	ChainID             ids.ID
	NodeID              ids.ShortID
	Log                 logging.Logger
	DecisionDispatcher  *triggers.EventDispatcher
	ConsensusDispatcher *triggers.EventDispatcher
	Lock                sync.RWMutex
	HTTP                Callable
	Keystore            Keystore
	BCLookup            AliasLookup
}

// DefaultContextTest returns a Context suitable for tests: no-op logging,
// freshly initialized event dispatchers, and zero-valued IDs. NetworkID,
// HTTP, and Keystore are left at their zero values.
func DefaultContextTest() *Context {
	decisionED := triggers.EventDispatcher{}
	decisionED.Initialize(logging.NoLog{})
	consensusED := triggers.EventDispatcher{}
	consensusED.Initialize(logging.NoLog{})
	return &Context{
		ChainID:             ids.Empty,
		NodeID:              ids.ShortEmpty,
		Log:                 logging.NoLog{},
		DecisionDispatcher:  &decisionED,
		ConsensusDispatcher: &consensusED,
		BCLookup:            &ids.Aliaser{},
	}
}
diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go
new file mode 100644
index 0000000..7d3d7c8
--- /dev/null
+++ b/snow/engine/avalanche/bootstrapper.go
@@ -0,0 +1,209 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package avalanche

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/prometheus/client_golang/prometheus"
)

// BootstrapConfig wires together the queues, vertex state, and VM that the
// bootstrapper needs, on top of the common engine configuration.
type BootstrapConfig struct {
	common.Config

	// VtxBlocked tracks operations that are blocked on vertices
	// TxBlocked tracks operations that are blocked on transactions
	VtxBlocked, TxBlocked *queue.Jobs

	State State
	VM    DAGVM
}

// bootstrapper fetches the ancestry of the accepted frontier and replays it
// before handing control to consensus.
type bootstrapper struct {
	BootstrapConfig
	metrics
	common.Bootstrapper

	// pending is the set of vertex IDs with an outstanding Get request.
	pending ids.Set
	// finished guards against running the completion logic more than once.
	finished bool
	// onFinished is invoked once bootstrapping completes.
	onFinished func()
}

// Initialize this engine.
func (b *bootstrapper) Initialize(config BootstrapConfig) {
	b.BootstrapConfig = config

	// Vertex jobs report to the vertex counters...
	b.VtxBlocked.SetParser(&vtxParser{
		numAccepted: b.numBootstrappedVtx,
		numDropped:  b.numDroppedVtx,
		state:       b.State,
	})

	// ...and transaction jobs report to the transaction counters.
	b.TxBlocked.SetParser(&txParser{
		numAccepted: b.numBootstrappedTx,
		numDropped:  b.numDroppedTx,
		vm:          b.VM,
	})

	config.Bootstrapable = b
	b.Bootstrapper.Initialize(config.Config)
}

// CurrentAcceptedFrontier ...
+func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set { + acceptedFrontier := ids.Set{} + acceptedFrontier.Add(b.State.Edge()...) + return acceptedFrontier +} + +// FilterAccepted ... +func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { + acceptedVtxIDs := ids.Set{} + for _, vtxID := range containerIDs.List() { + if vtx, err := b.State.GetVertex(vtxID); err == nil && vtx.Status() == choices.Accepted { + acceptedVtxIDs.Add(vtxID) + } + } + return acceptedVtxIDs +} + +// ForceAccepted ... +func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) { + for _, vtxID := range acceptedContainerIDs.List() { + b.fetch(vtxID) + } + + if numPending := b.pending.Len(); numPending == 0 { + // TODO: This typically indicates bootstrapping has failed, so this + // should be handled appropriately + b.finish() + } +} + +// Put ... +func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) { + b.BootstrapConfig.Context.Log.Verbo("Put called for vertexID %s", vtxID) + + if !b.pending.Contains(vtxID) { + return + } + + vtx, err := b.State.ParseVertex(vtxBytes) + if err != nil { + b.BootstrapConfig.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s", + err, + formatting.DumpBytes{Bytes: vtxBytes}) + b.GetFailed(vdr, requestID, vtxID) + return + } + + b.addVertex(vtx) +} + +// GetFailed ... 
+func (b *bootstrapper) GetFailed(_ ids.ShortID, _ uint32, vtxID ids.ID) { b.sendRequest(vtxID) } + +func (b *bootstrapper) fetch(vtxID ids.ID) { + if b.pending.Contains(vtxID) { + return + } + + vtx, err := b.State.GetVertex(vtxID) + if err != nil { + b.sendRequest(vtxID) + return + } + b.addVertex(vtx) +} + +func (b *bootstrapper) sendRequest(vtxID ids.ID) { + validators := b.BootstrapConfig.Validators.Sample(1) + if len(validators) == 0 { + b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", vtxID) + return + } + validatorID := validators[0].ID() + b.RequestID++ + + b.pending.Add(vtxID) + b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID) + + b.numPendingRequests.Set(float64(b.pending.Len())) +} + +func (b *bootstrapper) addVertex(vtx avalanche.Vertex) { + vts := []avalanche.Vertex{vtx} + + for len(vts) > 0 { + newLen := len(vts) - 1 + vtx := vts[newLen] + vts = vts[:newLen] + + vtxID := vtx.ID() + switch status := vtx.Status(); status { + case choices.Unknown: + b.sendRequest(vtxID) + case choices.Processing: + b.pending.Remove(vtxID) + + if err := b.VtxBlocked.Push(&vertexJob{ + numAccepted: b.numBootstrappedVtx, + numDropped: b.numDroppedVtx, + vtx: vtx, + }); err == nil { + b.numBlockedVtx.Inc() + } + for _, tx := range vtx.Txs() { + if err := b.TxBlocked.Push(&txJob{ + numAccepted: b.numBootstrappedVtx, + numDropped: b.numDroppedVtx, + tx: tx, + }); err == nil { + b.numBlockedTx.Inc() + } + } + + for _, parent := range vtx.Parents() { + vts = append(vts, parent) + } + case choices.Accepted: + b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", vtxID) + case choices.Rejected: + b.BootstrapConfig.Context.Log.Error("Bootstrapping wants to accept %s, however it was previously rejected", vtxID) + } + } + + numPending := b.pending.Len() + b.numPendingRequests.Set(float64(numPending)) + if numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) finish() { + if b.finished { + return + } + + 
	// Execute all queued transaction jobs, then all queued vertex jobs.
	b.executeAll(b.TxBlocked, b.numBlockedTx)
	b.executeAll(b.VtxBlocked, b.numBlockedVtx)

	// Start consensus
	b.onFinished()
	b.finished = true
}

// executeAll drains [jobs], decrementing the blocked-jobs gauge for each
// popped job and logging (but not aborting on) execution failures.
func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) {
	for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
		numBlocked.Dec()
		if err := jobs.Execute(job); err != nil {
			b.BootstrapConfig.Context.Log.Warn("Error executing: %s", err)
		}
	}
}
diff --git a/snow/engine/avalanche/bootstrapper_test.go b/snow/engine/avalanche/bootstrapper_test.go
new file mode 100644
index 0000000..d1be936
--- /dev/null
+++ b/snow/engine/avalanche/bootstrapper_test.go
@@ -0,0 +1,959 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package avalanche

import (
	"bytes"
	"errors"
	"fmt"
	"testing"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/database/prefixdb"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/consensus/snowstorm"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
	"github.com/ava-labs/gecko/snow/networking/handler"
	"github.com/ava-labs/gecko/snow/networking/router"
	"github.com/ava-labs/gecko/snow/networking/timeout"
	"github.com/ava-labs/gecko/snow/validators"
)

var (
	errUnknownVertex       = errors.New("unknown vertex")
	errParsedUnknownVertex = errors.New("parsed unknown vertex")
)

// newConfig builds a bootstrapper config backed by test doubles for the
// sender, vertex state, and VM, plus a single random validator.
func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, *stateTest, *VMTest) {
	ctx := snow.DefaultContextTest()

	peers := validators.NewSet()
	db := memdb.New()
	sender := &common.SenderTest{}
	state := &stateTest{}
	vm := &VMTest{}
	engine := &Transitive{}
	handler := &handler.Handler{}
	router :=
&router.ChainRouter{} + timeouts := &timeout.Manager{} + + sender.T = t + state.t = t + vm.T = t + + sender.Default(true) + state.Default(true) + vm.Default(true) + + sender.CantGetAcceptedFrontier = false + + peer := validators.GenerateRandomValidator(1) + peerID := peer.ID() + peers.Add(peer) + + handler.Initialize(engine, make(chan common.Message), 1) + timeouts.Initialize(0) + router.Initialize(ctx.Log, timeouts) + + vtxBlocker, _ := queue.New(prefixdb.New([]byte("vtx"), db)) + txBlocker, _ := queue.New(prefixdb.New([]byte("tx"), db)) + + commonConfig := common.Config{ + Context: ctx, + Validators: peers, + Beacons: peers, + Alpha: peers.Len()/2 + 1, + Sender: sender, + } + return BootstrapConfig{ + Config: commonConfig, + VtxBlocked: vtxBlocker, + TxBlocked: txBlocker, + State: state, + VM: vm, + }, peerID, sender, state, vm +} + +func TestBootstrapperSingleFrontier(t *testing.T) { + config, peerID, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + vtxID2 := ids.Empty.Prefix(2) + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + vtxBytes2 := []byte{2} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Processing, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + id: vtxID1, + height: 0, + status: choices.Processing, + bytes: vtxBytes1, + } + vtx2 := &Vtx{ + id: vtxID2, + height: 0, + status: choices.Processing, + bytes: vtxBytes2, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID0, + vtxID1, + vtxID2, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0), vtxID.Equals(vtxID1), vtxID.Equals(vtxID2): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + vtxIDToReqID := map[[32]byte]uint32{} + sender.GetF = func(vdr 
ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID0), vtxID.Equals(vtxID1), vtxID.Equals(vtxID2): + default: + t.Fatalf("Requested unknown vertex") + } + + vtxKey := vtxID.Key() + if _, ok := vtxIDToReqID[vtxKey]; ok { + t.Fatalf("Message sent multiple times") + } + vtxIDToReqID[vtxKey] = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + if numReqs := len(vtxIDToReqID); numReqs != 3 { + t.Fatalf("Should have requested %d vertices, %d were requested", 3, numReqs) + } + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + case bytes.Equal(vtxBytes, vtxBytes2): + return vtx2, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.edge = func() []ids.ID { + return []ids.ID{ + vtxID0, + vtxID1, + vtxID2, + } + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return vtx1, nil + case vtxID.Equals(vtxID2): + return vtx2, nil + default: + t.Fatalf("Requested unknown vertex") + panic("Requested unknown vertex") + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + for vtxKey, reqID := range vtxIDToReqID { + vtxID := ids.NewID(vtxKey) + + switch { + case vtxID.Equals(vtxID0): + bs.Put(peerID, reqID, vtxID, vtxBytes0) + case vtxID.Equals(vtxID1): + bs.Put(peerID, reqID, vtxID, vtxBytes1) + case vtxID.Equals(vtxID2): + bs.Put(peerID, reqID, vtxID, vtxBytes2) + default: + t.Fatalf("Requested unknown vertex") + } + } + + state.parseVertex = nil + state.edge = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if 
vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } + if vtx1.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } + if vtx2.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } +} + +func TestBootstrapperUnknownByzantineResponse(t *testing.T) { + config, peerID, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Processing, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + id: vtxID1, + height: 0, + status: choices.Processing, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID0, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + requestID := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID0): + default: + t.Fatalf("Requested unknown vertex") + } + + *requestID = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return vtx1, nil + 
default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *requestID, vtxID1, vtxBytes1) + bs.Put(peerID, *requestID, vtxID0, vtxBytes0) + + state.parseVertex = nil + state.edge = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } + if vtx1.Status() != choices.Processing { + t.Fatalf("Vertex should be processing") + } +} + +func TestBootstrapperVertexDependencies(t *testing.T) { + config, peerID, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Unknown, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: vtxID1, + height: 1, + status: choices.Processing, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID1, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + reqIDPtr := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID1): + default: + t.Fatalf("Requested unknown vertex") + } + + *reqIDPtr = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes1): + 
return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID0): + default: + t.Fatalf("Requested wrong vertex") + } + + *reqIDPtr = reqID + } + + bs.Put(peerID, *reqIDPtr, vtxID1, vtxBytes1) + + state.parseVertex = nil + sender.GetF = nil + + if vtx0.Status() != choices.Unknown { + t.Fatalf("Vertex should be unknown") + } + if vtx1.Status() != choices.Processing { + t.Fatalf("Vertex should be processing") + } + + vtx0.status = choices.Processing + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.edge = func() []ids.ID { + return []ids.ID{ + vtxID0, + vtxID1, + } + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return vtx1, nil + default: + t.Fatalf("Requested unknown vertex") + panic("Requested unknown vertex") + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *reqIDPtr, vtxID0, vtxBytes0) + + state.parseVertex = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } + if vtx1.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } +} + +func TestBootstrapperTxDependencies(t *testing.T) { + config, peerID, sender, state, vm := newConfig(t) + + utxos := []ids.ID{GenerateID(), GenerateID()} + + txID0 := GenerateID() + txID1 := GenerateID() + + txBytes0 := []byte{0} + 
txBytes1 := []byte{1} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID0, + Stat: choices.Processing, + }, + bytes: txBytes0, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID1, + Deps: []snowstorm.Tx{tx0}, + Stat: choices.Processing, + }, + bytes: txBytes1, + } + tx1.Ins.Add(utxos[1]) + + vtxID0 := GenerateID() + vtxID1 := GenerateID() + + vtxBytes0 := []byte{2} + vtxBytes1 := []byte{3} + + vtx0 := &Vtx{ + id: vtxID0, + txs: []snowstorm.Tx{tx1}, + height: 0, + status: choices.Unknown, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: vtxID1, + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID1, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + reqIDPtr := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID1): + default: + t.Fatal(errUnknownVertex) + } + + *reqIDPtr = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + 
case vtxID.Equals(vtxID0): + default: + t.Fatalf("Requested wrong vertex") + } + + *reqIDPtr = reqID + } + + bs.Put(peerID, *reqIDPtr, vtxID1, vtxBytes1) + + state.parseVertex = nil + sender.GetF = nil + + if tx0.Status() != choices.Processing { + t.Fatalf("Tx should be processing") + } + if tx1.Status() != choices.Processing { + t.Fatalf("Tx should be processing") + } + + if vtx0.Status() != choices.Unknown { + t.Fatalf("Vertex should be unknown") + } + if vtx1.Status() != choices.Processing { + t.Fatalf("Vertex should be processing") + } + + tx0.Stat = choices.Processing + vtx0.status = choices.Processing + + vm.ParseTxF = func(txBytes []byte) (snowstorm.Tx, error) { + switch { + case bytes.Equal(txBytes, txBytes0): + return tx0, nil + case bytes.Equal(txBytes, txBytes1): + return tx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.edge = func() []ids.ID { + return []ids.ID{ + vtxID0, + vtxID1, + } + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return vtx1, nil + default: + t.Fatalf("Requested unknown vertex") + panic("Requested unknown vertex") + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *reqIDPtr, vtxID0, vtxBytes0) + + state.parseVertex = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Should have finished bootstrapping") + } + if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should be accepted") + } + if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should be accepted") + } + + if vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be 
accepted") + } + if vtx1.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } +} + +func TestBootstrapperMissingTxDependency(t *testing.T) { + config, peerID, sender, state, vm := newConfig(t) + + utxos := []ids.ID{GenerateID(), GenerateID()} + + txID0 := GenerateID() + txID1 := GenerateID() + + txBytes0 := []byte{0} + txBytes1 := []byte{1} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID0, + Stat: choices.Unknown, + }, + } + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID1, + Deps: []snowstorm.Tx{tx0}, + Stat: choices.Processing, + }, + bytes: txBytes1, + } + tx1.Ins.Add(utxos[1]) + + vtxID0 := GenerateID() + vtxID1 := GenerateID() + + vtxBytes0 := []byte{2} + vtxBytes1 := []byte{3} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Unknown, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: vtxID1, + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID1, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + reqIDPtr := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID1): + default: + t.Fatalf("Requested unknown vertex") + } + + *reqIDPtr = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes1): + return 
vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(vtxID0): + default: + t.Fatalf("Requested wrong vertex") + } + + *reqIDPtr = reqID + } + + bs.Put(peerID, *reqIDPtr, vtxID1, vtxBytes1) + + state.parseVertex = nil + sender.GetF = nil + + if tx0.Status() != choices.Unknown { + t.Fatalf("Tx should be unknown") + } + if tx1.Status() != choices.Processing { + t.Fatalf("Tx should be processing") + } + + if vtx0.Status() != choices.Unknown { + t.Fatalf("Vertex should be unknown") + } + if vtx1.Status() != choices.Processing { + t.Fatalf("Vertex should be processing") + } + + vtx0.status = choices.Processing + + vm.ParseTxF = func(txBytes []byte) (snowstorm.Tx, error) { + switch { + case bytes.Equal(txBytes, txBytes0): + return tx0, nil + case bytes.Equal(txBytes, txBytes1): + return tx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.edge = func() []ids.ID { + return []ids.ID{ + vtxID0, + } + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + default: + t.Fatalf("Requested unknown vertex") + panic("Requested unknown vertex") + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *reqIDPtr, vtxID0, vtxBytes0) + + state.parseVertex = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if tx0.Status() != choices.Unknown { + 
// TestBootstrapperAcceptedFrontier verifies that CurrentAcceptedFrontier
// reports exactly the vertex IDs returned by the state's edge function.
func TestBootstrapperAcceptedFrontier(t *testing.T) {
	config, _, _, state, _ := newConfig(t)

	vtxID0 := GenerateID()
	vtxID1 := GenerateID()
	vtxID2 := GenerateID() // never part of the edge; must not be reported

	bs := bootstrapper{}
	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
	bs.Initialize(config)

	// The mocked edge contains only vtxID0 and vtxID1.
	state.edge = func() []ids.ID {
		return []ids.ID{
			vtxID0,
			vtxID1,
		}
	}

	accepted := bs.CurrentAcceptedFrontier()

	state.edge = nil

	if !accepted.Contains(vtxID0) {
		t.Fatalf("Vtx should be accepted")
	}
	if !accepted.Contains(vtxID1) {
		t.Fatalf("Vtx should be accepted")
	}
	if accepted.Contains(vtxID2) {
		t.Fatalf("Vtx shouldn't be accepted")
	}
}

// TestBootstrapperFilterAccepted verifies that FilterAccepted keeps only the
// IDs of vertices that are known and already accepted, dropping unknown ones.
func TestBootstrapperFilterAccepted(t *testing.T) {
	config, _, _, state, _ := newConfig(t)

	vtxID0 := GenerateID()
	vtxID1 := GenerateID()
	vtxID2 := GenerateID() // unknown to the state; must be filtered out

	vtx0 := &Vtx{
		id:     vtxID0,
		status: choices.Accepted,
	}
	vtx1 := &Vtx{
		id:     vtxID1,
		status: choices.Accepted,
	}

	bs := bootstrapper{}
	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
	bs.Initialize(config)

	vtxIDs := ids.Set{}
	vtxIDs.Add(
		vtxID0,
		vtxID1,
		vtxID2,
	)

	// vtxID0 and vtxID1 resolve to accepted vertices; vtxID2 is unknown.
	state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
		switch {
		case vtxID.Equals(vtxID0):
			return vtx0, nil
		case vtxID.Equals(vtxID1):
			return vtx1, nil
		case vtxID.Equals(vtxID2):
			return nil, errUnknownVertex
		}
		t.Fatal(errUnknownVertex)
		return nil, errUnknownVertex
	}

	accepted := bs.FilterAccepted(vtxIDs)

	state.getVertex = nil

	if !accepted.Contains(vtxID0) {
		t.Fatalf("Vtx should be accepted")
	}
	if !accepted.Contains(vtxID1) {
		t.Fatalf("Vtx should be accepted")
	}
	if accepted.Contains(vtxID2) {
		t.Fatalf("Vtx shouldn't be accepted")
	}
}
+ +package avalanche + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" +) + +func DefaultConfig() Config { + vtxBlocked, _ := queue.New(memdb.New()) + txBlocked, _ := queue.New(memdb.New()) + return Config{ + BootstrapConfig: BootstrapConfig{ + Config: common.DefaultConfigTest(), + VtxBlocked: vtxBlocked, + TxBlocked: txBlocked, + State: &stateTest{}, + VM: &VMTest{}, + }, + Params: avalanche.Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + }, + Consensus: &avalanche.Topological{}, + } +} diff --git a/snow/engine/avalanche/convincer.go b/snow/engine/avalanche/convincer.go new file mode 100644 index 0000000..9873661 --- /dev/null +++ b/snow/engine/avalanche/convincer.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/engine/common" +) + +type convincer struct { + consensus avalanche.Consensus + sender common.Sender + vdr ids.ShortID + requestID uint32 + abandoned bool + deps ids.Set +} + +func (c *convincer) Dependencies() ids.Set { return c.deps } + +func (c *convincer) Fulfill(id ids.ID) { + c.deps.Remove(id) + c.Update() +} + +func (c *convincer) Abandon(ids.ID) { c.abandoned = true } + +func (c *convincer) Update() { + if c.abandoned || c.deps.Len() != 0 { + return + } + + c.sender.Chits(c.vdr, c.requestID, c.consensus.Preferences()) +} diff --git a/snow/engine/avalanche/engine.go b/snow/engine/avalanche/engine.go new file mode 100644 index 0000000..3ccedbc --- /dev/null +++ b/snow/engine/avalanche/engine.go @@ -0,0 +1,22 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/snow/engine/common" +) + +// Engine describes the events that can occur on a consensus instance +type Engine interface { + common.Engine + + /* + *************************************************************************** + ***************************** Setup/Teardown ****************************** + *************************************************************************** + */ + + // Initialize this engine. + Initialize(Config) +} diff --git a/snow/engine/avalanche/engine_test.go b/snow/engine/avalanche/engine_test.go new file mode 100644 index 0000000..1b1c013 --- /dev/null +++ b/snow/engine/avalanche/engine_test.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
var (
	// Genesis is a default vertex ID available to the tests in this package.
	Genesis = GenerateID()
	// offset seeds GenerateID so every call yields a distinct ID.
	offset = uint64(0)
)

// GenerateID returns a fresh, unique test ID on every call.
func GenerateID() ids.ID {
	offset++
	return ids.Empty.Prefix(offset)
}

// Vtx is a test double implementing avalanche.Vertex with settable fields.
type Vtx struct {
	parents []avalanche.Vertex
	id      ids.ID
	txs     []snowstorm.Tx

	height int
	status choices.Status

	bytes []byte
}

func (v *Vtx) ID() ids.ID                  { return v.id }
func (v *Vtx) DependencyIDs() []ids.ID     { return nil }
func (v *Vtx) Parents() []avalanche.Vertex { return v.parents }
func (v *Vtx) Txs() []snowstorm.Tx         { return v.txs }
func (v *Vtx) Status() choices.Status      { return v.status }
func (v *Vtx) Accept()                     { v.status = choices.Accepted }
func (v *Vtx) Reject()                     { v.status = choices.Rejected }
func (v *Vtx) Bytes() []byte               { return v.bytes }

// sortVts orders test vertices by ascending height via sort.Interface.
type sortVts []*Vtx

func (sv sortVts) Less(i, j int) bool { return sv[i].height < sv[j].height }
func (sv sortVts) Len() int           { return len(sv) }
func (sv sortVts) Swap(i, j int)      { sv[j], sv[i] = sv[i], sv[j] }

// SortVts sorts [vts] in place by ascending height.
func SortVts(vts []*Vtx) { sort.Sort(sortVts(vts)) }

// TestTx extends snowstorm.TestTx with a raw byte representation.
type TestTx struct {
	snowstorm.TestTx
	bytes []byte
}

func (tx *TestTx) Bytes() []byte { return tx.bytes }

// Matches reports whether a and b contain the same IDs, ignoring order.
// NOTE(review): membership is checked against a set built from a, so inputs of
// equal length but different duplicate counts (e.g. [x,y] vs [x,x]) compare
// equal — acceptable for these tests, which never pass duplicates; confirm
// before reusing elsewhere.
func Matches(a, b []ids.ID) bool {
	if len(a) != len(b) {
		return false
	}
	set := ids.Set{}
	set.Add(a...)
	for _, id := range b {
		if !set.Contains(id) {
			return false
		}
	}
	return true
}

// MatchesShort is Matches for short IDs; the same duplicate caveat applies.
func MatchesShort(a, b []ids.ShortID) bool {
	if len(a) != len(b) {
		return false
	}
	set := ids.ShortSet{}
	set.Add(a...)
	for _, id := range b {
		if !set.Contains(id) {
			return false
		}
	}
	return true
}
// issuer issues [vtx] into consensus once every vertex and transaction it
// depends on has been delivered, then queries a validator sample about it.
type issuer struct {
	t                 *Transitive
	vtx               avalanche.Vertex
	issued, abandoned bool
	vtxDeps, txDeps   ids.Set
}

// FulfillVtx marks the vertex dependency [id] as satisfied.
func (i *issuer) FulfillVtx(id ids.ID) {
	i.vtxDeps.Remove(id)
	i.Update()
}

// FulfillTx marks the transaction dependency [id] as satisfied.
func (i *issuer) FulfillTx(id ids.ID) {
	i.txDeps.Remove(id)
	i.Update()
}

// Abandon gives up on issuing the vertex and unblocks anything waiting on it.
func (i *issuer) Abandon() {
	if !i.abandoned {
		vtxID := i.vtx.ID()
		i.t.pending.Remove(vtxID)
		i.abandoned = true

		// Notify blocked jobs that this vertex will never be issued.
		i.t.vtxBlocked.Abandon(vtxID)
	}
}

// Update issues the vertex into consensus once it has no outstanding
// dependencies, then pushes a query to a sample of K validators.
func (i *issuer) Update() {
	// Do nothing until all deps are fulfilled; never issue twice or re-issue
	// something consensus already has.
	if i.abandoned || i.issued || i.vtxDeps.Len() != 0 || i.txDeps.Len() != 0 || i.t.Consensus.VertexIssued(i.vtx) {
		return
	}
	i.issued = true

	vtxID := i.vtx.ID()
	i.t.pending.Remove(vtxID)

	// A vertex containing any invalid transaction is dropped wholesale.
	for _, tx := range i.vtx.Txs() {
		if err := tx.Verify(); err != nil {
			i.t.Config.Context.Log.Debug("Transaction failed verification due to %s, dropping vertex", err)
			i.t.vtxBlocked.Abandon(vtxID)
			return
		}
	}

	i.t.Config.Context.Log.Verbo("Adding vertex to consensus:\n%s", i.vtx)

	i.t.Consensus.Add(i.vtx)

	p := i.t.Consensus.Parameters()
	vdrs := i.t.Config.Validators.Sample(p.K) // Validators to sample

	vdrSet := ids.ShortSet{} // Validators to sample repr. as a set
	for _, vdr := range vdrs {
		vdrSet.Add(vdr.ID())
	}

	// Only query when a full sample of K validators was drawn and the poll
	// registered; an undersized sample drops the query with an error log.
	i.t.RequestID++
	if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) {
		i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes())
	} else if numVdrs < p.K {
		i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID)
	}

	// Unblock everything waiting on this vertex or its transactions.
	i.t.vtxBlocked.Fulfill(vtxID)
	for _, tx := range i.vtx.Txs() {
		i.t.txBlocked.Fulfill(tx.ID())
	}
}

// vtxIssuer adapts issuer to the dependency scheduler for vertex deps.
type vtxIssuer struct{ i *issuer }

func (vi *vtxIssuer) Dependencies() ids.Set { return vi.i.vtxDeps }
func (vi *vtxIssuer) Fulfill(id ids.ID)     { vi.i.FulfillVtx(id) }
func (vi *vtxIssuer) Abandon(ids.ID)        { vi.i.Abandon() }
func (vi *vtxIssuer) Update()               { vi.i.Update() }

// txIssuer adapts issuer to the dependency scheduler for transaction deps.
type txIssuer struct{ i *issuer }

func (ti *txIssuer) Dependencies() ids.Set { return ti.i.txDeps }
func (ti *txIssuer) Fulfill(id ids.ID)     { ti.i.FulfillTx(id) }
func (ti *txIssuer) Abandon(ids.ID)        { ti.i.Abandon() }
func (ti *txIssuer) Update()               { ti.i.Update() }
+ +package avalanche + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/utils/logging" +) + +type metrics struct { + numPendingRequests, numBlockedVtx, numBlockedTx prometheus.Gauge + numBootstrappedVtx, numDroppedVtx, + numBootstrappedTx, numDroppedTx prometheus.Counter + + numPolls, numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge +} + +// Initialize implements the Engine interface +func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) { + m.numPendingRequests = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_bs_vtx_requests", + Help: "Number of pending bootstrap vertex requests", + }) + m.numBlockedVtx = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_bs_blocked_vts", + Help: "Number of blocked bootstrap vertices", + }) + m.numBlockedTx = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_bs_blocked_txs", + Help: "Number of blocked bootstrap txs", + }) + m.numBootstrappedVtx = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "av_bs_accepted_vts", + Help: "Number of accepted vertices", + }) + m.numDroppedVtx = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "av_bs_dropped_vts", + Help: "Number of dropped vertices", + }) + m.numBootstrappedTx = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "av_bs_accepted_txs", + Help: "Number of accepted txs", + }) + m.numDroppedTx = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "av_bs_dropped_txs", + Help: "Number of dropped txs", + }) + m.numPolls = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_polls", + Help: "Number of pending network polls", + }) + m.numVtxRequests = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_vtx_requests", + 
Help: "Number of pending vertex requests", + }) + m.numTxRequests = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_tx_requests", + Help: "Number of pending transactions", + }) + m.numPendingVtx = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "av_blocked_vts", + Help: "Number of blocked vertices", + }) + + if err := registerer.Register(m.numPendingRequests); err != nil { + log.Error("Failed to register av_bs_vtx_requests statistics due to %s", err) + } + if err := registerer.Register(m.numBlockedVtx); err != nil { + log.Error("Failed to register av_bs_blocked_vts statistics due to %s", err) + } + if err := registerer.Register(m.numBlockedTx); err != nil { + log.Error("Failed to register av_bs_blocked_txs statistics due to %s", err) + } + if err := registerer.Register(m.numBootstrappedVtx); err != nil { + log.Error("Failed to register av_bs_accepted_vts statistics due to %s", err) + } + if err := registerer.Register(m.numDroppedVtx); err != nil { + log.Error("Failed to register av_bs_dropped_vts statistics due to %s", err) + } + if err := registerer.Register(m.numBootstrappedTx); err != nil { + log.Error("Failed to register av_bs_accepted_txs statistics due to %s", err) + } + if err := registerer.Register(m.numDroppedTx); err != nil { + log.Error("Failed to register av_bs_dropped_txs statistics due to %s", err) + } + if err := registerer.Register(m.numPolls); err != nil { + log.Error("Failed to register av_polls statistics due to %s", err) + } + if err := registerer.Register(m.numVtxRequests); err != nil { + log.Error("Failed to register av_vtx_requests statistics due to %s", err) + } + if err := registerer.Register(m.numTxRequests); err != nil { + log.Error("Failed to register av_tx_requests statistics due to %s", err) + } + if err := registerer.Register(m.numPendingVtx); err != nil { + log.Error("Failed to register av_blocked_vts statistics due to %s", err) + } +} diff --git 
a/snow/engine/avalanche/polls.go b/snow/engine/avalanche/polls.go new file mode 100644 index 0000000..282fe6a --- /dev/null +++ b/snow/engine/avalanche/polls.go @@ -0,0 +1,101 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "fmt" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +// TODO: There is a conservative early termination case that doesn't require dag +// traversals we may want to implement. The algorithm would go as follows: +// Keep track of the number of response that reference an ID. If an ID gets >= +// alpha responses, then remove it from all responses and place it into a chit +// list. Remove all empty responses. If the number of responses + the number of +// pending responses is less than alpha, terminate the poll. +// In the synchronous + virtuous case, when everyone returns the same hash, the +// poll now terminates after receiving alpha responses. +// In the rogue case, it is possible that the poll doesn't terminate as quickly +// as possible, because IDs may have the alpha threshold but only when counting +// transitive votes. In this case, we may wait even if it is no longer possible +// for another ID to earn alpha votes. +// Because alpha is typically set close to k, this may not be performance +// critical. However, early termination may be performance critical with crashed +// nodes. + +type polls struct { + log logging.Logger + numPolls prometheus.Gauge + m map[uint32]poll +} + +// Add to the current set of polls +// Returns true if the poll was registered correctly and the network sample +// should be made. 
// Add to the current set of polls
// Returns true if the poll was registered correctly and the network sample
// should be made.
func (p *polls) Add(requestID uint32, numPolled int) bool {
	poll, exists := p.m[requestID]
	if !exists {
		// poll is the zero value here; only the pending count needs setting.
		poll.numPending = numPolled
		p.m[requestID] = poll

		p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
	}
	return !exists
}

// Vote registers the connections response to a query for [id]. If there was no
// query, or the response has already be registered, nothing is performed.
//
// Returns the accumulated votes and true when this response completes the
// poll; (nil, false) otherwise.
func (p *polls) Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.UniqueBag, bool) {
	p.log.Verbo("Vote. requestID: %d. validatorID: %s.", requestID, vdr)
	poll, exists := p.m[requestID]
	p.log.Verbo("Poll: %+v", poll)
	if !exists {
		return nil, false
	}

	poll.Vote(votes)
	if poll.Finished() {
		p.log.Verbo("Poll is finished")
		delete(p.m, requestID)
		p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
		return poll.votes, true
	}
	// poll is a value copy; the updated state must be written back to the map.
	p.m[requestID] = poll
	return nil, false
}

// String summarizes all outstanding polls for debugging.
func (p *polls) String() string {
	sb := strings.Builder{}

	sb.WriteString(fmt.Sprintf("Current polls: (Size = %d)", len(p.m)))
	for requestID, poll := range p.m {
		sb.WriteString(fmt.Sprintf("\n    %d: %s", requestID, poll))
	}

	return sb.String()
}

// poll represents the current state of a network poll for a vertex
type poll struct {
	votes      ids.UniqueBag // votes keyed by a per-responder set index
	numPending int           // responses still outstanding
}

// Vote registers a vote for this poll
func (p *poll) Vote(votes []ids.ID) {
	if p.numPending > 0 {
		p.numPending--
		// The decremented pending count doubles as a unique set index in the
		// UniqueBag, giving each responding validator its own vote set.
		p.votes.Add(uint(p.numPending), votes...)
	}
}

// Finished returns true if the poll has completed, with no more required
// responses
func (p poll) Finished() bool { return p.numPending <= 0 }
func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.numPending) }
+// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +// State defines the persistant storage that is required by the consensus engine +type State interface { + // Create a new vertex from the contents of a vertex + BuildVertex(parentIDs ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) + + // Attempt to convert a stream of bytes into a vertex + ParseVertex(vertex []byte) (avalanche.Vertex, error) + + // GetVertex attempts to load a vertex by hash from storage + GetVertex(vtxID ids.ID) (avalanche.Vertex, error) + + // Edge returns a list of accepted vertex IDs with no accepted children + Edge() (vtxIDs []ids.ID) +} diff --git a/snow/engine/avalanche/state/codec.go b/snow/engine/avalanche/state/codec.go new file mode 100644 index 0000000..9c0c794 --- /dev/null +++ b/snow/engine/avalanche/state/codec.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +// ID is an identifier for a codec +type ID uint32 + +// Codec types +const ( + NoID ID = iota + GenericID + CustomID +) + +// Verify that the codec is a known codec value. Returns nil if the codec is +// valid. +func (c ID) Verify() error { + switch c { + case NoID, GenericID, CustomID: + return nil + default: + return errBadCodec + } +} + +func (c ID) String() string { + switch c { + case NoID: + return "No Codec" + case GenericID: + return "Generic Codec" + case CustomID: + return "Custom Codec" + default: + return "Unknown Codec" + } +} diff --git a/snow/engine/avalanche/state/prefixed_state.go b/snow/engine/avalanche/state/prefixed_state.go new file mode 100644 index 0000000..a35a368 --- /dev/null +++ b/snow/engine/avalanche/state/prefixed_state.go @@ -0,0 +1,92 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package state + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +const ( + vtxID uint64 = iota + vtxStatusID + edgeID +) + +var ( + uniqueEdgeID = ids.Empty.Prefix(edgeID) +) + +type prefixedState struct { + state *state + + vtx, status cache.Cacher + uniqueVtx cache.Deduplicator +} + +func newPrefixedState(state *state, idCacheSizes int) *prefixedState { + return &prefixedState{ + state: state, + vtx: &cache.LRU{Size: idCacheSizes}, + status: &cache.LRU{Size: idCacheSizes}, + uniqueVtx: &cache.EvictableLRU{Size: idCacheSizes}, + } +} + +func (s *prefixedState) UniqueVertex(vtx *uniqueVertex) *uniqueVertex { + return s.uniqueVtx.Deduplicate(vtx).(*uniqueVertex) +} + +func (s *prefixedState) Vertex(id ids.ID) *vertex { + vID := ids.ID{} + if cachedVtxIDIntf, found := s.vtx.Get(id); found { + vID = cachedVtxIDIntf.(ids.ID) + } else { + vID = id.Prefix(vtxID) + s.vtx.Put(id, vID) + } + + return s.state.Vertex(vID) +} + +func (s *prefixedState) SetVertex(vtx *vertex) { + vID := ids.ID{} + if cachedVtxIDIntf, found := s.vtx.Get(vtx.id); found { + vID = cachedVtxIDIntf.(ids.ID) + } else { + vID = vtx.id.Prefix(vtxID) + s.vtx.Put(vtx.id, vID) + } + + s.state.SetVertex(vID, vtx) +} + +func (s *prefixedState) Status(id ids.ID) choices.Status { + sID := ids.ID{} + if cachedStatusIDIntf, found := s.status.Get(id); found { + sID = cachedStatusIDIntf.(ids.ID) + } else { + sID = id.Prefix(vtxStatusID) + s.status.Put(id, sID) + } + + return s.state.Status(sID) +} + +func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) { + sID := ids.ID{} + if cachedStatusIDIntf, found := s.status.Get(id); found { + sID = cachedStatusIDIntf.(ids.ID) + } else { + sID = id.Prefix(vtxStatusID) + s.status.Put(id, sID) + } + + s.state.SetStatus(sID, status) +} + +func (s *prefixedState) Edge() []ids.ID { return s.state.Edge(uniqueEdgeID) } + +func (s 
*prefixedState) SetEdge(frontier []ids.ID) { s.state.SetEdge(uniqueEdgeID, frontier) } diff --git a/snow/engine/avalanche/state/serializer.go b/snow/engine/avalanche/state/serializer.go new file mode 100644 index 0000000..f076548 --- /dev/null +++ b/snow/engine/avalanche/state/serializer.go @@ -0,0 +1,151 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Package state manages the meta-data required by consensus for an avalanche +// dag. +package state + +import ( + "errors" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" + + avacon "github.com/ava-labs/gecko/snow/consensus/avalanche" + avaeng "github.com/ava-labs/gecko/snow/engine/avalanche" +) + +const ( + dbCacheSize = 10000 + idCacheSize = 1000 +) + +var ( + errUnknownVertex = errors.New("unknown vertex") + errWrongChainID = errors.New("wrong ChainID in vertex") +) + +// Serializer manages the state of multiple vertices +type Serializer struct { + ctx *snow.Context + vm avaeng.DAGVM + state *prefixedState + db *versiondb.Database + edge ids.Set +} + +// Initialize implements the avalanche.State interface +func (s *Serializer) Initialize(ctx *snow.Context, vm avaeng.DAGVM, db database.Database) { + s.ctx = ctx + s.vm = vm + + vdb := versiondb.New(db) + dbCache := &cache.LRU{Size: dbCacheSize} + rawState := &state{ + serializer: s, + dbCache: dbCache, + db: vdb, + } + s.state = newPrefixedState(rawState, idCacheSize) + s.db = vdb + + s.edge.Add(s.state.Edge()...) 
// ParseVertex implements the avalanche.State interface
//
// The bytes are unmarshalled and verified, then the vertex is persisted (only
// if previously unknown) and returned as its canonical uniqueVertex.
func (s *Serializer) ParseVertex(b []byte) (avacon.Vertex, error) {
	vtx, err := s.parseVertex(b)
	if err != nil {
		return nil, err
	}
	if err := vtx.Verify(); err != nil {
		return nil, err
	}
	uVtx := &uniqueVertex{
		serializer: s,
		vtxID:      vtx.ID(),
	}
	// Only write the vertex down if it isn't already known.
	if uVtx.Status() == choices.Unknown {
		uVtx.setVertex(vtx)
	}

	// NOTE(review): the commit error is discarded — a failed flush would go
	// unnoticed by the caller. Confirm this is intentional.
	s.db.Commit()
	return uVtx, nil
}

// BuildVertex implements the avalanche.State interface
//
// The new vertex references [parentSet] and [txs] in canonical sorted order;
// its height is one more than its highest parent, and its ID is the hash of
// its serialized bytes.
func (s *Serializer) BuildVertex(parentSet ids.Set, txs []snowstorm.Tx) (avacon.Vertex, error) {
	parentIDs := parentSet.List()
	ids.SortIDs(parentIDs)
	sortTxs(txs)

	height := uint64(0)
	for _, parentID := range parentIDs {
		parent, err := s.getVertex(parentID)
		if err != nil {
			return nil, err
		}
		height = math.Max64(height, parent.v.vtx.height)
	}

	vtx := &vertex{
		chainID:   s.ctx.ChainID,
		height:    height + 1,
		parentIDs: parentIDs,
		txs:       txs,
	}

	bytes, err := vtx.Marshal()
	if err != nil {
		return nil, err
	}
	vtx.bytes = bytes
	vtx.id = ids.NewID(hashing.ComputeHash256Array(vtx.bytes))

	uVtx := &uniqueVertex{
		serializer: s,
		vtxID:      vtx.ID(),
	}
	// It is possible this vertex already exists in the database, even though we
	// just made it.
	if uVtx.Status() == choices.Unknown {
		uVtx.setVertex(vtx)
	}

	// NOTE(review): commit error ignored here as well — see ParseVertex.
	s.db.Commit()
	return uVtx, nil
}

// GetVertex implements the avalanche.State interface
func (s *Serializer) GetVertex(vtxID ids.ID) (avacon.Vertex, error) { return s.getVertex(vtxID) }

// Edge implements the avalanche.State interface
func (s *Serializer) Edge() []ids.ID { return s.edge.List() }

// parseVertex unmarshals [b] and checks that it belongs to this chain. It
// does NOT verify the vertex or touch the database.
func (s *Serializer) parseVertex(b []byte) (*vertex, error) {
	vtx := &vertex{}
	if err := vtx.Unmarshal(b, s.vm); err != nil {
		return nil, err
	} else if !vtx.chainID.Equals(s.ctx.ChainID) {
		return nil, errWrongChainID
	}
	return vtx, nil
}

// getVertex wraps [vtxID] in its canonical uniqueVertex, reporting
// errUnknownVertex when nothing is stored under that ID.
func (s *Serializer) getVertex(vtxID ids.ID) (*uniqueVertex, error) {
	vtx := &uniqueVertex{
		serializer: s,
		vtxID:      vtxID,
	}
	if vtx.Status() == choices.Unknown {
		return nil, errUnknownVertex
	}
	return vtx, nil
}
// state is the raw cache-backed key/value layer under prefixedState. Every
// accessor caches both hits and misses in dbCache.
type state struct {
	serializer *Serializer

	dbCache cache.Cacher
	db      database.Database
}

// Vertex loads the vertex stored under [id], or nil if absent/unparsable.
func (s *state) Vertex(id ids.ID) *vertex {
	if vtxIntf, found := s.dbCache.Get(id); found {
		// A cached nil (type-assertion failure) represents a known miss.
		vtx, _ := vtxIntf.(*vertex)
		return vtx
	}

	if b, err := s.db.Get(id.Bytes()); err == nil {
		// The key was in the database
		if vtx, err := s.serializer.parseVertex(b); err == nil {
			s.dbCache.Put(id, vtx) // Cache the element
			return vtx
		}
		// Stored bytes that fail to parse indicate corruption; log loudly.
		s.serializer.ctx.Log.Error("Parsing failed on saved vertex.\nPrefixed key = %s\nBytes = %s",
			id,
			formatting.DumpBytes{Bytes: b})
	}

	s.dbCache.Put(id, nil) // Cache the miss
	return nil
}

// SetVertex stores [vtx] under [id]; a nil vtx deletes the entry.
// NOTE(review): db Put/Delete errors are ignored throughout this type —
// confirm that silent write failures are acceptable here.
func (s *state) SetVertex(id ids.ID, vtx *vertex) {
	s.dbCache.Put(id, vtx)

	if vtx == nil {
		s.db.Delete(id.Bytes())
		return
	}

	s.db.Put(id.Bytes(), vtx.bytes)
}

// Status loads the status stored under [id]; missing or malformed entries
// report choices.Unknown.
func (s *state) Status(id ids.ID) choices.Status {
	if statusIntf, found := s.dbCache.Get(id); found {
		status, _ := statusIntf.(choices.Status)
		return status
	}

	if b, err := s.db.Get(id.Bytes()); err == nil {
		// The key was in the database
		p := wrappers.Packer{Bytes: b}
		status := choices.Status(p.UnpackInt())
		// Require the packer to consume exactly the stored bytes.
		if p.Offset == len(b) && !p.Errored() {
			s.dbCache.Put(id, status)
			return status
		}
		s.serializer.ctx.Log.Error("Parsing failed on saved status.\nPrefixed key = %s\nBytes = \n%s",
			id,
			formatting.DumpBytes{Bytes: b})
	}

	s.dbCache.Put(id, choices.Unknown)
	return choices.Unknown
}

// SetStatus stores [status] under [id]; Unknown deletes the entry so the
// database never holds redundant zero-state records.
func (s *state) SetStatus(id ids.ID, status choices.Status) {
	s.dbCache.Put(id, status)

	if status == choices.Unknown {
		s.db.Delete(id.Bytes())
		return
	}

	p := wrappers.Packer{Bytes: make([]byte, 4)}

	p.PackInt(uint32(status))

	s.serializer.ctx.Log.AssertNoError(p.Err)
	s.serializer.ctx.Log.AssertTrue(p.Offset == len(p.Bytes), "Wrong offset after packing")

	s.db.Put(id.Bytes(), p.Bytes)
}

// Edge loads the ID list (accepted frontier) stored under [id], or nil if
// absent/malformed.
func (s *state) Edge(id ids.ID) []ids.ID {
	if frontierIntf, found := s.dbCache.Get(id); found {
		frontier, _ := frontierIntf.([]ids.ID)
		return frontier
	}

	if b, err := s.db.Get(id.Bytes()); err == nil {
		p := wrappers.Packer{Bytes: b}

		// Layout: uint32 count followed by that many fixed-size hashes.
		frontier := []ids.ID{}
		for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- {
			id, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen))
			frontier = append(frontier, id)
		}

		if p.Offset == len(b) && !p.Errored() {
			s.dbCache.Put(id, frontier)
			return frontier
		}
		s.serializer.ctx.Log.Error("Parsing failed on saved ids.\nPrefixed key = %s\nBytes = %s",
			id,
			formatting.DumpBytes{Bytes: b})
	}

	s.dbCache.Put(id, nil) // Cache the miss
	return nil
}

// SetEdge stores [frontier] under [id]; an empty frontier deletes the entry.
func (s *state) SetEdge(id ids.ID, frontier []ids.ID) {
	s.dbCache.Put(id, frontier)

	if len(frontier) == 0 {
		s.db.Delete(id.Bytes())
		return
	}

	size := wrappers.IntLen + hashing.HashLen*len(frontier)
	p := wrappers.Packer{Bytes: make([]byte, size)}

	p.PackInt(uint32(len(frontier)))
	for _, id := range frontier {
		p.PackFixedBytes(id.Bytes())
	}

	s.serializer.ctx.Log.AssertNoError(p.Err)
	s.serializer.ctx.Log.AssertTrue(p.Offset == len(p.Bytes), "Wrong offset after packing")

	s.db.Put(id.Bytes(), p.Bytes)
}
+ +package state + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/utils/formatting" +) + +// uniqueVertex acts as a cache for vertices in the database. +// +// If a vertex is loaded, it will have one canonical uniqueVertex. The vertex +// will eventually be evicted from memory, when the uniqueVertex is evicted from +// the cache. If the uniqueVertex has a function called again afther this +// eviction, the vertex will be re-loaded from the database. +type uniqueVertex struct { + serializer *Serializer + + vtxID ids.ID + v *vertexState +} + +func (vtx *uniqueVertex) refresh() { + if vtx.v == nil { + vtx.v = &vertexState{} + } + if !vtx.v.unique { + unique := vtx.serializer.state.UniqueVertex(vtx) + prevVtx := vtx.v.vtx + if unique == vtx { + vtx.v.status = vtx.serializer.state.Status(vtx.ID()) + vtx.v.unique = true + } else { + // If someone is in the cache, they must be up to date + *vtx = *unique + } + + switch { + case vtx.v.vtx == nil && prevVtx == nil: + vtx.v.vtx = vtx.serializer.state.Vertex(vtx.ID()) + case vtx.v.vtx == nil: + vtx.v.vtx = prevVtx + } + } +} + +func (vtx *uniqueVertex) Evict() { + if vtx.v != nil { + vtx.v.unique = false + } +} + +func (vtx *uniqueVertex) setVertex(innerVtx *vertex) { + vtx.refresh() + if vtx.v.vtx == nil { + vtx.v.vtx = innerVtx + vtx.serializer.state.SetVertex(innerVtx) + vtx.setStatus(choices.Processing) + } +} + +func (vtx *uniqueVertex) setStatus(status choices.Status) { + vtx.refresh() + if vtx.v.status != status { + vtx.serializer.state.SetStatus(vtx.ID(), status) + vtx.v.status = status + } +} + +func (vtx *uniqueVertex) ID() ids.ID { return vtx.vtxID } + +func (vtx *uniqueVertex) Accept() { + vtx.setStatus(choices.Accepted) + + vtx.serializer.edge.Add(vtx.vtxID) + for _, parent := range vtx.Parents() { + 
vtx.serializer.edge.Remove(parent.ID()) + } + + vtx.serializer.state.SetEdge(vtx.serializer.edge.List()) + + // Should never traverse into parents of a decided vertex. Allows for the + // parents to be garbage collected + vtx.v.parents = nil + + vtx.serializer.db.Commit() +} + +func (vtx *uniqueVertex) Reject() { + vtx.setStatus(choices.Rejected) + + // Should never traverse into parents of a decided vertex. Allows for the + // parents to be garbage collected + vtx.v.parents = nil + + vtx.serializer.db.Commit() +} + +func (vtx *uniqueVertex) Status() choices.Status { vtx.refresh(); return vtx.v.status } + +func (vtx *uniqueVertex) Parents() []avalanche.Vertex { + vtx.refresh() + + if len(vtx.v.parents) != len(vtx.v.vtx.parentIDs) { + vtx.v.parents = make([]avalanche.Vertex, len(vtx.v.vtx.parentIDs)) + for i, parentID := range vtx.v.vtx.parentIDs { + vtx.v.parents[i] = &uniqueVertex{ + serializer: vtx.serializer, + vtxID: parentID, + } + } + } + + return vtx.v.parents +} + +func (vtx *uniqueVertex) Txs() []snowstorm.Tx { + vtx.refresh() + + if len(vtx.v.vtx.txs) != len(vtx.v.txs) { + vtx.v.txs = make([]snowstorm.Tx, len(vtx.v.vtx.txs)) + for i, tx := range vtx.v.vtx.txs { + vtx.v.txs[i] = tx + } + } + + return vtx.v.txs +} + +func (vtx *uniqueVertex) Bytes() []byte { return vtx.v.vtx.Bytes() } + +func (vtx *uniqueVertex) Verify() error { return vtx.v.vtx.Verify() } + +func (vtx *uniqueVertex) String() string { + sb := strings.Builder{} + + parents := vtx.Parents() + txs := vtx.Txs() + + sb.WriteString(fmt.Sprintf( + "Vertex(ID = %s, Status = %s, Number of Dependencies = %d, Number of Transactions = %d)", + vtx.ID(), + vtx.Status(), + len(parents), + len(txs), + )) + + parentFormat := fmt.Sprintf("\n Parent[%s]: ID = %%s, Status = %%s", + formatting.IntFormat(len(parents)-1)) + for i, parent := range parents { + sb.WriteString(fmt.Sprintf(parentFormat, i, parent.ID(), parent.Status())) + } + + txFormat := fmt.Sprintf("\n Transaction[%s]: ID = %%s, Status = %%s", + 
formatting.IntFormat(len(txs)-1)) + for i, tx := range txs { + sb.WriteString(fmt.Sprintf(txFormat, i, tx.ID(), tx.Status())) + } + + return sb.String() +} + +type vertexState struct { + unique bool + + vtx *vertex + status choices.Status + + parents []avalanche.Vertex + txs []snowstorm.Tx +} diff --git a/snow/engine/avalanche/state/vertex.go b/snow/engine/avalanche/state/vertex.go new file mode 100644 index 0000000..928c11c --- /dev/null +++ b/snow/engine/avalanche/state/vertex.go @@ -0,0 +1,144 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/avalanche" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// maxSize is the maximum allowed vertex size. It is necessary to deter DoS. 
+const maxSize = 1 << 20 + +var ( + errBadCodec = errors.New("invalid codec") + errExtraSpace = errors.New("trailing buffer space") + errInvalidParents = errors.New("vertex contains non-sorted or duplicated parentIDs") + errInvalidTxs = errors.New("vertex contains non-sorted or duplicated transactions") +) + +type vertex struct { + id ids.ID + + chainID ids.ID + height uint64 + + parentIDs []ids.ID + txs []snowstorm.Tx + + bytes []byte +} + +func (vtx *vertex) ID() ids.ID { return vtx.id } +func (vtx *vertex) Bytes() []byte { return vtx.bytes } + +func (vtx *vertex) Verify() error { + switch { + case !ids.IsSortedAndUniqueIDs(vtx.parentIDs): + return errInvalidParents + case !isSortedAndUniqueTxs(vtx.txs): + return errInvalidTxs + default: + return nil + } +} + +/* + * Vertex: + * Codec | 04 Bytes + * Chain | 32 Bytes + * Height | 08 Bytes + * NumParents | 04 Bytes + * Repeated (NumParents): + * ParentID | 32 bytes + * NumTxs | 04 Bytes + * Repeated (NumTxs): + * TxSize | 04 bytes + * Tx | ?? bytes + */ + +// Marshal creates the byte representation of the vertex +func (vtx *vertex) Marshal() ([]byte, error) { + p := wrappers.Packer{MaxSize: maxSize} + + p.PackInt(uint32(CustomID)) + p.PackFixedBytes(vtx.chainID.Bytes()) + p.PackLong(vtx.height) + + p.PackInt(uint32(len(vtx.parentIDs))) + for _, parentID := range vtx.parentIDs { + p.PackFixedBytes(parentID.Bytes()) + } + + p.PackInt(uint32(len(vtx.txs))) + for _, tx := range vtx.txs { + p.PackBytes(tx.Bytes()) + } + return p.Bytes, p.Err +} + +// Unmarshal attempts to set the contents of this vertex to the value encoded in +// the stream of bytes. 
+func (vtx *vertex) Unmarshal(b []byte, vm avalanche.DAGVM) error { + p := wrappers.Packer{Bytes: b} + + if codecID := ID(p.UnpackInt()); codecID != CustomID { + p.Add(errBadCodec) + } + + chainID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + height := p.UnpackLong() + + parentIDs := []ids.ID(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + parentID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + parentIDs = append(parentIDs, parentID) + } + + txs := []snowstorm.Tx(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + tx, err := vm.ParseTx(p.UnpackBytes()) + p.Add(err) + txs = append(txs, tx) + } + + if p.Offset != len(b) { + p.Add(errExtraSpace) + } + + if p.Errored() { + return p.Err + } + + *vtx = vertex{ + id: ids.NewID(hashing.ComputeHash256Array(b)), + parentIDs: parentIDs, + chainID: chainID, + height: height, + txs: txs, + bytes: b, + } + return nil +} + +type sortTxsData []snowstorm.Tx + +func (txs sortTxsData) Less(i, j int) bool { + return bytes.Compare(txs[i].ID().Bytes(), txs[j].ID().Bytes()) == -1 +} +func (txs sortTxsData) Len() int { return len(txs) } +func (txs sortTxsData) Swap(i, j int) { txs[j], txs[i] = txs[i], txs[j] } + +func sortTxs(txs []snowstorm.Tx) { sort.Sort(sortTxsData(txs)) } +func isSortedAndUniqueTxs(txs []snowstorm.Tx) bool { + return utils.IsSortedAndUnique(sortTxsData(txs)) +} diff --git a/snow/engine/avalanche/state_test.go b/snow/engine/avalanche/state_test.go new file mode 100644 index 0000000..e9e8ff9 --- /dev/null +++ b/snow/engine/avalanche/state_test.go @@ -0,0 +1,87 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "errors" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +var ( + errParseVertex = errors.New("unexpectedly called ParseVertex") + errBuildVertex = errors.New("unexpectedly called BuildVertex") + errGetVertex = errors.New("unexpectedly called GetVertex") +) + +type stateTest struct { + t *testing.T + + cantParseVertex, cantBuildVertex, cantGetVertex, cantEdge, cantSaveEdge bool + + parseVertex func([]byte) (avalanche.Vertex, error) + buildVertex func(ids.Set, []snowstorm.Tx) (avalanche.Vertex, error) + getVertex func(ids.ID) (avalanche.Vertex, error) + + edge func() []ids.ID + saveEdge func([]ids.ID) +} + +func (s *stateTest) Default(cant bool) { + s.cantParseVertex = cant + s.cantBuildVertex = cant + s.cantGetVertex = cant + s.cantEdge = cant + s.cantSaveEdge = cant +} + +func (s *stateTest) ParseVertex(b []byte) (avalanche.Vertex, error) { + if s.parseVertex != nil { + return s.parseVertex(b) + } else if s.cantParseVertex && s.t != nil { + s.t.Fatal(errParseVertex) + } + return nil, errParseVertex +} + +func (s *stateTest) BuildVertex(set ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + if s.buildVertex != nil { + return s.buildVertex(set, txs) + } + if s.cantBuildVertex && s.t != nil { + s.t.Fatal(errBuildVertex) + } + return nil, errBuildVertex +} + +func (s *stateTest) GetVertex(id ids.ID) (avalanche.Vertex, error) { + if s.getVertex != nil { + return s.getVertex(id) + } + if s.cantGetVertex && s.t != nil { + s.t.Fatal(errGetVertex) + } + return nil, errGetVertex +} + +func (s *stateTest) Edge() []ids.ID { + if s.edge != nil { + return s.edge() + } + if s.cantEdge && s.t != nil { + s.t.Fatalf("Unexpectedly called Edge") + } + return nil +} + +func (s *stateTest) SaveEdge(idList []ids.ID) { + if s.saveEdge != nil { + s.saveEdge(idList) + } else if s.cantSaveEdge && s.t != nil { + s.t.Fatalf("Unexpectedly 
called SaveEdge") + } +} diff --git a/snow/engine/avalanche/test_vm.go b/snow/engine/avalanche/test_vm.go new file mode 100644 index 0000000..536b02c --- /dev/null +++ b/snow/engine/avalanche/test_vm.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" +) + +var ( + errParseTx = errors.New("unexpectedly called ParseTx") + errIssueTx = errors.New("unexpectedly called IssueTx") + errGetTx = errors.New("unexpectedly called GetTx") +) + +// VMTest ... +type VMTest struct { + common.VMTest + + CantPendingTxs, CantParseTx, CantIssueTx, CantGetTx bool + + PendingTxsF func() []snowstorm.Tx + ParseTxF func([]byte) (snowstorm.Tx, error) + IssueTxF func([]byte, func(choices.Status), func(choices.Status)) (ids.ID, error) + GetTxF func(ids.ID) (snowstorm.Tx, error) +} + +// Default ... +func (vm *VMTest) Default(cant bool) { + vm.VMTest.Default(cant) + + vm.CantPendingTxs = cant + vm.CantParseTx = cant + vm.CantIssueTx = cant + vm.CantGetTx = cant +} + +// PendingTxs ... +func (vm *VMTest) PendingTxs() []snowstorm.Tx { + if vm.PendingTxsF != nil { + return vm.PendingTxsF() + } + if vm.CantPendingTxs && vm.T != nil { + vm.T.Fatalf("Unexpectedly called PendingTxs") + } + return nil +} + +// ParseTx ... +func (vm *VMTest) ParseTx(b []byte) (snowstorm.Tx, error) { + if vm.ParseTxF != nil { + return vm.ParseTxF(b) + } + if vm.CantParseTx && vm.T != nil { + vm.T.Fatal(errParseTx) + } + return nil, errParseTx +} + +// IssueTx ... 
+func (vm *VMTest) IssueTx(b []byte, issued, finalized func(choices.Status)) (ids.ID, error) { + if vm.IssueTxF != nil { + return vm.IssueTxF(b, issued, finalized) + } + if vm.CantIssueTx && vm.T != nil { + vm.T.Fatal(errIssueTx) + } + return ids.ID{}, errIssueTx +} + +// GetTx ... +func (vm *VMTest) GetTx(txID ids.ID) (snowstorm.Tx, error) { + if vm.GetTxF != nil { + return vm.GetTxF(txID) + } + if vm.CantGetTx && vm.T != nil { + vm.T.Fatal(errGetTx) + } + return nil, errGetTx +} diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go new file mode 100644 index 0000000..4d6617f --- /dev/null +++ b/snow/engine/avalanche/transitive.go @@ -0,0 +1,353 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/random" +) + +// Transitive implements the Engine interface by attempting to fetch all +// transitive dependencies. 
+type Transitive struct { + Config + bootstrapper + + polls polls // track people I have asked for their preference + + // vtxReqs prevents asking validators for the same vertex + // missingTxs tracks transaction that are missing + vtxReqs, missingTxs, pending ids.Set + + // vtxBlocked tracks operations that are blocked on vertices + // txBlocked tracks operations that are blocked on transactions + vtxBlocked, txBlocked events.Blocker + + bootstrapped bool +} + +// Initialize implements the Engine interface +func (t *Transitive) Initialize(config Config) { + config.Context.Log.Info("Initializing Avalanche consensus") + + t.Config = config + t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics) + + t.onFinished = t.finishBootstrapping + t.bootstrapper.Initialize(config.BootstrapConfig) + + t.polls.log = config.Context.Log + t.polls.numPolls = t.numPolls + t.polls.m = make(map[uint32]poll) +} + +func (t *Transitive) finishBootstrapping() { + // Load the vertices that were last saved as the accepted frontier + frontier := []avalanche.Vertex(nil) + for _, vtxID := range t.Config.State.Edge() { + if vtx, err := t.Config.State.GetVertex(vtxID); err == nil { + frontier = append(frontier, vtx) + } else { + t.Config.Context.Log.Error("Vertex %s failed to be loaded from the frontier with %s", vtxID, err) + } + } + t.Consensus.Initialize(t.Config.Context, t.Params, frontier) + t.bootstrapped = true +} + +// Shutdown implements the Engine interface +func (t *Transitive) Shutdown() { + t.Config.Context.Log.Info("Shutting down Avalanche consensus") + t.Config.VM.Shutdown() +} + +// Context implements the Engine interface +func (t *Transitive) Context() *snow.Context { return t.Config.Context } + +// Get implements the Engine interface +func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) { + // If this engine has access to the requested vertex, provide it + if vtx, err := t.Config.State.GetVertex(vtxID); err == nil { + 
t.Config.Sender.Put(vdr, requestID, vtxID, vtx.Bytes()) + } +} + +// Put implements the Engine interface +func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) { + t.Config.Context.Log.Verbo("Put called for vertexID %s", vtxID) + + if !t.bootstrapped { + t.bootstrapper.Put(vdr, requestID, vtxID, vtxBytes) + return + } + + vtx, err := t.Config.State.ParseVertex(vtxBytes) + if err != nil { + t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s", + err, + formatting.DumpBytes{Bytes: vtxBytes}) + t.GetFailed(vdr, requestID, vtxID) + return + } + t.insertFrom(vdr, vtx) +} + +// GetFailed implements the Engine interface +func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32, vtxID ids.ID) { + if !t.bootstrapped { + t.bootstrapper.GetFailed(vdr, requestID, vtxID) + return + } + + t.pending.Remove(vtxID) + t.vtxBlocked.Abandon(vtxID) + t.vtxReqs.Remove(vtxID) + + if t.vtxReqs.Len() == 0 { + for _, txID := range t.missingTxs.List() { + t.txBlocked.Abandon(txID) + } + t.missingTxs.Clear() + } + + // Track performance statistics + t.numVtxRequests.Set(float64(t.vtxReqs.Len())) + t.numTxRequests.Set(float64(t.missingTxs.Len())) + t.numBlockedVtx.Set(float64(t.pending.Len())) +} + +// PullQuery implements the Engine interface +func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) { + if !t.bootstrapped { + t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", vtxID) + return + } + + c := &convincer{ + consensus: t.Consensus, + sender: t.Config.Sender, + vdr: vdr, + requestID: requestID, + } + + if !t.reinsertFrom(vdr, vtxID) { + c.deps.Add(vtxID) + } + + t.vtxBlocked.Register(c) +} + +// PushQuery implements the Engine interface +func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []byte) { + if !t.bootstrapped { + t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", vtxID) + return + } + + t.Put(vdr, 
requestID, vtxID, vtx) + t.PullQuery(vdr, requestID, vtxID) +} + +// Chits implements the Engine interface +func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) { + if !t.bootstrapped { + t.Config.Context.Log.Warn("Dropping Chits due to bootstrapping") + return + } + + v := &voter{ + t: t, + vdr: vdr, + requestID: requestID, + response: votes, + } + voteList := votes.List() + for _, vote := range voteList { + if !t.reinsertFrom(vdr, vote) { + v.deps.Add(vote) + } + } + + t.vtxBlocked.Register(v) +} + +// QueryFailed implements the Engine interface +func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) { + t.Chits(vdr, requestID, ids.Set{}) +} + +// Notify implements the Engine interface +func (t *Transitive) Notify(msg common.Message) { + if !t.bootstrapped { + t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping") + return + } + + switch msg { + case common.PendingTxs: + txs := t.Config.VM.PendingTxs() + t.batch(txs, false /*=force*/, false /*=empty*/) + } +} + +func (t *Transitive) repoll() { + txs := t.Config.VM.PendingTxs() + t.batch(txs, false /*=force*/, true /*=empty*/) +} + +func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) bool { + vtx, err := t.Config.State.GetVertex(vtxID) + if err != nil { + t.sendRequest(vdr, vtxID) + return false + } + return t.insertFrom(vdr, vtx) +} + +func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) bool { + issued := true + vts := []avalanche.Vertex{vtx} + for len(vts) > 0 { + vtx := vts[0] + vts = vts[1:] + + if t.Consensus.VertexIssued(vtx) { + continue + } + if t.pending.Contains(vtx.ID()) { + issued = false + continue + } + + for _, parent := range vtx.Parents() { + if !parent.Status().Fetched() { + t.sendRequest(vdr, parent.ID()) + issued = false + } else { + vts = append(vts, parent) + } + } + + t.insert(vtx) + } + return issued +} + +func (t *Transitive) insert(vtx avalanche.Vertex) { + vtxID := vtx.ID() + + t.pending.Add(vtxID) + 
t.vtxReqs.Remove(vtxID) + + i := &issuer{ + t: t, + vtx: vtx, + } + + for _, parent := range vtx.Parents() { + if !t.Consensus.VertexIssued(parent) { + i.vtxDeps.Add(parent.ID()) + } + } + + txs := vtx.Txs() + + txIDs := ids.Set{} + for _, tx := range txs { + txIDs.Add(tx.ID()) + } + + for _, tx := range txs { + for _, dep := range tx.Dependencies() { + depID := dep.ID() + if !txIDs.Contains(depID) && !t.Consensus.TxIssued(dep) { + t.missingTxs.Add(depID) + i.txDeps.Add(depID) + } + } + } + + t.Config.Context.Log.Verbo("Vertex: %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len()) + + t.vtxBlocked.Register(&vtxIssuer{i: i}) + t.txBlocked.Register(&txIssuer{i: i}) + + if t.vtxReqs.Len() == 0 { + for _, txID := range t.missingTxs.List() { + t.txBlocked.Abandon(txID) + } + t.missingTxs.Clear() + } + + // Track performance statistics + t.numVtxRequests.Set(float64(t.vtxReqs.Len())) + t.numTxRequests.Set(float64(t.missingTxs.Len())) + t.numBlockedVtx.Set(float64(t.pending.Len())) +} + +func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) { + batch := []snowstorm.Tx(nil) + issuedTxs := ids.Set{} + consumed := ids.Set{} + issued := false + for _, tx := range txs { + inputs := tx.InputIDs() + overlaps := consumed.Overlaps(inputs) + if len(batch) >= t.Params.BatchSize || (force && overlaps) { + t.issueBatch(batch) + batch = nil + consumed.Clear() + issued = true + overlaps = false + } + + // Force allows for a conflict to be issued + if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || (t.Consensus.IsVirtuous(tx))) && !tx.Status().Decided() { + batch = append(batch, tx) + issuedTxs.Add(txID) + consumed.Union(inputs) + } + } + + if len(batch) > 0 || (empty && !issued) { + t.issueBatch(batch) + } +} + +func (t *Transitive) issueBatch(txs []snowstorm.Tx) { + t.Config.Context.Log.Verbo("Batching %d transactions into a new vertex", len(txs)) + + virtuousIDs := t.Consensus.Virtuous().List() + sampler := 
random.Uniform{N: len(virtuousIDs)} + parentIDs := ids.Set{} + for i := 0; i < t.Params.Parents && sampler.CanSample(); i++ { + parentIDs.Add(virtuousIDs[sampler.Sample()]) + } + + if vtx, err := t.Config.State.BuildVertex(parentIDs, txs); err == nil { + t.insert(vtx) + } else { + t.Config.Context.Log.Warn("Error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs)) + } +} + +func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) { + if t.vtxReqs.Contains(vtxID) { + t.Config.Context.Log.Debug("Not requesting a vertex because we have recently sent a request") + return + } + + t.vtxReqs.Add(vtxID) + + t.numVtxRequests.Set(float64(t.vtxReqs.Len())) // Tracks performance statistics + + t.RequestID++ + t.Config.Sender.Get(vdr, t.RequestID, vtxID) +} diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go new file mode 100644 index 0000000..6f5b5ed --- /dev/null +++ b/snow/engine/avalanche/transitive_test.go @@ -0,0 +1,2365 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "bytes" + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/validators" +) + +var ( + errFailedParsing = errors.New("failed parsing") + errMissing = errors.New("missing") +) + +func TestEngineAdd(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + st.cantEdge = false + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + if !te.Context().ChainID.Equals(ids.Empty) { + t.Fatalf("Wrong chain ID") + } + + vtx := &Vtx{ + parents: []avalanche.Vertex{ + &Vtx{ + id: GenerateID(), + status: choices.Unknown, + }, + }, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + asked := new(bool) + sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for vertex") + } + if !vtx.parents[0].ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + if !bytes.Equal(b, vtx.Bytes()) { + t.Fatalf("Wrong bytes") + } + return vtx, nil + } + + te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + + st.parseVertex = nil + + if !*asked { + t.Fatalf("Didn't ask for a missing vertex") + } + + if len(te.vtxBlocked) != 1 { + t.Fatalf("Should 
have been blocking on request") + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { return nil, errFailedParsing } + + te.Put(vdr.ID(), 0, vtx.parents[0].ID(), nil) + + st.parseVertex = nil + + if len(te.vtxBlocked) != 0 { + t.Fatalf("Should have finished blocking issue") + } +} + +func TestEngineQuery(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + utxos := []ids.ID{GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{Identifier: GenerateID()}, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + vertexed := new(bool) + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + if *vertexed { + t.Fatalf("Sent multiple requests") + } + *vertexed = true + if !vtxID.Equals(vtx0.ID()) { + t.Fatalf("Wrong vertex requested") + } + return nil, errUnknownVertex + } + + asked := new(bool) + sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + if 
!vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for vertex") + } + if !vtx0.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + te.PullQuery(vdr.ID(), 0, vtx0.ID()) + if !*vertexed { + t.Fatalf("Didn't request vertex") + } + if !*asked { + t.Fatalf("Didn't request vertex from validator") + } + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !vtx0.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + chitted := new(bool) + sender.ChitsF = func(inVdr ids.ShortID, _ uint32, prefs ids.Set) { + if *chitted { + t.Fatalf("Sent multiple chits") + } + *chitted = true + if !Matches(prefs.List(), []ids.ID{vtx0.ID()}) { + t.Fatalf("Wrong chits preferences") + } + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + if !bytes.Equal(b, vtx0.Bytes()) { + t.Fatalf("Wrong bytes") + } + return vtx0, nil + } + te.Put(vdr.ID(), 0, vtx0.ID(), vtx0.Bytes()) + st.parseVertex = nil + + if !*queried { + t.Fatalf("Didn't ask for preferences") + } + if !*chitted { + t.Fatalf("Didn't provide preferences") + } + + vtx1 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + bytes: []byte{5, 4, 3, 2, 1, 9}, + } + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + if vtxID.Equals(vtx0.ID()) { + return &Vtx{status: choices.Processing}, nil + } + if vtxID.Equals(vtx1.ID()) { + return nil, errUnknownVertex + } + t.Fatalf("Wrong vertex requested") + panic("Should have failed") + } + + *asked = false + sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + 
*asked = true + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for vertex") + } + if !vtx1.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + s := ids.Set{} + s.Add(vtx1.ID()) + te.Chits(vdr.ID(), *queryRequestID, s) + + *queried = false + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !vtx1.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + if !bytes.Equal(b, vtx1.Bytes()) { + t.Fatalf("Wrong bytes") + } + return vtx1, nil + } + te.Put(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) + st.parseVertex = nil + + if vtx0.Status() != choices.Accepted { + t.Fatalf("Should have executed vertex") + } + if len(te.vtxBlocked) != 0 { + t.Fatalf("Should have finished blocking") + } + + _ = te.polls.String() // Shouldn't panic + + te.QueryFailed(vdr.ID(), *queryRequestID) + if len(te.vtxBlocked) != 0 { + t.Fatalf("Should have finished blocking") + } +} + +func TestEngineMultipleQuery(t *testing.T) { + config := DefaultConfig() + + config.Params = avalanche.Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + + vdr0 := validators.GenerateRandomValidator(1) + vdr1 := validators.GenerateRandomValidator(1) + vdr2 := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr0) + vals.Add(vdr1) + vals.Add(vdr2) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + gVtx := &Vtx{ 
+ id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + utxos := []ids.ID{GenerateID()} + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{Identifier: GenerateID()}, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !vtx0.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + te.insert(vtx0) + + vtx1 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + case id.Equals(vtx0.ID()): + return vtx0, nil + case id.Equals(vtx1.ID()): + return nil, errUnknownVertex + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + asked := new(bool) + sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + if !vdr0.ID().Equals(inVdr) { + 
			t.Fatalf("Asking wrong validator for vertex")
		}
		if !vtx1.ID().Equals(vtxID) {
			t.Fatalf("Asking for wrong vertex")
		}
	}

	s0 := ids.Set{}
	s0.Add(vtx0.ID())
	s0.Add(vtx1.ID())

	s2 := ids.Set{}
	s2.Add(vtx0.ID())

	te.Chits(vdr0.ID(), *queryRequestID, s0)
	te.QueryFailed(vdr1.ID(), *queryRequestID)
	te.Chits(vdr2.ID(), *queryRequestID, s2)

	// Should be dropped because the query was marked as failed
	te.Chits(vdr1.ID(), *queryRequestID, s0)

	te.GetFailed(vdr0.ID(), 0, vtx1.ID())

	if vtx0.Status() != choices.Accepted {
		t.Fatalf("Should have executed vertex")
	}
	if len(te.vtxBlocked) != 0 {
		t.Fatalf("Should have finished blocking")
	}
}

// TestEngineBlockedIssue inserts a vertex whose parent is only known by ID
// (status Unknown), then provides the real parent and checks that the child
// ends up preferred once the dependency is satisfied.
func TestEngineBlockedIssue(t *testing.T) {
	config := DefaultConfig()

	vdr := validators.GenerateRandomValidator(1)

	vals := validators.NewSet()
	config.Validators = vals

	vals.Add(vdr)

	st := &stateTest{t: t}
	config.State = st

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}
	utxos := []ids.ID{GenerateID()}

	tx0 := &TestTx{
		TestTx: snowstorm.TestTx{Identifier: GenerateID()},
	}
	tx0.Ins.Add(utxos[0])

	vtx0 := &Vtx{
		parents: vts,
		id:      GenerateID(),
		txs:     []snowstorm.Tx{tx0},
		height:  1,
		status:  choices.Processing,
	}

	// vtx1's parent is a placeholder carrying only vtx0's ID with status
	// Unknown, so inserting vtx1 first must block on the missing parent.
	vtx1 := &Vtx{
		parents: []avalanche.Vertex{&Vtx{
			id:     vtx0.ID(),
			status: choices.Unknown,
		}},
		id:     GenerateID(),
		txs:    []snowstorm.Tx{tx0},
		height: 1,
		status: choices.Processing,
	}

	te := &Transitive{}
	te.Initialize(config)
	te.finishBootstrapping()

	te.insert(vtx1)

	// Swap in the real parent and insert it; this should unblock vtx1.
	vtx1.parents[0] = vtx0
	te.insert(vtx0)

	if !Matches(te.Consensus.Preferences().List(), []ids.ID{vtx1.ID()}) {
		t.Fatalf("Should have issued vtx1")
	}
}

// TestEngineAbandonResponse (continued on the following lines) checks that a
// failed vertex fetch abandons the pending blocking event.
func TestEngineAbandonResponse(t *testing.T) {
	config := DefaultConfig()

	vdr := validators.GenerateRandomValidator(1)

	vals := validators.NewSet()
	config.Validators = vals

	vals.Add(vdr)

	st := &stateTest{t: t}
	config.State = st

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}
	utxos := []ids.ID{GenerateID()}

	tx0 := &TestTx{
		TestTx: snowstorm.TestTx{Identifier: GenerateID()},
	}
	tx0.Ins.Add(utxos[0])

	vtx := &Vtx{
		parents: vts,
		id:      GenerateID(),
		txs:     []snowstorm.Tx{tx0},
		height:  1,
		status:  choices.Processing,
	}

	// Every vertex lookup fails, so the PullQuery below registers a fetch
	// dependency that the subsequent GetFailed must abandon.
	st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { return nil, errUnknownVertex }

	te := &Transitive{}
	te.Initialize(config)
	te.finishBootstrapping()

	te.PullQuery(vdr.ID(), 0, vtx.ID())
	te.GetFailed(vdr.ID(), 0, vtx.ID())

	if len(te.vtxBlocked) != 0 {
		t.Fatalf("Should have removed blocking event")
	}
}

// TestEngineScheduleRepoll issues a vertex, fails the resulting query, and
// checks that the engine re-polls (issues another PushQuery) afterwards.
func TestEngineScheduleRepoll(t *testing.T) {
	config := DefaultConfig()

	vdr := validators.GenerateRandomValidator(1)

	vals := validators.NewSet()
	config.Validators = vals

	vals.Add(vdr)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}
	utxos := []ids.ID{GenerateID()}

	tx0 := &TestTx{
		TestTx: snowstorm.TestTx{Identifier: GenerateID()},
	}
	tx0.Ins.Add(utxos[0])

	vtx := &Vtx{
		parents: vts,
		id:      GenerateID(),
		txs:     []snowstorm.Tx{tx0},
		height:  1,
		status:  choices.Processing,
	}

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)
	st.cantEdge = false

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	te := &Transitive{}
	te.Initialize(config)
	te.finishBootstrapping()

	// Capture the request ID of the query triggered by the insert so the
	// query can be failed below.
	requestID := new(uint32)
	sender.PushQueryF = func(_ ids.ShortSet, reqID uint32, _ ids.ID, _ []byte) {
		*requestID = reqID
	}

	te.insert(vtx)

	sender.PushQueryF = nil

	st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {
		consumers := []snowstorm.Tx{}
		for _, tx := range txs {
			consumers = append(consumers, tx)
		}
		return &Vtx{
			parents: []avalanche.Vertex{gVtx, mVtx},
			id:      GenerateID(),
			txs:     consumers,
			status:  choices.Processing,
			bytes:   []byte{1},
		}, nil
	}

	// A repoll manifests as a fresh PushQuery after the failure.
	repolled := new(bool)
	sender.PushQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID, _ []byte) {
		*repolled = true
	}

	te.QueryFailed(vdr.ID(), *requestID)

	if !*repolled {
		t.Fatalf("Should have issued a noop")
	}
}

// TestEngineRejectDoubleSpendTx (continued on the following lines) feeds two
// transactions consuming the same UTXO through the mempool notification path.
func TestEngineRejectDoubleSpendTx(t *testing.T) {
	config := DefaultConfig()

	config.Params.BatchSize = 2

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	vdr := validators.GenerateRandomValidator(1)

	vals := validators.NewSet()
	config.Validators = vals

	vals.Add(vdr)

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)

	vm := &VMTest{}
	vm.T = t
	config.VM = vm

	vm.Default(true)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	gTx := &TestTx{
		TestTx: snowstorm.TestTx{
			Identifier: GenerateID(),
			Stat:       choices.Accepted,
		},
	}

	utxos := []ids.ID{GenerateID()}

	// tx0 and tx1 both consume utxos[0]: a deliberate double spend.
	tx0 := &TestTx{
		TestTx: snowstorm.TestTx{
			Identifier: GenerateID(),
			Deps:       []snowstorm.Tx{gTx},
			Stat:       choices.Processing,
		},
	}
	tx0.Ins.Add(utxos[0])

	tx1 := &TestTx{
		TestTx: snowstorm.TestTx{
			Identifier: GenerateID(),
			Deps:       []snowstorm.Tx{gTx},
			Stat:       choices.Processing,
		},
	}
	tx1.Ins.Add(utxos[0])

	st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} }
	st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
		switch {
		case id.Equals(gVtx.ID()):
			return gVtx, nil
		case id.Equals(mVtx.ID()):
			return mVtx, nil
		}
		t.Fatalf("Unknown vertex")
panic("Should have errored") + } + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + return &Vtx{ + parents: []avalanche.Vertex{gVtx, mVtx}, + id: GenerateID(), + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + }, nil + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + sender.CantPushQuery = false + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } + te.Notify(common.PendingTxs) +} + +func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { + config := DefaultConfig() + + config.Params.BatchSize = 2 + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + gTx := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Accepted, + }, + } + + utxos := []ids.ID{GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx1.Ins.Add(utxos[0]) + + st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown 
vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + return &Vtx{ + parents: []avalanche.Vertex{gVtx, mVtx}, + id: GenerateID(), + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + }, nil + } + + sender.CantPushQuery = false + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0} } + te.Notify(common.PendingTxs) + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx1} } + te.Notify(common.PendingTxs) +} + +func TestEngineIssueRepoll(t *testing.T) { + config := DefaultConfig() + + config.Params.BatchSize = 2 + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + newVtxID := new(ids.ID) + + st.buildVertex = func(s ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + if len(txs) != 0 { + t.Fatalf("Wrong vertex issued") + } + if s.Len() != 2 || !s.Contains(gVtx.ID()) || !s.Contains(mVtx.ID()) { + t.Fatalf("Wrong vertex issued") + } + + vtx := &Vtx{ + parents: 
[]avalanche.Vertex{gVtx, mVtx}, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + *newVtxID = vtx.ID() + return vtx, nil + } + + sender.PushQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID, vtx []byte) { + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !vdrs.Equals(vdrSet) || !vtxID.Equals(*newVtxID) { + t.Fatalf("Wrong query message") + } + } + + te.repoll() +} + +func TestEngineReissue(t *testing.T) { + config := DefaultConfig() + config.Params.BatchSize = 2 + config.Params.BetaVirtuous = 5 + config.Params.BetaRogue = 5 + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + gTx := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Accepted, + }, + } + + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx1.Ins.Add(utxos[1]) + + tx2 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx2.Ins.Add(utxos[1]) + + tx3 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx3.Ins.Add(utxos[0]) + + vtx := &Vtx{ + parents: []avalanche.Vertex{gVtx, mVtx}, + 
txs: []snowstorm.Tx{tx2}, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{42}, + } + + st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + case id.Equals(vtx.ID()): + return vtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + lastVtx := new(Vtx) + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + lastVtx = &Vtx{ + parents: []avalanche.Vertex{gVtx, mVtx}, + id: GenerateID(), + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + } + return lastVtx, nil + } + + vm.GetTxF = func(id ids.ID) (snowstorm.Tx, error) { + if !id.Equals(tx0.ID()) { + t.Fatalf("Wrong tx") + } + return tx0, nil + } + + queryRequestID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, _ ids.ID, _ []byte) { + *queryRequestID = requestID + } + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } + te.Notify(common.PendingTxs) + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + if !bytes.Equal(b, vtx.Bytes()) { + t.Fatalf("Wrong bytes") + } + return vtx, nil + } + te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes()) + st.parseVertex = nil + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx3} } + te.Notify(common.PendingTxs) + + s := ids.Set{} + s.Add(vtx.ID()) + te.Chits(vdr.ID(), *queryRequestID, s) + + if len(lastVtx.txs) != 1 || !lastVtx.txs[0].ID().Equals(tx0.ID()) { + t.Fatalf("Should have re-issued the tx") + } +} + +func TestEngineLargeIssue(t *testing.T) { + config := DefaultConfig() + config.Params.BatchSize = 1 + config.Params.BetaVirtuous = 5 + config.Params.BetaRogue = 5 
+ + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + gTx := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Accepted, + }, + } + + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{gTx}, + Stat: choices.Processing, + }, + } + tx1.Ins.Add(utxos[1]) + + st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + lastVtx := new(Vtx) + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + lastVtx = &Vtx{ + parents: []avalanche.Vertex{gVtx, mVtx}, + id: GenerateID(), + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + } + return lastVtx, nil + } + + sender.CantPushQuery = false + + vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } + te.Notify(common.PendingTxs) + + if len(lastVtx.txs) != 1 || 
		!lastVtx.txs[0].ID().Equals(tx1.ID()) {
		t.Fatalf("Should have issued txs differently")
	}
}

// TestEngineGetVertex checks that a Get request for a known accepted vertex
// is answered with a Put carrying that vertex.
func TestEngineGetVertex(t *testing.T) {
	config := DefaultConfig()

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	// Note: vdr is used only as the requesting peer; it is never added to a
	// validator set in this test.
	vdr := validators.GenerateRandomValidator(1)

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} }
	st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
		switch {
		case id.Equals(gVtx.ID()):
			return gVtx, nil
		case id.Equals(mVtx.ID()):
			return mVtx, nil
		}
		t.Fatalf("Unknown vertex")
		panic("Should have errored")
	}

	te := &Transitive{}
	te.Initialize(config)
	te.finishBootstrapping()

	// The response must go back to the requester and carry mVtx.
	sender.PutF = func(v ids.ShortID, _ uint32, vtxID ids.ID, vtx []byte) {
		if !v.Equals(vdr.ID()) {
			t.Fatalf("Wrong validator")
		}
		if !mVtx.ID().Equals(vtxID) {
			t.Fatalf("Wrong vertex")
		}
	}

	te.Get(vdr.ID(), 0, mVtx.ID())
}

// TestEngineInsufficientValidators (continued on the following lines) inserts
// a vertex with an empty validator set and expects no query to be sent.
func TestEngineInsufficientValidators(t *testing.T) {
	config := DefaultConfig()

	vals := validators.NewSet()
	config.Validators = vals

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}

	vtx := &Vtx{
		parents: vts,
		id:      GenerateID(),
		height:  1,
		status:  choices.Processing,
		bytes:   []byte{0, 1, 2, 3},
	}

	st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} }
	st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
		switch {
+ case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + queried := new(bool) + sender.PushQueryF = func(inVdrs ids.ShortSet, _ uint32, vtxID ids.ID, vtx []byte) { + *queried = true + } + + te.insert(vtx) + + if *queried { + t.Fatalf("Unknown query") + } +} + +func TestEnginePushGossip(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + vals := validators.NewSet() + vals.Add(vdr) + config.Validators = vals + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + + vtx := &Vtx{ + parents: vts, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + case id.Equals(vtx.ID()): + return vtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + requested := new(bool) + sender.GetF = func(vdr ids.ShortID, _ uint32, vtxID ids.ID) { + *requested = true + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + if bytes.Equal(b, vtx.bytes) { + return vtx, nil + } + t.Fatalf("Unknown vertex bytes") + panic("Should have errored") + } + + sender.CantPushQuery = false + sender.CantChits = false + te.PushQuery(vdr.ID(), 0, vtx.ID(), 
		vtx.Bytes())

	if *requested {
		t.Fatalf("Shouldn't have requested the vertex")
	}
}

// TestEngineSingleQuery inserts a vertex whose whole ancestry is already
// known; the insert should complete using only push/pull queries (both of
// which are allowed below) without any further fetches.
func TestEngineSingleQuery(t *testing.T) {
	config := DefaultConfig()

	vdr := validators.GenerateRandomValidator(1)
	vals := validators.NewSet()
	vals.Add(vdr)
	config.Validators = vals

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}

	vtx := &Vtx{
		parents: vts,
		id:      GenerateID(),
		height:  1,
		status:  choices.Processing,
		bytes:   []byte{0, 1, 2, 3},
	}

	st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} }
	st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
		switch {
		case id.Equals(gVtx.ID()):
			return gVtx, nil
		case id.Equals(mVtx.ID()):
			return mVtx, nil
		case id.Equals(vtx.ID()):
			return vtx, nil
		}
		t.Fatalf("Unknown vertex")
		panic("Should have errored")
	}

	te := &Transitive{}
	te.Initialize(config)
	te.finishBootstrapping()

	sender.CantPushQuery = false
	sender.CantPullQuery = false

	te.insert(vtx)
}

// TestEngineParentBlockingInsert (continued on the following lines) inserts
// two descendants of an unknown vertex and expects both to stay blocked until
// the missing ancestor is inserted.
func TestEngineParentBlockingInsert(t *testing.T) {
	config := DefaultConfig()

	vdr := validators.GenerateRandomValidator(1)
	vals := validators.NewSet()
	vals.Add(vdr)
	config.Validators = vals

	sender := &common.SenderTest{}
	sender.T = t
	config.Sender = sender

	sender.Default(true)
	sender.CantGetAcceptedFrontier = false

	st := &stateTest{t: t}
	config.State = st

	st.Default(true)

	gVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}
	mVtx := &Vtx{
		id:     GenerateID(),
		status: choices.Accepted,
	}

	vts := []avalanche.Vertex{gVtx, mVtx}

	missingVtx := &Vtx{
		parents: vts,
		id:      GenerateID(),
		height:  1,
		status:
choices.Unknown, + bytes: []byte{0, 1, 2, 3}, + } + + parentVtx := &Vtx{ + parents: []avalanche.Vertex{missingVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + blockingVtx := &Vtx{ + parents: []avalanche.Vertex{parentVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.insert(parentVtx) + te.insert(blockingVtx) + + if len(te.vtxBlocked) != 2 { + t.Fatalf("Both inserts should be blocking") + } + + sender.CantPushQuery = false + + missingVtx.status = choices.Processing + te.insert(missingVtx) + + if len(te.vtxBlocked) != 0 { + t.Fatalf("Both inserts should not longer be blocking") + } +} + +func TestEngineBlockingChitRequest(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + vals := validators.NewSet() + vals.Add(vdr) + config.Validators = vals + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + + missingVtx := &Vtx{ + parents: vts, + id: GenerateID(), + height: 1, + status: choices.Unknown, + bytes: []byte{0, 1, 2, 3}, + } + + parentVtx := &Vtx{ + parents: []avalanche.Vertex{missingVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{1, 1, 2, 3}, + } + + blockingVtx := 
&Vtx{ + parents: []avalanche.Vertex{parentVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{2, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.insert(parentVtx) + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(blockingVtx.ID()): + return blockingVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(b, blockingVtx.Bytes()): + return blockingVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te.PushQuery(vdr.ID(), 0, blockingVtx.ID(), blockingVtx.Bytes()) + + if len(te.vtxBlocked) != 3 { + t.Fatalf("Both inserts and the query should be blocking") + } + + sender.CantPushQuery = false + sender.CantChits = false + + missingVtx.status = choices.Processing + te.insert(missingVtx) + + if len(te.vtxBlocked) != 0 { + t.Fatalf("Both inserts should not longer be blocking") + } +} + +func TestEngineBlockingChitResponse(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + vals := validators.NewSet() + vals.Add(vdr) + config.Validators = vals + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + + issuedVtx := &Vtx{ + parents: vts, 
+ id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + missingVtx := &Vtx{ + parents: vts, + id: GenerateID(), + height: 1, + status: choices.Unknown, + bytes: []byte{0, 1, 2, 3}, + } + + blockingVtx := &Vtx{ + parents: []avalanche.Vertex{missingVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{2, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.insert(blockingVtx) + + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !issuedVtx.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + te.insert(issuedVtx) + + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(blockingVtx.ID()): + return blockingVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + voteSet := ids.Set{} + voteSet.Add(blockingVtx.ID()) + te.Chits(vdr.ID(), *queryRequestID, voteSet) + + if len(te.vtxBlocked) != 2 { + t.Fatalf("The insert should be blocking, as well as the chit response") + } + + sender.PushQueryF = nil + sender.CantPushQuery = false + sender.CantChits = false + + missingVtx.status = choices.Processing + te.insert(missingVtx) + + if len(te.vtxBlocked) != 0 { + t.Fatalf("Both inserts should not longer be blocking") + } +} + +func TestEngineMissingTx(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) 
+ vals := validators.NewSet() + vals.Add(vdr) + config.Validators = vals + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + + issuedVtx := &Vtx{ + parents: vts, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{0, 1, 2, 3}, + } + + missingVtx := &Vtx{ + parents: vts, + id: GenerateID(), + height: 1, + status: choices.Unknown, + bytes: []byte{0, 1, 2, 3}, + } + + blockingVtx := &Vtx{ + parents: []avalanche.Vertex{missingVtx}, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{2, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.insert(blockingVtx) + + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !issuedVtx.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + } + + te.insert(issuedVtx) + + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(blockingVtx.ID()): + return blockingVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + voteSet := ids.Set{} + voteSet.Add(blockingVtx.ID()) + te.Chits(vdr.ID(), 
*queryRequestID, voteSet) + + if len(te.vtxBlocked) != 2 { + t.Fatalf("The insert should be blocking, as well as the chit response") + } + + sender.PushQueryF = nil + sender.CantPushQuery = false + sender.CantChits = false + + missingVtx.status = choices.Processing + te.insert(missingVtx) + + if len(te.vtxBlocked) != 0 { + t.Fatalf("Both inserts should not longer be blocking") + } +} + +func TestEngineIssueBlockingTx(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx} + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Deps: []snowstorm.Tx{tx0}, + Stat: choices.Processing, + }, + } + tx1.Ins.Add(utxos[1]) + + vtx := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0, tx1}, + height: 1, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.insert(vtx) + + if prefs := te.Consensus.Preferences(); !prefs.Contains(vtx.ID()) { + t.Fatalf("Vertex should be preferred") + } +} + +func TestEngineReissueAbortedVertex(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + vdrID := vdr.ID() + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx} + + vtxID0 := 
GenerateID() + vtxID1 := GenerateID() + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + + vtx0 := &Vtx{ + parents: vts, + id: vtxID0, + height: 1, + status: choices.Unknown, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: vtxID1, + height: 2, + status: choices.Processing, + bytes: vtxBytes1, + } + + st.edge = func() []ids.ID { + return []ids.ID{gVtx.ID()} + } + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(gVtx.ID()): + return gVtx, nil + } + t.Fatalf("Unknown vertex requested") + panic("Unknown vertex requested") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + st.edge = nil + st.getVertex = nil + + requestID := new(uint32) + sender.GetF = func(vID ids.ShortID, reqID uint32, vtxID ids.ID) { + *requestID = reqID + } + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(b, vtxBytes1): + return vtx1, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return vtx1, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + + te.PushQuery(vdrID, 0, vtxID1, vtx1.Bytes()) + + sender.GetF = nil + st.parseVertex = nil + + te.GetFailed(vdrID, *requestID, vtxID0) + + requested := new(bool) + sender.GetF = func(_ ids.ShortID, _ uint32, vtxID ids.ID) { + if vtxID.Equals(vtxID0) { + *requested = true + } + } + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return vtx1, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + + te.PullQuery(vdrID, 0, vtxID1) + + if !*requested { + t.Fatalf("Should have requested the missing vertex") + } +} + +func TestEngineBootstrappingIntoConsensus(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + 
vdrID := vdr.ID() + + vals := validators.NewSet() + config.Validators = vals + config.Beacons = vals + + vals.Add(vdr) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + + utxos := []ids.ID{GenerateID(), GenerateID()} + + txID0 := GenerateID() + txID1 := GenerateID() + + txBytes0 := []byte{0} + txBytes1 := []byte{1} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID0, + Stat: choices.Processing, + }, + bytes: txBytes0, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: txID1, + Deps: []snowstorm.Tx{tx0}, + Stat: choices.Processing, + }, + bytes: txBytes1, + } + tx1.Ins.Add(utxos[1]) + + vtxID0 := GenerateID() + vtxID1 := GenerateID() + + vtxBytes0 := []byte{2} + vtxBytes1 := []byte{3} + + vtx0 := &Vtx{ + id: vtxID0, + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: vtxID1, + txs: []snowstorm.Tx{tx1}, + height: 2, + status: choices.Processing, + bytes: vtxBytes1, + } + + requested := new(bool) + requestID := new(uint32) + sender.GetAcceptedFrontierF = func(vdrs ids.ShortSet, reqID uint32) { + if vdrs.Len() != 1 { + t.Fatalf("Should have requested from the validators") + } + if !vdrs.Contains(vdrID) { + t.Fatalf("Should have requested from %s", vdrID) + } + *requested = true + *requestID = reqID + } + + te := &Transitive{} + te.Initialize(config) + te.Startup() + + sender.GetAcceptedFrontierF = nil + + if !*requested { + t.Fatalf("Should have requested from the validators during Initialize") + } + + acceptedFrontier := ids.Set{} + acceptedFrontier.Add(vtxID0) + + *requested = false + sender.GetAcceptedF = func(vdrs ids.ShortSet, reqID uint32, proposedAccepted ids.Set) { + if vdrs.Len() != 1 { + t.Fatalf("Should have requested 
from the validators") + } + if !vdrs.Contains(vdrID) { + t.Fatalf("Should have requested from %s", vdrID) + } + if !acceptedFrontier.Equals(proposedAccepted) { + t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) + } + *requested = true + *requestID = reqID + } + + te.AcceptedFrontier(vdrID, *requestID, acceptedFrontier) + + if !*requested { + t.Fatalf("Should have requested from the validators during AcceptedFrontier") + } + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return nil, errMissing + } + t.Fatalf("Unknown vertex requested") + panic("Unknown vertex requested") + } + + sender.GetF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdrID.Equals(inVdr) { + t.Fatalf("Asking wrong validator for vertex") + } + if !vtx0.ID().Equals(vtxID) { + t.Fatalf("Asking for wrong vertex") + } + *requestID = reqID + } + + te.Accepted(vdrID, *requestID, acceptedFrontier) + + st.getVertex = nil + sender.GetF = nil + + vm.ParseTxF = func(b []byte) (snowstorm.Tx, error) { + switch { + case bytes.Equal(b, txBytes0): + return tx0, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(b, vtxBytes0): + return vtx0, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + st.edge = func() []ids.ID { + return []ids.ID{vtxID0} + } + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + + te.Put(vdrID, *requestID, vtxID0, vtxBytes0) + + vm.ParseTxF = nil + st.parseVertex = nil + st.edge = nil + st.getVertex = nil + + if tx0.Status() != choices.Accepted { + t.Fatalf("Should have accepted %s", txID0) + } + if vtx0.Status() != choices.Accepted { + t.Fatalf("Should have 
accepted %s", vtxID0) + } + + st.parseVertex = func(b []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(b, vtxBytes1): + return vtx1, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + sender.ChitsF = func(inVdr ids.ShortID, _ uint32, chits ids.Set) { + if !inVdr.Equals(vdrID) { + t.Fatalf("Sent to the wrong validator") + } + + expected := ids.Set{} + expected.Add(vtxID1) + + if !expected.Equals(chits) { + t.Fatalf("Returned wrong chits") + } + } + sender.PushQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID, vtx []byte) { + if vdrs.Len() != 1 { + t.Fatalf("Should have requested from the validators") + } + if !vdrs.Contains(vdrID) { + t.Fatalf("Should have requested from %s", vdrID) + } + + if !vtxID1.Equals(vtxID) { + t.Fatalf("Sent wrong query ID") + } + if !bytes.Equal(vtxBytes1, vtx) { + t.Fatalf("Sent wrong query bytes") + } + } + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID1): + return vtx1, nil + } + t.Fatalf("Unknown bytes provided") + panic("Unknown bytes provided") + } + + te.PushQuery(vdrID, 0, vtxID1, vtxBytes1) + + st.parseVertex = nil + sender.ChitsF = nil + sender.PushQueryF = nil + st.getVertex = nil +} diff --git a/snow/engine/avalanche/tx_job.go b/snow/engine/avalanche/tx_job.go new file mode 100644 index 0000000..0462bd3 --- /dev/null +++ b/snow/engine/avalanche/tx_job.go @@ -0,0 +1,65 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common/queue" +) + +type txParser struct { + numAccepted, numDropped prometheus.Counter + vm DAGVM +} + +func (p *txParser) Parse(txBytes []byte) (queue.Job, error) { + tx, err := p.vm.ParseTx(txBytes) + if err != nil { + return nil, err + } + return &txJob{ + numAccepted: p.numAccepted, + numDropped: p.numDropped, + tx: tx, + }, nil +} + +type txJob struct { + numAccepted, numDropped prometheus.Counter + tx snowstorm.Tx +} + +func (t *txJob) ID() ids.ID { return t.tx.ID() } +func (t *txJob) MissingDependencies() ids.Set { + missing := ids.Set{} + for _, dep := range t.tx.Dependencies() { + if dep.Status() != choices.Accepted { + missing.Add(dep.ID()) + } + } + return missing +} +func (t *txJob) Execute() { + if t.MissingDependencies().Len() != 0 { + t.numDropped.Inc() + return + } + + switch t.tx.Status() { + case choices.Unknown, choices.Rejected: + t.numDropped.Inc() + case choices.Processing: + if err := t.tx.Verify(); err == nil { + t.tx.Accept() + t.numAccepted.Inc() + } else { + t.numDropped.Inc() + } + } +} +func (t *txJob) Bytes() []byte { return t.tx.Bytes() } diff --git a/snow/engine/avalanche/vertex_job.go b/snow/engine/avalanche/vertex_job.go new file mode 100644 index 0000000..2de19a0 --- /dev/null +++ b/snow/engine/avalanche/vertex_job.go @@ -0,0 +1,66 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/engine/common/queue" +) + +type vtxParser struct { + numAccepted, numDropped prometheus.Counter + state State +} + +func (p *vtxParser) Parse(vtxBytes []byte) (queue.Job, error) { + vtx, err := p.state.ParseVertex(vtxBytes) + if err != nil { + return nil, err + } + return &vertexJob{ + numAccepted: p.numAccepted, + numDropped: p.numDropped, + vtx: vtx, + }, nil +} + +type vertexJob struct { + numAccepted, numDropped prometheus.Counter + vtx avalanche.Vertex +} + +func (v *vertexJob) ID() ids.ID { return v.vtx.ID() } +func (v *vertexJob) MissingDependencies() ids.Set { + missing := ids.Set{} + for _, parent := range v.vtx.Parents() { + if parent.Status() != choices.Accepted { + missing.Add(parent.ID()) + } + } + return missing +} +func (v *vertexJob) Execute() { + if v.MissingDependencies().Len() != 0 { + v.numDropped.Inc() + return + } + for _, tx := range v.vtx.Txs() { + if tx.Status() != choices.Accepted { + v.numDropped.Inc() + return + } + } + switch v.vtx.Status() { + case choices.Unknown, choices.Rejected: + v.numDropped.Inc() + case choices.Processing: + v.vtx.Accept() + v.numAccepted.Inc() + } +} +func (v *vertexJob) Bytes() []byte { return v.vtx.Bytes() } diff --git a/snow/engine/avalanche/vm.go b/snow/engine/avalanche/vm.go new file mode 100644 index 0000000..2826c1d --- /dev/null +++ b/snow/engine/avalanche/vm.go @@ -0,0 +1,25 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" +) + +// DAGVM defines the minimum functionality that an avalanche VM must +// implement +type DAGVM interface { + common.VM + + // Return any transactions that have not been sent to consensus yet + PendingTxs() []snowstorm.Tx + + // Convert a stream of bytes to a transaction or return an error + ParseTx(tx []byte) (snowstorm.Tx, error) + + // Retrieve a transaction that was submitted previously + GetTx(ids.ID) (snowstorm.Tx, error) +} diff --git a/snow/engine/avalanche/voter.go b/snow/engine/avalanche/voter.go new file mode 100644 index 0000000..72a1b53 --- /dev/null +++ b/snow/engine/avalanche/voter.go @@ -0,0 +1,64 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +type voter struct { + t *Transitive + vdr ids.ShortID + requestID uint32 + response ids.Set + deps ids.Set +} + +func (v *voter) Dependencies() ids.Set { return v.deps } + +func (v *voter) Fulfill(id ids.ID) { + v.deps.Remove(id) + v.Update() +} + +func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) } + +func (v *voter) Update() { + if v.deps.Len() != 0 { + return + } + + results, finished := v.t.polls.Vote(v.requestID, v.vdr, v.response.List()) + if !finished { + return + } + + v.t.Config.Context.Log.Debug("Finishing poll with:\n%s", &results) + v.t.Consensus.RecordPoll(results) + + txs := []snowstorm.Tx(nil) + for _, orphanID := range v.t.Consensus.Orphans().List() { + if tx, err := v.t.Config.VM.GetTx(orphanID); err == nil { + txs = append(txs, tx) + } else { + v.t.Config.Context.Log.Warn("Failed to fetch %s during attempted re-issuance", orphanID) + } + } + if len(txs) > 0 { + v.t.Config.Context.Log.Debug("Re-issuing %d transactions", len(txs)) + } + 
v.t.batch(txs, true /*=force*/, false /*empty*/) + + if v.t.Consensus.Quiesce() { + v.t.Config.Context.Log.Verbo("Avalanche engine can quiesce") + return + } + + v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce") + + if len(v.t.polls.m) == 0 { + v.t.repoll() + } +} diff --git a/snow/engine/common/bootstrapable.go b/snow/engine/common/bootstrapable.go new file mode 100644 index 0000000..d0ddc37 --- /dev/null +++ b/snow/engine/common/bootstrapable.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Bootstrapable defines the functionality required to support bootstrapping +type Bootstrapable interface { + // Returns the set of containerIDs that are accepted, but have no accepted + // children. + CurrentAcceptedFrontier() ids.Set + + // Returns the subset of containerIDs that are accepted by this chain. + FilterAccepted(containerIDs ids.Set) (acceptedContainerIDs ids.Set) + + // Force the provided containers to be accepted. + ForceAccepted(acceptedContainerIDs ids.Set) +} diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go new file mode 100644 index 0000000..9eebe0e --- /dev/null +++ b/snow/engine/common/bootstrapper.go @@ -0,0 +1,110 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Bootstrapper implements the Engine interface. +type Bootstrapper struct { + Config + + pendingAcceptedFrontier ids.ShortSet + acceptedFrontier ids.Set + + pendingAccepted ids.ShortSet + accepted ids.Bag + + RequestID uint32 +} + +// Initialize implements the Engine interface. 
+func (b *Bootstrapper) Initialize(config Config) { + b.Config = config + + for _, vdr := range b.Beacons.List() { + vdrID := vdr.ID() + b.pendingAcceptedFrontier.Add(vdrID) + b.pendingAccepted.Add(vdrID) + } + + b.accepted.SetThreshold(config.Alpha) +} + +// Startup implements the Engine interface. +func (b *Bootstrapper) Startup() { + if b.pendingAcceptedFrontier.Len() == 0 { + b.Context.Log.Info("Bootstrapping skipped due to no provided bootstraps") + b.Bootstrapable.ForceAccepted(ids.Set{}) + return + } + + vdrs := ids.ShortSet{} + vdrs.Union(b.pendingAcceptedFrontier) + + b.RequestID++ + b.Sender.GetAcceptedFrontier(vdrs, b.RequestID) +} + +// GetAcceptedFrontier implements the Engine interface. +func (b *Bootstrapper) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) { + b.Sender.AcceptedFrontier(validatorID, requestID, b.Bootstrapable.CurrentAcceptedFrontier()) +} + +// GetAcceptedFrontierFailed implements the Engine interface. +func (b *Bootstrapper) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) { + b.AcceptedFrontier(validatorID, requestID, ids.Set{}) +} + +// AcceptedFrontier implements the Engine interface. +func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if !b.pendingAcceptedFrontier.Contains(validatorID) { + b.Context.Log.Debug("Received an AcceptedFrontier message from %s unexpectedly", validatorID) + return + } + b.pendingAcceptedFrontier.Remove(validatorID) + + b.acceptedFrontier.Union(containerIDs) + + if b.pendingAcceptedFrontier.Len() == 0 { + vdrs := ids.ShortSet{} + vdrs.Union(b.pendingAccepted) + + b.RequestID++ + b.Sender.GetAccepted(vdrs, b.RequestID, b.acceptedFrontier) + } +} + +// GetAccepted implements the Engine interface. 
+func (b *Bootstrapper) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + b.Sender.Accepted(validatorID, requestID, b.Bootstrapable.FilterAccepted(containerIDs)) +} + +// GetAcceptedFailed implements the Engine interface. +func (b *Bootstrapper) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) { + b.Accepted(validatorID, requestID, ids.Set{}) +} + +// Accepted implements the Engine interface. +func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if !b.pendingAccepted.Contains(validatorID) { + b.Context.Log.Debug("Received an Accepted message from %s unexpectedly", validatorID) + return + } + b.pendingAccepted.Remove(validatorID) + + b.accepted.Add(containerIDs.List()...) + + if b.pendingAccepted.Len() == 0 { + accepted := b.accepted.Threshold() + if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 { + b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. This is likely a result of failing to be able to connect to the specified bootstraps, or no transactions have been issued on this network yet") + } else { + b.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", size) + } + + b.Bootstrapable.ForceAccepted(accepted) + } +} diff --git a/snow/engine/common/config.go b/snow/engine/common/config.go new file mode 100644 index 0000000..e3e6b10 --- /dev/null +++ b/snow/engine/common/config.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/validators" +) + +// Config wraps the common configurations that are needed by a Snow consensus +// engine +type Config struct { + Context *snow.Context + Validators validators.Set + Beacons validators.Set + + Alpha int + Sender Sender + Bootstrapable Bootstrapable +} diff --git a/snow/engine/common/engine.go b/snow/engine/common/engine.go new file mode 100644 index 0000000..c4e431b --- /dev/null +++ b/snow/engine/common/engine.go @@ -0,0 +1,120 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" +) + +// Engine describes the standard interface of a consensus engine +type Engine interface { + Handler + + // Return the context of the chain this engine is working on + Context() *snow.Context +} + +// Handler defines the functions that are acted on the node +type Handler interface { + ExternalHandler + InternalHandler +} + +// ExternalHandler defines how a consensus engine reacts to messages and +// requests from other validators +type ExternalHandler interface { + FrontierHandler + AcceptedHandler + FetchHandler + QueryHandler +} + +// FrontierHandler defines how a consensus engine reacts to frontier messages +// from other validators +type FrontierHandler interface { + // GetAcceptedFrontier notifies this consensus engine that its accepted + // frontier is requested by the specified validator + GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) + + // AcceptedFrontier notifies this consensus engine of the specified + // validators current accepted frontier + AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) + + // GetAcceptedFrontierFailed notifies this consensus engine that the + // requested accepted frontier from the specified validator should be + // considered lost + 
GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) +} + +// AcceptedHandler defines how a consensus engine reacts to messages pertaining +// to accepted containers from other validators +type AcceptedHandler interface { + // GetAccepted notifies this consensus engine that it should send the set of + // containerIDs that it has accepted from the provided set to the specified + // validator + GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) + + // Accepted notifies this consensus engine of a set of accepted containerIDs + Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) + + // GetAcceptedFailed notifies this consensus engine that the requested + // accepted containers requested from the specified validator should be + // considered lost + GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) +} + +// FetchHandler defines how a consensus engine reacts to retrieval messages from +// other validators +type FetchHandler interface { + // Get notifies this consensus engine that the specified validator requested + // that this engine send the specified container to it + Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) + + // Put the container with the specified ID and body. + // This engine needs to request and receive missing ancestors of the + // container before adding the container to consensus. Once all ancestor + // containers are added, pushes the container into the consensus. + Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) + + // Notify this engine that a get request it issued has failed. + GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID) +} + +// QueryHandler defines how a consensus engine reacts to query messages from +// other validators +type QueryHandler interface { + // Notify this engine that the specified validator queried it about the + // specified container. 
That is, the validator would like to know whether + // this engine prefers the specified container. If the ancestry of the + // container is incomplete, or the container is unknown, request the missing + // data. Once complete, sends this validator the current preferences. + PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID) + + // Notify this engine that the specified validator queried it about the + // specified container. That is, the validator would like to know whether + // this engine prefers the specified container. If the ancestry of the + // container is incomplete, request it. Once complete, sends this validator + // the current preferences. + PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) + + // Notify this engine of the specified validators preferences. + Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) + + // Notify this engine that a query it issued has failed. + QueryFailed(validatorID ids.ShortID, requestID uint32) +} + +// InternalHandler defines how this consensus engine reacts to messages from +// other components of this validator +type InternalHandler interface { + // Startup this engine. + Startup() + + // Shutdown this engine. + Shutdown() + + // Notify this engine that the vm has sent a message to it. + Notify(Message) +} diff --git a/snow/engine/common/fx.go b/snow/engine/common/fx.go new file mode 100644 index 0000000..b3bada7 --- /dev/null +++ b/snow/engine/common/fx.go @@ -0,0 +1,14 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Fx wraps an instance of a feature extension +type Fx struct { + ID ids.ID + Fx interface{} +} diff --git a/snow/engine/common/http_handler.go b/snow/engine/common/http_handler.go new file mode 100644 index 0000000..c6b898d --- /dev/null +++ b/snow/engine/common/http_handler.go @@ -0,0 +1,24 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "net/http" +) + +// LockOption allows the vm to specify their lock option based on their endpoint +type LockOption int + +// List of all allowed options +const ( + WriteLock = iota + ReadLock + NoLock +) + +// HTTPHandler ... +type HTTPHandler struct { + LockOptions LockOption + Handler http.Handler +} diff --git a/snow/engine/common/message.go b/snow/engine/common/message.go new file mode 100644 index 0000000..f987902 --- /dev/null +++ b/snow/engine/common/message.go @@ -0,0 +1,29 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "fmt" +) + +// TODO: Consider renaming Message to, say, VMMessage + +// Message is an enum of the message types that vms can send to consensus +type Message int + +const ( + // PendingTxs notifies a consensus engine that + // its VM has pending transactions + // (i.e. it would like to add a new block/vertex to consensus) + PendingTxs Message = iota +) + +func (msg Message) String() string { + switch msg { + case PendingTxs: + return "Pending Transactions" + default: + return fmt.Sprintf("Unknown Message: %d", msg) + } +} diff --git a/snow/engine/common/queue/job.go b/snow/engine/common/queue/job.go new file mode 100644 index 0000000..027994a --- /dev/null +++ b/snow/engine/common/queue/job.go @@ -0,0 +1,18 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package queue + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Job ... +type Job interface { + ID() ids.ID + + MissingDependencies() ids.Set + Execute() + + Bytes() []byte +} diff --git a/snow/engine/common/queue/jobs.go b/snow/engine/common/queue/jobs.go new file mode 100644 index 0000000..0a5c451 --- /dev/null +++ b/snow/engine/common/queue/jobs.go @@ -0,0 +1,145 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package queue + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errEmpty = errors.New("no available containers") + errDuplicate = errors.New("duplicated container") +) + +// Jobs ... +type Jobs struct { + parser Parser + baseDB database.Database + db *versiondb.Database + // Dynamic sized stack of ready to execute items + // Map from itemID to list of itemIDs that are blocked on this item + state prefixedState +} + +// New ... +func New(db database.Database) (*Jobs, error) { + jobs := &Jobs{ + baseDB: db, + db: versiondb.New(db), + } + jobs.state.jobs = jobs + + if _, err := jobs.HasNext(); err == nil { + return jobs, nil + } + return jobs, jobs.state.SetStackSize(jobs.db, 0) +} + +// SetParser ... +func (j *Jobs) SetParser(parser Parser) { j.parser = parser } + +// Push ... +func (j *Jobs) Push(job Job) error { + if deps := job.MissingDependencies(); deps.Len() != 0 { + return j.block(job, deps) + } + return j.push(job) +} + +// Pop ... +func (j *Jobs) Pop() (Job, error) { + size, err := j.state.StackSize(j.db) + if err != nil { + return nil, err + } + if size == 0 { + return nil, errEmpty + } + if err := j.state.SetStackSize(j.db, size-1); err != nil { + return nil, err + } + job, err := j.state.StackIndex(j.db, size-1) + if err != nil { + return nil, err + } + return job, j.state.DeleteStackIndex(j.db, size-1) +} + +// HasNext ... 
+func (j *Jobs) HasNext() (bool, error) { + size, err := j.state.StackSize(j.db) + return size > 0, err +} + +// Execute ... +func (j *Jobs) Execute(job Job) error { + job.Execute() + + jobID := job.ID() + + blocking, _ := j.state.Blocking(j.db, jobID) + j.state.DeleteBlocking(j.db, jobID) + + for _, blockedID := range blocking.List() { + job, err := j.state.Job(j.db, blockedID) + if err != nil { + return err + } + if job.MissingDependencies().Len() > 0 { + continue + } + j.state.DeleteJob(j.db, blockedID) + if err := j.push(job); err != nil { + return err + } + } + + return nil +} + +// Commit ... +func (j *Jobs) Commit() error { return j.db.Commit() } + +func (j *Jobs) push(job Job) error { + if has, err := j.state.HasJob(j.db, job.ID()); err != nil { + return err + } else if has { + return errDuplicate + } + + if err := j.state.SetJob(j.db, job); err != nil { + return err + } + + errs := wrappers.Errs{} + + size, err := j.state.StackSize(j.db) + errs.Add(err) + errs.Add(j.state.SetStackIndex(j.db, size, job)) + errs.Add(j.state.SetStackSize(j.db, size+1)) + + return errs.Err +} + +func (j *Jobs) block(job Job, deps ids.Set) error { + if err := j.state.SetJob(j.db, job); err != nil { + return err + } + + jobID := job.ID() + for _, depID := range deps.List() { + blocking, _ := j.state.Blocking(j.db, depID) + blocking.Add(jobID) + if err := j.state.SetBlocking(j.db, depID, blocking); err != nil { + return err + } + } + + return nil +} diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/common/queue/jobs_test.go new file mode 100644 index 0000000..ba2d4d1 --- /dev/null +++ b/snow/engine/common/queue/jobs_test.go @@ -0,0 +1,252 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package queue + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" +) + +func TestNew(t *testing.T) { + parser := &TestParser{T: t} + db := memdb.New() + + jobs, err := New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if hasNext { + t.Fatalf("Haven't pushed anything yet, shouldn't be able to pop") + } +} + +func TestPushPop(t *testing.T) { + parser := &TestParser{T: t} + db := memdb.New() + + jobs, err := New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + id := ids.Empty.Prefix(0) + job := &TestJob{ + T: t, + + IDF: func() ids.ID { return id }, + MissingDependenciesF: func() ids.Set { return ids.Set{} }, + ExecuteF: func() {}, + BytesF: func() []byte { return []byte{0} }, + } + + if err := jobs.Push(job); err != nil { + t.Fatal(err) + } + + if err := jobs.Commit(); err != nil { + t.Fatal(err) + } + + jobs, err = New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if !hasNext { + t.Fatalf("Should have a container ready to pop") + } + + parser.ParseF = func(b []byte) (Job, error) { + if !bytes.Equal(b, []byte{0}) { + t.Fatalf("Unknown job") + } + return job, nil + } + + returnedBlockable, err := jobs.Pop() + if err != nil { + t.Fatal(err) + } + + if returnedBlockable != job { + t.Fatalf("Returned wrong job") + } + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if hasNext { + t.Fatalf("Shouldn't have a container ready to pop") + } +} + +func TestExecute(t *testing.T) { + parser := &TestParser{T: t} + db := memdb.New() + + jobs, err := New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + id0 := ids.Empty.Prefix(0) + executed0 := new(bool) + job0 := &TestJob{ + T: t, + + IDF: func() ids.ID { return id0 }, + MissingDependenciesF: func() 
ids.Set { return ids.Set{} }, + ExecuteF: func() { *executed0 = true }, + BytesF: func() []byte { return []byte{0} }, + } + + id1 := ids.Empty.Prefix(0) + executed1 := new(bool) + job1 := &TestJob{ + T: t, + + IDF: func() ids.ID { return id1 }, + MissingDependenciesF: func() ids.Set { return ids.Set{id0.Key(): true} }, + ExecuteF: func() { *executed1 = true }, + BytesF: func() []byte { return []byte{1} }, + } + + if err := jobs.Push(job0); err != nil { + t.Fatal(err) + } + + if err := jobs.Push(job1); err != nil { + t.Fatal(err) + } + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if !hasNext { + t.Fatalf("Should have a container ready to pop") + } + + parser.ParseF = func(b []byte) (Job, error) { + if !bytes.Equal(b, []byte{0}) { + t.Fatalf("Unknown job") + } + return job0, nil + } + + returnedBlockable, err := jobs.Pop() + if err != nil { + t.Fatal(err) + } + + parser.ParseF = nil + + if returnedBlockable != job0 { + t.Fatalf("Returned wrong job") + } + + job1.MissingDependenciesF = func() ids.Set { return ids.Set{} } + parser.ParseF = func(b []byte) (Job, error) { + if !bytes.Equal(b, []byte{1}) { + t.Fatalf("Unknown job") + } + return job1, nil + } + + if err := jobs.Execute(job0); err != nil { + t.Fatal(err) + } + + if !*executed0 { + t.Fatalf("Should have executed the container") + } + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if !hasNext { + t.Fatalf("Should have a container ready to pop") + } +} + +func TestDuplicatedPush(t *testing.T) { + parser := &TestParser{T: t} + db := memdb.New() + + jobs, err := New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + id := ids.Empty.Prefix(0) + job := &TestJob{ + T: t, + + IDF: func() ids.ID { return id }, + MissingDependenciesF: func() ids.Set { return ids.Set{} }, + ExecuteF: func() {}, + BytesF: func() []byte { return []byte{0} }, + } + + if err := jobs.Push(job); err != nil { + t.Fatal(err) + } + + if err := jobs.Push(job); err == nil 
{ + t.Fatalf("Should have failed on push") + } + + if err := jobs.Commit(); err != nil { + t.Fatal(err) + } + + jobs, err = New(db) + if err != nil { + t.Fatal(err) + } + + jobs.SetParser(parser) + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if !hasNext { + t.Fatalf("Should have a container ready to pop") + } + + parser.ParseF = func(b []byte) (Job, error) { + if !bytes.Equal(b, []byte{0}) { + t.Fatalf("Unknown job") + } + return job, nil + } + + returnedBlockable, err := jobs.Pop() + if err != nil { + t.Fatal(err) + } + + if returnedBlockable != job { + t.Fatalf("Returned wrong job") + } + + if hasNext, err := jobs.HasNext(); err != nil { + t.Fatal(err) + } else if hasNext { + t.Fatalf("Shouldn't have a container ready to pop") + } +} diff --git a/snow/engine/common/queue/parser.go b/snow/engine/common/queue/parser.go new file mode 100644 index 0000000..b5ef232 --- /dev/null +++ b/snow/engine/common/queue/parser.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package queue + +// Parser ... +type Parser interface { + Parse([]byte) (Job, error) +} diff --git a/snow/engine/common/queue/prefixed_state.go b/snow/engine/common/queue/prefixed_state.go new file mode 100644 index 0000000..58c0408 --- /dev/null +++ b/snow/engine/common/queue/prefixed_state.go @@ -0,0 +1,123 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package queue + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Constants +const ( + stackSizeID byte = iota + stackID + jobID + blockingID +) + +var ( + stackSize = []byte{stackSizeID} +) + +type prefixedState struct{ state } + +func (ps *prefixedState) SetStackSize(db database.Database, size uint32) error { + return ps.state.SetInt(db, stackSize, size) +} + +func (ps *prefixedState) StackSize(db database.Database) (uint32, error) { + return ps.state.Int(db, stackSize) +} + +func (ps *prefixedState) SetStackIndex(db database.Database, index uint32, job Job) error { + p := wrappers.Packer{Bytes: make([]byte, 1+wrappers.IntLen)} + + p.PackByte(stackID) + p.PackInt(index) + + return ps.state.SetJob(db, p.Bytes, job) +} + +func (ps *prefixedState) DeleteStackIndex(db database.Database, index uint32) error { + p := wrappers.Packer{Bytes: make([]byte, 1+wrappers.IntLen)} + + p.PackByte(stackID) + p.PackInt(index) + + return db.Delete(p.Bytes) +} + +func (ps *prefixedState) StackIndex(db database.Database, index uint32) (Job, error) { + p := wrappers.Packer{Bytes: make([]byte, 1+wrappers.IntLen)} + + p.PackByte(stackID) + p.PackInt(index) + + return ps.state.Job(db, p.Bytes) +} + +func (ps *prefixedState) SetJob(db database.Database, job Job) error { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(jobID) + p.PackFixedBytes(job.ID().Bytes()) + + return ps.state.SetJob(db, p.Bytes, job) +} + +func (ps *prefixedState) HasJob(db database.Database, id ids.ID) (bool, error) { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(jobID) + p.PackFixedBytes(id.Bytes()) + + return db.Has(p.Bytes) +} + +func (ps *prefixedState) DeleteJob(db database.Database, id ids.ID) error { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(jobID) + p.PackFixedBytes(id.Bytes()) + + 
return db.Delete(p.Bytes) +} + +func (ps *prefixedState) Job(db database.Database, id ids.ID) (Job, error) { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(jobID) + p.PackFixedBytes(id.Bytes()) + + return ps.state.Job(db, p.Bytes) +} + +func (ps *prefixedState) SetBlocking(db database.Database, id ids.ID, blocking ids.Set) error { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(blockingID) + p.PackFixedBytes(id.Bytes()) + + return ps.state.SetIDs(db, p.Bytes, blocking) +} + +func (ps *prefixedState) DeleteBlocking(db database.Database, id ids.ID) error { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(blockingID) + p.PackFixedBytes(id.Bytes()) + + return db.Delete(p.Bytes) +} + +func (ps *prefixedState) Blocking(db database.Database, id ids.ID) (ids.Set, error) { + p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)} + + p.PackByte(blockingID) + p.PackFixedBytes(id.Bytes()) + + return ps.state.IDs(db, p.Bytes) +} diff --git a/snow/engine/common/queue/state.go b/snow/engine/common/queue/state.go new file mode 100644 index 0000000..24d7a9b --- /dev/null +++ b/snow/engine/common/queue/state.go @@ -0,0 +1,71 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package queue + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +type state struct{ jobs *Jobs } + +func (s *state) SetInt(db database.Database, key []byte, size uint32) error { + p := wrappers.Packer{Bytes: make([]byte, wrappers.IntLen)} + + p.PackInt(size) + + return db.Put(key, p.Bytes) +} + +func (s *state) Int(db database.Database, key []byte) (uint32, error) { + value, err := db.Get(key) + if err != nil { + return 0, err + } + + p := wrappers.Packer{Bytes: value} + return p.UnpackInt(), p.Err +} + +func (s *state) SetJob(db database.Database, key []byte, job Job) error { + return db.Put(key, job.Bytes()) +} + +func (s *state) Job(db database.Database, key []byte) (Job, error) { + value, err := db.Get(key) + if err != nil { + return nil, err + } + return s.jobs.parser.Parse(value) +} + +func (s *state) SetIDs(db database.Database, key []byte, blocking ids.Set) error { + p := wrappers.Packer{Bytes: make([]byte, wrappers.IntLen+hashing.HashLen*blocking.Len())} + + p.PackInt(uint32(blocking.Len())) + for _, id := range blocking.List() { + p.PackFixedBytes(id.Bytes()) + } + + return db.Put(key, p.Bytes) +} + +func (s *state) IDs(db database.Database, key []byte) (ids.Set, error) { + bytes, err := db.Get(key) + if err != nil { + return nil, err + } + + p := wrappers.Packer{Bytes: bytes} + + blocking := ids.Set{} + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + id, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + blocking.Add(id) + } + + return blocking, p.Err +} diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go new file mode 100644 index 0000000..b70a325 --- /dev/null +++ b/snow/engine/common/queue/test_job.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package queue + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +// TestJob is a test Job +type TestJob struct { + T *testing.T + + CantID, + CantMissingDependencies, + CantExecute, + CantBytes bool + + IDF func() ids.ID + MissingDependenciesF func() ids.Set + ExecuteF func() + BytesF func() []byte +} + +// Default ... +func (j *TestJob) Default(cant bool) { + j.CantID = cant + j.CantMissingDependencies = cant + j.CantExecute = cant + j.CantBytes = cant +} + +// ID ... +func (j *TestJob) ID() ids.ID { + if j.IDF != nil { + return j.IDF() + } + if j.CantID && j.T != nil { + j.T.Fatalf("Unexpectedly called ID") + } + return ids.ID{} +} + +// MissingDependencies ... +func (j *TestJob) MissingDependencies() ids.Set { + if j.MissingDependenciesF != nil { + return j.MissingDependenciesF() + } + if j.CantMissingDependencies && j.T != nil { + j.T.Fatalf("Unexpectedly called MissingDependencies") + } + return ids.Set{} +} + +// Execute ... +func (j *TestJob) Execute() { + if j.ExecuteF != nil { + j.ExecuteF() + } else if j.CantExecute && j.T != nil { + j.T.Fatalf("Unexpectedly called Execute") + } +} + +// Bytes ... +func (j *TestJob) Bytes() []byte { + if j.BytesF != nil { + return j.BytesF() + } + if j.CantBytes && j.T != nil { + j.T.Fatalf("Unexpectedly called Bytes") + } + return nil +} diff --git a/snow/engine/common/queue/test_parser.go b/snow/engine/common/queue/test_parser.go new file mode 100644 index 0000000..1e820e7 --- /dev/null +++ b/snow/engine/common/queue/test_parser.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package queue + +import ( + "errors" + "testing" +) + +var ( + errParse = errors.New("unexpectedly called Parse") +) + +// TestParser is a test Parser +type TestParser struct { + T *testing.T + + CantParse bool + + ParseF func([]byte) (Job, error) +} + +// Default ... +func (p *TestParser) Default(cant bool) { + p.CantParse = cant +} + +// Parse ... 
+func (p *TestParser) Parse(b []byte) (Job, error) { + if p.ParseF != nil { + return p.ParseF(b) + } + if p.CantParse && p.T != nil { + p.T.Fatal(errParse) + } + return nil, errParse +} diff --git a/snow/engine/common/sender.go b/snow/engine/common/sender.go new file mode 100644 index 0000000..d808b3a --- /dev/null +++ b/snow/engine/common/sender.go @@ -0,0 +1,72 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Sender defines how a consensus engine sends messages and requests to other +// validators +type Sender interface { + FrontierSender + AcceptedSender + FetchSender + QuerySender +} + +// FrontierSender defines how a consensus engine sends frontier messages to +// other validators +type FrontierSender interface { + // GetAcceptedFrontier requests that every validator in [validatorIDs] sends + // an AcceptedFrontier message. + GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32) + + // AcceptedFrontier responds to a AcceptedFrontier message with this + // engine's current accepted frontier. + AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) +} + +// AcceptedSender defines how a consensus engine sends messages pertaining to +// accepted containers +type AcceptedSender interface { + // GetAccepted requests that every validator in [validatorIDs] sends an + // Accepted message with all the IDs in [containerIDs] that the validator + // thinks is accepted. + GetAccepted(validatorIDs ids.ShortSet, requestID uint32, containerIDs ids.Set) + + // Accepted responds to a GetAccepted message with a set of IDs of + // containers that are accepted. + Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) +} + +// FetchSender defines how a consensus engine sends retrieval messages to other +// validators +type FetchSender interface { + // Request a container from a validator. 
+ // Request that the specified validator send the specified container + // to this validator + Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) + + // Tell the specified validator that the container whose ID is + // has body + Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) +} + +// QuerySender defines how a consensus engine sends query messages to other +// validators +type QuerySender interface { + // Request from the specified validators their preferred frontier, given the + // existence of the specified container. + // This is the same as PullQuery, except that this message includes not only + // the ID of the container but also its body. + PushQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID, container []byte) + + // Request from the specified validators their preferred frontier, given the + // existence of the specified container. + PullQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID) + + // Chits sends chits to the specified validator + Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) +} diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go new file mode 100644 index 0000000..79698c6 --- /dev/null +++ b/snow/engine/common/test_bootstrapable.go @@ -0,0 +1,61 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +// BootstrapableTest is a test engine that supports bootstrapping +type BootstrapableTest struct { + T *testing.T + + CantCurrentAcceptedFrontier, + CantFilterAccepted, + CantForceAccepted bool + + CurrentAcceptedFrontierF func() (acceptedContainerIDs ids.Set) + FilterAcceptedF func(containerIDs ids.Set) (acceptedContainerIDs ids.Set) + ForceAcceptedF func(acceptedContainerIDs ids.Set) +} + +// Default sets the default on call handling +func (b *BootstrapableTest) Default(cant bool) { + b.CantCurrentAcceptedFrontier = cant + b.CantFilterAccepted = cant + b.CantForceAccepted = cant +} + +// CurrentAcceptedFrontier implements the Bootstrapable interface +func (b *BootstrapableTest) CurrentAcceptedFrontier() ids.Set { + if b.CurrentAcceptedFrontierF != nil { + return b.CurrentAcceptedFrontierF() + } + if b.CantCurrentAcceptedFrontier && b.T != nil { + b.T.Fatalf("Unexpectedly called CurrentAcceptedFrontier") + } + return ids.Set{} +} + +// FilterAccepted implements the Bootstrapable interface +func (b *BootstrapableTest) FilterAccepted(containerIDs ids.Set) ids.Set { + if b.FilterAcceptedF != nil { + return b.FilterAcceptedF(containerIDs) + } + if b.CantFilterAccepted && b.T != nil { + b.T.Fatalf("Unexpectedly called FilterAccepted") + } + return ids.Set{} +} + +// ForceAccepted implements the Bootstrapable interface +func (b *BootstrapableTest) ForceAccepted(containerIDs ids.Set) { + if b.ForceAcceptedF != nil { + b.ForceAcceptedF(containerIDs) + } else if b.CantForceAccepted && b.T != nil { + b.T.Fatalf("Unexpectedly called ForceAccepted") + } +} diff --git a/snow/engine/common/test_config.go b/snow/engine/common/test_config.go new file mode 100644 index 0000000..930dd15 --- /dev/null +++ b/snow/engine/common/test_config.go @@ -0,0 +1,20 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/validators" +) + +// DefaultConfigTest returns a test configuration +func DefaultConfigTest() Config { + return Config{ + Context: snow.DefaultContextTest(), + Validators: validators.NewSet(), + Beacons: validators.NewSet(), + Sender: &SenderTest{}, + Bootstrapable: &BootstrapableTest{}, + } +} diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go new file mode 100644 index 0000000..d708008 --- /dev/null +++ b/snow/engine/common/test_engine.go @@ -0,0 +1,230 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" +) + +// EngineTest is a test engine +type EngineTest struct { + T *testing.T + + CantStartup, + CantShutdown, + + CantContext, + + CantNotify, + + CantGetAcceptedFrontier, + CantGetAcceptedFrontierFailed, + CantAcceptedFrontier, + + CantGetAccepted, + CantGetAcceptedFailed, + CantAccepted, + + CantGet, + CantGetFailed, + CantPut, + + CantPushQuery, + CantPullQuery, + CantQueryFailed, + CantChits bool + + StartupF, ShutdownF func() + ContextF func() *snow.Context + NotifyF func(Message) + GetF, GetFailedF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) + PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) + GetAcceptedFrontierF, GetAcceptedFrontierFailedF, GetAcceptedFailedF, QueryFailedF func(validatorID ids.ShortID, requestID uint32) + AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) +} + +// Default ... 
+func (e *EngineTest) Default(cant bool) { + e.CantStartup = cant + e.CantShutdown = cant + + e.CantContext = cant + + e.CantNotify = cant + + e.CantGetAcceptedFrontier = cant + e.CantGetAcceptedFrontierFailed = cant + e.CantAcceptedFrontier = cant + + e.CantGetAccepted = cant + e.CantGetAcceptedFailed = cant + e.CantAccepted = cant + + e.CantGet = cant + e.CantGetFailed = cant + e.CantPut = cant + + e.CantPushQuery = cant + e.CantPullQuery = cant + e.CantQueryFailed = cant + e.CantChits = cant +} + +// Startup ... +func (e *EngineTest) Startup() { + if e.StartupF != nil { + e.StartupF() + } else if e.CantStartup && e.T != nil { + e.T.Fatalf("Unexpectedly called Startup") + } +} + +// Shutdown ... +func (e *EngineTest) Shutdown() { + if e.ShutdownF != nil { + e.ShutdownF() + } else if e.CantShutdown && e.T != nil { + e.T.Fatalf("Unexpectedly called Shutdown") + } +} + +// Context ... +func (e *EngineTest) Context() *snow.Context { + if e.ContextF != nil { + return e.ContextF() + } + if e.CantContext && e.T != nil { + e.T.Fatalf("Unexpectedly called Context") + } + return nil +} + +// Notify ... +func (e *EngineTest) Notify(msg Message) { + if e.NotifyF != nil { + e.NotifyF(msg) + } else if e.CantNotify && e.T != nil { + e.T.Fatalf("Unexpectedly called Notify") + } +} + +// GetAcceptedFrontier ... +func (e *EngineTest) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) { + if e.GetAcceptedFrontierF != nil { + e.GetAcceptedFrontierF(validatorID, requestID) + } else if e.CantGetAcceptedFrontier && e.T != nil { + e.T.Fatalf("Unexpectedly called GetAcceptedFrontier") + } +} + +// GetAcceptedFrontierFailed ... +func (e *EngineTest) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) { + if e.GetAcceptedFrontierFailedF != nil { + e.GetAcceptedFrontierFailedF(validatorID, requestID) + } else if e.CantGetAcceptedFrontierFailed && e.T != nil { + e.T.Fatalf("Unexpectedly called GetAcceptedFrontierFailed") + } +} + +// AcceptedFrontier ... 
+func (e *EngineTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if e.AcceptedFrontierF != nil { + e.AcceptedFrontierF(validatorID, requestID, containerIDs) + } else if e.CantAcceptedFrontier && e.T != nil { + e.T.Fatalf("Unexpectedly called AcceptedFrontierF") + } +} + +// GetAccepted ... +func (e *EngineTest) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if e.GetAcceptedF != nil { + e.GetAcceptedF(validatorID, requestID, containerIDs) + } else if e.CantGetAccepted && e.T != nil { + e.T.Fatalf("Unexpectedly called GetAccepted") + } +} + +// GetAcceptedFailed ... +func (e *EngineTest) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) { + if e.GetAcceptedFailedF != nil { + e.GetAcceptedFailedF(validatorID, requestID) + } else if e.CantGetAcceptedFailed && e.T != nil { + e.T.Fatalf("Unexpectedly called GetAcceptedFailed") + } +} + +// Accepted ... +func (e *EngineTest) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if e.AcceptedF != nil { + e.AcceptedF(validatorID, requestID, containerIDs) + } else if e.CantAccepted && e.T != nil { + e.T.Fatalf("Unexpectedly called Accepted") + } +} + +// Get ... +func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { + if e.GetF != nil { + e.GetF(validatorID, requestID, containerID) + } else if e.CantGet && e.T != nil { + e.T.Fatalf("Unexpectedly called Get") + } +} + +// GetFailed ... +func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { + if e.GetFailedF != nil { + e.GetFailedF(validatorID, requestID, containerID) + } else if e.CantGetFailed && e.T != nil { + e.T.Fatalf("Unexpectedly called GetFailed") + } +} + +// Put ... 
+func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) { + if e.PutF != nil { + e.PutF(validatorID, requestID, containerID, container) + } else if e.CantPut && e.T != nil { + e.T.Fatalf("Unexpectedly called Put") + } +} + +// PushQuery ... +func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) { + if e.PushQueryF != nil { + e.PushQueryF(validatorID, requestID, containerID, container) + } else if e.CantPushQuery && e.T != nil { + e.T.Fatalf("Unexpectedly called PushQuery") + } +} + +// PullQuery ... +func (e *EngineTest) PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { + if e.PullQueryF != nil { + e.PullQueryF(validatorID, requestID, containerID) + } else if e.CantPullQuery && e.T != nil { + e.T.Fatalf("Unexpectedly called PullQuery") + } +} + +// QueryFailed ... +func (e *EngineTest) QueryFailed(validatorID ids.ShortID, requestID uint32) { + if e.QueryFailedF != nil { + e.QueryFailedF(validatorID, requestID) + } else if e.CantQueryFailed && e.T != nil { + e.T.Fatalf("Unexpectedly called QueryFailed") + } +} + +// Chits ... +func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if e.ChitsF != nil { + e.ChitsF(validatorID, requestID, containerIDs) + } else if e.CantChits && e.T != nil { + e.T.Fatalf("Unexpectedly called Chits") + } +} diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go new file mode 100644 index 0000000..10dd9c2 --- /dev/null +++ b/snow/engine/common/test_sender.go @@ -0,0 +1,142 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +// SenderTest is a test sender +type SenderTest struct { + T *testing.T + + CantGetAcceptedFrontier, CantAcceptedFrontier, + CantGetAccepted, CantAccepted, + CantGet, CantPut, + CantPullQuery, CantPushQuery, CantChits bool + + GetAcceptedFrontierF func(ids.ShortSet, uint32) + AcceptedFrontierF func(ids.ShortID, uint32, ids.Set) + GetAcceptedF func(ids.ShortSet, uint32, ids.Set) + AcceptedF func(ids.ShortID, uint32, ids.Set) + GetF func(ids.ShortID, uint32, ids.ID) + PutF func(ids.ShortID, uint32, ids.ID, []byte) + PushQueryF func(ids.ShortSet, uint32, ids.ID, []byte) + PullQueryF func(ids.ShortSet, uint32, ids.ID) + ChitsF func(ids.ShortID, uint32, ids.Set) +} + +// Default set the default callable value to [cant] +func (s *SenderTest) Default(cant bool) { + s.CantGetAcceptedFrontier = cant + s.CantAcceptedFrontier = cant + s.CantGetAccepted = cant + s.CantAccepted = cant + s.CantGet = cant + s.CantPut = cant + s.CantPullQuery = cant + s.CantPushQuery = cant + s.CantChits = cant +} + +// GetAcceptedFrontier calls GetAcceptedFrontierF if it was initialized. If it +// wasn't initialized and this function shouldn't be called and testing was +// initialized, then testing will fail. +func (s *SenderTest) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32) { + if s.GetAcceptedFrontierF != nil { + s.GetAcceptedFrontierF(validatorIDs, requestID) + } else if s.CantGetAcceptedFrontier && s.T != nil { + s.T.Fatalf("Unexpectedly called GetAcceptedFrontier") + } +} + +// AcceptedFrontier calls AcceptedFrontierF if it was initialized. If it wasn't +// initialized and this function shouldn't be called and testing was +// initialized, then testing will fail. 
+func (s *SenderTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if s.AcceptedFrontierF != nil { + s.AcceptedFrontierF(validatorID, requestID, containerIDs) + } else if s.CantAcceptedFrontier && s.T != nil { + s.T.Fatalf("Unexpectedly called AcceptedFrontier") + } +} + +// GetAccepted calls GetAcceptedF if it was initialized. If it wasn't +// initialized and this function shouldn't be called and testing was +// initialized, then testing will fail. +func (s *SenderTest) GetAccepted(validatorIDs ids.ShortSet, requestID uint32, containerIDs ids.Set) { + if s.GetAcceptedF != nil { + s.GetAcceptedF(validatorIDs, requestID, containerIDs) + } else if s.CantGetAccepted && s.T != nil { + s.T.Fatalf("Unexpectedly called GetAccepted") + } +} + +// Accepted calls AcceptedF if it was initialized. If it wasn't initialized and +// this function shouldn't be called and testing was initialized, then testing +// will fail. +func (s *SenderTest) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + if s.AcceptedF != nil { + s.AcceptedF(validatorID, requestID, containerIDs) + } else if s.CantAccepted && s.T != nil { + s.T.Fatalf("Unexpectedly called Accepted") + } +} + +// Get calls GetF if it was initialized. If it wasn't initialized and this +// function shouldn't be called and testing was initialized, then testing will +// fail. +func (s *SenderTest) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) { + if s.GetF != nil { + s.GetF(vdr, requestID, vtxID) + } else if s.CantGet && s.T != nil { + s.T.Fatalf("Unexpectedly called Get") + } +} + +// Put calls PutF if it was initialized. If it wasn't initialized and this +// function shouldn't be called and testing was initialized, then testing will +// fail. 
+func (s *SenderTest) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []byte) { + if s.PutF != nil { + s.PutF(vdr, requestID, vtxID, vtx) + } else if s.CantPut && s.T != nil { + s.T.Fatalf("Unexpectedly called Put") + } +} + +// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized +// and this function shouldn't be called and testing was initialized, then +// testing will fail. +func (s *SenderTest) PushQuery(vdrs ids.ShortSet, requestID uint32, vtxID ids.ID, vtx []byte) { + if s.PushQueryF != nil { + s.PushQueryF(vdrs, requestID, vtxID, vtx) + } else if s.CantPushQuery && s.T != nil { + s.T.Fatalf("Unexpectedly called PushQuery") + } +} + +// PullQuery calls PullQueryF if it was initialized. If it wasn't initialized +// and this function shouldn't be called and testing was initialized, then +// testing will fail. +func (s *SenderTest) PullQuery(vdrs ids.ShortSet, requestID uint32, vtxID ids.ID) { + if s.PullQueryF != nil { + s.PullQueryF(vdrs, requestID, vtxID) + } else if s.CantPullQuery && s.T != nil { + s.T.Fatalf("Unexpectedly called PullQuery") + } +} + +// Chits calls ChitsF if it was initialized. If it wasn't initialized and this +// function shouldn't be called and testing was initialized, then testing will +// fail. +func (s *SenderTest) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) { + if s.ChitsF != nil { + s.ChitsF(vdr, requestID, votes) + } else if s.CantChits && s.T != nil { + s.T.Fatalf("Unexpectedly called Chits") + } +} diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go new file mode 100644 index 0000000..d997c1e --- /dev/null +++ b/snow/engine/common/test_vm.go @@ -0,0 +1,77 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "errors" + "testing" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/snow" +) + +var ( + errInitialize = errors.New("unexpectedly called Initialize") +) + +// VMTest is a test vm +type VMTest struct { + T *testing.T + + CantInitialize, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool + + InitializeF func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error + ShutdownF func() + CreateHandlersF func() map[string]*HTTPHandler + CreateStaticHandlersF func() map[string]*HTTPHandler +} + +// Default ... +func (vm *VMTest) Default(cant bool) { + vm.CantInitialize = cant + vm.CantShutdown = cant + vm.CantCreateHandlers = cant +} + +// Initialize ... +func (vm *VMTest) Initialize(ctx *snow.Context, db database.Database, initState []byte, msgChan chan<- Message, fxs []*Fx) error { + if vm.InitializeF != nil { + return vm.InitializeF(ctx, db, initState, msgChan, fxs) + } + if vm.CantInitialize && vm.T != nil { + vm.T.Fatal(errInitialize) + } + return errInitialize +} + +// Shutdown ... +func (vm *VMTest) Shutdown() { + if vm.ShutdownF != nil { + vm.ShutdownF() + } else if vm.CantShutdown && vm.T != nil { + vm.T.Fatalf("Unexpectedly called Shutdown") + } +} + +// CreateHandlers ... +func (vm *VMTest) CreateHandlers() map[string]*HTTPHandler { + if vm.CreateHandlersF != nil { + return vm.CreateHandlersF() + } + if vm.CantCreateHandlers && vm.T != nil { + vm.T.Fatalf("Unexpectedly called CreateHandlers") + } + return nil +} + +// CreateStaticHandlers ... 
+func (vm *VMTest) CreateStaticHandlers() map[string]*HTTPHandler { + if vm.CreateStaticHandlersF != nil { + return vm.CreateStaticHandlersF() + } + if vm.CantCreateStaticHandlers && vm.T != nil { + vm.T.Fatalf("Unexpectedly called CreateStaticHandlers") + } + return nil +} diff --git a/snow/engine/common/vm.go b/snow/engine/common/vm.go new file mode 100644 index 0000000..15991c5 --- /dev/null +++ b/snow/engine/common/vm.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/snow" +) + +// VM describes the interface that all consensus VMs must implement +type VM interface { + // Initialize this VM. + // [ctx]: Metadata about this VM. + // [ctx.networkID]: The ID of the network this VM's chain is running + // on. + // [ctx.chainID]: The unique ID of the chain this VM is running on. + // [ctx.Log]: Used to log messages + // [ctx.NodeID]: The unique staker ID of this node. + // [ctx.Lock]: A Read/Write lock shared by this VM and the consensus + // engine that manages this VM. The write lock is held + // whenever code in the consensus engine calls the VM. + // [db]: The database this VM will persist data to. + // [genesisBytes]: The byte-encoding of the genesis information of this + // VM. The VM uses it to initialize its state. For + // example, if this VM were an account-based payments + // system, `genesisBytes` would probably contain a genesis + // transaction that gives coins to some accounts, and this + // transaction would be in the genesis block. + // [toEngine]: The channel used to send messages to the consensus engine. + // [fxs]: Feature extensions that attach to this VM. + Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + toEngine chan<- Message, + fxs []*Fx, + ) error + + // Shutdown is called when the node is shutting down. 
+ Shutdown() + + // Creates the HTTP handlers for custom chain network calls. + // + // This exposes handlers that the outside world can use to communicate with + // the chain. Each handler has the path: + // [Address of node]/ext/bc/[chain ID]/[extension] + // + // Returns a mapping from [extension]s to HTTP handlers. + // + // Each extension can specify how locking is managed for convenience. + // + // For example, if this VM implements an account-based payments system, + // it have an extension called `accounts`, where clients could get + // information about their accounts. + CreateHandlers() map[string]*HTTPHandler +} + +// StaticVM describes the functionality that allows a user to interact with a VM +// statically. +type StaticVM interface { + // Creates the HTTP handlers for custom VM network calls. + // + // This exposes handlers that the outside world can use to communicate with + // a static reference to the VM. Each handler has the path: + // [Address of node]/ext/VM/[VM ID]/[extension] + // + // Returns a mapping from [extension]s to HTTP handlers. + // + // Each extension can specify how locking is managed for convenience. + // + // For example, it might make sense to have an extension for creating + // genesis bytes this VM can interpret. + CreateStaticHandlers() map[string]*HTTPHandler +} diff --git a/snow/engine/snowman/block_job.go b/snow/engine/snowman/block_job.go new file mode 100644 index 0000000..aab227f --- /dev/null +++ b/snow/engine/snowman/block_job.go @@ -0,0 +1,62 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common/queue" +) + +type parser struct { + numAccepted, numDropped prometheus.Counter + vm ChainVM +} + +func (p *parser) Parse(blkBytes []byte) (queue.Job, error) { + blk, err := p.vm.ParseBlock(blkBytes) + if err != nil { + return nil, err + } + return &blockJob{ + numAccepted: p.numAccepted, + numDropped: p.numDropped, + blk: blk, + }, nil +} + +type blockJob struct { + numAccepted, numDropped prometheus.Counter + blk snowman.Block +} + +func (b *blockJob) ID() ids.ID { return b.blk.ID() } +func (b *blockJob) MissingDependencies() ids.Set { + missing := ids.Set{} + if parent := b.blk.Parent(); parent.Status() != choices.Accepted { + missing.Add(parent.ID()) + } + return missing +} +func (b *blockJob) Execute() { + if b.MissingDependencies().Len() != 0 { + b.numDropped.Inc() + return + } + switch b.blk.Status() { + case choices.Unknown, choices.Rejected: + b.numDropped.Inc() + case choices.Processing: + if err := b.blk.Verify(); err == nil { + b.blk.Accept() + b.numAccepted.Inc() + } else { + b.numDropped.Inc() + } + } +} +func (b *blockJob) Bytes() []byte { return b.blk.Bytes() } diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go new file mode 100644 index 0000000..88724ed --- /dev/null +++ b/snow/engine/snowman/bootstrapper.go @@ -0,0 +1,192 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/prometheus/client_golang/prometheus" +) + +// BootstrapConfig ... +type BootstrapConfig struct { + common.Config + + // Blocked tracks operations that are blocked on blocks + Blocked *queue.Jobs + + VM ChainVM + + Bootstrapped func() +} + +type bootstrapper struct { + BootstrapConfig + metrics + common.Bootstrapper + + pending ids.Set + finished bool + onFinished func() +} + +// Initialize this engine. +func (b *bootstrapper) Initialize(config BootstrapConfig) { + b.BootstrapConfig = config + + b.Blocked.SetParser(&parser{ + numAccepted: b.numBootstrapped, + numDropped: b.numDropped, + vm: b.VM, + }) + + config.Bootstrapable = b + b.Bootstrapper.Initialize(config.Config) +} + +// CurrentAcceptedFrontier ... +func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set { + acceptedFrontier := ids.Set{} + acceptedFrontier.Add(b.VM.LastAccepted()) + return acceptedFrontier +} + +// FilterAccepted ... +func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { + acceptedIDs := ids.Set{} + for _, blkID := range containerIDs.List() { + if blk, err := b.VM.GetBlock(blkID); err == nil && blk.Status() == choices.Accepted { + acceptedIDs.Add(blkID) + } + } + return acceptedIDs +} + +// ForceAccepted ... +func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) { + for _, blkID := range acceptedContainerIDs.List() { + b.fetch(blkID) + } + + if numPending := b.pending.Len(); numPending == 0 { + // TODO: This typically indicates bootstrapping has failed, so this + // should be handled appropriately + b.finish() + } +} + +// Put ... 
+func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) { + b.BootstrapConfig.Context.Log.Verbo("Put called for blkID %s", blkID) + + if !b.pending.Contains(blkID) { + return + } + + blk, err := b.VM.ParseBlock(blkBytes) + if err != nil { + b.BootstrapConfig.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s", + err, + formatting.DumpBytes{Bytes: blkBytes}) + b.GetFailed(vdr, requestID, blkID) + return + } + + b.addBlock(blk) +} + +// GetFailed ... +func (b *bootstrapper) GetFailed(_ ids.ShortID, _ uint32, blkID ids.ID) { b.sendRequest(blkID) } + +func (b *bootstrapper) fetch(blkID ids.ID) { + if b.pending.Contains(blkID) { + return + } + + blk, err := b.VM.GetBlock(blkID) + if err != nil { + b.sendRequest(blkID) + return + } + b.addBlock(blk) +} + +func (b *bootstrapper) sendRequest(blkID ids.ID) { + validators := b.BootstrapConfig.Validators.Sample(1) + if len(validators) == 0 { + b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", blkID) + return + } + validatorID := validators[0].ID() + b.RequestID++ + + b.pending.Add(blkID) + b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, blkID) + + b.numPendingRequests.Set(float64(b.pending.Len())) +} + +func (b *bootstrapper) addBlock(blk snowman.Block) { + status := blk.Status() + blkID := blk.ID() + for status == choices.Processing { + b.pending.Remove(blkID) + + if err := b.Blocked.Push(&blockJob{ + numAccepted: b.numBootstrapped, + numDropped: b.numDropped, + blk: blk, + }); err == nil { + b.numBlocked.Inc() + } + + blk = blk.Parent() + status = blk.Status() + blkID = blk.ID() + } + + switch status := blk.Status(); status { + case choices.Unknown: + b.sendRequest(blkID) + case choices.Accepted: + b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", blkID) + case choices.Rejected: + b.BootstrapConfig.Context.Log.Error("Bootstrapping wants to accept %s, however it was previously rejected", blkID) + } + + numPending := 
b.pending.Len() + b.numPendingRequests.Set(float64(numPending)) + if numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) finish() { + if b.finished { + return + } + + b.executeAll(b.Blocked, b.numBlocked) + + // Start consensus + b.onFinished() + b.finished = true + + if b.Bootstrapped != nil { + b.Bootstrapped() + } +} + +func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) { + for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() { + numBlocked.Dec() + if err := jobs.Execute(job); err != nil { + b.BootstrapConfig.Context.Log.Warn("Error executing: %s", err) + } + } +} diff --git a/snow/engine/snowman/bootstrapper_test.go b/snow/engine/snowman/bootstrapper_test.go new file mode 100644 index 0000000..9cb0968 --- /dev/null +++ b/snow/engine/snowman/bootstrapper_test.go @@ -0,0 +1,427 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "bytes" + "errors" + "fmt" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" + "github.com/ava-labs/gecko/snow/networking/handler" + "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/snow/networking/timeout" + "github.com/ava-labs/gecko/snow/validators" +) + +var ( + errUnknownBlock = errors.New("unknown block") +) + +func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, *VMTest) { + ctx := snow.DefaultContextTest() + + peers := validators.NewSet() + db := memdb.New() + sender := &common.SenderTest{} + vm := &VMTest{} + engine := &Transitive{} + handler := &handler.Handler{} + router := &router.ChainRouter{} + timeouts := 
&timeout.Manager{} + + sender.T = t + vm.T = t + + sender.Default(true) + vm.Default(true) + + sender.CantGetAcceptedFrontier = false + + peer := validators.GenerateRandomValidator(1) + peerID := peer.ID() + peers.Add(peer) + + handler.Initialize(engine, make(chan common.Message), 1) + timeouts.Initialize(0) + router.Initialize(ctx.Log, timeouts) + + blocker, _ := queue.New(db) + + commonConfig := common.Config{ + Context: ctx, + Validators: peers, + Beacons: peers, + Alpha: peers.Len()/2 + 1, + Sender: sender, + } + return BootstrapConfig{ + Config: commonConfig, + Blocked: blocker, + VM: vm, + }, peerID, sender, vm +} + +func TestBootstrapperSingleFrontier(t *testing.T) { + config, peerID, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + + blkBytes0 := []byte{0} + blkBytes1 := []byte{1} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + blk1 := &Blk{ + parent: blk0, + id: blkID1, + height: 1, + status: choices.Processing, + bytes: blkBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add(blkID1) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID1): + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + + reqID := new(uint32) + sender.GetF = func(vdr ids.ShortID, innerReqID uint32, blkID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + switch { + case blkID.Equals(blkID1): + default: + t.Fatalf("Requested unknown vertex") + } + + *reqID = innerReqID + } + + bs.ForceAccepted(acceptedIDs) + + vm.GetBlockF = nil + sender.GetF = nil + + vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + switch { + case 
bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *reqID, blkID1, blkBytes1) + + vm.ParseBlockF = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if blk1.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } +} + +func TestBootstrapperUnknownByzantineResponse(t *testing.T) { + config, peerID, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + blkID2 := ids.Empty.Prefix(2) + + blkBytes0 := []byte{0} + blkBytes1 := []byte{1} + blkBytes2 := []byte{2} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + blk1 := &Blk{ + parent: blk0, + id: blkID1, + height: 1, + status: choices.Processing, + bytes: blkBytes1, + } + blk2 := &Blk{ + parent: blk1, + id: blkID2, + height: 2, + status: choices.Processing, + bytes: blkBytes2, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add(blkID1) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID1): + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + + requestID := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(blkID1): + default: + t.Fatalf("Requested unknown block") + } + + *requestID = reqID + } + + bs.ForceAccepted(acceptedIDs) + + vm.GetBlockF = nil + sender.GetF = nil + + vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, 
blkBytes1): + return blk1, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *requestID, blkID2, blkBytes2) + bs.Put(peerID, *requestID, blkID1, blkBytes1) + + vm.ParseBlockF = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if blk1.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } + if blk2.Status() != choices.Processing { + t.Fatalf("Block should be processing") + } +} + +func TestBootstrapperDependency(t *testing.T) { + config, peerID, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + blkID2 := ids.Empty.Prefix(2) + + blkBytes0 := []byte{0} + blkBytes1 := []byte{1} + blkBytes2 := []byte{2} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + blk1 := &Blk{ + parent: blk0, + id: blkID1, + height: 1, + status: choices.Unknown, + bytes: blkBytes1, + } + blk2 := &Blk{ + parent: blk1, + id: blkID2, + height: 2, + status: choices.Processing, + bytes: blkBytes2, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add(blkID2) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID2): + return blk2, nil + default: + t.Fatalf("Requested unknown block") + panic("Requested unknown block") + } + } + + requestID := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(blkID1): + default: + t.Fatalf("Requested unknown block") + } + + *requestID = reqID + } + + bs.ForceAccepted(acceptedIDs) + + vm.GetBlockF = nil + sender.GetF = nil + + vm.ParseBlockF = 
func(blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + case bytes.Equal(blkBytes, blkBytes2): + return blk2, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + blk1.status = choices.Processing + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *requestID, blkID1, blkBytes1) + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if blk1.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } + if blk2.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } +} + +func TestBootstrapperAcceptedFrontier(t *testing.T) { + config, _, _, vm := newConfig(t) + + blkID := GenerateID() + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + vm.LastAcceptedF = func() ids.ID { return blkID } + + accepted := bs.CurrentAcceptedFrontier() + + if accepted.Len() != 1 { + t.Fatalf("Only one block should be accepted") + } + if !accepted.Contains(blkID) { + t.Fatalf("Blk should be accepted") + } +} + +func TestBootstrapperFilterAccepted(t *testing.T) { + config, _, _, vm := newConfig(t) + + blkID0 := GenerateID() + blkID1 := GenerateID() + blkID2 := GenerateID() + + blk0 := &Blk{ + id: blkID0, + status: choices.Accepted, + } + blk1 := &Blk{ + id: blkID1, + status: choices.Accepted, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + blkIDs := ids.Set{} + blkIDs.Add( + blkID0, + blkID1, + blkID2, + ) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID0): + return blk0, nil + case blkID.Equals(blkID1): + return blk1, nil + case blkID.Equals(blkID2): + return nil, errUnknownBlock + } + t.Fatal(errUnknownBlock) + return nil, 
errUnknownBlock + } + + accepted := bs.FilterAccepted(blkIDs) + + if accepted.Len() != 2 { + t.Fatalf("Two blocks should be accepted") + } + if !accepted.Contains(blkID0) { + t.Fatalf("Blk should be accepted") + } + if !accepted.Contains(blkID1) { + t.Fatalf("Blk should be accepted") + } + if accepted.Contains(blkID2) { + t.Fatalf("Blk shouldn't be accepted") + } +} diff --git a/snow/engine/snowman/config.go b/snow/engine/snowman/config.go new file mode 100644 index 0000000..a2c940b --- /dev/null +++ b/snow/engine/snowman/config.go @@ -0,0 +1,17 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +// Config wraps all the parameters needed for a snowman engine +type Config struct { + BootstrapConfig + + Params snowball.Parameters + Consensus snowman.Consensus +} diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go new file mode 100644 index 0000000..1b590b7 --- /dev/null +++ b/snow/engine/snowman/config_test.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" +) + +func DefaultConfig() Config { + blocked, _ := queue.New(memdb.New()) + return Config{ + BootstrapConfig: BootstrapConfig{ + Config: common.DefaultConfigTest(), + Blocked: blocked, + VM: &VMTest{}, + }, + Params: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Consensus: &snowman.Topological{}, + } +} diff --git a/snow/engine/snowman/convincer.go b/snow/engine/snowman/convincer.go new file mode 100644 index 0000000..5eef1e9 --- /dev/null +++ b/snow/engine/snowman/convincer.go @@ -0,0 +1,39 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" +) + +type convincer struct { + consensus snowman.Consensus + sender common.Sender + vdr ids.ShortID + requestID uint32 + abandoned bool + deps ids.Set +} + +func (c *convincer) Dependencies() ids.Set { return c.deps } + +func (c *convincer) Fulfill(id ids.ID) { + c.deps.Remove(id) + c.Update() +} + +func (c *convincer) Abandon(ids.ID) { c.abandoned = true } + +func (c *convincer) Update() { + if c.abandoned || c.deps.Len() != 0 { + return + } + + pref := c.consensus.Preference() + prefSet := ids.Set{} + prefSet.Add(pref) + c.sender.Chits(c.vdr, c.requestID, prefSet) +} diff --git a/snow/engine/snowman/engine.go b/snow/engine/snowman/engine.go new file mode 100644 index 0000000..9e9f042 --- /dev/null +++ b/snow/engine/snowman/engine.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/snow/engine/common" +) + +// Engine describes the events that can occur to a Snowman instance. +// +// The engine is used to fetch, order, and decide on the fate of blocks. This +// engine runs the leaderless version of the Snowman consensus protocol. +// Therefore, the liveness of this protocol tolerant to O(sqrt(n)) Byzantine +// Nodes where n is the number of nodes in the network. Therefore, this protocol +// should only be run in a Crash Fault Tolerant environment, or in an +// environment where lose of liveness and manual intervention is tolerable. +type Engine interface { + common.Engine + + // Initialize this engine. + Initialize(Config) +} diff --git a/snow/engine/snowman/engine_test.go b/snow/engine/snowman/engine_test.go new file mode 100644 index 0000000..e149970 --- /dev/null +++ b/snow/engine/snowman/engine_test.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +var ( + Genesis = GenerateID() + offset = uint64(0) +) + +func GenerateID() ids.ID { + offset++ + return ids.Empty.Prefix(offset) +} + +type Blk struct { + parent snowman.Block + id ids.ID + + height int + status choices.Status + + bytes []byte +} + +func (b *Blk) ID() ids.ID { return b.id } +func (b *Blk) Parent() snowman.Block { return b.parent } +func (b *Blk) Accept() { b.status = choices.Accepted } +func (b *Blk) Reject() { b.status = choices.Rejected } +func (b *Blk) Status() choices.Status { return b.status } +func (b *Blk) Verify() error { return nil } +func (b *Blk) Bytes() []byte { return b.bytes } + +type sortBks []*Blk + +func (sb sortBks) Less(i, j int) bool { return sb[i].height < sb[j].height } +func (sb sortBks) Len() int { return len(sb) } +func (sb sortBks) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } + +func SortBks(bks []*Blk) { sort.Sort(sortBks(bks)) } + +func Matches(a, b []ids.ID) bool { + if len(a) != len(b) { + return false + } + set := ids.Set{} + set.Add(a...) + for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} +func MatchesShort(a, b []ids.ShortID) bool { + if len(a) != len(b) { + return false + } + set := ids.ShortSet{} + set.Add(a...) + for _, id := range b { + if !set.Contains(id) { + return false + } + } + return true +} diff --git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go new file mode 100644 index 0000000..5e66fd0 --- /dev/null +++ b/snow/engine/snowman/issuer.go @@ -0,0 +1,44 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +type issuer struct { + t *Transitive + blk snowman.Block + abandoned bool + deps ids.Set +} + +func (i *issuer) Dependencies() ids.Set { return i.deps } + +func (i *issuer) Fulfill(id ids.ID) { + i.deps.Remove(id) + i.Update() +} + +func (i *issuer) Abandon(ids.ID) { + if !i.abandoned { + blkID := i.blk.ID() + i.t.pending.Remove(blkID) + i.t.blocked.Abandon(blkID) + + // Tracks performance statistics + i.t.numBlkRequests.Set(float64(i.t.blkReqs.Len())) + i.t.numBlockedBlk.Set(float64(i.t.pending.Len())) + } + i.abandoned = true +} + +func (i *issuer) Update() { + if i.abandoned || i.deps.Len() != 0 { + return + } + + i.t.deliver(i.blk) +} diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go new file mode 100644 index 0000000..f17d360 --- /dev/null +++ b/snow/engine/snowman/metrics.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/utils/logging" +) + +type metrics struct { + numPendingRequests, numBlocked prometheus.Gauge + numBootstrapped, numDropped prometheus.Counter + + numPolls, numBlkRequests, numBlockedBlk prometheus.Gauge +} + +// Initialize implements the Engine interface +func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) { + m.numPendingRequests = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "sm_bs_requests", + Help: "Number of pending bootstrap requests", + }) + m.numBlocked = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "sm_bs_blocked", + Help: "Number of blocked bootstrap blocks", + }) + m.numBootstrapped = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sm_bs_accepted", + Help: "Number of accepted bootstrap blocks", + }) + m.numDropped = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "sm_bs_dropped", + Help: "Number of dropped bootstrap blocks", + }) + m.numPolls = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "sm_polls", + Help: "Number of pending network polls", + }) + m.numBlkRequests = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "sm_blk_requests", + Help: "Number of pending vertex requests", + }) + m.numBlockedBlk = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "sm_blocked_blks", + Help: "Number of blocked vertices", + }) + + if err := registerer.Register(m.numPendingRequests); err != nil { + log.Error("Failed to register sm_bs_requests statistics due to %s", err) + } + if err := registerer.Register(m.numBlocked); err != nil { + log.Error("Failed to register sm_bs_blocked statistics due to %s", err) + } + if err := registerer.Register(m.numBootstrapped); err != nil { + log.Error("Failed to 
register sm_bs_accepted statistics due to %s", err) + } + if err := registerer.Register(m.numDropped); err != nil { + log.Error("Failed to register sm_bs_dropped statistics due to %s", err) + } + if err := registerer.Register(m.numPolls); err != nil { + log.Error("Failed to register sm_polls statistics due to %s", err) + } + if err := registerer.Register(m.numBlkRequests); err != nil { + log.Error("Failed to register sm_blk_requests statistics due to %s", err) + } + if err := registerer.Register(m.numBlockedBlk); err != nil { + log.Error("Failed to register sm_blocked_blks statistics due to %s", err) + } +} diff --git a/snow/engine/snowman/oracle_block.go b/snow/engine/snowman/oracle_block.go new file mode 100644 index 0000000..996c2bc --- /dev/null +++ b/snow/engine/snowman/oracle_block.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +// OracleBlock is a block that only has two valid children. The children should +// be returned in preferential order. +// +// This ordering does not need to be deterministically created from the chain +// state. +type OracleBlock interface { + snowman.Block + + // Options returns the possible children of this block in the order this + // validator prefers the blocks. + Options() [2]snowman.Block +} diff --git a/snow/engine/snowman/polls.go b/snow/engine/snowman/polls.go new file mode 100644 index 0000000..6e666dc --- /dev/null +++ b/snow/engine/snowman/polls.go @@ -0,0 +1,118 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/prometheus/client_golang/prometheus" +) + +type polls struct { + log logging.Logger + numPolls prometheus.Gauge + alpha int + m map[uint32]poll +} + +// Add to the current set of polls +// Returns true if the poll was registered correctly and the network sample +// should be made. +func (p *polls) Add(requestID uint32, numPolled int) bool { + poll, exists := p.m[requestID] + if !exists { + poll.alpha = p.alpha + poll.numPolled = numPolled + p.m[requestID] = poll + + p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics + } + return !exists +} + +// Vote registers the connections response to a query for [id]. If there was no +// query, or the response has already be registered, nothing is performed. +func (p *polls) Vote(requestID uint32, vdr ids.ShortID, vote ids.ID) (ids.Bag, bool) { + p.log.Verbo("[polls.Vote] Vote: requestID: %d. validatorID: %s. Vote: %s", requestID, vdr, vote) + poll, exists := p.m[requestID] + if !exists { + return ids.Bag{}, false + } + poll.Vote(vote) + if poll.Finished() { + delete(p.m, requestID) + p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics + return poll.votes, true + } + p.m[requestID] = poll + return ids.Bag{}, false +} + +// CancelVote registers the connections failure to respond to a query for [id]. +func (p *polls) CancelVote(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { + p.log.Verbo("CancelVote received. requestID: %d. validatorID: %s. 
Vote: %s", requestID, vdr) + poll, exists := p.m[requestID] + if !exists { + return ids.Bag{}, false + } + + poll.CancelVote() + if poll.Finished() { + delete(p.m, requestID) + p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics + return poll.votes, true + } + p.m[requestID] = poll + return ids.Bag{}, false +} + +func (p *polls) String() string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("Current polls: (Size = %d)", len(p.m))) + for requestID, poll := range p.m { + sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll)) + } + + return sb.String() +} + +// poll represents the current state of a network poll for a block +type poll struct { + alpha int + votes ids.Bag + numPolled int +} + +// Vote registers a vote for this poll +func (p *poll) CancelVote() { + if p.numPolled > 0 { + p.numPolled-- + } +} + +// Vote registers a vote for this poll +func (p *poll) Vote(vote ids.ID) { + if p.numPolled > 0 { + p.numPolled-- + p.votes.Add(vote) + } +} + +// Finished returns true if the poll has completed, with no more required +// responses +func (p poll) Finished() bool { + received := p.votes.Len() + _, freq := p.votes.Mode() + return p.numPolled == 0 || // All k nodes responded + freq >= p.alpha || // An alpha majority has returned + received+p.numPolled < p.alpha // An alpha majority can never return +} + +func (p poll) String() string { + return fmt.Sprintf("Waiting on %d chits", p.numPolled) +} diff --git a/snow/engine/snowman/test_vm.go b/snow/engine/snowman/test_vm.go new file mode 100644 index 0000000..d2f0e2d --- /dev/null +++ b/snow/engine/snowman/test_vm.go @@ -0,0 +1,99 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" +) + +var ( + errBuildBlock = errors.New("unexpectedly called BuildBlock") + errParseBlock = errors.New("unexpectedly called ParseBlock") + errGetBlock = errors.New("unexpectedly called GetBlock") +) + +// VMTest ... +type VMTest struct { + common.VMTest + + CantBuildBlock, + CantParseBlock, + CantGetBlock, + CantSetPreference, + CantLastAccepted bool + + BuildBlockF func() (snowman.Block, error) + ParseBlockF func([]byte) (snowman.Block, error) + GetBlockF func(ids.ID) (snowman.Block, error) + SetPreferenceF func(ids.ID) + LastAcceptedF func() ids.ID +} + +// Default ... +func (vm *VMTest) Default(cant bool) { + vm.VMTest.Default(cant) + + vm.CantBuildBlock = cant + vm.CantParseBlock = cant + vm.CantGetBlock = cant + vm.CantSetPreference = cant + vm.CantLastAccepted = cant +} + +// BuildBlock ... +func (vm *VMTest) BuildBlock() (snowman.Block, error) { + if vm.BuildBlockF != nil { + return vm.BuildBlockF() + } + if vm.CantBuildBlock && vm.T != nil { + vm.T.Fatal(errBuildBlock) + } + return nil, errBuildBlock +} + +// ParseBlock ... +func (vm *VMTest) ParseBlock(b []byte) (snowman.Block, error) { + if vm.ParseBlockF != nil { + return vm.ParseBlockF(b) + } + if vm.CantParseBlock && vm.T != nil { + vm.T.Fatal(errParseBlock) + } + return nil, errParseBlock +} + +// GetBlock ... +func (vm *VMTest) GetBlock(id ids.ID) (snowman.Block, error) { + if vm.GetBlockF != nil { + return vm.GetBlockF(id) + } + if vm.CantGetBlock && vm.T != nil { + vm.T.Fatal(errGetBlock) + } + return nil, errGetBlock +} + +// SetPreference ... +func (vm *VMTest) SetPreference(id ids.ID) { + if vm.SetPreferenceF != nil { + vm.SetPreferenceF(id) + } else if vm.CantSetPreference && vm.T != nil { + vm.T.Fatalf("Unexpectedly called SetPreference") + } +} + +// LastAccepted ... 
+func (vm *VMTest) LastAccepted() ids.ID { + if vm.LastAcceptedF != nil { + return vm.LastAcceptedF() + } + if vm.CantLastAccepted && vm.T != nil { + vm.T.Fatalf("Unexpectedly called LastAccepted") + } + return ids.ID{} +} diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go new file mode 100644 index 0000000..e023a7d --- /dev/null +++ b/snow/engine/snowman/transitive.go @@ -0,0 +1,379 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/events" + "github.com/ava-labs/gecko/utils/formatting" +) + +// Transitive implements the Engine interface by attempting to fetch all +// transitive dependencies. +type Transitive struct { + Config + bootstrapper + + polls polls // track people I have asked for their preference + + blkReqs, pending ids.Set // prevent asking validators for the same block + + blocked events.Blocker // track operations that are blocked on blocks + + bootstrapped bool +} + +// Initialize implements the Engine interface +func (t *Transitive) Initialize(config Config) { + config.Context.Log.Info("Initializing Snowman consensus") + + t.Config = config + t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics) + + t.onFinished = t.finishBootstrapping + t.bootstrapper.Initialize(config.BootstrapConfig) + + t.polls.log = config.Context.Log + t.polls.numPolls = t.numPolls + t.polls.alpha = t.Params.Alpha + t.polls.m = make(map[uint32]poll) +} + +func (t *Transitive) finishBootstrapping() { + tail := t.Config.VM.LastAccepted() + t.Config.VM.SetPreference(tail) + t.Consensus.Initialize(t.Config.Context, t.Params, tail) + t.bootstrapped = true +} + +// Shutdown implements the 
Engine interface
+func (t *Transitive) Shutdown() {
+	t.Config.Context.Log.Info("Shutting down Snowman consensus")
+	t.Config.VM.Shutdown()
+}
+
+// Context implements the Engine interface
+func (t *Transitive) Context() *snow.Context { return t.Config.Context }
+
+// Get implements the Engine interface
+func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
+	// Only reply when the block is available locally; requests for unknown
+	// blocks are silently dropped.
+	if blk, err := t.Config.VM.GetBlock(blkID); err == nil {
+		t.Config.Sender.Put(vdr, requestID, blkID, blk.Bytes())
+	}
+}
+
+// Put implements the Engine interface
+func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
+	t.Config.Context.Log.Verbo("Put called for blockID %s", blkID)
+
+	if !t.bootstrapped {
+		// Still bootstrapping: hand the block to the bootstrapper instead.
+		t.bootstrapper.Put(vdr, requestID, blkID, blkBytes)
+		return
+	}
+
+	blk, err := t.Config.VM.ParseBlock(blkBytes)
+	if err != nil {
+		t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
+			err,
+			formatting.DumpBytes{Bytes: blkBytes})
+		// Treat an unparseable response like a failed request so anything
+		// blocked on this block is abandoned.
+		t.GetFailed(vdr, requestID, blkID)
+		return
+	}
+
+	t.insertFrom(vdr, blk)
+}
+
+// GetFailed implements the Engine interface
+func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
+	if !t.bootstrapped {
+		t.bootstrapper.GetFailed(vdr, requestID, blkID)
+		return
+	}
+
+	// Drop the pending block and release everything waiting on it.
+	t.pending.Remove(blkID)
+	t.blocked.Abandon(blkID)
+	t.blkReqs.Remove(blkID)
+
+	// Tracks performance statistics
+	t.numBlockedBlk.Set(float64(t.pending.Len()))
+}
+
+// PullQuery implements the Engine interface
+func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
+	if !t.bootstrapped {
+		t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", blkID)
+		return
+	}
+
+	// The convincer answers the query once its dependencies are fulfilled.
+	c := &convincer{
+		consensus: t.Consensus,
+		sender:    t.Config.Sender,
+		vdr:       vdr,
+		requestID: requestID,
+	}
+
+	// If the queried block couldn't be issued immediately, answering must
+	// wait on it.
+	if !t.reinsertFrom(vdr, blkID) {
+		c.deps.Add(blkID)
+	}
+
+	t.blocked.Register(c)
+}
+
+// PushQuery implements the Engine interface
+func (t *Transitive) PushQuery(vdr
ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) {
+	if !t.bootstrapped {
+		t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", blkID)
+		return
+	}
+
+	// Issue the gossiped block, then answer as if this were a PullQuery.
+	t.Put(vdr, requestID, blkID, blk)
+	t.PullQuery(vdr, requestID, blkID)
+}
+
+// Chits implements the Engine interface
+func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
+	if !t.bootstrapped {
+		t.Config.Context.Log.Warn("Dropping Chits due to bootstrapping")
+		return
+	}
+
+	// Since this is snowman, there should only be one ID in the vote set
+	if votes.Len() != 1 {
+		t.Config.Context.Log.Warn("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d", votes.Len(), vdr, requestID)
+		// A malformed response counts as a failed query.
+		t.QueryFailed(vdr, requestID)
+		return
+	}
+	vote := votes.List()[0]
+
+	t.Config.Context.Log.Verbo("Chit was called. RequestID: %v. Vote: %s", requestID, vote)
+
+	// The voter records this response in the poll once the voted-for block
+	// has been issued.
+	v := &voter{
+		t:         t,
+		vdr:       vdr,
+		requestID: requestID,
+		response:  vote,
+	}
+
+	if !t.reinsertFrom(vdr, vote) {
+		v.deps.Add(vote)
+	}
+
+	t.blocked.Register(v)
+}
+
+// QueryFailed implements the Engine interface
+func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
+	if !t.bootstrapped {
+		t.Config.Context.Log.Warn("Dropping QueryFailed due to bootstrapping")
+		return
+	}
+
+	// A voter with a zero response cancels this validator's vote when it
+	// runs (see voter.Update).
+	t.blocked.Register(&voter{
+		t:         t,
+		vdr:       vdr,
+		requestID: requestID,
+	})
+}
+
+// Notify implements the Engine interface
+func (t *Transitive) Notify(msg common.Message) {
+	if !t.bootstrapped {
+		t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
+		return
+	}
+
+	t.Config.Context.Log.Verbo("Snowman engine notified of %s from the vm", msg)
+	switch msg {
+	case common.PendingTxs:
+		if blk, err := t.Config.VM.BuildBlock(); err == nil {
+			if status := blk.Status(); status != choices.Processing {
+				t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
+			}
+			parentID := blk.Parent().ID()
+			if pref := t.Consensus.Preference();
!parentID.Equals(pref) {
+				t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
+			}
+			if t.insertAll(blk) {
+				t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
+			} else {
+				t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
+			}
+		} else {
+			t.Config.Context.Log.Verbo("VM.BuildBlock errored with %s", err)
+		}
+	default:
+		t.Config.Context.Log.Warn("Unexpected message from the VM: %s", msg)
+	}
+}
+
+// repoll requeries the network about the currently preferred block.
+func (t *Transitive) repoll() {
+	prefID := t.Consensus.Preference()
+	t.pullSample(prefID)
+}
+
+// reinsertFrom loads blkID from the VM and issues it; if the VM doesn't have
+// the block it is requested from vdr. Returns false when the block (or an
+// ancestor) still has to be fetched.
+func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) bool {
+	blk, err := t.Config.VM.GetBlock(blkID)
+	if err != nil {
+		t.sendRequest(vdr, blkID)
+		return false
+	}
+	return t.insertFrom(vdr, blk)
+}
+
+// insertFrom issues blk and walks up its ancestry, requesting any unfetched
+// ancestor from vdr. Returns false when an ancestor still has to be fetched.
+func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) bool {
+	blkID := blk.ID()
+	for !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
+		t.insert(blk)
+
+		parent := blk.Parent()
+		parentID := parent.ID()
+		if parentStatus := parent.Status(); !parentStatus.Fetched() {
+			// An ancestor is missing; ask vdr for it and stop walking.
+			t.sendRequest(vdr, parentID)
+			return false
+		}
+
+		blk = parent
+		blkID = parentID
+	}
+	return !t.pending.Contains(blkID)
+}
+
+// insertAll issues blk and every fetched, not-yet-issued ancestor. Unlike
+// insertFrom it never requests missing ancestors from the network.
+func (t *Transitive) insertAll(blk snowman.Block) bool {
+	blkID := blk.ID()
+	for blk.Status().Fetched() && !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
+		t.insert(blk)
+		blk = blk.Parent()
+	}
+	return !t.pending.Contains(blkID)
+}
+
+// insert marks blk as pending and registers an issuer for it; the issuer is
+// blocked on the parent when the parent hasn't been issued yet.
+func (t *Transitive) insert(blk snowman.Block) {
+	blkID := blk.ID()
+
+	t.pending.Add(blkID)
+	// Any outstanding fetch for this block is now satisfied.
+	t.blkReqs.Remove(blkID)
+
+	i := &issuer{
+		t:   t,
+		blk: blk,
+	}
+
+	if parent := blk.Parent(); !t.Consensus.Issued(parent) {
+		parentID := parent.ID()
+		t.Config.Context.Log.Verbo("Block waiting for parent %s", parentID)
+		i.deps.Add(parentID)
+	}
+
+	t.blocked.Register(i)
+
+	// Tracks performance statistics
+	t.numBlkRequests.Set(float64(t.blkReqs.Len()))
+	t.numBlockedBlk.Set(float64(t.pending.Len()))
+}
+
+func
(t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
+	// Only one outstanding request per block ID.
+	if !t.blkReqs.Contains(blkID) {
+		t.blkReqs.Add(blkID)
+
+		t.numBlkRequests.Set(float64(t.blkReqs.Len())) // Tracks performance statistics
+
+		t.RequestID++
+		t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)
+		t.Config.Sender.Get(vdr, t.RequestID, blkID)
+	}
+}
+
+// pullSample starts a PullQuery poll for blkID against a fresh sample of K
+// validators. The poll is dropped when fewer than K validators are available.
+func (t *Transitive) pullSample(blkID ids.ID) {
+	t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
+	p := t.Consensus.Parameters()
+	vdrs := t.Config.Validators.Sample(p.K)
+	vdrSet := ids.ShortSet{}
+	for _, vdr := range vdrs {
+		vdrSet.Add(vdr.ID())
+	}
+
+	t.RequestID++
+	if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
+		t.Config.Sender.PullQuery(vdrSet, t.RequestID, blkID)
+	} else if numVdrs < p.K {
+		t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
+	}
+}
+
+// pushSample is the push variant of pullSample: the block's bytes are sent
+// along with the query.
+func (t *Transitive) pushSample(blk snowman.Block) {
+	t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
+	p := t.Consensus.Parameters()
+	vdrs := t.Config.Validators.Sample(p.K)
+	vdrSet := ids.ShortSet{}
+	for _, vdr := range vdrs {
+		vdrSet.Add(vdr.ID())
+	}
+
+	t.RequestID++
+	if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
+		t.Config.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes())
+	} else if numVdrs < p.K {
+		t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blk.ID())
+	}
+}
+
+// deliver verifies blk, adds it to consensus, queries the network about it,
+// and fulfills (or, on verification failure, abandons) operations blocked
+// on it.
+func (t *Transitive) deliver(blk snowman.Block) {
+	if t.Consensus.Issued(blk) {
+		return
+	}
+
+	blkID := blk.ID()
+	t.pending.Remove(blkID)
+
+	if err := blk.Verify(); err != nil {
+		t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
+		t.blocked.Abandon(blkID)
+		t.numBlockedBlk.Set(float64(t.pending.Len())) // Tracks performance statistics
+		return
+	}
+
+	t.Config.Context.Log.Verbo("Adding block to consensus:
%s", blkID) + + t.Consensus.Add(blk) + t.pushSample(blk) + + added := []snowman.Block{} + dropped := []snowman.Block{} + switch blk := blk.(type) { + case OracleBlock: + for _, blk := range blk.Options() { + if err := blk.Verify(); err != nil { + t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err) + t.blocked.Abandon(blk.ID()) + dropped = append(dropped, blk) + } else { + t.Consensus.Add(blk) + t.pushSample(blk) + added = append(added, blk) + } + } + } + + t.Config.VM.SetPreference(t.Consensus.Preference()) + t.blocked.Fulfill(blkID) + + for _, blk := range added { + blkID := blk.ID() + t.pending.Remove(blkID) + t.blocked.Fulfill(blkID) + } + for _, blk := range dropped { + blkID := blk.ID() + t.pending.Remove(blkID) + t.blocked.Abandon(blkID) + } + + // Tracks performance statistics + t.numBlkRequests.Set(float64(t.blkReqs.Len())) + t.numBlockedBlk.Set(float64(t.pending.Len())) +} diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go new file mode 100644 index 0000000..1920d8c --- /dev/null +++ b/snow/engine/snowman/transitive_test.go @@ -0,0 +1,1078 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "bytes" + "errors" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/validators" +) + +var ( + errUnknownBytes = errors.New("unknown bytes") +) + +func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTest, *VMTest, *Transitive, snowman.Block) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + vm.CantSetPreference = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + sender.CantGetAcceptedFrontier = false + + te := &Transitive{} + + te.Initialize(config) + te.finishBootstrapping() + + vm.LastAcceptedF = nil + sender.CantGetAcceptedFrontier = true + + return vdr, vals, sender, vm, te, gBlk +} + +func TestEngineAdd(t *testing.T) { + vdr, _, sender, vm, te, _ := setup(t) + + if !te.Context().ChainID.Equals(ids.Empty) { + t.Fatalf("Wrong chain ID") + } + + blk := &Blk{ + parent: &Blk{ + id: GenerateID(), + status: choices.Unknown, + }, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + asked := new(bool) + sender.GetF = func(inVdr ids.ShortID, _ uint32, blkID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for block") + } + if !blkID.Equals(blk.Parent().ID()) { + t.Fatalf("Asking for wrong block") + } + } + + vm.ParseBlockF = func(b []byte) (snowman.Block, 
error) { + if !bytes.Equal(b, blk.Bytes()) { + t.Fatalf("Wrong bytes") + } + return blk, nil + } + + te.Put(vdr.ID(), 0, blk.ID(), blk.Bytes()) + + vm.ParseBlockF = nil + + if !*asked { + t.Fatalf("Didn't ask for a missing block") + } + + if len(te.blocked) != 1 { + t.Fatalf("Should have been blocking on request") + } + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errParseBlock } + + te.Put(vdr.ID(), 0, blk.Parent().ID(), nil) + + vm.ParseBlockF = nil + + if len(te.blocked) != 0 { + t.Fatalf("Should have finished blocking issue") + } +} + +func TestEngineQuery(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + blocked := new(bool) + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + if *blocked { + t.Fatalf("Sent multiple requests") + } + *blocked = true + if !blkID.Equals(blk.ID()) { + t.Fatalf("Wrong block requested") + } + return &Blk{id: blkID, status: choices.Unknown}, errUnknownBlock + } + + asked := new(bool) + getRequestID := new(uint32) + sender.GetF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + *getRequestID = requestID + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for block") + } + if !blk.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + te.PullQuery(vdr.ID(), 15, blk.ID()) + if !*blocked { + t.Fatalf("Didn't request block") + } + if !*asked { + t.Fatalf("Didn't request block from validator") + } + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for 
preference") + } + if !blk.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + chitted := new(bool) + sender.ChitsF = func(inVdr ids.ShortID, requestID uint32, prefSet ids.Set) { + if *chitted { + t.Fatalf("Sent multiple chits") + } + *chitted = true + if requestID != 15 { + t.Fatalf("Wrong request ID") + } + if prefSet.Len() != 1 { + t.Fatal("Should only be one vote") + } + if !blk.ID().Equals(prefSet.List()[0]) { + t.Fatalf("Wrong chits block") + } + } + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + if !bytes.Equal(b, blk.Bytes()) { + t.Fatalf("Wrong bytes") + } + return blk, nil + } + te.Put(vdr.ID(), *getRequestID, blk.ID(), blk.Bytes()) + vm.ParseBlockF = nil + + if !*queried { + t.Fatalf("Didn't ask for preferences") + } + if !*chitted { + t.Fatalf("Didn't provide preferences") + } + + blk1 := &Blk{ + parent: blk, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{5, 4, 3, 2, 1, 9}, + } + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blk.ID()): + return blk, nil + case blkID.Equals(blk1.ID()): + return &Blk{id: blkID, status: choices.Unknown}, errUnknownBlock + } + t.Fatalf("Wrong block requested") + panic("Should have failed") + } + + *asked = false + sender.GetF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + *getRequestID = requestID + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for block") + } + if !blk1.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + blkSet := ids.Set{} + blkSet.Add(blk1.ID()) + te.Chits(vdr.ID(), *queryRequestID, blkSet) + + *queried = false + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if 
!inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blkID.Equals(blk1.ID()) { + t.Fatalf("Asking for wrong block") + } + } + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + if !bytes.Equal(b, blk1.Bytes()) { + t.Fatalf("Wrong bytes") + } + return blk1, nil + } + te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + vm.ParseBlockF = nil + + if blk1.Status() != choices.Accepted { + t.Fatalf("Should have executed block") + } + if len(te.blocked) != 0 { + t.Fatalf("Should have finished blocking") + } + + _ = te.polls.String() // Shouldn't panic + + te.QueryFailed(vdr.ID(), *queryRequestID) + if len(te.blocked) != 0 { + t.Fatalf("Should have finished blocking") + } +} + +func TestEngineMultipleQuery(t *testing.T) { + config := DefaultConfig() + + config.Params = snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + } + + vdr0 := validators.GenerateRandomValidator(1) + vdr1 := validators.GenerateRandomValidator(1) + vdr2 := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr0) + vals.Add(vdr1) + vals.Add(vdr2) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + vm.CantSetPreference = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + sender.CantGetAcceptedFrontier = false + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + vm.LastAcceptedF = nil + sender.CantGetAcceptedFrontier = true + + blk0 := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + 
t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blk0.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + te.insert(blk0) + + blk1 := &Blk{ + parent: blk0, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + switch { + case id.Equals(gBlk.ID()): + return gBlk, nil + case id.Equals(blk0.ID()): + return blk0, nil + case id.Equals(blk1.ID()): + return &Blk{id: blk0.ID(), status: choices.Unknown}, errUnknownBlock + } + t.Fatalf("Unknown block") + panic("Should have errored") + } + + asked := new(bool) + getRequestID := new(uint32) + sender.GetF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID) { + if *asked { + t.Fatalf("Asked multiple times") + } + *asked = true + *getRequestID = requestID + if !vdr0.ID().Equals(inVdr) { + t.Fatalf("Asking wrong validator for block") + } + if !blk1.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + blkSet := ids.Set{} + blkSet.Add(blk1.ID()) + te.Chits(vdr0.ID(), *queryRequestID, blkSet) + te.Chits(vdr1.ID(), *queryRequestID, blkSet) + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + return blk1, nil + } + + *queried = false + secondQueryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *secondQueryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blk1.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + te.Put(vdr0.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) + + // Should be dropped because the query 
was already filled + blkSet = ids.Set{} + blkSet.Add(blk0.ID()) + te.Chits(vdr2.ID(), *queryRequestID, blkSet) + + if blk1.Status() != choices.Accepted { + t.Fatalf("Should have executed block") + } + if len(te.blocked) != 0 { + t.Fatalf("Should have finished blocking") + } +} + +func TestEngineBlockedIssue(t *testing.T) { + _, _, sender, _, te, gBlk := setup(t) + + sender.Default(false) + + blk0 := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Unknown, + bytes: []byte{1}, + } + + blk1 := &Blk{ + parent: blk0, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + te.insert(blk1) + + blk0.status = choices.Processing + te.insert(blk0) + + if !blk1.ID().Equals(te.Consensus.Preference()) { + t.Fatalf("Should have issued blk1") + } +} + +func TestEngineAbandonResponse(t *testing.T) { + vdr, _, sender, _, te, gBlk := setup(t) + + sender.Default(false) + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Unknown, + bytes: []byte{1}, + } + + te.insert(blk) + te.QueryFailed(vdr.ID(), 1) + + if len(te.blocked) != 0 { + t.Fatalf("Should have removed blocking event") + } +} + +func TestEngineFetchBlock(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(false) + + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + if id.Equals(gBlk.ID()) { + return gBlk, nil + } + t.Fatalf("Unknown block") + panic("Should have failed") + } + + added := new(bool) + sender.PutF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) { + if !vdr.ID().Equals(inVdr) { + t.Fatalf("Wrong validator") + } + if requestID != 123 { + t.Fatalf("Wrong request id") + } + if !gBlk.ID().Equals(blkID) { + t.Fatalf("Wrong blockID") + } + *added = true + } + + te.Get(vdr.ID(), 123, gBlk.ID()) + + if !*added { + t.Fatalf("Should have sent block to peer") + } +} + +func TestEnginePushQuery(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + blk := &Blk{ + parent: gBlk, + id: 
GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + if bytes.Equal(b, blk.Bytes()) { + return blk, nil + } + return nil, errUnknownBytes + } + + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + if id.Equals(blk.ID()) { + return blk, nil + } + t.Fatal(errUnknownBytes) + panic(errUnknownBytes) + } + + chitted := new(bool) + sender.ChitsF = func(inVdr ids.ShortID, requestID uint32, votes ids.Set) { + if *chitted { + t.Fatalf("Sent chit multiple times") + } + *chitted = true + if !inVdr.Equals(vdr.ID()) { + t.Fatalf("Asking wrong validator for preference") + } + if requestID != 20 { + t.Fatalf("Wrong request id") + } + if votes.Len() != 1 { + t.Fatal("votes should only have one element") + } + vote := votes.List()[0] + if !blk.ID().Equals(vote) { + t.Fatalf("Asking for wrong block") + } + } + + queried := new(bool) + sender.PushQueryF = func(inVdrs ids.ShortSet, _ uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blk.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + te.PushQuery(vdr.ID(), 20, blk.ID(), blk.Bytes()) + + if !*chitted { + t.Fatalf("Should have sent a chit to the peer") + } + if !*queried { + t.Fatalf("Should have sent a query to the peer") + } +} + +func TestEngineBuildBlock(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + queried := new(bool) + sender.PushQueryF = func(inVdrs ids.ShortSet, _ uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking 
wrong validator for preference") + } + } + + vm.BuildBlockF = func() (snowman.Block, error) { return blk, nil } + te.Notify(common.PendingTxs) + + if !*queried { + t.Fatalf("Should have sent a query to the peer") + } +} + +func TestEngineRepoll(t *testing.T) { + vdr, _, sender, _, te, _ := setup(t) + + sender.Default(true) + + queried := new(bool) + sender.PullQueryF = func(inVdrs ids.ShortSet, _ uint32, blkID ids.ID) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + } + + te.repoll() + + if !*queried { + t.Fatalf("Should have sent a query to the peer") + } +} + +func TestVoteCanceling(t *testing.T) { + config := DefaultConfig() + + config.Params = snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + } + + vdr0 := validators.GenerateRandomValidator(1) + vdr1 := validators.GenerateRandomValidator(1) + vdr2 := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr0) + vals.Add(vdr1) + vals.Add(vdr2) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + vm.CantSetPreference = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + switch { + case id.Equals(gBlk.ID()): + return gBlk, nil + default: + t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + sender.CantGetAcceptedFrontier = false + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + vm.LastAcceptedF = nil + sender.CantGetAcceptedFrontier = true + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: 
[]byte{1}, + } + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr0.ID(), vdr1.ID(), vdr2.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blk.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + te.insert(blk) + + if len(te.polls.m) != 1 { + t.Fatalf("Shouldn't have finished blocking issue") + } + + te.QueryFailed(vdr0.ID(), *queryRequestID) + + if len(te.polls.m) != 1 { + t.Fatalf("Shouldn't have finished blocking issue") + } + + repolled := new(bool) + sender.PullQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID) { + *repolled = true + } + te.QueryFailed(vdr1.ID(), *queryRequestID) + + if !*repolled { + t.Fatalf("Should have finished blocking issue and repolled the network") + } +} + +func TestEngineNoQuery(t *testing.T) { + config := DefaultConfig() + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm := &VMTest{} + vm.T = t + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + + config.VM = vm + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + te.insert(blk) +} + +func TestEngineNoRepollQuery(t *testing.T) { + config := DefaultConfig() + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm := &VMTest{} + vm.T = t + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + + 
config.VM = vm + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + te.repoll() +} + +func TestEngineAbandonQuery(t *testing.T) { + vdr, _, sender, vm, te, _ := setup(t) + + sender.Default(true) + + blkID := GenerateID() + + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + switch { + case id.Equals(blkID): + return &Blk{status: choices.Unknown}, errUnknownBlock + default: + t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + sender.CantGet = false + + te.PullQuery(vdr.ID(), 0, blkID) + + if len(te.blocked) != 1 { + t.Fatalf("Should have blocked on request") + } + + te.GetFailed(vdr.ID(), 0, blkID) + + if len(te.blocked) != 0 { + t.Fatalf("Should have removed request") + } +} + +func TestEngineAbandonChit(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + sender.CantPushQuery = false + + te.insert(blk) + + fakeBlkID := GenerateID() + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + switch { + case id.Equals(fakeBlkID): + return &Blk{status: choices.Unknown}, errUnknownBlock + default: + t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + sender.CantGet = false + fakeBlkIDSet := ids.Set{} + fakeBlkIDSet.Add(fakeBlkID) + te.Chits(vdr.ID(), 0, fakeBlkIDSet) + + if len(te.blocked) != 1 { + t.Fatalf("Should have blocked on request") + } + + te.GetFailed(vdr.ID(), 0, fakeBlkID) + + if len(te.blocked) != 0 { + t.Fatalf("Should have removed request") + } +} + +func TestEngineBlockingChitRequest(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + missingBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Unknown, + bytes: []byte{1}, + } + parentBlk := &Blk{ + parent: missingBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + blockingBlk := &Blk{ + parent: parentBlk, + id: 
GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + te.insert(parentBlk) + + vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + switch { + case bytes.Equal(b, blockingBlk.Bytes()): + return blockingBlk, nil + default: + t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blockingBlk.ID()): + return blockingBlk, nil + default: + t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + + te.PushQuery(vdr.ID(), 0, blockingBlk.ID(), blockingBlk.Bytes()) + + if len(te.blocked) != 3 { + t.Fatalf("Both inserts should be blocking in addition to the chit request") + } + + sender.CantPushQuery = false + sender.CantChits = false + + missingBlk.status = choices.Processing + te.insert(missingBlk) + + if len(te.blocked) != 0 { + t.Fatalf("Both inserts should not longer be blocking") + } +} + +func TestEngineBlockingChitResponse(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + issuedBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + missingBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Unknown, + bytes: []byte{1}, + } + blockingBlk := &Blk{ + parent: missingBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + te.insert(blockingBlk) + + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blkID.Equals(issuedBlk.ID()) { + t.Fatalf("Asking for wrong block") + } + } + + te.insert(issuedBlk) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blockingBlk.ID()): + return blockingBlk, nil + default: + 
t.Fatalf("Loaded unknown block") + panic("Should have failed") + } + } + blockingBlkIDSet := ids.Set{} + blockingBlkIDSet.Add(blockingBlk.ID()) + te.Chits(vdr.ID(), *queryRequestID, blockingBlkIDSet) + + if len(te.blocked) != 2 { + t.Fatalf("The insert and the chit should be blocking") + } + + sender.PushQueryF = nil + sender.CantPushQuery = false + + missingBlk.status = choices.Processing + te.insert(missingBlk) +} + +func TestEngineRetryFetch(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + missingBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + height: 1, + status: choices.Unknown, + bytes: []byte{1}, + } + + vm.CantGetBlock = false + sender.CantGet = false + + te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + + vm.CantGetBlock = true + sender.CantGet = true + + te.GetFailed(vdr.ID(), 0, missingBlk.ID()) + + vm.CantGetBlock = false + + called := new(bool) + sender.GetF = func(ids.ShortID, uint32, ids.ID) { + *called = true + } + + te.PullQuery(vdr.ID(), 0, missingBlk.ID()) + + vm.CantGetBlock = true + sender.CantGet = true + + if !*called { + t.Fatalf("Should have requested the block again") + } +} diff --git a/snow/engine/snowman/vm.go b/snow/engine/snowman/vm.go new file mode 100644 index 0000000..2600314 --- /dev/null +++ b/snow/engine/snowman/vm.go @@ -0,0 +1,57 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" +) + +// ChainVM defines the required functionality of a Snowman VM. +// +// A Snowman VM is responsible for defining the representation of state, +// the representation of operations on that state, the application of operations +// on that state, and the creation of the operations. Consensus will decide on +// if the operation is executed and the order operations are executed in. 
+// +// For example, suppose we have a VM that tracks an increasing number that +// is agreed upon by the network. +// The state is a single number. +// The operation is setting the number to a new, larger value. +// Applying the operation will save to the database the new value. +// The VM can attempt to issue a new number, of larger value, at any time. +// Consensus will ensure the network agrees on the number at every block height. +type ChainVM interface { + common.VM + + // Attempt to create a new block from data contained in the VM. + // + // If the VM doesn't want to issue a new block, an error should be + // returned. + BuildBlock() (snowman.Block, error) + + // Attempt to create a block from a stream of bytes. + // + // The block should be represented by the full byte array, without extra + // bytes. + ParseBlock([]byte) (snowman.Block, error) + + // Attempt to load a block. + // + // If the block does not exist, then an error should be returned. + GetBlock(ids.ID) (snowman.Block, error) + + // Notify the VM of the currently preferred block. + // + // This should always be a block that has no children known to consensus. + SetPreference(ids.ID) + + // LastAccepted returns the ID of the last accepted block. + // + // If no blocks have been accepted by consensus yet, it is assumed there is + // a definitionally accepted block, the Genesis block, that will be + // returned. + LastAccepted() ids.ID +} diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go new file mode 100644 index 0000000..d9c8a7f --- /dev/null +++ b/snow/engine/snowman/voter.go @@ -0,0 +1,59 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "github.com/ava-labs/gecko/ids" +) + +type voter struct { + t *Transitive + vdr ids.ShortID + requestID uint32 + response ids.ID + deps ids.Set +} + +func (v *voter) Dependencies() ids.Set { return v.deps } + +func (v *voter) Fulfill(id ids.ID) { + v.deps.Remove(id) + v.Update() +} + +func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) } + +func (v *voter) Update() { + if v.deps.Len() != 0 { + return + } + + results := ids.Bag{} + finished := false + if v.response.IsZero() { + results, finished = v.t.polls.CancelVote(v.requestID, v.vdr) + } else { + results, finished = v.t.polls.Vote(v.requestID, v.vdr, v.response) + } + + if !finished { + return + } + + v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results) + v.t.Consensus.RecordPoll(results) + + v.t.Config.VM.SetPreference(v.t.Consensus.Preference()) + + if v.t.Consensus.Finalized() { + v.t.Config.Context.Log.Verbo("Snowman engine can quiesce") + return + } + + v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce") + + if len(v.t.polls.m) == 0 { + v.t.repoll() + } +} diff --git a/snow/events/blockable.go b/snow/events/blockable.go new file mode 100644 index 0000000..964d0b9 --- /dev/null +++ b/snow/events/blockable.go @@ -0,0 +1,20 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package events + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Blockable defines what an object must implement to be able to block on events +type Blockable interface { + // IDs that this object is blocking on + Dependencies() ids.Set + // Notify this object that an event has been fulfilled + Fulfill(ids.ID) + // Notify this object that an event has been abandoned + Abandon(ids.ID) + // Update the state of this object without changing the status of any events + Update() +} diff --git a/snow/events/blockable_test.go b/snow/events/blockable_test.go new file mode 100644 index 0000000..45efdad --- /dev/null +++ b/snow/events/blockable_test.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package events + +import ( + "github.com/ava-labs/gecko/ids" +) + +var ( + offset = uint64(0) +) + +func GenerateID() ids.ID { + offset++ + return ids.Empty.Prefix(offset) +} + +type blockable struct { + dependencies func() ids.Set + fulfill func(ids.ID) + abandon func(ids.ID) + update func() +} + +func (b *blockable) Default() { + *b = blockable{ + dependencies: func() ids.Set { return ids.Set{} }, + fulfill: func(ids.ID) {}, + abandon: func(ids.ID) {}, + update: func() {}, + } +} + +func (b *blockable) Dependencies() ids.Set { return b.dependencies() } +func (b *blockable) Fulfill(id ids.ID) { b.fulfill(id) } +func (b *blockable) Abandon(id ids.ID) { b.abandon(id) } +func (b *blockable) Update() { b.update() } diff --git a/snow/events/blocker.go b/snow/events/blocker.go new file mode 100644 index 0000000..6bfdd7b --- /dev/null +++ b/snow/events/blocker.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package events + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" +) + +// Blocker tracks objects that are blocked +type Blocker map[[32]byte][]Blockable + +func (b *Blocker) init() { + if *b == nil { + *b = make(map[[32]byte][]Blockable) + } +} + +// Fulfill notifies all objects blocking on the event whose ID is that +// the event has happened +func (b *Blocker) Fulfill(id ids.ID) { + b.init() + + key := id.Key() + blocking := (*b)[key] + delete(*b, key) + + for _, pending := range blocking { + pending.Fulfill(id) + } +} + +// Abandon notifies all objects blocking on the event whose ID is that +// the event has been abandoned +func (b *Blocker) Abandon(id ids.ID) { + b.init() + + key := id.Key() + blocking := (*b)[key] + delete(*b, key) + + for _, pending := range blocking { + pending.Abandon(id) + } +} + +// Register a new Blockable and its dependencies +func (b *Blocker) Register(pending Blockable) { + b.init() + + for _, pendingID := range pending.Dependencies().List() { + key := pendingID.Key() + (*b)[key] = append((*b)[key], pending) + } + + pending.Update() +} + +// PrefixedString returns the same value as the String function, with all the +// new lines prefixed by [prefix] +func (b *Blocker) PrefixedString(prefix string) string { + b.init() + + s := strings.Builder{} + + s.WriteString(fmt.Sprintf("Blocking on %d IDs:", len(*b))) + + for key, value := range *b { + s.WriteString(fmt.Sprintf("\n%sID[%s]: %d", + prefix, + ids.NewID(key), + len(value))) + } + + return strings.TrimSuffix(s.String(), "\n") +} + +func (b *Blocker) String() string { return b.PrefixedString("") } diff --git a/snow/events/blocker_test.go b/snow/events/blocker_test.go new file mode 100644 index 0000000..2d74514 --- /dev/null +++ b/snow/events/blocker_test.go @@ -0,0 +1,79 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package events + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestBlocker(t *testing.T) { + b := Blocker(nil) + + a := &blockable{} + a.Default() + + id0 := GenerateID() + id1 := GenerateID() + id2 := GenerateID() + + calledDep := new(bool) + a.dependencies = func() ids.Set { + *calledDep = true + + s := ids.Set{} + s.Add(id0, id1) + return s + } + calledFill := new(bool) + a.fulfill = func(ids.ID) { + *calledFill = true + } + calledAbandon := new(bool) + a.abandon = func(ids.ID) { + *calledAbandon = true + } + calledUpdate := new(bool) + a.update = func() { + *calledUpdate = true + } + + b.Register(a) + + switch { + case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: + t.Fatalf("Called wrong function") + } + + b.Fulfill(id2) + b.Abandon(id2) + + switch { + case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: + t.Fatalf("Called wrong function") + } + + b.Fulfill(id0) + + switch { + case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: + t.Fatalf("Called wrong function") + } + + b.Abandon(id0) + + switch { + case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: + t.Fatalf("Called wrong function") + } + + b.Abandon(id1) + + switch { + case !*calledDep, !*calledFill, !*calledAbandon, !*calledUpdate: + t.Fatalf("Called wrong function") + } + +} diff --git a/snow/networking/awaiting_connections.go b/snow/networking/awaiting_connections.go new file mode 100644 index 0000000..0b5047d --- /dev/null +++ b/snow/networking/awaiting_connections.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package networking + +import ( + "github.com/ava-labs/gecko/ids" +) + +// AwaitingConnections ... +type AwaitingConnections struct { + Requested ids.ShortSet + NumRequired int + Finish func() + + connected ids.ShortSet +} + +// Add ... 
+func (aw *AwaitingConnections) Add(conn ids.ShortID) { + if aw.Requested.Contains(conn) { + aw.connected.Add(conn) + } +} + +// Remove ... +func (aw *AwaitingConnections) Remove(conn ids.ShortID) { + aw.connected.Remove(conn) +} + +// Ready ... +func (aw *AwaitingConnections) Ready() bool { + return aw.connected.Len() >= aw.NumRequired +} diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go new file mode 100644 index 0000000..dec10b7 --- /dev/null +++ b/snow/networking/handler/handler.go @@ -0,0 +1,244 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handler + +import ( + "sync" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" +) + +// Handler passes incoming messages from the network to the consensus engine +// (Actually, it receives the incoming messages from a ChainRouter, but same difference) +type Handler struct { + msgs chan message + wg sync.WaitGroup + engine common.Engine + msgChan <-chan common.Message +} + +// Initialize this consensus handler +func (h *Handler) Initialize(engine common.Engine, msgChan <-chan common.Message, bufferSize int) { + h.msgs = make(chan message, bufferSize) + h.engine = engine + h.msgChan = msgChan + + h.wg.Add(1) +} + +// Context of this Handler +func (h *Handler) Context() *snow.Context { return h.engine.Context() } + +// Dispatch waits for incoming messages from the network +// and, when they arrive, sends them to the consensus engine +func (h *Handler) Dispatch() { + defer h.wg.Done() + + for { + select { + case msg := <-h.msgs: + if !h.dispatchMsg(msg) { + return + } + case msg := <-h.msgChan: + if !h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) { + return + } + } + } +} + +// Dispatch a message to the consensus engine. 
+// Returns false iff this consensus handler (and its associated engine) should shutdown +// (due to receipt of a shutdown message) +func (h *Handler) dispatchMsg(msg message) bool { + ctx := h.engine.Context() + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + ctx.Log.Verbo("Forwarding message to consensus: %s", msg) + + switch msg.messageType { + case getAcceptedFrontierMsg: + h.engine.GetAcceptedFrontier(msg.validatorID, msg.requestID) + case acceptedFrontierMsg: + h.engine.AcceptedFrontier(msg.validatorID, msg.requestID, msg.containerIDs) + case getAcceptedFrontierFailedMsg: + h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID) + case getAcceptedMsg: + h.engine.GetAccepted(msg.validatorID, msg.requestID, msg.containerIDs) + case acceptedMsg: + h.engine.Accepted(msg.validatorID, msg.requestID, msg.containerIDs) + case getAcceptedFailedMsg: + h.engine.GetAcceptedFailed(msg.validatorID, msg.requestID) + case getMsg: + h.engine.Get(msg.validatorID, msg.requestID, msg.containerID) + case getFailedMsg: + h.engine.GetFailed(msg.validatorID, msg.requestID, msg.containerID) + case putMsg: + h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container) + case pushQueryMsg: + h.engine.PushQuery(msg.validatorID, msg.requestID, msg.containerID, msg.container) + case pullQueryMsg: + h.engine.PullQuery(msg.validatorID, msg.requestID, msg.containerID) + case queryFailedMsg: + h.engine.QueryFailed(msg.validatorID, msg.requestID) + case chitsMsg: + h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs) + case notifyMsg: + h.engine.Notify(msg.notification) + case shutdownMsg: + h.engine.Shutdown() + return false + } + return true +} + +// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the +// network to the consensus engine. 
+func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) { + h.msgs <- message{ + messageType: getAcceptedFrontierMsg, + validatorID: validatorID, + requestID: requestID, + } +} + +// AcceptedFrontier passes a AcceptedFrontier message received from the network +// to the consensus engine. +func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + h.msgs <- message{ + messageType: acceptedFrontierMsg, + validatorID: validatorID, + requestID: requestID, + containerIDs: containerIDs, + } +} + +// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received +// from the network to the consensus engine. +func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) { + h.msgs <- message{ + messageType: getAcceptedFrontierFailedMsg, + validatorID: validatorID, + requestID: requestID, + } +} + +// GetAccepted passes a GetAccepted message received from the +// network to the consensus engine. +func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + h.msgs <- message{ + messageType: getAcceptedMsg, + validatorID: validatorID, + requestID: requestID, + containerIDs: containerIDs, + } +} + +// Accepted passes a Accepted message received from the network to the consensus +// engine. +func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { + h.msgs <- message{ + messageType: acceptedMsg, + validatorID: validatorID, + requestID: requestID, + containerIDs: containerIDs, + } +} + +// GetAcceptedFailed passes a GetAcceptedFailed message received from the +// network to the consensus engine. +func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) { + h.msgs <- message{ + messageType: getAcceptedFailedMsg, + validatorID: validatorID, + requestID: requestID, + } +} + +// Get passes a Get message received from the network to the consensus engine. 
+func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { + h.msgs <- message{ + messageType: getMsg, + validatorID: validatorID, + requestID: requestID, + containerID: containerID, + } +} + +// Put passes a Put message received from the network to the consensus engine. +func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) { + h.msgs <- message{ + messageType: putMsg, + validatorID: validatorID, + requestID: requestID, + containerID: containerID, + container: container, + } +} + +// GetFailed passes a GetFailed message to the consensus engine. +func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { + h.msgs <- message{ + messageType: getFailedMsg, + validatorID: validatorID, + requestID: requestID, + containerID: containerID, + } +} + +// PushQuery passes a PushQuery message received from the network to the consensus engine. +func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) { + h.msgs <- message{ + messageType: pushQueryMsg, + validatorID: validatorID, + requestID: requestID, + containerID: blockID, + container: block, + } +} + +// PullQuery passes a PullQuery message received from the network to the consensus engine. +func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) { + h.msgs <- message{ + messageType: pullQueryMsg, + validatorID: validatorID, + requestID: requestID, + containerID: blockID, + } +} + +// Chits passes a Chits message received from the network to the consensus engine. +func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) { + h.msgs <- message{ + messageType: chitsMsg, + validatorID: validatorID, + requestID: requestID, + containerIDs: votes, + } +} + +// QueryFailed passes a QueryFailed message received from the network to the consensus engine. 
+func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) { + h.msgs <- message{ + messageType: queryFailedMsg, + validatorID: validatorID, + requestID: requestID, + } +} + +// Shutdown shuts down the dispatcher +func (h *Handler) Shutdown() { h.msgs <- message{messageType: shutdownMsg}; h.wg.Wait() } + +// Notify ... +func (h *Handler) Notify(msg common.Message) { + h.msgs <- message{ + messageType: notifyMsg, + notification: msg, + } +} diff --git a/snow/networking/handler/message.go b/snow/networking/handler/message.go new file mode 100644 index 0000000..27d852d --- /dev/null +++ b/snow/networking/handler/message.go @@ -0,0 +1,95 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handler + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/engine/common" +) + +type msgType int + +const ( + nullMsg msgType = iota + getAcceptedFrontierMsg + acceptedFrontierMsg + getAcceptedFrontierFailedMsg + getAcceptedMsg + acceptedMsg + getAcceptedFailedMsg + getMsg + putMsg + getFailedMsg + pushQueryMsg + pullQueryMsg + chitsMsg + queryFailedMsg + notifyMsg + shutdownMsg +) + +type message struct { + messageType msgType + validatorID ids.ShortID + requestID uint32 + containerID ids.ID + container []byte + containerIDs ids.Set + notification common.Message +} + +func (m message) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("\n messageType: %s", m.messageType.String())) + sb.WriteString(fmt.Sprintf("\n validatorID: %s", m.validatorID.String())) + sb.WriteString(fmt.Sprintf("\n requestID: %d", m.requestID)) + sb.WriteString(fmt.Sprintf("\n containerID: %s", m.containerID.String())) + sb.WriteString(fmt.Sprintf("\n containerIDs: %s", m.containerIDs.String())) + if m.messageType == notifyMsg { + sb.WriteString(fmt.Sprintf("\n notification: %s", m.notification.String())) + } + return sb.String() +} + +func (t msgType) String() 
string { + switch t { + case nullMsg: + return "Null Message" + case getAcceptedFrontierMsg: + return "Get Accepted Frontier Message" + case acceptedFrontierMsg: + return "Accepted Frontier Message" + case getAcceptedFrontierFailedMsg: + return "Get Accepted Frontier Failed Message" + case getAcceptedMsg: + return "Get Accepted Message" + case acceptedMsg: + return "Accepted Message" + case getAcceptedFailedMsg: + return "Get Accepted Failed Message" + case getMsg: + return "Get Message" + case putMsg: + return "Put Message" + case getFailedMsg: + return "Get Failed Message" + case pushQueryMsg: + return "Push Query Message" + case pullQueryMsg: + return "Pull Query Message" + case chitsMsg: + return "Chits Message" + case queryFailedMsg: + return "Query Failed Message" + case notifyMsg: + return "Notify Message" + case shutdownMsg: + return "Shutdown Message" + default: + return fmt.Sprintf("Unknown Message Type: %d", t) + } +} diff --git a/snow/networking/router/router.go b/snow/networking/router/router.go new file mode 100644 index 0000000..4ed00f8 --- /dev/null +++ b/snow/networking/router/router.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package router + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/networking/handler" + "github.com/ava-labs/gecko/snow/networking/timeout" + "github.com/ava-labs/gecko/utils/logging" +) + +// Router routes consensus messages to the Handler of the consensus +// engine that the messages are intended for +type Router interface { + ExternalRouter + InternalRouter + + AddChain(chain *handler.Handler) + RemoveChain(chainID ids.ID) + Shutdown() + Initialize(log logging.Logger, timeouts *timeout.Manager) +} + +// ExternalRouter routes messages from the network to the +// Handler of the consensus engine that the message is intended for +type ExternalRouter interface { + GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32) + AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) + GetAccepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) + Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) + Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) + Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) + PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) + PullQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) + Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) +} + +// InternalRouter deals with messages internal to this node +type InternalRouter interface { + GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) + GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) + GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) + QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) +} diff --git 
a/snow/networking/router/subnet_router.go b/snow/networking/router/subnet_router.go new file mode 100644 index 0000000..93da106 --- /dev/null +++ b/snow/networking/router/subnet_router.go @@ -0,0 +1,256 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package router + +import ( + "sync" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/networking/handler" + "github.com/ava-labs/gecko/snow/networking/timeout" + "github.com/ava-labs/gecko/utils/logging" +) + +// ChainRouter routes incoming messages from the validator network +// to the consensus engines that the messages are intended for. +// Note that consensus engines are uniquely identified by the ID of the chain +// that they are working on. +type ChainRouter struct { + log logging.Logger + lock sync.RWMutex + chains map[[32]byte]*handler.Handler + timeouts *timeout.Manager +} + +// Initialize the router +// When this router receives an incoming message, it cancels the timeout in [timeouts] +// associated with the request that caused the incoming message, if applicable +func (sr *ChainRouter) Initialize(log logging.Logger, timeouts *timeout.Manager) { + sr.log = log + sr.chains = make(map[[32]byte]*handler.Handler) + sr.timeouts = timeouts +} + +// AddChain registers the specified chain so that incoming +// messages can be routed to it +func (sr *ChainRouter) AddChain(chain *handler.Handler) { + sr.lock.Lock() + defer sr.lock.Unlock() + + sr.chains[chain.Context().ChainID.Key()] = chain +} + +// RemoveChain removes the specified chain so that incoming +// messages can't be routed to it +func (sr *ChainRouter) RemoveChain(chainID ids.ID) { + sr.lock.Lock() + defer sr.lock.Unlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.Shutdown() + delete(sr.chains, chainID.Key()) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// GetAcceptedFrontier routes an 
incoming GetAcceptedFrontier request from the +// validator with ID [validatorID] to the consensus engine working on the +// chain with ID [chainID] +func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.GetAcceptedFrontier(validatorID, requestID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// AcceptedFrontier routes an incoming AcceptedFrontier request from the +// validator with ID [validatorID] to the consensus engine working on the +// chain with ID [chainID] +func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.AcceptedFrontier(validatorID, requestID, containerIDs) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// GetAcceptedFrontierFailed routes an incoming GetAcceptedFrontierFailed +// request from the validator with ID [validatorID] to the consensus engine +// working on the chain with ID [chainID] +func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.GetAcceptedFrontierFailed(validatorID, requestID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// GetAccepted routes an incoming GetAccepted request from the +// validator with ID [validatorID] to the consensus engine working on the +// chain with ID [chainID] +func (sr *ChainRouter) GetAccepted(validatorID 
ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.GetAccepted(validatorID, requestID, containerIDs) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// Accepted routes an incoming Accepted request from the validator with ID +// [validatorID] to the consensus engine working on the chain with ID +// [chainID] +func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.Accepted(validatorID, requestID, containerIDs) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// GetAcceptedFailed routes an incoming GetAcceptedFailed request from the +// validator with ID [validatorID] to the consensus engine working on the +// chain with ID [chainID] +func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.GetAcceptedFailed(validatorID, requestID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// Get routes an incoming Get request from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.Get(validatorID, requestID, containerID) + } else { + sr.log.Warn("Message referenced a 
chain, %s, this validator is not validating", chainID) + } +} + +// Put routes an incoming Put request from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + // This message came in response to a Get message from this node, and when we sent that Get + // message we set a timeout. Since we got a response, cancel the timeout. + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.Put(validatorID, requestID, containerID, container) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// GetFailed routes an incoming GetFailed message from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.GetFailed(validatorID, requestID, containerID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// PushQuery routes an incoming PushQuery request from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.PushQuery(validatorID, requestID, containerID, container) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } 
+} + +// PullQuery routes an incoming PullQuery request from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.PullQuery(validatorID, requestID, containerID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// Chits routes an incoming Chits message from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + // Cancel timeout we set when sent the message asking for these Chits + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.Chits(validatorID, requestID, votes) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// QueryFailed routes an incoming QueryFailed message from the validator with ID [validatorID] +// to the consensus engine working on the chain with ID [chainID] +func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.timeouts.Cancel(validatorID, chainID, requestID) + if chain, exists := sr.chains[chainID.Key()]; exists { + chain.QueryFailed(validatorID, requestID) + } else { + sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID) + } +} + +// Shutdown shuts down this router +func (sr *ChainRouter) Shutdown() { + sr.lock.RLock() + defer sr.lock.RUnlock() + + sr.shutdown() +} + +func (sr *ChainRouter) shutdown() { + for _, chain := range sr.chains { + chain.Shutdown() + } 
+} diff --git a/snow/networking/sender/external_sender.go b/snow/networking/sender/external_sender.go new file mode 100644 index 0000000..6bb02db --- /dev/null +++ b/snow/networking/sender/external_sender.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sender + +import "github.com/ava-labs/gecko/ids" + +// ExternalSender sends consensus messages to other validators +// Right now this is implemented in the networking package +type ExternalSender interface { + GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32) + AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) + + GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) + Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) + + Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) + Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) + + PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) + PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID) + Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) +} diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go new file mode 100644 index 0000000..f72c842 --- /dev/null +++ b/snow/networking/sender/sender.go @@ -0,0 +1,165 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package sender
+
+import (
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/snow"
+	"github.com/ava-labs/gecko/snow/networking/router"
+	"github.com/ava-labs/gecko/snow/networking/timeout"
+)
+
+// Sender sends consensus messages to other validators
+type Sender struct {
+	ctx      *snow.Context
+	sender   ExternalSender // Actually does the sending over the network
+	router   router.Router
+	timeouts *timeout.Manager
+}
+
+// Initialize this sender
+func (s *Sender) Initialize(ctx *snow.Context, sender ExternalSender, router router.Router, timeouts *timeout.Manager) {
+	s.ctx = ctx
+	s.sender = sender
+	s.router = router
+	s.timeouts = timeouts
+}
+
+// Context of this sender
+func (s *Sender) Context() *snow.Context { return s.ctx }
+
+// GetAcceptedFrontier asks [validatorIDs] for their accepted frontiers, registering a failure timeout per validator.
+func (s *Sender) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32) {
+	if validatorIDs.Contains(s.ctx.NodeID) {
+		validatorIDs.Remove(s.ctx.NodeID)
+		go s.router.GetAcceptedFrontier(s.ctx.NodeID, s.ctx.ChainID, requestID)
+	}
+	validatorList := validatorIDs.List()
+	for _, validatorID := range validatorList {
+		vID := validatorID // capture the loop variable for the timeout closure
+		s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
+			s.router.GetAcceptedFrontierFailed(vID, s.ctx.ChainID, requestID)
+		})
+	}
+	s.sender.GetAcceptedFrontier(validatorIDs, s.ctx.ChainID, requestID)
+}
+
+// AcceptedFrontier sends this node's accepted frontier to [validatorID] (short-circuiting to the local router if it is us).
+func (s *Sender) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
+	if validatorID.Equals(s.ctx.NodeID) {
+		go s.router.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs)
+		return
+	}
+	s.sender.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs)
+}
+
+// GetAccepted asks [validatorIDs] which of [containerIDs] they consider accepted, registering a failure timeout per validator.
+func (s *Sender) GetAccepted(validatorIDs ids.ShortSet, requestID uint32, containerIDs ids.Set) {
+	if validatorIDs.Contains(s.ctx.NodeID) {
+		validatorIDs.Remove(s.ctx.NodeID)
+		go s.router.GetAccepted(s.ctx.NodeID, s.ctx.ChainID, requestID, containerIDs)
+	}
+	validatorList := validatorIDs.List()
+	for _, validatorID := range validatorList {
+		vID := validatorID // capture the loop variable for the timeout closure
+		s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
+			s.router.GetAcceptedFailed(vID, s.ctx.ChainID, requestID)
+		})
+	}
+	s.sender.GetAccepted(validatorIDs, s.ctx.ChainID, requestID, containerIDs)
+}
+
+// Accepted replies to [validatorID]'s GetAccepted with [containerIDs] (short-circuiting to the local router if it is us).
+func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
+	if validatorID.Equals(s.ctx.NodeID) {
+		go s.router.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs)
+		return
+	}
+	s.sender.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs)
+}
+
+// Get sends a Get message to the consensus engine running on the specified
+// chain to the specified validator. The Get message signifies that this
+// consensus engine would like the recipient to send this consensus engine the
+// specified container.
+func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
+	s.ctx.Log.Verbo("Sending Get to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
+	// Add a timeout -- if we don't get a response before the timeout expires,
+	// send this consensus engine a GetFailed message
+	s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
+		s.router.GetFailed(validatorID, s.ctx.ChainID, requestID, containerID)
+	})
+	s.sender.Get(validatorID, s.ctx.ChainID, requestID, containerID)
+}
+
+// Put sends a Put message to the consensus engine running on the specified chain
+// on the specified validator.
+// The Put message signifies that this consensus engine is giving to the recipient
+// the contents of the specified container.
+func (s *Sender) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
+	s.ctx.Log.Verbo("Sending Put to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
+	s.sender.Put(validatorID, s.ctx.ChainID, requestID, containerID, container)
+}
+
+// PushQuery sends a PushQuery message to the consensus engines running on the specified chains
+// on the specified validators.
+// The PushQuery message signifies that this consensus engine would like each validator to send
+// their preferred frontier given the existence of the specified container.
+func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID, container []byte) {
+	s.ctx.Log.Verbo("Sending PushQuery to validators %v. RequestID: %d. ContainerID: %s", validatorIDs, requestID, containerID)
+	// If one of the validators in [validatorIDs] is myself, send this message directly
+	// to my own router rather than sending it over the network
+	if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself
+		validatorIDs.Remove(s.ctx.NodeID)
+		// We use a goroutine to avoid a deadlock in the case where the consensus engine queries itself.
+		// The flow of execution in that case is handler --> engine --> sender --> chain router --> handler
+		// If this were not a goroutine, then we would deadlock here when [handler].msgs is full
+		go s.router.PushQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID, container)
+	}
+	validatorList := validatorIDs.List() // Convert set to list for easier iteration
+	for _, validatorID := range validatorList {
+		vID := validatorID // capture the loop variable for the timeout closure
+		s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
+			s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
+		})
+	}
+	s.sender.PushQuery(validatorIDs, s.ctx.ChainID, requestID, containerID, container)
+}
+
+// PullQuery sends a PullQuery message to the consensus engines running on the specified chains
+// on the specified validators.
+// The PullQuery message signifies that this consensus engine would like each validator to send
+// their preferred frontier.
+func (s *Sender) PullQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID) {
+	s.ctx.Log.Verbo("Sending PullQuery. RequestID: %d. ContainerID: %s", requestID, containerID)
+	// If one of the validators in [validatorIDs] is myself, send this message directly
+	// to my own router rather than sending it over the network
+	if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself
+		validatorIDs.Remove(s.ctx.NodeID)
+		// We use a goroutine to avoid a deadlock in the case where the consensus engine queries itself.
+		// The flow of execution in that case is handler --> engine --> sender --> chain router --> handler
+		// If this were not a goroutine, then we would deadlock when [handler].msgs is full
+		go s.router.PullQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID)
+	}
+	validatorList := validatorIDs.List() // Convert set to list for easier iteration
+	for _, validatorID := range validatorList {
+		vID := validatorID // capture the loop variable for the timeout closure
+		s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
+			s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
+		})
+	}
+	s.sender.PullQuery(validatorIDs, s.ctx.ChainID, requestID, containerID)
+}
+
+// Chits sends chits
+func (s *Sender) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) {
+	s.ctx.Log.Verbo("Sending Chits to validator %s. RequestID: %d. Votes: %s", validatorID, requestID, votes)
+	// If [validatorID] is myself, send this message directly
+	// to my own router rather than sending it over the network
+	if validatorID.Equals(s.ctx.NodeID) {
+		go s.router.Chits(validatorID, s.ctx.ChainID, requestID, votes)
+		return
+	}
+	s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes)
+}
diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go
new file mode 100644
index 0000000..f33a68c
--- /dev/null
+++ b/snow/networking/sender/sender_test.go
@@ -0,0 +1,62 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package sender
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/snow"
+	"github.com/ava-labs/gecko/snow/engine/common"
+	"github.com/ava-labs/gecko/snow/networking/handler"
+	"github.com/ava-labs/gecko/snow/networking/router"
+	"github.com/ava-labs/gecko/snow/networking/timeout"
+	"github.com/ava-labs/gecko/utils/logging"
+)
+
+func TestTimeout(t *testing.T) {
+	tm := timeout.Manager{}
+	tm.Initialize(time.Millisecond) // 1ms timeout so unanswered queries fail fast
+	go tm.Dispatch()
+
+	router := router.ChainRouter{}
+	router.Initialize(logging.NoLog{}, &tm)
+
+	sender := Sender{}
+	sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &router, &tm)
+
+	engine := common.EngineTest{T: t}
+	engine.Default(true)
+
+	engine.ContextF = snow.DefaultContextTest
+
+	wg := sync.WaitGroup{}
+	wg.Add(2) // expect one QueryFailed per queried validator
+
+	failedVDRs := ids.ShortSet{}
+	engine.QueryFailedF = func(validatorID ids.ShortID, _ uint32) {
+		failedVDRs.Add(validatorID)
+		wg.Done()
+	}
+
+	handler := handler.Handler{}
+	handler.Initialize(&engine, nil, 1)
+	go handler.Dispatch()
+
+	router.AddChain(&handler)
+
+	vdrIDs := ids.ShortSet{}
+	vdrIDs.Add(ids.NewShortID([20]byte{255}))
+	vdrIDs.Add(ids.NewShortID([20]byte{254}))
+
+	sender.PullQuery(vdrIDs, 0, ids.Empty) // the stub sender never replies, so both timeouts must fire
+
+	wg.Wait()
+
+	if !failedVDRs.Equals(vdrIDs) {
+		t.Fatalf("Timeouts should have fired")
+	}
+}
diff --git a/snow/networking/sender/test_external_sender.go b/snow/networking/sender/test_external_sender.go
new file mode 100644
index 0000000..aabe8cc
--- /dev/null
+++ b/snow/networking/sender/test_external_sender.go
@@ -0,0 +1,161 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package sender
+
+import (
+	"testing"
+
+	"github.com/ava-labs/gecko/ids"
+)
+
+// ExternalSenderTest is a test sender
+type ExternalSenderTest struct {
+	T *testing.T
+	B *testing.B
+
+	CantGetAcceptedFrontier, CantAcceptedFrontier,
+	CantGetAccepted, CantAccepted,
+	CantGet, CantPut,
+	CantPullQuery, CantPushQuery, CantChits bool
+
+	GetAcceptedFrontierF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32)
+	AcceptedFrontierF    func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
+	GetAcceptedF         func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set)
+	AcceptedF            func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
+	GetF                 func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
+	PutF                 func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
+	PushQueryF           func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
+	PullQueryF           func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
+	ChitsF               func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)
+}
+
+// Default sets the default callable value to [cant]
+func (s *ExternalSenderTest) Default(cant bool) {
+	s.CantGetAcceptedFrontier = cant
+	s.CantAcceptedFrontier = cant
+	s.CantGetAccepted = cant
+	s.CantAccepted = cant
+	s.CantGet = cant
+	s.CantPut = cant
+	s.CantPullQuery = cant
+	s.CantPushQuery = cant
+	s.CantChits = cant
+}
+
+// GetAcceptedFrontier calls GetAcceptedFrontierF if it was initialized. If it
+// wasn't initialized and this function shouldn't be called and testing was
+// initialized, then testing will fail.
+func (s *ExternalSenderTest) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32) {
+	if s.GetAcceptedFrontierF != nil {
+		s.GetAcceptedFrontierF(validatorIDs, chainID, requestID)
+	} else if s.CantGetAcceptedFrontier && s.T != nil {
+		s.T.Fatalf("Unexpectedly called GetAcceptedFrontier")
+	} else if s.CantGetAcceptedFrontier && s.B != nil {
+		s.B.Fatalf("Unexpectedly called GetAcceptedFrontier")
+	}
+}
+
+// AcceptedFrontier calls AcceptedFrontierF if it was initialized. If it wasn't
+// initialized and this function shouldn't be called and testing was
+// initialized, then testing will fail.
+func (s *ExternalSenderTest) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
+	if s.AcceptedFrontierF != nil {
+		s.AcceptedFrontierF(validatorID, chainID, requestID, containerIDs)
+	} else if s.CantAcceptedFrontier && s.T != nil {
+		s.T.Fatalf("Unexpectedly called AcceptedFrontier")
+	} else if s.CantAcceptedFrontier && s.B != nil {
+		s.B.Fatalf("Unexpectedly called AcceptedFrontier")
+	}
+}
+
+// GetAccepted calls GetAcceptedF if it was initialized. If it wasn't
+// initialized and this function shouldn't be called and testing was
+// initialized, then testing will fail.
+func (s *ExternalSenderTest) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
+	if s.GetAcceptedF != nil {
+		s.GetAcceptedF(validatorIDs, chainID, requestID, containerIDs)
+	} else if s.CantGetAccepted && s.T != nil {
+		s.T.Fatalf("Unexpectedly called GetAccepted")
+	} else if s.CantGetAccepted && s.B != nil {
+		s.B.Fatalf("Unexpectedly called GetAccepted")
+	}
+}
+
+// Accepted calls AcceptedF if it was initialized. If it wasn't initialized and
+// this function shouldn't be called and testing was initialized, then testing
+// will fail.
+func (s *ExternalSenderTest) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
+	if s.AcceptedF != nil {
+		s.AcceptedF(validatorID, chainID, requestID, containerIDs)
+	} else if s.CantAccepted && s.T != nil {
+		s.T.Fatalf("Unexpectedly called Accepted")
+	} else if s.CantAccepted && s.B != nil {
+		s.B.Fatalf("Unexpectedly called Accepted")
+	}
+}
+
+// Get calls GetF if it was initialized. If it wasn't initialized and this
+// function shouldn't be called and testing was initialized, then testing will
+// fail.
+func (s *ExternalSenderTest) Get(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID) {
+	if s.GetF != nil {
+		s.GetF(vdr, chainID, requestID, vtxID)
+	} else if s.CantGet && s.T != nil {
+		s.T.Fatalf("Unexpectedly called Get")
+	} else if s.CantGet && s.B != nil {
+		s.B.Fatalf("Unexpectedly called Get")
+	}
+}
+
+// Put calls PutF if it was initialized. If it wasn't initialized and this
+// function shouldn't be called and testing was initialized, then testing will
+// fail.
+func (s *ExternalSenderTest) Put(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID, vtx []byte) {
+	if s.PutF != nil {
+		s.PutF(vdr, chainID, requestID, vtxID, vtx)
+	} else if s.CantPut && s.T != nil {
+		s.T.Fatalf("Unexpectedly called Put")
+	} else if s.CantPut && s.B != nil {
+		s.B.Fatalf("Unexpectedly called Put")
+	}
+}
+
+// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
+// and this function shouldn't be called and testing was initialized, then
+// testing will fail.
+func (s *ExternalSenderTest) PushQuery(vdrs ids.ShortSet, chainID ids.ID, requestID uint32, vtxID ids.ID, vtx []byte) {
+	if s.PushQueryF != nil {
+		s.PushQueryF(vdrs, chainID, requestID, vtxID, vtx)
+	} else if s.CantPushQuery && s.T != nil {
+		s.T.Fatalf("Unexpectedly called PushQuery")
+	} else if s.CantPushQuery && s.B != nil {
+		s.B.Fatalf("Unexpectedly called PushQuery")
+	}
+}
+
+// PullQuery calls PullQueryF if it was initialized. If it wasn't initialized
+// and this function shouldn't be called and testing was initialized, then
+// testing will fail.
+func (s *ExternalSenderTest) PullQuery(vdrs ids.ShortSet, chainID ids.ID, requestID uint32, vtxID ids.ID) {
+	if s.PullQueryF != nil {
+		s.PullQueryF(vdrs, chainID, requestID, vtxID)
+	} else if s.CantPullQuery && s.T != nil {
+		s.T.Fatalf("Unexpectedly called PullQuery")
+	} else if s.CantPullQuery && s.B != nil {
+		s.B.Fatalf("Unexpectedly called PullQuery")
+	}
+}
+
+// Chits calls ChitsF if it was initialized. If it wasn't initialized and this
+// function shouldn't be called and testing was initialized, then testing will
+// fail.
+func (s *ExternalSenderTest) Chits(vdr ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
+	if s.ChitsF != nil {
+		s.ChitsF(vdr, chainID, requestID, votes)
+	} else if s.CantChits && s.T != nil {
+		s.T.Fatalf("Unexpectedly called Chits")
+	} else if s.CantChits && s.B != nil {
+		s.B.Fatalf("Unexpectedly called Chits")
+	}
+}
diff --git a/snow/networking/timeout/manager.go b/snow/networking/timeout/manager.go
new file mode 100644
index 0000000..5cf0d39
--- /dev/null
+++ b/snow/networking/timeout/manager.go
@@ -0,0 +1,46 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package timeout
+
+import (
+	"time"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils/hashing"
+	"github.com/ava-labs/gecko/utils/timer"
+	"github.com/ava-labs/gecko/utils/wrappers"
+)
+
+// Manager registers and fires timeouts for the snow API.
+type Manager struct{ tm timer.TimeoutManager }
+
+// Initialize this timeout manager.
+//
+// External requests are requests that depend on other nodes to perform an
+// action. Internal requests are requests that only exist inside this node.
+//
+// [duration] is the amount of time to allow for external requests
+// before the request times out.
+func (m *Manager) Initialize(duration time.Duration) { m.tm.Initialize(duration) }
+
+// Dispatch runs the underlying timer, executing registered timeouts as they expire.
+func (m *Manager) Dispatch() { m.tm.Dispatch() }
+
+// Register request to time out unless Manager.Cancel is called
+// before the timeout duration passes, with the same request parameters.
+func (m *Manager) Register(validatorID ids.ShortID, chainID ids.ID, requestID uint32, timeout func()) {
+	m.tm.Put(createRequestID(validatorID, chainID, requestID), timeout)
+}
+
+// Cancel request timeout with the specified parameters.
+func (m *Manager) Cancel(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
+	m.tm.Remove(createRequestID(validatorID, chainID, requestID))
+}
+// createRequestID hashes (validatorID, chainID, requestID) into a single ID keying the timeout.
+func createRequestID(validatorID ids.ShortID, chainID ids.ID, requestID uint32) ids.ID {
+	p := wrappers.Packer{Bytes: make([]byte, wrappers.IntLen)}
+	p.PackInt(requestID)
+
+	return ids.NewID(hashing.ByteArraysToHash256Array(validatorID.Bytes(), chainID.Bytes(), p.Bytes))
+}
diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go
new file mode 100644
index 0000000..4b6648b
--- /dev/null
+++ b/snow/networking/timeout/manager_test.go
@@ -0,0 +1,48 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package timeout
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ava-labs/gecko/ids"
+)
+
+func TestManagerFire(t *testing.T) {
+	manager := Manager{}
+	manager.Initialize(time.Millisecond)
+	go manager.Dispatch()
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	// The registered timeout is never cancelled, so it must fire and release the wait group
+	manager.Register(ids.NewShortID([20]byte{}), ids.NewID([32]byte{}), 0, wg.Done)
+
+	wg.Wait()
+}
+
+func TestManagerCancel(t *testing.T) {
+	manager := Manager{}
+	manager.Initialize(50 * time.Millisecond)
+	go manager.Dispatch()
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+
+	fired := new(bool)
+
+	manager.Register(ids.NewShortID([20]byte{}), ids.NewID([32]byte{}), 0, func() { *fired = true })
+
+	manager.Cancel(ids.NewShortID([20]byte{}), ids.NewID([32]byte{}), 0)
+	// Registering a second timeout (requestID 1) and waiting for it gives the
+	manager.Register(ids.NewShortID([20]byte{}), ids.NewID([32]byte{}), 1, wg.Done) // cancelled one time to misfire
+
+	wg.Wait()
+
+	if *fired {
+		t.Fatalf("Should have cancelled the function")
+	}
+}
diff --git a/snow/triggers/dispatcher.go b/snow/triggers/dispatcher.go
new file mode 100644
index 0000000..a053321
--- /dev/null
+++ b/snow/triggers/dispatcher.go
@@ -0,0 +1,192 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package triggers
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils/logging"
+)
+
+// EventDispatcher receives events from consensus and dispatches the events to triggers
+type EventDispatcher struct {
+	lock          sync.Mutex
+	log           logging.Logger
+	chainHandlers map[[32]byte]map[string]interface{}
+	handlers      map[string]interface{}
+}
+
+// Initialize creates the EventDispatcher's initial values
+func (ed *EventDispatcher) Initialize(log logging.Logger) {
+	ed.log = log
+	ed.chainHandlers = make(map[[32]byte]map[string]interface{})
+	ed.handlers = make(map[string]interface{})
+}
+
+// Accept is called when a transaction or block is accepted
+func (ed *EventDispatcher) Accept(chainID, containerID ids.ID, container []byte) {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	for id, handler := range ed.handlers {
+		handler, ok := handler.(Acceptor)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Accept(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Accept on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+
+	events, exist := ed.chainHandlers[chainID.Key()]
+	if !exist {
+		return
+	}
+	for id, handler := range events {
+		handler, ok := handler.(Acceptor)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Accept(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Accept on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+}
+
+// Reject is called when a transaction or block is rejected
+func (ed *EventDispatcher) Reject(chainID, containerID ids.ID, container []byte) {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	for id, handler := range ed.handlers {
+		handler, ok := handler.(Rejector)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Reject(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Reject on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+
+	events, exist := ed.chainHandlers[chainID.Key()]
+	if !exist {
+		return
+	}
+	for id, handler := range events {
+		handler, ok := handler.(Rejector)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Reject(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Reject on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+}
+
+// Issue is called when a transaction or block is issued
+func (ed *EventDispatcher) Issue(chainID, containerID ids.ID, container []byte) {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	for id, handler := range ed.handlers {
+		handler, ok := handler.(Issuer)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Issue(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Issue on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+
+	events, exist := ed.chainHandlers[chainID.Key()]
+	if !exist {
+		return
+	}
+	for id, handler := range events {
+		handler, ok := handler.(Issuer)
+		if !ok {
+			continue
+		}
+
+		if err := handler.Issue(chainID, containerID, container); err != nil {
+			ed.log.Error("unable to Issue on %s for chainID %s: %s", id, chainID, err)
+		}
+	}
+}
+
+// RegisterChain places a new chain handler into the system
+func (ed *EventDispatcher) RegisterChain(chainID ids.ID, identifier string, handler interface{}) error {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	chainIDKey := chainID.Key()
+	events, exist := ed.chainHandlers[chainIDKey]
+	if !exist {
+		events = make(map[string]interface{})
+		ed.chainHandlers[chainIDKey] = events
+	}
+
+	if _, ok := events[identifier]; ok {
+		return fmt.Errorf("handler %s already exists on chain %s", identifier, chainID)
+	}
+
+	events[identifier] = handler
+	return nil
+}
+
+// DeregisterChain removes a chain handler from the system
+func (ed *EventDispatcher) DeregisterChain(chainID ids.ID, identifier string) error {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	chainIDKey := chainID.Key()
+	events, exist := ed.chainHandlers[chainIDKey]
+	if !exist {
+		return fmt.Errorf("chain %s has no handlers", chainID)
+	}
+
+	if _, ok := events[identifier]; !ok {
+		return fmt.Errorf("handler %s does not exist on chain %s", identifier, chainID)
+	}
+
+	if len(events) == 1 {
+		delete(ed.chainHandlers, chainIDKey)
+	} else {
+		delete(events, identifier)
+	}
+	return nil
+}
+
+// Register places a new handler into the system
+func (ed *EventDispatcher) Register(identifier string, handler interface{}) error {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	if _, exist := ed.handlers[identifier]; exist {
+		return fmt.Errorf("handler %s already exists", identifier)
+	}
+
+	ed.handlers[identifier] = handler
+	return nil
+}
+
+// Deregister removes a handler from the system
+func (ed *EventDispatcher) Deregister(identifier string) error {
+	ed.lock.Lock()
+	defer ed.lock.Unlock()
+
+	if _, exist := ed.handlers[identifier]; !exist {
+		return fmt.Errorf("handler %s does not exist", identifier)
+	}
+
+	delete(ed.handlers, identifier)
+	return nil
+}
diff --git a/snow/triggers/events.go b/snow/triggers/events.go
new file mode 100644
index 0000000..63f9e46
--- /dev/null
+++ b/snow/triggers/events.go
@@ -0,0 +1,21 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package triggers
+
+import "github.com/ava-labs/gecko/ids"
+
+// Acceptor is implemented when a struct is monitoring if a message is accepted
+type Acceptor interface {
+	Accept(chainID, containerID ids.ID, container []byte) error
+}
+
+// Rejector is implemented when a struct is monitoring if a message is rejected
+type Rejector interface {
+	Reject(chainID, containerID ids.ID, container []byte) error
+}
+
+// Issuer is implemented when a struct is monitoring if a message is issued
+type Issuer interface {
+	Issue(chainID, containerID ids.ID, container []byte) error
+}
diff --git a/snow/validators/manager.go b/snow/validators/manager.go
new file mode 100644
index 0000000..1a85b46
--- /dev/null
+++ b/snow/validators/manager.go
@@ -0,0 +1,62 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package validators
+
+import (
+	"sync"
+
+	"github.com/ava-labs/gecko/ids"
+)
+
+// Manager holds the validator set of each subnet
+type Manager interface {
+	// PutValidatorSet associates the given subnet ID with the given validator set
+	PutValidatorSet(ids.ID, Set)
+
+	// RemoveValidatorSet removes the specified validator set
+	RemoveValidatorSet(ids.ID)
+
+	// GetValidatorSet returns:
+	// 1) the validator set of the subnet with the specified ID
+	// 2) false if there is no subnet with the specified ID
+	GetValidatorSet(ids.ID) (Set, bool)
+}
+
+// NewManager returns a new, empty manager
+func NewManager() Manager {
+	return &manager{
+		validatorSets: make(map[[32]byte]Set),
+	}
+}
+
+// manager implements Manager
+type manager struct {
+	lock          sync.Mutex
+	validatorSets map[[32]byte]Set
+}
+
+// PutValidatorSet implements the Manager interface.
+func (m *manager) PutValidatorSet(subnetID ids.ID, set Set) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	m.validatorSets[subnetID.Key()] = set
+}
+
+// RemoveValidatorSet implements the Manager interface.
+func (m *manager) RemoveValidatorSet(subnetID ids.ID) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	delete(m.validatorSets, subnetID.Key())
+}
+
+// GetValidatorSet implements the Manager interface.
+func (m *manager) GetValidatorSet(subnetID ids.ID) (Set, bool) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	set, exists := m.validatorSets[subnetID.Key()]
+	return set, exists
+}
diff --git a/snow/validators/set.go b/snow/validators/set.go
new file mode 100644
index 0000000..26dd22f
--- /dev/null
+++ b/snow/validators/set.go
@@ -0,0 +1,210 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package validators
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils/formatting"
+	"github.com/ava-labs/gecko/utils/random"
+)
+
+// Set of validators that can be sampled
+type Set interface {
+	fmt.Stringer
+
+	// Set removes all the current validators and adds all the provided
+	// validators to the set.
+	Set([]Validator)
+
+	// Add the provided validator to the set.
+	Add(Validator)
+
+	// Remove the validator with the specified ID.
+	Remove(ids.ShortID)
+
+	// Contains returns true if there is a validator with the specified ID
+	// currently in the set.
+	Contains(ids.ShortID) bool
+
+	// Len returns the number of validators currently in the set.
+	Len() int
+
+	// List returns the validators currently in this set
+	List() []Validator
+
+	// Sample returns a collection of validator IDs. If there aren't enough
+	// validators, the length of the returned validators may be less than
+	// [size]. Otherwise, the length of the returned validators will equal
+	// [size].
+	Sample(size int) []Validator
+}
+
+// NewSet returns a new, empty set of validators.
+func NewSet() Set { return &set{vdrMap: make(map[[20]byte]int)} }
+
+// set of validators. Validator function results are cached. Therefore, to
+// update a validators weight, one should ensure to call add with the updated
+// validator. Sample will run in O(NumValidators) time. All other functions run
+// in O(1) time.
+// set implements Set
+type set struct {
+	lock     sync.Mutex
+	vdrMap   map[[20]byte]int
+	vdrSlice []Validator
+	sampler  random.Weighted
+}
+
+// Set implements the Set interface.
+func (s *set) Set(vdrs []Validator) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.set(vdrs)
+}
+
+func (s *set) set(vdrs []Validator) {
+	s.vdrMap = make(map[[20]byte]int, len(vdrs))
+	s.vdrSlice = s.vdrSlice[:0]
+	s.sampler.Weights = s.sampler.Weights[:0]
+
+	for _, vdr := range vdrs {
+		s.add(vdr)
+	}
+}
+
+// Add implements the Set interface.
+func (s *set) Add(vdr Validator) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.add(vdr)
+}
+
+func (s *set) add(vdr Validator) {
+	vdrID := vdr.ID()
+	if s.contains(vdrID) {
+		s.remove(vdrID)
+	}
+
+	w := vdr.Weight()
+	if w == 0 {
+		return // This validator would never be sampled anyway
+	}
+
+	i := len(s.vdrSlice)
+	s.vdrMap[vdrID.Key()] = i
+	s.vdrSlice = append(s.vdrSlice, vdr)
+	s.sampler.Weights = append(s.sampler.Weights, w)
+}
+
+// Remove implements the Set interface.
+func (s *set) Remove(vdrID ids.ShortID) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.remove(vdrID)
+}
+
+func (s *set) remove(vdrID ids.ShortID) {
+	// Get the element to remove
+	iKey := vdrID.Key()
+	i, contains := s.vdrMap[iKey]
+	if !contains {
+		return
+	}
+
+	// Get the last element
+	e := len(s.vdrSlice) - 1
+	eVdr := s.vdrSlice[e]
+	eKey := eVdr.ID().Key()
+
+	// Move e -> i
+	s.vdrMap[eKey] = i
+	s.vdrSlice[i] = eVdr
+	s.sampler.Weights[i] = s.sampler.Weights[e]
+
+	// Remove i
+	delete(s.vdrMap, iKey)
+	s.vdrSlice = s.vdrSlice[:e]
+	s.sampler.Weights = s.sampler.Weights[:e]
+}
+
+// Contains implements the Set interface.
+func (s *set) Contains(vdrID ids.ShortID) bool {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.contains(vdrID)
+}
+
+func (s *set) contains(vdrID ids.ShortID) bool {
+	_, contains := s.vdrMap[vdrID.Key()]
+	return contains
+}
+
+// Len implements the Set interface.
+func (s *set) Len() int {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.len()
+}
+
+func (s *set) len() int { return len(s.vdrSlice) }
+
+// List implements the Set interface.
+func (s *set) List() []Validator {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.list()
+}
+
+func (s *set) list() []Validator {
+	list := make([]Validator, len(s.vdrSlice))
+	copy(list, s.vdrSlice)
+	return list
+}
+
+// Sample implements the Set interface.
+func (s *set) Sample(size int) []Validator {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.sample(size)
+}
+
+func (s *set) sample(size int) []Validator {
+	list := make([]Validator, size)[:0]
+
+	s.sampler.Replace() // Must replace, otherwise changes won't be reflected
+	for ; size > 0 && s.sampler.CanSample(); size-- {
+		i := s.sampler.Sample()
+		list = append(list, s.vdrSlice[i])
+	}
+	return list
+}
+
+func (s *set) String() string {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	return s.string()
+}
+
+func (s *set) string() string {
+	sb := strings.Builder{}
+
+	sb.WriteString(fmt.Sprintf("Validator Set: (Size = %d)", len(s.vdrSlice)))
+	format := fmt.Sprintf("\n    Validator[%s]: %%33s, %%d", formatting.IntFormat(len(s.vdrSlice)-1))
+	for i, vdr := range s.vdrSlice {
+		sb.WriteString(fmt.Sprintf(format, i, vdr.ID(), s.sampler.Weights[i]))
+	}
+
+	return sb.String()
+}
diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go
new file mode 100644
index 0000000..5ad381c
--- /dev/null
+++ b/snow/validators/set_test.go
@@ -0,0 +1,149 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package validators + +import ( + "math" + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestSamplerSample(t *testing.T) { + vdr0 := GenerateRandomValidator(1) + vdr1 := GenerateRandomValidator(math.MaxInt64 - 1) + + s := NewSet() + s.Add(vdr0) + + if sampled := s.Sample(1); len(sampled) != 1 { + t.Fatalf("Should have sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } else if s.Len() != 1 { + t.Fatalf("Wrong size") + } + + s.Add(vdr1) + + if sampled := s.Sample(1); len(sampled) != 1 { + t.Fatalf("Should have sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr1.ID()) { + t.Fatalf("Should have sampled vdr1") + } else if s.Len() != 2 { + t.Fatalf("Wrong size") + } + + if sampled := s.Sample(2); len(sampled) != 2 { + t.Fatalf("Should have sampled 2 validators") + } else if !sampled[1].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } else if !sampled[0].ID().Equals(vdr1.ID()) { + t.Fatalf("Should have sampled vdr1") + } + + if sampled := s.Sample(3); len(sampled) != 2 { + t.Fatalf("Should have sampled 2 validators") + } else if !sampled[1].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } else if !sampled[0].ID().Equals(vdr1.ID()) { + t.Fatalf("Should have sampled vdr1") + } + + if list := s.List(); len(list) != 2 { + t.Fatalf("Should have returned 2 validators") + } else if !list[0].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have returned vdr0") + } else if !list[1].ID().Equals(vdr1.ID()) { + t.Fatalf("Should have returned vdr1") + } +} + +func TestSamplerDuplicate(t *testing.T) { + vdr0 := GenerateRandomValidator(1) + vdr1_0 := GenerateRandomValidator(math.MaxInt64 - 1) + vdr1_1 := NewValidator(vdr1_0.ID(), 0) + + s := NewSet() + s.Add(vdr0) + s.Add(vdr1_0) + + if sampled := s.Sample(1); len(sampled) != 1 { + t.Fatalf("Should have sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr1_0.ID()) { + t.Fatalf("Should have sampled vdr1") + } + 
+ s.Add(vdr1_1) + + if sampled := s.Sample(1); len(sampled) != 1 { + t.Fatalf("Should have sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } + + if sampled := s.Sample(2); len(sampled) != 1 { + t.Fatalf("Should have only sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } + + s.Remove(vdr1_1.ID()) + + if sampled := s.Sample(2); len(sampled) != 1 { + t.Fatalf("Should have only sampled 1 validator") + } else if !sampled[0].ID().Equals(vdr0.ID()) { + t.Fatalf("Should have sampled vdr0") + } +} + +func TestSamplerSimple(t *testing.T) { + vdr := GenerateRandomValidator(1) + + s := NewSet() + s.Add(vdr) + + if sampled := s.Sample(1); len(sampled) != 1 { + t.Fatalf("Should have sampled 1 validator") + } +} + +func TestSamplerContains(t *testing.T) { + vdr := GenerateRandomValidator(1) + + s := NewSet() + s.Add(vdr) + + if !s.Contains(vdr.ID()) { + t.Fatalf("Should have contained validator") + } + + s.Remove(vdr.ID()) + + if s.Contains(vdr.ID()) { + t.Fatalf("Shouldn't have contained validator") + } +} + +func TestSamplerString(t *testing.T) { + vdr0 := NewValidator(ids.ShortEmpty, 1) + vdr1 := NewValidator( + ids.NewShortID([20]byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + }), + math.MaxInt64-1, + ) + + s := NewSet() + s.Add(vdr0) + s.Add(vdr1) + + expected := "Validator Set: (Size = 2)\n" + + " Validator[0]: 111111111111111111116DBWJs, 1\n" + + " Validator[1]: QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806" + if str := s.String(); str != expected { + t.Fatalf("Got:\n%s\nExpected:\n%s", str, expected) + } +} diff --git a/snow/validators/test_validator.go b/snow/validators/test_validator.go new file mode 100644 index 0000000..ea26709 --- /dev/null +++ b/snow/validators/test_validator.go @@ -0,0 +1,41 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package validators + +import ( + "github.com/ava-labs/gecko/ids" +) + +// testValidator is a struct that contains the base values required by the +// validator interface. This struct is used only for testing. +type testValidator struct { + id ids.ShortID + weight uint64 +} + +func (v *testValidator) ID() ids.ShortID { return v.id } +func (v *testValidator) Weight() uint64 { return v.weight } + +// NewValidator returns a validator object that implements the Validator +// interface +func NewValidator(id ids.ShortID, weight uint64) Validator { + return &testValidator{ + id: id, + weight: weight, + } +} + +var ( + vdrOffset = uint64(0) +) + +// GenerateRandomValidator creates a random validator with the provided weight +func GenerateRandomValidator(weight uint64) Validator { + vdrOffset++ + id := ids.Empty.Prefix(vdrOffset) + bytes := id.Bytes() + hash := [20]byte{} + copy(hash[:], bytes) + return NewValidator(ids.NewShortID(hash), weight) +} diff --git a/snow/validators/validator.go b/snow/validators/validator.go new file mode 100644 index 0000000..3b2efdc --- /dev/null +++ b/snow/validators/validator.go @@ -0,0 +1,18 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Validator is the minimal description of someone that can be sampled. +type Validator interface { + // ID returns the unique id of this validator + ID() ids.ShortID + + // Weight that can be used for weighted sampling. + // If this validator is validating the default subnet, returns the amount of $AVA staked + Weight() uint64 +} diff --git a/utils/crypto/crypto.go b/utils/crypto/crypto.go new file mode 100644 index 0000000..9146ce1 --- /dev/null +++ b/utils/crypto/crypto.go @@ -0,0 +1,47 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package crypto + +import ( + "github.com/ava-labs/gecko/ids" +) + +// EnableCrypto ... +// TODO: Remove this from this package, this should be in a config file +var EnableCrypto = true + +// Factory ... +type Factory interface { + NewPrivateKey() (PrivateKey, error) + + ToPublicKey([]byte) (PublicKey, error) + ToPrivateKey([]byte) (PrivateKey, error) +} + +// RecoverableFactory ... +type RecoverableFactory interface { + Factory + + RecoverPublicKey(message, signature []byte) (PublicKey, error) + RecoverHashPublicKey(hash, signature []byte) (PublicKey, error) +} + +// PublicKey ... +type PublicKey interface { + Verify(message, signature []byte) bool + VerifyHash(hash, signature []byte) bool + + Address() ids.ShortID + Bytes() []byte +} + +// PrivateKey ... +type PrivateKey interface { + PublicKey() PublicKey + + Sign(message []byte) ([]byte, error) + SignHash(hash []byte) ([]byte, error) + + Bytes() []byte +} diff --git a/utils/crypto/crypto_benchmark_test.go b/utils/crypto/crypto_benchmark_test.go new file mode 100644 index 0000000..2d4d2f9 --- /dev/null +++ b/utils/crypto/crypto_benchmark_test.go @@ -0,0 +1,103 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package crypto + +import ( + "testing" + + "github.com/ava-labs/gecko/utils/hashing" +) + +// NumVerifies is the number of verifications to run per operation +const NumVerifies = 1 + +// The different signature schemes +const ( + RSA = iota + RSAPSS + ED25519 + SECP256K1 +) + +var ( + hashes [][]byte + + keys [][]PublicKey + sigs [][][]byte +) + +func init() { + // Setup hashes: + bytes := [32]byte{} + for i := uint64(0); i < NumVerifies; i++ { + bytes[i%32]++ + hash := hashing.ComputeHash256(bytes[:]) + hashes = append(hashes, hash) + } + + // Setup signatures: + factories := []Factory{ + RSA: &FactoryRSA{}, + RSAPSS: &FactoryRSAPSS{}, + ED25519: &FactoryED25519{}, + SECP256K1: &FactorySECP256K1{}, + } + for _, f := range factories { + fKeys := []PublicKey{} + fSigs := [][]byte{} + for i := uint64(0); i < NumVerifies; i++ { + privateKey, err := f.NewPrivateKey() + if err != nil { + panic(err) + } + + publicKey := privateKey.PublicKey() + sig, err := privateKey.SignHash(hashes[i]) + if err != nil { + panic(err) + } + + fKeys = append(fKeys, publicKey) + fSigs = append(fSigs, sig) + } + keys = append(keys, fKeys) + sigs = append(sigs, fSigs) + } +} + +func verify(algo int) { + for i := 0; i < NumVerifies; i++ { + if !keys[algo][i].VerifyHash(hashes[i], sigs[algo][i]) { + panic("Verification failed") + } + } +} + +// BenchmarkRSAVerify runs the benchmark with RSA keys +func BenchmarkRSAVerify(b *testing.B) { + for n := 0; n < b.N; n++ { + verify(RSA) + } +} + +// BenchmarkRSAPSSVerify runs the benchmark with RSAPSS keys +func BenchmarkRSAPSSVerify(b *testing.B) { + for n := 0; n < b.N; n++ { + verify(RSAPSS) + } +} + +// BenchmarkED25519Verify runs the benchmark with ED25519 keys +func BenchmarkED25519Verify(b *testing.B) { + for n := 0; n < b.N; n++ { + verify(ED25519) + } +} + +// BenchmarkSECP256k1Verify runs the benchmark with SECP256K1 keys +func BenchmarkSECP256k1Verify(b *testing.B) { + for n := 0; n < b.N; n++ { + verify(SECP256K1) + } +} diff --git 
a/utils/crypto/ed25519.go b/utils/crypto/ed25519.go new file mode 100644 index 0000000..c92392f --- /dev/null +++ b/utils/crypto/ed25519.go @@ -0,0 +1,103 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package crypto + +import ( + "errors" + + "golang.org/x/crypto/ed25519" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errWrongPublicKeySize = errors.New("wrong public key size") + errWrongPrivateKeySize = errors.New("wrong private key size") +) + +// FactoryED25519 ... +type FactoryED25519 struct{} + +// NewPrivateKey implements the Factory interface +func (*FactoryED25519) NewPrivateKey() (PrivateKey, error) { + _, k, err := ed25519.GenerateKey(nil) + return &PrivateKeyED25519{sk: k}, err +} + +// ToPublicKey implements the Factory interface +func (*FactoryED25519) ToPublicKey(b []byte) (PublicKey, error) { + if len(b) != ed25519.PublicKeySize { + return nil, errWrongPublicKeySize + } + return &PublicKeyED25519{pk: b}, nil +} + +// ToPrivateKey implements the Factory interface +func (*FactoryED25519) ToPrivateKey(b []byte) (PrivateKey, error) { + if len(b) != ed25519.PrivateKeySize { + return nil, errWrongPrivateKeySize + } + return &PrivateKeyED25519{sk: b}, nil +} + +// PublicKeyED25519 ... 
+type PublicKeyED25519 struct { + pk ed25519.PublicKey + addr ids.ShortID +} + +// Verify implements the PublicKey interface +func (k *PublicKeyED25519) Verify(msg, sig []byte) bool { + return ed25519.Verify(ed25519.PublicKey(k.pk), msg, sig) +} + +// VerifyHash implements the PublicKey interface +func (k *PublicKeyED25519) VerifyHash(hash, sig []byte) bool { + return k.Verify(hash, sig) +} + +// Address implements the PublicKey interface +func (k *PublicKeyED25519) Address() ids.ShortID { + if k.addr.IsZero() { + addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) + if err != nil { + panic(err) + } + k.addr = addr + } + return k.addr +} + +// Bytes implements the PublicKey interface +func (k *PublicKeyED25519) Bytes() []byte { return k.pk } + +// PrivateKeyED25519 ... +type PrivateKeyED25519 struct { + sk ed25519.PrivateKey + pk *PublicKeyED25519 +} + +// PublicKey implements the PrivateKey interface +func (k *PrivateKeyED25519) PublicKey() PublicKey { + if k.pk == nil { + k.pk = &PublicKeyED25519{ + pk: ed25519.PrivateKey(k.sk).Public().(ed25519.PublicKey), + } + } + return k.pk +} + +// Sign implements the PrivateKey interface +func (k *PrivateKeyED25519) Sign(msg []byte) ([]byte, error) { + return ed25519.Sign(ed25519.PrivateKey(k.sk), msg), nil +} + +// SignHash implements the PrivateKey interface +func (k PrivateKeyED25519) SignHash(hash []byte) ([]byte, error) { + return k.Sign(hash) +} + +// Bytes implements the PrivateKey interface +func (k PrivateKeyED25519) Bytes() []byte { return k.sk } diff --git a/utils/crypto/errors.go b/utils/crypto/errors.go new file mode 100644 index 0000000..2aad688 --- /dev/null +++ b/utils/crypto/errors.go @@ -0,0 +1,13 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package crypto + +import ( + "errors" +) + +var ( + errInvalidSigLen = errors.New("invalid signature length") + errMutatedSig = errors.New("signature was mutated from its original format") +) diff --git a/utils/crypto/rsa.go b/utils/crypto/rsa.go new file mode 100644 index 0000000..3ed65ff --- /dev/null +++ b/utils/crypto/rsa.go @@ -0,0 +1,136 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package crypto + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errWrongKeyType = errors.New("wrong key type") +) + +const rsaSize = 3072 + +// FactoryRSA ... +type FactoryRSA struct{} + +// NewPrivateKey implements the Factory interface +func (*FactoryRSA) NewPrivateKey() (PrivateKey, error) { + k, err := rsa.GenerateKey(rand.Reader, rsaSize) + if err != nil { + return nil, err + } + return &PrivateKeyRSA{sk: k}, nil +} + +// ToPublicKey implements the Factory interface +func (*FactoryRSA) ToPublicKey(b []byte) (PublicKey, error) { + key, err := x509.ParsePKIXPublicKey(b) + if err != nil { + return nil, err + } + switch key := key.(type) { + case *rsa.PublicKey: + return &PublicKeyRSA{ + pk: key, + bytes: b, + }, nil + default: + return nil, errWrongKeyType + } +} + +// ToPrivateKey implements the Factory interface +func (*FactoryRSA) ToPrivateKey(b []byte) (PrivateKey, error) { + key, err := x509.ParsePKCS1PrivateKey(b) + if err != nil { + return nil, err + } + return &PrivateKeyRSA{ + sk: key, + bytes: b, + }, nil +} + +// PublicKeyRSA ... 
+type PublicKeyRSA struct { + pk *rsa.PublicKey + addr ids.ShortID + bytes []byte +} + +// Verify implements the PublicKey interface +func (k *PublicKeyRSA) Verify(msg, sig []byte) bool { + return k.VerifyHash(hashing.ComputeHash256(msg), sig) +} + +// VerifyHash implements the PublicKey interface +func (k *PublicKeyRSA) VerifyHash(hash, sig []byte) bool { + return rsa.VerifyPKCS1v15(k.pk, crypto.SHA256, hash, sig) == nil +} + +// Address implements the PublicKey interface +func (k *PublicKeyRSA) Address() ids.ShortID { + if k.addr.IsZero() { + addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) + if err != nil { + panic(err) + } + k.addr = addr + } + return k.addr +} + +// Bytes implements the PublicKey interface +func (k *PublicKeyRSA) Bytes() []byte { + if k.bytes == nil { + b, err := x509.MarshalPKIXPublicKey(k.pk) + if err != nil { + panic(err) + } + k.bytes = b + } + return k.bytes +} + +// PrivateKeyRSA ... +type PrivateKeyRSA struct { + sk *rsa.PrivateKey + pk *PublicKeyRSA + bytes []byte +} + +// PublicKey implements the PrivateKey interface +func (k *PrivateKeyRSA) PublicKey() PublicKey { + if k.pk == nil { + k.pk = &PublicKeyRSA{pk: &k.sk.PublicKey} + } + return k.pk +} + +// Sign implements the PrivateKey interface +func (k *PrivateKeyRSA) Sign(msg []byte) ([]byte, error) { + return k.SignHash(hashing.ComputeHash256(msg)) +} + +// SignHash implements the PrivateKey interface +func (k *PrivateKeyRSA) SignHash(hash []byte) ([]byte, error) { + return rsa.SignPKCS1v15(rand.Reader, k.sk, crypto.SHA256, hash) +} + +// Bytes implements the PrivateKey interface +func (k *PrivateKeyRSA) Bytes() []byte { + if k.bytes == nil { + k.bytes = x509.MarshalPKCS1PrivateKey(k.sk) + } + return k.bytes +} diff --git a/utils/crypto/rsapss.go b/utils/crypto/rsapss.go new file mode 100644 index 0000000..cad213f --- /dev/null +++ b/utils/crypto/rsapss.go @@ -0,0 +1,131 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package crypto + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" +) + +const rsaPSSSize = 3072 + +// FactoryRSAPSS ... +type FactoryRSAPSS struct{} + +// NewPrivateKey implements the Factory interface +func (*FactoryRSAPSS) NewPrivateKey() (PrivateKey, error) { + k, err := rsa.GenerateKey(rand.Reader, rsaPSSSize) + if err != nil { + return nil, err + } + return &PrivateKeyRSAPSS{sk: k}, nil +} + +// ToPublicKey implements the Factory interface +func (*FactoryRSAPSS) ToPublicKey(b []byte) (PublicKey, error) { + key, err := x509.ParsePKIXPublicKey(b) + if err != nil { + return nil, err + } + switch key := key.(type) { + case *rsa.PublicKey: + return &PublicKeyRSAPSS{ + pk: key, + bytes: b, + }, nil + default: + return nil, errWrongKeyType + } +} + +// ToPrivateKey implements the Factory interface +func (*FactoryRSAPSS) ToPrivateKey(b []byte) (PrivateKey, error) { + key, err := x509.ParsePKCS1PrivateKey(b) + if err != nil { + return nil, err + } + return &PrivateKeyRSAPSS{ + sk: key, + bytes: b, + }, nil +} + +// PublicKeyRSAPSS ... 
+type PublicKeyRSAPSS struct { + pk *rsa.PublicKey + addr ids.ShortID + bytes []byte +} + +// Verify implements the PublicKey interface +func (k *PublicKeyRSAPSS) Verify(msg, sig []byte) bool { + return k.VerifyHash(hashing.ComputeHash256(msg), sig) +} + +// VerifyHash implements the PublicKey interface +func (k *PublicKeyRSAPSS) VerifyHash(hash, sig []byte) bool { + return rsa.VerifyPSS(k.pk, crypto.SHA256, hash, sig, nil) == nil +} + +// Address implements the PublicKey interface +func (k *PublicKeyRSAPSS) Address() ids.ShortID { + if k.addr.IsZero() { + addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) + if err != nil { + panic(err) + } + k.addr = addr + } + return k.addr +} + +// Bytes implements the PublicKey interface +func (k *PublicKeyRSAPSS) Bytes() []byte { + if k.bytes == nil { + b, err := x509.MarshalPKIXPublicKey(k.pk) + if err != nil { + panic(err) + } + k.bytes = b + } + return k.bytes +} + +// PrivateKeyRSAPSS ... +type PrivateKeyRSAPSS struct { + sk *rsa.PrivateKey + pk *PublicKeyRSAPSS + bytes []byte +} + +// PublicKey implements the PrivateKey interface +func (k *PrivateKeyRSAPSS) PublicKey() PublicKey { + if k.pk == nil { + k.pk = &PublicKeyRSAPSS{pk: &k.sk.PublicKey} + } + return k.pk +} + +// Sign implements the PrivateKey interface +func (k *PrivateKeyRSAPSS) Sign(msg []byte) ([]byte, error) { + return k.SignHash(hashing.ComputeHash256(msg)) +} + +// SignHash implements the PrivateKey interface +func (k *PrivateKeyRSAPSS) SignHash(hash []byte) ([]byte, error) { + return rsa.SignPSS(rand.Reader, k.sk, crypto.SHA256, hash, nil) +} + +// Bytes implements the PrivateKey interface +func (k *PrivateKeyRSAPSS) Bytes() []byte { + if k.bytes == nil { + k.bytes = x509.MarshalPKCS1PrivateKey(k.sk) + } + return k.bytes +} diff --git a/utils/crypto/secp256k1.go b/utils/crypto/secp256k1.go new file mode 100644 index 0000000..72f4451 --- /dev/null +++ b/utils/crypto/secp256k1.go @@ -0,0 +1,146 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package crypto
+
+import (
+	"crypto/ecdsa"
+	"crypto/rand"
+	"math/big"
+
+	"github.com/ava-labs/go-ethereum/crypto"
+	"github.com/ava-labs/go-ethereum/crypto/secp256k1"
+
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils/hashing"
+)
+
+const (
+	// SECP256K1SigLen is the number of bytes in a secp256k1 signature
+	SECP256K1SigLen = 64
+
+	// SECP256K1SKLen is the number of bytes in a secp256k1 private key
+	SECP256K1SKLen = 32
+)
+
+// FactorySECP256K1 creates and parses (non-recoverable) secp256k1 key pairs.
+type FactorySECP256K1 struct{}
+
+// NewPrivateKey implements the Factory interface
+func (*FactorySECP256K1) NewPrivateKey() (PrivateKey, error) {
+	k, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+	return &PrivateKeySECP256K1{sk: k}, nil
+}
+
+// ToPublicKey implements the Factory interface.
+// [b] is expected to be a compressed public key encoding.
+func (*FactorySECP256K1) ToPublicKey(b []byte) (PublicKey, error) {
+	key, err := crypto.DecompressPubkey(b)
+	return &PublicKeySECP256K1{
+		pk:    key,
+		bytes: b,
+	}, err
+}
+
+// ToPrivateKey implements the Factory interface
+func (*FactorySECP256K1) ToPrivateKey(b []byte) (PrivateKey, error) {
+	key, err := crypto.ToECDSA(b)
+	return &PrivateKeySECP256K1{
+		sk:    key,
+		bytes: b,
+	}, err
+}
+
+// PublicKeySECP256K1 is a public key on the secp256k1 curve; Bytes returns
+// the compressed encoding.
+type PublicKeySECP256K1 struct { + pk *ecdsa.PublicKey + addr ids.ShortID + bytes []byte +} + +// Verify implements the PublicKey interface +func (k *PublicKeySECP256K1) Verify(msg, sig []byte) bool { + return k.VerifyHash(hashing.ComputeHash256(msg), sig) +} + +// VerifyHash implements the PublicKey interface +func (k *PublicKeySECP256K1) VerifyHash(hash, sig []byte) bool { + if verifySECP256K1SignatureFormat(sig) != nil { + return false + } + return crypto.VerifySignature(k.Bytes(), hash, sig) +} + +// Address implements the PublicKey interface +func (k *PublicKeySECP256K1) Address() ids.ShortID { + if k.addr.IsZero() { + addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) + if err != nil { + panic(err) + } + k.addr = addr + } + return k.addr +} + +// Bytes implements the PublicKey interface +func (k *PublicKeySECP256K1) Bytes() []byte { + if k.bytes == nil { + k.bytes = crypto.CompressPubkey(k.pk) + } + return k.bytes +} + +// PrivateKeySECP256K1 ... +type PrivateKeySECP256K1 struct { + sk *ecdsa.PrivateKey + pk *PublicKeySECP256K1 + bytes []byte +} + +// PublicKey implements the PrivateKey interface +func (k *PrivateKeySECP256K1) PublicKey() PublicKey { + if k.pk == nil { + k.pk = &PublicKeySECP256K1{pk: (*ecdsa.PublicKey)(&k.sk.PublicKey)} + } + return k.pk +} + +// Sign implements the PrivateKey interface +func (k *PrivateKeySECP256K1) Sign(msg []byte) ([]byte, error) { + return k.SignHash(hashing.ComputeHash256(msg)) +} + +// SignHash implements the PrivateKey interface +func (k *PrivateKeySECP256K1) SignHash(hash []byte) ([]byte, error) { + sig, err := crypto.Sign(hash, k.sk) + if err != nil { + return nil, err + } + return sig[:len(sig)-1], err +} + +// Bytes implements the PrivateKey interface +func (k *PrivateKeySECP256K1) Bytes() []byte { + if k.bytes == nil { + k.bytes = make([]byte, SECP256K1SKLen) + bytes := k.sk.D.Bytes() + copy(k.bytes[SECP256K1SKLen-len(bytes):], bytes) + } + return k.bytes +} + +func 
verifySECP256K1SignatureFormat(sig []byte) error { + if len(sig) != SECP256K1SigLen { + return errInvalidSigLen + } + var r, s big.Int + r.SetBytes(sig[:32]) + s.SetBytes(sig[32:]) + if !crypto.ValidateSignatureValues(0, &r, &s, true) { + return errMutatedSig + } + return nil +} diff --git a/utils/crypto/secp256k1_recover_benchmark_test.go b/utils/crypto/secp256k1_recover_benchmark_test.go new file mode 100644 index 0000000..d1a3530 --- /dev/null +++ b/utils/crypto/secp256k1_recover_benchmark_test.go @@ -0,0 +1,48 @@ +package crypto + +import ( + "testing" + + "github.com/ava-labs/gecko/utils/hashing" +) + +// NumRecoveries is the number of recoveries to run per operation +const NumRecoveries = 1 + +var ( + secpSigs [][]byte +) + +func init() { + factory := FactorySECP256K1R{} + + hash := hashing.ComputeHash256(nil) + for i := byte(0); i < NumRecoveries; i++ { + key, err := factory.NewPrivateKey() + if err != nil { + panic(err) + } + sig, err := key.SignHash(hash) + if err != nil { + panic(err) + } + secpSigs = append(secpSigs, sig) + } +} + +func recover() { + factory := FactorySECP256K1R{} + hash := hashing.ComputeHash256(nil) + for _, sig := range secpSigs { + if _, err := factory.RecoverHashPublicKey(hash, sig); err != nil { + panic(err) + } + } +} + +// BenchmarkSecp256k1RecoverVerify runs the benchmark with secp sig +func BenchmarkSecp256k1RecoverVerify(b *testing.B) { + for n := 0; n < b.N; n++ { + recover() + } +} diff --git a/utils/crypto/secp256k1r.go b/utils/crypto/secp256k1r.go new file mode 100644 index 0000000..6de3515 --- /dev/null +++ b/utils/crypto/secp256k1r.go @@ -0,0 +1,190 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package crypto
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"math/big"
+	"sort"
+
+	"github.com/ava-labs/go-ethereum/crypto"
+	"github.com/ava-labs/go-ethereum/crypto/secp256k1"
+
+	"github.com/ava-labs/gecko/cache"
+	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/utils"
+	"github.com/ava-labs/gecko/utils/hashing"
+)
+
+const (
+	// SECP256K1RSigLen is the number of bytes in a secp256k1 recoverable
+	// signature
+	SECP256K1RSigLen = 65
+
+	// SECP256K1RSKLen is the number of bytes in a secp256k1 recoverable private
+	// key
+	SECP256K1RSKLen = 32
+)
+
+// FactorySECP256K1R creates and parses recoverable secp256k1 keys.
+// Public keys recovered via RecoverHashPublicKey are memoized in Cache.
+type FactorySECP256K1R struct{ Cache cache.LRU }
+
+// NewPrivateKey implements the Factory interface
+func (*FactorySECP256K1R) NewPrivateKey() (PrivateKey, error) {
+	k, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+	return &PrivateKeySECP256K1R{sk: k}, nil
+}
+
+// ToPublicKey implements the Factory interface
+func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) {
+	key, err := crypto.DecompressPubkey(b)
+	return &PublicKeySECP256K1R{
+		pk:    key,
+		bytes: b,
+	}, err
+}
+
+// ToPrivateKey implements the Factory interface
+func (*FactorySECP256K1R) ToPrivateKey(b []byte) (PrivateKey, error) {
+	key, err := crypto.ToECDSA(b)
+	return &PrivateKeySECP256K1R{
+		sk:    key,
+		bytes: b,
+	}, err
+}
+
+// RecoverPublicKey returns the public key from a 65 byte signature
+func (f *FactorySECP256K1R) RecoverPublicKey(msg, sig []byte) (PublicKey, error) {
+	return f.RecoverHashPublicKey(hashing.ComputeHash256(msg), sig)
+}
+
+// RecoverHashPublicKey returns the public key from a 65 byte signature.
+// NOTE(review): the cached/returned key is the non-recoverable
+// *PublicKeySECP256K1 type (whose VerifyHash expects 64-byte signatures) —
+// confirm callers expect those semantics.
+func (f *FactorySECP256K1R) RecoverHashPublicKey(hash, sig []byte) (PublicKey, error) {
+	// Cache key is the hash of hash||sig, so repeated recoveries of the
+	// same (hash, sig) pair hit the LRU instead of re-running SigToPub.
+	cacheBytes := make([]byte, len(hash)+len(sig))
+	copy(cacheBytes, hash)
+	copy(cacheBytes[len(hash):], sig)
+	id := ids.NewID(hashing.ComputeHash256Array(cacheBytes))
+	if cachedPublicKey, ok := f.Cache.Get(id); ok {
+		return
cachedPublicKey.(*PublicKeySECP256K1), nil + } + + if err := verifySECP256K1RSignatureFormat(sig); err != nil { + return nil, err + } + + rawPubkey, err := crypto.SigToPub(hash, sig) + if err != nil { + return nil, err + } + pubkey := &PublicKeySECP256K1{pk: rawPubkey} + f.Cache.Put(id, pubkey) + return pubkey, nil +} + +// PublicKeySECP256K1R ... +type PublicKeySECP256K1R struct { + pk *ecdsa.PublicKey + addr ids.ShortID + bytes []byte +} + +// Verify implements the PublicKey interface +func (k *PublicKeySECP256K1R) Verify(msg, sig []byte) bool { + return k.VerifyHash(hashing.ComputeHash256(msg), sig) +} + +// VerifyHash implements the PublicKey interface +func (k *PublicKeySECP256K1R) VerifyHash(hash, sig []byte) bool { + if verifySECP256K1RSignatureFormat(sig) != nil { + return false + } + return crypto.VerifySignature(k.Bytes(), hash, sig[:SECP256K1RSigLen-1]) +} + +// Address implements the PublicKey interface +func (k *PublicKeySECP256K1R) Address() ids.ShortID { + if k.addr.IsZero() { + addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) + if err != nil { + panic(err) + } + k.addr = addr + } + return k.addr +} + +// Bytes implements the PublicKey interface +func (k *PublicKeySECP256K1R) Bytes() []byte { + if k.bytes == nil { + k.bytes = crypto.CompressPubkey(k.pk) + } + return k.bytes +} + +// PrivateKeySECP256K1R ... 
+type PrivateKeySECP256K1R struct { + sk *ecdsa.PrivateKey + pk *PublicKeySECP256K1R + bytes []byte +} + +// PublicKey implements the PrivateKey interface +func (k *PrivateKeySECP256K1R) PublicKey() PublicKey { + if k.pk == nil { + k.pk = &PublicKeySECP256K1R{pk: (*ecdsa.PublicKey)(&k.sk.PublicKey)} + } + return k.pk +} + +// Sign implements the PrivateKey interface +func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) { + return k.SignHash(hashing.ComputeHash256(msg)) +} + +// SignHash implements the PrivateKey interface +func (k *PrivateKeySECP256K1R) SignHash(hash []byte) ([]byte, error) { + return crypto.Sign(hash, k.sk) +} + +// Bytes implements the PrivateKey interface +func (k *PrivateKeySECP256K1R) Bytes() []byte { + if k.bytes == nil { + k.bytes = make([]byte, SECP256K1RSKLen) + bytes := k.sk.D.Bytes() + copy(k.bytes[SECP256K1RSKLen-len(bytes):], bytes) + } + return k.bytes +} + +func verifySECP256K1RSignatureFormat(sig []byte) error { + if len(sig) != SECP256K1RSigLen { + return errInvalidSigLen + } + var r, s big.Int + r.SetBytes(sig[:32]) + s.SetBytes(sig[32:64]) + if !crypto.ValidateSignatureValues(sig[64], &r, &s, true) { + return errMutatedSig + } + return nil +} + +type innerSortSECP2561RSigs [][SECP256K1RSigLen]byte + +func (lst innerSortSECP2561RSigs) Less(i, j int) bool { return bytes.Compare(lst[i][:], lst[j][:]) < 0 } +func (lst innerSortSECP2561RSigs) Len() int { return len(lst) } +func (lst innerSortSECP2561RSigs) Swap(i, j int) { lst[j], lst[i] = lst[i], lst[j] } + +// SortSECP2561RSigs sorts a slice of SECP2561R signatures +func SortSECP2561RSigs(lst [][SECP256K1RSigLen]byte) { sort.Sort(innerSortSECP2561RSigs(lst)) } + +// IsSortedAndUniqueSECP2561RSigs returns true if [sigs] is sorted +func IsSortedAndUniqueSECP2561RSigs(sigs [][SECP256K1RSigLen]byte) bool { + return utils.IsSortedAndUnique(innerSortSECP2561RSigs(sigs)) +} diff --git a/utils/crypto/secp256k1r_test.go b/utils/crypto/secp256k1r_test.go new file mode 100644 index 
0000000..244b30b --- /dev/null +++ b/utils/crypto/secp256k1r_test.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package crypto + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" +) + +func TestRecover(t *testing.T) { + f := FactorySECP256K1R{} + key, _ := f.NewPrivateKey() + + msg := []byte{1, 2, 3} + sig, _ := key.Sign(msg) + + pub := key.PublicKey() + pubRec, _ := f.RecoverPublicKey(msg, sig) + + if !bytes.Equal(pub.Bytes(), pubRec.Bytes()) { + t.Fatalf("Should have been equal") + } +} + +func TestCachedRecover(t *testing.T) { + f := FactorySECP256K1R{Cache: cache.LRU{Size: 1}} + key, _ := f.NewPrivateKey() + + msg := []byte{1, 2, 3} + sig, _ := key.Sign(msg) + + pub1, _ := f.RecoverPublicKey(msg, sig) + pub2, _ := f.RecoverPublicKey(msg, sig) + + if pub1 != pub2 { + t.Fatalf("Should have returned the same public key") + } +} + +func TestExtensive(t *testing.T) { + f := FactorySECP256K1R{} + + hash := hashing.ComputeHash256([]byte{1, 2, 3}) + for i := 0; i < 1000; i++ { + if key, err := f.NewPrivateKey(); err != nil { + t.Fatalf("Generated bad private key") + } else if _, err := key.SignHash(hash); err != nil { + t.Fatalf("Failed signing with:\n%s", formatting.DumpBytes{Bytes: key.Bytes()}) + } + } +} + +func TestGenRecreate(t *testing.T) { + f := FactorySECP256K1R{} + + for i := 0; i < 1000; i++ { + sk, err := f.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + skBytes := sk.Bytes() + recoveredSk, err := f.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(sk.PublicKey().Address().Bytes(), recoveredSk.PublicKey().Address().Bytes()) { + t.Fatalf("Wrong public key") + } + } +} diff --git a/utils/formatting/cb58.go b/utils/formatting/cb58.go new file mode 100644 index 0000000..8137f2a --- /dev/null +++ b/utils/formatting/cb58.go @@ -0,0 +1,71 @@ 
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package formatting + +import ( + "bytes" + "errors" + + "github.com/mr-tron/base58/base58" + + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errMissingQuotes = errors.New("missing quotes") + errMissingChecksum = errors.New("input string is smaller than the checksum size") + errBadChecksum = errors.New("invalid input checksum") +) + +// CB58 formats bytes in checksummed base-58 encoding +type CB58 struct{ Bytes []byte } + +// UnmarshalJSON ... +func (cb58 *CB58) UnmarshalJSON(b []byte) error { + str := string(b) + if str == "null" { + return nil + } + + if len(str) < 2 { + return errMissingQuotes + } + + lastIndex := len(str) - 1 + if str[0] != '"' || str[lastIndex] != '"' { + return errMissingQuotes + } + return cb58.FromString(str[1:lastIndex]) +} + +// MarshalJSON ... +func (cb58 CB58) MarshalJSON() ([]byte, error) { return []byte("\"" + cb58.String() + "\""), nil } + +// FromString ... +func (cb58 *CB58) FromString(str string) error { + b, err := base58.Decode(str) + if err != nil { + return err + } + if len(b) < 4 { + return errMissingChecksum + } + + rawBytes := b[:len(b)-4] + checksum := b[len(b)-4:] + + if !bytes.Equal(checksum, hashing.Checksum(rawBytes, 4)) { + return errBadChecksum + } + + cb58.Bytes = rawBytes + return nil +} + +func (cb58 CB58) String() string { + checked := make([]byte, len(cb58.Bytes)+4) + copy(checked, cb58.Bytes) + copy(checked[len(cb58.Bytes):], hashing.Checksum(cb58.Bytes, 4)) + return base58.Encode(checked) +} diff --git a/utils/formatting/cb58_test.go b/utils/formatting/cb58_test.go new file mode 100644 index 0000000..74e7aae --- /dev/null +++ b/utils/formatting/cb58_test.go @@ -0,0 +1,80 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package formatting + +import ( + "bytes" + "testing" +) + +func TestCB58(t *testing.T) { + addr := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255} + result := CB58{addr}.String() + expected := "1NVSVezva3bAtJesnUj" + if result != expected { + t.Fatalf("Expected %s, got %s", expected, result) + } +} + +func TestCB58Single(t *testing.T) { + addr := []byte{0} + result := CB58{addr}.String() + expected := "1c7hwa" + if result != expected { + t.Fatalf("Expected %s, got %s", expected, result) + } +} + +func TestCB58ParseBytes(t *testing.T) { + ui := "1NVSVezva3bAtJesnUj" + cb58 := CB58{} + err := cb58.FromString(ui) + if err != nil { + t.Fatalf("Failed to process %s", ui) + } + expected := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255} + if !bytes.Equal(cb58.Bytes, expected) { + t.Fatalf("Expected 0x%x, got 0x%x", expected, cb58.Bytes) + } +} + +func TestCB58ParseBytesSingle(t *testing.T) { + ui := "1c7hwa" + cb58 := CB58{} + err := cb58.FromString(ui) + if err != nil { + t.Fatalf("Failed to process %s", ui) + } + expected := []byte{0} + if !bytes.Equal(cb58.Bytes, expected) { + t.Fatalf("Expected 0x%x, got 0x%x", expected, cb58.Bytes) + } +} + +func TestCB58ParseBytesError(t *testing.T) { + ui := "0" + cb58 := CB58{} + err := cb58.FromString(ui) + if err == nil { + t.Fatalf("Incorrectly parsed %s", ui) + } + + ui = "13pP7vbI" + err = cb58.FromString(ui) + if err == nil { + t.Fatalf("Incorrectly parsed %s", ui) + } + + ui = "13" + err = cb58.FromString(ui) + if err == nil { + t.Fatalf("Incorrectly parsed %s", ui) + } + + ui = "13pP7vb3" + err = cb58.FromString(ui) + if err == nil { + t.Fatalf("Incorrectly parsed %s", ui) + } +} diff --git a/utils/formatting/custom_stringer.go b/utils/formatting/custom_stringer.go new file mode 100644 index 0000000..8f67c6f --- /dev/null +++ b/utils/formatting/custom_stringer.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package formatting + +// CustomStringer ... 
// IntFormat returns a printf-style verb (e.g. "%03d") that zero-pads an
// integer to the base-10 width of [maxValue].
//
// The width is computed by counting decimal digits instead of using
// math.Ceil(math.Log10(...)), which avoids floating-point rounding
// surprises near exact powers of ten for large values. Inputs <= 0 get a
// width of one digit, matching the previous behavior.
func IntFormat(maxValue int) string {
	digits := 1
	for v := maxValue; v >= 10; v /= 10 {
		digits++
	}
	return fmt.Sprintf("%%0%dd", digits)
}
+ +package formatting + +import ( + "testing" +) + +func TestIntFormat(t *testing.T) { + if format := IntFormat(0); format != "%01d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(9); format != "%01d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(10); format != "%02d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(99); format != "%02d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(100); format != "%03d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(999); format != "%03d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(1000); format != "%04d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(9999); format != "%04d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(10000); format != "%05d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(99999); format != "%05d" { + t.Fatalf("Wrong int format: %s", format) + } + if format := IntFormat(100000); format != "%06d" { + t.Fatalf("Wrong int format: %s", format) + } +} diff --git a/utils/formatting/prefixed_stringer.go b/utils/formatting/prefixed_stringer.go new file mode 100644 index 0000000..320dfd6 --- /dev/null +++ b/utils/formatting/prefixed_stringer.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package formatting + +import ( + "fmt" +) + +// PrefixedStringer extends a stringer that adds a prefix +type PrefixedStringer interface { + fmt.Stringer + + PrefixedString(prefix string) string +} diff --git a/utils/hashing/hashing.go b/utils/hashing/hashing.go new file mode 100644 index 0000000..94fb1b3 --- /dev/null +++ b/utils/hashing/hashing.go @@ -0,0 +1,127 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package hashing + +import ( + "bytes" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/ripemd160" +) + +var ( + errBadLength = errors.New("input has insufficient length") +) + +// HashLen ... +const HashLen = sha256.Size + +// AddrLen ... +const AddrLen = ripemd160.Size + +// Hash256 A 256 bit long hash value. +type Hash256 = [HashLen]byte + +// Hash160 A 160 bit long hash value. +type Hash160 = [ripemd160.Size]byte + +// ComputeHash256Array Compute a cryptographically strong 256 bit hash of the +// input byte slice. +func ComputeHash256Array(buf []byte) Hash256 { + return sha256.Sum256(buf) +} + +// ComputeHash256 Compute a cryptographically strong 256 bit hash of the input +// byte slice. +func ComputeHash256(buf []byte) []byte { + arr := ComputeHash256Array(buf) + return arr[:] +} + +// ByteArraysToHash256Array takes in byte arrays and outputs a fixed 32 length +// byte array for the hash +func ByteArraysToHash256Array(byteArray ...[]byte) [32]byte { + buffer := new(bytes.Buffer) + for _, b := range byteArray { + err := binary.Write(buffer, binary.LittleEndian, b) + if err != nil { + fmt.Println(err) + } + } + return ComputeHash256Array(buffer.Bytes()) +} + +// ComputeHash256Ranges Compute a cryptographically strong 256 bit hash of the input +// byte slice in the ranges specified. +// Example: ComputeHash256Ranges({1, 2, 4, 8, 16}, {{1, 2}, +// {3, 5}}) +// is equivalent to ComputeHash256({2, 8, 16}). +func ComputeHash256Ranges(buf []byte, ranges [][2]int) []byte { + hashBuilder := sha256.New() + for _, r := range ranges { + _, err := hashBuilder.Write(buf[r[0]:r[1]]) + if err != nil { + panic(err) + } + } + return hashBuilder.Sum(nil) +} + +// ComputeHash160Array Compute a cryptographically strong 160 bit hash of the +// input byte slice. 
+func ComputeHash160Array(buf []byte) Hash160 { + h, err := ToHash160(ComputeHash160(buf)) + if err != nil { + panic(err) + } + return h +} + +// ComputeHash160 Compute a cryptographically strong 160 bit hash of the input +// byte slice. +func ComputeHash160(buf []byte) []byte { + ripe := ripemd160.New() + _, err := io.Writer(ripe).Write(buf) + if err != nil { + panic(err) + } + return ripe.Sum(nil) +} + +// Checksum Create checksum of [length] bytes from the 256 bit hash of the byte slice. +// Returns the lower [length] bytes of the hash +// Errors if length > 32. +func Checksum(bytes []byte, length int) []byte { + hash := ComputeHash256Array(bytes) + return hash[len(hash)-length:] +} + +// ToHash256 ... +func ToHash256(bytes []byte) (Hash256, error) { + hash := Hash256{} + if len(bytes) != HashLen { + return hash, errBadLength + } + copy(hash[:], bytes) + return hash, nil +} + +// ToHash160 ... +func ToHash160(bytes []byte) (Hash160, error) { + hash := Hash160{} + if len(bytes) != ripemd160.Size { + return hash, errBadLength + } + copy(hash[:], bytes) + return hash, nil +} + +// PubkeyBytesToAddress ... +func PubkeyBytesToAddress(key []byte) []byte { + return ComputeHash160(ComputeHash256(key)) +} diff --git a/utils/ip.go b/utils/ip.go new file mode 100644 index 0000000..cca055d --- /dev/null +++ b/utils/ip.go @@ -0,0 +1,64 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" +) + +var ( + errBadIP = errors.New("bad ip format") +) + +// IPDesc ... +type IPDesc struct { + IP net.IP + Port uint16 +} + +// Equal ... +func (ipDesc IPDesc) Equal(otherIPDesc IPDesc) bool { + return ipDesc.Port == otherIPDesc.Port && + ipDesc.IP.Equal(otherIPDesc.IP) +} + +// PortString ... 
+func (ipDesc IPDesc) PortString() string { + return fmt.Sprintf(":%d", ipDesc.Port) +} + +func (ipDesc IPDesc) String() string { + return fmt.Sprintf("%s%s", ipDesc.IP, ipDesc.PortString()) +} + +// ToIPDesc ... +// TODO: this was kinda hacked together, it should be verified. +func ToIPDesc(str string) (IPDesc, error) { + parts := strings.Split(str, ":") + if len(parts) != 2 { + return IPDesc{}, errBadIP + } + port, err := strconv.ParseUint(parts[1], 10 /*=base*/, 16 /*=size*/) + if err != nil { + return IPDesc{}, err + } + ip := net.ParseIP(parts[0]) + if ip == nil { + return IPDesc{}, errBadIP + } + return IPDesc{ + IP: ip, + Port: uint16(port), + }, nil +} + +// MyIP ... +func MyIP() net.IP { + // TODO: Change this to consult a json-returning external service + return net.ParseIP("127.0.0.1") +} diff --git a/utils/json/codec.go b/utils/json/codec.go new file mode 100644 index 0000000..3058007 --- /dev/null +++ b/utils/json/codec.go @@ -0,0 +1,52 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
// Method returns the RPC method name for this request, rewriting
// "service.function" so that the first letter of the function part is
// uppercased — gorilla/rpc dispatches only to exported Go methods, while
// external API clients use lowercase-first names.
//
// A function part that already begins with an uppercase letter is rejected
// with errUppercaseMethod, so callers cannot bypass the lowercase-only API
// surface by naming the exported method directly.
func (r *request) Method() (string, error) {
	method, err := r.CodecRequest.Method()
	methodSections := strings.SplitN(method, ".", 2)
	if len(methodSections) != 2 || err != nil {
		// No "service.function" shape, or the underlying codec errored:
		// pass the result through unchanged.
		return method, err
	}
	class, function := methodSections[0], methodSections[1]
	firstRune, runeLen := utf8.DecodeRuneInString(function)
	if firstRune == utf8.RuneError {
		// Empty or invalid-UTF-8 function name; leave it untouched rather
		// than failing here.
		return method, nil
	}
	if unicode.IsUpper(firstRune) {
		return method, errUppercaseMethod
	}
	uppercaseRune := string(unicode.ToUpper(firstRune))
	return fmt.Sprintf("%s.%s%s", class, string(uppercaseRune), function[runeLen:]), nil
}
+ pongWait = 60 * time.Second + + // Send pings to peer with this period. Must be less than pongWait. + pingPeriod = (pongWait * 9) / 10 + + // Maximum message size allowed from peer. + maxMessageSize = 512 // bytes + + // Maximum number of pending messages to send to a peer. + maxPendingMessages = 256 // messages +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: readBufferSize, + WriteBufferSize: writeBufferSize, + CheckOrigin: func(*http.Request) bool { return true }, +} + +var ( + errDuplicateChannel = errors.New("duplicate channel") +) + +// PubSubServer maintains the set of active clients and sends messages to the clients. +type PubSubServer struct { + ctx *snow.Context + + lock sync.Mutex + conns map[*Connection]map[string]struct{} + channels map[string]map[*Connection]struct{} +} + +// NewPubSubServer ... +func NewPubSubServer(ctx *snow.Context) *PubSubServer { + return &PubSubServer{ + ctx: ctx, + conns: make(map[*Connection]map[string]struct{}), + channels: make(map[string]map[*Connection]struct{}), + } +} + +func (s *PubSubServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + wsConn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + s.ctx.Log.Debug("Failed to upgrade %s", err) + return + } + conn := &Connection{s: s, conn: wsConn, send: make(chan interface{}, maxPendingMessages)} + s.addConnection(conn) +} + +// Publish ... +func (s *PubSubServer) Publish(channel string, msg interface{}) { + s.lock.Lock() + defer s.lock.Unlock() + + conns, exists := s.channels[channel] + if !exists { + s.ctx.Log.Warn("attempted to publush to an unknown channel %s", channel) + return + } + + pubMsg := &publish{ + Channel: channel, + Value: msg, + } + + for conn := range conns { + select { + case conn.send <- pubMsg: + default: + s.ctx.Log.Verbo("dropping message to subscribed connection due to too many pending messages") + } + } +} + +// Register ... 
+func (s *PubSubServer) Register(channel string) error { + s.lock.Lock() + defer s.lock.Unlock() + + if _, exists := s.channels[channel]; exists { + return errDuplicateChannel + } + + s.channels[channel] = make(map[*Connection]struct{}) + return nil +} + +func (s *PubSubServer) addConnection(conn *Connection) { + s.lock.Lock() + defer s.lock.Unlock() + s.conns[conn] = make(map[string]struct{}) + + go conn.writePump() + go conn.readPump() +} + +func (s *PubSubServer) removeConnection(conn *Connection) { + s.lock.Lock() + defer s.lock.Unlock() + + channels, exists := s.conns[conn] + if !exists { + s.ctx.Log.Warn("attempted to remove an unknown connection") + return + } + + for channel := range channels { + delete(s.channels[channel], conn) + } +} + +func (s *PubSubServer) addChannel(conn *Connection, channel string) { + s.lock.Lock() + defer s.lock.Unlock() + + channels, exists := s.conns[conn] + if !exists { + return + } + + conns, exists := s.channels[channel] + if !exists { + return + } + + channels[channel] = struct{}{} + conns[conn] = struct{}{} +} + +func (s *PubSubServer) removeChannel(conn *Connection, channel string) { + s.lock.Lock() + defer s.lock.Unlock() + + channels, exists := s.conns[conn] + if !exists { + return + } + + conns, exists := s.channels[channel] + if !exists { + return + } + + delete(channels, channel) + delete(conns, conn) +} + +type publish struct { + Channel string `json:"channel"` + Value interface{} `json:"value"` +} + +type subscribe struct { + Channel string `json:"channel"` + Unsubscribe bool `json:"unsubscribe"` +} + +// Connection is a representation of the websocket connection. +type Connection struct { + s *PubSubServer + + // The websocket connection. + conn *websocket.Conn + + // Buffered channel of outbound messages. + send chan interface{} +} + +// readPump pumps messages from the websocket connection to the hub. +// +// The application runs readPump in a per-connection goroutine. 
// writePump pumps messages from the hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (c *Connection) writePump() {
	// Ping on a fixed period (pingPeriod < pongWait) so the peer's pong
	// handler in readPump keeps extending the read deadline.
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			// Every write gets a fresh deadline; a peer that stops reading
			// causes the write to fail instead of blocking forever.
			c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if !ok {
				// The hub closed the channel.
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}

			if err := c.conn.WriteJSON(message); err != nil {
				// Returning triggers the deferred Close; readPump will then
				// unregister the connection from the server.
				return
			}
		case <-ticker.C:
			c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
var errTooLarge16 = errors.New("value overflowed uint16")

// Uint16 wraps uint16 so that it is marshalled to/from JSON as a quoted
// base-10 string (presumably to sidestep JSON number handling in clients —
// consistent with the other sized integer wrappers in this package).
type Uint16 uint16

// MarshalJSON encodes the value as a quoted decimal string.
func (u Uint16) MarshalJSON() ([]byte, error) {
	return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil
}

// UnmarshalJSON decodes a quoted or bare decimal string. "null" leaves the
// receiver unchanged. On any error the receiver is left unmodified (the old
// code wrote a garbage value before returning the parse error).
func (u *Uint16) UnmarshalJSON(b []byte) error {
	str := string(b)
	if str == "null" {
		return nil
	}
	if len(str) >= 2 {
		if lastIndex := len(str) - 1; str[0] == '"' && str[lastIndex] == '"' {
			str = str[1:lastIndex]
		}
	}
	// Parse with an explicit 64-bit size: the previous bitSize of 0 means
	// "size of uint", which is platform dependent and made the returned
	// error differ between 32- and 64-bit builds. The parse error is now
	// checked before the overflow check, so malformed input reports the
	// parse error rather than a bogus errTooLarge16.
	val, err := strconv.ParseUint(str, 10, 64)
	if err != nil {
		return err
	}
	if val > math.MaxUint16 {
		return errTooLarge16
	}
	*u = Uint16(val)
	return nil
}
// Uint64 wraps uint64 so that it is marshalled to/from JSON as a quoted
// base-10 string (presumably to avoid precision loss in clients that treat
// JSON numbers as float64 — consistent with the other wrappers here).
type Uint64 uint64

// MarshalJSON encodes the value as a quoted decimal string.
func (u Uint64) MarshalJSON() ([]byte, error) {
	return []byte("\"" + strconv.FormatUint(uint64(u), 10) + "\""), nil
}

// UnmarshalJSON decodes a quoted or bare decimal string. "null" leaves the
// receiver unchanged. On a parse error the receiver is left unmodified.
func (u *Uint64) UnmarshalJSON(b []byte) error {
	str := string(b)
	if str == "null" {
		return nil
	}
	if len(str) >= 2 {
		if lastIndex := len(str) - 1; str[0] == '"' && str[lastIndex] == '"' {
			str = str[1:lastIndex]
		}
	}
	// bitSize must be 64 explicitly: the previous bitSize of 0 means "size
	// of uint", which is 32 bits on 32-bit platforms and would reject valid
	// uint64 values larger than MaxUint32 there.
	val, err := strconv.ParseUint(str, 10, 64)
	if err != nil {
		return err
	}
	*u = Uint64(val)
	return nil
}
+type Color string + +// Colors +const ( + Black Color = "\033[0;30m" + DarkGray Color = "\033[1;30m" + Red Color = "\033[0;31m" + LightRed Color = "\033[1;31m" + Green Color = "\033[0;32m" + LightGreen Color = "\033[1;32m" + Orange Color = "\033[0;33m" + Yellow Color = "\033[1;33m" + Blue Color = "\033[0;34m" + LightBlue Color = "\033[1;34m" + Purple Color = "\033[0;35m" + LightPurple Color = "\033[1;35m" + Cyan Color = "\033[0;36m" + LightCyan Color = "\033[1;36m" + LightGray Color = "\033[0;37m" + White Color = "\033[1;37m" + + Reset Color = "\033[0;0m" + Bold Color = "\033[;1m" + Reverse Color = "\033[;7m" +) + +// Wrap ... +func (lc Color) Wrap(text string) string { return string(lc) + text + string(Reset) } diff --git a/utils/logging/config.go b/utils/logging/config.go new file mode 100644 index 0000000..b08e88a --- /dev/null +++ b/utils/logging/config.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "time" + + "github.com/mitchellh/go-homedir" +) + +// DefaultLogDirectory ... +const DefaultLogDirectory = "~/.gecko/logs" + +// Config ... +type Config struct { + RotationInterval time.Duration + FileSize, RotationSize, FlushSize int + DisableLogging, DisableDisplaying, DisableContextualDisplaying, DisableFlushOnWrite, Assertions bool + LogLevel, DisplayLevel Level + Directory, MsgPrefix string +} + +// DefaultConfig ... +func DefaultConfig() (Config, error) { + dir, err := homedir.Expand(DefaultLogDirectory) + return Config{ + RotationInterval: 24 * time.Hour, + FileSize: 1 << 23, // 8 MB + RotationSize: 7, + FlushSize: 1, + DisplayLevel: Info, + LogLevel: Debug, + Directory: dir, + }, err +} diff --git a/utils/logging/factory.go b/utils/logging/factory.go new file mode 100644 index 0000000..112d03c --- /dev/null +++ b/utils/logging/factory.go @@ -0,0 +1,74 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package logging + +import ( + "path" + + "github.com/ava-labs/gecko/ids" +) + +// Factory ... +type Factory interface { + Make() (Logger, error) + MakeChain(chainID ids.ID, subdir string) (Logger, error) + MakeSubdir(subdir string) (Logger, error) + Close() +} + +// factory ... +type factory struct { + config Config + + loggers []Logger +} + +// NewFactory ... +func NewFactory(config Config) Factory { + return &factory{ + config: config, + } +} + +// Make ... +func (f *factory) Make() (Logger, error) { + l, err := New(f.config) + if err == nil { + f.loggers = append(f.loggers, l) + } + return l, err +} + +// MakeChain ... +func (f *factory) MakeChain(chainID ids.ID, subdir string) (Logger, error) { + config := f.config + config.MsgPrefix = "SN " + chainID.String() + config.Directory = path.Join(config.Directory, "chain", chainID.String(), subdir) + + log, err := New(config) + if err == nil { + f.loggers = append(f.loggers, log) + } + return log, err +} + +// MakeSubdir ... +func (f *factory) MakeSubdir(subdir string) (Logger, error) { + config := f.config + config.Directory = path.Join(config.Directory, subdir) + + log, err := New(config) + if err == nil { + f.loggers = append(f.loggers, log) + } + return log, err +} + +// Close ... +func (f *factory) Close() { + for _, log := range f.loggers { + log.Stop() + } + f.loggers = nil +} diff --git a/utils/logging/level.go b/utils/logging/level.go new file mode 100644 index 0000000..dc6b032 --- /dev/null +++ b/utils/logging/level.go @@ -0,0 +1,84 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "fmt" + "strings" +) + +// Level ... +type Level int + +// Enum ... +const ( + Off Level = iota + Fatal + Error + Warn + Info + Debug + Verbo +) + +// ToLevel ... 
+func ToLevel(l string) (Level, error) { + switch strings.ToUpper(l) { + case "OFF": + return Off, nil + case "FATAL": + return Fatal, nil + case "ERROR": + return Error, nil + case "WARN": + return Warn, nil + case "INFO": + return Info, nil + case "DEBUG": + return Debug, nil + case "VERBO": + return Verbo, nil + default: + return Info, fmt.Errorf("unknown log level: %s", l) + } +} + +// Color ... +func (l Level) Color() Color { + switch l { + case Fatal: + return Red + case Error: + return Orange + case Warn: + return Yellow + case Info: + return White + case Debug: + return LightBlue + case Verbo: + return LightGreen + default: + return Reset + } +} + +func (l Level) String() string { + switch l { + case Fatal: + return "FATAL" + case Error: + return "ERROR" + case Warn: + return "WARN " + case Info: + return "INFO " + case Debug: + return "DEBUG" + case Verbo: + return "VERBO" + default: + return "?????" + } +} diff --git a/utils/logging/log.go b/utils/logging/log.go new file mode 100644 index 0000000..a9e6aec --- /dev/null +++ b/utils/logging/log.go @@ -0,0 +1,302 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "bufio" + "fmt" + "os" + "path" + "runtime" + "strings" + "sync" + "time" +) + +// Log ... +type Log struct { + config Config + + messages []string + size int + + wg sync.WaitGroup + flushLock, writeLock, configLock sync.Mutex + needsFlush *sync.Cond + w *bufio.Writer + + closed bool +} + +// New ... 
+func New(config Config) (*Log, error) { + if err := os.MkdirAll(config.Directory, os.ModePerm); err != nil { + return nil, err + } + l := &Log{config: config} + l.needsFlush = sync.NewCond(&l.flushLock) + + l.wg.Add(1) + + go l.RecoverAndPanic(l.run) + + return l, nil +} + +func (l *Log) run() { + defer l.wg.Done() + + l.writeLock.Lock() + defer l.writeLock.Unlock() + + fileIndex := 0 + filename := path.Join(l.config.Directory, fmt.Sprintf("%d.log", fileIndex)) + f, err := os.Create(filename) + if err != nil { + panic(err) + } + l.w = bufio.NewWriter(f) + + closed := false + nextRotation := time.Now().Add(l.config.RotationInterval) + currentSize := 0 + for !closed { + l.writeLock.Unlock() + l.flushLock.Lock() + for l.size < l.config.FlushSize && !l.closed { + l.needsFlush.Wait() + } + closed = l.closed + prevMessages := l.messages + l.messages = nil + l.size = 0 + l.flushLock.Unlock() + l.writeLock.Lock() + + for _, msg := range prevMessages { + n, _ := l.w.WriteString(msg) + currentSize += n + } + + if !l.config.DisableFlushOnWrite { + l.w.Flush() + } + + if now := time.Now(); nextRotation.Before(now) || currentSize > l.config.FileSize { + nextRotation = now.Add(l.config.RotationInterval) + currentSize = 0 + l.w.Flush() + f.Close() + + fileIndex = (fileIndex + 1) % l.config.RotationSize + filename := path.Join(l.config.Directory, fmt.Sprintf("%d.log", fileIndex)) + f, err = os.Create(filename) + if err != nil { + panic(err) + } + l.w = bufio.NewWriter(f) + } + } + l.w.Flush() + f.Close() +} + +func (l *Log) Write(p []byte) (int, error) { + l.writeLock.Lock() + defer l.writeLock.Unlock() + + return l.w.Write(p) +} + +// Stop ... +func (l *Log) Stop() { + l.flushLock.Lock() + l.closed = true + l.needsFlush.Signal() + l.flushLock.Unlock() + + l.wg.Wait() +} + +// Should only be called from [Level] functions. 
+func (l *Log) log(level Level, format string, args ...interface{}) { + if l == nil { + return + } + + l.configLock.Lock() + defer l.configLock.Unlock() + + shouldLog := !l.config.DisableLogging && level <= l.config.LogLevel + shouldDisplay := (!l.config.DisableDisplaying && level <= l.config.DisplayLevel) || level == Fatal + + if !shouldLog && !shouldDisplay { + return + } + + output := l.format(level, format, args...) + + if shouldLog { + l.flushLock.Lock() + l.messages = append(l.messages, output) + l.size += len(output) + l.needsFlush.Signal() + l.flushLock.Unlock() + } + + if shouldDisplay { + if l.config.DisableContextualDisplaying { + fmt.Println(fmt.Sprintf(format, args...)) + } else { + fmt.Print(level.Color().Wrap(output)) + } + } +} + +func (l *Log) format(level Level, format string, args ...interface{}) string { + loc := "?" + if _, file, no, ok := runtime.Caller(3); ok { + loc = fmt.Sprintf("%s#%d", file, no) + } + if i := strings.Index(loc, "gecko/"); i != -1 { + loc = loc[i+5:] + } + text := fmt.Sprintf("%s: %s", loc, fmt.Sprintf(format, args...)) + + prefix := "" + if l.config.MsgPrefix != "" { + prefix = fmt.Sprintf(" <%s>", l.config.MsgPrefix) + } + + return fmt.Sprintf("%s[%s]%s %s\n", + level, + time.Now().Format("01-02|15:04:05"), + prefix, + text) +} + +// Fatal ... +func (l *Log) Fatal(format string, args ...interface{}) { l.log(Fatal, format, args...) } + +// Error ... +func (l *Log) Error(format string, args ...interface{}) { l.log(Error, format, args...) } + +// Warn ... +func (l *Log) Warn(format string, args ...interface{}) { l.log(Warn, format, args...) } + +// Info ... +func (l *Log) Info(format string, args ...interface{}) { l.log(Info, format, args...) } + +// Debug ... +func (l *Log) Debug(format string, args ...interface{}) { l.log(Debug, format, args...) } + +// Verbo ... +func (l *Log) Verbo(format string, args ...interface{}) { l.log(Verbo, format, args...) } + +// AssertNoError ... 
+func (l *Log) AssertNoError(err error) { + if err != nil { + l.log(Fatal, "%s", err) + } + if l.config.Assertions && err != nil { + l.Stop() + panic(err) + } +} + +// AssertTrue ... +func (l *Log) AssertTrue(b bool, format string, args ...interface{}) { + if !b { + l.log(Fatal, format, args...) + } + if l.config.Assertions && !b { + l.Stop() + panic(fmt.Sprintf(format, args...)) + } +} + +// AssertDeferredTrue ... +func (l *Log) AssertDeferredTrue(f func() bool, format string, args ...interface{}) { + // Note, the logger will only be notified here if assertions are enabled + if l.config.Assertions && !f() { + err := fmt.Sprintf(format, args...) + l.log(Fatal, err) + l.Stop() + panic(err) + } +} + +// AssertDeferredNoError ... +func (l *Log) AssertDeferredNoError(f func() error) { + if l.config.Assertions { + err := f() + if err != nil { + l.log(Fatal, "%s", err) + } + if l.config.Assertions && err != nil { + l.Stop() + panic(err) + } + } +} + +// StopOnPanic ... +func (l *Log) StopOnPanic() { + if r := recover(); r != nil { + l.Fatal("Panicing due to:\n%s\nFrom:\n%s", r, Stacktrace{}) + l.Stop() + panic(r) + } +} + +// RecoverAndPanic ... +func (l *Log) RecoverAndPanic(f func()) { defer l.StopOnPanic(); f() } + +// SetLogLevel ... +func (l *Log) SetLogLevel(lvl Level) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.LogLevel = lvl +} + +// SetDisplayLevel ... +func (l *Log) SetDisplayLevel(lvl Level) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.DisplayLevel = lvl +} + +// SetPrefix ... +func (l *Log) SetPrefix(prefix string) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.MsgPrefix = prefix +} + +// SetLoggingEnabled ... +func (l *Log) SetLoggingEnabled(enabled bool) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.DisableLogging = !enabled +} + +// SetDisplayingEnabled ... 
+func (l *Log) SetDisplayingEnabled(enabled bool) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.DisableDisplaying = !enabled +} + +// SetContextualDisplayingEnabled ... +func (l *Log) SetContextualDisplayingEnabled(enabled bool) { + l.configLock.Lock() + defer l.configLock.Unlock() + + l.config.DisableContextualDisplaying = !enabled +} diff --git a/utils/logging/logger.go b/utils/logging/logger.go new file mode 100644 index 0000000..b1ad11c --- /dev/null +++ b/utils/logging/logger.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "io" +) + +// Logger defines the interface that is used to keep a record of all events that +// happen to the program +type Logger interface { + io.Writer // For logging pre-formated messages + + // Log that a fatal error has occurred. The program should likely exit soon + // after this is called + Fatal(format string, args ...interface{}) + // Log that an error has occurred. 
The program should be able to recover + // from this error + Error(format string, args ...interface{}) + // Log that an event has occurred that may indicate a future error or + // vulnerability + Warn(format string, args ...interface{}) + // Log an event that may be useful for a user to see to measure the progress + // of the protocol + Info(format string, args ...interface{}) + // Log an event that may be useful for a programmer to see when debuging the + // execution of the protocol + Debug(format string, args ...interface{}) + // Log extremely detailed events that can be useful for inspecting every + // aspect of the program + Verbo(format string, args ...interface{}) + + // If assertions are enabled, will result in a panic if err is non-nil + AssertNoError(err error) + // If assertions are enabled, will result in a panic if b is false + AssertTrue(b bool, format string, args ...interface{}) + // If assertions are enabled, the function will be called and will result in + // a panic the returned value is non-nil + AssertDeferredNoError(f func() error) + // If assertions are enabled, the function will be called and will result in + // a panic the returned value is false + AssertDeferredTrue(f func() bool, format string, args ...interface{}) + + // Recovers a panic, logs the error, and rethrows the panic. + StopOnPanic() + // If a function panics, this will log that panic and then re-panic ensuring + // that the program logs the error before exiting. + RecoverAndPanic(f func()) + + SetLogLevel(Level) + SetDisplayLevel(Level) + SetPrefix(string) + SetLoggingEnabled(bool) + SetDisplayingEnabled(bool) + SetContextualDisplayingEnabled(bool) + + // Stop this logger and write back all meta-data. + Stop() +} diff --git a/utils/logging/stack.go b/utils/logging/stack.go new file mode 100644 index 0000000..c5391ba --- /dev/null +++ b/utils/logging/stack.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package logging + +import ( + "bytes" + "fmt" + "runtime" + "strconv" +) + +// Stacktrace can print the current stacktrace +type Stacktrace struct { + Global bool +} + +func (st Stacktrace) String() string { + buf := make([]byte, 1<<16) + n := runtime.Stack(buf, st.Global) + return fmt.Sprintf("%s", buf[:n]) +} + +// RoutineID can print the current goroutine ID +type RoutineID struct{} + +func (RoutineID) String() string { + b := make([]byte, 64) + b = b[:runtime.Stack(b, false)] + b = bytes.TrimPrefix(b, []byte("goroutine ")) + b = b[:bytes.IndexByte(b, ' ')] + n, _ := strconv.ParseUint(string(b), 10, 64) + return fmt.Sprintf("Goroutine: %d", n) +} diff --git a/utils/logging/test_factory.go b/utils/logging/test_factory.go new file mode 100644 index 0000000..cebf91a --- /dev/null +++ b/utils/logging/test_factory.go @@ -0,0 +1,23 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "github.com/ava-labs/gecko/ids" +) + +// NoFactory ... +type NoFactory struct{} + +// Make ... +func (NoFactory) Make() (Logger, error) { return NoLog{}, nil } + +// MakeChain ... +func (NoFactory) MakeChain(ids.ID, string) (Logger, error) { return NoLog{}, nil } + +// MakeSubdir ... +func (NoFactory) MakeSubdir(string) (Logger, error) { return NoLog{}, nil } + +// Close ... +func (NoFactory) Close() {} diff --git a/utils/logging/test_log.go b/utils/logging/test_log.go new file mode 100644 index 0000000..e249de4 --- /dev/null +++ b/utils/logging/test_log.go @@ -0,0 +1,74 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package logging + +import ( + "errors" +) + +var ( + errNoLoggerWrite = errors.New("NoLogger can't write") +) + +// NoLog ... +type NoLog struct{} + +func (NoLog) Write([]byte) (int, error) { return 0, errNoLoggerWrite } + +// Fatal ... +func (NoLog) Fatal(format string, args ...interface{}) {} + +// Error ... 
+func (NoLog) Error(format string, args ...interface{}) {} + +// Warn ... +func (NoLog) Warn(format string, args ...interface{}) {} + +// Info ... +func (NoLog) Info(format string, args ...interface{}) {} + +// Debug ... +func (NoLog) Debug(format string, args ...interface{}) {} + +// Verbo ... +func (NoLog) Verbo(format string, args ...interface{}) {} + +// AssertNoError ... +func (NoLog) AssertNoError(error) {} + +// AssertTrue ... +func (NoLog) AssertTrue(b bool, format string, args ...interface{}) {} + +// AssertDeferredTrue ... +func (NoLog) AssertDeferredTrue(f func() bool, format string, args ...interface{}) {} + +// AssertDeferredNoError ... +func (NoLog) AssertDeferredNoError(f func() error) {} + +// StopOnPanic ... +func (NoLog) StopOnPanic() {} + +// RecoverAndPanic ... +func (NoLog) RecoverAndPanic(f func()) { f() } + +// Stop ... +func (NoLog) Stop() {} + +// SetLogLevel ... +func (NoLog) SetLogLevel(Level) {} + +// SetDisplayLevel ... +func (NoLog) SetDisplayLevel(Level) {} + +// SetPrefix ... +func (NoLog) SetPrefix(string) {} + +// SetLoggingEnabled ... +func (NoLog) SetLoggingEnabled(bool) {} + +// SetDisplayingEnabled ... +func (NoLog) SetDisplayingEnabled(bool) {} + +// SetContextualDisplayingEnabled ... +func (NoLog) SetContextualDisplayingEnabled(bool) {} diff --git a/utils/math/safe_math.go b/utils/math/safe_math.go new file mode 100644 index 0000000..37ef85f --- /dev/null +++ b/utils/math/safe_math.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package math + +import ( + "errors" + "math" +) + +var ( + errOverflow = errors.New("overflow occurred") +) + +// Max64 ... +func Max64(a, b uint64) uint64 { + if a < b { + return b + } + return a +} + +// Min64 ... +func Min64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +// Add64 ... 
+func Add64(a, b uint64) (uint64, error) { + if a > math.MaxUint64-b { + return 0, errOverflow + } + return a + b, nil +} + +// Sub64 returns: +// 1) a - b +// 2) If there is underflow, an error +func Sub64(a, b uint64) (uint64, error) { + if a < b { + return 0, errOverflow + } + return a - b, nil +} + +// Mul64 ... +func Mul64(a, b uint64) (uint64, error) { + if b != 0 && a > math.MaxUint64/b { + return 0, errOverflow + } + return a * b, nil +} + +// Diff64 ... +func Diff64(a, b uint64) uint64 { + return Max64(a, b) - Min64(a, b) +} diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go new file mode 100644 index 0000000..c8428b5 --- /dev/null +++ b/utils/math/safe_math_test.go @@ -0,0 +1,70 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package math + +import ( + "math" + "testing" +) + +const maxUint64 uint64 = math.MaxUint64 + +func TestAdd64(t *testing.T) { + sum, err := Add64(0, maxUint64) + if err != nil { + t.Fatalf("Add64 failed unexpectedly") + } + if sum != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, sum) + } + + sum, err = Add64(maxUint64, 0) + if err != nil { + t.Fatalf("Add64 failed unexpectedly") + } + if sum != math.MaxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, sum) + } + + sum, err = Add64(1<<62, 1<<62) + if err != nil { + t.Fatalf("Add64 failed unexpectedly") + } + if sum != uint64(1<<63) { + t.Fatalf("Expected %d, got %d", uint64(1<<63), sum) + } + + sum, err = Add64(1, maxUint64) + if err == nil { + t.Fatalf("Add64 succeeded unexpectedly") + } + + sum, err = Add64(maxUint64, 1) + if err == nil { + t.Fatalf("Add64 succeeded unexpectedly") + } + + sum, err = Add64(maxUint64, maxUint64) + if err == nil { + t.Fatalf("Add64 succeeded unexpectedly") + } +} + +func TestMul64(t *testing.T) { + if prod, err := Mul64(maxUint64, 0); err != nil { + t.Fatalf("Mul64 failed unexpectedly") + } else if prod != 0 { + t.Fatalf("Mul64 returned wrong value") + } + + 
if prod, err := Mul64(maxUint64, 1); err != nil { + t.Fatalf("Mul64 failed unexpectedly") + } else if prod != maxUint64 { + t.Fatalf("Mul64 returned wrong value") + } + + if _, err := Mul64(maxUint64-1, 2); err == nil { + t.Fatalf("Mul64 overflowed") + } +} diff --git a/utils/random/random.go b/utils/random/random.go new file mode 100644 index 0000000..d6d3025 --- /dev/null +++ b/utils/random/random.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "crypto/rand" + "encoding/binary" +) + +// NewMasterseed Compute a uniformly random series of 32 bytes. +// Errors if the underlying generator does not have sufficient +// entropy. +func NewMasterseed() ([32]byte, error) { + bits := [32]byte{} + _, err := rand.Read(bits[:]) + return bits, err +} + +// NewNonce Compute a uniformly random uint64. +// Errors if the underlying generator does not have sufficient +// entropy. +func NewNonce() (uint64, error) { + bits := [8]byte{} + _, err := rand.Read(bits[:]) + return binary.BigEndian.Uint64(bits[:]), err +} diff --git a/utils/random/random_test.go b/utils/random/random_test.go new file mode 100644 index 0000000..6517f73 --- /dev/null +++ b/utils/random/random_test.go @@ -0,0 +1,58 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "testing" +) + +func TestNewMasterseed(t *testing.T) { + seed, err := NewMasterseed() + if err != nil { + t.Fatalf("GetMasterseed returned error, either there is a lack of" + + " entropy or something went wrong.") + } + + hadOnes := false + hadZeros := false + for _, b := range seed { + hadOnes = hadOnes || (b > 0) + hadZeros = hadZeros || (b <= 255) + } + if !hadOnes { + t.Fatalf("GetMasterseed doesn't return ones. Something is very wrong.") + } + if !hadZeros { + t.Fatalf("GetMasterseed doesn't return zeros. 
Something is very wrong.") + } +} + +func TestNewNonce(t *testing.T) { + max := uint64(0) + hadEven := false + hadOdd := false + for i := 0; i < 100; i++ { + nonce, err := NewNonce() + if err != nil { + t.Fatalf("NewNonce returned error, either there is a lack of" + + " entropy or something went wrong.") + } + if nonce > max { + max = nonce + } + hadEven = hadEven || (nonce%2 == 0) + hadOdd = hadOdd || (nonce%2 == 1) + } + // The probabilities are related, but they act as a rule of thumb, the + // probabilities are negligible. + if max < 9223372036854775808 { + t.Fatalf("GetNonce doesn't range from [0, 2^64 - 1] with p = 1 - 2^-100") + } + if !hadEven { + t.Fatalf("GetNonce doesn't have even numbers with p = 1 - 2^-100") + } + if !hadOdd { + t.Fatalf("GetNonce doesn't have odd numbers with p = 1 - 2^-100") + } +} diff --git a/utils/random/sampler.go b/utils/random/sampler.go new file mode 100644 index 0000000..559943d --- /dev/null +++ b/utils/random/sampler.go @@ -0,0 +1,12 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +// Sampler allows the sampling of integers +type Sampler interface { + Sample() int + SampleReplace() int + CanSample() bool + Replace() +} diff --git a/utils/random/sudorandom.go b/utils/random/sudorandom.go new file mode 100644 index 0000000..1c617e4 --- /dev/null +++ b/utils/random/sudorandom.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "math/rand" + "time" +) + +func init() { rand.Seed(time.Now().UnixNano()) } + +// Rand returns a number inside [min, max). Panics if min >= max +func Rand(min, max int) int { return rand.Intn(max-min) + min } + +// Subset creates a list of at most k unique numbers sampled from the sampler. +// Runs in O(k) * O(Sample) time with O(k) space used. 
+func Subset(s Sampler, k int) []int { + inds := []int{} + for i := 0; i < k && s.CanSample(); i++ { + inds = append(inds, s.Sample()) + } + return inds +} + +// Bernoulli ... +func Bernoulli(p float64) bool { return rand.Float64() < p } diff --git a/utils/random/sudorandom_test.go b/utils/random/sudorandom_test.go new file mode 100644 index 0000000..5448e39 --- /dev/null +++ b/utils/random/sudorandom_test.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "sort" + "testing" +) + +func TestSubsetUniform(t *testing.T) { + s := &Uniform{N: 5} + subset := Subset(s, 5) + if len(subset) != 5 { + t.Fatalf("Returned wrong number of elements") + } + sort.Ints(subset) + + for i := 0; i < 5; i++ { + if i != subset[i] { + t.Fatalf("Returned wrong element") + } + } +} +func TestSubsetWeighted(t *testing.T) { + s := &Weighted{Weights: []uint64{1, 2, 3, 4, 5}} + subset := Subset(s, 5) + if len(subset) != 5 { + t.Fatalf("Returned wrong number of elements") + } + sort.Ints(subset) + + for i := 0; i < 5; i++ { + if i != subset[i] { + t.Fatalf("Returned wrong element") + } + } +} diff --git a/utils/random/uniform.go b/utils/random/uniform.go new file mode 100644 index 0000000..4677eb0 --- /dev/null +++ b/utils/random/uniform.go @@ -0,0 +1,49 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +type defaultMap map[int]int + +func (m *defaultMap) get(key int, def int) int { + if *m == nil { + *m = make(defaultMap) + } + if r, ok := (*m)[key]; ok { + return r + } + return def +} + +// Uniform implements the Sampler interface by using the uniform distribution in +// the range [0, N). All operations run in O(1) time. 
+type Uniform struct { + drawn defaultMap + N, i int +} + +// Sample implements the Sampler interface +func (s *Uniform) Sample() int { + r := Rand(s.i, s.N) + + ret := s.drawn.get(r, r) + s.drawn[r] = s.drawn.get(s.i, s.i) + + s.i++ + return ret +} + +// SampleReplace implements the Sampler interface +func (s *Uniform) SampleReplace() int { + r := Rand(s.i, s.N) + return s.drawn.get(r, r) +} + +// CanSample implements the Sampler interface +func (s *Uniform) CanSample() bool { return s.i < s.N } + +// Replace implements the Sampler interface +func (s *Uniform) Replace() { + s.drawn = make(defaultMap) + s.i = 0 +} diff --git a/utils/random/uniform_test.go b/utils/random/uniform_test.go new file mode 100644 index 0000000..cb867cf --- /dev/null +++ b/utils/random/uniform_test.go @@ -0,0 +1,79 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "fmt" + "math" + "testing" +) + +const ( + countSize = 5 + subsetSize = 3 + + iterations = 1000 + threshold = 100 +) + +func TestUniform(t *testing.T) { + counts := [countSize]int{} + for i := 0; i < iterations; i++ { + s := &Uniform{N: 5} + subset := Subset(s, subsetSize) + for _, j := range subset { + counts[j]++ + } + if len(subset) != subsetSize { + t.Fatalf("Incorrect size") + } + } + + expected := iterations * float64(subsetSize) / countSize + for i := 0; i < countSize; i++ { + if math.Abs(float64(counts[i])-expected) > threshold { + t.Fatalf("Index seems biased: %s", fmt.Sprint(counts)) + } + } +} + +func TestUniformReset(t *testing.T) { + s := &Uniform{N: 1} + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.SampleReplace() != 0 { + t.Fatalf("Wrong sample") + } + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.Sample() != 0 { + t.Fatalf("Wrong sample") + } + if s.CanSample() { + t.Fatalf("Shouldn't be able to sample") + } + + s.Replace() + + if !s.CanSample() { + t.Fatalf("Should be able to 
sample") + } + if s.SampleReplace() != 0 { + t.Fatalf("Wrong sample") + } + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.Sample() != 0 { + t.Fatalf("Wrong sample") + } + if s.CanSample() { + t.Fatalf("Shouldn't be able to sample") + } +} diff --git a/utils/random/weighted.go b/utils/random/weighted.go new file mode 100644 index 0000000..74343dc --- /dev/null +++ b/utils/random/weighted.go @@ -0,0 +1,116 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package random + +import ( + "math" + "math/rand" +) + +// Weighted implements the Sampler interface by sampling based on a heap +// structure. +// +// Node weight is defined as the node's given weight along with it's +// children's recursive weights. Once sampled, a nodes given weight is set to 0. +// +// Replacing runs in O(n) time while sampling runs in O(log(n)) time. +type Weighted struct { + Weights []uint64 + + // The reason this is separated from Weights, is because it is set to 0 + // after being sampled. + weights []int64 + cumWeights []int64 +} + +func (s *Weighted) init() { + if len(s.Weights) != len(s.weights) { + s.Replace() + } +} + +// Sample returns a number in [0, len(weights)) with probability proportional to +// the weight of the item at that index. Assumes Len > 0. Sample takes +// O(log(len(weights))) time. +func (s *Weighted) Sample() int { + i := s.SampleReplace() + s.changeWeight(i, 0) + return i +} + +// SampleReplace returns a number in [0, len(weights)) with probability +// proportional to the weight of the item at that index. Assumes CanSample +// returns true. Sample takes O(log(len(weights))) time. The returned index is +// not removed. 
+func (s *Weighted) SampleReplace() int { + s.init() + for w, i := rand.Int63n(s.cumWeights[0]), 0; ; { + w -= s.weights[i] + if w < 0 { + return i + } + + i = i*2 + 1 // We shouldn't return the root, so check the left child + + if lw := s.cumWeights[i]; lw <= w { + // If the weight is greater than the left weight, you should move to + // the right child + w -= lw + i++ + } + } +} + +// CanSample returns the number of items left that can be sampled +func (s *Weighted) CanSample() bool { + s.init() + return len(s.cumWeights) > 0 && s.cumWeights[0] > 0 +} + +// Replace all the sampled elements. Takes O(len(weights)) time. +func (s *Weighted) Replace() { + // Attempt to malloc as few times as possible + if s.weights == nil || cap(s.weights) < len(s.Weights) { + s.weights = make([]int64, len(s.Weights)) + } else { + s.weights = s.weights[:len(s.Weights)] + } + if s.cumWeights == nil || cap(s.cumWeights) < len(s.Weights) { + s.cumWeights = make([]int64, len(s.Weights)) + } else { + s.cumWeights = s.cumWeights[:len(s.Weights)] + } + + for i, w := range s.Weights { + if w > math.MaxInt64 { + panic("Weight too large") + } + s.weights[i] = int64(w) + } + + copy(s.cumWeights, s.weights) + + // Initialize the heap + for i := len(s.cumWeights) - 1; i > 0; i-- { + parent := (i - 1) / 2 + w := uint64(s.cumWeights[parent]) + uint64(s.cumWeights[i]) + if w > math.MaxInt64 { + panic("Weight too large") + } + s.cumWeights[parent] = int64(w) + } +} + +func (s *Weighted) changeWeight(i int, newWeight int64) { + change := s.weights[i] - newWeight + + s.weights[i] = newWeight + + // Decrease my weight and all my parents weights. + s.cumWeights[i] -= change + for i > 0 { + i = (i - 1) / 2 + s.cumWeights[i] -= change + } +} diff --git a/utils/random/weighted_test.go b/utils/random/weighted_test.go new file mode 100644 index 0000000..84e93ef --- /dev/null +++ b/utils/random/weighted_test.go @@ -0,0 +1,94 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package random + +import ( + "fmt" + "math" + "math/rand" + "testing" +) + +func TestWeighted(t *testing.T) { + rand.Seed(0) + + counts := [countSize]int{} + for i := 0; i < iterations; i++ { + s := &Weighted{Weights: []uint64{0, 1, 2, 3, 4}} + subset := Subset(s, 1) + for _, j := range subset { + counts[j]++ + } + if len(subset) != 1 { + t.Fatalf("Incorrect size") + } + } + + for i := 0; i < countSize; i++ { + expected := float64(i) * iterations / 10 + if math.Abs(float64(counts[i])-expected) > threshold { + t.Fatalf("Index seems biased: %s i=%d e=%f", fmt.Sprint(counts), i, expected) + } + } +} + +func TestWeightedReset(t *testing.T) { + s := &Weighted{Weights: []uint64{0, 1, 0, 0, 0}} + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.SampleReplace() != 1 { + t.Fatalf("Wrong sample") + } + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.Sample() != 1 { + t.Fatalf("Wrong sample") + } + if s.CanSample() { + t.Fatalf("Shouldn't be able to sample") + } + + s.Replace() + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.SampleReplace() != 1 { + t.Fatalf("Wrong sample") + } + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.Sample() != 1 { + t.Fatalf("Wrong sample") + } + if s.CanSample() { + t.Fatalf("Shouldn't be able to sample") + } + + s.Weights = []uint64{0, 0, 1, 0, 0} + s.Replace() + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.SampleReplace() != 2 { + t.Fatalf("Wrong sample") + } + + if !s.CanSample() { + t.Fatalf("Should be able to sample") + } + if s.Sample() != 2 { + t.Fatalf("Wrong sample") + } + if s.CanSample() { + t.Fatalf("Shouldn't be able to sample") + } +} diff --git a/utils/sorting.go b/utils/sorting.go new file mode 100644 index 0000000..f979920 --- /dev/null +++ b/utils/sorting.go @@ -0,0 +1,30 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package utils + +import ( + "sort" +) + +// IsSortedAndUnique returns true if the elements in the data are unique and sorted. +func IsSortedAndUnique(data sort.Interface) bool { + for i := data.Len() - 2; i >= 0; i-- { + if !data.Less(i, i+1) { + return false + } + } + return true +} + +type innerSortUint32 []uint32 + +func (su32 innerSortUint32) Less(i, j int) bool { return su32[i] < su32[j] } +func (su32 innerSortUint32) Len() int { return len(su32) } +func (su32 innerSortUint32) Swap(i, j int) { su32[j], su32[i] = su32[i], su32[j] } + +// SortUint32 sorts an uint32 array +func SortUint32(u32 []uint32) { sort.Sort(innerSortUint32(u32)) } + +// IsSortedAndUniqueUint32 returns true if the array of uint32s are sorted and unique +func IsSortedAndUniqueUint32(u32 []uint32) bool { return IsSortedAndUnique(innerSortUint32(u32)) } diff --git a/utils/timer/clock.go b/utils/timer/clock.go new file mode 100644 index 0000000..658e566 --- /dev/null +++ b/utils/timer/clock.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "time" +) + +// Clock acts as a thin wrapper around global time that allows for easy testing +type Clock struct { + faked bool + time time.Time +} + +// Set the time on the clock +func (c *Clock) Set(time time.Time) { c.faked = true; c.time = time } + +// Sync this clock with global time +func (c *Clock) Sync() { c.faked = false } + +// Time returns the time on this clock +func (c *Clock) Time() time.Time { + if c.faked { + return c.time + } + return time.Now() +} + +// Unix returns the unix time on this clock. 
+func (c *Clock) Unix() uint64 { + unix := c.Time().Unix() + if unix < 0 { + unix = 0 + } + return uint64(unix) +} diff --git a/utils/timer/executor.go b/utils/timer/executor.go new file mode 100644 index 0000000..dc28f39 --- /dev/null +++ b/utils/timer/executor.go @@ -0,0 +1,64 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "sync" +) + +// Executor ... +type Executor struct { + lock sync.Mutex + cond *sync.Cond + wg sync.WaitGroup + finished bool + events []func() +} + +// Initialize ... +func (e *Executor) Initialize() { + e.cond = sync.NewCond(&e.lock) + e.wg.Add(1) +} + +// Add new function to call +func (e *Executor) Add(event func()) { + e.lock.Lock() + defer e.lock.Unlock() + + e.events = append(e.events, event) + e.cond.Signal() +} + +// Stop executing functions +func (e *Executor) Stop() { + e.lock.Lock() + if !e.finished { + defer e.wg.Wait() + } + defer e.lock.Unlock() + + e.finished = true + e.cond.Broadcast() +} + +// Dispatch the events. Will only return after stop is called. +func (e *Executor) Dispatch() { + e.lock.Lock() + defer e.lock.Unlock() + defer e.wg.Done() + + for !e.finished { + if len(e.events) == 0 { + e.cond.Wait() + } else { + event := e.events[0] + e.events = e.events[1:] + + e.lock.Unlock() + event() + e.lock.Lock() + } + } +} diff --git a/utils/timer/meter.go b/utils/timer/meter.go new file mode 100644 index 0000000..f259a7c --- /dev/null +++ b/utils/timer/meter.go @@ -0,0 +1,12 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package timer + +// Meter tracks the number of occurrences of a specified event +type Meter interface { + // Notify this meter of a new event for it to rate + Tick() + // Return the number of events this meter is currently tracking + Ticks() int +} diff --git a/utils/timer/repeater.go b/utils/timer/repeater.go new file mode 100644 index 0000000..08c53ed --- /dev/null +++ b/utils/timer/repeater.go @@ -0,0 +1,83 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "sync" + "time" +) + +// Repeater ... +type Repeater struct { + handler func() + timeout chan struct{} + + lock sync.Mutex + wg sync.WaitGroup + finished bool + frequency time.Duration +} + +// NewRepeater ... +func NewRepeater(handler func(), frequency time.Duration) *Repeater { + repeater := &Repeater{ + handler: handler, + timeout: make(chan struct{}, 1), + frequency: frequency, + } + repeater.wg.Add(1) + + return repeater +} + +// Stop ... +func (r *Repeater) Stop() { + r.lock.Lock() + if !r.finished { + defer r.wg.Wait() + } + defer r.lock.Unlock() + + r.finished = true + r.reset() +} + +// Dispatch ... +func (r *Repeater) Dispatch() { + r.lock.Lock() + defer r.lock.Unlock() + defer r.wg.Done() + + timer := time.NewTimer(r.frequency) + cleared := false + for !r.finished { + r.lock.Unlock() + + cleared = false + select { + case <-r.timeout: + case <-timer.C: + cleared = true + } + + if !timer.Stop() && !cleared { + <-timer.C + } + + if cleared { + r.handler() + } + + timer.Reset(r.frequency) + + r.lock.Lock() + } +} + +func (r *Repeater) reset() { + select { + case r.timeout <- struct{}{}: + default: + } +} diff --git a/utils/timer/repeater_test.go b/utils/timer/repeater_test.go new file mode 100644 index 0000000..5499202 --- /dev/null +++ b/utils/timer/repeater_test.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package timer + +import ( + "sync" + "testing" + "time" +) + +func TestRepeater(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(2) + + val := new(int) + repeater := NewRepeater(func() { + if *val < 2 { + wg.Done() + *val++ + } + }, time.Millisecond) + go repeater.Dispatch() + + wg.Wait() + repeater.Stop() +} diff --git a/utils/timer/timed_meter.go b/utils/timer/timed_meter.go new file mode 100644 index 0000000..82196f7 --- /dev/null +++ b/utils/timer/timed_meter.go @@ -0,0 +1,74 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "container/list" + "sync" + "time" +) + +// TimedMeter is a meter that discards old events +type TimedMeter struct { + lock sync.Mutex + // Amount of time to keep a tick + Duration time.Duration + // TODO: Currently this list has an entry for each tick... This isn't really + // sustainable at high tick numbers. We should be batching ticks with + // similar times into the same bucket. 
+ tickList *list.List +} + +// Tick implements the Meter interface +func (tm *TimedMeter) Tick() { + tm.lock.Lock() + defer tm.lock.Unlock() + + tm.tick() +} + +// Ticks implements the Meter interface +func (tm *TimedMeter) Ticks() int { + tm.lock.Lock() + defer tm.lock.Unlock() + + return tm.ticks() +} + +func (tm *TimedMeter) init() { + if tm.tickList == nil { + tm.tickList = list.New() + } +} + +func (tm *TimedMeter) tick() { + tm.init() + tm.tickList.PushBack(time.Now()) +} + +func (tm *TimedMeter) ticks() int { + tm.init() + + timeBound := time.Now().Add(-tm.Duration) + // removeExpiredHead returns false once there is nothing left to remove + for tm.removeExpiredHead(timeBound) { + } + return tm.tickList.Len() +} + +// Returns true if the head was removed, false otherwise +func (tm *TimedMeter) removeExpiredHead(t time.Time) bool { + if tm.tickList.Len() == 0 { + return false + } + + head := tm.tickList.Front() + headTime := head.Value.(time.Time) + + if headTime.Before(t) { + tm.tickList.Remove(head) + return true + } + return false +} diff --git a/utils/timer/timeout_manager.go b/utils/timer/timeout_manager.go new file mode 100644 index 0000000..c196e96 --- /dev/null +++ b/utils/timer/timeout_manager.go @@ -0,0 +1,142 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "container/list" + "sync" + "time" + + "github.com/ava-labs/gecko/ids" +) + +type timeoutHandler func() + +type timeout struct { + id ids.ID + handler timeoutHandler + timer time.Time +} + +// TimeoutManager is a manager for timeouts. +type TimeoutManager struct { + lock sync.Mutex + duration time.Duration // Amount of time before a timeout + timeoutMap map[[32]byte]*list.Element + timeoutList *list.List + timer *Timer // Timer that will fire to clear the timeouts +} + +// Initialize is a constructor b/c Golang, in its wisdom, doesn't ... have them? 
+func (tm *TimeoutManager) Initialize(duration time.Duration) { + tm.duration = duration + tm.timeoutMap = make(map[[32]byte]*list.Element) + tm.timeoutList = list.New() + tm.timer = NewTimer(tm.Timeout) +} + +// Dispatch ... +func (tm *TimeoutManager) Dispatch() { tm.timer.Dispatch() } + +// Stop executing timeouts +func (tm *TimeoutManager) Stop() { tm.timer.Stop() } + +// Put puts hash into the hash map +func (tm *TimeoutManager) Put(id ids.ID, handler func()) { + tm.lock.Lock() + defer tm.lock.Unlock() + + tm.put(id, handler) +} + +// Remove the item that no longer needs to be there. +func (tm *TimeoutManager) Remove(id ids.ID) { + tm.lock.Lock() + defer tm.lock.Unlock() + + tm.remove(id) +} + +// Timeout registers a timeout +func (tm *TimeoutManager) Timeout() { + tm.lock.Lock() + defer tm.lock.Unlock() + + tm.timeout() +} + +func (tm *TimeoutManager) timeout() { + timeBound := time.Now().Add(-tm.duration) + // removeExpiredHead returns false once there is nothing left to remove + for { + timeout := tm.removeExpiredHead(timeBound) + if timeout == nil { + break + } + + // Don't execute a callback with a lock held + tm.lock.Unlock() + timeout() + tm.lock.Lock() + } + tm.registerTimeout() +} + +func (tm *TimeoutManager) put(id ids.ID, handler timeoutHandler) { + tm.remove(id) + + tm.timeoutMap[id.Key()] = tm.timeoutList.PushBack(timeout{ + id: id, + handler: handler, + timer: time.Now(), + }) + + if tm.timeoutList.Len() == 1 { + tm.registerTimeout() + } +} + +func (tm *TimeoutManager) remove(id ids.ID) { + key := id.Key() + e, exists := tm.timeoutMap[key] + if !exists { + return + } + delete(tm.timeoutMap, key) + tm.timeoutList.Remove(e) +} + +// Returns true if the head was removed, false otherwise +func (tm *TimeoutManager) removeExpiredHead(t time.Time) func() { + if tm.timeoutList.Len() == 0 { + return nil + } + + e := tm.timeoutList.Front() + head := e.Value.(timeout) + + headTime := head.timer + if headTime.Before(t) { + tm.remove(head.id) + return 
head.handler + } + return nil +} + +func (tm *TimeoutManager) registerTimeout() { + if tm.timeoutList.Len() == 0 { + // There are no pending timeouts + tm.timer.Cancel() + return + } + + e := tm.timeoutList.Front() + head := e.Value.(timeout) + + timeBound := time.Now().Add(-tm.duration) + headTime := head.timer + duration := headTime.Sub(timeBound) + + tm.timer.SetTimeoutIn(duration) +} diff --git a/utils/timer/timeout_manager_test.go b/utils/timer/timeout_manager_test.go new file mode 100644 index 0000000..da0d542 --- /dev/null +++ b/utils/timer/timeout_manager_test.go @@ -0,0 +1,25 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "sync" + "testing" + "time" + + "github.com/ava-labs/gecko/ids" +) + +func TestTimeoutManager(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(2) + defer wg.Wait() + + tm := TimeoutManager{} + tm.Initialize(time.Millisecond) + go tm.Dispatch() + + tm.Put(ids.NewID([32]byte{}), wg.Done) + tm.Put(ids.NewID([32]byte{1}), wg.Done) +} diff --git a/utils/timer/timer.go b/utils/timer/timer.go new file mode 100644 index 0000000..fd23fdf --- /dev/null +++ b/utils/timer/timer.go @@ -0,0 +1,109 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +import ( + "sync" + "time" +) + +// Timer wraps a timer object. This allows a user to specify a handler. Once +// specifying the handler, the dispatch thread can be called. The dispatcher +// will only return after calling Stop. SetTimeoutIn will result in calling the +// handler in the specified amount of time. 
+type Timer struct { + handler func() + timeout chan struct{} + + lock sync.Mutex + wg sync.WaitGroup + finished, shouldExecute bool + duration time.Duration +} + +// NewTimer creates a new timer object +func NewTimer(handler func()) *Timer { + timer := &Timer{ + handler: handler, + timeout: make(chan struct{}, 1), + } + timer.wg.Add(1) + + return timer +} + +// SetTimeoutIn will set the timer to fire the handler in [duration] +func (t *Timer) SetTimeoutIn(duration time.Duration) { + t.lock.Lock() + defer t.lock.Unlock() + + t.duration = duration + t.shouldExecute = true + t.reset() +} + +// Cancel the currently scheduled event +func (t *Timer) Cancel() { + t.lock.Lock() + defer t.lock.Unlock() + + t.shouldExecute = false + t.reset() +} + +// Stop this timer from executing any more. +func (t *Timer) Stop() { + t.lock.Lock() + if !t.finished { + defer t.wg.Wait() + } + defer t.lock.Unlock() + + t.finished = true + t.reset() +} + +// Dispatch ... +func (t *Timer) Dispatch() { + t.lock.Lock() + defer t.lock.Unlock() + defer t.wg.Done() + + timer := time.NewTimer(0) + cleared := false + reset := false + for !t.finished { // t.finished needs to be thread safe + if !reset && !timer.Stop() && !cleared { + <-timer.C + } + + if cleared && t.shouldExecute { + t.lock.Unlock() + t.handler() + } else { + t.lock.Unlock() + } + + cleared = false + reset = false + select { + case <-t.timeout: + t.lock.Lock() + if t.shouldExecute { + timer.Reset(t.duration) + } + reset = true + case <-timer.C: + t.lock.Lock() + cleared = true + } + } +} + +func (t *Timer) reset() { + select { + case t.timeout <- struct{}{}: + default: + } +} diff --git a/utils/timer/timer_test.go b/utils/timer/timer_test.go new file mode 100644 index 0000000..ddff3d8 --- /dev/null +++ b/utils/timer/timer_test.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package timer + +import ( + "sync" + "testing" + "time" +) + +func TestTimer(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(1) + defer wg.Wait() + + timer := NewTimer(wg.Done) + go timer.Dispatch() + + timer.SetTimeoutIn(time.Millisecond) +} diff --git a/utils/units/ava.go b/utils/units/ava.go new file mode 100644 index 0000000..3643b45 --- /dev/null +++ b/utils/units/ava.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package units + +// Denominations of value +const ( + NanoAva uint64 = 1 + MicroAva uint64 = 1000 * NanoAva + Schmeckle uint64 = 49*MicroAva + 463*NanoAva + MilliAva uint64 = 1000 * MicroAva + Ava uint64 = 1000 * MilliAva + KiloAva uint64 = 1000 * Ava + MegaAva uint64 = 1000 * KiloAva +) diff --git a/utils/wrappers/errors.go b/utils/wrappers/errors.go new file mode 100644 index 0000000..bce04c5 --- /dev/null +++ b/utils/wrappers/errors.go @@ -0,0 +1,22 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package wrappers + +// Errs ... +type Errs struct{ Err error } + +// Errored ... +func (errs *Errs) Errored() bool { return errs.Err != nil } + +// Add ... +func (errs *Errs) Add(errors ...error) { + if errs.Err == nil { + for _, err := range errors { + if err != nil { + errs.Err = err + break + } + } + } +} diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go new file mode 100644 index 0000000..da9bfc7 --- /dev/null +++ b/utils/wrappers/packing.go @@ -0,0 +1,471 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package wrappers + +import ( + "encoding/binary" + "errors" + "math" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/hashing" +) + +const ( + // MaxStringLen ... + MaxStringLen = math.MaxUint16 + + // ByteLen is the number of bytes per byte... 
+ ByteLen = 1 + // ShortLen is the number of bytes per short + ShortLen = 2 + // IntLen is the number of bytes per int + IntLen = 4 + // LongLen is the number of bytes per long + LongLen = 8 +) + +var ( + errBadLength = errors.New("packer has insufficient length for input") + errNegativeOffset = errors.New("negative offset") + errInvalidInput = errors.New("input does not match expected format") + errBadType = errors.New("wrong type passed") + errBadBool = errors.New("unexpected value when unpacking bool") +) + +// Packer packs and unpacks a byte array from/to standard values +type Packer struct { + Errs + + // The largest allowed size of expanding the byte array + MaxSize int + // The current byte array + Bytes []byte + // The offset that is being written to in the byte array + Offset int +} + +// CheckSpace requires that there is at least [bytes] of write space left in the +// byte array. If this is not true, an error is added to the packer +func (p *Packer) CheckSpace(bytes int) { + switch { + case p.Offset < 0: + p.Add(errNegativeOffset) + case bytes < 0: + p.Add(errInvalidInput) + case len(p.Bytes)-p.Offset < bytes: + p.Add(errBadLength) + } +} + +// Expand ensures that there is [bytes] bytes left of space in the byte array. +// If this is not allowed due to the maximum size, an error is added to the +// packer +func (p *Packer) Expand(bytes int) { + p.CheckSpace(0) + if p.Errored() { + return + } + + neededSize := bytes + p.Offset + if neededSize <= len(p.Bytes) { + return + } + + if neededSize > p.MaxSize { + p.Add(errBadLength) + } else if neededSize > cap(p.Bytes) { + p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes))...) 
+ } else { + p.Bytes = p.Bytes[:neededSize] + } +} + +// PackByte append a byte to the byte array +func (p *Packer) PackByte(val byte) { + p.Expand(ByteLen) + if p.Errored() { + return + } + + p.Bytes[p.Offset] = val + p.Offset++ +} + +// UnpackByte unpack a byte from the byte array +func (p *Packer) UnpackByte() byte { + p.CheckSpace(ByteLen) + if p.Errored() { + return 0 + } + + val := p.Bytes[p.Offset] + p.Offset++ + return val +} + +// PackShort append a short to the byte array +func (p *Packer) PackShort(val uint16) { + p.Expand(ShortLen) + if p.Errored() { + return + } + + binary.BigEndian.PutUint16(p.Bytes[p.Offset:], val) + p.Offset += ShortLen +} + +// UnpackShort unpack a short from the byte array +func (p *Packer) UnpackShort() uint16 { + p.CheckSpace(ShortLen) + if p.Errored() { + return 0 + } + + val := binary.BigEndian.Uint16(p.Bytes[p.Offset:]) + p.Offset += ShortLen + return val +} + +// PackInt append an int to the byte array +func (p *Packer) PackInt(val uint32) { + p.Expand(IntLen) + if p.Errored() { + return + } + + binary.BigEndian.PutUint32(p.Bytes[p.Offset:], val) + p.Offset += IntLen +} + +// UnpackInt unpack an int from the byte array +func (p *Packer) UnpackInt() uint32 { + p.CheckSpace(IntLen) + if p.Errored() { + return 0 + } + + val := binary.BigEndian.Uint32(p.Bytes[p.Offset:]) + p.Offset += IntLen + return val +} + +// PackLong append a long to the byte array +func (p *Packer) PackLong(val uint64) { + p.Expand(LongLen) + if p.Errored() { + return + } + + binary.BigEndian.PutUint64(p.Bytes[p.Offset:], val) + p.Offset += LongLen +} + +// UnpackLong unpack a long from the byte array +func (p *Packer) UnpackLong() uint64 { + p.CheckSpace(LongLen) + if p.Errored() { + return 0 + } + + val := binary.BigEndian.Uint64(p.Bytes[p.Offset:]) + p.Offset += LongLen + return val +} + +// PackBool packs a bool into the byte array +func (p *Packer) PackBool(b bool) { + if b { + p.PackByte(1) + } else { + p.PackByte(0) + } +} + +// UnpackBool unpacks a 
bool from the byte array +func (p *Packer) UnpackBool() bool { + b := p.UnpackByte() + switch b { + case 0: + return false + case 1: + return true + default: + p.Add(errBadBool) + return false + } +} + +// PackFixedBytes append a byte slice, with no length descriptor to the byte +// array +func (p *Packer) PackFixedBytes(bytes []byte) { + p.Expand(len(bytes)) + if p.Errored() { + return + } + + copy(p.Bytes[p.Offset:], bytes) + p.Offset += len(bytes) +} + +// UnpackFixedBytes unpack a byte slice, with no length descriptor from the byte +// array +func (p *Packer) UnpackFixedBytes(size int) []byte { + p.CheckSpace(size) + if p.Errored() { + return nil + } + + bytes := p.Bytes[p.Offset : p.Offset+size] + p.Offset += size + return bytes +} + +// PackBytes append a byte slice to the byte array +func (p *Packer) PackBytes(bytes []byte) { + p.PackInt(uint32(len(bytes))) + p.PackFixedBytes(bytes) +} + +// UnpackBytes unpack a byte slice from the byte array +func (p *Packer) UnpackBytes() []byte { + size := p.UnpackInt() + return p.UnpackFixedBytes(int(size)) +} + +// PackFixedByteSlices append a byte slice slice to the byte array +func (p *Packer) PackFixedByteSlices(byteSlices [][]byte) { + p.PackInt(uint32(len(byteSlices))) + for _, bytes := range byteSlices { + p.PackFixedBytes(bytes) + } +} + +// UnpackFixedByteSlices unpack a byte slice slice to the byte array +func (p *Packer) UnpackFixedByteSlices(size int) [][]byte { + sliceSize := p.UnpackInt() + bytes := [][]byte(nil) + for i := uint32(0); i < sliceSize && !p.Errored(); i++ { + bytes = append(bytes, p.UnpackFixedBytes(size)) + } + return bytes +} + +// PackStr append a string to the byte array +func (p *Packer) PackStr(str string) { + strSize := len(str) + if strSize > MaxStringLen { + p.Add(errInvalidInput) + } + p.PackShort(uint16(strSize)) + p.PackFixedBytes([]byte(str)) +} + +// UnpackStr unpacks a string from the byte array +func (p *Packer) UnpackStr() string { + strSize := p.UnpackShort() + return 
string(p.UnpackFixedBytes(int(strSize))) +} + +// PackIP unpacks an ip port pair from the byte array +func (p *Packer) PackIP(ip utils.IPDesc) { + p.PackFixedBytes(ip.IP.To16()) + p.PackShort(ip.Port) +} + +// UnpackIP unpacks an ip port pair from the byte array +func (p *Packer) UnpackIP() utils.IPDesc { + ip := p.UnpackFixedBytes(16) + port := p.UnpackShort() + return utils.IPDesc{ + IP: ip, + Port: port, + } +} + +// PackIPs unpacks an ip port pair slice from the byte array +func (p *Packer) PackIPs(ips []utils.IPDesc) { + p.PackInt(uint32(len(ips))) + for i := 0; i < len(ips) && !p.Errored(); i++ { + p.PackIP(ips[i]) + } +} + +// UnpackIPs unpacks an ip port pair slice from the byte array +func (p *Packer) UnpackIPs() []utils.IPDesc { + sliceSize := p.UnpackInt() + ips := []utils.IPDesc(nil) + for i := uint32(0); i < sliceSize && !p.Errored(); i++ { + ips = append(ips, p.UnpackIP()) + } + return ips +} + +// TryPackByte attempts to pack the value as a byte +func TryPackByte(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(uint8); ok { + packer.PackByte(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackByte attempts to unpack a value as a byte +func TryUnpackByte(packer *Packer) interface{} { + return packer.UnpackByte() +} + +// TryPackShort attempts to pack the value as a short +func TryPackShort(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(uint16); ok { + packer.PackShort(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackShort attempts to unpack a value as a short +func TryUnpackShort(packer *Packer) interface{} { + return packer.UnpackShort() +} + +// TryPackInt attempts to pack the value as an int +func TryPackInt(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(uint32); ok { + packer.PackInt(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackInt attempts to unpack a value as an int +func TryUnpackInt(packer *Packer) interface{} { + return packer.UnpackInt() +} + +// 
TryPackLong attempts to pack the value as a long +func TryPackLong(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(uint64); ok { + packer.PackLong(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackLong attempts to unpack a value as a long +func TryUnpackLong(packer *Packer) interface{} { + return packer.UnpackLong() +} + +// TryPackHash attempts to pack the value as a 32-byte sequence +func TryPackHash(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([]byte); ok { + packer.PackFixedBytes(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackHash attempts to unpack the value as a 32-byte sequence +func TryUnpackHash(packer *Packer) interface{} { + return packer.UnpackFixedBytes(hashing.HashLen) +} + +// TryPackHashes attempts to pack the value as a list of 32-byte sequences +func TryPackHashes(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([][]byte); ok { + packer.PackFixedByteSlices(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackHashes attempts to unpack the value as a list of 32-byte sequences +func TryUnpackHashes(packer *Packer) interface{} { + return packer.UnpackFixedByteSlices(hashing.HashLen) +} + +// TryPackAddr attempts to pack the value as a 20-byte sequence +func TryPackAddr(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([]byte); ok { + packer.PackFixedBytes(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackAddr attempts to unpack the value as a 20-byte sequence +func TryUnpackAddr(packer *Packer) interface{} { + return packer.UnpackFixedBytes(hashing.AddrLen) +} + +// TryPackAddrList attempts to pack the value as a list of 20-byte sequences +func TryPackAddrList(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([][]byte); ok { + packer.PackFixedByteSlices(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackAddrList attempts to unpack the value as a list of 20-byte sequences +func TryUnpackAddrList(packer *Packer) 
interface{} { + return packer.UnpackFixedByteSlices(hashing.AddrLen) +} + +// TryPackBytes attempts to pack the value as a list of bytes +func TryPackBytes(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([]byte); ok { + packer.PackBytes(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackBytes attempts to unpack the value as a list of bytes +func TryUnpackBytes(packer *Packer) interface{} { + return packer.UnpackBytes() +} + +// TryPackStr attempts to pack the value as a string +func TryPackStr(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(string); ok { + packer.PackStr(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackStr attempts to unpack the value as a string +func TryUnpackStr(packer *Packer) interface{} { + return packer.UnpackStr() +} + +// TryPackIP attempts to pack the value as an ip port pair +func TryPackIP(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.(utils.IPDesc); ok { + packer.PackIP(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackIP attempts to unpack the value as an ip port pair +func TryUnpackIP(packer *Packer) interface{} { + return packer.UnpackIP() +} + +// TryPackIPList attempts to pack the value as an ip port pair list +func TryPackIPList(packer *Packer, valIntf interface{}) { + if val, ok := valIntf.([]utils.IPDesc); ok { + packer.PackIPs(val) + } else { + packer.Add(errBadType) + } +} + +// TryUnpackIPList attempts to unpack the value as an ip port pair list +func TryUnpackIPList(packer *Packer) interface{} { + return packer.UnpackIPs() +} diff --git a/utils/wrappers/packing_test.go b/utils/wrappers/packing_test.go new file mode 100644 index 0000000..a97463f --- /dev/null +++ b/utils/wrappers/packing_test.go @@ -0,0 +1,153 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package wrappers + +import ( + "bytes" + "testing" +) + +func TestPackerByte(t *testing.T) { + p := Packer{MaxSize: 1} + + p.PackByte(0x01) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 1 { + t.Fatalf("Packer.PackByte wrote %d byte(s) but expected %d byte(s)", size, 1) + } + + expected := []byte{0x01} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackByte wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } +} + +func TestPackerShort(t *testing.T) { + p := Packer{MaxSize: 2} + + p.PackShort(0x0102) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 2 { + t.Fatalf("Packer.PackShort wrote %d byte(s) but expected %d byte(s)", size, 2) + } + + expected := []byte{0x01, 0x02} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackShort wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } +} + +func TestPackerInt(t *testing.T) { + p := Packer{MaxSize: 4} + + p.PackInt(0x01020304) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 4 { + t.Fatalf("Packer.PackInt wrote %d byte(s) but expected %d byte(s)", size, 4) + } + + expected := []byte{0x01, 0x02, 0x03, 0x04} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackInt wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } +} + +func TestPackerLong(t *testing.T) { + p := Packer{MaxSize: 8} + + p.PackLong(0x0102030405060708) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 8 { + t.Fatalf("Packer.PackLong wrote %d byte(s) but expected %d byte(s)", size, 8) + } + + expected := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackLong wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } +} + +func TestPackerString(t *testing.T) { + p := Packer{MaxSize: 5} + + p.PackStr("Ava") + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 5 { + t.Fatalf("Packer.PackStr wrote %d byte(s) but expected %d byte(s)", 
size, 5) + } + + expected := []byte{0x00, 0x03, 0x41, 0x76, 0x61} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackStr wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } +} + +func TestPacker(t *testing.T) { + packer := Packer{ + MaxSize: 3, + } + + if packer.Errored() { + t.Fatalf("Packer has error %s", packer.Err) + } + + packer.PackShort(17) + if len(packer.Bytes) != 2 { + t.Fatalf("Wrong byte length") + } + + packer.PackShort(1) + if !packer.Errored() { + t.Fatalf("Packer should have error") + } + + newPacker := Packer{ + Bytes: packer.Bytes, + } + + if newPacker.UnpackShort() != 17 { + t.Fatalf("Unpacked wrong value") + } +} + +func TestPackBool(t *testing.T) { + p := Packer{MaxSize: 3} + p.PackBool(false) + p.PackBool(true) + p.PackBool(false) + if p.Errored() { + t.Fatal("should have been able to pack 3 bools") + } + + p2 := Packer{Bytes: p.Bytes} + bool1, bool2, bool3 := p2.UnpackBool(), p2.UnpackBool(), p2.UnpackBool() + + if p.Errored() { + t.Fatalf("errors while unpacking bools: %v", p.Errs) + } + + if bool1 || !bool2 || bool3 { + t.Fatal("got back wrong values") + } +} diff --git a/vms/avm/asset.go b/vms/avm/asset.go new file mode 100644 index 0000000..fc9ef07 --- /dev/null +++ b/vms/avm/asset.go @@ -0,0 +1,35 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" +) + +var ( + errNilAssetID = errors.New("nil asset ID is not valid") + errEmptyAssetID = errors.New("empty asset ID is not valid") +) + +// Asset ... 
+type Asset struct { + ID ids.ID `serialize:"true"` +} + +// AssetID returns the ID of the contained asset +func (asset *Asset) AssetID() ids.ID { return asset.ID } + +// Verify implements the verify.Verifiable interface +func (asset *Asset) Verify() error { + switch { + case asset == nil: + return errNilAssetID + case asset.ID.IsZero(): + return errEmptyAssetID + default: + return nil + } +} diff --git a/vms/avm/asset_test.go b/vms/avm/asset_test.go new file mode 100644 index 0000000..209cc81 --- /dev/null +++ b/vms/avm/asset_test.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestAssetVerifyNil(t *testing.T) { + id := (*Asset)(nil) + if err := id.Verify(); err == nil { + t.Fatalf("Should have errored due to nil AssetID") + } +} + +func TestAssetVerifyEmpty(t *testing.T) { + id := Asset{} + if err := id.Verify(); err == nil { + t.Fatalf("Should have errored due to empty AssetID") + } +} + +func TestAssetID(t *testing.T) { + c := codec.NewDefault() + + id := Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + } + + if err := id.Verify(); err != nil { + t.Fatal(err) + } + + bytes, err := c.Marshal(&id) + if err != nil { + t.Fatal(err) + } + + newID := Asset{} + if err := c.Unmarshal(bytes, &newID); err != nil { + t.Fatal(err) + } + + if err := newID.Verify(); err != nil { + t.Fatal(err) + } + + if !id.AssetID().Equals(newID.AssetID()) { + t.Fatalf("Parsing returned the wrong Asset ID") + } +} diff --git a/vms/avm/base_tx.go b/vms/avm/base_tx.go new file mode 100644 index 0000000..8f90eb7 --- /dev/null +++ b/vms/avm/base_tx.go @@ -0,0 +1,223 @@ +// (c) 2019-2020, Ava Labs, 
Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errNilTx = errors.New("nil tx is not valid") + errWrongNetworkID = errors.New("tx has wrong network ID") + errWrongChainID = errors.New("tx has wrong chain ID") + + errOutputsNotSorted = errors.New("outputs not sorted") + errInputsNotSortedUnique = errors.New("inputs not sorted and unique") + + errInputOverflow = errors.New("inputs overflowed uint64") + errOutputOverflow = errors.New("outputs overflowed uint64") + errInsufficientFunds = errors.New("insufficient funds") +) + +// BaseTx is the basis of all transactions. +type BaseTx struct { + metadata + + NetID uint32 `serialize:"true"` // ID of the network this chain lives on + BCID ids.ID `serialize:"true"` // ID of the chain on which this transaction exists (prevents replay attacks) + Outs []*TransferableOutput `serialize:"true"` // The outputs of this transaction + Ins []*TransferableInput `serialize:"true"` // The inputs to this transaction +} + +// NetworkID is the ID of the network on which this transaction exists +func (t *BaseTx) NetworkID() uint32 { return t.NetID } + +// ChainID is the ID of the chain on which this transaction exists +func (t *BaseTx) ChainID() ids.ID { return t.BCID } + +// Outputs track which outputs this transaction is producing. The returned array +// should not be modified. +func (t *BaseTx) Outputs() []*TransferableOutput { return t.Outs } + +// Inputs track which UTXOs this transaction is consuming. The returned array +// should not be modified. +func (t *BaseTx) Inputs() []*TransferableInput { return t.Ins } + +// InputUTXOs track which UTXOs this transaction is consuming. 
+func (t *BaseTx) InputUTXOs() []*UTXOID { + utxos := []*UTXOID(nil) + for _, in := range t.Ins { + utxos = append(utxos, &in.UTXOID) + } + return utxos +} + +// AssetIDs returns the IDs of the assets this transaction depends on +func (t *BaseTx) AssetIDs() ids.Set { + assets := ids.Set{} + for _, in := range t.Ins { + assets.Add(in.AssetID()) + } + return assets +} + +// UTXOs returns the UTXOs transaction is producing. +func (t *BaseTx) UTXOs() []*UTXO { + txID := t.ID() + utxos := make([]*UTXO, len(t.Outs)) + for i, out := range t.Outs { + utxos[i] = &UTXO{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: uint32(i), + }, + Asset: Asset{ + ID: out.AssetID(), + }, + Out: out.Out, + } + } + return utxos +} + +// SyntacticVerify that this transaction is well-formed. +func (t *BaseTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error { + switch { + case t == nil: + return errNilTx + case t.NetID != ctx.NetworkID: + return errWrongNetworkID + case !t.BCID.Equals(ctx.ChainID): + return errWrongChainID + } + + for _, out := range t.Outs { + if err := out.Verify(); err != nil { + return err + } + } + if !isSortedTransferableOutputs(t.Outs, c) { + return errOutputsNotSorted + } + + for _, in := range t.Ins { + if err := in.Verify(); err != nil { + return err + } + } + if !isSortedAndUniqueTransferableInputs(t.Ins) { + return errInputsNotSortedUnique + } + + consumedFunds := map[[32]byte]uint64{} + for _, in := range t.Ins { + assetID := in.AssetID() + amount := in.Input().Amount() + + var err error + assetIDKey := assetID.Key() + consumedFunds[assetIDKey], err = math.Add64(consumedFunds[assetIDKey], amount) + + if err != nil { + return errInputOverflow + } + } + producedFunds := map[[32]byte]uint64{} + for _, out := range t.Outs { + assetID := out.AssetID() + amount := out.Output().Amount() + + var err error + assetIDKey := assetID.Key() + producedFunds[assetIDKey], err = math.Add64(producedFunds[assetIDKey], amount) + + if err != nil { + return 
errOutputOverflow + } + } + + // TODO: Add the Tx fee to the producedFunds + + for assetID, producedAssetAmount := range producedFunds { + consumedAssetAmount := consumedFunds[assetID] + if producedAssetAmount > consumedAssetAmount { + return errInsufficientFunds + } + } + + return t.metadata.Verify() +} + +// SemanticVerify that this transaction is valid to be spent. +func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { + for i, in := range t.Ins { + cred := creds[i] + + fxIndex, err := vm.getFx(cred.Cred) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + utxoID := in.InputID() + utxo, err := vm.state.UTXO(utxoID) + if err == nil { + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + err = fx.VerifyTransfer(uTx, utxo.Out, in.In, cred.Cred) + if err == nil { + continue + } + return err + } + + inputTx, inputIndex := in.InputSource() + parent := UniqueTx{ + vm: vm, + txID: inputTx, + } + + if err := parent.Verify(); err != nil { + return errMissingUTXO + } else if status := parent.Status(); status.Decided() { + return errMissingUTXO + } + + utxos := parent.UTXOs() + + if uint32(len(utxos)) <= inputIndex || int(inputIndex) < 0 { + return errInvalidUTXO + } + + utxo = utxos[int(inputIndex)] + + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + if err := fx.VerifyTransfer(uTx, utxo.Out, in.In, cred); err != nil { + return err + } + } + return nil +} diff --git a/vms/avm/base_tx_test.go b/vms/avm/base_tx_test.go new file mode 100644 index 0000000..0c3732d --- /dev/null +++ b/vms/avm/base_tx_test.go @@ -0,0 +1,2051 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package avm + +import ( + "bytes" + "math" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestBaseTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x00, + // networkID: + 0x00, 0x00, 0xa8, 0x66, + // blockchainID: + 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of outs: + 0x00, 0x00, 0x00, 0x01, + // output[0]: + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // fxID: + 0x00, 0x00, 0x00, 0x04, + // secp256k1 Transferable Output: + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0xfc, 0xed, 0xa8, 0xf9, 0x0f, 0xcb, 0x5d, 0x30, + 0x61, 0x4b, 0x99, 0xd7, 0x9f, 0xc4, 0xba, 0xa2, + 0x93, 0x07, 0x76, 0x26, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // txID: + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + // utxo index: + 0x00, 0x00, 0x00, 0x01, + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // fxID: + 0x00, 0x00, 
0x00, 0x06, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + // number of signatures: + 0x00, 0x00, 0x00, 0x01, + // signature index[0]: + 0x00, 0x00, 0x00, 0x02, + } + + tx := &Tx{UnsignedTx: &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + }} + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +func TestBaseTxGetters(t *testing.T) { + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: 
UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + txID := tx.ID() + + if netID := tx.NetworkID(); netID != networkID { + t.Fatalf("Wrong network ID returned") + } else if bcID := tx.ChainID(); !bcID.Equals(chainID) { + t.Fatalf("Wrong chain ID returned") + } else if outs := tx.Outputs(); len(outs) != 1 { + t.Fatalf("Outputs returned wrong number of outs") + } else if out := outs[0]; out != tx.Outs[0] { + t.Fatalf("Outputs returned wrong output") + } else if ins := tx.Inputs(); len(ins) != 1 { + t.Fatalf("Inputs returned wrong number of ins") + } else if in := ins[0]; in != tx.Ins[0] { + t.Fatalf("Inputs returned wrong input") + } else if assets := tx.AssetIDs(); assets.Len() != 1 { + t.Fatalf("Wrong number of assets returned") + } else if !assets.Contains(asset) { + t.Fatalf("Wrong asset returned") + } else if utxos := tx.UTXOs(); len(utxos) != 1 { + t.Fatalf("Wrong number of utxos returned") + } else if utxo := utxos[0]; !utxo.TxID.Equals(txID) { + t.Fatalf("Wrong tx ID returned") + } else if utxoIndex := utxo.OutputIndex; utxoIndex != 0 { + t.Fatalf("Wrong output index returned") + } else if assetID := utxo.AssetID(); !assetID.Equals(asset) { + t.Fatalf("Wrong asset ID returned") + } else if utxoOut := utxo.Out; utxoOut != out.Out { + t.Fatalf("Wrong output returned") + } +} + +func TestBaseTxSyntacticVerify(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + 
c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err != nil { + t.Fatal(err) + } +} + +func TestBaseTxSyntacticVerifyNil(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := (*BaseTx)(nil) + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Nil BaseTx should have errored") + } +} + +func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := 
&BaseTx{ + NetID: 0, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Wrong networkID should have errored") + } +} + +func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: ids.Empty, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: 
asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Wrong chain ID should have errored") + } +} + +func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + nil, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Invalid output should have errored") + } +} + +func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 
2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Unsorted outputs should have errored") + } +} + +func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + nil, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Invalid input should have errored") + } +} + +func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { + c := codec.NewDefault() + 
c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: math.MaxUint64, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Input overflow should have errored") + } +} + +func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + 
c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Output overflow should have errored") + } +} + +func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + tx.Initialize([]byte{}) + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Insufficient funds should have errored") + } +} + +func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + tx := &BaseTx{ + NetID: networkID, + BCID: chainID, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }, + }, + } + + if err := tx.SyntacticVerify(ctx, c, 0); err == nil { + t.Fatalf("Uninitialized tx should have 
errored") + } +} + +func TestBaseTxSemanticVerify(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err != nil { + t.Fatal(err) + } +} + +func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + 
vm.codec.RegisterType(&testVerifiable{}) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &testVerifiable{}, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("should have errored due to an unknown feature extension") + } +} + +func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + vm.codec.RegisterType(&testVerifiable{}) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := 
[crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("should have errored due to an asset ID mismatch") + } +} + +func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.NewID([32]byte{1}), + Fx: &testFx{}, + }, + &common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + cr := codecRegistry{ + index: 1, + typeToFxIndex: vm.typeToFxIndex, + codec: vm.codec, + } + + cr.RegisterType(&TestTransferable{}) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &TestTransferable{}, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) 
+ + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("should have errored due to an unsupported fx") + } +} + +func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Invalid credential should have failed verification") + } +} + +func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + 
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Unknown UTXO should have failed verification") + } +} + +func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: math.MaxUint32, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 
0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Invalid UTXO should have failed verification") + } +} + +func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: genesisTx.ID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) + if err != nil { + t.Fatal(err) 
+ } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + pendingTx.Creds = append(pendingTx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(pendingTx) + if err != nil { + t.Fatal(err) + } + + txID, err := vm.IssueTx(b) + if err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + <-issuer + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.PendingTxs() + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err = key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig = [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err = vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Invalid UTXO should have failed verification") + } +} + +func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if 
err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: genesisTx.ID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + pendingTx.Creds = append(pendingTx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(pendingTx) + if err != nil { + t.Fatal(err) + } + + txID, err := vm.IssueTx(b) + if err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + <-issuer + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.PendingTxs() + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err = key.Sign(unsignedBytes) + if err != nil { 
+ t.Fatal(err) + } + fixedSig = [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err = vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Wrong asset ID should have failed verification") + } +} + +func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.NewID([32]byte{1}), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty, + Fx: &testFx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + cr := codecRegistry{ + index: 1, + typeToFxIndex: vm.typeToFxIndex, + codec: vm.codec, + } + + cr.RegisterType(&testVerifiable{}) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: genesisTx.ID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) + if err != nil { + 
t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + pendingTx.Creds = append(pendingTx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(pendingTx) + if err != nil { + t.Fatal(err) + } + + txID, err := vm.IssueTx(b) + if err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + <-issuer + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.PendingTxs() + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &testVerifiable{}, + }) + + b, err = vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Unsupported feature extension should have failed verification") + } +} + +func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.NewID([32]byte{1}), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty, + Fx: &testFx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + cr := codecRegistry{ + index: 1, + typeToFxIndex: vm.typeToFxIndex, + codec: vm.codec, + } + + cr.RegisterType(&testVerifiable{}) + + genesisTx := 
GetFirstTxFromGenesisTest(genesisBytes, t) + + pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: genesisTx.ID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + pendingTx.Creds = append(pendingTx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(pendingTx) + if err != nil { + t.Fatal(err) + } + + txID, err := vm.IssueTx(b) + if err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + <-issuer + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.PendingTxs() + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, + }, + }, + }) + + b, err = vm.codec.Marshal(tx) + if err != 
nil { + t.Fatal(err) + } + tx.Initialize(b) + + uTx := &UniqueTx{ + vm: vm, + txID: tx.ID(), + t: &txState{ + tx: tx, + }, + } + + if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { + t.Fatalf("Invalid signature should have failed verification") + } +} diff --git a/vms/avm/create_asset_tx.go b/vms/avm/create_asset_tx.go new file mode 100644 index 0000000..c606b6b --- /dev/null +++ b/vms/avm/create_asset_tx.go @@ -0,0 +1,119 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + "fmt" + "strings" + "unicode" + + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/codec" +) + +const ( + maxNameLen = 128 + maxSymbolLen = 4 + maxDenomination = 32 +) + +var ( + errInitialStatesNotSortedUnique = errors.New("initial states not sorted and unique") + errNameTooLong = fmt.Errorf("name is too long, maximum size is %d", maxNameLen) + errSymbolTooLong = fmt.Errorf("symbol is too long, maximum size is %d", maxSymbolLen) + errNoFxs = errors.New("assets must support at least one Fx") + errUnprintableASCIICharacter = errors.New("unprintable ascii character was provided") + errUnexpectedWhitespace = errors.New("unexpected whitespace provided") + errDenominationTooLarge = errors.New("denomination is too large") +) + +// CreateAssetTx is a transaction that creates a new asset. +type CreateAssetTx struct { + BaseTx `serialize:"true"` + Name string `serialize:"true"` + Symbol string `serialize:"true"` + Denomination byte `serialize:"true"` + States []*InitialState `serialize:"true"` +} + +// InitialStates track which virtual machines, and the initial state of these +// machines, this asset uses. The returned array should not be modified. +func (t *CreateAssetTx) InitialStates() []*InitialState { return t.States } + +// UTXOs returns the UTXOs transaction is producing. 
+func (t *CreateAssetTx) UTXOs() []*UTXO { + txID := t.ID() + utxos := t.BaseTx.UTXOs() + + for _, state := range t.States { + for _, out := range state.Outs { + utxos = append(utxos, &UTXO{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: uint32(len(utxos)), + }, + Asset: Asset{ + ID: txID, + }, + Out: out, + }) + } + } + + return utxos +} + +// SyntacticVerify that this transaction is well-formed. +func (t *CreateAssetTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { + switch { + case t == nil: + return errNilTx + case len(t.Name) > maxNameLen: + return errNameTooLong + case len(t.Symbol) > maxSymbolLen: + return errSymbolTooLong + case len(t.States) == 0: + return errNoFxs + case t.Denomination > maxDenomination: + return errDenominationTooLarge + case strings.TrimSpace(t.Name) != t.Name: + return errUnexpectedWhitespace + case strings.TrimSpace(t.Symbol) != t.Symbol: + return errUnexpectedWhitespace + } + + for _, r := range t.Name { + if r > unicode.MaxASCII || !unicode.IsPrint(r) { + return errUnprintableASCIICharacter + } + } + for _, r := range t.Symbol { + if r > unicode.MaxASCII || !unicode.IsPrint(r) { + return errUnprintableASCIICharacter + } + } + + if err := t.BaseTx.SyntacticVerify(ctx, c, numFxs); err != nil { + return err + } + + for _, state := range t.States { + if err := state.Verify(c, numFxs); err != nil { + return err + } + } + if !isSortedAndUniqueInitialStates(t.States) { + return errInitialStatesNotSortedUnique + } + return nil +} + +// SemanticVerify that this transaction is well-formed. +func (t *CreateAssetTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { + return t.BaseTx.SemanticVerify(vm, uTx, creds) +} + +// Sort ... 
+func (t *CreateAssetTx) Sort() { sortInitialStates(t.States) } diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go new file mode 100644 index 0000000..2dabd5c --- /dev/null +++ b/vms/avm/create_asset_tx_test.go @@ -0,0 +1,205 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestCreateAssetTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x01, + // networkID: + 0x00, 0x00, 0x00, 0x02, + // blockchainID: + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + // number of outs: + 0x00, 0x00, 0x00, 0x01, + // output[0]: + // assetID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // output: + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // txID: + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + // utxoIndex: + 0x00, 0x00, 0x00, 0x05, + // 
assetID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // input: + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, + // name: + 0x00, 0x10, 0x56, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x20, 0x49, 0x6e, 0x64, + 0x65, 0x78, + // symbol: + 0x00, 0x03, 0x56, 0x49, 0x58, + // denomination: + 0x02, + // number of InitialStates: + 0x00, 0x00, 0x00, 0x01, + // InitialStates[0]: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + + tx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: 2, + BCID: ids.NewID([32]byte{ + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + }), + Outs: []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 
0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), + }, + }, + }, + }, + }, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + }), + OutputIndex: 5, + }, + Asset: Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + In: &secp256k1fx.TransferInput{ + Amt: 123456789, + Input: secp256k1fx.Input{ + SigIndices: []uint32{3, 7}, + }, + }, + }, + }, + }, + Name: "Volatility Index", + Symbol: "VIX", + Denomination: 2, + States: []*InitialState{ + &InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), + }, + }, + }, + }, + }, + }, + }} + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(&tx.UnsignedTx) + if err != 
nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} diff --git a/vms/avm/credential.go b/vms/avm/credential.go new file mode 100644 index 0000000..d5fb8ee --- /dev/null +++ b/vms/avm/credential.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilCredential = errors.New("nil credential is not valid") + errNilFxCredential = errors.New("nil feature extension credential is not valid") +) + +// Credential ... +type Credential struct { + Cred verify.Verifiable `serialize:"true"` +} + +// Credential returns the feature extension credential that this Credential is +// using. +func (cred *Credential) Credential() verify.Verifiable { return cred.Cred } + +// Verify implements the verify.Verifiable interface +func (cred *Credential) Verify() error { + switch { + case cred == nil: + return errNilCredential + case cred.Cred == nil: + return errNilFxCredential + default: + return cred.Cred.Verify() + } +} diff --git a/vms/avm/credential_test.go b/vms/avm/credential_test.go new file mode 100644 index 0000000..867a89f --- /dev/null +++ b/vms/avm/credential_test.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" +) + +func TestCredentialVerifyNil(t *testing.T) { + cred := (*Credential)(nil) + if err := cred.Verify(); err == nil { + t.Fatalf("Should have errored due to nil credential") + } +} + +func TestCredentialVerifyNilFx(t *testing.T) { + cred := &Credential{} + if err := cred.Verify(); err == nil { + t.Fatalf("Should have errored due to nil fx credential") + } +} + +func TestCredential(t *testing.T) { + cred := &Credential{ + Cred: &testVerifiable{}, + } + + if err := cred.Verify(); err != nil { + t.Fatal(err) + } + + if cred.Credential() != cred.Cred { + t.Fatalf("Should have returned the fx credential") + } +} diff --git a/vms/avm/factory.go b/vms/avm/factory.go new file mode 100644 index 0000000..b76606d --- /dev/null +++ b/vms/avm/factory.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID that this VM uses when labeled +var ( + ID = ids.NewID([32]byte{'a', 'v', 'm'}) +) + +// Factory ... +type Factory struct{} + +// New ... +func (f *Factory) New() interface{} { return &VM{} } diff --git a/vms/avm/fx.go b/vms/avm/fx.go new file mode 100644 index 0000000..cc4d8e4 --- /dev/null +++ b/vms/avm/fx.go @@ -0,0 +1,49 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/verify" +) + +type parsedFx struct { + ID ids.ID + Fx Fx +} + +// Fx is the interface a feature extension must implement to support the AVM. +type Fx interface { + // Initialize this feature extension to be running under this VM. Should + // return an error if the VM is incompatible. + Initialize(vm interface{}) error + + // VerifyTransfer verifies that the specified transaction can spend the + // provided utxo with no restrictions on the destination. 
If the transaction + // can't spend the output based on the input and credential, a non-nil error + // should be returned. + VerifyTransfer(tx, utxo, in, cred interface{}) error + + // VerifyOperation verifies that the specified transaction can spend the + // provided utxos conditioned on the result being restricted to the provided + // outputs. If the transaction can't spend the output based on the input and + // credential, a non-nil error should be returned. + VerifyOperation(tx interface{}, utxos, ins, creds, outs []interface{}) error +} + +// FxTransferable is the interface a feature extension must provide to transfer +// value between features extensions. +type FxTransferable interface { + verify.Verifiable + + // Amount returns how much value this output consumes of the asset in its + // transaction. + Amount() uint64 +} + +// FxAddressable is the interface a feature extension must provide to be able to +// be tracked as a part of the utxo set for a set of addresses +type FxAddressable interface { + Addresses() [][]byte +} diff --git a/vms/avm/fx_test.go b/vms/avm/fx_test.go new file mode 100644 index 0000000..a0863b2 --- /dev/null +++ b/vms/avm/fx_test.go @@ -0,0 +1,14 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +type testFx struct { + initialize, verifyTransfer, verifyOperation error +} + +func (fx *testFx) Initialize(_ interface{}) error { return fx.initialize } +func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer } +func (fx *testFx) VerifyOperation(_ interface{}, _, _, _, _ []interface{}) error { + return fx.verifyOperation +} diff --git a/vms/avm/genesis.go b/vms/avm/genesis.go new file mode 100644 index 0000000..2153408 --- /dev/null +++ b/vms/avm/genesis.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "sort" + "strings" + + "github.com/ava-labs/gecko/utils" +) + +// Genesis ... +type Genesis struct { + Txs []*GenesisAsset `serialize:"true"` +} + +// Less ... +func (g *Genesis) Less(i, j int) bool { return strings.Compare(g.Txs[i].Alias, g.Txs[j].Alias) == -1 } + +// Len ... +func (g *Genesis) Len() int { return len(g.Txs) } + +// Swap ... +func (g *Genesis) Swap(i, j int) { g.Txs[j], g.Txs[i] = g.Txs[i], g.Txs[j] } + +// Sort ... +func (g *Genesis) Sort() { sort.Sort(g) } + +// IsSortedAndUnique ... +func (g *Genesis) IsSortedAndUnique() bool { return utils.IsSortedAndUnique(g) } + +// GenesisAsset ... +type GenesisAsset struct { + Alias string `serialize:"true"` + CreateAssetTx `serialize:"true"` +} diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go new file mode 100644 index 0000000..58dae84 --- /dev/null +++ b/vms/avm/initial_state.go @@ -0,0 +1,91 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilInitialState = errors.New("nil initial state is not valid") + errNilFxOutput = errors.New("nil feature extension output is not valid") +) + +// InitialState ... +type InitialState struct { + FxID uint32 `serialize:"true"` + Outs []verify.Verifiable `serialize:"true"` +} + +// Verify implements the verify.Verifiable interface +func (is *InitialState) Verify(c codec.Codec, numFxs int) error { + switch { + case is == nil: + return errNilInitialState + case is.FxID >= uint32(numFxs): + return errUnknownFx + } + + for _, out := range is.Outs { + if out == nil { + return errNilFxOutput + } + if err := out.Verify(); err != nil { + return err + } + } + if !isSortedVerifiables(is.Outs, c) { + return errOutputsNotSorted + } + + return nil +} + +// Sort ... 
+func (is *InitialState) Sort(c codec.Codec) { sortVerifiables(is.Outs, c) } + +type innerSortVerifiables struct { + vers []verify.Verifiable + codec codec.Codec +} + +func (vers *innerSortVerifiables) Less(i, j int) bool { + iVer := vers.vers[i] + jVer := vers.vers[j] + + iBytes, err := vers.codec.Marshal(&iVer) + if err != nil { + return false + } + jBytes, err := vers.codec.Marshal(&jVer) + if err != nil { + return false + } + return bytes.Compare(iBytes, jBytes) == -1 +} +func (vers *innerSortVerifiables) Len() int { return len(vers.vers) } +func (vers *innerSortVerifiables) Swap(i, j int) { v := vers.vers; v[j], v[i] = v[i], v[j] } + +func sortVerifiables(vers []verify.Verifiable, c codec.Codec) { + sort.Sort(&innerSortVerifiables{vers: vers, codec: c}) +} +func isSortedVerifiables(vers []verify.Verifiable, c codec.Codec) bool { + return sort.IsSorted(&innerSortVerifiables{vers: vers, codec: c}) +} + +type innerSortInitialState []*InitialState + +func (iss innerSortInitialState) Less(i, j int) bool { return iss[i].FxID < iss[j].FxID } +func (iss innerSortInitialState) Len() int { return len(iss) } +func (iss innerSortInitialState) Swap(i, j int) { iss[j], iss[i] = iss[i], iss[j] } + +func sortInitialStates(iss []*InitialState) { sort.Sort(innerSortInitialState(iss)) } +func isSortedAndUniqueInitialStates(iss []*InitialState) bool { + return sort.IsSorted(innerSortInitialState(iss)) +} diff --git a/vms/avm/initial_state_test.go b/vms/avm/initial_state_test.go new file mode 100644 index 0000000..267947e --- /dev/null +++ b/vms/avm/initial_state_test.go @@ -0,0 +1,148 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "errors" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestInitialStateVerifyNil(t *testing.T) { + c := codec.NewDefault() + numFxs := 1 + + is := (*InitialState)(nil) + if err := is.Verify(c, numFxs); err == nil { + t.Fatalf("Should have errored due to nil initial state") + } +} + +func TestInitialStateVerifyUnknownFxID(t *testing.T) { + c := codec.NewDefault() + numFxs := 1 + + is := InitialState{ + FxID: 1, + } + if err := is.Verify(c, numFxs); err == nil { + t.Fatalf("Should have errored due to unknown FxID") + } +} + +func TestInitialStateVerifyNilOutput(t *testing.T) { + c := codec.NewDefault() + numFxs := 1 + + is := InitialState{ + FxID: 0, + Outs: []verify.Verifiable{nil}, + } + if err := is.Verify(c, numFxs); err == nil { + t.Fatalf("Should have errored due to a nil output") + } +} + +func TestInitialStateVerifyInvalidOutput(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&testVerifiable{}) + numFxs := 1 + + is := InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &testVerifiable{err: errors.New("")}, + }, + } + if err := is.Verify(c, numFxs); err == nil { + t.Fatalf("Should have errored due to an invalid output") + } +} + +func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&TestTransferable{}) + numFxs := 1 + + is := InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &TestTransferable{Val: 1}, + &TestTransferable{Val: 0}, + }, + } + if err := is.Verify(c, numFxs); err == nil { + t.Fatalf("Should have errored due to unsorted outputs") + } + + is.Sort(c) + + if err := is.Verify(c, numFxs); err != nil { + t.Fatal(err) + } +} + +func TestInitialStateVerifySerialization(t *testing.T) { + c := codec.NewDefault() + 
c.RegisterType(&secp256k1fx.TransferOutput{}) + + expected := []byte{ + // fxID: + 0x00, 0x00, 0x00, 0x00, + // num outputs: + 0x00, 0x00, 0x00, 0x01, + // output: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + + is := &InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), + }, + }, + }, + }, + } + + isBytes, err := c.Marshal(is) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(isBytes, expected) { + t.Fatalf("Expected:\n%s\nResult:\n%s", + formatting.DumpBytes{Bytes: expected}, + formatting.DumpBytes{Bytes: isBytes}, + ) + } +} diff --git a/vms/avm/metadata.go b/vms/avm/metadata.go new file mode 100644 index 0000000..fb29b44 --- /dev/null +++ b/vms/avm/metadata.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errNilMetadata = errors.New("nil metadata is not valid") + errMetadataNotInitialize = errors.New("metadata was never initialized and is not valid") +) + +type metadata struct { + id ids.ID // The ID of this data + bytes []byte // Byte representation of this data +} + +// Bytes returns the binary representation of this data +func (md *metadata) Initialize(bytes []byte) { + md.id = ids.NewID(hashing.ComputeHash256Array(bytes)) + md.bytes = bytes +} + +// ID returns the unique ID of this data +func (md *metadata) ID() ids.ID { return md.id } + +// Bytes returns the binary representation of this data +func (md *metadata) Bytes() []byte { return md.bytes } + +// Verify implements the verify.Verifiable interface +func (md *metadata) Verify() error { + switch { + case md == nil: + return errNilMetadata + case md.id.IsZero(): + return errMetadataNotInitialize + default: + return nil + } +} diff --git a/vms/avm/metadata_test.go b/vms/avm/metadata_test.go new file mode 100644 index 0000000..09c559b --- /dev/null +++ b/vms/avm/metadata_test.go @@ -0,0 +1,22 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" +) + +func TestMetaDataVerifyNil(t *testing.T) { + md := (*metadata)(nil) + if err := md.Verify(); err == nil { + t.Fatalf("Should have errored due to nil metadata") + } +} + +func TestMetaDataVerifyUninitialized(t *testing.T) { + md := &metadata{} + if err := md.Verify(); err == nil { + t.Fatalf("Should have errored due to uninitialized metadata") + } +} diff --git a/vms/avm/operables.go b/vms/avm/operables.go new file mode 100644 index 0000000..7aac3a0 --- /dev/null +++ b/vms/avm/operables.go @@ -0,0 +1,116 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilOperableOutput = errors.New("nil operable output is not valid") + errNilOperableFxOutput = errors.New("nil operable feature extension output is not valid") + + errNilOperableInput = errors.New("nil operable input is not valid") + errNilOperableFxInput = errors.New("nil operable feature extension input is not valid") +) + +// OperableOutput ... +type OperableOutput struct { + Out verify.Verifiable `serialize:"true"` +} + +// Output returns the feature extension output that this Output is using. +func (out *OperableOutput) Output() verify.Verifiable { return out.Out } + +// Verify implements the verify.Verifiable interface +func (out *OperableOutput) Verify() error { + switch { + case out == nil: + return errNilOperableOutput + case out.Out == nil: + return errNilOperableFxOutput + default: + return out.Out.Verify() + } +} + +type innerSortOperableOutputs struct { + outs []*OperableOutput + codec codec.Codec +} + +func (outs *innerSortOperableOutputs) Less(i, j int) bool { + iOut := outs.outs[i] + jOut := outs.outs[j] + + iBytes, err := outs.codec.Marshal(&iOut.Out) + if err != nil { + return false + } + jBytes, err := outs.codec.Marshal(&jOut.Out) + if err != nil { + return false + } + return bytes.Compare(iBytes, jBytes) == -1 +} +func (outs *innerSortOperableOutputs) Len() int { return len(outs.outs) } +func (outs *innerSortOperableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] } + +func sortOperableOutputs(outs []*OperableOutput, c codec.Codec) { + sort.Sort(&innerSortOperableOutputs{outs: outs, codec: c}) +} +func isSortedOperableOutputs(outs []*OperableOutput, c codec.Codec) bool { + return sort.IsSorted(&innerSortOperableOutputs{outs: outs, codec: c}) +} + +// OperableInput ... 
+type OperableInput struct { + UTXOID `serialize:"true"` + + In verify.Verifiable `serialize:"true"` +} + +// Input returns the feature extension input that this Input is using. +func (in *OperableInput) Input() verify.Verifiable { return in.In } + +// Verify implements the verify.Verifiable interface +func (in *OperableInput) Verify() error { + switch { + case in == nil: + return errNilOperableInput + case in.In == nil: + return errNilOperableFxInput + default: + return verify.All(&in.UTXOID, in.In) + } +} + +type innerSortOperableInputs []*OperableInput + +func (ins innerSortOperableInputs) Less(i, j int) bool { + iID, iIndex := ins[i].InputSource() + jID, jIndex := ins[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} +func (ins innerSortOperableInputs) Len() int { return len(ins) } +func (ins innerSortOperableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } + +func sortOperableInputs(ins []*OperableInput) { sort.Sort(innerSortOperableInputs(ins)) } +func isSortedAndUniqueOperableInputs(ins []*OperableInput) bool { + return utils.IsSortedAndUnique(innerSortOperableInputs(ins)) +} diff --git a/vms/avm/operables_test.go b/vms/avm/operables_test.go new file mode 100644 index 0000000..98e0996 --- /dev/null +++ b/vms/avm/operables_test.go @@ -0,0 +1,175 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestOperableOutputVerifyNil(t *testing.T) { + oo := (*OperableOutput)(nil) + if err := oo.Verify(); err == nil { + t.Fatalf("Should have errored due to nil operable output") + } +} + +func TestOperableOutputVerifyNilFx(t *testing.T) { + oo := &OperableOutput{} + if err := oo.Verify(); err == nil { + t.Fatalf("Should have errored due to nil operable fx output") + } +} + +func TestOperableOutputVerify(t *testing.T) { + oo := &OperableOutput{ + Out: &testVerifiable{}, + } + if err := oo.Verify(); err != nil { + t.Fatal(err) + } + if oo.Output() != oo.Out { + t.Fatalf("Should have returned the fx output") + } +} + +func TestOperableOutputSorting(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&TestTransferable{}) + c.RegisterType(&testVerifiable{}) + + outs := []*OperableOutput{ + &OperableOutput{ + Out: &TestTransferable{Val: 1}, + }, + &OperableOutput{ + Out: &TestTransferable{Val: 0}, + }, + &OperableOutput{ + Out: &TestTransferable{Val: 0}, + }, + &OperableOutput{ + Out: &testVerifiable{}, + }, + } + + if isSortedOperableOutputs(outs, c) { + t.Fatalf("Shouldn't be sorted") + } + sortOperableOutputs(outs, c) + if !isSortedOperableOutputs(outs, c) { + t.Fatalf("Should be sorted") + } + if result := outs[0].Out.(*TestTransferable).Val; result != 0 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if result := outs[1].Out.(*TestTransferable).Val; result != 0 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if result := outs[2].Out.(*TestTransferable).Val; result != 1 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if _, ok := outs[3].Out.(*testVerifiable); !ok { + t.Fatalf("testVerifiable expected") + } +} + +func TestOperableInputVerifyNil(t *testing.T) { + oi := (*OperableInput)(nil) + if err := oi.Verify(); err == nil { + t.Fatalf("Should have errored due to nil operable input") 
+ } +} + +func TestOperableInputVerifyNilFx(t *testing.T) { + oi := &OperableInput{} + if err := oi.Verify(); err == nil { + t.Fatalf("Should have errored due to nil operable fx input") + } +} + +func TestOperableInputVerify(t *testing.T) { + oi := &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + }, + In: &testVerifiable{}, + } + if err := oi.Verify(); err != nil { + t.Fatal(err) + } + if oi.Input() != oi.In { + t.Fatalf("Should have returned the fx input") + } +} + +func TestOperableInputSorting(t *testing.T) { + ins := []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{1}), + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + In: &testVerifiable{}, + }, + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{1}), + OutputIndex: 0, + }, + In: &testVerifiable{}, + }, + } + if isSortedAndUniqueOperableInputs(ins) { + t.Fatalf("Shouldn't be sorted") + } + sortOperableInputs(ins) + if !isSortedAndUniqueOperableInputs(ins) { + t.Fatalf("Should be sorted") + } + if result := ins[0].OutputIndex; result != 0 { + t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) + } + if result := ins[1].OutputIndex; result != 1 { + t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) + } + if result := ins[2].OutputIndex; result != 0 { + t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) + } + if result := ins[3].OutputIndex; result != 1 { + t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) + } + if result := ins[0].TxID; !result.Equals(ids.Empty) { + t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) + } + if result := ins[0].TxID; !result.Equals(ids.Empty) { + t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) + } + ins = append(ins, &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, 
+ OutputIndex: 1, + }, + In: &testVerifiable{}, + }) + if isSortedAndUniqueOperableInputs(ins) { + t.Fatalf("Shouldn't be unique") + } +} diff --git a/vms/avm/operation.go b/vms/avm/operation.go new file mode 100644 index 0000000..516e8fa --- /dev/null +++ b/vms/avm/operation.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errNilOperation = errors.New("nil operation is not valid") + errEmptyOperation = errors.New("empty operation is not valid") +) + +// Operation ... +type Operation struct { + Asset `serialize:"true"` + + Ins []*OperableInput `serialize:"true"` + Outs []*OperableOutput `serialize:"true"` +} + +// Verify implements the verify.Verifiable interface +func (op *Operation) Verify(c codec.Codec) error { + switch { + case op == nil: + return errNilOperation + case len(op.Ins) == 0 && len(op.Outs) == 0: + return errEmptyOperation + } + + for _, in := range op.Ins { + if err := in.Verify(); err != nil { + return err + } + } + if !isSortedAndUniqueOperableInputs(op.Ins) { + return errInputsNotSortedUnique + } + + for _, out := range op.Outs { + if err := out.Verify(); err != nil { + return err + } + } + if !isSortedOperableOutputs(op.Outs, c) { + return errOutputsNotSorted + } + + return op.Asset.Verify() +} + +type innerSortOperation struct { + ops []*Operation + codec codec.Codec +} + +func (ops *innerSortOperation) Less(i, j int) bool { + iOp := ops.ops[i] + jOp := ops.ops[j] + + iBytes, err := ops.codec.Marshal(iOp) + if err != nil { + return false + } + jBytes, err := ops.codec.Marshal(jOp) + if err != nil { + return false + } + return bytes.Compare(iBytes, jBytes) == -1 +} +func (ops *innerSortOperation) Len() int { return len(ops.ops) } +func (ops *innerSortOperation) Swap(i, j int) { o := ops.ops; o[j], o[i] = o[i], o[j] } 
+ +func sortOperations(ops []*Operation, c codec.Codec) { + sort.Sort(&innerSortOperation{ops: ops, codec: c}) +} +func isSortedAndUniqueOperations(ops []*Operation, c codec.Codec) bool { + return utils.IsSortedAndUnique(&innerSortOperation{ops: ops, codec: c}) +} diff --git a/vms/avm/operation_test.go b/vms/avm/operation_test.go new file mode 100644 index 0000000..9215448 --- /dev/null +++ b/vms/avm/operation_test.go @@ -0,0 +1,188 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestOperationVerifyNil(t *testing.T) { + c := codec.NewDefault() + op := (*Operation)(nil) + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to nil operation") + } +} + +func TestOperationVerifyEmpty(t *testing.T) { + c := codec.NewDefault() + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + } + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to empty operation") + } +} + +func TestOperationVerifyInvalidInput(t *testing.T) { + c := codec.NewDefault() + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Ins: []*OperableInput{ + &OperableInput{}, + }, + } + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to an invalid input") + } +} + +func TestOperationVerifyInvalidOutput(t *testing.T) { + c := codec.NewDefault() + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Outs: []*OperableOutput{ + &OperableOutput{}, + }, + } + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to an invalid output") + } +} + +func TestOperationVerifyInputsNotSorted(t *testing.T) { + c := codec.NewDefault() + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + 
&OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + In: &testVerifiable{}, + }, + }, + } + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to unsorted inputs") + } +} + +func TestOperationVerifyOutputsNotSorted(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&TestTransferable{}) + + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Outs: []*OperableOutput{ + &OperableOutput{ + Out: &TestTransferable{Val: 1}, + }, + &OperableOutput{ + Out: &TestTransferable{Val: 0}, + }, + }, + } + if err := op.Verify(c); err == nil { + t.Fatalf("Should have errored due to unsorted outputs") + } +} + +func TestOperationVerify(t *testing.T) { + c := codec.NewDefault() + op := &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Outs: []*OperableOutput{ + &OperableOutput{ + Out: &testVerifiable{}, + }, + }, + } + if err := op.Verify(c); err != nil { + t.Fatal(err) + } +} + +func TestOperationSorting(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&testVerifiable{}) + + ops := []*Operation{ + &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + }, + }, + &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + In: &testVerifiable{}, + }, + }, + }, + } + if isSortedAndUniqueOperations(ops, c) { + t.Fatalf("Shouldn't be sorted") + } + sortOperations(ops, c) + if !isSortedAndUniqueOperations(ops, c) { + t.Fatalf("Should be sorted") + } + ops = append(ops, &Operation{ + Asset: Asset{ + ID: ids.Empty, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + }, + }) + if isSortedAndUniqueOperations(ops, c) { + t.Fatalf("Shouldn't be unique") + } +} diff --git a/vms/avm/operation_tx.go 
b/vms/avm/operation_tx.go new file mode 100644 index 0000000..07d8947 --- /dev/null +++ b/vms/avm/operation_tx.go @@ -0,0 +1,195 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errOperationsNotSortedUnique = errors.New("operations not sorted and unique") + + errDoubleSpend = errors.New("inputs attempt to double spend an input") +) + +// OperationTx is a transaction with no credentials. +type OperationTx struct { + BaseTx `serialize:"true"` + Ops []*Operation `serialize:"true"` +} + +// Operations track which ops this transaction is performing. The returned array +// should not be modified. +func (t *OperationTx) Operations() []*Operation { return t.Ops } + +// InputUTXOs track which UTXOs this transaction is consuming. +func (t *OperationTx) InputUTXOs() []*UTXOID { + utxos := t.BaseTx.InputUTXOs() + for _, op := range t.Ops { + for _, in := range op.Ins { + utxos = append(utxos, &in.UTXOID) + } + } + return utxos +} + +// AssetIDs returns the IDs of the assets this transaction depends on +func (t *OperationTx) AssetIDs() ids.Set { + assets := t.BaseTx.AssetIDs() + for _, op := range t.Ops { + assets.Add(op.AssetID()) + } + return assets +} + +// UTXOs returns the UTXOs transaction is producing. +func (t *OperationTx) UTXOs() []*UTXO { + txID := t.ID() + utxos := t.BaseTx.UTXOs() + + for _, op := range t.Ops { + asset := op.AssetID() + for _, out := range op.Outs { + utxos = append(utxos, &UTXO{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: uint32(len(utxos)), + }, + Asset: Asset{ + ID: asset, + }, + Out: out.Out, + }) + } + } + + return utxos +} + +// SyntacticVerify that this transaction is well-formed. 
+func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { + switch { + case t == nil: + return errNilTx + } + + if err := t.BaseTx.SyntacticVerify(ctx, c, numFxs); err != nil { + return err + } + + inputs := ids.Set{} + for _, in := range t.Ins { + inputs.Add(in.InputID()) + } + + for _, op := range t.Ops { + if err := op.Verify(c); err != nil { + return err + } + for _, in := range op.Ins { + inputID := in.InputID() + if inputs.Contains(inputID) { + return errDoubleSpend + } + inputs.Add(inputID) + } + } + if !isSortedAndUniqueOperations(t.Ops, c) { + return errOperationsNotSortedUnique + } + return nil +} + +// SemanticVerify that this transaction is well-formed. +func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { + if err := t.BaseTx.SemanticVerify(vm, uTx, creds); err != nil { + return err + } + offset := len(t.BaseTx.Ins) + for _, op := range t.Ops { + opAssetID := op.AssetID() + + utxos := []interface{}{} + ins := []interface{}{} + credIntfs := []interface{}{} + outs := []interface{}{} + + for i, in := range op.Ins { + ins = append(ins, in.In) + + cred := creds[i+offset] + credIntfs = append(credIntfs, cred.Cred) + + utxoID := in.InputID() + utxo, err := vm.state.UTXO(utxoID) + if err == nil { + utxoAssetID := utxo.AssetID() + if !utxoAssetID.Equals(opAssetID) { + return errAssetIDMismatch + } + + utxos = append(utxos, utxo.Out) + continue + } + + inputTx, inputIndex := in.InputSource() + parent := UniqueTx{ + vm: vm, + txID: inputTx, + } + + if err := parent.Verify(); err != nil { + return errMissingUTXO + } else if status := parent.Status(); status.Decided() { + return errMissingUTXO + } + + parentUTXOs := parent.UTXOs() + + if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { + return errInvalidUTXO + } + + utxo = parentUTXOs[int(inputIndex)] + + utxoAssetID := utxo.AssetID() + if !utxoAssetID.Equals(opAssetID) { + return errAssetIDMismatch + } + utxos = append(utxos, 
utxo.Out) + } + offset += len(op.Ins) + for _, out := range op.Outs { + outs = append(outs, out.Out) + } + + var fxObj interface{} + switch { + case len(ins) > 0: + fxObj = ins[0] + case len(outs) > 0: + fxObj = outs[0] + } + + fxIndex, err := vm.getFx(fxObj) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + if !vm.verifyFxUsage(fxIndex, opAssetID) { + return errIncompatibleFx + } + + err = fx.VerifyOperation(uTx, utxos, ins, credIntfs, outs) + if err != nil { + return err + } + } + return nil +} diff --git a/vms/avm/prefixed_state.go b/vms/avm/prefixed_state.go new file mode 100644 index 0000000..1314857 --- /dev/null +++ b/vms/avm/prefixed_state.go @@ -0,0 +1,155 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" +) + +const ( + txID uint64 = iota + utxoID + txStatusID + fundsID + dbInitializedID +) + +var ( + dbInitialized = ids.Empty.Prefix(dbInitializedID) +) + +// prefixedState wraps a state object. By prefixing the state, there will be no +// collisions between different types of objects that have the same hash. +type prefixedState struct { + state *state + + tx, utxo, txStatus, funds cache.Cacher + uniqueTx cache.Deduplicator +} + +// UniqueTx de-duplicates the transaction. +func (s *prefixedState) UniqueTx(tx *UniqueTx) *UniqueTx { + return s.uniqueTx.Deduplicate(tx).(*UniqueTx) +} + +// Tx attempts to load a transaction from storage. +func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(s.uniqueID(id, txID, s.tx)) } + +// SetTx saves the provided transaction to storage. +func (s *prefixedState) SetTx(id ids.ID, tx *Tx) error { + return s.state.SetTx(s.uniqueID(id, txID, s.tx), tx) +} + +// UTXO attempts to load a utxo from storage. 
+func (s *prefixedState) UTXO(id ids.ID) (*UTXO, error) { + return s.state.UTXO(s.uniqueID(id, utxoID, s.utxo)) +} + +// SetUTXO saves the provided utxo to storage. +func (s *prefixedState) SetUTXO(id ids.ID, utxo *UTXO) error { + return s.state.SetUTXO(s.uniqueID(id, utxoID, s.utxo), utxo) +} + +// Status returns the status of the provided transaction id from storage. +func (s *prefixedState) Status(id ids.ID) (choices.Status, error) { + return s.state.Status(s.uniqueID(id, txStatusID, s.txStatus)) +} + +// SetStatus saves the provided status to storage. +func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { + return s.state.SetStatus(s.uniqueID(id, txStatusID, s.txStatus), status) +} + +// DBInitialized returns the status of this database. If the database is +// uninitialized, the status will be unknown. +func (s *prefixedState) DBInitialized() (choices.Status, error) { return s.state.Status(dbInitialized) } + +// SetDBInitialized saves the provided status of the database. +func (s *prefixedState) SetDBInitialized(status choices.Status) error { + return s.state.SetStatus(dbInitialized, status) +} + +// Funds returns the mapping from the 32 byte representation of an address to a +// list of utxo IDs that reference the address. +func (s *prefixedState) Funds(id ids.ID) ([]ids.ID, error) { + return s.state.IDs(s.uniqueID(id, fundsID, s.funds)) +} + +// SetFunds saves the mapping from address to utxo IDs to storage. +func (s *prefixedState) SetFunds(id ids.ID, idSlice []ids.ID) error { + return s.state.SetIDs(s.uniqueID(id, fundsID, s.funds), idSlice) +} + +func (s *prefixedState) uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + +// SpendUTXO consumes the provided utxo. 
+func (s *prefixedState) SpendUTXO(utxoID ids.ID) error { + utxo, err := s.UTXO(utxoID) + if err != nil { + return err + } + if err := s.SetUTXO(utxoID, nil); err != nil { + return err + } + + addressable, ok := utxo.Out.(FxAddressable) + if !ok { + return nil + } + + return s.removeUTXO(addressable.Addresses(), utxoID) +} + +func (s *prefixedState) removeUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...) + utxos.Remove(utxoID) + if err := s.SetFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} + +// FundUTXO adds the provided utxo to the database +func (s *prefixedState) FundUTXO(utxo *UTXO) error { + utxoID := utxo.InputID() + if err := s.SetUTXO(utxoID, utxo); err != nil { + return err + } + + addressable, ok := utxo.Out.(FxAddressable) + if !ok { + return nil + } + + return s.addUTXO(addressable.Addresses(), utxoID) +} + +func (s *prefixedState) addUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...) + utxos.Add(utxoID) + if err := s.SetFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} diff --git a/vms/avm/prefixed_state_test.go b/vms/avm/prefixed_state_test.go new file mode 100644 index 0000000..2b5d739 --- /dev/null +++ b/vms/avm/prefixed_state_test.go @@ -0,0 +1,179 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestPrefixedSetsAndGets(t *testing.T) { + vm := GenesisVM(t) + state := vm.state + + vm.codec.RegisterType(&testVerifiable{}) + + utxo := &UTXO{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + Out: &testVerifiable{}, + } + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if err := state.SetUTXO(ids.Empty, utxo); err != nil { + t.Fatal(err) + } + if err := state.SetTx(ids.Empty, tx); err != nil { + t.Fatal(err) + } + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + resultUTXO, err := state.UTXO(ids.Empty) + if err != nil { + t.Fatal(err) + } + resultTx, err := state.Tx(ids.Empty) + if err != nil { + t.Fatal(err) + } + resultStatus, err := state.Status(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if resultUTXO.OutputIndex != 1 { + t.Fatalf("Wrong UTXO 
returned") + } + if !resultTx.ID().Equals(tx.ID()) { + t.Fatalf("Wrong Tx returned") + } + if resultStatus != choices.Accepted { + t.Fatalf("Wrong Status returned") + } +} + +func TestPrefixedFundingNoAddresses(t *testing.T) { + vm := GenesisVM(t) + state := vm.state + + vm.codec.RegisterType(&testVerifiable{}) + + utxo := &UTXO{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + Out: &testVerifiable{}, + } + + if err := state.FundUTXO(utxo); err != nil { + t.Fatal(err) + } + if err := state.SpendUTXO(utxo.InputID()); err != nil { + t.Fatal(err) + } +} + +func TestPrefixedFundingAddresses(t *testing.T) { + vm := GenesisVM(t) + state := vm.state + + vm.codec.RegisterType(&testAddressable{}) + + utxo := &UTXO{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + Out: &testAddressable{ + Addrs: [][]byte{ + []byte{0}, + }, + }, + } + + if err := state.FundUTXO(utxo); err != nil { + t.Fatal(err) + } + funds, err := state.Funds(ids.NewID(hashing.ComputeHash256Array([]byte{0}))) + if err != nil { + t.Fatal(err) + } + if len(funds) != 1 { + t.Fatalf("Should have returned 1 utxoIDs") + } + if utxoID := funds[0]; !utxoID.Equals(utxo.InputID()) { + t.Fatalf("Returned wrong utxoID") + } + if err := state.SpendUTXO(utxo.InputID()); err != nil { + t.Fatal(err) + } + _, err = state.Funds(ids.NewID(hashing.ComputeHash256Array([]byte{0}))) + if err == nil { + t.Fatalf("Should have returned no utxoIDs") + } +} diff --git a/vms/avm/service.go b/vms/avm/service.go new file mode 100644 index 0000000..36408be --- /dev/null +++ b/vms/avm/service.go @@ -0,0 +1,991 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errUnknownAssetID = errors.New("unknown asset ID") + errTxNotCreateAsset = errors.New("transaction doesn't create an asset") + errNoHolders = errors.New("initialHolders must not be empty") + errNoMinters = errors.New("no minters provided") + errInvalidAmount = errors.New("amount must be positive") + errSpendOverflow = errors.New("spent amount overflows uint64") + errInvalidMintAmount = errors.New("amount minted must be positive") + errAddressesCantMintAsset = errors.New("provided addresses don't have the authority to mint the provided asset") + errCanOnlySignSingleInputTxs = errors.New("can only sign transactions with one input") + errUnknownUTXO = errors.New("unknown utxo") + errInvalidUTXO = errors.New("invalid utxo") + errUnknownOutputType = errors.New("unknown output type") + errUnneededAddress = errors.New("address not required to sign") + errUnknownCredentialType = errors.New("unknown credential type") +) + +// Service defines the base service for the asset vm +type Service struct{ vm *VM } + +// IssueTxArgs are arguments for passing into IssueTx requests +type IssueTxArgs struct { + Tx formatting.CB58 `json:"tx"` +} + +// IssueTxReply defines the IssueTx replies returned from the API +type IssueTxReply struct { + TxID ids.ID `json:"txID"` +} + +// IssueTx attempts to issue a transaction into consensus +func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { + service.vm.ctx.Log.Verbo("IssueTx called with 
%s", args.Tx) + + txID, err := service.vm.IssueTx(args.Tx.Bytes) + if err != nil { + return err + } + + reply.TxID = txID + return nil +} + +// GetTxStatusArgs are arguments for passing into GetTxStatus requests +type GetTxStatusArgs struct { + TxID ids.ID `json:"txID"` +} + +// GetTxStatusReply defines the GetTxStatus replies returned from the API +type GetTxStatusReply struct { + Status choices.Status `json:"status"` +} + +// GetTxStatus returns the status of the specified transaction +func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { + service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID) + + if args.TxID.IsZero() { + return errNilTxID + } + + tx := UniqueTx{ + vm: service.vm, + txID: args.TxID, + } + + reply.Status = tx.Status() + return nil +} + +// GetUTXOsArgs are arguments for passing into GetUTXOs requests +type GetUTXOsArgs struct { + Addresses []string `json:"addresses"` +} + +// GetUTXOsReply defines the GetUTXOs replies returned from the API +type GetUTXOsReply struct { + UTXOs []formatting.CB58 `json:"utxos"` +} + +// GetUTXOs creates an empty account with the name passed in +func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *GetUTXOsReply) error { + service.vm.ctx.Log.Verbo("GetUTXOs called with %s", args.Addresses) + + addrSet := ids.Set{} + for _, addr := range args.Addresses { + addrBytes, err := service.vm.Parse(addr) + if err != nil { + return err + } + addrSet.Add(ids.NewID(hashing.ComputeHash256Array(addrBytes))) + } + + utxos, err := service.vm.GetUTXOs(addrSet) + if err != nil { + return err + } + + reply.UTXOs = []formatting.CB58{} + for _, utxo := range utxos { + b, err := service.vm.codec.Marshal(utxo) + if err != nil { + return err + } + reply.UTXOs = append(reply.UTXOs, formatting.CB58{Bytes: b}) + } + return nil +} + +// GetAssetDescriptionArgs are arguments for passing into GetAssetDescription requests +type GetAssetDescriptionArgs struct 
{ + AssetID string `json:"assetID"` +} + +// GetAssetDescriptionReply defines the GetAssetDescription replies returned from the API +type GetAssetDescriptionReply struct { + AssetID ids.ID `json:"assetID"` + Name string `json:"name"` + Symbol string `json:"symbol"` + Denomination json.Uint8 `json:"denomination"` +} + +// GetAssetDescription creates an empty account with the name passed in +func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply *GetAssetDescriptionReply) error { + service.vm.ctx.Log.Verbo("GetAssetDescription called with %s", args.AssetID) + + assetID, err := service.vm.Lookup(args.AssetID) + if err != nil { + assetID, err = ids.FromString(args.AssetID) + if err != nil { + return err + } + } + + tx := &UniqueTx{ + vm: service.vm, + txID: assetID, + } + if status := tx.Status(); !status.Fetched() { + return errUnknownAssetID + } + createAssetTx, ok := tx.t.tx.UnsignedTx.(*CreateAssetTx) + if !ok { + return errTxNotCreateAsset + } + + reply.AssetID = assetID + reply.Name = createAssetTx.Name + reply.Symbol = createAssetTx.Symbol + reply.Denomination = json.Uint8(createAssetTx.Denomination) + + return nil +} + +// GetBalanceArgs are arguments for passing into GetBalance requests +type GetBalanceArgs struct { + Address string `json:"address"` + AssetID string `json:"assetID"` +} + +// GetBalanceReply defines the GetBalance replies returned from the API +type GetBalanceReply struct { + Balance json.Uint64 `json:"balance"` +} + +// GetBalance returns the amount of an asset that an address at least partially owns +func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { + service.vm.ctx.Log.Verbo("GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) + + address, err := service.vm.Parse(args.Address) + if err != nil { + return err + } + + assetID, err := service.vm.Lookup(args.AssetID) + if err != nil { + assetID, err = 
ids.FromString(args.AssetID) + if err != nil { + return err + } + } + + addrSet := ids.Set{} + addrSet.Add(ids.NewID(hashing.ComputeHash256Array(address))) + + utxos, err := service.vm.GetUTXOs(addrSet) + if err != nil { + return err + } + + for _, utxo := range utxos { + if utxo.AssetID().Equals(assetID) { + transferable, ok := utxo.Out.(FxTransferable) + if !ok { + continue + } + amt, err := math.Add64(transferable.Amount(), uint64(reply.Balance)) + if err != nil { + return err + } + reply.Balance = json.Uint64(amt) + } + } + return nil +} + +// CreateFixedCapAssetArgs are arguments for passing into CreateFixedCapAsset requests +type CreateFixedCapAssetArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Name string `json:"name"` + Symbol string `json:"symbol"` + Denomination byte `json:"denomination"` + InitialHolders []*Holder `json:"initialHolders"` +} + +// Holder describes how much an address owns of an asset +type Holder struct { + Amount json.Uint64 `json:"amount"` + Address string `json:"address"` +} + +// CreateFixedCapAssetReply defines the CreateFixedCapAsset replies returned from the API +type CreateFixedCapAssetReply struct { + AssetID ids.ID `json:"assetID"` +} + +// CreateFixedCapAsset returns ID of the newly created asset +func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateFixedCapAssetArgs, reply *CreateFixedCapAssetReply) error { + service.vm.ctx.Log.Verbo("CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", + args.Name, + args.Symbol, + len(args.InitialHolders), + ) + + if len(args.InitialHolders) == 0 { + return errNoHolders + } + + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID + Outs: []verify.Verifiable{}, + } + + tx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + }, + Name: args.Name, + Symbol: args.Symbol, + Denomination: args.Denomination, + States: 
[]*InitialState{ + initialState, + }, + }} + + for _, holder := range args.InitialHolders { + address, err := service.vm.Parse(holder.Address) + if err != nil { + return err + } + addr, err := ids.ToShortID(address) + if err != nil { + return err + } + initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ + Amt: uint64(holder.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }) + } + initialState.Sort(service.vm.codec) + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + assetID, err := service.vm.IssueTx(b) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + + return nil +} + +// CreateVariableCapAssetArgs are arguments for passing into CreateVariableCapAsset requests +type CreateVariableCapAssetArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Name string `json:"name"` + Symbol string `json:"symbol"` + Denomination byte `json:"denomination"` + MinterSets []Owners `json:"minterSets"` +} + +// Owners describes who can perform an action +type Owners struct { + Threshold json.Uint32 `json:"threshold"` + Minters []string `json:"minters"` +} + +// CreateVariableCapAssetReply defines the CreateVariableCapAsset replies returned from the API +type CreateVariableCapAssetReply struct { + AssetID ids.ID `json:"assetID"` +} + +// CreateVariableCapAsset returns ID of the newly created asset +func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateVariableCapAssetArgs, reply *CreateVariableCapAssetReply) error { + service.vm.ctx.Log.Verbo("CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", + args.Name, + args.Symbol, + len(args.MinterSets), + ) + + if len(args.MinterSets) == 0 { + return errNoMinters + } + + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID 
+ Outs: []verify.Verifiable{}, + } + + tx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + }, + Name: args.Name, + Symbol: args.Symbol, + Denomination: args.Denomination, + States: []*InitialState{ + initialState, + }, + }} + + for _, owner := range args.MinterSets { + minter := &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: uint32(owner.Threshold), + }, + } + for _, address := range owner.Minters { + addrBytes, err := service.vm.Parse(address) + if err != nil { + return err + } + addr, err := ids.ToShortID(addrBytes) + if err != nil { + return err + } + minter.Addrs = append(minter.Addrs, addr) + } + ids.SortShortIDs(minter.Addrs) + initialState.Outs = append(initialState.Outs, minter) + } + initialState.Sort(service.vm.codec) + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + assetID, err := service.vm.IssueTx(b) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.AssetID = assetID + + return nil +} + +// CreateAddressArgs are arguments for calling CreateAddress +type CreateAddressArgs struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// CreateAddressReply define the reply from a CreateAddress call +type CreateAddressReply struct { + Address string `json:"address"` +} + +// CreateAddress creates an address for the user [args.Username] +func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs, reply *CreateAddressReply) error { + service.vm.ctx.Log.Verbo("CreateAddress called for user '%s'", args.Username) + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + factory := crypto.FactorySECP256K1R{} + skIntf, err := factory.NewPrivateKey() + if 
err != nil { + return fmt.Errorf("problem generating private key: %w", err) + } + sk := skIntf.(*crypto.PrivateKeySECP256K1R) + + if err := user.SetKey(db, sk); err != nil { + return fmt.Errorf("problem saving private key: %w", err) + } + + addresses, _ := user.Addresses(db) + addresses = append(addresses, ids.NewID(hashing.ComputeHash256Array(sk.PublicKey().Address().Bytes()))) + + if err := user.SetAddresses(db, addresses); err != nil { + return fmt.Errorf("problem saving address: %w", err) + } + + reply.Address = service.vm.Format(sk.PublicKey().Address().Bytes()) + return nil +} + +// ExportKeyArgs are arguments for ExportKey +type ExportKeyArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Address string `json:"address"` +} + +// ExportKeyReply is the response for ExportKey +type ExportKeyReply struct { + // The decrypted PrivateKey for the Address provided in the arguments + PrivateKey formatting.CB58 `json:"privateKey"` +} + +// ExportKey returns a private key from the provided user +func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { + service.vm.ctx.Log.Verbo("ExportKey called for user '%s'", args.Username) + + address, err := service.vm.Parse(args.Address) + if err != nil { + return fmt.Errorf("problem parsing address: %w", err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + sk, err := user.Key(db, ids.NewID(hashing.ComputeHash256Array(address))) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + + reply.PrivateKey.Bytes = sk.Bytes() + return nil +} + +// ImportKeyArgs are arguments for ImportKey +type ImportKeyArgs struct { + Username string `json:"username"` + Password string `json:"password"` + PrivateKey formatting.CB58 `json:"privateKey"` +} + +// ImportKeyReply is the 
response for ImportKey +type ImportKeyReply struct { + // The address controlled by the PrivateKey provided in the arguments + Address string `json:"address"` +} + +// ImportKey adds a private key to the provided user +func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *ImportKeyReply) error { + service.vm.ctx.Log.Verbo("ImportKey called for user '%s'", args.Username) + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving data: %w", err) + } + + user := userState{vm: service.vm} + + factory := crypto.FactorySECP256K1R{} + skIntf, err := factory.ToPrivateKey(args.PrivateKey.Bytes) + if err != nil { + return fmt.Errorf("problem parsing private key %s: %w", args.PrivateKey, err) + } + sk := skIntf.(*crypto.PrivateKeySECP256K1R) + + if err := user.SetKey(db, sk); err != nil { + return fmt.Errorf("problem saving key %w", err) + } + + addresses, _ := user.Addresses(db) + addresses = append(addresses, ids.NewID(hashing.ComputeHash256Array(sk.PublicKey().Address().Bytes()))) + + if err := user.SetAddresses(db, addresses); err != nil { + return fmt.Errorf("problem saving addresses: %w", err) + } + + reply.Address = service.vm.Format(sk.PublicKey().Address().Bytes()) + return nil +} + +// SendArgs are arguments for passing into Send requests +type SendArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Amount json.Uint64 `json:"amount"` + AssetID string `json:"assetID"` + To string `json:"to"` +} + +// SendReply defines the Send replies returned from the API +type SendReply struct { + TxID ids.ID `json:"txID"` +} + +// Send returns the ID of the newly created transaction +func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) error { + service.vm.ctx.Log.Verbo("Send called with username: %s", args.Username) + + if args.Amount == 0 { + return errInvalidAmount + } + + assetID, err := 
service.vm.Lookup(args.AssetID) + if err != nil { + assetID, err = ids.FromString(args.AssetID) + if err != nil { + return fmt.Errorf("asset '%s' not found", args.AssetID) + } + } + + toBytes, err := service.vm.Parse(args.To) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + to, err := ids.ToShortID(toBytes) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addresses, _ := user.Addresses(db) + + addrs := ids.Set{} + addrs.Add(addresses...) + utxos, err := service.vm.GetUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's UTXOs: %w", err) + } + + kc := secp256k1fx.NewKeychain() + for _, addr := range addresses { + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + kc.Add(sk) + } + + amountSpent := uint64(0) + time := service.vm.clock.Unix() + + ins := []*TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(assetID) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(FxTransferable) + if !ok { + continue + } + spent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + return errSpendOverflow + } + amountSpent = spent + + in := &TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: Asset{ID: assetID}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + + if amountSpent >= uint64(args.Amount) { + break + } + } + + if amountSpent < uint64(args.Amount) { + return errInsufficientFunds + } + + sortTransferableInputsWithSigners(ins, keys) + + outs := []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: assetID, + }, + Out: 
&secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }, + } + + if amountSpent > uint64(args.Amount) { + changeAddr := kc.Keys[0].PublicKey().Address() + outs = append(outs, + &TransferableOutput{ + Asset: Asset{ + ID: assetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + }, + }, + ) + } + + sortTransferableOutputs(outs, service.vm.codec) + + tx := Tx{ + UnsignedTx: &BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + Outs: outs, + Ins: ins, + }, + } + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, &Credential{Cred: cred}) + } + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + txID, err := service.vm.IssueTx(b) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} + +type innerSortTransferableInputsWithSigners struct { + ins []*TransferableInput + signers [][]*crypto.PrivateKeySECP256K1R +} + +func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { + iID, iIndex := ins.ins[i].InputSource() + jID, jIndex := ins.ins[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true 
+ case 0: + return iIndex < jIndex + default: + return false + } +} +func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } +func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { + ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] + ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] +} + +func sortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { + sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} +func isSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { + return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} + +// CreateMintTxArgs are arguments for passing into CreateMintTx requests +type CreateMintTxArgs struct { + Amount json.Uint64 `json:"amount"` + AssetID string `json:"assetID"` + To string `json:"to"` + Minters []string `json:"minters"` +} + +// CreateMintTxReply defines the CreateMintTx replies returned from the API +type CreateMintTxReply struct { + Tx formatting.CB58 `json:"tx"` +} + +// CreateMintTx returns the newly created unsigned transaction +func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, reply *CreateMintTxReply) error { + service.vm.ctx.Log.Verbo("CreateMintTx called") + + if args.Amount == 0 { + return errInvalidMintAmount + } + + assetID, err := service.vm.Lookup(args.AssetID) + if err != nil { + assetID, err = ids.FromString(args.AssetID) + if err != nil { + return fmt.Errorf("asset '%s' not found", args.AssetID) + } + } + + toBytes, err := service.vm.Parse(args.To) + if err != nil { + return fmt.Errorf("problem parsing to address '%s': %w", args.To, err) + } + to, err := ids.ToShortID(toBytes) + if err != nil { + return fmt.Errorf("problem parsing to address '%s': %w", args.To, err) + } + + addrs := ids.Set{} + minters := ids.ShortSet{} + for _, minter := range 
args.Minters { + addrBytes, err := service.vm.Parse(minter) + if err != nil { + return fmt.Errorf("problem parsing minter address '%s': %w", minter, err) + } + addr, err := ids.ToShortID(addrBytes) + if err != nil { + return fmt.Errorf("problem parsing minter address '%s': %w", minter, err) + } + addrs.Add(ids.NewID(hashing.ComputeHash256Array(addrBytes))) + minters.Add(addr) + } + + utxos, err := service.vm.GetUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem getting user's UTXOs: %w", err) + } + + for _, utxo := range utxos { + switch out := utxo.Out.(type) { + case *secp256k1fx.MintOutput: + if !utxo.AssetID().Equals(assetID) { + continue + } + sigs := []uint32{} + for i := uint32(0); i < uint32(len(out.Addrs)) && uint32(len(sigs)) < out.Threshold; i++ { + if minters.Contains(out.Addrs[i]) { + sigs = append(sigs, i) + } + } + + if uint32(len(sigs)) != out.Threshold { + continue + } + + tx := Tx{ + UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + }, + Ops: []*Operation{ + &Operation{ + Asset: Asset{ + ID: assetID, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: utxo.UTXOID, + In: &secp256k1fx.MintInput{ + Input: secp256k1fx.Input{ + SigIndices: sigs, + }, + }, + }, + }, + Outs: []*OperableOutput{ + &OperableOutput{ + &secp256k1fx.MintOutput{ + OutputOwners: out.OutputOwners, + }, + }, + &OperableOutput{ + &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }, + }, + }, + }, + }, + } + + txBytes, err := service.vm.codec.Marshal(&tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + reply.Tx.Bytes = txBytes + return nil + } + } + + return errAddressesCantMintAsset +} + +// SignMintTxArgs are arguments for passing into SignMintTx requests +type SignMintTxArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Minter 
string `json:"minter"` + Tx formatting.CB58 `json:"tx"` +} + +// SignMintTxReply defines the SignMintTx replies returned from the API +type SignMintTxReply struct { + Tx formatting.CB58 `json:"tx"` +} + +// SignMintTx returns the newly signed transaction +func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply *SignMintTxReply) error { + service.vm.ctx.Log.Verbo("SignMintTx called") + + minter, err := service.vm.Parse(args.Minter) + if err != nil { + return fmt.Errorf("problem parsing address '%s': %w", args.Minter, err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addr := ids.NewID(hashing.ComputeHash256Array(minter)) + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retriving private key: %w", err) + } + + tx := Tx{} + if err := service.vm.codec.Unmarshal(args.Tx.Bytes, &tx); err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + inputUTXOs := tx.InputUTXOs() + if len(inputUTXOs) != 1 { + return errCanOnlySignSingleInputTxs + } + inputUTXO := inputUTXOs[0] + + inputTxID, utxoIndex := inputUTXO.InputSource() + utx := UniqueTx{ + vm: service.vm, + txID: inputTxID, + } + if !utx.Status().Fetched() { + return errUnknownUTXO + } + utxos := utx.UTXOs() + if uint32(len(utxos)) <= utxoIndex { + return errInvalidUTXO + } + + utxo := utxos[int(utxoIndex)] + + i := -1 + size := 0 + switch out := utxo.Out.(type) { + case *secp256k1fx.MintOutput: + size = int(out.Threshold) + for j, addr := range out.Addrs { + if bytes.Equal(addr.Bytes(), minter) { + i = j + break + } + } + default: + return errUnknownOutputType + } + if i == -1 { + return errUnneededAddress + + } + + if len(tx.Creds) == 0 { + tx.Creds = append(tx.Creds, &Credential{Cred: &secp256k1fx.Credential{}}) + } + + cred := tx.Creds[0] + switch cred := cred.Cred.(type) { + case 
*secp256k1fx.Credential: + if len(cred.Sigs) != size { + cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, size) + } + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + sig, err := sk.Sign(unsignedBytes) + if err != nil { + return fmt.Errorf("problem signing transaction: %w", err) + } + copy(cred.Sigs[i][:], sig) + default: + return errUnknownCredentialType + } + + txBytes, err := service.vm.codec.Marshal(&tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + reply.Tx.Bytes = txBytes + return nil +} diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go new file mode 100644 index 0000000..16be290 --- /dev/null +++ b/vms/avm/service_test.go @@ -0,0 +1,188 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestGetAssetDescription(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + defer vm.Shutdown() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaAssetID := genesisTx.ID() + + s := Service{vm: vm} + + reply := GetAssetDescriptionReply{} + err = s.GetAssetDescription(nil, &GetAssetDescriptionArgs{ + AssetID: avaAssetID.String(), + }, &reply) + if err != nil { + t.Fatal(err) + } + + if reply.Name != "myFixedCapAsset" { + t.Fatalf("Wrong name returned from GetAssetDescription %s", reply.Name) + } + if reply.Symbol != "MFCA" { + t.Fatalf("Wrong name returned from 
GetAssetDescription %s", reply.Symbol) + } +} + +func TestGetBalance(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + defer vm.Shutdown() + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaAssetID := genesisTx.ID() + + s := Service{vm: vm} + + reply := GetBalanceReply{} + err = s.GetBalance(nil, &GetBalanceArgs{ + Address: vm.Format(keys[0].PublicKey().Address().Bytes()), + AssetID: avaAssetID.String(), + }, &reply) + if err != nil { + t.Fatal(err) + } + + if reply.Balance != 300000 { + t.Fatalf("Wrong balance returned from GetBalance %d", reply.Balance) + } +} + +func TestCreateFixedCapAsset(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + defer vm.Shutdown() + + s := Service{vm: vm} + + reply := CreateFixedCapAssetReply{} + err = s.CreateFixedCapAsset(nil, &CreateFixedCapAssetArgs{ + Name: "test asset", + Symbol: "test", + Denomination: 1, + InitialHolders: []*Holder{&Holder{ + Amount: 123456789, + Address: vm.Format(keys[0].PublicKey().Address().Bytes()), + }}, + }, &reply) + if err != nil { + t.Fatal(err) + } + + if reply.AssetID.String() != "27ySRc5CE4obYwkS6kyvj5S8eGxGkr994157Hdo82mKVHTWpUT" { + t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) + } +} + +func TestCreateVariableCapAsset(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + 
make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + defer vm.Shutdown() + + s := Service{vm: vm} + + reply := CreateVariableCapAssetReply{} + err = s.CreateVariableCapAsset(nil, &CreateVariableCapAssetArgs{ + Name: "test asset", + Symbol: "test", + MinterSets: []Owners{ + Owners{ + Threshold: 1, + Minters: []string{ + vm.Format(keys[0].PublicKey().Address().Bytes()), + }, + }, + }, + }, &reply) + if err != nil { + t.Fatal(err) + } + + if reply.AssetID.String() != "2vnRkWvRN3G9JJ7pixBmNdq4pfwRFkpew4kccf27WokYLH9VYY" { + t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) + } +} diff --git a/vms/avm/state.go b/vms/avm/state.go new file mode 100644 index 0000000..b9d045f --- /dev/null +++ b/vms/avm/state.go @@ -0,0 +1,176 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +var ( + errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") +) + +// state is a thin wrapper around a database to provide, caching, serialization, +// and de-serialization. +type state struct { + c cache.Cacher + vm *VM +} + +// Tx attempts to load a transaction from storage. +func (s *state) Tx(id ids.ID) (*Tx, error) { + if txIntf, found := s.c.Get(id); found { + if tx, ok := txIntf.(*Tx); ok { + return tx, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + tx := &Tx{} + if err := s.vm.codec.Unmarshal(bytes, tx); err != nil { + return nil, err + } + tx.Initialize(bytes) + + s.c.Put(id, tx) + return tx, nil +} + +// SetTx saves the provided transaction to storage. 
+func (s *state) SetTx(id ids.ID, tx *Tx) error { + if tx == nil { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + s.c.Put(id, tx) + return s.vm.db.Put(id.Bytes(), tx.Bytes()) +} + +// UTXO attempts to load a utxo from storage. +func (s *state) UTXO(id ids.ID) (*UTXO, error) { + if utxoIntf, found := s.c.Get(id); found { + if utxo, ok := utxoIntf.(*UTXO); ok { + return utxo, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + utxo := &UTXO{} + if err := s.vm.codec.Unmarshal(bytes, utxo); err != nil { + return nil, err + } + + s.c.Put(id, utxo) + return utxo, nil +} + +// SetUTXO saves the provided utxo to storage. +func (s *state) SetUTXO(id ids.ID, utxo *UTXO) error { + if utxo == nil { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + bytes, err := s.vm.codec.Marshal(utxo) + if err != nil { + return err + } + + s.c.Put(id, utxo) + return s.vm.db.Put(id.Bytes(), bytes) +} + +// Status returns a status from storage. +func (s *state) Status(id ids.ID) (choices.Status, error) { + if statusIntf, found := s.c.Get(id); found { + if status, ok := statusIntf.(choices.Status); ok { + return status, nil + } + return choices.Unknown, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return choices.Unknown, err + } + + var status choices.Status + s.vm.codec.Unmarshal(bytes, &status) + + s.c.Put(id, status) + return status, nil +} + +// SetStatus saves a status in storage. 
+func (s *state) SetStatus(id ids.ID, status choices.Status) error { + if status == choices.Unknown { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + s.c.Put(id, status) + + bytes, err := s.vm.codec.Marshal(status) + if err != nil { + return err + } + return s.vm.db.Put(id.Bytes(), bytes) +} + +// IDs returns a slice of IDs from storage +func (s *state) IDs(id ids.ID) ([]ids.ID, error) { + if idsIntf, found := s.c.Get(id); found { + if idSlice, ok := idsIntf.([]ids.ID); ok { + return idSlice, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + idSlice := []ids.ID(nil) + if err := s.vm.codec.Unmarshal(bytes, &idSlice); err != nil { + return nil, err + } + + s.c.Put(id, idSlice) + return idSlice, nil +} + +// SetIDs saves a slice of IDs to the database. +func (s *state) SetIDs(id ids.ID, idSlice []ids.ID) error { + if len(idSlice) == 0 { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + s.c.Put(id, idSlice) + + bytes, err := s.vm.codec.Marshal(idSlice) + if err != nil { + return err + } + + return s.vm.db.Put(id.Bytes(), bytes) +} diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go new file mode 100644 index 0000000..0485a1e --- /dev/null +++ b/vms/avm/state_test.go @@ -0,0 +1,349 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestStateIDs(t *testing.T) { + vm := GenesisVM(t) + state := vm.state.state + + id0 := ids.NewID([32]byte{0xff, 0}) + id1 := ids.NewID([32]byte{0xff, 0}) + id2 := ids.NewID([32]byte{0xff, 0}) + + if _, err := state.IDs(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading ids") + } + + expected := []ids.ID{id0, id1} + if err := state.SetIDs(ids.Empty, expected); err != nil { + t.Fatal(err) + } + + result, err := state.IDs(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if len(result) != len(expected) { + t.Fatalf("Returned the wrong number of ids") + } + + for i, resultID := range result { + expectedID := expected[i] + if !expectedID.Equals(resultID) { + t.Fatalf("Wrong ID returned") + } + } + + expected = []ids.ID{id1, id2} + if err := state.SetIDs(ids.Empty, expected); err != nil { + t.Fatal(err) + } + + result, err = state.IDs(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if len(result) != len(expected) { + t.Fatalf("Returned the wrong number of ids") + } + + for i, resultID := range result { + expectedID := expected[i] + if !expectedID.Equals(resultID) { + t.Fatalf("Wrong ID returned") + } + } + + state.c.Flush() + + result, err = state.IDs(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if len(result) != len(expected) { + t.Fatalf("Returned the wrong number of ids") + } + + for i, resultID := range result { + expectedID := expected[i] + if !expectedID.Equals(resultID) { + t.Fatalf("Wrong ID returned") + } + } + + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + result, err = state.IDs(ids.Empty) + if err == nil { + t.Fatalf("Should have errored during cache lookup") + } + + state.c.Flush() + + result, err = state.IDs(ids.Empty) + if 
err == nil { + t.Fatalf("Should have errored during parsing") + } + + statusResult, err := state.Status(ids.Empty) + if err != nil { + t.Fatal(err) + } + if statusResult != choices.Accepted { + t.Fatalf("Should have returned the %s status", choices.Accepted) + } + + if err := state.SetIDs(ids.Empty, []ids.ID{ids.ID{}}); err == nil { + t.Fatalf("Should have errored during serialization") + } + + if err := state.SetIDs(ids.Empty, []ids.ID{}); err != nil { + t.Fatal(err) + } + + if _, err := state.IDs(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading ids") + } +} + +func TestStateStatuses(t *testing.T) { + vm := GenesisVM(t) + state := vm.state.state + + if _, err := state.Status(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading ids") + } + + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + status, err := state.Status(ids.Empty) + if err != nil { + t.Fatal(err) + } + if status != choices.Accepted { + t.Fatalf("Should have returned the %s status", choices.Accepted) + } + + if err := state.SetIDs(ids.Empty, []ids.ID{ids.Empty}); err != nil { + t.Fatal(err) + } + if _, err := state.Status(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading ids") + } + + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + status, err = state.Status(ids.Empty) + if err != nil { + t.Fatal(err) + } + if status != choices.Accepted { + t.Fatalf("Should have returned the %s status", choices.Accepted) + } + + if err := state.SetStatus(ids.Empty, choices.Unknown); err != nil { + t.Fatal(err) + } + + if _, err := state.Status(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading ids") + } +} + +func TestStateUTXOs(t *testing.T) { + vm := GenesisVM(t) + state := vm.state.state + + vm.codec.RegisterType(&testVerifiable{}) + + if _, err := state.UTXO(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading utxo") + } + + utxo := 
&UTXO{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + Out: &testVerifiable{}, + } + + if err := state.SetUTXO(ids.Empty, utxo); err != nil { + t.Fatal(err) + } + + result, err := state.UTXO(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if result.OutputIndex != 1 { + t.Fatalf("Wrong UTXO returned") + } + + state.c.Flush() + + result, err = state.UTXO(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if result.OutputIndex != 1 { + t.Fatalf("Wrong UTXO returned") + } + + if err := state.SetUTXO(ids.Empty, nil); err != nil { + t.Fatal(err) + } + + if _, err := state.UTXO(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading utxo") + } + + if err := state.SetUTXO(ids.Empty, &UTXO{}); err == nil { + t.Fatalf("Should have errored packing the utxo") + } + + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + if _, err := state.UTXO(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading utxo") + } + + state.c.Flush() + + if _, err := state.UTXO(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading utxo") + } +} + +func TestStateTXs(t *testing.T) { + vm := GenesisVM(t) + state := vm.state.state + + vm.codec.RegisterType(&TestTransferable{}) + + if _, err := state.Tx(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading tx") + } + + tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := 
[crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if err := state.SetTx(ids.Empty, tx); err != nil { + t.Fatal(err) + } + + result, err := state.Tx(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if !result.ID().Equals(tx.ID()) { + t.Fatalf("Wrong Tx returned") + } + + state.c.Flush() + + result, err = state.Tx(ids.Empty) + if err != nil { + t.Fatal(err) + } + + if !result.ID().Equals(tx.ID()) { + t.Fatalf("Wrong Tx returned") + } + + if err := state.SetTx(ids.Empty, nil); err != nil { + t.Fatal(err) + } + + if _, err := state.Tx(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading tx") + } + + if err := state.SetStatus(ids.Empty, choices.Accepted); err != nil { + t.Fatal(err) + } + + if _, err := state.Tx(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading tx") + } + + state.c.Flush() + + if _, err := state.Tx(ids.Empty); err == nil { + t.Fatalf("Should have errored when reading tx") + } +} diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go new file mode 100644 index 0000000..47ef942 --- /dev/null +++ b/vms/avm/static_service.go @@ -0,0 +1,155 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
package avm

import (
	"encoding/json"
	"errors"
	"net/http"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/vms/components/codec"
	"github.com/ava-labs/gecko/vms/secp256k1fx"

	cjson "github.com/ava-labs/gecko/utils/json"
)

var (
	// errUnknownAssetType is returned by BuildGenesis when an initial-state
	// key is neither "fixedCap" nor "variableCap".
	errUnknownAssetType = errors.New("unknown asset type")
)

// StaticService defines the base service for the asset vm
type StaticService struct{}

// BuildGenesisArgs are arguments for BuildGenesis
type BuildGenesisArgs struct {
	// GenesisData maps an asset alias to the definition of the asset to be
	// created at genesis.
	GenesisData map[string]AssetDefinition `json:"genesisData"`
}

// AssetDefinition describes one asset to create at genesis: its metadata plus
// its initial state, keyed by state kind ("fixedCap" entries decode as Holder,
// "variableCap" entries decode as Owners).
type AssetDefinition struct {
	Name         string                   `json:"name"`
	Symbol       string                   `json:"symbol"`
	Denomination cjson.Uint8              `json:"denomination"`
	InitialState map[string][]interface{} `json:"initialState"`
}

// BuildGenesisReply is the reply from BuildGenesis
type BuildGenesisReply struct {
	// Bytes is the codec-serialized genesis state, CB58-encoded.
	Bytes formatting.CB58 `json:"bytes"`
}

// BuildGenesis serializes the asset definitions in [args.GenesisData] into
// the byte representation of this VM's genesis state.
// NOTE(review): the previous comment here described returning "the UTXOs such
// that at least one address in [args.Addresses] is referenced", but there is
// no Addresses argument — that text appears copied from a different endpoint.
+func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + g := Genesis{} + for assetAlias, assetDefinition := range args.GenesisData { + asset := GenesisAsset{ + Alias: assetAlias, + CreateAssetTx: CreateAssetTx{ + BaseTx: BaseTx{ + BCID: ids.Empty, + }, + Name: assetDefinition.Name, + Symbol: assetDefinition.Symbol, + Denomination: byte(assetDefinition.Denomination), + }, + } + for assetType, initialStates := range assetDefinition.InitialState { + switch assetType { + case "fixedCap": + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID + } + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + holder := Holder{} + if err := json.Unmarshal(b, &holder); err != nil { + return err + } + cb58 := formatting.CB58{} + if err := cb58.FromString(holder.Address); err != nil { + return err + } + addr, err := ids.ToShortID(cb58.Bytes) + if err != nil { + return err + } + initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ + Amt: uint64(holder.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }) + } + initialState.Sort(c) + asset.States = append(asset.States, initialState) + case "variableCap": + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID + } + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + owners := Owners{} + if err := json.Unmarshal(b, &owners); err != nil { + return err + } + + out := &secp256k1fx.MintOutput{ + 
OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + } + for _, address := range owners.Minters { + cb58 := formatting.CB58{} + if err := cb58.FromString(address); err != nil { + return err + } + addr, err := ids.ToShortID(cb58.Bytes) + if err != nil { + return err + } + out.Addrs = append(out.Addrs, addr) + } + out.Sort() + + initialState.Outs = append(initialState.Outs, out) + } + initialState.Sort(c) + asset.States = append(asset.States, initialState) + default: + return errUnknownAssetType + } + } + asset.Sort() + g.Txs = append(g.Txs, &asset) + } + g.Sort() + + b, err := c.Marshal(&g) + if err != nil { + return err + } + + reply.Bytes.Bytes = b + return nil +} diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go new file mode 100644 index 0000000..612132e --- /dev/null +++ b/vms/avm/static_service_test.go @@ -0,0 +1,98 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/utils/formatting" +) + +func TestBuildGenesis(t *testing.T) { + ss := StaticService{} + + args := BuildGenesisArgs{GenesisData: map[string]AssetDefinition{ + "asset1": AssetDefinition{ + Name: "myFixedCapAsset", + Symbol: "MFCA", + Denomination: 8, + InitialState: map[string][]interface{}{ + "fixedCap": []interface{}{ + Holder{ + Amount: 100000, + Address: "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", + }, + Holder{ + Amount: 100000, + Address: "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", + }, + Holder{ + Amount: 50000, + Address: "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", + }, + Holder{ + Amount: 50000, + Address: "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", + }, + }, + }, + }, + "asset2": AssetDefinition{ + Name: "myVarCapAsset", + Symbol: "MVCA", + InitialState: map[string][]interface{}{ + "variableCap": []interface{}{ + Owners{ + Threshold: 1, + Minters: []string{ + "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", + "6mxBGnjGDCKgkVe7yfrmvMA7xE7qCv3vv", + }, + }, + Owners{ + 
Threshold: 2, + Minters: []string{ + "6ncQ19Q2U4MamkCYzshhD8XFjfwAWFzTa", + "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", + }, + }, + }, + }, + }, + "asset3": AssetDefinition{ + Name: "myOtherVarCapAsset", + InitialState: map[string][]interface{}{ + "variableCap": []interface{}{ + Owners{ + Threshold: 1, + Minters: []string{ + "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", + }, + }, + }, + }, + }, + }} + reply := BuildGenesisReply{} + err := ss.BuildGenesis(nil, &args, &reply) + if err != nil { + t.Fatal(err) + } + + expected := "1112YAVd1YsJ7JBDMQssciuuu9ySgebznWfmfT8JSw5vUKERtP4WGyitE7z38J8tExNmvK2kuwHsUP3erfcncXBWmJkdnd9nDJoj9tCiQHJmW1pstNQn3zXHdTnw6KJcG8Ro36ahknQkuy9ZSXgnZtpFhqUuwSd7mPj8vzZcqJMXLXorCBfvhwypTbZKogM9tUshyUfngfkg256ZsoU2ufMjhTG14PBBrgJkXD2F38uVSXWvYbubMVWDZbDnUzbyD3Azrs2Hydf8Paio6aNjwfwc1py61oXS5ehC55wiYbKpfzwE4px3bfYBu9yV6rvhivksB56vop9LEo8Pdo71tFAMkhR5toZmYcqRKyLXAnYqonUgmPsyxNwU22as8oscT5dj3Qxy1jsg6bEp6GwQepNqsWufGYx6Hiby2r5hyRZeYdk6xsXMPGBSBWUXhKX3ReTxBnjcrVE2Zc3G9eMvRho1tKzt7ppkutpcQemdDy2dxGryMqaFmPJaTaqcH2vB197KgVFbPgmHZY3ufUdfpVzzHax365pwCmzQD2PQh8hCqEP7rfV5e8uXKQiSynngoNDM4ak145zTpcUaX8htMGinfs45aKQvo5WHcD6ccRnHzc7dyXN8xJRnMznsuRN7D6k66DdbfDYhc2NbVUgXRAF4wSNTtsuZGxCGTEjQyYaoUoJowGXvnxmXAWHvLyMJswNizBeYgw1agRg5qB4AEKX96BFXhJq3MbsBRiypLR6nSuZgPFhCrLdBtstxEC2SPQNuUVWW9Qy68dDWQ3Fxx95n1pnjVru9wDJFoemg2imXRR" + + cb58 := formatting.CB58{} + if err := cb58.FromString(expected); err != nil { + t.Fatal(err) + } + expectedBytes := cb58.Bytes + + if result := reply.Bytes.String(); result != expected { + t.Fatalf("Create genesis returned unexpected bytes:\n\n%s\n\n%s\n\n%s", + reply.Bytes, + formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, + formatting.DumpBytes{Bytes: expectedBytes}, + ) + } +} diff --git a/vms/avm/transferables.go b/vms/avm/transferables.go new file mode 100644 index 0000000..fc5536b --- /dev/null +++ b/vms/avm/transferables.go @@ -0,0 +1,129 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
package avm

import (
	"bytes"
	"errors"
	"sort"

	"github.com/ava-labs/gecko/utils"
	"github.com/ava-labs/gecko/vms/components/codec"
	"github.com/ava-labs/gecko/vms/components/verify"
)

var (
	errNilTransferableOutput   = errors.New("nil transferable output is not valid")
	errNilTransferableFxOutput = errors.New("nil transferable feature extension output is not valid")

	errNilTransferableInput   = errors.New("nil transferable input is not valid")
	errNilTransferableFxInput = errors.New("nil transferable feature extension input is not valid")
)

// TransferableOutput pairs an asset with a feature-extension output that can
// transfer value of that asset.
type TransferableOutput struct {
	Asset `serialize:"true"`

	Out FxTransferable `serialize:"true"`
}

// Output returns the feature extension output that this Output is using.
func (out *TransferableOutput) Output() FxTransferable { return out.Out }

// Verify implements the verify.Verifiable interface. A nil wrapper or a nil
// inner fx output is rejected before delegating to the asset and the output.
func (out *TransferableOutput) Verify() error {
	switch {
	case out == nil:
		return errNilTransferableOutput
	case out.Out == nil:
		return errNilTransferableFxOutput
	default:
		return verify.All(&out.Asset, out.Out)
	}
}

// innerSortTransferableOutputs adapts a slice of outputs to sort.Interface.
// Outputs are ordered first by asset ID bytes, then by the codec-serialized
// bytes of the inner fx output, so the ordering is only meaningful for a
// codec that has all output types registered.
type innerSortTransferableOutputs struct {
	outs  []*TransferableOutput
	codec codec.Codec
}

func (outs *innerSortTransferableOutputs) Less(i, j int) bool {
	iOut := outs.outs[i]
	jOut := outs.outs[j]

	iAssetID := iOut.AssetID()
	jAssetID := jOut.AssetID()

	// Primary key: asset ID byte order.
	switch bytes.Compare(iAssetID.Bytes(), jAssetID.Bytes()) {
	case -1:
		return true
	case 1:
		return false
	}

	// Secondary key: serialized output bytes. A marshal failure is treated
	// as "not less", which leaves the relative order of unserializable
	// outputs unspecified — presumably unreachable for registered types.
	iBytes, err := outs.codec.Marshal(&iOut.Out)
	if err != nil {
		return false
	}
	jBytes, err := outs.codec.Marshal(&jOut.Out)
	if err != nil {
		return false
	}
	return bytes.Compare(iBytes, jBytes) == -1
}
func (outs *innerSortTransferableOutputs) Len() int      { return len(outs.outs) }
func (outs *innerSortTransferableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] }

// sortTransferableOutputs sorts outputs into the canonical order defined by
// innerSortTransferableOutputs under codec [c].
func sortTransferableOutputs(outs []*TransferableOutput, c codec.Codec) {
	sort.Sort(&innerSortTransferableOutputs{outs: outs, codec: c})
}

// isSortedTransferableOutputs reports whether [outs] is already in canonical
// order under codec [c]. Duplicates are allowed.
func isSortedTransferableOutputs(outs []*TransferableOutput, c codec.Codec) bool {
	return sort.IsSorted(&innerSortTransferableOutputs{outs: outs, codec: c})
}

// TransferableInput identifies the UTXO being consumed (UTXOID), the asset it
// holds, and the feature-extension input that authorizes the spend.
type TransferableInput struct {
	UTXOID `serialize:"true"`
	Asset  `serialize:"true"`

	In FxTransferable `serialize:"true"`
}

// Input returns the feature extension input that this Input is using.
func (in *TransferableInput) Input() FxTransferable { return in.In }

// Verify implements the verify.Verifiable interface. A nil wrapper or a nil
// inner fx input is rejected before delegating to the components.
func (in *TransferableInput) Verify() error {
	switch {
	case in == nil:
		return errNilTransferableInput
	case in.In == nil:
		return errNilTransferableFxInput
	default:
		return verify.All(&in.UTXOID, &in.Asset, in.In)
	}
}

// innerSortTransferableInputs orders inputs by (source txID bytes, output
// index). This ordering has no ties for distinct UTXOs, which is what lets
// isSortedAndUniqueTransferableInputs detect duplicates.
type innerSortTransferableInputs []*TransferableInput

func (ins innerSortTransferableInputs) Less(i, j int) bool {
	iID, iIndex := ins[i].InputSource()
	jID, jIndex := ins[j].InputSource()

	switch bytes.Compare(iID.Bytes(), jID.Bytes()) {
	case -1:
		return true
	case 0:
		return iIndex < jIndex
	default:
		return false
	}
}
func (ins innerSortTransferableInputs) Len() int      { return len(ins) }
func (ins innerSortTransferableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] }

// sortTransferableInputs sorts inputs into canonical (txID, index) order.
func sortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) }

// isSortedAndUniqueTransferableInputs reports whether [ins] is strictly
// increasing — i.e. sorted with no duplicate UTXO references.
func isSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool {
	return utils.IsSortedAndUnique(innerSortTransferableInputs(ins))
}
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTransferableOutputVerifyNil(t *testing.T) { + to := (*TransferableOutput)(nil) + if err := to.Verify(); err == nil { + t.Fatalf("Should have errored due to nil transferable output") + } +} + +func TestTransferableOutputVerifyNilFx(t *testing.T) { + to := &TransferableOutput{ + Asset: Asset{ + ID: ids.Empty, + }, + } + if err := to.Verify(); err == nil { + t.Fatalf("Should have errored due to nil transferable fx output") + } +} + +func TestTransferableOutputVerify(t *testing.T) { + to := &TransferableOutput{ + Asset: Asset{ + ID: ids.Empty, + }, + Out: &TestTransferable{ + Val: 1, + }, + } + if err := to.Verify(); err != nil { + t.Fatal(err) + } + if to.Output() != to.Out { + t.Fatalf("Should have returned the fx output") + } +} + +func TestTransferableOutputSorting(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&TestTransferable{}) + + outs := []*TransferableOutput{ + &TransferableOutput{ + Asset: Asset{ + ID: ids.NewID([32]byte{1}), + }, + Out: &TestTransferable{Val: 1}, + }, + &TransferableOutput{ + Asset: Asset{ + ID: ids.Empty, + }, + Out: &TestTransferable{Val: 1}, + }, + &TransferableOutput{ + Asset: Asset{ + ID: ids.NewID([32]byte{1}), + }, + Out: &TestTransferable{Val: 0}, + }, + &TransferableOutput{ + Asset: Asset{ + ID: ids.Empty, + }, + Out: &TestTransferable{Val: 0}, + }, + &TransferableOutput{ + Asset: Asset{ + ID: ids.Empty, + }, + Out: &TestTransferable{Val: 0}, + }, + } + + if isSortedTransferableOutputs(outs, c) { + t.Fatalf("Shouldn't be sorted") + } + sortTransferableOutputs(outs, c) + if !isSortedTransferableOutputs(outs, c) { + t.Fatalf("Should be sorted") + } + if result := outs[0].Out.(*TestTransferable).Val; result != 0 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if 
result := outs[1].Out.(*TestTransferable).Val; result != 0 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if result := outs[2].Out.(*TestTransferable).Val; result != 1 { + t.Fatalf("Val expected: %d ; result: %d", 0, result) + } + if result := outs[3].AssetID(); !result.Equals(ids.NewID([32]byte{1})) { + t.Fatalf("Val expected: %s ; result: %s", ids.NewID([32]byte{1}), result) + } + if result := outs[4].AssetID(); !result.Equals(ids.NewID([32]byte{1})) { + t.Fatalf("Val expected: %s ; result: %s", ids.NewID([32]byte{1}), result) + } +} + +func TestTransferableOutputSerialization(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&secp256k1fx.TransferOutput{}) + + expected := []byte{ + // assetID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // output: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + + out := &TransferableOutput{ + Asset: Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 
0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), + }, + }, + }, + } + + outBytes, err := c.Marshal(out) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(outBytes, expected) { + t.Fatalf("Expected:\n%s\nResult:\n%s", + formatting.DumpBytes{Bytes: expected}, + formatting.DumpBytes{Bytes: outBytes}, + ) + } +} + +func TestTransferableInputVerifyNil(t *testing.T) { + ti := (*TransferableInput)(nil) + if err := ti.Verify(); err == nil { + t.Fatalf("Should have errored due to nil transferable input") + } +} + +func TestTransferableInputVerifyNilFx(t *testing.T) { + ti := &TransferableInput{ + UTXOID: UTXOID{TxID: ids.Empty}, + Asset: Asset{ID: ids.Empty}, + } + if err := ti.Verify(); err == nil { + t.Fatalf("Should have errored due to nil transferable fx input") + } +} + +func TestTransferableInputVerify(t *testing.T) { + ti := &TransferableInput{ + UTXOID: UTXOID{TxID: ids.Empty}, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + } + if err := ti.Verify(); err != nil { + t.Fatal(err) + } + if ti.Input() != ti.In { + t.Fatalf("Should have returned the fx input") + } +} + +func TestTransferableInputSorting(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&TestTransferable{}) + + ins := []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{1}), + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + }, + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{1}), + OutputIndex: 0, + }, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + }, + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + }, + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + }, + } + + if isSortedAndUniqueTransferableInputs(ins) { + 
t.Fatalf("Shouldn't be sorted") + } + sortTransferableInputs(ins) + if !isSortedAndUniqueTransferableInputs(ins) { + t.Fatalf("Should be sorted") + } + + ins = append(ins, &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: Asset{ID: ids.Empty}, + In: &TestTransferable{}, + }) + + if isSortedAndUniqueTransferableInputs(ins) { + t.Fatalf("Shouldn't be unique") + } +} + +func TestTransferableInputSerialization(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&secp256k1fx.TransferInput{}) + + expected := []byte{ + // txID: + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + // utxoIndex: + 0x00, 0x00, 0x00, 0x05, + // assetID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // input: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, + } + + in := &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.NewID([32]byte{ + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + }), + OutputIndex: 5, + }, + Asset: Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + In: &secp256k1fx.TransferInput{ + Amt: 123456789, + Input: secp256k1fx.Input{ + SigIndices: []uint32{3, 7}, + }, + }, + } + + inBytes, err := c.Marshal(in) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(inBytes, expected) { + 
t.Fatalf("Expected:\n%s\nResult:\n%s", + formatting.DumpBytes{Bytes: expected}, + formatting.DumpBytes{Bytes: inBytes}, + ) + } +} diff --git a/vms/avm/tx.go b/vms/avm/tx.go new file mode 100644 index 0000000..4fced32 --- /dev/null +++ b/vms/avm/tx.go @@ -0,0 +1,83 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") +) + +// UnsignedTx ... +type UnsignedTx interface { + Initialize(bytes []byte) + ID() ids.ID + Bytes() []byte + + NetworkID() uint32 + ChainID() ids.ID + Outputs() []*TransferableOutput + Inputs() []*TransferableInput + + AssetIDs() ids.Set + InputUTXOs() []*UTXOID + UTXOs() []*UTXO + SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error + SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error +} + +// Tx is the core operation that can be performed. The tx uses the UTXO model. +// Specifically, a txs inputs will consume previous txs outputs. A tx will be +// valid if the inputs have the authority to consume the outputs they are +// attempting to consume and the inputs consume sufficient state to produce the +// outputs. +type Tx struct { + UnsignedTx `serialize:"true"` + + Creds []*Credential `serialize:"true"` // The credentials of this transaction +} + +// Credentials describes the authorization that allows the Inputs to consume the +// specified UTXOs. The returned array should not be modified. +func (t *Tx) Credentials() []*Credential { return t.Creds } + +// SyntacticVerify verifies that this transaction is well-formed. 
+func (t *Tx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { + switch { + case t == nil || t.UnsignedTx == nil: + return errNilTx + } + + if err := t.UnsignedTx.SyntacticVerify(ctx, c, numFxs); err != nil { + return err + } + + for _, cred := range t.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numInputs := len(t.InputUTXOs()) + if numInputs != len(t.Creds) { + return errWrongNumberOfCredentials + } + return nil +} + +// SemanticVerify verifies that this transaction is well-formed. +func (t *Tx) SemanticVerify(vm *VM, uTx *UniqueTx) error { + if t == nil { + return errNilTx + } + + return t.UnsignedTx.SemanticVerify(vm, uTx, t.Creds) +} diff --git a/vms/avm/tx_test.go b/vms/avm/tx_test.go new file mode 100644 index 0000000..bc01f36 --- /dev/null +++ b/vms/avm/tx_test.go @@ -0,0 +1,306 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTxNil(t *testing.T) { + c := codec.NewDefault() + tx := (*Tx)(nil) + if err := tx.SyntacticVerify(ctx, c, 1); err == nil { + t.Fatalf("Should have errored due to nil tx") + } +} + +func TestTxEmpty(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + + tx := &Tx{} + if err := tx.SyntacticVerify(ctx, c, 1); err == nil { + t.Fatalf("Should have errored due to nil tx") + } +} + +func TestTxInvalidCredential(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + 
c.RegisterType(&secp256k1fx.Credential{}) + c.RegisterType(&testVerifiable{}) + + tx := &Tx{ + UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}, + Creds: []*Credential{ + &Credential{ + Cred: &testVerifiable{err: errUnneededAddress}, + }, + }, + } + + b, err := c.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if err := tx.SyntacticVerify(ctx, c, 1); err == nil { + t.Fatalf("Tx should have failed due to an invalid credential") + } +} + +func TestTxInvalidUnsignedTx(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + c.RegisterType(&testVerifiable{}) + + tx := &Tx{ + UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}, + Creds: []*Credential{ + &Credential{ + Cred: &testVerifiable{}, + }, + &Credential{ + Cred: &testVerifiable{}, + }, + }, + } + + b, err := 
c.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if err := tx.SyntacticVerify(ctx, c, 1); err == nil { + t.Fatalf("Tx should have failed due to an invalid unsigned tx") + } +} + +func TestTxInvalidNumberOfCredentials(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + c.RegisterType(&testVerifiable{}) + + tx := &Tx{ + UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: Asset{ + ID: asset, + }, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }, + Ops: []*Operation{ + &Operation{ + Asset: Asset{ + ID: asset, + }, + Ins: []*OperableInput{ + &OperableInput{ + UTXOID: UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + In: &testVerifiable{}, + }, + }, + }, + }, + }, + Creds: []*Credential{ + &Credential{ + Cred: &testVerifiable{}, + }, + }, + } + + b, err := c.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if err := tx.SyntacticVerify(ctx, c, 1); err == nil { + t.Fatalf("Tx should have failed due to an invalid unsigned tx") + } +} + +func TestTxDocumentation(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + txBytes := []byte{ + // unsigned transaction: + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, 0xc3, 0x34, 0x41, 0x28, + 0xe0, 0x60, 0x12, 0x8e, 0xde, 0x35, 0x23, 0xa2, + 0x4a, 0x46, 0x1c, 0x89, 0x43, 0xab, 0x08, 0x59, + 0x00, 0x00, 0x00, 0x01, 0xf1, 0xe1, 0xd1, 0xc1, + 0xb1, 0xa1, 0x91, 0x81, 0x71, 0x61, 0x51, 0x41, + 0x31, 0x21, 0x11, 0x01, 0xf0, 0xe0, 0xd0, 0xc0, + 0xb0, 0xa0, 0x90, 0x80, 0x70, 0x60, 0x50, 0x40, + 0x30, 0x20, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, + // number of credentials: + 0x00, 0x00, 0x00, 0x01, + // credential[0]: + 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1e, 0x1d, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2e, 0x2d, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 
0x3d, 0x3e, 0x3f, + 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, + 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5e, 0x5d, + 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6e, 0x6d, + 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, + 0x7f, 0x00, + } + + tx := Tx{} + err := c.Unmarshal(txBytes, &tx) + if err != nil { + t.Fatal(err) + } +} diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go new file mode 100644 index 0000000..6d219f3 --- /dev/null +++ b/vms/avm/unique_tx.go @@ -0,0 +1,276 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +var ( + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errMissingUTXO = errors.New("missing utxo") + errUnknownTx = errors.New("transaction is unknown") + errRejectedTx = errors.New("transaction is rejected") +) + +// UniqueTx provides a de-duplication service for txs. 
This only provides a +// performance boost +type UniqueTx struct { + vm *VM + txID ids.ID + t *txState +} + +type txState struct { + unique, verifiedTx, verifiedState bool + validity error + + tx *Tx + inputs ids.Set + inputUTXOs []*UTXOID + utxos []*UTXO + deps []snowstorm.Tx + + status choices.Status +} + +func (tx *UniqueTx) refresh() { + if tx.t == nil { + tx.t = &txState{} + } + if tx.t.unique { + return + } + unique := tx.vm.state.UniqueTx(tx) + prevTx := tx.t.tx + if unique == tx { + // If no one was in the cache, make sure that there wasn't an + // intermediate object whose state I must reflect + if status, err := tx.vm.state.Status(tx.ID()); err == nil { + tx.t.status = status + tx.t.unique = true + } + } else { + // If someone is in the cache, they must be up to date + + // This ensures that every unique tx object points to the same tx state + tx.t = unique.t + } + + if tx.t.tx != nil { + return + } + + if prevTx == nil { + if innerTx, err := tx.vm.state.Tx(tx.ID()); err == nil { + tx.t.tx = innerTx + } + } else { + tx.t.tx = prevTx + } +} + +// Evict is called when this UniqueTx will no longer be returned from a cache +// lookup +func (tx *UniqueTx) Evict() { tx.t.unique = false } // Lock is already held here + +func (tx *UniqueTx) setStatus(status choices.Status) error { + tx.refresh() + if tx.t.status == status { + return nil + } + tx.t.status = status + return tx.vm.state.SetStatus(tx.ID(), status) +} + +// ID returns the wrapped txID +func (tx *UniqueTx) ID() ids.ID { return tx.txID } + +// Accept is called when the transaction was finalized as accepted by consensus +func (tx *UniqueTx) Accept() { + if err := tx.setStatus(choices.Accepted); err != nil { + tx.vm.ctx.Log.Error("Failed to accept tx %s due to %s", tx.txID, err) + return + } + + // Remove spent utxos + for _, utxoID := range tx.InputIDs().List() { + if err := tx.vm.state.SpendUTXO(utxoID); err != nil { + tx.vm.ctx.Log.Error("Failed to spend utxo %s due to %s", utxoID, err) + return + } + 
} + + // Add new utxos + for _, utxo := range tx.UTXOs() { + if err := tx.vm.state.FundUTXO(utxo); err != nil { + tx.vm.ctx.Log.Error("Failed to fund utxo %s due to %s", utxoID, err) + return + } + } + + txID := tx.ID() + tx.vm.ctx.Log.Verbo("Accepting Tx: %s", txID) + + if err := tx.vm.db.Commit(); err != nil { + tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", tx.txID, err) + } + + tx.vm.pubsub.Publish("accepted", txID) + + tx.t.deps = nil // Needed to prevent a memory leak +} + +// Reject is called when the transaction was finalized as rejected by consensus +func (tx *UniqueTx) Reject() { + if err := tx.setStatus(choices.Rejected); err != nil { + tx.vm.ctx.Log.Error("Failed to reject tx %s due to %s", tx.txID, err) + return + } + + txID := tx.ID() + tx.vm.ctx.Log.Debug("Rejecting Tx: %s", txID) + + if err := tx.vm.db.Commit(); err != nil { + tx.vm.ctx.Log.Error("Failed to commit reject %s due to %s", tx.txID, err) + } + + tx.vm.pubsub.Publish("rejected", txID) + + tx.t.deps = nil // Needed to prevent a memory leak +} + +// Status returns the current status of this transaction +func (tx *UniqueTx) Status() choices.Status { + tx.refresh() + return tx.t.status +} + +// Dependencies returns the set of transactions this transaction builds on +func (tx *UniqueTx) Dependencies() []snowstorm.Tx { + tx.refresh() + if tx.t.tx == nil || len(tx.t.deps) != 0 { + return tx.t.deps + } + + txIDs := ids.Set{} + for _, in := range tx.InputUTXOs() { + txID, _ := in.InputSource() + if !txIDs.Contains(txID) { + txIDs.Add(txID) + tx.t.deps = append(tx.t.deps, &UniqueTx{ + vm: tx.vm, + txID: txID, + }) + } + } + for _, assetID := range tx.t.tx.AssetIDs().List() { + if !txIDs.Contains(assetID) { + txIDs.Add(assetID) + tx.t.deps = append(tx.t.deps, &UniqueTx{ + vm: tx.vm, + txID: assetID, + }) + } + } + return tx.t.deps +} + +// InputIDs returns the set of utxoIDs this transaction consumes +func (tx *UniqueTx) InputIDs() ids.Set { + tx.refresh() + if tx.t.tx == nil || 
// InputUTXOs returns the utxos that will be consumed on tx acceptance,
// cached on the shared state after the first computation.
func (tx *UniqueTx) InputUTXOs() []*UTXOID {
	tx.refresh()
	if tx.t.tx == nil || len(tx.t.inputUTXOs) != 0 {
		return tx.t.inputUTXOs
	}
	tx.t.inputUTXOs = tx.t.tx.InputUTXOs()
	return tx.t.inputUTXOs
}

// UTXOs returns the utxos that will be added to the UTXO set on tx
// acceptance, cached on the shared state after the first computation.
func (tx *UniqueTx) UTXOs() []*UTXO {
	tx.refresh()
	if tx.t.tx == nil || len(tx.t.utxos) != 0 {
		return tx.t.utxos
	}
	tx.t.utxos = tx.t.tx.UTXOs()
	return tx.t.utxos
}

// Bytes returns the binary representation of this transaction.
// NOTE(review): panics if the inner tx could not be loaded (tx.t.tx == nil) —
// callers presumably only invoke this on known transactions; confirm.
func (tx *UniqueTx) Bytes() []byte {
	tx.refresh()
	return tx.t.tx.Bytes()
}

// Verify the validity of this transaction: unknown txs error, accepted txs
// are valid, rejected txs error, and anything in-flight is semantically
// verified.
func (tx *UniqueTx) Verify() error {
	switch status := tx.Status(); status {
	case choices.Unknown:
		return errUnknownTx
	case choices.Accepted:
		return nil
	case choices.Rejected:
		return errRejectedTx
	default:
		return tx.SemanticVerify()
	}
}

// SyntacticVerify verifies that this transaction is well formed. The result
// is computed once and cached in tx.t.validity.
func (tx *UniqueTx) SyntacticVerify() error {
	tx.refresh()

	if tx.t.tx == nil {
		return errUnknownTx
	}

	if tx.t.verifiedTx {
		return tx.t.validity
	}

	tx.t.verifiedTx = true
	tx.t.validity = tx.t.tx.SyntacticVerify(tx.vm.ctx, tx.vm.codec, len(tx.vm.fxs))
	return tx.t.validity
}

// SemanticVerify the validity of this transaction. Syntactic verification
// runs first (its return value is intentionally ignored here — its outcome is
// read back through tx.t.validity); the semantic result is cached, and a
// "verified" event is published on first success.
func (tx *UniqueTx) SemanticVerify() error {
	tx.SyntacticVerify()

	if tx.t.validity != nil || tx.t.verifiedState {
		return tx.t.validity
	}

	tx.t.verifiedState = true
	tx.t.validity = tx.t.tx.SemanticVerify(tx.vm, tx)

	if tx.t.validity == nil {
		tx.vm.pubsub.Publish("verified", tx.ID())
	}
	return tx.t.validity
}

// UnsignedBytes returns the unsigned bytes of the transaction, i.e. the
// codec serialization of the wrapped UnsignedTx without credentials.
func (tx *UniqueTx) UnsignedBytes() []byte {
	b, err := tx.vm.codec.Marshal(&tx.t.tx.UnsignedTx)
	tx.vm.ctx.Log.AssertNoError(err)
	return b
}

package avm

import (
	"github.com/ava-labs/gecko/database"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/hashing"
)

// addresses is the fixed database key under which a user's address list is
// stored.
var addresses = ids.Empty

// userState reads and writes a single user's keystore data through the VM's
// codec.
type userState struct{ vm *VM }

// SetAddresses persists the user's address list under the fixed [addresses]
// key.
func (s *userState) SetAddresses(db database.Database, addrs []ids.ID) error {
	bytes, err := s.vm.codec.Marshal(addrs)
	if err != nil {
		return err
	}
	return db.Put(addresses.Bytes(), bytes)
}

// Addresses loads and decodes the user's address list.
func (s *userState) Addresses(db database.Database) ([]ids.ID, error) {
	bytes, err := db.Get(addresses.Bytes())
	if err != nil {
		return nil, err
	}
	addresses := []ids.ID{}
	if err := s.vm.codec.Unmarshal(bytes, &addresses); err != nil {
		return nil, err
	}
	return addresses, nil
}

// SetKey stores a private key under the SHA-256 hash of its 20-byte address.
// NOTE(review): Key() looks up by a raw 32-byte ids.ID — the scheme only
// works if callers pass the hash of the address as the ID; confirm against
// the callers that build these IDs.
func (s *userState) SetKey(db database.Database, sk *crypto.PrivateKeySECP256K1R) error {
	return db.Put(hashing.ComputeHash256(sk.PublicKey().Address().Bytes()), sk.Bytes())
}

// Key loads the private key stored under [address] (expected to be the
// hashed address — see SetKey) and parses it as a secp256k1 key.
func (s *userState) Key(db database.Database, address ids.ID) (*crypto.PrivateKeySECP256K1R, error) {
	factory := crypto.FactorySECP256K1R{}

	bytes, err := db.Get(address.Bytes())
	if err != nil {
		return nil, err
	}
	sk, err := factory.ToPrivateKey(bytes)
	if err != nil {
		return nil, err
	}
	return sk.(*crypto.PrivateKeySECP256K1R), nil
}
+ +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilUTXO = errors.New("nil utxo is not valid") + errEmptyUTXO = errors.New("empty utxo is not valid") +) + +// UTXO ... +type UTXO struct { + UTXOID `serialize:"true"` + Asset `serialize:"true"` + + Out verify.Verifiable `serialize:"true"` +} + +// Verify implements the verify.Verifiable interface +func (utxo *UTXO) Verify() error { + switch { + case utxo == nil: + return errNilUTXO + case utxo.Out == nil: + return errEmptyUTXO + default: + return verify.All(&utxo.UTXOID, &utxo.Asset, utxo.Out) + } +} diff --git a/vms/avm/utxo_id.go b/vms/avm/utxo_id.go new file mode 100644 index 0000000..9852c5d --- /dev/null +++ b/vms/avm/utxo_id.go @@ -0,0 +1,48 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" +) + +var ( + errNilUTXOID = errors.New("nil utxo ID is not valid") + errNilTxID = errors.New("nil tx ID is not valid") +) + +// UTXOID ... 
+type UTXOID struct { + // Serialized: + TxID ids.ID `serialize:"true"` + OutputIndex uint32 `serialize:"true"` + + // Cached: + id ids.ID +} + +// InputSource returns the source of the UTXO that this input is spending +func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } + +// InputID returns a unique ID of the UTXO that this input is spending +func (utxo *UTXOID) InputID() ids.ID { + if utxo.id.IsZero() { + utxo.id = utxo.TxID.Prefix(uint64(utxo.OutputIndex)) + } + return utxo.id +} + +// Verify implements the verify.Verifiable interface +func (utxo *UTXOID) Verify() error { + switch { + case utxo == nil: + return errNilUTXOID + case utxo.TxID.IsZero(): + return errNilTxID + default: + return nil + } +} diff --git a/vms/avm/utxo_id_test.go b/vms/avm/utxo_id_test.go new file mode 100644 index 0000000..fed513f --- /dev/null +++ b/vms/avm/utxo_id_test.go @@ -0,0 +1,63 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestUTXOIDVerifyNil(t *testing.T) { + utxoID := (*UTXOID)(nil) + + if err := utxoID.Verify(); err == nil { + t.Fatalf("Should have errored due to a nil utxo ID") + } +} + +func TestUTXOIDVerifyEmpty(t *testing.T) { + utxoID := &UTXOID{} + + if err := utxoID.Verify(); err == nil { + t.Fatalf("Should have errored due to an empty utxo ID") + } +} + +func TestUTXOID(t *testing.T) { + c := codec.NewDefault() + + utxoID := UTXOID{ + TxID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + OutputIndex: 0x20212223, + } + + if err := utxoID.Verify(); err != nil { + t.Fatal(err) + } + + bytes, err := c.Marshal(&utxoID) + if err != nil { + t.Fatal(err) + } + + newUTXOID := UTXOID{} + if err := c.Unmarshal(bytes, &newUTXOID); err != nil { + t.Fatal(err) + } + + if err := newUTXOID.Verify(); err != nil { + t.Fatal(err) + } + + if !utxoID.InputID().Equals(newUTXOID.InputID()) { + t.Fatalf("Parsing returned the wrong UTXO ID") + } +} diff --git a/vms/avm/utxo_test.go b/vms/avm/utxo_test.go new file mode 100644 index 0000000..6f043db --- /dev/null +++ b/vms/avm/utxo_test.go @@ -0,0 +1,119 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestUTXOVerifyNil(t *testing.T) { + utxo := (*UTXO)(nil) + + if err := utxo.Verify(); err == nil { + t.Fatalf("Should have errored due to a nil utxo") + } +} + +func TestUTXOVerifyEmpty(t *testing.T) { + utxo := &UTXO{ + UTXOID: UTXOID{TxID: ids.Empty}, + Asset: Asset{ID: ids.Empty}, + } + + if err := utxo.Verify(); err == nil { + t.Fatalf("Should have errored due to an empty utxo") + } +} + +func TestUTXOSerialize(t *testing.T) { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + expected := []byte{ + // txID: + 0xf9, 0x66, 0x75, 0x0f, 0x43, 0x88, 0x67, 0xc3, + 0xc9, 0x82, 0x8d, 0xdc, 0xdb, 0xe6, 0x60, 0xe2, + 0x1c, 0xcd, 0xbb, 0x36, 0xa9, 0x27, 0x69, 0x58, + 0xf0, 0x11, 0xba, 0x47, 0x2f, 0x75, 0xd4, 0xe7, + // utxo index: + 0x00, 0x00, 0x00, 0x00, + // assetID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // output: + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x24, 0x25, 0x26, 0x27, + } + utxo := &UTXO{ + UTXOID: UTXOID{ + 
TxID: ids.NewID([32]byte{ + 0xf9, 0x66, 0x75, 0x0f, 0x43, 0x88, 0x67, 0xc3, + 0xc9, 0x82, 0x8d, 0xdc, 0xdb, 0xe6, 0x60, 0xe2, + 0x1c, 0xcd, 0xbb, 0x36, 0xa9, 0x27, 0x69, 0x58, + 0xf0, 0x11, 0xba, 0x47, 0x2f, 0x75, 0xd4, 0xe7, + }), + OutputIndex: 0, + }, + Asset: Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, + }), + ids.NewShortID([20]byte{ + 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + }), + }, + }, + }, + } + + utxoBytes, err := c.Marshal(utxo) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(utxoBytes, expected) { + t.Fatalf("Expected:\n%s\nResult:\n%s", + formatting.DumpBytes{Bytes: expected}, + formatting.DumpBytes{Bytes: utxoBytes}, + ) + } +} diff --git a/vms/avm/verifiable_test.go b/vms/avm/verifiable_test.go new file mode 100644 index 0000000..65630d2 --- /dev/null +++ b/vms/avm/verifiable_test.go @@ -0,0 +1,24 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +type testVerifiable struct{ err error } + +func (v *testVerifiable) Verify() error { return v.err } + +type TestTransferable struct { + testVerifiable + + Val uint64 `serialize:"true"` +} + +func (t *TestTransferable) Amount() uint64 { return t.Val } + +type testAddressable struct { + TestTransferable `serialize:"true"` + + Addrs [][]byte `serialize:"true"` +} + +func (a *testAddressable) Addresses() [][]byte { return a.Addrs } diff --git a/vms/avm/vm.go b/vms/avm/vm.go new file mode 100644 index 0000000..d1097aa --- /dev/null +++ b/vms/avm/vm.go @@ -0,0 +1,494 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/codec" + + cjson "github.com/ava-labs/gecko/utils/json" +) + +const ( + batchTimeout = time.Second + batchSize = 30 + stateCacheSize = 10000 + idCacheSize = 10000 + txCacheSize = 10000 + addressSep = "-" +) + +var ( + errIncompatibleFx = errors.New("incompatible feature extension") + errUnknownFx = errors.New("unknown feature extension") + errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") + errInvalidAddress = errors.New("invalid address") + errWrongBlockchainID = errors.New("wrong blockchain ID") +) + +// VM implements the avalanche.DAGVM interface +type VM struct { + ids.Aliaser + + // Contains information of where this VM is 
executing + ctx *snow.Context + + // Used to check local time + clock timer.Clock + + codec codec.Codec + + pubsub *cjson.PubSubServer + + // State management + state *prefixedState + + // Transaction issuing + timer *timer.Timer + batchTimeout time.Duration + txs []snowstorm.Tx + toEngine chan<- common.Message + + baseDB database.Database + db *versiondb.Database + + typeToFxIndex map[reflect.Type]int + fxs []*parsedFx +} + +type codecRegistry struct { + index int + typeToFxIndex map[reflect.Type]int + codec codec.Codec +} + +func (cr *codecRegistry) RegisterType(val interface{}) error { + valType := reflect.TypeOf(val) + cr.typeToFxIndex[valType] = cr.index + return cr.codec.RegisterType(val) +} +func (cr *codecRegistry) Marshal(val interface{}) ([]byte, error) { return cr.codec.Marshal(val) } +func (cr *codecRegistry) Unmarshal(b []byte, val interface{}) error { return cr.codec.Unmarshal(b, val) } + +/* + ****************************************************************************** + ******************************** Avalanche API ******************************* + ****************************************************************************** + */ + +// Initialize implements the avalanche.DAGVM interface +func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, +) error { + vm.ctx = ctx + vm.toEngine = toEngine + vm.baseDB = db + vm.db = versiondb.New(db) + vm.typeToFxIndex = map[reflect.Type]int{} + vm.Aliaser.Initialize() + + vm.pubsub = cjson.NewPubSubServer(ctx) + + errs := wrappers.Errs{} + errs.Add( + vm.pubsub.Register("accepted"), + vm.pubsub.Register("rejected"), + vm.pubsub.Register("verified"), + ) + if errs.Errored() { + return errs.Err + } + + vm.state = &prefixedState{ + state: &state{ + c: &cache.LRU{Size: stateCacheSize}, + vm: vm, + }, + + tx: &cache.LRU{Size: idCacheSize}, + utxo: &cache.LRU{Size: idCacheSize}, + txStatus: &cache.LRU{Size: idCacheSize}, + 
funds: &cache.LRU{Size: idCacheSize}, + + uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, + } + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + + vm.fxs = make([]*parsedFx, len(fxs)) + for i, fxContainer := range fxs { + if fxContainer == nil { + return errIncompatibleFx + } + fx, ok := fxContainer.Fx.(Fx) + if !ok { + return errIncompatibleFx + } + vm.fxs[i] = &parsedFx{ + ID: fxContainer.ID, + Fx: fx, + } + vm.codec = &codecRegistry{ + index: i, + typeToFxIndex: vm.typeToFxIndex, + codec: c, + } + if err := fx.Initialize(vm); err != nil { + return err + } + } + + vm.codec = c + + if err := vm.initAliases(genesisBytes); err != nil { + return err + } + + if dbStatus, err := vm.state.DBInitialized(); err != nil || dbStatus == choices.Unknown { + if err := vm.initState(genesisBytes); err != nil { + return err + } + } + + vm.timer = timer.NewTimer(func() { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.FlushTxs() + }) + go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) + vm.batchTimeout = batchTimeout + + return vm.db.Commit() +} + +// Shutdown implements the avalanche.DAGVM interface +func (vm *VM) Shutdown() { + vm.timer.Stop() + if err := vm.baseDB.Close(); err != nil { + vm.ctx.Log.Error("Closing the database failed with %s", err) + } +} + +// CreateHandlers implements the avalanche.DAGVM interface +func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { + rpcServer := rpc.NewServer() + codec := cjson.NewCodec() + rpcServer.RegisterCodec(codec, "application/json") + rpcServer.RegisterCodec(codec, "application/json;charset=UTF-8") + rpcServer.RegisterService(&Service{vm: vm}, "avm") // name this service "avm" + + return map[string]*common.HTTPHandler{ + "": &common.HTTPHandler{Handler: rpcServer}, + "/pubsub": &common.HTTPHandler{LockOptions: common.NoLock, Handler: vm.pubsub}, + } +} + +// CreateStaticHandlers implements the avalanche.DAGVM interface +func (vm *VM) 
CreateStaticHandlers() map[string]*common.HTTPHandler { + newServer := rpc.NewServer() + codec := cjson.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&StaticService{}, "avm") // name this service "avm" + return map[string]*common.HTTPHandler{ + "": &common.HTTPHandler{LockOptions: common.WriteLock, Handler: newServer}, + } +} + +// PendingTxs implements the avalanche.DAGVM interface +func (vm *VM) PendingTxs() []snowstorm.Tx { + vm.timer.Cancel() + + txs := vm.txs + vm.txs = nil + return txs +} + +// ParseTx implements the avalanche.DAGVM interface +func (vm *VM) ParseTx(b []byte) (snowstorm.Tx, error) { return vm.parseTx(b) } + +// GetTx implements the avalanche.DAGVM interface +func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) { + tx := &UniqueTx{ + vm: vm, + txID: txID, + } + // Verify must be called in the case the that tx was flushed from the unique + // cache. + return tx, tx.Verify() +} + +/* + ****************************************************************************** + ********************************** JSON API ********************************** + ****************************************************************************** + */ + +// IssueTx attempts to send a transaction to consensus +func (vm *VM) IssueTx(b []byte) (ids.ID, error) { + tx, err := vm.parseTx(b) + if err != nil { + return ids.ID{}, err + } + if err := tx.Verify(); err != nil { + return ids.ID{}, err + } + vm.issueTx(tx) + return tx.ID(), nil +} + +// GetUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. +func (vm *VM) GetUTXOs(addrs ids.Set) ([]*UTXO, error) { + utxoIDs := ids.Set{} + for _, addr := range addrs.List() { + utxos, _ := vm.state.Funds(addr) + utxoIDs.Add(utxos...) 
+ } + + utxos := []*UTXO{} + for _, utxoID := range utxoIDs.List() { + utxo, err := vm.state.UTXO(utxoID) + if err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + return utxos, nil +} + +/* + ****************************************************************************** + *********************************** Fx API *********************************** + ****************************************************************************** + */ + +// Clock returns a reference to the internal clock of this VM +func (vm *VM) Clock() *timer.Clock { return &vm.clock } + +// Codec returns a reference to the internal codec of this VM +func (vm *VM) Codec() codec.Codec { return vm.codec } + +/* + ****************************************************************************** + ********************************** Timer API ********************************* + ****************************************************************************** + */ + +// FlushTxs into consensus +func (vm *VM) FlushTxs() { + vm.timer.Cancel() + if len(vm.txs) != 0 { + select { + case vm.toEngine <- common.PendingTxs: + default: + vm.ctx.Log.Warn("Delaying issuance of transactions due to contention") + vm.timer.SetTimeoutIn(vm.batchTimeout) + } + } +} + +/* + ****************************************************************************** + ********************************** Helpers *********************************** + ****************************************************************************** + */ + +func (vm *VM) initAliases(genesisBytes []byte) error { + genesis := Genesis{} + if err := vm.codec.Unmarshal(genesisBytes, &genesis); err != nil { + return err + } + + for _, genesisTx := range genesis.Txs { + if len(genesisTx.Outs) != 0 { + return errGenesisAssetMustHaveState + } + + tx := Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + txBytes, err := vm.codec.Marshal(&tx) + if err != nil { + return err + } + tx.Initialize(txBytes) + + txID := tx.ID() + + vm.Alias(txID, genesisTx.Alias) + } 
+ + return nil +} + +func (vm *VM) initState(genesisBytes []byte) error { + genesis := Genesis{} + if err := vm.codec.Unmarshal(genesisBytes, &genesis); err != nil { + return err + } + + for _, genesisTx := range genesis.Txs { + if len(genesisTx.Outs) != 0 { + return errGenesisAssetMustHaveState + } + + tx := Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + txBytes, err := vm.codec.Marshal(&tx) + if err != nil { + return err + } + tx.Initialize(txBytes) + + txID := tx.ID() + + vm.ctx.Log.Info("Initializing with AssetID %s", txID) + + if err := vm.state.SetTx(txID, &tx); err != nil { + return err + } + if err := vm.state.SetStatus(txID, choices.Accepted); err != nil { + return err + } + for _, utxo := range tx.UTXOs() { + if err := vm.state.FundUTXO(utxo); err != nil { + return err + } + } + } + + return vm.state.SetDBInitialized(choices.Processing) +} + +func (vm *VM) parseTx(b []byte) (*UniqueTx, error) { + rawTx := &Tx{} + err := vm.codec.Unmarshal(b, rawTx) + if err != nil { + return nil, err + } + rawTx.Initialize(b) + + tx := &UniqueTx{ + vm: vm, + txID: rawTx.ID(), + t: &txState{ + tx: rawTx, + }, + } + if err := tx.SyntacticVerify(); err != nil { + return nil, err + } + + if tx.Status() == choices.Unknown { + if err := vm.state.SetTx(tx.ID(), tx.t.tx); err != nil { + return nil, err + } + tx.setStatus(choices.Processing) + } + + return tx, nil +} + +func (vm *VM) issueTx(tx snowstorm.Tx) { + vm.txs = append(vm.txs, tx) + switch { + case len(vm.txs) == batchSize: + vm.FlushTxs() + case len(vm.txs) == 1: + vm.timer.SetTimeoutIn(vm.batchTimeout) + } +} + +func (vm *VM) getFx(val interface{}) (int, error) { + valType := reflect.TypeOf(val) + fx, exists := vm.typeToFxIndex[valType] + if !exists { + return 0, errUnknownFx + } + return fx, nil +} + +func (vm *VM) verifyFxUsage(fxID int, assetID ids.ID) bool { + tx := &UniqueTx{ + vm: vm, + txID: assetID, + } + if status := tx.Status(); !status.Fetched() { + return false + } + createAssetTx, ok := 
tx.t.tx.UnsignedTx.(*CreateAssetTx) + if !ok { + return false + } + // TODO: This could be a binary search to import performance... Or perhaps + // make a map + for _, state := range createAssetTx.States { + if state.FxID == uint32(fxID) { + return true + } + } + return false +} + +// Parse ... +func (vm *VM) Parse(addrStr string) ([]byte, error) { + if count := strings.Count(addrStr, addressSep); count != 1 { + return nil, errInvalidAddress + } + addressParts := strings.SplitN(addrStr, addressSep, 2) + bcAlias := addressParts[0] + rawAddr := addressParts[1] + bcID, err := vm.ctx.BCLookup.Lookup(bcAlias) + if err != nil { + bcID, err = ids.FromString(bcAlias) + if err != nil { + return nil, err + } + } + if !bcID.Equals(vm.ctx.ChainID) { + return nil, errWrongBlockchainID + } + cb58 := formatting.CB58{} + err = cb58.FromString(rawAddr) + return cb58.Bytes, err +} + +// Format ... +func (vm *VM) Format(b []byte) string { + var bcAlias string + if alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID); err == nil { + bcAlias = alias + } else { + bcAlias = vm.ctx.ChainID.String() + } + return fmt.Sprintf("%s%s%s", bcAlias, addressSep, formatting.CB58{Bytes: b}) +} diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go new file mode 100644 index 0000000..65c82a5 --- /dev/null +++ b/vms/avm/vm_test.go @@ -0,0 +1,549 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var networkID uint32 = 43110 +var chainID = ids.NewID([32]byte{5, 4, 3, 2, 1}) + +var keys []*crypto.PrivateKeySECP256K1R +var ctx *snow.Context +var asset = ids.NewID([32]byte{1, 2, 3}) + +func init() { + ctx = snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + cb58 := formatting.CB58{} + factory := crypto.FactorySECP256K1R{} + + for _, key := range []string{ + "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", + "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", + "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", + } { + ctx.Log.AssertNoError(cb58.FromString(key)) + pk, err := factory.ToPrivateKey(cb58.Bytes) + ctx.Log.AssertNoError(err) + keys = append(keys, pk.(*crypto.PrivateKeySECP256K1R)) + } +} + +func GetFirstTxFromGenesisTest(genesisBytes []byte, t *testing.T) *Tx { + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + genesis := Genesis{} + if err := c.Unmarshal(genesisBytes, &genesis); err != nil { + t.Fatal(err) + } + + for _, genesisTx := range genesis.Txs { + if len(genesisTx.Outs) != 0 { + t.Fatal("genesis tx can't have non-new assets") + } + + tx := Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + txBytes, err := c.Marshal(&tx) + if 
err != nil { + t.Fatal(err) + } + tx.Initialize(txBytes) + + return &tx + } + + t.Fatal("genesis tx didn't have any txs") + return nil +} + +func BuildGenesisTest(t *testing.T) []byte { + ss := StaticService{} + + addr0 := keys[0].PublicKey().Address() + addr1 := keys[1].PublicKey().Address() + addr2 := keys[2].PublicKey().Address() + + args := BuildGenesisArgs{GenesisData: map[string]AssetDefinition{ + "asset1": AssetDefinition{ + Name: "myFixedCapAsset", + Symbol: "MFCA", + InitialState: map[string][]interface{}{ + "fixedCap": []interface{}{ + Holder{ + Amount: 100000, + Address: addr0.String(), + }, + Holder{ + Amount: 100000, + Address: addr0.String(), + }, + Holder{ + Amount: 50000, + Address: addr0.String(), + }, + Holder{ + Amount: 50000, + Address: addr0.String(), + }, + }, + }, + }, + "asset2": AssetDefinition{ + Name: "myVarCapAsset", + Symbol: "MVCA", + InitialState: map[string][]interface{}{ + "variableCap": []interface{}{ + Owners{ + Threshold: 1, + Minters: []string{ + addr0.String(), + addr1.String(), + }, + }, + Owners{ + Threshold: 2, + Minters: []string{ + addr0.String(), + addr1.String(), + addr2.String(), + }, + }, + }, + }, + }, + "asset3": AssetDefinition{ + Name: "myOtherVarCapAsset", + InitialState: map[string][]interface{}{ + "variableCap": []interface{}{ + Owners{ + Threshold: 1, + Minters: []string{ + addr0.String(), + }, + }, + }, + }, + }, + }} + reply := BuildGenesisReply{} + err := ss.BuildGenesis(nil, &args, &reply) + if err != nil { + t.Fatal(err) + } + + return reply.Bytes.Bytes +} + +func GenesisVM(t *testing.T) *VM { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + return vm +} + +func TestTxSerialization(t *testing.T) { + expected := []byte{ + // 
txID: + 0x00, 0x00, 0x00, 0x02, + // networkID: + 0x00, 0x00, 0xa8, 0x66, + // chainID: + 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of outs: + 0x00, 0x00, 0x00, 0x03, + // output[0]: + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // fxID: + 0x00, 0x00, 0x00, 0x04, + // secp256k1 Transferable Output: + // amount: + 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses + 0x00, 0x00, 0x00, 0x01, + // address[0] + 0xfc, 0xed, 0xa8, 0xf9, 0x0f, 0xcb, 0x5d, 0x30, + 0x61, 0x4b, 0x99, 0xd7, 0x9f, 0xc4, 0xba, 0xa2, + 0x93, 0x07, 0x76, 0x26, + // output[1]: + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // fxID: + 0x00, 0x00, 0x00, 0x04, + // secp256k1 Transferable Output: + // amount: + 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses: + 0x00, 0x00, 0x00, 0x01, + // address[0]: + 0x6e, 0xad, 0x69, 0x3c, 0x17, 0xab, 0xb1, 0xbe, + 0x42, 0x2b, 0xb5, 0x0b, 0x30, 0xb9, 0x71, 0x1f, + 0xf9, 0x8d, 0x66, 0x7e, + // output[2]: + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // fxID: + 0x00, 0x00, 0x00, 0x04, + // secp256k1 Transferable Output: + // amount: + 0x00, 0x00, 
0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses: + 0x00, 0x00, 0x00, 0x01, + // address[0]: + 0xf2, 0x42, 0x08, 0x46, 0x87, 0x6e, 0x69, 0xf4, + 0x73, 0xdd, 0xa2, 0x56, 0x17, 0x29, 0x67, 0xe9, + 0x92, 0xf0, 0xee, 0x31, + // number of inputs: + 0x00, 0x00, 0x00, 0x00, + // number of operations: + 0x00, 0x00, 0x00, 0x01, + // operation[0]: + // assetID: + 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // number of inputs: + 0x00, 0x00, 0x00, 0x00, + // number of outputs: + 0x00, 0x00, 0x00, 0x01, + // fxID: + 0x00, 0x00, 0x00, 0x03, + // secp256k1 Mint Output: + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses: + 0x00, 0x00, 0x00, 0x01, + // address[0]: + 0xfc, 0xed, 0xa8, 0xf9, 0x0f, 0xcb, 0x5d, 0x30, + 0x61, 0x4b, 0x99, 0xd7, 0x9f, 0xc4, 0xba, 0xa2, + 0x93, 0x07, 0x76, 0x26, + // number of credentials: + 0x00, 0x00, 0x00, 0x00, + } + + unsignedTx := &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{ + &Operation{ + Asset: Asset{ + ID: asset, + }, + Outs: []*OperableOutput{ + &OperableOutput{ + Out: &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }, + }, + } + tx := &Tx{UnsignedTx: unsignedTx} + for _, key := range keys { + addr := key.PublicKey().Address() + + unsignedTx.Outs = append(unsignedTx.Outs, &TransferableOutput{ + Asset: Asset{ + ID: asset, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 20 * units.KiloAva, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }) + } + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + 
c.RegisterType(&OperationTx{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintInput{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +func TestInvalidGenesis(t *testing.T) { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + /*context=*/ ctx, + /*db=*/ memdb.New(), + /*genesisState=*/ nil, + /*engineMessenger=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + if err == nil { + t.Fatalf("Should have errored due to an invalid genesis") + } +} + +func TestInvalidFx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + /*context=*/ ctx, + /*db=*/ memdb.New(), + /*genesisState=*/ genesisBytes, + /*engineMessenger=*/ make(chan common.Message, 1), + /*fxs=*/ []*common.Fx{ + nil, + }, + ) + if err == nil { + t.Fatalf("Should have errored due to an invalid interface") + } +} + +func TestFxInitializationFailure(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + /*context=*/ ctx, + /*db=*/ memdb.New(), + /*genesisState=*/ genesisBytes, + /*engineMessenger=*/ make(chan common.Message, 1), + /*fxs=*/ []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &testFx{initialize: errUnknownFx}, + }}, + ) + if err == nil { + t.Fatalf("Should have errored due to an invalid fx initialization") + } +} + +type testTxBytes struct{ unsignedBytes []byte } + +func (tx *testTxBytes) UnsignedBytes() []byte { return tx.unsignedBytes } + +func TestIssueTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + 
+ ctx.Lock.Lock() + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + newTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*TransferableInput{ + &TransferableInput{ + UTXOID: UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: Asset{ + ID: genesisTx.ID(), + }, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }, + }, + }}} + + unsignedBytes, err := vm.codec.Marshal(&newTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + newTx.Creds = append(newTx.Creds, &Credential{ + Cred: &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }, + }) + + b, err := vm.codec.Marshal(newTx) + if err != nil { + t.Fatal(err) + } + newTx.Initialize(b) + + txID, err := vm.IssueTx(newTx.Bytes()) + if err != nil { + t.Fatal(err) + } + if !txID.Equals(newTx.ID()) { + t.Fatalf("Issue Tx returned wrong TxID") + } + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } +} + +func TestGenesisGetUTXOs(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + ctx.Lock.Lock() + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + make(chan common.Message, 1), + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + shortAddr := keys[0].PublicKey().Address() + addr := 
ids.NewID(hashing.ComputeHash256Array(shortAddr.Bytes())) + + addrs := ids.Set{} + addrs.Add(addr) + utxos, err := vm.GetUTXOs(addrs) + if err != nil { + t.Fatal(err) + } + vm.Shutdown() + ctx.Lock.Unlock() + + if len(utxos) != 7 { + t.Fatalf("Wrong number of utxos (%d) returned", len(utxos)) + } +} diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go new file mode 100644 index 0000000..92c731e --- /dev/null +++ b/vms/components/codec/codec.go @@ -0,0 +1,379 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package codec + +import ( + "errors" + "fmt" + "reflect" + "unicode" + + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Type is an identifier for a codec +type Type uint32 + +// Codec types +const ( + NoType Type = iota + GenericType + CustomType + // TODO: Utilize a standard serialization library. Must have a canonical + // serialization format. +) + +const ( + defaultMaxSize = 1 << 19 // default max size, in bytes, of something being marshalled by Marshal() + defaultMaxSliceLength = 1 << 19 // default max length of a slice being marshalled by Marshal() +) + +// ErrBadCodec is returned when one tries to perform an operation +// using an unknown codec +var ( + errBadCodec = errors.New("wrong or unknown codec used") + errNil = errors.New("can't marshal nil value") + errUnmarshalNil = errors.New("can't unmarshal into nil") + errNeedPointer = errors.New("must unmarshal into a pointer") + errMarshalUnregisteredType = errors.New("can't marshal an unregistered type") + errUnmarshalUnregisteredType = errors.New("can't unmarshal an unregistered type") + errUnknownType = errors.New("don't know how to marshal/unmarshal this type") + errMarshalUnexportedField = errors.New("can't serialize an unexported field") + errUnmarshalUnexportedField = errors.New("can't deserialize into an unexported field") + errOutOfMemory = errors.New("out of memory") + errSliceTooLarge = errors.New("slice too 
large")
)

// Verify returns nil iff the codec Type is one of the known codec values.
func (c Type) Verify() error {
	switch c {
	case NoType, GenericType, CustomType:
		return nil
	default:
		return errBadCodec
	}
}

// String returns a human-readable name for this codec Type.
func (c Type) String() string {
	switch c {
	case NoType:
		return "No Codec"
	case GenericType:
		return "Generic Codec"
	case CustomType:
		return "Custom Codec"
	default:
		return "Unknown Codec"
	}
}

// codec handles marshaling and unmarshaling of structs.
// The two registries map between a registered concrete type and the uint32
// type ID written to the wire when a value of that type is marshaled behind
// an interface.
type codec struct {
	maxSize     int // max size, in bytes, accepted by Marshal/Unmarshal
	maxSliceLen int // max number of elements a deserialized slice may have

	typeIDToType map[uint32]reflect.Type
	typeToTypeID map[reflect.Type]uint32
}

// Codec marshals and unmarshals
type Codec interface {
	RegisterType(interface{}) error
	Marshal(interface{}) ([]byte, error)
	Unmarshal([]byte, interface{}) error
}

// New returns a new codec enforcing the given size and slice-length limits
func New(maxSize, maxSliceLen int) Codec {
	return codec{
		maxSize:      maxSize,
		maxSliceLen:  maxSliceLen,
		typeIDToType: map[uint32]reflect.Type{},
		typeToTypeID: map[reflect.Type]uint32{},
	}
}

// NewDefault returns a new codec with reasonable default values
func NewDefault() Codec { return New(defaultMaxSize, defaultMaxSliceLength) }

// RegisterType is used to register types that may be unmarshaled into an interface typed value.
// [val] is a value of the type being registered.
// Type IDs are assigned sequentially in registration order, so registration
// order must match on every party exchanging serialized data.
func (c codec) RegisterType(val interface{}) error {
	valType := reflect.TypeOf(val)
	if _, exists := c.typeToTypeID[valType]; exists {
		return fmt.Errorf("type %v has already been registered", valType)
	}
	// Compute the next sequential ID once and reuse [valType] so the two
	// registries cannot drift apart (the original recomputed both).
	nextID := uint32(len(c.typeIDToType))
	c.typeIDToType[nextID] = valType
	c.typeToTypeID[valType] = nextID
	return nil
}

// A few notes:
// 1) See codec_test.go for examples of usage
// 2) We use "marshal" and "serialize" interchangeably, and "unmarshal" and "deserialize" interchangeably
// 3) To include a field of a struct in the serialized form, add the tag `serialize:"true"` to it
// 4) These typed
members of a struct may be serialized: +// bool, string, uint[8,16,32,64, int[8,16,32,64], +// structs, slices, arrays, interface. +// structs, slices and arrays can only be serialized if their constituent parts can be. +// 5) To marshal an interface typed value, you must pass a _pointer_ to the value +// 6) If you want to be able to unmarshal into an interface typed value, +// you must call codec.RegisterType([instance of the type that fulfills the interface]). +// 7) nil slices will be unmarshaled as an empty slice of the appropriate type +// 8) Serialized fields must be exported + +// Marshal returns the byte representation of [value] +// If you want to marshal an interface, [value] must be a pointer +// to the interface +func (c codec) Marshal(value interface{}) ([]byte, error) { + if value == nil { + return nil, errNil + } + + return c.marshal(reflect.ValueOf(value)) +} + +// Marshal [value] to bytes +func (c codec) marshal(value reflect.Value) ([]byte, error) { + p := wrappers.Packer{MaxSize: c.maxSize, Bytes: []byte{}} + t := value.Type() + + valueKind := value.Kind() + switch valueKind { + case reflect.Interface, reflect.Ptr, reflect.Slice: + if value.IsNil() { + return nil, errNil + } + } + + switch valueKind { + case reflect.Uint8: + p.PackByte(uint8(value.Uint())) + return p.Bytes, p.Err + case reflect.Int8: + p.PackByte(uint8(value.Int())) + return p.Bytes, p.Err + case reflect.Uint16: + p.PackShort(uint16(value.Uint())) + return p.Bytes, p.Err + case reflect.Int16: + p.PackShort(uint16(value.Int())) + return p.Bytes, p.Err + case reflect.Uint32: + p.PackInt(uint32(value.Uint())) + return p.Bytes, p.Err + case reflect.Int32: + p.PackInt(uint32(value.Int())) + return p.Bytes, p.Err + case reflect.Uint64: + p.PackLong(value.Uint()) + return p.Bytes, p.Err + case reflect.Int64: + p.PackLong(uint64(value.Int())) + return p.Bytes, p.Err + case reflect.Uintptr, reflect.Ptr: + return c.marshal(value.Elem()) + case reflect.String: + p.PackStr(value.String()) + 
return p.Bytes, p.Err + case reflect.Bool: + p.PackBool(value.Bool()) + return p.Bytes, p.Err + case reflect.Interface: + typeID, ok := c.typeToTypeID[reflect.TypeOf(value.Interface())] // Get the type ID of the value being marshaled + if !ok { + return nil, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) + } + p.PackInt(typeID) + bytes, err := c.Marshal(value.Interface()) + if err != nil { + return nil, err + } + p.PackFixedBytes(bytes) + if p.Errored() { + return nil, p.Err + } + return p.Bytes, err + case reflect.Array, reflect.Slice: + numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) + // If this is a slice, pack the number of elements in the slice + if valueKind == reflect.Slice { + p.PackInt(uint32(numElts)) + } + for i := 0; i < numElts; i++ { // Pack each element in the slice/array + eltBytes, err := c.marshal(value.Index(i)) + if err != nil { + return nil, err + } + p.PackFixedBytes(eltBytes) + } + return p.Bytes, p.Err + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { // Go through all fields of this struct + field := t.Field(i) + if !shouldSerialize(field) { // Skip fields we don't need to serialize + continue + } + if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields + return nil, errMarshalUnexportedField + } + fieldVal := value.Field(i) // The field we're serializing + if fieldVal.Kind() == reflect.Slice && fieldVal.IsNil() { + p.PackInt(0) + continue + } + fieldBytes, err := c.marshal(fieldVal) // Serialize the field + if err != nil { + return nil, err + } + p.PackFixedBytes(fieldBytes) + } + return p.Bytes, p.Err + case reflect.Invalid: + return nil, errUnmarshalNil + default: + return nil, errUnknownType + } +} + +// Unmarshal unmarshals [bytes] into [dest], where +// [dest] must be a pointer or interface +func (c codec) Unmarshal(bytes []byte, dest interface{}) error { + p := &wrappers.Packer{Bytes: bytes} + + if len(bytes) > 
c.maxSize { + return errSliceTooLarge + } + + if dest == nil { + return errNil + } + + destPtr := reflect.ValueOf(dest) + + if destPtr.Kind() != reflect.Ptr { + return errNeedPointer + } + + destVal := destPtr.Elem() + + err := c.unmarshal(p, destVal) + if err != nil { + return err + } + + if p.Offset != len(p.Bytes) { + return fmt.Errorf("has %d leftover bytes after unmarshalling", len(p.Bytes)-p.Offset) + } + return nil +} + +// Unmarshal bytes from [p] into [field] +// [field] must be addressable +func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { + kind := field.Kind() + switch kind { + case reflect.Uint8: + field.SetUint(uint64(p.UnpackByte())) + case reflect.Int8: + field.SetInt(int64(p.UnpackByte())) + case reflect.Uint16: + field.SetUint(uint64(p.UnpackShort())) + case reflect.Int16: + field.SetInt(int64(p.UnpackShort())) + case reflect.Uint32: + field.SetUint(uint64(p.UnpackInt())) + case reflect.Int32: + field.SetInt(int64(p.UnpackInt())) + case reflect.Uint64: + field.SetUint(p.UnpackLong()) + case reflect.Int64: + field.SetInt(int64(p.UnpackLong())) + case reflect.Bool: + field.SetBool(p.UnpackBool()) + case reflect.Slice: + sliceLen := int(p.UnpackInt()) // number of elements in the slice + if sliceLen < 0 || sliceLen > c.maxSliceLen { + return errSliceTooLarge + } + + // First set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) + slice := reflect.MakeSlice(field.Type(), sliceLen, sliceLen) + field.Set(slice) + // Unmarshal each element into the appropriate index of the slice + for i := 0; i < sliceLen; i++ { + if err := c.unmarshal(p, field.Index(i)); err != nil { + return err + } + } + case reflect.Array: + for i := 0; i < field.Len(); i++ { + if err := c.unmarshal(p, field.Index(i)); err != nil { + return err + } + } + case reflect.String: + field.SetString(p.UnpackStr()) + case reflect.Interface: + // Get the type ID + typeID := p.UnpackInt() + // Get a struct that implements the interface + 
typ, ok := c.typeIDToType[typeID] + if !ok { + return errUnmarshalUnregisteredType + } + concreteInstancePtr := reflect.New(typ) // instance of the proper type + // Unmarshal into the struct + if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { + return err + } + // And assign the filled struct to the field + field.Set(concreteInstancePtr.Elem()) + case reflect.Struct: + // Type of this struct + structType := reflect.TypeOf(field.Interface()) + // Go through all the fields and umarshal into each + for i := 0; i < structType.NumField(); i++ { + structField := structType.Field(i) + if !shouldSerialize(structField) { // Skip fields we don't need to unmarshal + continue + } + if unicode.IsLower(rune(structField.Name[0])) { // Only unmarshal into exported field + return errUnmarshalUnexportedField + } + field := field.Field(i) // Get the field + if err := c.unmarshal(p, field); err != nil { // Unmarshal into the field + return err + } + if p.Errored() { // If there was an error just return immediately + return p.Err + } + } + case reflect.Ptr: + // Get the type this pointer points to + underlyingType := field.Type().Elem() + // Create a new pointer to a new value of the underlying type + underlyingValue := reflect.New(underlyingType) + // Fill the value + if err := c.unmarshal(p, underlyingValue.Elem()); err != nil { + return err + } + // Assign to the top-level struct's member + field.Set(underlyingValue) + case reflect.Invalid: + return errUnmarshalNil + default: + return errUnknownType + } + return p.Err +} + +// Returns true iff [field] should be serialized +func shouldSerialize(field reflect.StructField) bool { + if field.Tag.Get("serialize") == "true" { + return true + } + return false +} diff --git a/vms/components/codec/codec_benchmark_test.go b/vms/components/codec/codec_benchmark_test.go new file mode 100644 index 0000000..8e6f9f7 --- /dev/null +++ b/vms/components/codec/codec_benchmark_test.go @@ -0,0 +1,55 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package codec + +import ( + "testing" + + "github.com/ava-labs/gecko/utils/wrappers" +) + +// BenchmarkMarshal benchmarks the codec's marshal function +func BenchmarkMarshal(b *testing.B) { + temp := Foo(&MyInnerStruct{}) + myStructInstance := myStruct{ + InnerStruct: MyInnerStruct{"hello"}, + InnerStruct2: &MyInnerStruct{"yello"}, + Member1: 1, + MySlice: []byte{1, 2, 3, 4}, + MySlice2: []string{"one", "two", "three"}, + MySlice3: []MyInnerStruct{MyInnerStruct{"a"}, MyInnerStruct{"b"}, MyInnerStruct{"c"}}, + MySlice4: []*MyInnerStruct2{&MyInnerStruct2{true}, &MyInnerStruct2{}}, + MySlice5: []Foo{&MyInnerStruct2{true}, &MyInnerStruct2{}}, + MyArray: [4]byte{5, 6, 7, 8}, + MyArray2: [5]string{"four", "five", "six", "seven"}, + MyArray3: [3]MyInnerStruct{MyInnerStruct{"d"}, MyInnerStruct{"e"}, MyInnerStruct{"f"}}, + MyArray4: [2]*MyInnerStruct2{&MyInnerStruct2{}, &MyInnerStruct2{true}}, + MyInterface: &MyInnerStruct{"yeet"}, + InnerStruct3: MyInnerStruct3{ + Str: "str", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + MyPointer: &temp, + } + + codec := NewDefault() + codec.RegisterType(&MyInnerStruct{}) // Register the types that may be unmarshaled into interfaces + codec.RegisterType(&MyInnerStruct2{}) + b.ResetTimer() + for n := 0; n < b.N; n++ { + codec.Marshal(myStructInstance) + } +} + +func BenchmarkMarshalNonCodec(b *testing.B) { + p := wrappers.Packer{} + for n := 0; n < b.N; n++ { + for i := 0; i < 30; i++ { + p.PackStr("yay") + } + } +} diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go new file mode 100644 index 0000000..6fc4f25 --- /dev/null +++ b/vms/components/codec/codec_test.go @@ -0,0 +1,540 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package codec + +import ( + "bytes" + "reflect" + "testing" +) + +// The below structs and interfaces exist +// for the sake of testing + +type Foo interface { + Foo() int +} + +// *MyInnerStruct implements Foo +type MyInnerStruct struct { + Str string `serialize:"true"` +} + +func (m *MyInnerStruct) Foo() int { + return 1 +} + +// *MyInnerStruct2 implements Foo +type MyInnerStruct2 struct { + Bool bool `serialize:"true"` +} + +func (m *MyInnerStruct2) Foo() int { + return 2 +} + +// MyInnerStruct3 embeds Foo, an interface, +// so it has to implement TypeID and ConcreteInstance +type MyInnerStruct3 struct { + Str string `serialize:"true"` + M1 MyInnerStruct `serialize:"true"` + F Foo `serialize:"true"` +} + +type myStruct struct { + InnerStruct MyInnerStruct `serialize:"true"` + InnerStruct2 *MyInnerStruct `serialize:"true"` + Member1 int64 `serialize:"true"` + MyArray2 [5]string `serialize:"true"` + MyArray3 [3]MyInnerStruct `serialize:"true"` + MyArray4 [2]*MyInnerStruct2 `serialize:"true"` + MySlice []byte `serialize:"true"` + MySlice2 []string `serialize:"true"` + MySlice3 []MyInnerStruct `serialize:"true"` + MySlice4 []*MyInnerStruct2 `serialize:"true"` + MyArray [4]byte `serialize:"true"` + MyInterface Foo `serialize:"true"` + MySlice5 []Foo `serialize:"true"` + InnerStruct3 MyInnerStruct3 `serialize:"true"` + MyPointer *Foo `serialize:"true"` +} + +// Test marshaling/unmarshaling a complicated struct +func TestStruct(t *testing.T) { + temp := Foo(&MyInnerStruct{}) + myStructInstance := myStruct{ + InnerStruct: MyInnerStruct{"hello"}, + InnerStruct2: &MyInnerStruct{"yello"}, + Member1: 1, + MySlice: []byte{1, 2, 3, 4}, + MySlice2: []string{"one", "two", "three"}, + MySlice3: []MyInnerStruct{MyInnerStruct{"a"}, MyInnerStruct{"b"}, MyInnerStruct{"c"}}, + MySlice4: []*MyInnerStruct2{&MyInnerStruct2{true}, &MyInnerStruct2{}}, + MySlice5: []Foo{&MyInnerStruct2{true}, &MyInnerStruct2{}}, + MyArray: [4]byte{5, 6, 7, 8}, + MyArray2: [5]string{"four", "five", 
"six", "seven"}, + MyArray3: [3]MyInnerStruct{MyInnerStruct{"d"}, MyInnerStruct{"e"}, MyInnerStruct{"f"}}, + MyArray4: [2]*MyInnerStruct2{&MyInnerStruct2{}, &MyInnerStruct2{true}}, + MyInterface: &MyInnerStruct{"yeet"}, + InnerStruct3: MyInnerStruct3{ + Str: "str", + M1: MyInnerStruct{ + Str: "other str", + }, + F: &MyInnerStruct2{}, + }, + MyPointer: &temp, + } + + codec := NewDefault() + codec.RegisterType(&MyInnerStruct{}) // Register the types that may be unmarshaled into interfaces + codec.RegisterType(&MyInnerStruct2{}) + + myStructBytes, err := codec.Marshal(myStructInstance) + if err != nil { + t.Fatal(err) + } + + myStructUnmarshaled := &myStruct{} + err = codec.Unmarshal(myStructBytes, myStructUnmarshaled) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myStructUnmarshaled.Member1, myStructInstance.Member1) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !bytes.Equal(myStructUnmarshaled.MySlice, myStructInstance.MySlice) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice2, myStructInstance.MySlice2) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice3, myStructInstance.MySlice3) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice3, myStructInstance.MySlice3) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice4, myStructInstance.MySlice4) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.InnerStruct, myStructInstance.InnerStruct) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.InnerStruct2, myStructInstance.InnerStruct2) { + t.Fatal("expected 
unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray2, myStructInstance.MyArray2) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray3, myStructInstance.MyArray3) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray4, myStructInstance.MyArray4) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MyInterface, myStructInstance.MyInterface) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice5, myStructInstance.MySlice5) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.InnerStruct3, myStructInstance.InnerStruct3) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } else if !reflect.DeepEqual(myStructUnmarshaled.MyPointer, myStructInstance.MyPointer) { + t.Fatal("expected unmarshaled struct to be same as original struct") + } +} + +func TestUInt32(t *testing.T) { + number := uint32(500) + codec := NewDefault() + bytes, err := codec.Marshal(number) + if err != nil { + t.Fatal(err) + } + + var numberUnmarshaled uint32 + if err := codec.Unmarshal(bytes, &numberUnmarshaled); err != nil { + t.Fatal(err) + } + + if number != numberUnmarshaled { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +func TestSlice(t *testing.T) { + mySlice := []bool{true, false, true, true} + codec := NewDefault() + bytes, err := codec.Marshal(mySlice) + if err != nil { + t.Fatal(err) + } + + var sliceUnmarshaled []bool + if err := codec.Unmarshal(bytes, &sliceUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(mySlice, sliceUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to 
match")
	}
}

// TestBool round-trips a lone boolean through the codec.
func TestBool(t *testing.T) {
	cdc := NewDefault()

	original := true
	serialized, err := cdc.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	var restored bool
	if err := cdc.Unmarshal(serialized, &restored); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(original, restored) {
		t.Fatal("expected marshaled and unmarshaled values to match")
	}
}

// TestArray round-trips a fixed-length array of uint64s.
func TestArray(t *testing.T) {
	cdc := NewDefault()

	original := [5]uint64{5, 6, 7, 8, 9}
	serialized, err := cdc.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	var restored [5]uint64
	if err := cdc.Unmarshal(serialized, &restored); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(original, restored) {
		t.Fatal("expected marshaled and unmarshaled values to match")
	}
}

// TestPointerToStruct round-trips a pointer to a struct.
func TestPointerToStruct(t *testing.T) {
	cdc := NewDefault()

	original := &MyInnerStruct{Str: "Hello!"}
	serialized, err := cdc.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	var restored *MyInnerStruct
	if err := cdc.Unmarshal(serialized, &restored); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(original, restored) {
		t.Fatal("expected marshaled and unmarshaled values to match")
	}
}

// TestSliceOfStruct round-trips a slice of structs that carry an
// interface-typed member (requires registering the concrete type).
func TestSliceOfStruct(t *testing.T) {
	cdc := NewDefault()
	cdc.RegisterType(&MyInnerStruct{})

	original := []MyInnerStruct3{
		{
			Str: "One",
			M1:  MyInnerStruct{"Two"},
			F:   &MyInnerStruct{"Three"},
		},
		{
			Str: "Four",
			M1:  MyInnerStruct{"Five"},
			F:   &MyInnerStruct{"Six"},
		},
	}
	serialized, err := cdc.Marshal(original)
	if err != nil {
		t.Fatal(err)
	}

	var restored []MyInnerStruct3
	if err := cdc.Unmarshal(serialized, &restored); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(original, restored) {
		t.Fatal("expected marshaled and unmarshaled values to match")
	}
}

func TestInterface(t *testing.T) {
	codec := NewDefault()
codec.RegisterType(&MyInnerStruct2{}) + + var f Foo = &MyInnerStruct2{true} + bytes, err := codec.Marshal(&f) + if err != nil { + t.Fatal(err) + } + + var unmarshaledFoo Foo + err = codec.Unmarshal(bytes, &unmarshaledFoo) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(f, unmarshaledFoo) { + t.Fatal("expected unmarshaled value to match original") + } +} + +func TestSliceOfInterface(t *testing.T) { + mySlice := []Foo{ + &MyInnerStruct{ + Str: "Hello", + }, + &MyInnerStruct{ + Str: ", World!", + }, + } + codec := NewDefault() + codec.RegisterType(&MyInnerStruct{}) + bytes, err := codec.Marshal(mySlice) + if err != nil { + t.Fatal(err) + } + + var mySliceUnmarshaled []Foo + if err := codec.Unmarshal(bytes, &mySliceUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(mySlice, mySliceUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +func TestArrayOfInterface(t *testing.T) { + myArray := [2]Foo{ + &MyInnerStruct{ + Str: "Hello", + }, + &MyInnerStruct{ + Str: ", World!", + }, + } + codec := NewDefault() + codec.RegisterType(&MyInnerStruct{}) + bytes, err := codec.Marshal(myArray) + if err != nil { + t.Fatal(err) + } + + var myArrayUnmarshaled [2]Foo + if err := codec.Unmarshal(bytes, &myArrayUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myArray, myArrayUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +func TestPointerToInterface(t *testing.T) { + var myinnerStruct Foo = &MyInnerStruct{Str: "Hello!"} + var myPtr *Foo = &myinnerStruct + + codec := NewDefault() + codec.RegisterType(&MyInnerStruct{}) + + bytes, err := codec.Marshal(&myPtr) + if err != nil { + t.Fatal(err) + } + + var myPtrUnmarshaled *Foo + if err := codec.Unmarshal(bytes, &myPtrUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myPtr, myPtrUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +func TestString(t 
*testing.T) { + myString := "Ayy" + codec := NewDefault() + bytes, err := codec.Marshal(myString) + if err != nil { + t.Fatal(err) + } + + var stringUnmarshaled string + if err := codec.Unmarshal(bytes, &stringUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myString, stringUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +// Ensure a nil slice is unmarshaled as an empty slice +func TestNilSlice(t *testing.T) { + type structWithSlice struct { + Slice []byte `serialize:"true"` + } + + myStruct := structWithSlice{Slice: nil} + codec := NewDefault() + bytes, err := codec.Marshal(myStruct) + if err != nil { + t.Fatal(err) + } + + var structUnmarshaled structWithSlice + if err := codec.Unmarshal(bytes, &structUnmarshaled); err != nil { + t.Fatal(err) + } + + if structUnmarshaled.Slice == nil || len(structUnmarshaled.Slice) != 0 { + t.Fatal("expected slice to be empty slice") + } +} + +// Ensure that trying to serialize a struct with an unexported member +// that has `serialize:"true"` returns errUnexportedField +func TestSerializeUnexportedField(t *testing.T) { + type s struct { + ExportedField string `serialize:"true"` + unexportedField string `serialize:"true"` + } + + myS := s{ + ExportedField: "Hello, ", + unexportedField: "world!", + } + + codec := NewDefault() + if _, err := codec.Marshal(myS); err != errMarshalUnexportedField { + t.Fatalf("expected err to be errUnexportedField but was %v", err) + } +} + +type simpleSliceStruct struct { + Arr []uint32 `serialize:"true"` +} + +func TestEmptySliceSerialization(t *testing.T) { + codec := NewDefault() + + val := &simpleSliceStruct{} + expected := []byte{0, 0, 0, 0} + result, err := codec.Marshal(val) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +type emptyStruct struct{} + +type nestedSliceStruct struct { + Arr []emptyStruct `serialize:"true"` +} + +func 
TestSliceWithEmptySerialization(t *testing.T) { + codec := NewDefault() + + val := &nestedSliceStruct{ + Arr: make([]emptyStruct, 1000), + } + expected := []byte{0x00, 0x00, 0x03, 0xE8} + result, err := codec.Marshal(val) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } + + unmarshaled := nestedSliceStruct{} + if err := codec.Unmarshal(expected, &unmarshaled); err != nil { + t.Fatal(err) + } + if len(unmarshaled.Arr) != 1000 { + t.Fatalf("Should have created an array of length %d", 1000) + } +} + +func TestSliceWithEmptySerializationOutOfMemory(t *testing.T) { + codec := NewDefault() + + val := &nestedSliceStruct{ + Arr: make([]emptyStruct, 1000000), + } + expected := []byte{0x00, 0x0f, 0x42, 0x40} // 1,000,000 in hex + result, err := codec.Marshal(val) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } + + unmarshaled := nestedSliceStruct{} + if err := codec.Unmarshal(expected, &unmarshaled); err == nil { + t.Fatalf("Should have errored due to excess memory requested") + } +} + +func TestOutOfMemory(t *testing.T) { + codec := NewDefault() + + val := []bool{} + b := []byte{0xff, 0xff, 0xff, 0xff, 0x00} + if err := codec.Unmarshal(b, &val); err == nil { + t.Fatalf("Should have errored due to memory usage") + } +} + +// Ensure serializing structs with negative number members works +func TestNegativeNumbers(t *testing.T) { + type s struct { + MyInt8 int8 `serialize:"true"` + MyInt16 int16 `serialize:"true"` + MyInt32 int32 `serialize:"true"` + MyInt64 int64 `serialize:"true"` + } + + myS := s{-1, -2, -3, -4} + + codec := NewDefault() + + bytes, err := codec.Marshal(myS) + if err != nil { + t.Fatal(err) + } + + mySUnmarshaled := s{} + if err := codec.Unmarshal(bytes, &mySUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myS, mySUnmarshaled) { + 
t.Log(mySUnmarshaled) + t.Log(myS) + t.Fatal("expected marshaled and unmarshaled structs to be the same") + } +} + +// Ensure deserializing structs with too many bytes errors correctly +func TestTooLargeUnmarshal(t *testing.T) { + type inner struct { + Long uint64 `serialize:"true"` + } + + bytes := []byte{0, 0, 0, 0} + + s := inner{} + codec := New(3, 1) + + err := codec.Unmarshal(bytes, &s) + if err == nil { + t.Fatalf("Should have errored due to too many bytes provided") + } +} diff --git a/vms/components/core/block.go b/vms/components/core/block.go new file mode 100644 index 0000000..0690a0d --- /dev/null +++ b/vms/components/core/block.go @@ -0,0 +1,103 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/vms/components/missing" +) + +var ( + errBlockNil = errors.New("block is nil") + errRejected = errors.New("block is rejected") +) + +// Block contains fields and methods common to block's in a Snowman blockchain. +// Block is meant to be a building-block (pun intended). +// When you write a VM, your blocks can (and should) embed a core.Block +// to take care of some bioler-plate code. +// Block's methods can be over-written by structs that embed this struct. +type Block struct { + Metadata + PrntID ids.ID `serialize:"true"` // parent's ID + VM *SnowmanVM +} + +// Initialize sets [b.bytes] to [bytes], sets [b.id] to hash([b.bytes]) +// Checks if [b]'s status is already stored in state. If so, [b] gets that status. +// Otherwise [b]'s status is Unknown. 
+func (b *Block) Initialize(bytes []byte, vm *SnowmanVM) { + b.VM = vm + b.Metadata.Initialize(bytes) + status := b.VM.State.GetStatus(vm.DB, b.ID()) + b.SetStatus(status) +} + +// ParentID returns [b]'s parent's ID +func (b *Block) ParentID() ids.ID { return b.PrntID } + +// Parent returns [b]'s parent +func (b *Block) Parent() snowman.Block { + parent, err := b.VM.GetBlock(b.ParentID()) + if err != nil { + return &missing.Block{BlkID: b.ParentID()} + } + return parent +} + +// Accept sets this block's status to Accepted and sets lastAccepted to this +// block's ID and saves this info to b.vm.DB +// Recall that b.vm.DB.Commit() must be called to persist to the DB +func (b *Block) Accept() { + b.SetStatus(choices.Accepted) // Change state of this block + b.VM.State.PutStatus(b.VM.DB, b.ID(), choices.Accepted) // Persist data + b.VM.State.PutLastAccepted(b.VM.DB, b.ID()) + b.VM.lastAccepted = b.ID() // Change state of VM +} + +// Reject sets this block's status to Rejected and saves the status in state +// Recall that b.vm.DB.Commit() must be called to persist to the DB +func (b *Block) Reject() { + b.SetStatus(choices.Rejected) + b.VM.State.PutStatus(b.VM.DB, b.ID(), choices.Rejected) +} + +// Status returns the status of this block +func (b *Block) Status() choices.Status { + // See if [b]'s status field already has a value + if status := b.Metadata.Status(); status != choices.Unknown { + return status + } + // If not, check the state + status := b.VM.State.GetStatus(b.VM.DB, b.ID()) + b.SetStatus(status) + return status +} + +// Verify returns: +// 1) true if the block is accepted +// 2) nil if this block is valid +func (b *Block) Verify() (bool, error) { + if b == nil { + return false, errBlockNil + } + + // Check if [b] has already been accepted/rejected + switch status := b.Status(); status { + case choices.Accepted: + return true, nil + case choices.Rejected: + return false, errRejected + } + return false, nil +} + +// NewBlock returns a new *Block +func 
NewBlock(parentID ids.ID) *Block { + return &Block{PrntID: parentID} +} diff --git a/vms/components/core/metadata.go b/vms/components/core/metadata.go new file mode 100644 index 0000000..1390e4d --- /dev/null +++ b/vms/components/core/metadata.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" +) + +// Metadata contains the data common to all blocks and transactions +type Metadata struct { + id ids.ID + status choices.Status + bytes []byte +} + +// ID returns the ID of this block/transaction +func (i *Metadata) ID() ids.ID { return i.id } + +// Status returns the status of this block/transaction +func (i *Metadata) Status() choices.Status { return i.status } + +// Bytes returns the byte repr. of this block/transaction +func (i *Metadata) Bytes() []byte { return i.bytes } + +// SetStatus sets the status of this block/transaction +func (i *Metadata) SetStatus(status choices.Status) { i.status = status } + +// Initialize sets [i.bytes] to [bytes], sets [i.id] to a hash of [i.bytes] +// and sets [i.status] to choices.Processing +func (i *Metadata) Initialize(bytes []byte) { + i.bytes = bytes + i.id = ids.NewID(hashing.ComputeHash256Array(i.bytes)) + i.status = choices.Processing +} diff --git a/vms/components/core/snowman_state.go b/vms/components/core/snowman_state.go new file mode 100644 index 0000000..8a47b4e --- /dev/null +++ b/vms/components/core/snowman_state.go @@ -0,0 +1,78 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package core + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/vms/components/state" +) + +var errWrongType = errors.New("got unexpected type from database") + +// state.Get(Db, IDTypeID, lastAcceptedID) == ID of last accepted block +var lastAcceptedID = ids.NewID([32]byte{'l', 'a', 's', 't'}) + +// SnowmanState is a wrapper around state.State +// In additions to the methods exposed by state.State, +// SnowmanState exposes a few methods needed for managing +// state in a snowman vm +type SnowmanState interface { + state.State + GetBlock(database.Database, ids.ID) (snowman.Block, error) + PutBlock(database.Database, snowman.Block) error + GetLastAccepted(database.Database) (ids.ID, error) + PutLastAccepted(database.Database, ids.ID) error +} + +// implements SnowmanState +type snowmanState struct { + state.State +} + +// GetBlock gets the block with ID [ID] from [db] +func (s *snowmanState) GetBlock(db database.Database, ID ids.ID) (snowman.Block, error) { + blockInterface, err := s.Get(db, state.BlockTypeID, ID) + if err != nil { + return nil, err + } + + if block, ok := blockInterface.(snowman.Block); ok { + return block, nil + } + return nil, errWrongType +} + +// PutBlock puts [block] in [db] +func (s *snowmanState) PutBlock(db database.Database, block snowman.Block) error { + return s.Put(db, state.BlockTypeID, block.ID(), block) +} + +// GetLastAccepted returns the ID of the last accepted block in [db] +func (s *snowmanState) GetLastAccepted(db database.Database) (ids.ID, error) { + lastAccepted, err := s.GetID(db, lastAcceptedID) + if err != nil { + return ids.ID{}, err + } + return lastAccepted, nil +} + +// PutLastAccepted sets the ID of the last accepted block in [db] to [lastAccepted] +func (s *snowmanState) PutLastAccepted(db database.Database, lastAccepted ids.ID) error { + return s.PutID(db, lastAcceptedID, lastAccepted) 
+} + +// NewSnowmanState returns a new SnowmanState +func NewSnowmanState(unmarshalBlockFunc func([]byte) (snowman.Block, error)) (SnowmanState, error) { + rawState := state.NewState() + snowmanState := &snowmanState{State: rawState} + return snowmanState, rawState.RegisterType(state.BlockTypeID, + func(bytes []byte) (interface{}, error) { + return unmarshalBlockFunc(bytes) + }, + ) +} diff --git a/vms/components/core/snowman_vm.go b/vms/components/core/snowman_vm.go new file mode 100644 index 0000000..a659b1b --- /dev/null +++ b/vms/components/core/snowman_vm.go @@ -0,0 +1,167 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package core + +import ( + "errors" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/vms/components/state" +) + +var ( + errUnmarshalBlockUndefined = errors.New("vm's UnmarshalBlock member is undefined") + errBadData = errors.New("got unexpected value from database") +) + +// If the status of this ID is not choices.Accepted, +// the db has not yet been initialized +var dbInitializedID = ids.NewID([32]byte{'d', 'b', ' ', 'i', 'n', 'i', 't'}) + +// SnowmanVM provides the core functionality shared by most snowman vms +type SnowmanVM struct { + State SnowmanState + + // VersionDB on top of underlying database + // Important note: In order for writes to [DB] to be persisted, + // DB.Commit() must be called + // We use a versionDB here so user can do atomic commits as they see fit + DB *versiondb.Database + + // The context of this vm + Ctx *snow.Context + + // ID of the preferred block + preferred ids.ID + + // ID of the last accepted block 
+ lastAccepted ids.ID + + // unmarshals bytes to a block + unmarshalBlockFunc func([]byte) (snowman.Block, error) + + // channel to send messages to the consensus engine + ToEngine chan<- common.Message +} + +// SetPreference sets the block with ID [ID] as the preferred block +func (svm *SnowmanVM) SetPreference(ID ids.ID) { svm.preferred = ID } + +// Preferred returns the ID of the preferred block +func (svm *SnowmanVM) Preferred() ids.ID { return svm.preferred } + +// LastAccepted returns the block most recently accepted +func (svm *SnowmanVM) LastAccepted() ids.ID { return svm.lastAccepted } + +// ParseBlock parses [bytes] to a block +func (svm *SnowmanVM) ParseBlock(bytes []byte) (snowman.Block, error) { + return svm.unmarshalBlockFunc(bytes) +} + +// GetBlock returns the block with ID [ID] +func (svm *SnowmanVM) GetBlock(ID ids.ID) (snowman.Block, error) { + block, err := svm.State.Get(svm.DB, state.BlockTypeID, ID) + if err != nil { + return nil, err + } + + if block, ok := block.(snowman.Block); ok { + return block, nil + } + return nil, errBadData // Should never happen +} + +// Shutdown this vm +func (svm *SnowmanVM) Shutdown() { + svm.DB.Commit() // Flush DB + svm.DB.GetDatabase().Close() // close underlying database + svm.DB.Close() // close versionDB +} + +// DBInitialized returns true iff [svm]'s database has values in it already +func (svm *SnowmanVM) DBInitialized() bool { + status := svm.State.GetStatus(svm.DB, dbInitializedID) + if status == choices.Accepted { + return true + } + return false +} + +// SetDBInitialized marks the database as initialized +func (svm *SnowmanVM) SetDBInitialized() { + svm.State.PutStatus(svm.DB, dbInitializedID, choices.Accepted) +} + +// SaveBlock saves [block] to state +func (svm *SnowmanVM) SaveBlock(db database.Database, block snowman.Block) error { + return svm.State.Put(db, state.BlockTypeID, block.ID(), block) +} + +// NotifyBlockReady tells the consensus engine that a new block +// is ready to be created +func 
(svm *SnowmanVM) NotifyBlockReady() { + select { + case svm.ToEngine <- common.PendingTxs: + default: + svm.Ctx.Log.Warn("dropping message to consensus engine") + } +} + +// NewHandler returns a new Handler for a service where: +// * The handler's functionality is defined by [service] +// [service] should be a gorilla RPC service (see https://www.gorillatoolkit.org/pkg/rpc/v2) +// * The name of the service is [name] +// * The LockOption is the first element of [lockOption] +// By default the LockOption is WriteLock +// [lockOption] should have either 0 or 1 elements. Elements beside the first are ignored. +func (svm *SnowmanVM) NewHandler(name string, service interface{}, lockOption ...common.LockOption) *common.HTTPHandler { + server := rpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") + server.RegisterService(service, name) + + var lock common.LockOption = common.WriteLock + if len(lockOption) != 0 { + lock = lockOption[0] + } + return &common.HTTPHandler{LockOptions: lock, Handler: server} +} + +// Initialize this vm. +// If there is data in [db], sets [svm.lastAccepted] using data in the database, +// and sets [svm.preferred] to the last accepted block. 
+func (svm *SnowmanVM) Initialize( + ctx *snow.Context, + db database.Database, + unmarshalBlockFunc func([]byte) (snowman.Block, error), + toEngine chan<- common.Message, +) error { + svm.Ctx = ctx + svm.ToEngine = toEngine + svm.DB = versiondb.New(db) + + var err error + svm.State, err = NewSnowmanState(unmarshalBlockFunc) + if err != nil { + return err + } + + if svm.DBInitialized() { + if svm.lastAccepted, err = svm.State.GetLastAccepted(svm.DB); err != nil { + return err + } + svm.preferred = svm.lastAccepted + } + + return nil +} diff --git a/vms/components/missing/block.go b/vms/components/missing/block.go new file mode 100644 index 0000000..3f9b4a2 --- /dev/null +++ b/vms/components/missing/block.go @@ -0,0 +1,40 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package missing + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +var ( + errMissingBlock = errors.New("missing block") +) + +// Block represents a block that can't be found +type Block struct{ BlkID ids.ID } + +// ID ... +func (mb *Block) ID() ids.ID { return mb.BlkID } + +// Accept ... +func (*Block) Accept() { panic(errMissingBlock) } + +// Reject ... +func (*Block) Reject() { panic(errMissingBlock) } + +// Status ... +func (*Block) Status() choices.Status { return choices.Unknown } + +// Parent ... +func (*Block) Parent() snowman.Block { return nil } + +// Verify ... +func (*Block) Verify() error { return errMissingBlock } + +// Bytes ... +func (*Block) Bytes() []byte { return nil } diff --git a/vms/components/missing/block_test.go b/vms/components/missing/block_test.go new file mode 100644 index 0000000..9bffa99 --- /dev/null +++ b/vms/components/missing/block_test.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package missing + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +func TestMissingBlock(t *testing.T) { + id := ids.NewID([32]byte{255}) + mb := Block{BlkID: id} + + if blkID := mb.ID(); !blkID.Equals(id) { + t.Fatalf("missingBlock.ID returned %s, expected %s", blkID, id) + } else if status := mb.Status(); status != choices.Unknown { + t.Fatalf("missingBlock.Status returned %s, expected %s", status, choices.Unknown) + } else if parent := mb.Parent(); parent != nil { + t.Fatalf("missingBlock.Parent returned %v, expected %v", parent, nil) + } else if err := mb.Verify(); err == nil { + t.Fatalf("missingBlock.Verify returned nil, expected an error") + } else if bytes := mb.Bytes(); bytes != nil { + t.Fatalf("missingBlock.Bytes returned %v, expected %v", bytes, nil) + } + + func() { + defer func() { + if r := recover(); r == nil { + t.Fatalf("Should have panicked on accept") + } + }() + mb.Accept() + }() + func() { + defer func() { + if r := recover(); r == nil { + t.Fatalf("Should have panicked on reject") + } + }() + mb.Reject() + }() +} diff --git a/vms/components/state/state.go b/vms/components/state/state.go new file mode 100644 index 0000000..d4d0da8 --- /dev/null +++ b/vms/components/state/state.go @@ -0,0 +1,249 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "errors" + "fmt" + "time" + + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/wrappers" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" +) + +const cacheSize = 1000 + +var errWrongType = errors.New("value in the database was the wrong type") + +// Marshaller can marshal itself to bytes +type Marshaller interface { + Bytes() []byte +} + +// State is a key-value store where every value is associated with a "type ID". 
+// Every different type of value must have its own type ID. +// +// For example, if you're storing blocks, accounts and addresses, each of those types +// must have their own type ID. +// +// Each type ID is associated with a function that specifies how to unmarshal bytes +// to a struct/value of a given type. +// +// State has built-in support for putting and getting choices.Status and ids.ID +// To put/get any other type, you must first register that type using RegisterType +type State interface { + // In [db], add a key-value pair. + // [value] will be converted to bytes by calling Bytes() on it. + // [typeID] must have already been registered using RegisterType. + // If [value] is nil, the value associated with [key] and [typeID] is deleted (if it exists). + Put(db database.Database, typeID uint64, key ids.ID, value Marshaller) error + + // From [db], get the value of type [typeID] whose key is [key] + // Returns database.ErrNotFound if the entry doesn't exist + Get(db database.Database, typeID uint64, key ids.ID) (interface{}, error) + + // Return whether [key] exists in [db] for type [typeID] + Has(db database.Database, typeID uint64, key ids.ID) (bool, error) + + // PutStatus associates [key] with [status] in [db] + PutStatus(db database.Database, key ids.ID, status choices.Status) error + + // GetStatus gets the status associated with [key] in [db] + GetStatus(db database.Database, key ids.ID) choices.Status + + // PutID associates [key] with [ID] in [db] + PutID(db database.Database, key ids.ID, ID ids.ID) error + + // GetID gets the ID associated with [key] in [db] + GetID(db database.Database, key ids.ID) (ids.ID, error) + + // PutTime associates [key] with [time] in [db] + PutTime(db database.Database, key ids.ID, time time.Time) error + + // GetTime gets the time associated with [key] in [db] + GetTime(db database.Database, key ids.ID) (time.Time, error) + + // Register a new type. 
+ // When values that were Put with [typeID] are retrieved from the database, + // they will be unmarshaled from bytes using [unmarshal]. + // Returns an error if there is already a type with ID [typeID] + RegisterType(typeID uint64, + unmarshal func([]byte) (interface{}, error)) error +} + +type state struct { + // Keys: Type ID + // Values: Function that unmarshals values + // that were Put with that type ID + unmarshallers map[uint64]func([]byte) (interface{}, error) + + // Keys: Type ID + // Values: Cache that stores uniqueIDs for values that were put with that type ID + // (Saves us from having to re-compute uniqueIDs) + uniqueIDCaches map[uint64]*cache.LRU +} + +// Implements State.RegisterType +func (s *state) RegisterType(typeID uint64, unmarshal func([]byte) (interface{}, error)) error { + if _, exists := s.unmarshallers[typeID]; exists { + return fmt.Errorf("there is already a type with ID %d", typeID) + } + s.unmarshallers[typeID] = unmarshal + return nil +} + +// Implements State.Put +func (s *state) Put(db database.Database, typeID uint64, key ids.ID, value Marshaller) error { + if _, exists := s.unmarshallers[typeID]; !exists { + return fmt.Errorf("typeID %d has not been registered", typeID) + } + + // Get the unique ID of thie key/typeID pair + uID := s.uniqueID(key, typeID) + + if value == nil { + return db.Delete(uID.Bytes()) + } + + // Put the byte repr. 
of the value in the database + return db.Put(uID.Bytes(), value.Bytes()) +} + +func (s *state) Has(db database.Database, typeID uint64, key ids.ID) (bool, error) { + return db.Has(s.uniqueID(key, typeID).Bytes()) +} + +// Implements State.Get +func (s *state) Get(db database.Database, typeID uint64, key ids.ID) (interface{}, error) { + unmarshal, exists := s.unmarshallers[typeID] + if !exists { + return nil, fmt.Errorf("typeID %d has not been registered", typeID) + } + + // The unique ID of this key/typeID pair + uID := s.uniqueID(key, typeID) + + // See if exists in database + exists, err := db.Has(uID.Bytes()) + if err != nil { + return nil, err + } + if !exists { + return nil, database.ErrNotFound + } + + // Get the value from the database + valueBytes, err := db.Get(uID.Bytes()) + if err != nil { + return nil, fmt.Errorf("problem getting value from database: %w", err) + } + + // Unmarshal the value from bytes and return it + return unmarshal(valueBytes) +} + +// PutStatus associates [key] with [status] in [db] +func (s *state) PutStatus(db database.Database, key ids.ID, status choices.Status) error { + return s.Put(db, StatusTypeID, key, status) +} + +// GetStatus gets the status associated with [key] in [db] +// Return choices.Processing if can't get the status from database +func (s *state) GetStatus(db database.Database, key ids.ID) choices.Status { + statusInterface, err := s.Get(db, StatusTypeID, key) + if err != nil { + return choices.Processing + } + + status, ok := statusInterface.(choices.Status) + if !ok || status.Valid() != nil { + return choices.Processing + } + + return status +} + +// PutID associates [key] with [ID] in [db] +func (s *state) PutID(db database.Database, key ids.ID, ID ids.ID) error { + return s.Put(db, IDTypeID, key, ID) +} + +// GetID gets the ID associated with [key] in [db] +func (s *state) GetID(db database.Database, key ids.ID) (ids.ID, error) { + IDInterface, err := s.Get(db, IDTypeID, key) + if err != nil { + return 
ids.ID{}, err + } + + if ID, ok := IDInterface.(ids.ID); ok { + return ID, nil + } + + return ids.ID{}, errWrongType +} + +// PutTime associates [key] with [time] in [db] +func (s *state) PutTime(db database.Database, key ids.ID, time time.Time) error { + return s.Put(db, TimeTypeID, key, &timeMarshaller{time}) +} + +// GetTime gets the time associated with [key] in [db] +func (s *state) GetTime(db database.Database, key ids.ID) (time.Time, error) { + timeInterface, err := s.Get(db, TimeTypeID, key) + if err != nil { + return time.Time{}, err + } + + if time, ok := timeInterface.(time.Time); ok { + return time, nil + } + + return time.Time{}, errWrongType +} + +// Prefix [ID] with [typeID] to prevent key collisions in the database +func (s *state) uniqueID(ID ids.ID, typeID uint64) ids.ID { + uIDCache, cacheExists := s.uniqueIDCaches[typeID] + if cacheExists { + if uID, uIDExists := uIDCache.Get(ID); uIDExists { // Get the uniqueID associated with [typeID] and [ID] + return uID.(ids.ID) + } + } else { + s.uniqueIDCaches[typeID] = &cache.LRU{Size: cacheSize} + } + uID := ID.Prefix(typeID) + s.uniqueIDCaches[typeID].Put(ID, uID) + return uID +} + +// NewState returns a new State +func NewState() State { + state := &state{ + unmarshallers: make(map[uint64]func([]byte) (interface{}, error)), + uniqueIDCaches: make(map[uint64]*cache.LRU), + } + + // Register ID, Status and time.Time so they can be put/get without client code + // having to register them + state.RegisterType(IDTypeID, unmarshalID) + state.RegisterType(StatusTypeID, unmarshalStatus) + state.RegisterType(TimeTypeID, unmarshalTime) + + return state +} + +// So we can marshal time +type timeMarshaller struct { + t time.Time +} + +func (tm *timeMarshaller) Bytes() []byte { + p := wrappers.Packer{MaxSize: 8} + p.PackLong(uint64(tm.t.Unix())) + return p.Bytes +} diff --git a/vms/components/state/state_test.go b/vms/components/state/state_test.go new file mode 100644 index 0000000..827bc26 --- /dev/null +++ 
b/vms/components/state/state_test.go @@ -0,0 +1,437 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// toy example of a block, just used for testing +type block struct { + parentID ids.ID + value uint64 +} + +const blockSize = 40 // hashing.HashLen (32) + length of uin64 (8) + +func (b *block) Bytes() []byte { + p := wrappers.Packer{Bytes: make([]byte, blockSize)} + p.PackFixedBytes(b.parentID.Bytes()) + p.PackLong(b.value) + return p.Bytes +} + +func unmarshalBlock(bytes []byte) (interface{}, error) { + p := wrappers.Packer{Bytes: bytes} + + parentID, err := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + if err != nil { + return nil, err + } + + value := p.UnpackLong() + + if p.Errored() { + return nil, p.Err + } + + return &block{ + parentID: parentID, + value: value, + }, nil +} + +// toy example of an account, just used for testing +type account struct { + id ids.ID + balance uint64 + nonce uint64 +} + +const accountSize = 32 + 8 + 8 + +func (acc *account) Bytes() []byte { + p := wrappers.Packer{Bytes: make([]byte, accountSize)} + p.PackFixedBytes(acc.id.Bytes()) + p.PackLong(acc.balance) + p.PackLong(acc.nonce) + return p.Bytes +} + +func unmarshalAccount(bytes []byte) (interface{}, error) { + p := wrappers.Packer{Bytes: bytes} + + id, err := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + if err != nil { + return nil, err + } + + balance := p.UnpackLong() + nonce := p.UnpackLong() + + if p.Errored() { + return nil, p.Err + } + + return &account{ + id: id, + balance: balance, + nonce: nonce, + }, nil +} + +// Ensure there is an error if someone tries to do a put without registering the type +func TestPutUnregistered(t *testing.T) { + // make a state and a database + state := NewState() + db 
:= memdb.New() + defer db.Close() + + // make an account + acc1 := &account{ + id: ids.NewID([32]byte{1, 2, 3}), + balance: 1, + nonce: 2, + } + + if err := state.Put(db, 1, ids.NewID([32]byte{1, 2, 3}), acc1); err == nil { + t.Fatal("should have failed because type ID is unregistred") + } + + // register type + if err := state.RegisterType(1, unmarshalAccount); err != nil { + t.Fatal(err) + } + + // should not error now + if err := state.Put(db, 1, ids.NewID([32]byte{1, 2, 3}), acc1); err != nil { + t.Fatal(err) + } +} + +// Ensure there is an error if someone tries to get the value associated with a +// key that doesn't exist +func TestKeyDoesNotExist(t *testing.T) { + // make a state and a database + state := NewState() + db := memdb.New() + defer db.Close() + + if _, err := state.Get(db, 1, ids.NewID([32]byte{1, 2, 3})); err == nil { + t.Fatal("should have failed because no such key or typeID exists") + } + + // register type with ID 1 + typeID := uint64(1) + if err := state.RegisterType(typeID, unmarshalAccount); err != nil { + t.Fatal(err) + } + + // Should still fail because there is no value with this key + if _, err := state.Get(db, typeID, ids.NewID([32]byte{1, 2, 3})); err == nil { + t.Fatal("should have failed because no such key exists") + } +} + +// Ensure there is an error if someone tries to register a type ID that already exists +func TestRegisterExistingTypeID(t *testing.T) { + // make a state and a database + state := NewState() + db := memdb.New() + defer db.Close() + + // register type with ID 1 + typeID := uint64(1) + if err := state.RegisterType(typeID, unmarshalBlock); err != nil { + t.Fatal(err) + } + + // try to register the same type ID + if err := state.RegisterType(typeID, unmarshalAccount); err == nil { + t.Fatal("Should have errored because typeID already registered") + } + +} + +// Ensure there is an error when someone tries to get a value using the wrong typeID +func TestGetWrongTypeID(t *testing.T) { + // make a state and a 
database + state := NewState() + db := memdb.New() + defer db.Close() + + // register type with ID 1 + blockTypeID := uint64(1) + if err := state.RegisterType(blockTypeID, unmarshalBlock); err != nil { + t.Fatal(err) + } + + // make and put a block + block := &block{ + parentID: ids.NewID([32]byte{4, 5, 6}), + value: 5, + } + blockID := ids.NewID([32]byte{1, 2, 3}) + err := state.Put(db, blockTypeID, blockID, block) + if err != nil { + t.Fatal(err) + } + + // try to get it using the right key but wrong typeID + if _, err := state.Get(db, 2, blockID); err == nil { + t.Fatal("should have failed because type ID is wrong") + } +} + +// Ensure that there is no error when someone puts two values with the same +// key but different type IDs +func TestSameKeyDifferentTypeID(t *testing.T) { + // make a state and a database + state := NewState() + db := memdb.New() + defer db.Close() + + // register block type with ID 1 + blockTypeID := uint64(1) + if err := state.RegisterType(blockTypeID, unmarshalBlock); err != nil { + t.Fatal(err) + } + + // register account type with ID 2 + accountTypeID := uint64(2) + if err := state.RegisterType(accountTypeID, unmarshalAccount); err != nil { + t.Fatal(err) + } + + sharedKey := ids.NewID([32]byte{1, 2, 3}) + + // make an account + acc := &account{ + id: ids.NewID([32]byte{1, 2, 3}), + balance: 1, + nonce: 2, + } + + // put it using sharedKey + err := state.Put(db, accountTypeID, sharedKey, acc) + if err != nil { + t.Fatal(err) + } + + // make a block + block1 := &block{ + parentID: ids.NewID([32]byte{4, 5, 6}), + value: 5, + } + + // put it using sharedKey + err = state.Put(db, blockTypeID, sharedKey, block1) + if err != nil { + t.Fatal(err) + } + + // ensure the account is still there and correct + if accInterface, err := state.Get(db, accountTypeID, sharedKey); err != nil { + t.Fatal(err) + } else if accFromState, ok := accInterface.(*account); !ok { + t.Fatal("should have been type *account") + } else if accFromState.balance != 
acc.balance { + t.Fatal("balances should be same") + } else if !accFromState.id.Equals(acc.id) { + t.Fatal("ids should be the same") + } else if accFromState.nonce != acc.nonce { + t.Fatal("nonces should be same") + } + + // ensure the block is still there and correct + if blockInterface, err := state.Get(db, blockTypeID, sharedKey); err != nil { + t.Fatal(err) + } else if blockFromState, ok := blockInterface.(*block); !ok { + t.Fatal("should have been type *block") + } else if !blockFromState.parentID.Equals(block1.parentID) { + t.Fatal("parentIDs should be same") + } else if blockFromState.value != block1.value { + t.Fatal("values should be same") + } +} + +// Ensure that overwriting a value works +func TestOverwrite(t *testing.T) { + // make a state and a database + state := NewState() + db := memdb.New() + defer db.Close() + + // register block type with ID 1 + blockTypeID := uint64(1) + if err := state.RegisterType(blockTypeID, unmarshalBlock); err != nil { + t.Fatal(err) + } + + // make a block + block1 := &block{ + parentID: ids.NewID([32]byte{4, 5, 6}), + value: 5, + } + + key := ids.NewID([32]byte{1, 2, 3}) + + // put it + err := state.Put(db, blockTypeID, key, block1) + if err != nil { + t.Fatal(err) + } + + // make another block + block2 := &block{ + parentID: ids.NewID([32]byte{100, 200, 1}), + value: 6, + } + + // put it with the same key + err = state.Put(db, blockTypeID, key, block2) + if err != nil { + t.Fatal(err) + } + + // ensure the first value was over-written + // get it and make sure it's right + if blockInterface, err := state.Get(db, blockTypeID, key); err != nil { + t.Fatal(err) + } else if blockFromState, ok := blockInterface.(*block); !ok { + t.Fatal("should have been type *block") + } else if !blockFromState.parentID.Equals(block2.parentID) { + t.Fatal("parentIDs should be same") + } else if blockFromState.value != block2.value { + t.Fatal("values should be same") + } +} + +// Put 4 values, 2 of one type and 2 of another +func 
TestHappyPath(t *testing.T) { + // make a state and a database + state := NewState() + db := memdb.New() + defer db.Close() + + accountTypeID := uint64(1) + + // register type account + err := state.RegisterType(accountTypeID, unmarshalAccount) + if err != nil { + t.Fatal(err) + } + + // make an account + acc1 := &account{ + id: ids.NewID([32]byte{1, 2, 3}), + balance: 1, + nonce: 2, + } + + // put it + err = state.Put(db, accountTypeID, acc1.id, acc1) + if err != nil { + t.Fatal(err) + } + + // get it and make sure it's right + if acc1Interface, err := state.Get(db, accountTypeID, acc1.id); err != nil { + t.Fatal(err) + } else if acc1FromState, ok := acc1Interface.(*account); !ok { + t.Fatal("should have been type *account") + } else if acc1FromState.balance != acc1.balance { + t.Fatal("balances should be same") + } else if !acc1FromState.id.Equals(acc1.id) { + t.Fatal("ids should be the same") + } else if acc1FromState.nonce != acc1.nonce { + t.Fatal("nonces should be same") + } + + // make another account + acc2 := &account{ + id: ids.NewID([32]byte{9, 2, 1}), + balance: 7, + nonce: 44, + } + + // put it + err = state.Put(db, accountTypeID, acc2.id, acc2) + if err != nil { + t.Fatal(err) + } + + // get it and make sure it's right + if acc2Interface, err := state.Get(db, accountTypeID, acc2.id); err != nil { + t.Fatal(err) + } else if acc2FromState, ok := acc2Interface.(*account); !ok { + t.Fatal("should have been type *account") + } else if acc2FromState.balance != acc2.balance { + t.Fatal("balances should be same") + } else if !acc2FromState.id.Equals(acc2.id) { + t.Fatal("ids should be the same") + } else if acc2FromState.nonce != acc2.nonce { + t.Fatal("nonces should be same") + } + + // register type block + blockTypeID := uint64(2) + err = state.RegisterType(blockTypeID, unmarshalBlock) + if err != nil { + t.Fatal(err) + } + + // make a block + block1ID := ids.NewID([32]byte{9, 9, 9}) + block1 := &block{ + parentID: ids.NewID([32]byte{4, 5, 6}), + value: 5, 
+ } + + // put it + err = state.Put(db, blockTypeID, block1ID, block1) + if err != nil { + t.Fatal(err) + } + + // get it and make sure it's right + if block1Interface, err := state.Get(db, blockTypeID, block1ID); err != nil { + t.Fatal(err) + } else if block1FromState, ok := block1Interface.(*block); !ok { + t.Fatal("should have been type *block") + } else if !block1FromState.parentID.Equals(block1.parentID) { + t.Fatal("parentIDs should be same") + } else if block1FromState.value != block1.value { + t.Fatal("values should be same") + } + + // make another block + block2ID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) + block2 := &block{ + parentID: ids.NewID([32]byte{10, 1, 2}), + value: 67, + } + + // put it + err = state.Put(db, blockTypeID, block2ID, block2) + if err != nil { + t.Fatal(err) + } + + // get it and make sure it's right + if block2Interface, err := state.Get(db, blockTypeID, block2ID); err != nil { + t.Fatal(err) + } else if block2FromState, ok := block2Interface.(*block); !ok { + t.Fatal("should have been type *block") + } else if !block2FromState.parentID.Equals(block2.parentID) { + t.Fatal("parentIDs should be same") + } else if block2FromState.value != block2.value { + t.Fatal("values should be same") + } +} diff --git a/vms/components/state/types.go b/vms/components/state/types.go new file mode 100644 index 0000000..fdbd724 --- /dev/null +++ b/vms/components/state/types.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "math" +) + +const ( + // IDTypeID is the type ID for ids.ID + IDTypeID uint64 = math.MaxUint64 - iota + // StatusTypeID is the type ID for choices.Status + StatusTypeID + // TimeTypeID is the type ID for time + TimeTypeID + // BlockTypeID is the type ID of blocks in state + BlockTypeID +) diff --git a/vms/components/state/unmarshal.go b/vms/components/state/unmarshal.go new file mode 100644 index 0000000..38caff9 --- /dev/null +++ b/vms/components/state/unmarshal.go @@ -0,0 +1,31 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/wrappers" +) + +func unmarshalID(bytes []byte) (interface{}, error) { + return ids.ToID(bytes) +} + +func unmarshalStatus(bytes []byte) (interface{}, error) { + p := wrappers.Packer{Bytes: bytes} + status := choices.Status(p.UnpackInt()) + if err := status.Valid(); err != nil { + return nil, err + } + return status, p.Err +} + +func unmarshalTime(bytes []byte) (interface{}, error) { + p := wrappers.Packer{Bytes: bytes} + unixTime := p.UnpackLong() + return time.Unix(int64(unixTime), 0), nil +} diff --git a/vms/components/verify/verification.go b/vms/components/verify/verification.go new file mode 100644 index 0000000..5dd1f3d --- /dev/null +++ b/vms/components/verify/verification.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package verify + +// Verifiable can be verified +type Verifiable interface { + Verify() error +} + +// All returns nil if all the verifiables were verified with no errors +func All(verifiables ...Verifiable) error { + for _, verifiable := range verifiables { + if err := verifiable.Verify(); err != nil { + return err + } + } + return nil +} diff --git a/vms/components/verify/verification_test.go b/vms/components/verify/verification_test.go new file mode 100644 index 0000000..02c4300 --- /dev/null +++ b/vms/components/verify/verification_test.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package verify + +import ( + "errors" + "testing" +) + +var ( + errTest = errors.New("non-nil error") +) + +type testVerifiable struct{ err error } + +func (v testVerifiable) Verify() error { return v.err } + +func TestAllNil(t *testing.T) { + err := All( + testVerifiable{}, + testVerifiable{}, + ) + if err != nil { + t.Fatal(err) + } +} + +func TestAllError(t *testing.T) { + err := All( + testVerifiable{}, + testVerifiable{err: errTest}, + ) + if err == nil { + t.Fatalf("Should have returned an error") + } +} diff --git a/vms/evm/block.go b/vms/evm/block.go new file mode 100644 index 0000000..ec47490 --- /dev/null +++ b/vms/evm/block.go @@ -0,0 +1,75 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "fmt" + + "github.com/ava-labs/go-ethereum/core/types" + "github.com/ava-labs/go-ethereum/rlp" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +// Block implements the snowman.Block interface +type Block struct { + id ids.ID + ethBlock *types.Block + vm *VM +} + +// ID implements the snowman.Block interface +func (b *Block) ID() ids.ID { return b.id } + +// Accept implements the snowman.Block interface +func (b *Block) Accept() { + b.vm.ctx.Log.Verbo("Block %s is accepted", b.ID()) + b.vm.updateStatus(b.ID(), choices.Accepted) +} + +// Reject implements the snowman.Block interface +func (b *Block) Reject() { + b.vm.ctx.Log.Verbo("Block %s is rejected", b.ID()) + b.vm.updateStatus(b.ID(), choices.Rejected) +} + +// Status implements the snowman.Block interface +func (b *Block) Status() choices.Status { + status := b.vm.getCachedStatus(b.ID()) + if status == choices.Unknown && b.ethBlock != nil { + return choices.Processing + } + return status +} + +// Parent implements the snowman.Block interface +func (b *Block) Parent() snowman.Block { + parentID := ids.NewID(b.ethBlock.ParentHash()) + block := &Block{ + id: parentID, + ethBlock: b.vm.getCachedBlock(parentID), + vm: b.vm, + } + b.vm.ctx.Log.Verbo("Parent(%s) has status: %s", block.ID(), block.Status()) + return block +} + +// Verify implements the snowman.Block interface +func (b *Block) Verify() error { + _, err := b.vm.chain.InsertChain([]*types.Block{b.ethBlock}) + return err +} + +// Bytes implements the snowman.Block interface +func (b *Block) Bytes() []byte { + res, err := rlp.EncodeToBytes(b.ethBlock) + if err != nil { + panic(err) + } + return res +} + +func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } diff --git a/vms/evm/database.go b/vms/evm/database.go new file mode 100644 index 0000000..de592e1 --- /dev/null +++ b/vms/evm/database.go @@ -0,0 +1,66 @@ 
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "errors" + + "github.com/ava-labs/go-ethereum/ethdb" + + "github.com/ava-labs/gecko/database" +) + +var ( + errOpNotSupported = errors.New("this operation is not supported") +) + +// Database implements ethdb.Database +type Database struct{ database.Database } + +// HasAncient returns an error as we don't have a backing chain freezer. +func (db Database) HasAncient(kind string, number uint64) (bool, error) { + return false, errOpNotSupported +} + +// Ancient returns an error as we don't have a backing chain freezer. +func (db Database) Ancient(kind string, number uint64) ([]byte, error) { return nil, errOpNotSupported } + +// Ancients returns an error as we don't have a backing chain freezer. +func (db Database) Ancients() (uint64, error) { return 0, errOpNotSupported } + +// AncientSize returns an error as we don't have a backing chain freezer. +func (db Database) AncientSize(kind string) (uint64, error) { return 0, errOpNotSupported } + +// AppendAncient returns an error as we don't have a backing chain freezer. +func (db Database) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error { + return errOpNotSupported +} + +// TruncateAncients returns an error as we don't have a backing chain freezer. +func (db Database) TruncateAncients(items uint64) error { return errOpNotSupported } + +// Sync returns an error as we don't have a backing chain freezer. 
+func (db Database) Sync() error { return errOpNotSupported }
+
+// NewBatch implements ethdb.Database
+func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} }
+
+// NewIterator implements ethdb.Database
+func (db Database) NewIterator() ethdb.Iterator { return db.Database.NewIterator() }
+
+// NewIteratorWithPrefix implements ethdb.Database
+func (db Database) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator {
+	return db.Database.NewIteratorWithPrefix(prefix)
+}
+
+// NewIteratorWithStart implements ethdb.Database
+func (db Database) NewIteratorWithStart(start []byte) ethdb.Iterator {
+	return db.Database.NewIteratorWithStart(start)
+}
+
+// Batch implements ethdb.Batch
+type Batch struct{ database.Batch }
+
+// Replay implements ethdb.Batch
+func (batch Batch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) }
diff --git a/vms/evm/factory.go b/vms/evm/factory.go
new file mode 100644
index 0000000..a4c0eca
--- /dev/null
+++ b/vms/evm/factory.go
@@ -0,0 +1,19 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package evm
+
+import (
+	"github.com/ava-labs/gecko/ids"
+)
+
+// ID this VM should be referenced by
+var (
+	ID = ids.NewID([32]byte{'e', 'v', 'm'})
+)
+
+// Factory ...
+type Factory struct{}
+
+// New ...
+func (f *Factory) New() interface{} { return &VM{} }
diff --git a/vms/evm/service.go b/vms/evm/service.go
new file mode 100644
index 0000000..70135cc
--- /dev/null
+++ b/vms/evm/service.go
@@ -0,0 +1,122 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package evm + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + + "github.com/ava-labs/coreth" + + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/common/hexutil" + "github.com/ava-labs/go-ethereum/core/types" + "github.com/ava-labs/go-ethereum/crypto" +) + +const ( + version = "Athereum 1.0" +) + +// test constants +const ( + GenesisTestAddr = "0x751a0b96e1042bee789452ecb20253fba40dbe85" + GenesisTestKey = "0xabd71b35d559563fea757f0f5edbde286fb8c043105b15abb7cd57189306d7d1" +) + +// DebugAPI introduces helper functions for debuging +type DebugAPI struct{ vm *VM } + +// SnowmanAPI introduces snowman specific functionality to the evm +type SnowmanAPI struct{ vm *VM } + +// NetAPI offers network related API methods +type NetAPI struct{ vm *VM } + +// NewNetAPI creates a new net API instance. +func NewNetAPI(vm *VM) *NetAPI { return &NetAPI{vm} } + +// Listening returns an indication if the node is listening for network connections. +func (s *NetAPI) Listening() bool { return true } // always listening + +// PeerCount returns the number of connected peers +func (s *NetAPI) PeerCount() hexutil.Uint { return hexutil.Uint(0) } // TODO: report number of connected peers + +// Version returns the current ethereum protocol version. 
+func (s *NetAPI) Version() string { return fmt.Sprintf("%d", s.vm.networkID) } + +// Web3API offers helper API methods +type Web3API struct{} + +// ClientVersion returns the version of the vm running +func (s *Web3API) ClientVersion() string { return version } + +// Sha3 returns the bytes returned by hashing [input] with Keccak256 +func (s *Web3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } + +// GetAcceptedFrontReply defines the reply that will be sent from the +// GetAcceptedFront API call +type GetAcceptedFrontReply struct { + Hash common.Hash `json:"hash"` + Number *big.Int `json:"number"` +} + +// GetAcceptedFront returns the last accepted block's hash and height +func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { + blk := api.vm.getLastAccepted().ethBlock + return &GetAcceptedFrontReply{ + Hash: blk.Hash(), + Number: blk.Number(), + }, nil +} + +// GetGenesisBalance returns the current funds in the genesis +func (api *DebugAPI) GetGenesisBalance(ctx context.Context) (*hexutil.Big, error) { + lastAccepted := api.vm.getLastAccepted() + api.vm.ctx.Log.Verbo("Currently accepted block front: %s", lastAccepted.ethBlock.Hash().Hex()) + state, err := api.vm.chain.BlockState(lastAccepted.ethBlock) + if err != nil { + return nil, err + } + return (*hexutil.Big)(state.GetBalance(common.HexToAddress(GenesisTestAddr))), nil +} + +// SpendGenesis funds +func (api *DebugAPI) SpendGenesis(ctx context.Context, nonce uint64) error { + api.vm.ctx.Log.Info("Spending the genesis") + + value := big.NewInt(1000000000000) + gasLimit := 21000 + gasPrice := big.NewInt(1000000000) + + genPrivateKey, err := crypto.HexToECDSA(GenesisTestKey[2:]) + if err != nil { + return err + } + bob, err := coreth.NewKey(rand.Reader) + if err != nil { + return err + } + + tx := types.NewTransaction(nonce, bob.Address, value, uint64(gasLimit), gasPrice, nil) + signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(api.vm.chainID), genPrivateKey) + if err != nil { + return err + } + + if err := api.vm.issueRemoteTxs([]*types.Transaction{signedTx}); err != nil { + return err + } + + return nil +} + +// IssueBlock to the chain +func (api *DebugAPI) IssueBlock(ctx context.Context) error { + api.vm.ctx.Log.Info("Issuing a new block") + + return api.vm.tryBlockGen() +} diff --git a/vms/evm/static_service.go b/vms/evm/static_service.go new file mode 100644 index 0000000..d3870ca --- /dev/null +++ b/vms/evm/static_service.go @@ -0,0 +1,22 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "encoding/json" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/gecko/utils/formatting" +) + +// StaticService defines the static API services exposed by the evm +type StaticService struct{} + +// BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is +// referenced in the UTXO. +func (*StaticService) BuildGenesis(_ context.Context, args *core.Genesis) (formatting.CB58, error) { + bytes, err := json.Marshal(args) + return formatting.CB58{Bytes: bytes}, err +} diff --git a/vms/evm/static_service_test.go b/vms/evm/static_service_test.go new file mode 100644 index 0000000..c492798 --- /dev/null +++ b/vms/evm/static_service_test.go @@ -0,0 +1,64 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "math/big" + "testing" + + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/params" + + "github.com/ava-labs/coreth/core" +) + +func TestBuildGenesis(t *testing.T) { + expected := "3wP629bGfSGj9trh1UNBp5qGRGCcma5d8ezLeSmd9hnUJjSMUJesHHoxbZNcVUC9CjH7PEGNA96htNTd1saZCMt1Mf1dZFG7JDhcYNok6RS4TZufejXdxbVVgquohSa7nCCcrXpiVeiRFwzLJAxyQbXzYRhaCRtcDDfCcqfaVdtkFsPbNeQ49pDTbEC5hVkmfopeQ2Zz8tAG5QXKBdbYBCukR3xNHJ4xDxeixmEwPr1odb42yQRYrL7xREKNn2LFoFwAWUjBTsCkf5GPNgY2GvvN9o8wFWXTroW5fp754DhpdxHYxkMTfuE9DGyNWHTyrEbrUHutUdsfitcSHVj5ctFtkN2wGCs3cyv1eRRNvFFMggWTbarjne6AYaeCrJ631qAu3CbrUtrTH5N2E6G2yQKX4sT4Sk3qWPJdsGXuT95iKKcgNn1u5QRHHw9DXXuGPpJjkcKQRGUCuqpXy61iF5RNPEwAwKDa8f2Y25WMmNgWynUuLj8iSAyePj7USPWk54QFUr86ApVzqAdzzdD1qSVScpmudGnGbz9UNXdzHqSot6XLrNTYsgkabiu6TGntFm7qywbCRmtNdBuT9aznGQdUVimjt5QzUz68HXhUxBzTkrz7yXfVGV5JcWxVHQXYS4oc41U5yu83mH3A7WBrZLVq6UyNrvQVbim5nDxeKKbALPxwzVwywjgY5cp39AvzGnY8CX2AtuBNnKmZaAvG8JWAkx3yxjnJrwWhLgpDQYcCvRp2jg1EPBqN8FKJxSPE6eedjDHDJfB57mNzyEtmg22BPnem3eLdiovX8awkhBUHdE7uPrapNSVprnS85u1saW2Kwza3FsS2jAM3LckGW8KdtfPTpHBTRKAUo49zZLuPsyGL5WduedGyAdaM3a2KPoyXuz4UbexTVUWFNypFvvgyoDS8FMxDCNoMMaD7y4yVnoDpSpVFEVZD6EuSGHe9U8Ew57xLPbjhepDx6" + + balance, success := new(big.Int).SetString("33b2e3c9fd0804000000000", 16) + if !success { + t.Fatal("Failed to initialize balance") + } + + args := core.Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: big.NewInt(43110), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + }, + Nonce: 0, + Timestamp: 0, + ExtraData: []byte{}, + GasLimit: 100000000, + Difficulty: big.NewInt(0), + Mixhash: 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), + Alloc: core.GenesisAlloc{ + common.HexToAddress("751a0b96e1042bee789452ecb20253fba40dbe85"): core.GenesisAccount{ + Balance: balance, + }, + }, + Number: 0, + GasUsed: 0, + ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + + ss := StaticService{} + result, err := ss.BuildGenesis(nil, &args) + if err != nil { + t.Fatal(err) + } + + if result.String() != expected { + t.Fatalf("StaticService.BuildGenesis:\nReturned: %s\nExpected: %s", result, expected) + } +} diff --git a/vms/evm/vm.go b/vms/evm/vm.go new file mode 100644 index 0000000..e1e9846 --- /dev/null +++ b/vms/evm/vm.go @@ -0,0 +1,498 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ava-labs/coreth" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/eth" + "github.com/ava-labs/coreth/node" + + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/core/types" + "github.com/ava-labs/go-ethereum/rlp" + "github.com/ava-labs/go-ethereum/rpc" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/utils/timer" + + commonEng "github.com/ava-labs/gecko/snow/engine/common" +) + +const ( + lastAcceptedKey = "snowman_lastAccepted" +) + +const ( + minBlockTime = 250 * time.Millisecond + maxBlockTime = 1000 * time.Millisecond + batchSize = 250 +) + +const ( + bdTimerStateMin = iota + bdTimerStateMax + bdTimerStateLong +) + +var ( + errEmptyBlock = 
errors.New("empty block") + errCreateBlock = errors.New("couldn't create block") + errUnknownBlock = errors.New("unknown block") + errBlockFrequency = errors.New("too frequent block issuance") + errUnsupportedFXs = errors.New("unsupported feature extensions") +) + +func maxDuration(x, y time.Duration) time.Duration { + if x > y { + return x + } + return y +} + +// VM implements the snowman.ChainVM interface +type VM struct { + ctx *snow.Context + + chainID *big.Int + networkID uint64 + chain *coreth.ETHChain + chaindb Database + newBlockChan chan *Block + networkChan chan<- commonEng.Message + newTxPoolHeadChan chan core.NewTxPoolHeadEvent + + txPoolStabilizedHead common.Hash + txPoolStabilizedOk chan struct{} + txPoolStabilizedLock sync.Mutex + + metalock sync.Mutex + blockCache, blockStatusCache cache.LRU + lastAccepted *Block + writingMetadata uint32 + + bdlock sync.Mutex + blockDelayTimer *timer.Timer + bdTimerState int8 + bdGenWaitFlag bool + bdGenFlag bool + + genlock sync.Mutex + txSubmitChan <-chan struct{} +} + +/* + ****************************************************************************** + ********************************* Snowman API ******************************** + ****************************************************************************** + */ + +// Initialize implements the snowman.ChainVM interface +func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + b []byte, + toEngine chan<- commonEng.Message, + fxs []*commonEng.Fx, +) error { + if len(fxs) > 0 { + return errUnsupportedFXs + } + + vm.ctx = ctx + vm.chaindb = Database{db} + g := new(core.Genesis) + err := json.Unmarshal(b, g) + if err != nil { + return err + } + + vm.chainID = g.Config.ChainID + + config := eth.DefaultConfig + config.ManualCanonical = true + config.Genesis = g + config.Miner.ManualMining = true + config.Miner.DisableUncle = true + if err := config.SetGCMode("archive"); err != nil { + panic(err) + } + nodecfg := node.Config{NoUSB: true} + chain := 
coreth.NewETHChain(&config, &nodecfg, nil, vm.chaindb) + vm.chain = chain + vm.networkID = config.NetworkId + chain.SetOnHeaderNew(func(header *types.Header) { + hid := make([]byte, 32) + _, err := rand.Read(hid) + if err != nil { + panic("cannot generate hid") + } + header.Extra = append(header.Extra, hid...) + }) + chain.SetOnSeal(func(block *types.Block) error { + if len(block.Transactions()) == 0 { + // this could happen due to the async logic of geth tx pool + vm.newBlockChan <- nil + return errEmptyBlock + } + return nil + }) + chain.SetOnSealFinish(func(block *types.Block) error { + vm.ctx.Log.Verbo("EVM sealed a block") + + blk := &Block{ + id: ids.NewID(block.Hash()), + ethBlock: block, + vm: vm, + } + vm.newBlockChan <- blk + vm.updateStatus(ids.NewID(block.Hash()), choices.Processing) + vm.txPoolStabilizedLock.Lock() + vm.txPoolStabilizedHead = block.Hash() + vm.txPoolStabilizedLock.Unlock() + return nil + }) + chain.SetOnQueryAcceptedBlock(func() *types.Block { + return vm.getLastAccepted().ethBlock + }) + vm.blockCache = cache.LRU{Size: 2048} + vm.blockStatusCache = cache.LRU{Size: 1024} + vm.newBlockChan = make(chan *Block) + vm.networkChan = toEngine + vm.blockDelayTimer = timer.NewTimer(func() { + vm.bdlock.Lock() + switch vm.bdTimerState { + case bdTimerStateMin: + vm.bdTimerState = bdTimerStateMax + vm.blockDelayTimer.SetTimeoutIn(maxDuration(maxBlockTime-minBlockTime, 0)) + case bdTimerStateMax: + vm.bdTimerState = bdTimerStateLong + } + tryAgain := vm.bdGenWaitFlag + vm.bdlock.Unlock() + if tryAgain { + vm.tryBlockGen() + } + }) + go ctx.Log.RecoverAndPanic(vm.blockDelayTimer.Dispatch) + + vm.bdTimerState = bdTimerStateLong + vm.bdGenWaitFlag = true + vm.newTxPoolHeadChan = make(chan core.NewTxPoolHeadEvent, 1) + vm.txPoolStabilizedOk = make(chan struct{}, 1) + chain.GetTxPool().SubscribeNewHeadEvent(vm.newTxPoolHeadChan) + // TODO: shutdown this go routine + go ctx.Log.RecoverAndPanic(func() { + for { + select { + case h := 
<-vm.newTxPoolHeadChan: + vm.txPoolStabilizedLock.Lock() + if vm.txPoolStabilizedHead == h.Block.Hash() { + vm.txPoolStabilizedOk <- struct{}{} + vm.txPoolStabilizedHead = common.Hash{} + } + vm.txPoolStabilizedLock.Unlock() + } + } + }) + chain.Start() + + var lastAccepted *types.Block + if b, err := vm.chaindb.Get([]byte(lastAcceptedKey)); err == nil { + var hash common.Hash + if err = rlp.DecodeBytes(b, &hash); err == nil { + if block := chain.GetBlockByHash(hash); block == nil { + vm.ctx.Log.Debug("lastAccepted block not found in chaindb") + } else { + lastAccepted = block + } + } + } + if lastAccepted == nil { + vm.ctx.Log.Debug("lastAccepted is unavailable, setting to the genesis block") + lastAccepted = chain.GetGenesisBlock() + } + vm.lastAccepted = &Block{ + id: ids.NewID(lastAccepted.Hash()), + ethBlock: lastAccepted, + vm: vm, + } + vm.ctx.Log.Info(fmt.Sprintf("lastAccepted = %s", vm.lastAccepted.ethBlock.Hash().Hex())) + + // TODO: shutdown this go routine + go vm.ctx.Log.RecoverAndPanic(func() { + vm.txSubmitChan = vm.chain.GetTxSubmitCh() + for { + select { + case <-vm.txSubmitChan: + vm.ctx.Log.Verbo("New tx detected, trying to generate a block") + vm.tryBlockGen() + case <-time.After(5 * time.Second): + vm.tryBlockGen() + } + } + }) + + return nil +} + +// Shutdown implements the snowman.ChainVM interface +func (vm *VM) Shutdown() { + vm.writeBackMetadata() + vm.chain.Stop() +} + +// BuildBlock implements the snowman.ChainVM interface +func (vm *VM) BuildBlock() (snowman.Block, error) { + vm.chain.GenBlock() + block := <-vm.newBlockChan + if block == nil { + return nil, errCreateBlock + } + // reset the min block time timer + vm.bdlock.Lock() + vm.bdTimerState = bdTimerStateMin + vm.bdGenWaitFlag = false + vm.bdGenFlag = false + vm.blockDelayTimer.SetTimeoutIn(minBlockTime) + vm.bdlock.Unlock() + + vm.ctx.Log.Debug("built block 0x%x", block.ID().Bytes()) + // make sure Tx Pool is updated + <-vm.txPoolStabilizedOk + return block, nil +} + +// 
ParseBlock implements the snowman.ChainVM interface +func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + ethBlock := new(types.Block) + if err := rlp.DecodeBytes(b, ethBlock); err != nil { + return nil, err + } + block := &Block{ + id: ids.NewID(ethBlock.Hash()), + ethBlock: ethBlock, + vm: vm, + } + vm.blockCache.Put(block.ID(), block) + return block, nil +} + +// GetBlock implements the snowman.ChainVM interface +func (vm *VM) GetBlock(id ids.ID) (snowman.Block, error) { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + block := vm.getBlock(id) + if block == nil { + return nil, errUnknownBlock + } + return block, nil +} + +// SetPreference sets what the current tail of the chain is +func (vm *VM) SetPreference(blkID ids.ID) { + err := vm.chain.SetTail(blkID.Key()) + vm.ctx.Log.AssertNoError(err) +} + +// LastAccepted returns the ID of the block that was last accepted +func (vm *VM) LastAccepted() ids.ID { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + return vm.lastAccepted.ID() +} + +// CreateHandlers makes new http handlers that can handle API calls +func (vm *VM) CreateHandlers() map[string]*commonEng.HTTPHandler { + handler := vm.chain.NewRPCHandler() + vm.chain.AttachEthService(handler, []string{"eth", "personal", "txpool"}) + handler.RegisterName("net", &NetAPI{vm}) + handler.RegisterName("snowman", &SnowmanAPI{vm}) + handler.RegisterName("web3", &Web3API{}) + handler.RegisterName("debug", &DebugAPI{vm}) + + return map[string]*commonEng.HTTPHandler{ + "/rpc": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler}, + "/ws": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler.WebsocketHandler([]string{"*"})}, + } +} + +// CreateStaticHandlers makes new http handlers that can handle API calls +func (vm *VM) CreateStaticHandlers() map[string]*commonEng.HTTPHandler { + handler := rpc.NewServer() + handler.RegisterName("static", &StaticService{}) + return 
map[string]*commonEng.HTTPHandler{ + "/rpc": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler}, + "/ws": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler.WebsocketHandler([]string{"*"})}, + } +} + +/* + ****************************************************************************** + *********************************** Helpers ********************************** + ****************************************************************************** + */ + +func (vm *VM) updateStatus(blockID ids.ID, status choices.Status) { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + if status == choices.Accepted { + vm.lastAccepted = vm.getBlock(blockID) + // TODO: improve this naive implementation + if atomic.SwapUint32(&vm.writingMetadata, 1) == 0 { + go vm.ctx.Log.RecoverAndPanic(vm.writeBackMetadata) + } + } + vm.blockStatusCache.Put(blockID, status) +} + +func (vm *VM) getCachedBlock(blockID ids.ID) *types.Block { + return vm.chain.GetBlockByHash(blockID.Key()) +} + +func (vm *VM) tryBlockGen() error { + vm.bdlock.Lock() + defer vm.bdlock.Unlock() + if vm.bdGenFlag { + // skip if one call already generates a block in this round + return nil + } + vm.bdGenWaitFlag = true + + vm.genlock.Lock() + defer vm.genlock.Unlock() + // get pending size + size, err := vm.chain.PendingSize() + if err != nil { + return err + } + if size == 0 { + return nil + } + + switch vm.bdTimerState { + case bdTimerStateMin: + return nil + case bdTimerStateMax: + if size < batchSize { + return nil + } + case bdTimerStateLong: + // timeout; go ahead and generate a new block anyway + } + select { + case vm.networkChan <- commonEng.PendingTxs: + // successfully push out the notification; this round ends + vm.bdGenFlag = true + default: + return errBlockFrequency + } + return nil +} + +func (vm *VM) getCachedStatus(blockID ids.ID) choices.Status { + vm.metalock.Lock() + defer vm.metalock.Unlock() + status := choices.Processing + + if statusIntf, ok := 
vm.blockStatusCache.Get(blockID); ok { + status = statusIntf.(choices.Status) + } else { + blk := vm.chain.GetBlockByHash(blockID.Key()) + if blk == nil { + return choices.Unknown + } + acceptedBlk := vm.lastAccepted.ethBlock + + // TODO: There must be a better way of doing this. + // Traverse up the chain from the lower block until the indices match + highBlock := blk + lowBlock := acceptedBlk + if highBlock.Number().Cmp(lowBlock.Number()) < 0 { + highBlock, lowBlock = lowBlock, highBlock + } + for highBlock.Number().Cmp(lowBlock.Number()) > 0 { + highBlock = vm.chain.GetBlockByHash(highBlock.ParentHash()) + } + + if highBlock.Hash() == lowBlock.Hash() { // on the same branch + if blk.Number().Cmp(acceptedBlk.Number()) <= 0 { + status = choices.Accepted + } + } else { // on different branches + status = choices.Rejected + } + } + + vm.blockStatusCache.Put(blockID, status) + return status +} + +func (vm *VM) getBlock(id ids.ID) *Block { + if blockIntf, ok := vm.blockCache.Get(id); ok { + return blockIntf.(*Block) + } + ethBlock := vm.getCachedBlock(id) + if ethBlock == nil { + return nil + } + block := &Block{ + id: ids.NewID(ethBlock.Hash()), + ethBlock: ethBlock, + vm: vm, + } + vm.blockCache.Put(id, block) + return block +} + +func (vm *VM) issueRemoteTxs(txs []*types.Transaction) error { + errs := vm.chain.AddRemoteTxs(txs) + for _, err := range errs { + if err != nil { + return err + } + } + return vm.tryBlockGen() +} + +func (vm *VM) writeBackMetadata() { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + b, err := rlp.EncodeToBytes(vm.lastAccepted.ethBlock.Hash()) + if err != nil { + vm.ctx.Log.Error("snowman-eth: error while writing back metadata") + return + } + vm.ctx.Log.Debug("writing back metadata") + vm.chaindb.Put([]byte(lastAcceptedKey), b) + atomic.StoreUint32(&vm.writingMetadata, 0) +} + +func (vm *VM) getLastAccepted() *Block { + vm.metalock.Lock() + defer vm.metalock.Unlock() + + return vm.lastAccepted +} diff --git 
a/vms/evm/vm_genesis_parse_test.go b/vms/evm/vm_genesis_parse_test.go new file mode 100644 index 0000000..9a113fb --- /dev/null +++ b/vms/evm/vm_genesis_parse_test.go @@ -0,0 +1,32 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "encoding/json" + "testing" + + "github.com/ava-labs/coreth/core" +) + +func TestParseGenesis(t *testing.T) { + genesis := []byte(`{"config":{"chainId":43110,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"751a0b96e1042bee789452ecb20253fba40dbe85":{"balance":"0x33b2e3c9fd0804000000000"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`) + + genesisBlock := new(core.Genesis) + err := json.Unmarshal(genesis, genesisBlock) + if err != nil { + t.Fatal(err) + } + + marshalledBytes, err := json.Marshal(genesisBlock) + if err != nil { + t.Fatal(err) + } + + secondGenesisBlock := new(core.Genesis) + err = json.Unmarshal(marshalledBytes, secondGenesisBlock) + if err != nil { + t.Fatal(err) + } +} diff --git a/vms/manager.go b/vms/manager.go new file mode 100644 index 0000000..1509d49 --- /dev/null +++ b/vms/manager.go @@ -0,0 +1,129 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package vms + +import ( + "fmt" + "sync" + + "github.com/ava-labs/gecko/api" + "github.com/ava-labs/gecko/snow/engine/common" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +// A VMFactory creates new instances of a VM +type VMFactory interface { + New() interface{} +} + +// Manager is a VM manager. +// It has the following functionality: +// 1) Register a VM factory. To register a VM is to associate its ID with a +// VMFactory which, when New() is called upon it, creates a new instance of that VM. +// 2) Get a VM factory. Given the ID of a VM that has been +// registered, return the factory that the ID is associated with. +// 3) Associate a VM with an alias +// 4) Get the ID of the VM by the VM's alias +// 5) Get the aliases of a VM +type Manager interface { + // Returns a factory that can create new instances of the VM + // with the given ID + GetVMFactory(ids.ID) (VMFactory, error) + + // Associate an ID with the factory that creates new instances + // of the VM with the given ID + RegisterVMFactory(ids.ID, VMFactory) error + + // Given an alias, return the ID of the VM associated with that alias + Lookup(string) (ids.ID, error) + + // Return the aliases associated with a VM + Aliases(ids.ID) []string + + // Give an alias to a VM + Alias(ids.ID, string) error +} + +// Implements Manager +type manager struct { + // Note: The string representation of a VM's ID is also considered to be an + // alias of the VM. That is, [VM].String() is an alias for the VM, too. + ids.Aliaser + + // Key: The key underlying a VM's ID + // Value: A factory that creates new instances of that VM + vmFactories map[[32]byte]VMFactory + + // The node's API server. 
+ // [manager] adds routes to this server to expose new API endpoints/services + apiServer *api.Server + + log logging.Logger +} + +// NewManager returns an instance of a VM manager +func NewManager(apiServer *api.Server, log logging.Logger) Manager { + m := &manager{ + vmFactories: make(map[[32]byte]VMFactory), + apiServer: apiServer, + log: log, + } + m.Initialize() + return m +} + +// Return a factory that can create new instances of the vm whose +// ID is [vmID] +func (m *manager) GetVMFactory(vmID ids.ID) (VMFactory, error) { + if factory, ok := m.vmFactories[vmID.Key()]; ok { + return factory, nil + } + return nil, fmt.Errorf("no vm with ID '%v' has been registered", vmID) + +} + +// Map [vmID] to [factory]. [factory] creates new instances of the vm whose +// ID is [vmID] +func (m *manager) RegisterVMFactory(vmID ids.ID, factory VMFactory) error { + key := vmID.Key() + if _, exists := m.vmFactories[key]; exists { + return fmt.Errorf("a vm with ID '%v' has already been registered", vmID) + } + if err := m.Alias(vmID, vmID.String()); err != nil { + return err + } + + m.vmFactories[key] = factory + + // add the static API endpoints + m.addStaticAPIEndpoints(vmID) + return nil +} + +// VMs can expose a static API (one that does not depend on the state of a particular chain.) +// This method adds to the node's API server the static API of the VM with ID [vmID]. +// This allows clients to call the VM's static API methods. 
+func (m *manager) addStaticAPIEndpoints(vmID ids.ID) { + vmFactory, err := m.GetVMFactory(vmID) + m.log.AssertNoError(err) + m.log.Debug("adding static API for VM with ID %s", vmID) + vm := vmFactory.New() + + staticVM, ok := vm.(common.StaticVM) + if !ok { + return + } + + // all static endpoints go to the vm endpoint, defaulting to the vm id + defaultEndpoint := "vm/" + vmID.String() + // use a single lock for this entire vm + lock := new(sync.RWMutex) + // register the static endpoints + for extension, service := range staticVM.CreateStaticHandlers() { + m.log.Verbo("adding static API endpoint: %s", defaultEndpoint+extension) + m.apiServer.AddRoute(service, lock, defaultEndpoint, extension, m.log) + } +} diff --git a/vms/platformvm/abort_block.go b/vms/platformvm/abort_block.go new file mode 100644 index 0000000..cb08d02 --- /dev/null +++ b/vms/platformvm/abort_block.go @@ -0,0 +1,57 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/core" +) + +// Abort being accepted results in the proposal of its parent (which must be a proposal block) +// being rejected. +type Abort struct { + CommonDecisionBlock `serialize:"true"` +} + +// Verify this block performs a valid state transition. +// +// The parent block must be a proposal +// +// This function also sets onAcceptDB database if the verification passes. +func (a *Abort) Verify() error { + // Abort is a decision, so its parent must be a proposal + if parent, ok := a.parentBlock().(*ProposalBlock); ok { + a.onAcceptDB, a.onAcceptFunc = parent.onAbort() + } else { + return errInvalidBlockType + } + + a.vm.currentBlocks[a.ID().Key()] = a + a.parentBlock().addChild(a) + return nil +} + +// newAbortBlock returns a new *Abort block where the block's parent, a proposal +// block, has ID [parentID]. 
// newAbortBlock builds the block, serializes it via Codec, and initializes the
// embedded core.Block with the serialized bytes.
func (vm *VM) newAbortBlock(parentID ids.ID) *Abort {
	abort := &Abort{
		CommonDecisionBlock: CommonDecisionBlock{
			CommonBlock: CommonBlock{
				Block: core.NewBlock(parentID),
				vm:    vm,
			},
		},
	}

	// We serialize this block as a Block so that it can be deserialized into a
	// Block
	blk := Block(abort)
	bytes, err := Codec.Marshal(&blk)
	if err != nil {
		// NOTE(review): the marshal error is swallowed and nil is returned;
		// callers must nil-check. Confirm whether this should be surfaced.
		return nil
	}

	abort.Block.Initialize(bytes, vm.SnowmanVM)
	return abort
}

// ---- diff --git a/vms/platformvm/account.go b/vms/platformvm/account.go (new file) ----

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package platformvm

import (
	"errors"
	"fmt"

	stdmath "math"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/math"
	"github.com/ava-labs/gecko/utils/units"
)

var (
	txFee = uint64(0) * units.MicroAva // The transaction fee
)

var (
	errOutOfSpends = errors.New("ran out of spends")
	errInvalidID   = errors.New("invalid ID")
)

// Account represents the Balance and nonce of a user's funds
type Account struct {
	// Address of this account
	// Its value is [privKey].PublicKey().Address() where privKey
	// is the private key that controls this account
	Address ids.ShortID `serialize:"true"`

	// Nonce this account was last spent with
	// Initially, this is set to 0. Therefore, the first nonce a transaction should
	// use for a new account is 1.
	Nonce uint64 `serialize:"true"`

	// Balance of $AVA held by this account
	Balance uint64 `serialize:"true"`
}

// Remove generates a new account state from removing [amount + txFee] from [a]'s balance.
// [nonce] is [a]'s next unused nonce
// Returns a copy of the account with the nonce consumed and the balance
// reduced; [a] itself is never mutated (value receiver).
func (a Account) Remove(amount, nonce uint64) (Account, error) {
	// Ensure account is in a valid state
	if err := a.Verify(); err != nil {
		return Account{}, err
	}

	// Ensure account's nonce isn't used up.
	// For this error to occur, an account would need to be issuing transactions
	// at 10k tps for ~ 80 million years
	newNonce, err := math.Add64(a.Nonce, 1)
	if err != nil {
		return Account{}, errOutOfSpends
	}

	// The caller must supply exactly the next nonce (replay protection)
	if newNonce != nonce {
		return Account{}, fmt.Errorf("account's last nonce is %d so expected tx nonce to be %d but was %d", a.Nonce, newNonce, nonce)
	}

	// Total debit is the requested amount plus the flat tx fee
	amountWithFee, err := math.Add64(amount, txFee)
	if err != nil {
		return Account{}, fmt.Errorf("send amount overflowed: tx fee (%d) + send amount (%d) > maximum value", txFee, amount)
	}

	newBalance, err := math.Sub64(a.Balance, amountWithFee)
	if err != nil {
		return Account{}, fmt.Errorf("insufficient funds: account balance %d < tx fee (%d) + send amount (%d)", a.Balance, txFee, amount)
	}

	// Ensure this tx wouldn't lock funds: once the nonce hits MaxUint64 the
	// account can never spend again, so its balance must be exactly 0
	if newNonce == stdmath.MaxUint64 && newBalance != 0 {
		return Account{}, fmt.Errorf("transaction would lock %d funds", newBalance)
	}

	return Account{
		Address: a.Address,
		Nonce:   newNonce,
		Balance: newBalance,
	}, nil
}

// Add returns the state of [a] after receiving the $AVA
// Note: Add does not consume a nonce, but refuses to credit an account whose
// nonce is exhausted (the funds could never be spent).
func (a Account) Add(amount uint64) (Account, error) {
	// Ensure account is in a valid state
	if err := a.Verify(); err != nil {
		return Account{}, err
	}

	// Ensure account's nonce isn't used up
	// For this error to occur, a user would need to be issuing transactions
	// at 10k tps for ~ 80 million years
	if a.Nonce == stdmath.MaxUint64 {
		return a, errOutOfSpends
	}

	// account's balance after receipt of staked $AVA
	newBalance, err := math.Add64(a.Balance, amount)
	if err != nil {
		return a, fmt.Errorf("account balance (%d) + staked $AVA (%d) exceeds maximum uint64", a.Balance, amount)
	}

	return Account{
		Address: a.Address,
		Nonce:   a.Nonce,
		Balance: newBalance,
	}, nil
}

// Verify that this account is in a valid state
// The only invariant checked is that the address is non-zero.
func (a Account) Verify() error {
	switch {
	case a.Address.IsZero():
		return errInvalidID
	default:
		return nil
	}
}

// Bytes returns the byte representation of this account
// NOTE(review): the Codec.Marshal error is deliberately discarded; on failure
// this returns nil — confirm callers tolerate a nil byte slice.
func (a Account) Bytes() []byte {
	bytes, _ := Codec.Marshal(a)
	return bytes
}

// newAccount is a convenience constructor for an Account with the given
// address, nonce, and balance. No validation is performed here.
func newAccount(Address ids.ShortID, Nonce, Balance uint64) Account {
	return Account{
		Address: Address,
		Nonce:   Nonce,
		Balance: Balance,
	}
}

// ---- diff --git a/vms/platformvm/account_test.go b/vms/platformvm/account_test.go (new file) ----

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package platformvm

import (
	"math"
	"testing"

	"github.com/ava-labs/gecko/ids"
)

// Verify must reject an account with a zero (empty) address.
func TestAccountVerifyNoID(t *testing.T) {
	account := Account{
		Address: ids.ShortID{},
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	if err := account.Verify(); err == nil {
		t.Fatal("should've failed because ID is empty")
	}
}

// Remove must fail when the account's nonce is exhausted.
func TestAccountRemoveMaxNonce(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   math.MaxUint64,
		Balance: defaultBalance,
	}

	_, err := account.Remove(defaultBalance-txFee, account.Nonce)
	if err == nil {
		t.Fatal("should have failed because account is out of nonces")
	}
}

// Remove must fail when the supplied nonce is not exactly Nonce+1.
func TestAccountRemoveWrongNonce(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	// Passing the current nonce (not the next one) must be rejected
	_, err := account.Remove(defaultBalance-txFee, account.Nonce)
	if err == nil {
		t.Fatal("should have failed because nonce in argument is wrong")
	}
}

// Remove must fail when the spend would leave a nonzero balance on an account
// whose nonce would become MaxUint64 (funds would be locked forever).
func TestAccountRemoveLockFunds(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   math.MaxUint64 - 1,
		Balance: defaultBalance,
	}

	_, err := account.Remove(defaultBalance-txFee-1, account.Nonce+1)
	if err == nil {
		t.Fatal("should have failed because funds would be locked")
	}
}

// Remove must fail on an account that doesn't pass Verify (empty address).
func TestAccountRemoveInvalid(t *testing.T) {
	account := Account{
		Address: ids.ShortID{},
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	_, err := account.Remove(defaultBalance-txFee, account.Nonce+1)
	if err == nil {
		t.Fatal("should have failed because account is invalid (ID is empty)")
	}
}

// Remove must fail when amount + txFee overflows uint64.
func TestRemoveOverflow(t *testing.T) {
	// this test is only meaningful if txFee is non-zero
	if txFee == 0 {
		return
	}
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   defaultNonce,
		Balance: math.MaxUint64,
	}

	_, err := account.Remove(account.Balance, account.Nonce+1)
	if err == nil {
		t.Fatal("should have failed because amount to remove plus tx fee overflows")
	}
}

// Remove all funds
// Spending the entire balance (minus fee) is valid and must zero the balance.
func TestRemoveAllFunds(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	account, err := account.Remove(defaultBalance-txFee, account.Nonce+1)
	if err != nil {
		t.Fatal(err)
	}

	if account.Balance != 0 {
		t.Fatal("account balance should be 0")
	}
	if account.Nonce != defaultNonce+1 {
		t.Fatal("nonce should've been inccremented")
	}
	if !account.Address.Equals(defaultKey.PublicKey().Address()) {
		t.Fatal("Address shouldn't have changed")
	}
}

// Add must fail on an account that doesn't pass Verify (empty address).
func TestAccountAddInvalid(t *testing.T) {
	account := Account{
		Address: ids.ShortID{},
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	if _, err := account.Add(1); err == nil {
		t.Fatal("should have error because account is invalid (has empty ID)")
	}
}

// Add must fail when the account's nonce is exhausted.
func TestAccountAddMaxNonce(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   math.MaxUint64,
		Balance: defaultBalance,
	}

	if _, err := account.Add(1); err == nil {
		t.Fatal("should have errored because account is out of nonces")
	}
}

// Add on a well-formed account must succeed.
func TestAccountAddValid(t *testing.T) {
	account := Account{
		Address: defaultKey.PublicKey().Address(),
		Nonce:   defaultNonce,
		Balance: defaultBalance,
	}

	if _, err := account.Add(1); err != nil {
		t.Fatal(err)
	}
}

// Round-trip an Account through the Codec and check every field survives.
func TestMarshalAccount(t *testing.T) {
	account := newAccount(
		defaultKey.PublicKey().Address(),
		defaultNonce,
		defaultBalance,
	)

	bytes, err := Codec.Marshal(account)
	if err != nil {
		t.Fatal(err)
	}

	accountUnmarshaled := &Account{}
	err = Codec.Unmarshal(bytes, accountUnmarshaled)
	if err != nil {
		t.Fatal(err)
	}

	if !account.Address.Equals(accountUnmarshaled.Address) {
		t.Fatal("IDs should match")
	}
	if account.Balance != accountUnmarshaled.Balance {
		t.Fatal("Balances should match")
	}
	if account.Nonce != accountUnmarshaled.Nonce {
		t.Fatal("Nonces don't match")
	}
}

// ---- diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_default_subnet_delegator_tx.go (new file) ----

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package platformvm

import (
	"fmt"

	"github.com/ava-labs/gecko/database"
	"github.com/ava-labs/gecko/database/versiondb"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/hashing"
)

// UnsignedAddDefaultSubnetDelegatorTx is an unsigned addDefaultSubnetDelegatorTx
type UnsignedAddDefaultSubnetDelegatorTx struct {
	// Embedded validator description: node ID, weight, and validation period
	DurationValidator `serialize:"true"`

	// ID of the network this tx was issued on (prevents cross-network replay)
	NetworkID uint32 `serialize:"true"`

	// Next unused nonce of the account paying the tx fee
	Nonce uint64 `serialize:"true"`

	// Address to receive the staked funds back
	Destination ids.ShortID `serialize:"true"`
}

// addDefaultSubnetDelegatorTx is a transaction that, if it is in a
// ProposalBlock that is accepted and followed by a Commit block, adds a
// delegator to the pending validator set of the default subnet. (That is, the
// validator in the tx will have their weight increase at some point in the
// future.) The transaction fee will be paid from the account who signed the
// transaction.
type addDefaultSubnetDelegatorTx struct {
	UnsignedAddDefaultSubnetDelegatorTx `serialize:"true"`

	// Sig is the signature of the public key whose corresponding account pays
	// the tx fee for this tx. ie the account with ID == [public key].Address()
	// pays the tx fee
	Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"`

	vm *VM

	// ID of this tx (hash of the signed tx bytes); set by initialize
	id ids.ID

	// Address recovered from [Sig]; set by SyntacticVerify. A non-zero value
	// also marks the tx as already verified.
	senderID ids.ShortID

	// Byte representation of the signed transaction
	bytes []byte
}

// initialize [tx]
// Sets the vm, the serialized bytes, and the tx ID (hash of those bytes).
func (tx *addDefaultSubnetDelegatorTx) initialize(vm *VM) error {
	tx.vm = vm
	bytes, err := Codec.Marshal(tx) // byte representation of the signed transaction
	tx.bytes = bytes
	tx.id = ids.NewID(hashing.ComputeHash256Array(bytes))
	return err
}

// ID returns this transaction's unique identifier
func (tx *addDefaultSubnetDelegatorTx) ID() ids.ID { return tx.id }

// SyntacticVerify return nil iff [tx] is valid
// If [tx] is valid, sets [tx.accountID]
// NOTE(review): unlike addDefaultSubnetValidatorTx.SyntacticVerify, this does
// not check tx.Destination.IsZero() — confirm whether that omission is intended.
func (tx *addDefaultSubnetDelegatorTx) SyntacticVerify() error {
	switch {
	case tx == nil:
		return errNilTx
	case !tx.senderID.IsZero():
		return nil // Only verify the transaction once
	case tx.id.IsZero():
		return errInvalidID
	case tx.NetworkID != tx.vm.Ctx.NetworkID:
		return errWrongNetworkID
	case tx.NodeID.IsZero():
		return errInvalidID
	case tx.Wght < MinimumStakeAmount: // Ensure validator is staking at least the minimum amount
		return errWeightTooSmall
	}

	// Ensure staking length is not too short or long
	stakingDuration := tx.Duration()
	if stakingDuration < MinimumStakingDuration {
		return errStakeTooShort
	} else if stakingDuration > MaximumStakingDuration {
		return errStakeTooLong
	}

	unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx)
	// Byte representation of the unsigned transaction
	unsignedBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return err
	}

	// get account to pay tx fee from
	// Recovering the public key from the signature both authenticates the tx
	// and identifies the fee-paying account.
	key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:])
	if err != nil {
		return err
	}
	tx.senderID = key.Address()

	return nil
}

// SemanticVerify this transaction is valid.
// Returns (onCommitDB, onAbortDB, onCommitFunc, onAbortFunc, error): the
// database views to apply if the proposal is committed or aborted, plus
// optional callbacks (both nil here).
func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
	if err := tx.SyntacticVerify(); err != nil {
		return nil, nil, nil, nil, err
	}

	// Ensure the proposed validator starts after the current timestamp
	currentTimestamp, err := tx.vm.getTimestamp(db)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	validatorStartTime := tx.StartTime()
	if !currentTimestamp.Before(validatorStartTime) {
		return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
			currentTimestamp,
			validatorStartTime)
	}

	// Get the account that is paying the transaction fee and, if the proposal is to add a validator
	// to the default subnet, providing the staked $AVA.
	// The ID of this account is the address associated with the public key that signed this tx
	accountID := tx.senderID
	account, err := tx.vm.getAccount(db, accountID)
	if err != nil {
		return nil, nil, nil, nil, errDBAccount
	}

	// The account if this block's proposal is committed and the validator is added
	// to the pending validator set. (Increase the account's nonce; decrease its balance.)
	newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee
	if err != nil {
		return nil, nil, nil, nil, err
	}

	// Ensure that the period this validator validates the specified subnet is a subnet of the time they validate the default subnet
	// First, see if they're currently validating the default subnet
	currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of default subnet: %v", err)
	}
	if dsValidator, err := currentEvents.getDefaultSubnetStaker(tx.NodeID); err == nil {
		// The delegation period must lie within the staker's validation period
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil, errDSValidatorSubset
		}
	} else {
		// They aren't currently validating the default subnet.
		// See if they will validate the default subnet in the future.
		pendingDSValidators, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
		if err != nil {
			return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of default subnet: %v", err)
		}
		dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.NodeID)
		if err != nil {
			// The node is neither a current nor pending default-subnet validator
			return nil, nil, nil, nil, errDSValidatorSubset
		}
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil, errDSValidatorSubset
		}
	}

	pendingEvents, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, err
	}

	pendingEvents.Add(tx) // add validator to set of pending validators

	// If this proposal is committed, update the pending validator set to include the validator,
	// update the validator's account by removing the staked $AVA
	onCommitDB := versiondb.New(db)
	if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, DefaultSubnetID); err != nil {
		return nil, nil, nil, nil, err
	}
	if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil {
		return nil, nil, nil, nil, err
	}

	// If this proposal is aborted, chain state doesn't change
	onAbortDB := versiondb.New(db)

	return onCommitDB, onAbortDB, nil, nil, nil
}

// InitiallyPrefersCommit returns true if the proposed validators start time is
// after the current wall clock time,
func (tx *addDefaultSubnetDelegatorTx) InitiallyPrefersCommit() bool {
	return tx.StartTime().After(tx.vm.clock.Time())
}

// newAddDefaultSubnetDelegatorTx creates, signs (with [key]), and initializes
// a new addDefaultSubnetDelegatorTx.
func (vm *VM) newAddDefaultSubnetDelegatorTx(
	nonce,
	weight,
	startTime,
	endTime uint64,
	nodeID ids.ShortID,
	destination ids.ShortID,
	networkID uint32,
	key *crypto.PrivateKeySECP256K1R,
) (*addDefaultSubnetDelegatorTx, error) {
	tx := &addDefaultSubnetDelegatorTx{
		UnsignedAddDefaultSubnetDelegatorTx: UnsignedAddDefaultSubnetDelegatorTx{
			DurationValidator: DurationValidator{
				Validator: Validator{
					NodeID: nodeID,
					Wght:   weight,
				},
				Start: startTime,
				End:   endTime,
			},
			NetworkID:   networkID,
			Nonce:       nonce,
			Destination: destination,
		},
	}

	// Sign the unsigned portion only; the signature is then embedded in the tx
	unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr. of unsigned tx
	if err != nil {
		return nil, err
	}

	sig, err := key.Sign(unsignedBytes)
	if err != nil {
		return nil, err
	}
	copy(tx.Sig[:], sig)

	return tx, tx.initialize(vm)
}

// ---- diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go (new file) ----

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package platformvm

import (
	"testing"
	"time"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/crypto"
)

// Exercises every rejection path of SyntacticVerify, then a valid tx.
func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) {
	vm := defaultVM()

	// Case 1: tx is nil
	var tx *addDefaultSubnetDelegatorTx
	if err := tx.SyntacticVerify(); err == nil {
		t.Fatal("should have errored because tx is nil")
	}

	// Case 2: Tx ID is nil
	tx, err := vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix()),
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	tx.id = ids.ID{}
	if err := tx.SyntacticVerify(); err == nil {
		t.Fatal("should have errored because ID is nil")
	}

	// Case 3: Wrong network ID
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix()),
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID+1,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	if err := tx.SyntacticVerify(); err == nil {
		t.Fatal("should have errored because the wrong network ID was used")
	}

	// Case 4: Missing Node ID
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix()),
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	tx.NodeID = ids.ShortID{}
	if err := tx.SyntacticVerify(); err == nil {
		t.Fatal("should have errored because NodeID is nil")
	}

	// Case 5: Not enough weight
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		MinimumStakeAmount-1,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix()),
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	if err := tx.SyntacticVerify(); err == nil {
		t.Fatal("should have errored because of not enough weight")
	}

	// Case 6: Validation length is too short
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix())-1,
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	err = tx.SyntacticVerify()
	if err == nil {
		t.Fatal("should have errored because validation length too short")
	}

	// Case 7: Validation length is too long
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix())+1,
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	err = tx.SyntacticVerify()
	if err == nil {
		t.Fatal("should have errored because validation length too long")
	}

	// Case 8: Valid
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix()),
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	if err := tx.SyntacticVerify(); err != nil {
		t.Fatal(err)
	}
}

// Exercises SemanticVerify against current/pending default-subnet validator
// sets, timestamp constraints, and fee payment.
func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) {
	vm := defaultVM()

	// Case 1: Proposed validator currently validating default subnet
	// but stops validating non-default subnet after stops validating default subnet
	// (note that defaultKey is a genesis validator)
	tx, err := vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix())+1,
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed because validator stops validating default subnet earlier than non-default subnet")
	}

	// Case 2: Proposed validator currently validating default subnet
	// and proposed non-default subnet validation period is subset of
	// default subnet validation period
	// (note that defaultKey is a genesis validator)
	// NOTE(review): this tx is built with the same arguments as Case 1
	// (end time +1), which contradicts the "subset" description — confirm
	// the intended end time here.
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(defaultValidateStartTime.Unix()),
		uint64(defaultValidateEndTime.Unix())+1,
		defaultKey.PublicKey().Address(),
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatalf("should have failed because the end time is outside the default subnets end time")
	}

	// Add a validator to pending validator set of default subnet
	key, err := vm.factory.NewPrivateKey()
	if err != nil {
		t.Fatal(err)
	}
	pendingDSValidatorID := key.PublicKey().Address()

	// starts validating default subnet 10 seconds after genesis
	DSStartTime := defaultGenesisTime.Add(10 * time.Second)
	DSEndTime := DSStartTime.Add(5 * MinimumStakingDuration)

	addDSTx, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,                   // nonce
		defaultStakeAmount,               // stake amount
		uint64(DSStartTime.Unix()),       // start time
		uint64(DSEndTime.Unix()),         // end time
		pendingDSValidatorID,             // node ID
		defaultKey.PublicKey().Address(), // destination
		NumberOfShares,                   // subnet
		testNetworkID,                    // network
		defaultKey,                       // key
	)
	if err != nil {
		t.Fatal(err)
	}

	// Case 3: Proposed validator isn't in pending or current validator sets
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(DSStartTime.Unix()),
		uint64(DSEndTime.Unix()),
		pendingDSValidatorID,
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed because validator not in the current or pending validator sets of the default subnet")
	}

	err = vm.putPendingValidators(
		vm.DB,
		&EventHeap{
			SortByStartTime: true,
			Txs:             []TimedTx{addDSTx},
		},
		DefaultSubnetID,
	)
	if err != nil {
		t.Fatal(err)
	}
	// Node with ID key.PublicKey().Address() now a pending validator for default subnet

	// Case 4: Proposed validator is pending validator of default subnet
	// but starts validating non-default subnet before default subnet
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet
		uint64(DSEndTime.Unix()),
		pendingDSValidatorID,
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed because validator starts validating non-default " +
			"subnet before starting to validate default subnet")
	}

	// Case 5: Proposed validator is pending validator of default subnet
	// but stops validating non-default subnet after default subnet
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(DSStartTime.Unix()),
		uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet
		pendingDSValidatorID,
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed because validator stops validating non-default " +
			"subnet after stops validating default subnet")
	}

	// Case 6: Proposed validator is pending validator of default subnet
	// and period validating non-default subnet is subset of time validating default subnet
	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(DSStartTime.Unix()), // same start time as for default subnet
		uint64(DSEndTime.Unix()),   // same end time as for default subnet
		pendingDSValidatorID,
		defaultKey.PublicKey().Address(),
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err != nil {
		t.Fatalf("should have passed verification")
	}

	// Case 7: Proposed validator start validating at/before current timestamp
	// First, advance the timestamp
	newTimestamp := defaultGenesisTime.Add(2 * time.Second)
	if err := vm.putTimestamp(vm.DB, newTimestamp); err != nil {
		t.Fatal(err)
	}

	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		defaultNonce+1,              // nonce
		defaultStakeAmount,          // weight
		uint64(newTimestamp.Unix()), // start time
		uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time
		defaultKey.PublicKey().Address(),                        // node ID
		defaultKey.PublicKey().Address(),                        // destination
		testNetworkID,                                           // network ID
		defaultKey,                                              // tx fee payer
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed verification because starts validating at current timestamp")
	}

	// reset the timestamp
	if err := vm.putTimestamp(vm.DB, defaultGenesisTime); err != nil {
		t.Fatal(err)
	}

	// Case 8: Account that pays tx fee doesn't have enough $AVA to pay tx fee
	txFeeSaved := txFee
	txFee = 1 // Do this so test works even when txFee is 0

	// Create new key whose account has no $AVA
	factory := crypto.FactorySECP256K1R{}
	newAcctKey, err := factory.NewPrivateKey()
	if err != nil {
		t.Fatal(err)
	}

	tx, err = vm.newAddDefaultSubnetDelegatorTx(
		1,                  // nonce (new account has nonce 0 so use nonce 1)
		defaultStakeAmount, // weight
		uint64(defaultValidateStartTime.Unix()), // start time
		uint64(defaultValidateEndTime.Unix()),   // end time
		defaultKey.PublicKey().Address(),        // node ID
		defaultKey.PublicKey().Address(),        // destination
		testNetworkID,                           // network ID
		newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer
	)
	if err != nil {
		t.Fatal(err)
	}
	_, _, _, _, err = tx.SemanticVerify(vm.DB)
	if err == nil {
		t.Fatal("should have failed verification because payer account has no $AVA to pay fee")
	}
	txFee = txFeeSaved // Reset tx fee
}

// ---- diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_default_subnet_validator_tx.go (new file) ----

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
+ +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errNilTx = errors.New("nil tx is invalid") + errWrongNetworkID = errors.New("tx was issued with a different network ID") + errWeightTooSmall = errors.New("weight of this validator is too low") + errStakeTooShort = errors.New("staking period is too short") + errStakeTooLong = errors.New("staking period is too long") + errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", NumberOfShares) +) + +// UnsignedAddDefaultSubnetValidatorTx is an unsigned addDefaultSubnetValidatorTx +type UnsignedAddDefaultSubnetValidatorTx struct { + DurationValidator `serialize:"true"` + NetworkID uint32 `serialize:"true"` + Nonce uint64 `serialize:"true"` + Destination ids.ShortID `serialize:"true"` + Shares uint32 `serialize:"true"` +} + +// addDefaultSubnetValidatorTx is a transaction that, if it is in a ProposeAddValidator block that +// is accepted and followed by a Commit block, adds a validator to the pending validator set of the default subnet. +// (That is, the validator in the tx will validate at some point in the future.) +type addDefaultSubnetValidatorTx struct { + UnsignedAddDefaultSubnetValidatorTx `serialize:"true"` + + // Signature on the byte repr. 
of UnsignedAddValidatorTx + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + vm *VM + id ids.ID + senderID ids.ShortID + + // Byte representation of the signed transaction + bytes []byte +} + +// initialize [tx] +func (tx *addDefaultSubnetValidatorTx) initialize(vm *VM) error { + tx.vm = vm + bytes, err := Codec.Marshal(tx) // byte representation of the signed transaction + tx.bytes = bytes + tx.id = ids.NewID(hashing.ComputeHash256Array(bytes)) + return err +} + +func (tx *addDefaultSubnetValidatorTx) ID() ids.ID { return tx.id } + +// SyntacticVerify that this transaction is well formed +// If [tx] is valid, this method also populates [tx.accountID] +func (tx *addDefaultSubnetValidatorTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case !tx.senderID.IsZero(): + return nil // Only verify the transaction once + case tx.id.IsZero(): + return errInvalidID + case tx.NetworkID != tx.vm.Ctx.NetworkID: + return errWrongNetworkID + case tx.NodeID.IsZero(): + return errInvalidID + case tx.Destination.IsZero(): + return errInvalidID + case tx.Wght < MinimumStakeAmount: // Ensure validator is staking at least the minimum amount + return errWeightTooSmall + case tx.Shares > NumberOfShares: // Ensure delegators shares are in the allowed amount + return errTooManyShares + } + + // Ensure staking length is not too short or long + stakingDuration := tx.Duration() + if stakingDuration < MinimumStakingDuration { + return errStakeTooShort + } else if stakingDuration > MaximumStakingDuration { + return errStakeTooLong + } + + // Byte representation of the unsigned transaction + unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) // the public key that signed [tx] + if err != nil { + return err + } + tx.senderID = key.Address() + + return nil +} + +// SemanticVerify this 
transaction is valid. +func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) { + if err := tx.SyntacticVerify(); err != nil { + return nil, nil, nil, nil, err + } + + // Ensure the proposed validator starts after the current time + currentTime, err := tx.vm.getTimestamp(db) + if err != nil { + return nil, nil, nil, nil, err + } + startTime := tx.StartTime() + if !currentTime.Before(startTime) { + return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)", + currentTime, + startTime) + } + + // Get the account that is paying the transaction fee and, if the proposal is to add a validator + // to the default subnet, providing the staked $AVA. + // The ID of this account is the address associated with the public key that signed this tx + accountID := tx.senderID + account, err := tx.vm.getAccount(db, accountID) + if err != nil { + return nil, nil, nil, nil, errDBAccount + } + + // If the transaction adds a validator to the default subnet, also deduct + // staked $AVA + amount := tx.Weight() + + // The account if this block's proposal is committed and the validator is added + // to the pending validator set. (Increase the account's nonce; decrease its balance.) 
+ newAccount, err := account.Remove(amount, tx.Nonce) + if err != nil { + return nil, nil, nil, nil, err + } + + // Ensure the proposed validator is not already a validator of the specified subnet + currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, err + } + currentValidators := validators.NewSet() + currentValidators.Set(tx.vm.getValidators(currentEvents)) + if currentValidators.Contains(tx.NodeID) { + return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the current default validator set", + tx.NodeID) + } + + // Ensure the proposed validator is not already slated to validate for the specified subnet + pendingEvents, err := tx.vm.getPendingValidators(db, DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, err + } + pendingValidators := validators.NewSet() + pendingValidators.Set(tx.vm.getValidators(pendingEvents)) + if pendingValidators.Contains(tx.NodeID) { + return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the pending default validator set", + tx.NodeID) + } + + pendingEvents.Add(tx) // add validator to set of pending validators + + // If this proposal is committed, update the pending validator set to include the validator, + // update the validator's account by removing the staked $AVA + onCommitDB := versiondb.New(db) + if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, DefaultSubnetID); err != nil { + return nil, nil, nil, nil, err + } + if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil { + return nil, nil, nil, nil, err + } + + // If this proposal is aborted, chain state doesn't change + onAbortDB := versiondb.New(db) + + onAccept := func() { + tx.vm.resetTimer() + } + return onCommitDB, onAbortDB, onAccept, nil, nil +} + +// InitiallyPrefersCommit returns true if the proposed validators start time is +// after the current wall clock time, +func (tx *addDefaultSubnetValidatorTx) InitiallyPrefersCommit() bool { + 
return tx.StartTime().After(tx.vm.clock.Time()) +} + +// NewAddDefaultSubnetValidatorTx returns a new NewAddDefaultSubnetValidatorTx +func (vm *VM) newAddDefaultSubnetValidatorTx(nonce, stakeAmt, startTime, endTime uint64, nodeID, destination ids.ShortID, shares, networkID uint32, key *crypto.PrivateKeySECP256K1R, +) (*addDefaultSubnetValidatorTx, error) { + tx := &addDefaultSubnetValidatorTx{ + UnsignedAddDefaultSubnetValidatorTx: UnsignedAddDefaultSubnetValidatorTx{ + NetworkID: networkID, + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: nodeID, + Wght: stakeAmt, + }, + Start: startTime, + End: endTime, + }, + Nonce: nonce, + Destination: destination, + Shares: shares, + }, + } + + unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr. of unsigned tx + if err != nil { + return nil, err + } + + sig, err := key.Sign(unsignedBytes) // Sign the transaction + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) // have to do this because sig has type []byte but tx.Sig has type [65]byte + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/add_default_subnet_validator_tx_test.go b/vms/platformvm/add_default_subnet_validator_tx_test.go new file mode 100644 index 0000000..bca9188 --- /dev/null +++ b/vms/platformvm/add_default_subnet_validator_tx_test.go @@ -0,0 +1,313 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" +) + +func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { + vm := defaultVM() + + // Case 1: tx is nil + var tx *addDefaultSubnetValidatorTx + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because tx is nil") + } + + // Case 2: ID is nil + tx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.id = ids.ID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because ID is nil") + } + + // Case 3: Wrong Network ID + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID+1, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because the wrong network ID was used") + } + + // Case 4: Node ID is nil + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.NodeID = ids.ShortID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because node ID is nil") + } + + // Case 5: Destination ID is nil + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + 
uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.Destination = ids.ShortID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because destination ID is nil") + } + + // Case 6: Stake amount too small + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + MinimumStakeAmount-1, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because stake amount too small") + } + + // Case 7: Too many shares + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares+1, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because of too many shares") + } + + // Case 8.1: Validation length is too short + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix())-1, + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because validation length too short") + } + + // Case 8.2: Validation length is negative + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + 
uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Unix())-1, + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because validation length too short") + } + + // Case 9: Validation length is too long + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix())+1, + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because validation length too long") + } + + // Case 10: Valid + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err != nil { + t.Fatal(err) + } +} + +// Test AddDefaultSubnetValidatorTx.SemanticVerify +func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { + vm := defaultVM() + + // Case 1: Validator's start time too early + tx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix())-1, + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if _, _, _, _, err := tx.SemanticVerify(vm.DB); err == nil { + t.Fatal("should've errored because start time too early") + } + + // Case 2: Validator 
doesn't have enough $AVA to cover stake amount + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultBalance-txFee+1, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + defaultKey.PublicKey().Address(), + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if _, _, _, _, err := tx.SemanticVerify(vm.DB); err == nil { + t.Fatal("should've errored because validator doesn't have enough $AVA to cover stake") + } + + // Case 3: Validator already validating default subnet + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if _, _, _, _, err := tx.SemanticVerify(vm.DB); err == nil { + t.Fatal("should've errored because validator already validating") + } + + // Case 4: Validator in pending validator set of default subnet + key, err := vm.factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + startTime := defaultGenesisTime.Add(1 * time.Second) + tx, err = vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultStakeAmount, // stake amount + uint64(startTime.Unix()), // start time + uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time + key.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + NumberOfShares, // shares + testNetworkID, // network + defaultKey, // key + ) + if err != nil { + t.Fatal(err) + } + + // Put validator in pending validator set + err = vm.putPendingValidators(vm.DB, + &EventHeap{ + SortByStartTime: true, + Txs: []TimedTx{tx}, + }, + DefaultSubnetID, + ) + if err != nil { + t.Fatal(err) + } + + if _, _, _, _, err := tx.SemanticVerify(vm.DB); err == nil 
{ + t.Fatal("should have failed because validator in pending validator set") + } +} diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_nondefault_subnet_validator_tx.go new file mode 100644 index 0000000..6173950 --- /dev/null +++ b/vms/platformvm/add_nondefault_subnet_validator_tx.go @@ -0,0 +1,361 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errSigsNotSorted = errors.New("control signatures not sorted") + errWrongNumberOfSignatures = errors.New("wrong number of signatures") + errDSValidatorSubset = errors.New("all subnets must be a subset of the default subnet") +) + +// UnsignedAddNonDefaultSubnetValidatorTx is an unsigned addNonDefaultSubnetValidatorTx +type UnsignedAddNonDefaultSubnetValidatorTx struct { + // The validator + SubnetValidator `serialize:"true"` + + // ID of the network + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of the account paying the tx fee + Nonce uint64 `serialize:"true"` +} + +// addNonDefaultSubnetValidatorTx is a transaction that, if it is in a ProposeAddValidator block that +// is accepted and followed by a Commit block, adds a validator to the pending validator set of a subnet +// other than the default subnet. +// (That is, the validator in the tx will validate at some point in the future.) 
+// The transaction fee will be paid from the account whose ID is [Sigs[0].Address()] +type addNonDefaultSubnetValidatorTx struct { + UnsignedAddNonDefaultSubnetValidatorTx `serialize:"true"` + + // When a subnet is created, it specifies a set of public keys ("control keys") such + // that in order to add a validator to the subnet, a tx must be signed with + // a certain threshold of those keys + // Each element of ControlSigs is the signature of one of those keys + ControlSigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` + + // PayerSig is the signature of the public key whose corresponding account pays + // the tx fee for this tx + // ie the account with ID == [public key].Address() pays the tx fee + PayerSig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + vm *VM + id ids.ID + controlIDs []ids.ShortID + senderID ids.ShortID + + // Byte representation of the signed transaction + bytes []byte +} + +// initialize [tx] +func (tx *addNonDefaultSubnetValidatorTx) initialize(vm *VM) error { + bytes, err := Codec.Marshal(tx) // byte representation of the signed transaction + if err != nil { + return err + } + tx.vm = vm + tx.bytes = bytes + tx.id = ids.NewID(hashing.ComputeHash256Array(bytes)) + return nil +} + +func (tx *addNonDefaultSubnetValidatorTx) ID() ids.ID { return tx.id } + +// SyntacticVerify return nil iff [tx] is valid +// If [tx] is valid, sets [tx.accountID] +func (tx *addNonDefaultSubnetValidatorTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case !tx.senderID.IsZero(): + return nil // Only verify the transaction once + case tx.id.IsZero(): + return errInvalidID + case tx.NetworkID != tx.vm.Ctx.NetworkID: + return errWrongNetworkID + case tx.NodeID.IsZero(): + return errInvalidID + case tx.Subnet.IsZero(): + return errInvalidID + case tx.Wght == 0: // Ensure the validator has some weight + return errWeightTooSmall + case !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs): + return errSigsNotSorted + } + + // 
Ensure staking length is not too short or long + stakingDuration := tx.Duration() + if stakingDuration < MinimumStakingDuration { + return errStakeTooShort + } else if stakingDuration > MaximumStakingDuration { + return errStakeTooLong + } + + // Byte representation of the unsigned transaction + unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return err + } + unsignedBytesHash := hashing.ComputeHash256(unsignedBytes) + + tx.controlIDs = make([]ids.ShortID, len(tx.ControlSigs)) + // recover control signatures + for i, sig := range tx.ControlSigs { + key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:]) + if err != nil { + return err + } + tx.controlIDs[i] = key.Address() + } + + // get account to pay tx fee from + key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, tx.PayerSig[:]) + if err != nil { + return err + } + tx.senderID = key.Address() + + return nil +} + +// getDefaultSubnetStaker ... +func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*addDefaultSubnetValidatorTx, error) { + for _, txIntf := range h.Txs { + tx, ok := txIntf.(*addDefaultSubnetValidatorTx) + if !ok { + continue + } + + if id.Equals(tx.NodeID) { + return tx, nil + } + } + return nil, errors.New("couldn't find validator in the default subnet") +} + +// SemanticVerify this transaction is valid. 
+func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) { + // Ensure tx is syntactically valid + if err := tx.SyntacticVerify(); err != nil { + return nil, nil, nil, nil, err + } + + // Get info about the subnet we're adding a validator to + subnets, err := tx.vm.getSubnets(db) + if err != nil { + return nil, nil, nil, nil, err + } + var subnet *CreateSubnetTx + for _, sn := range subnets { + if sn.ID.Equals(tx.SubnetID()) { + subnet = sn + break + } + } + if subnet == nil { + return nil, nil, nil, nil, fmt.Errorf("there is no subnet with ID %s", tx.SubnetID()) + } + + // Ensure the sigs on [tx] are valid + if len(tx.ControlSigs) != int(subnet.Threshold) { + return nil, nil, nil, nil, fmt.Errorf("expected tx to have %d control sigs but has %d", subnet.Threshold, len(tx.ControlSigs)) + } + if !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs) { + return nil, nil, nil, nil, errors.New("control signatures aren't sorted") + } + + controlKeys := ids.ShortSet{} + controlKeys.Add(subnet.ControlKeys...) 
+ for _, controlID := range tx.controlIDs { + if !controlKeys.Contains(controlID) { + return nil, nil, nil, nil, errors.New("tx has control signature from key not in subnet's ControlKeys") + } + } + + // Ensure that the period this validator validates the specified subnet is a subnet of the time they validate the default subnet + // First, see if they're currently validating the default subnet + currentDSValidators, err := tx.vm.getCurrentValidators(db, DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of default subnet: %v", err) + } + + if dsValidator, err := currentDSValidators.getDefaultSubnetStaker(tx.NodeID); err == nil { + if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) { + return nil, nil, nil, nil, + fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]", + tx.DurationValidator.StartTime(), tx.DurationValidator.EndTime(), + dsValidator.StartTime(), dsValidator.EndTime()) + } + } else { + // They aren't currently validating the default subnet. + // See if they will validate the default subnet in the future. 
+ pendingDSValidators, err := tx.vm.getPendingValidators(db, DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of default subnet: %v", err) + } + dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.NodeID) + if err != nil { + return nil, nil, nil, nil, + fmt.Errorf("validator would not be validating default subnet while validating non-default subnet") + } + if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) { + return nil, nil, nil, nil, + fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]", + tx.DurationValidator.StartTime(), tx.DurationValidator.EndTime(), + dsValidator.StartTime(), dsValidator.EndTime()) + } + } + + // Ensure the proposed validator starts after the current timestamp + currentTimestamp, err := tx.vm.getTimestamp(db) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't get current timestamp: %v", err) + } + validatorStartTime := tx.StartTime() + if !currentTimestamp.Before(validatorStartTime) { + return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)", + currentTimestamp, + validatorStartTime) + } + + // Get the account that is paying the transaction fee and, if the proposal is to add a validator + // to the default subnet, providing the staked $AVA. + // The ID of this account is the address associated with the public key that signed this tx + accountID := tx.senderID + account, err := tx.vm.getAccount(db, accountID) + if err != nil { + return nil, nil, nil, nil, errDBAccount + } + + // The account if this block's proposal is committed and the validator is added + // to the pending validator set. (Increase the account's nonce; decrease its balance.) 
+ newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee + if err != nil { + return nil, nil, nil, nil, err + } + + // Ensure the proposed validator is not already a validator of the specified subnet + currentEvents, err := tx.vm.getCurrentValidators(db, tx.Subnet) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of subnet %s: %v", tx.Subnet, err) + } + currentValidators := validators.NewSet() + currentValidators.Set(tx.vm.getValidators(currentEvents)) + if currentValidators.Contains(tx.NodeID) { + return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the current validator set for subnet with ID %s", + tx.NodeID, + tx.Subnet, + ) + } + + // Ensure the proposed validator is not already slated to validate for the specified subnet + pendingEvents, err := tx.vm.getPendingValidators(db, tx.Subnet) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of subnet %s: %v", tx.Subnet, err) + } + pendingValidators := validators.NewSet() + pendingValidators.Set(tx.vm.getValidators(pendingEvents)) + if pendingValidators.Contains(tx.NodeID) { + return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the pending validator set for subnet with ID %s", + tx.NodeID, + tx.Subnet, + ) + } + + pendingEvents.Add(tx) // add validator to set of pending validators + + // If this proposal is committed, update the pending validator set to include the validator, + // update the validator's account by removing the staked $AVA + onCommitDB := versiondb.New(db) + if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, tx.Subnet); err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't put current validators: %v", err) + } + if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil { + return nil, nil, nil, nil, fmt.Errorf("couldn't put account: %v", err) + } + + // If this proposal is aborted, chain state doesn't change + onAbortDB := 
versiondb.New(db) + + return onCommitDB, onAbortDB, nil, nil, nil +} + +// InitiallyPrefersCommit returns true if the proposed validators start time is +// after the current wall clock time, +func (tx *addNonDefaultSubnetValidatorTx) InitiallyPrefersCommit() bool { + return tx.StartTime().After(tx.vm.clock.Time()) +} + +func (vm *VM) newAddNonDefaultSubnetValidatorTx( + nonce, + weight, + startTime, + endTime uint64, + nodeID ids.ShortID, + subnetID ids.ID, + networkID uint32, + controlKeys []*crypto.PrivateKeySECP256K1R, + payerKey *crypto.PrivateKeySECP256K1R, +) (*addNonDefaultSubnetValidatorTx, error) { + tx := &addNonDefaultSubnetValidatorTx{ + UnsignedAddNonDefaultSubnetValidatorTx: UnsignedAddNonDefaultSubnetValidatorTx{ + SubnetValidator: SubnetValidator{ + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: nodeID, + Wght: weight, + }, + Start: startTime, + End: endTime, + }, + Subnet: subnetID, + }, + NetworkID: networkID, + Nonce: nonce, + }, + } + + unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr. 
of unsigned tx + if err != nil { + return nil, err + } + unsignedHash := hashing.ComputeHash256(unsignedBytes) + + // Sign this tx with each control key + tx.ControlSigs = make([][crypto.SECP256K1RSigLen]byte, len(controlKeys)) + for i, key := range controlKeys { + sig, err := key.SignHash(unsignedHash) + if err != nil { + return nil, err + } + // tx.ControlSigs[i] is type [65]byte but sig is type []byte + // so we have to do the below + copy(tx.ControlSigs[i][:], sig) + } + crypto.SortSECP2561RSigs(tx.ControlSigs) + + // Sign this tx with the key of the tx fee payer + sig, err := payerKey.SignHash(unsignedHash) + if err != nil { + return nil, err + } + copy(tx.PayerSig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go new file mode 100644 index 0000000..2d63f06 --- /dev/null +++ b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go @@ -0,0 +1,634 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "reflect" + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" +) + +func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { + vm := defaultVM() + + // Case 1: tx is nil + var tx *addNonDefaultSubnetValidatorTx + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because tx is nil") + } + + // Case 2: Tx ID is nil + tx, err := vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.id = ids.ID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because ID is nil") + } + + // Case 3: Wrong network ID + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID+1, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because the wrong network ID was used") + } + + // Case 4: Missing Node ID + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.NodeID = ids.ShortID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have 
errored because NodeID is nil") + } + + // Case 5: Missing Subnet ID + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.Subnet = ids.ID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because Subnet ID is nil") + } + + // Case 6: No weight + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + 0, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have errored because of no weight") + } + + // Case 7: ControlSigs not sorted + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix())-1, + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + tx.ControlSigs[0], tx.ControlSigs[1] = tx.ControlSigs[1], tx.ControlSigs[0] + if err != nil { + t.Fatal(err) + } + err = tx.SyntacticVerify() + if err == nil { + t.Fatal("should have errored because addresses weren't sorted") + } + + // Case 8: Validation length is too short + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix())-1, + defaultKey.PublicKey().Address(), + 
testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + err = tx.SyntacticVerify() + if err == nil { + t.Fatal("should have errored because validation length too short") + } + + // Case 9: Validation length is too long + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix())+1, + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + err = tx.SyntacticVerify() + if err == nil { + t.Fatal("should have errored because validation length too long") + } + + // Case 10: Valid + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + if err := tx.SyntacticVerify(); err != nil { + t.Fatal(err) + } +} + +func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { + vm := defaultVM() + + // Case 1: Proposed validator currently validating default subnet + // but stops validating non-default subnet after stops validating default subnet + // (note that defaultKey is a genesis validator) + tx, err := vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix())+1, + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if 
err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed because validator stops validating default subnet earlier than non-default subnet") + } + + // Case 2: Proposed validator currently validating default subnet + // and proposed non-default subnet validation period is subset of + // default subnet validation period + // (note that defaultKey is a genesis validator) + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err != nil { + t.Log(testSubnet1.ID) + subnets, err := vm.getSubnets(vm.DB) + if err != nil { + t.Fatal(err) + } + if len(subnets) == 0 { + t.Fatal("no subnets found") + } + t.Logf("subnets[0].ID: %v", subnets[0].ID) + t.Fatal(err) + } + + // Add a validator to pending validator set of default subnet + key, err := vm.factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + pendingDSValidatorID := key.PublicKey().Address() + + // starts validating default subnet 10 seconds after genesis + DSStartTime := defaultGenesisTime.Add(10 * time.Second) + DSEndTime := DSStartTime.Add(5 * MinimumStakingDuration) + + addDSTx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultStakeAmount, // stake amount + uint64(DSStartTime.Unix()), // start time + uint64(DSEndTime.Unix()), // end time + pendingDSValidatorID, // node ID + defaultKey.PublicKey().Address(), // destination + NumberOfShares, // subnet + testNetworkID, // network + defaultKey, // key + ) + if err != nil { + t.Fatal(err) + } + + // Case 3: Proposed validator isn't in pending or current validator sets + tx, err = 
vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(DSStartTime.Unix()), // start validating non-default subnet before default subnet + uint64(DSEndTime.Unix()), + pendingDSValidatorID, + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed because validator not in the current or pending validator sets of the default subnet") + } + + err = vm.putPendingValidators( + vm.DB, + &EventHeap{ + SortByStartTime: true, + Txs: []TimedTx{addDSTx}, + }, + DefaultSubnetID, + ) + if err != nil { + t.Fatal(err) + } + // Node with ID key.PublicKey().Address() now a pending validator for default subnet + + // Case 4: Proposed validator is pending validator of default subnet + // but starts validating non-default subnet before default subnet + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet + uint64(DSEndTime.Unix()), + pendingDSValidatorID, + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed because validator starts validating non-default " + + "subnet before starting to validate default subnet") + } + + // Case 5: Proposed validator is pending validator of default subnet + // but stops validating non-default subnet after default subnet + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(DSStartTime.Unix()), + uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet + pendingDSValidatorID, + testSubnet1.ID, + 
testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed because validator stops validating non-default " + + "subnet after stops validating default subnet") + } + + // Case 6: Proposed validator is pending validator of default subnet + // and period validating non-default subnet is subset of time validating default subnet + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(DSStartTime.Unix()), // same start time as for default subnet + uint64(DSEndTime.Unix()), // same end time as for default subnet + pendingDSValidatorID, + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err != nil { + t.Fatalf("should have passed verification") + } + + // Case 7: Proposed validator start validating at/before current timestamp + // First, advance the timestamp + newTimestamp := defaultGenesisTime.Add(2 * time.Second) + if err := vm.putTimestamp(vm.DB, newTimestamp); err != nil { + t.Fatal(err) + } + + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(newTimestamp.Unix()), // start time + uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because starts validating at current timestamp") + } + + // reset the timestamp + if err := 
vm.putTimestamp(vm.DB, defaultGenesisTime); err != nil { + t.Fatal(err) + } + + // Case 7: Account that pays tx fee doesn't have enough $AVA to pay tx fee + txFeeSaved := txFee + txFee = 1 // Do this so test works even when txFee is 0 + + // Create new key whose account has no $AVA + factory := crypto.FactorySECP256K1R{} + newAcctKey, err := factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + 1, // nonce (new account has nonce 0 so use nonce 1) + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because payer account has no $AVA to pay fee") + } + txFee = txFeeSaved // Reset tx fee + + // Case 8: Proposed validator already validating the non-default subnet + // First, add validator as validator of non-default subnet + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + err = vm.putCurrentValidators(vm.DB, + &EventHeap{ + SortByStartTime: false, + Txs: []TimedTx{tx}, + }, + testSubnet1.ID, + ) + // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID + + tx, err = 
vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because validator already validating the specified subnet") + } + + // reset validator heap + err = vm.putCurrentValidators(vm.DB, + &EventHeap{ + SortByStartTime: false, + }, + testSubnet1.ID, + ) + + // Case 9: Too many signatures + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time + uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time + keys[0].PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because tx has 3 signatures but only 2 needed") + } + + // Case 10: Too few signatures + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time + uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time + keys[0].PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[2]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + 
} + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because tx has 1 signatures but 2 needed") + } + + // Case 10: Control Signature from invalid key + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time + uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time + keys[0].PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], keys[3]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because tx has control sig from non-control key") + } + + // Case 11: Proposed validator in pending validator set for subnet + // First, add validator to pending validator set of subnet + tx, err = vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix())+1, // start time + uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time + defaultKey.PublicKey().Address(), // node ID + testSubnet1.ID, // subnet ID + testNetworkID, // network ID + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + err = vm.putPendingValidators(vm.DB, + &EventHeap{ + SortByStartTime: true, + Txs: []TimedTx{tx}, + }, + testSubnet1.ID, + ) + // Node with ID nodeIDKey.PublicKey().Address() now pending validator for subnet with ID testSubnet1.ID + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") + } + +} + +// Test that marshalling/unmarshalling works +func 
TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { + vm := defaultVM() + + // valid tx + tx, err := vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+1, + defaultWeight, + uint64(defaultValidateStartTime.Unix()), + uint64(defaultValidateEndTime.Unix()), + defaultKey.PublicKey().Address(), + testSubnet1.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes, err := Codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + + var unmarshaledTx addNonDefaultSubnetValidatorTx + if err := Codec.Unmarshal(txBytes, &unmarshaledTx); err != nil { + t.Fatal(err) + } + if err := unmarshaledTx.initialize(vm); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tx, &unmarshaledTx) { + t.Log(tx) + t.Log(&unmarshaledTx) + t.Fatal("should be equal") + } +} diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go new file mode 100644 index 0000000..b126ec8 --- /dev/null +++ b/vms/platformvm/advance_time_tx.go @@ -0,0 +1,158 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "fmt" + "time" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" +) + +// advanceTimeTx is a transaction to increase the chain's timestamp. +// When the chain's timestamp is updated (a AdvanceTimeTx is accepted and +// followed by a commit block) the staker set is also updated accordingly. 
+// It must be that: +// * proposed timestamp > [current chain time] +// * proposed timestamp <= [time for next staker to be removed] +type advanceTimeTx struct { + // Unix time this block proposes increasing the timestamp to + Time uint64 `serialize:"true"` + + vm *VM +} + +func (tx *advanceTimeTx) initialize(vm *VM) error { + tx.vm = vm + return nil +} + +// Timestamp returns the time this block is proposing the chain should be set to +func (tx *advanceTimeTx) Timestamp() time.Time { return time.Unix(int64(tx.Time), 0) } + +// SyntacticVerify that this transaction is well formed +func (tx *advanceTimeTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.vm.clock.Time().Add(Delta).Before(tx.Timestamp()): + return errTimeTooAdvanced + default: + return nil + } +} + +// SemanticVerify this transaction is valid. +func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) { + if err := tx.SyntacticVerify(); err != nil { + return nil, nil, nil, nil, err + } + + currentTimestamp, err := tx.vm.getTimestamp(db) + if err != nil { + return nil, nil, nil, nil, err + } + if tx.Time <= uint64(currentTimestamp.Unix()) { + return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %s not after current timestamp %s", + tx.Timestamp(), + currentTimestamp) + } + + // Only allow timestamp to move forward as far as the next validator's end time + nextValidatorEndTime := tx.vm.nextValidatorChangeTime(db, false) + if tx.Time > uint64(nextValidatorEndTime.Unix()) { + return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %v later than next validator end time %s", + tx.Time, + nextValidatorEndTime) + } + + // Only allow timestamp to move forward as far as the next pending validator's start time + nextValidatorStartTime := tx.vm.nextValidatorChangeTime(db, true) + if tx.Time > uint64(nextValidatorStartTime.Unix()) { + return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %v later 
than next validator start time %s", + tx.Time, + nextValidatorStartTime) + } + + // Calculate what the validator sets will be given new timestamp + // Move validators from pending to current if their start time is <= new timestamp. + // Remove validators from current if their end time <= proposed timestamp + + // Specify what the state of the chain will be if this proposal is committed + onCommitDB := versiondb.New(db) + if err := tx.vm.putTimestamp(onCommitDB, tx.Timestamp()); err != nil { + return nil, nil, nil, nil, err + } + + current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, err + } + + if err := tx.vm.putCurrentValidators(onCommitDB, current, DefaultSubnetID); err != nil { + return nil, nil, nil, nil, err + } + if err := tx.vm.putPendingValidators(onCommitDB, pending, DefaultSubnetID); err != nil { + return nil, nil, nil, nil, err + } + + // For each subnet, calculate what current and pending validator sets should be + // given new timestamp + subnets, err := tx.vm.getSubnets(db) + if err != nil { + return nil, nil, nil, nil, err + } + for _, subnet := range subnets { + current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.ID) + if err != nil { + return nil, nil, nil, nil, err + } + + if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.ID); err != nil { + return nil, nil, nil, nil, err + } + if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.ID); err != nil { + return nil, nil, nil, nil, err + } + } + + // If this block is committed, update the validator sets + // onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called + updateValidators := func() { + subnets, err := tx.vm.getSubnets(tx.vm.DB) + if err != nil { + tx.vm.Ctx.Log.Error("failed to get subnets: %s", err) + return + } + for _, subnet := range subnets { + if err := tx.vm.updateValidators(subnet.ID); err != nil { + tx.vm.Ctx.Log.Debug("failed to 
update validators on the default subnet: %s", err) + } + } + if err := tx.vm.updateValidators(DefaultSubnetID); err != nil { + tx.vm.Ctx.Log.Fatal("failed to update validators on the default subnet: %s", err) + } + } + + // Specify what the state of the chain will be if this proposal is aborted + onAbortDB := versiondb.New(db) // state doesn't change + + return onCommitDB, onAbortDB, updateValidators, nil, nil +} + +// InitiallyPrefersCommit returns true if the proposed time isn't after the +// current wall clock time. +func (tx *advanceTimeTx) InitiallyPrefersCommit() bool { + return !tx.Timestamp().After(tx.vm.clock.Time()) +} + +// newAdvanceTimeTx creates a new tx that, if it is accepted and followed by a +// Commit block, will set the chain's timestamp to [timestamp]. +func (vm *VM) newAdvanceTimeTx(timestamp time.Time) (*advanceTimeTx, error) { + tx := &advanceTimeTx{ + Time: uint64(timestamp.Unix()), + } + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/advance_time_tx_test.go b/vms/platformvm/advance_time_tx_test.go new file mode 100644 index 0000000..8765600 --- /dev/null +++ b/vms/platformvm/advance_time_tx_test.go @@ -0,0 +1,240 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "testing" + "time" +) + +func TestAdvanceTimeTxSyntacticVerify(t *testing.T) { + // Case 1: Tx is nil + var tx *advanceTimeTx + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have failed verification because tx is nil") + } + + // Case 2: Timestamp is ahead of synchrony bound + vm := defaultVM() + tx = &advanceTimeTx{ + Time: uint64(defaultGenesisTime.Add(Delta).Add(1 * time.Second).Unix()), + vm: vm, + } + + err := tx.SyntacticVerify() + if err == nil { + t.Fatal("should've failed verification because timestamp is ahead of synchrony bound") + } + + // Case 3: Valid + tx.Time = uint64(defaultGenesisTime.Add(Delta).Unix()) + err = tx.SyntacticVerify() + if err != nil { + t.Fatalf("should've passed verification but got: %v", err) + } +} + +// Ensure semantic verification fails when proposed timestamp is at or before current timestamp +func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { + vm := defaultVM() + + tx := &advanceTimeTx{ + Time: uint64(defaultGenesisTime.Unix()), + vm: vm, + } + _, _, _, _, err := tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should've failed verification because proposed timestamp same as current timestamp") + } +} + +// Ensure semantic verification fails when proposed timestamp is after next validator set change time +func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { + vm := defaultVM() + + // Case 1: Timestamp is after next validator start time + // Add a pending validator + pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) + nodeIDKey, _ := vm.factory.NewPrivateKey() + nodeID := nodeIDKey.PublicKey().Address() + addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime.Unix()), + uint64(pendingValidatorEndTime.Unix()), + nodeID, + nodeID, + NumberOfShares, + testNetworkID, + defaultKey, + 
) + if err != nil { + t.Fatal(err) + } + + err = vm.putPendingValidators( + vm.DB, + &EventHeap{ + SortByStartTime: true, + Txs: []TimedTx{addPendingValidatorTx}, + }, + DefaultSubnetID, + ) + if err != nil { + t.Fatal(err) + } + + tx := &advanceTimeTx{ + Time: uint64(pendingValidatorStartTime.Add(1 * time.Second).Unix()), + vm: vm, + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should've failed verification because proposed timestamp is after pending validator start time") + } + + // Case 2: Timestamp is after next validator end time + vm = defaultVM() + + // fast forward clock to 10 seconds before genesis validators stop validating + vm.clock.Set(defaultValidateEndTime.Add(-10 * time.Second)) + + // Proposes advancing timestamp to 1 second after genesis validators stop validating + tx = &advanceTimeTx{ + Time: uint64(defaultValidateEndTime.Add(1 * time.Second).Unix()), + vm: vm, + } + + _, _, _, _, err = tx.SemanticVerify(vm.DB) + t.Log(err) + if err == nil { + t.Fatal("should've failed verification because proposed timestamp is after pending validator start time") + } +} + +// Ensure semantic verification updates the current and pending validator sets correctly +func TestAdvanceTimeTxUpdateValidators(t *testing.T) { + vm := defaultVM() + + // Case 1: Timestamp is after next validator start time + // Add a pending validator + pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorEndTime := pendingValidatorStartTime.Add(MinimumStakingDuration) + nodeIDKey, _ := vm.factory.NewPrivateKey() + nodeID := nodeIDKey.PublicKey().Address() + addPendingValidatorTx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime.Unix()), + uint64(pendingValidatorEndTime.Unix()), + nodeID, + nodeID, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + err = vm.putPendingValidators( + vm.DB, + &EventHeap{ + 
SortByStartTime: true, + Txs: []TimedTx{addPendingValidatorTx}, + }, + DefaultSubnetID, + ) + if err != nil { + t.Fatal(err) + } + + tx := &advanceTimeTx{ + Time: uint64(pendingValidatorStartTime.Unix()), + vm: vm, + } + onCommit, onAbort, _, _, err := tx.SemanticVerify(vm.DB) + if err != nil { + t.Fatal(err) + } + + onCommitCurrentEvents, err := vm.getCurrentValidators(onCommit, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + if onCommitCurrentEvents.Len() != len(keys)+1 { // Each key in [keys] is a validator to start with...then we added a validator + t.Fatalf("Should have added the validator to the validator set") + } + + onCommitPendingEvents, err := vm.getPendingValidators(onCommit, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + if onCommitPendingEvents.Len() != 0 { + t.Fatalf("Should have removed the validator from the pending validator set") + } + + onAbortCurrentEvents, err := vm.getCurrentValidators(onAbort, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + if onAbortCurrentEvents.Len() != len(keys) { + t.Fatalf("Shouldn't have added the validator to the validator set") + } + + onAbortPendingEvents, err := vm.getPendingValidators(onAbort, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + if onAbortPendingEvents.Len() != 1 { + t.Fatalf("Shouldn't have removed the validator from the pending validator set") + } +} + +// Test method InitiallyPrefersCommit +func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { + vm := defaultVM() + + // Proposed advancing timestamp to 1 second after current timestamp + tx, err := vm.newAdvanceTimeTx(defaultGenesisTime.Add(1 * time.Second)) + if err != nil { + t.Fatal(err) + } + + if tx.InitiallyPrefersCommit() { + t.Fatal("should not prefer to commit this tx because its proposed timestamp is after wall clock time") + } + + // advance wall clock time + vm.clock.Set(defaultGenesisTime.Add(2 * time.Second)) + if !tx.InitiallyPrefersCommit() { + t.Fatal("should prefer to commit this tx because 
its proposed timestamp is before wall clock time") + } +} + +// Ensure marshaling/unmarshaling works +func TestAdvanceTimeTxUnmarshal(t *testing.T) { + vm := defaultVM() + + tx, err := vm.newAdvanceTimeTx(defaultGenesisTime) + if err != nil { + t.Fatal(err) + } + + bytes, err := Codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + + var unmarshaledTx advanceTimeTx + err = Codec.Unmarshal(bytes, &unmarshaledTx) + if err != nil { + t.Fatal(err) + } + + if tx.Time != unmarshaledTx.Time { + t.Fatal("should have same timestamp") + } +} diff --git a/vms/platformvm/commit_block.go b/vms/platformvm/commit_block.go new file mode 100644 index 0000000..63fc8db --- /dev/null +++ b/vms/platformvm/commit_block.go @@ -0,0 +1,56 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/core" +) + +// Commit being accepted results in the proposal of its parent (which must be a proposal block) +// being enacted. +type Commit struct { + CommonDecisionBlock `serialize:"true"` +} + +// Verify this block performs a valid state transition. +// +// The parent block must either be a proposal +// +// This function also sets the onCommit databases if the verification passes. +func (c *Commit) Verify() error { + // the parent of an Commit block should always be a proposal + if parent, ok := c.parentBlock().(*ProposalBlock); ok { + c.onAcceptDB, c.onAcceptFunc = parent.onCommit() + } else { + return errInvalidBlockType + } + + c.vm.currentBlocks[c.ID().Key()] = c + c.parentBlock().addChild(c) + return nil +} + +// newCommitBlock returns a new *Commit block where the block's parent, a +// proposal block, has ID [parentID]. 
+func (vm *VM) newCommitBlock(parentID ids.ID) *Commit { + commit := &Commit{ + CommonDecisionBlock: CommonDecisionBlock{ + CommonBlock: CommonBlock{ + Block: core.NewBlock(parentID), + vm: vm, + }, + }, + } + + // We serialize this block as a Block so that it can be deserialized into a + // Block + blk := Block(commit) + bytes, err := Codec.Marshal(&blk) + if err != nil { + return nil + } + commit.Block.Initialize(bytes, vm.SnowmanVM) + return commit +} diff --git a/vms/platformvm/common_blocks.go b/vms/platformvm/common_blocks.go new file mode 100644 index 0000000..5023a44 --- /dev/null +++ b/vms/platformvm/common_blocks.go @@ -0,0 +1,236 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/missing" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/vms/components/core" +) + +// When one stakes, one must specify the time one will start to validate and +// the time one will stop validating. The latter must be after the former, and both +// times must be in the future. +// +// When one wants to start staking: +// * They issue a transaction to that effect to an existing staker. +// * The staker checks whether the specified "start staking time" is in the past relative to +// their wall clock. +// ** If so, the staker ignores the transaction +// ** If not, the staker issues a proposal block (see below) on behalf of the would-be staker. +// +// When one is done staking: +// * The staking set decides whether the staker should receive either: +// ** Only the $AVA that the staker put up as a bond +// ** The $AVA the staker put up as a bond, and also a reward for staking +// +// This chain has three types of blocks: +// 1. 
A proposal block +// - Contains a proposal to do one of the following: +// * Change the chain time from t to t'. (This doubles as +// proposing to update the staking set.) +// ** It must be that: t' > t +// ** It must be that: t' <= [time at which the next staker stops staking] +// * Reward a staker upon their leaving the staking pool +// ** It must be that chain time == [time for this staker to stop staking] +// ** It must be that this staker is the next staker to stop staking +// * Add a staker to the staking pool +// ** It must be that: staker.startTime < chain time +// - A proposal block is always followed by either a commit block or a rejection block +// 2. A commit block +// - Does one of the following: +// * Approve a proposal to change the chain time from t to t' +// ** This should be the initial preference if t' <= Wall clock time +// * Approve a proposal to reward for a staker upon their leaving the staking pool +// ** It must be that: chain time == [time for this staker to stop staking] +// ** It must be that: this staker is the next staker to stop staking +// * Approve a proposal to add a staker to the staking pool +// ** This should be the initial preference if staker.startTime > Wall clock +// time +// - A commit block must always be preceded on the chain by the proposal block whose +// proposal is being commited +// 3. A rejection block +// - Does one of the following: +// * Reject a proposal to change the chain time from t to t' (therefore keeping it at t) +// ** This should be the initial preference if t' > [this node's wall clock time + Delta], +// where Delta is our synchrony assumption +// * Reject a proposal to reward for a staker upon their leaving the staking pool. 
+// ** The staker only has their bond (locked tokens) returned +// ** This should be the initial preference if the staker has had < Chi uptime +// ** It must be that: t == [time for this staker to stop staking] +// ** It must be that: this staker is the next staker to stop staking +// * Reject a proposal to add a staker to the staking set. +// ** Increase the timestamp to the would-be staker's start time +// ** This should be the initial preference if staker.startTime <= Wall clock +// time +// - A rejection block must always be preceded on the chain by the proposal block whose +// proposal is being rejected + +var ( + errInvalidBlockType = errors.New("invalid block type") + errEmptyValidatingSet = errors.New("empty validating set") +) + +// Block is the common interface that all staking blocks must have +type Block interface { + snowman.Block + + // initialize this block's non-serialized fields. + // This method should be called when a block is unmarshaled from bytes. + // [vm] is the vm the block exists in + // [bytes] is the byte representation of this block + initialize(vm *VM, bytes []byte) error + + // parentBlock returns the parent block, similarly to Parent. However, it + // provides the more specific staking.Block interface. + parentBlock() Block + + // addChild notifies this block that it has a child block building on + // its database. When this block commits its database, it should set the + // child's database to the former's underlying database instance. This ensures that + // the database versions do not recurse the length of the chain. + addChild(Block) + + // free all the references of this block from the vm's memory + free() + + // Set the database underlying the block's versiondb's to [db] + setBaseDatabase(db database.Database) +} + +// A decision block (either Commit, Abort, or DecisionBlock.) represents a +// decision to either commit (accept) or abort (reject) the changes specified in +// its parent, if its parent is a proposal. 
Otherwise, the changes are committed +// immediately. +type decision interface { + // This function should only be called after Verify is called. + // returns a database that contains the state of the chain if this block is + // accepted. + onAccept() database.Database +} + +// CommonBlock contains the fields common to all blocks of the Platform Chain +type CommonBlock struct { + *core.Block `serialize:"true"` + vm *VM + + // This block's parent. + // nil before parentBlock() is called on this block + parent Block + + // This block's children + children []Block +} + +// Reject implements the snowman.Block interface +func (cb *CommonBlock) Reject() { + defer cb.free() // remove this block from memory + + cb.Block.Reject() +} + +// free removes this block from memory +func (cb *CommonBlock) free() { + delete(cb.vm.currentBlocks, cb.ID().Key()) + cb.parent = nil + cb.children = nil +} + +// Parent returns this block's parent +func (cb *CommonBlock) Parent() snowman.Block { + parent := cb.parentBlock() + if parent != nil { + return parent + } + return &missing.Block{BlkID: cb.ParentID()} +} + +// parentBlock returns this block's parent +func (cb *CommonBlock) parentBlock() Block { + // Check if the block already has a reference to its parent + if cb.parent != nil { + return cb.parent + } + + // Get the parent from database + parentID := cb.ParentID() + parent, err := cb.vm.getBlock(parentID) + if err != nil { + cb.vm.Ctx.Log.Warn("could not get parent (ID %s) of block %s", parentID, cb.ID()) + return nil + } + cb.parent = parent + return parent.(Block) +} + +// addChild adds [child] as a child of this block +func (cb *CommonBlock) addChild(child Block) { cb.children = append(cb.children, child) } + +// CommonDecisionBlock contains the fields and methods common to all decision blocks +// (ie *Commit and *Abort) +type CommonDecisionBlock struct { + CommonBlock `serialize:"true"` + + // state of the chain if this block is accepted + onAcceptDB *versiondb.Database + + // 
to be executed if this block is accepted + onAcceptFunc func() +} + +// initialize this block +func (cdb *CommonDecisionBlock) initialize(vm *VM, bytes []byte) error { + cdb.vm = vm + cdb.Block.Initialize(bytes, vm.SnowmanVM) + return nil +} + +// setBaseDatabase sets this block's base database to [db] +func (cdb *CommonDecisionBlock) setBaseDatabase(db database.Database) { + if err := cdb.onAcceptDB.SetDatabase(db); err != nil { + cdb.vm.Ctx.Log.Error("problem while setting base database: %s", err) + } +} + +// onAccept returns: +// 1) The state of the chain if this block is accepted +// 2) The function to execute if this block is accepted +func (cdb *CommonDecisionBlock) onAccept() database.Database { + if cdb.Status().Decided() { + return cdb.vm.DB + } + return cdb.onAcceptDB +} + +// Accept implements the snowman.Block interface +func (cdb *CommonDecisionBlock) Accept() { + cdb.VM.Ctx.Log.Verbo("Accepting block with ID %s", cdb.ID()) + + cdb.CommonBlock.Accept() + + // Update the state of the chain in the database + if err := cdb.onAcceptDB.Commit(); err != nil { + cdb.vm.Ctx.Log.Warn("unable to commit onAcceptDB") + } + if err := cdb.vm.DB.Commit(); err != nil { + cdb.vm.Ctx.Log.Warn("unable to commit vm's DB") + } + + for _, child := range cdb.children { + child.setBaseDatabase(cdb.vm.DB) + } + if cdb.onAcceptFunc != nil { + cdb.onAcceptFunc() + } + + parent := cdb.parentBlock() + // remove this block and its parent from memory + parent.free() + cdb.free() +} diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go new file mode 100644 index 0000000..74bd3f0 --- /dev/null +++ b/vms/platformvm/create_chain_tx.go @@ -0,0 +1,194 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errInvalidVMID = errors.New("invalid VM ID") + errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") +) + +// UnsignedCreateChainTx is an unsigned CreateChainTx +type UnsignedCreateChainTx struct { + // ID of the network this blockchain exists on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying the transaction fee for this transaction. + // Currently unused, as there are no tx fees. + Nonce uint64 `serialize:"true"` + + // A human readable name for the chain; need not be unique + ChainName string `serialize:"true"` + + // ID of the VM running on the new chain + VMID ids.ID `serialize:"true"` + + // IDs of the feature extensions running on the new chain + FxIDs []ids.ID `serialize:"true"` + + // Byte representation of state of the new chain + GenesisData []byte `serialize:"true"` +} + +// CreateChainTx is a proposal to create a chain +type CreateChainTx struct { + UnsignedCreateChainTx `serialize:"true"` + + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + vm *VM + id ids.ID + key crypto.PublicKey // public key of transaction signer + bytes []byte +} + +func (tx *CreateChainTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. 
of the signed tx + tx.bytes = txBytes + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return err +} + +// ID of this transaction +func (tx *CreateChainTx) ID() ids.ID { return tx.id } + +// Key returns the public key of the signer of this transaction +// Precondition: tx.Verify() has been called and returned nil +func (tx *CreateChainTx) Key() crypto.PublicKey { return tx.key } + +// Bytes returns the byte representation of a CreateChainTx +func (tx *CreateChainTx) Bytes() []byte { return tx.bytes } + +// SyntacticVerify this transaction is well-formed +// Also populates [tx.Key] with the public key that signed this transaction +func (tx *CreateChainTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network + return errWrongNetworkID + case tx.id.IsZero(): + return errInvalidID + case tx.VMID.IsZero(): + return errInvalidVMID + case !ids.IsSortedAndUniqueIDs(tx.FxIDs): + return errFxIDsNotSortedAndUnique + } + + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) + if err != nil { + return err + } + tx.key = key + + return nil +} + +// SemanticVerify this transaction is valid. 
+func (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) { + if err := tx.SyntacticVerify(); err != nil { + return nil, err + } + + currentChains, err := tx.vm.getChains(db) // chains that currently exist + if err != nil { + return nil, errDBChains + } + for _, chain := range currentChains { + if chain.ID().Equals(tx.ID()) { + return nil, fmt.Errorf("chain with ID %s already exists", chain.ID()) + } + } + currentChains = append(currentChains, tx) // add this new chain + if err := tx.vm.putChains(db, currentChains); err != nil { + return nil, err + } + + // Deduct tx fee from payer's account + account, err := tx.vm.getAccount(db, tx.Key().Address()) + if err != nil { + return nil, err + } + account, err = account.Remove(0, tx.Nonce) + if err != nil { + return nil, err + } + if err := tx.vm.putAccount(db, account); err != nil { + return nil, err + } + + // If this proposal is committed, create the new blockchain using the chain manager + onAccept := func() { + chainParams := chains.ChainParameters{ + ID: tx.ID(), + GenesisData: tx.GenesisData, + VMAlias: tx.VMID.String(), + } + for _, fxID := range tx.FxIDs { + chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) + } + // TODO: Not sure how else to make this not nil pointer error during tests + if tx.vm.ChainManager != nil { + tx.vm.ChainManager.CreateChain(chainParams) + } + } + + return onAccept, nil +} + +// We use this type so we can serialize a list of *CreateChainTx +// by defining a Bytes method on it +type createChainList []*CreateChainTx + +// Bytes returns the byte representation of a list of *CreateChainTx +func (chains createChainList) Bytes() []byte { + bytes, _ := Codec.Marshal(chains) + return bytes +} + +func (vm *VM) newCreateChainTx(nonce uint64, genesisData []byte, vmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { + tx := &CreateChainTx{ + UnsignedCreateChainTx: UnsignedCreateChainTx{ + 
NetworkID: networkID, + Nonce: nonce, + GenesisData: genesisData, + VMID: vmID, + FxIDs: fxIDs, + ChainName: chainName, + }, + } + + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. of unsigned transaction + if err != nil { + return nil, err + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/create_chain_tx_test.go b/vms/platformvm/create_chain_tx_test.go new file mode 100644 index 0000000..8c555c6 --- /dev/null +++ b/vms/platformvm/create_chain_tx_test.go @@ -0,0 +1,142 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "testing" + + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/avm" +) + +// test method SyntacticVerify +func TestCreateChainTxSyntacticVerify(t *testing.T) { + vm := defaultVM() + + // Case 1: tx is nil + var tx *CreateChainTx + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should have failed because tx is nil") + } + + // Case 2: network ID is wrong + tx, err := vm.newCreateChainTx( + defaultNonce+1, + nil, + avm.ID, + nil, + "chain name", + testNetworkID+1, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + err = tx.SyntacticVerify() + t.Log(err) + if err == nil { + t.Fatal("should've errored because network ID is wrong") + } + + // case 3: tx ID is empty + tx, err = vm.newCreateChainTx( + defaultNonce+1, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.id = ids.ID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because tx ID is empty") + } + + // Case 4: vm ID is empty + tx, err = vm.newCreateChainTx( + defaultNonce+1, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + defaultKey, + 
) + if err != nil { + t.Fatal(err) + } + tx.VMID = ids.ID{} + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because tx ID is empty") + } +} + +func TestSemanticVerify(t *testing.T) { + vm := defaultVM() + + // create a tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + newDB := versiondb.New(vm.DB) + + _, err = tx.SemanticVerify(newDB) + if err != nil { + t.Fatal(err) + } + + chains, err := vm.getChains(newDB) + if err != nil { + t.Fatal(err) + } + for _, c := range chains { + if c.ID().Equals(tx.ID()) { + return + } + } + t.Fatalf("Should have added the chain to the set of chains") +} + +func TestSemanticVerifyAlreadyExisting(t *testing.T) { + vm := defaultVM() + + // create a tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + // put the chain in existing chain + if err := vm.putChains(vm.DB, []*CreateChainTx{tx}); err != nil { + t.Fatal(err) + } + + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatalf("should have failed because there is already a chain with ID %s", tx.id) + } +} diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go new file mode 100644 index 0000000..0d33ca7 --- /dev/null +++ b/vms/platformvm/create_subnet_tx.go @@ -0,0 +1,195 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/database" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" +) + +const maxThreshold = 25 + +var ( + errThresholdExceedsKeysLen = errors.New("threshold must be no more than number of control keys") + errThresholdTooHigh = fmt.Errorf("threshold can't be greater than %d", maxThreshold) +) + +// UnsignedCreateSubnetTx is an unsigned proposal to create a new subnet +type UnsignedCreateSubnetTx struct { + // The VM this tx exists within + vm *VM + + // ID is this transaction's ID + ID ids.ID + + // NetworkID is the ID of the network this tx was issued on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying the transaction fee for this transaction. + // Currently unused, as there are no tx fees. + Nonce uint64 `serialize:"true"` + + // Each element in ControlKeys is the address of a public key + // In order to add a validator to this subnet, a tx must be signed + // with Threshold of these keys + ControlKeys []ids.ShortID `serialize:"true"` + Threshold uint16 `serialize:"true"` +} + +// CreateSubnetTx is a proposal to create a new subnet +type CreateSubnetTx struct { + UnsignedCreateSubnetTx `serialize:"true"` + + // The public key that signed this transaction + // The transaction fee will be paid from the corresponding account + // (ie the account whose ID is [key].Address()) + // [key] is non-nil iff this tx is valid + key crypto.PublicKey + + // Signature on the UnsignedCreateSubnetTx's byte repr + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + // Byte representation of this transaction (including signature) + bytes []byte +} + +// SyntacticVerify nil iff [tx] is syntactically valid. 
+// If [tx] is valid, this method sets [tx.key] +func (tx *CreateSubnetTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.ID.IsZero(): + return errInvalidID + case tx.NetworkID != tx.vm.Ctx.NetworkID: + return errWrongNetworkID + case tx.Threshold > uint16(len(tx.ControlKeys)): + return errThresholdExceedsKeysLen + } + + // Byte representation of the unsigned transaction + unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return err + } + + // Recover signature from byte repr. of unsigned tx + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) // the public key that signed [tx] + if err != nil { + return err + } + + tx.key = key + return nil +} + +// SemanticVerify returns nil if [tx] is valid given the state in [db] +func (tx *CreateSubnetTx) SemanticVerify(db database.Database) (func(), error) { + if err := tx.SyntacticVerify(); err != nil { + return nil, err + } + + // Add new subnet to list of subnets + subnets, err := tx.vm.getSubnets(db) + if err != nil { + return nil, err + } + + for _, subnet := range subnets { + if subnet.ID.Equals(tx.ID) { + return nil, fmt.Errorf("there is already a subnet with ID %s", tx.ID) + } + } + subnets = append(subnets, tx) // add new subnet + if err := tx.vm.putSubnets(db, subnets); err != nil { + return nil, err + } + + // Deduct tx fee from payer's account + account, err := tx.vm.getAccount(db, tx.key.Address()) + if err != nil { + return nil, err + } + account, err = account.Remove(0, tx.Nonce) + if err != nil { + return nil, err + } + if err := tx.vm.putAccount(db, account); err != nil { + return nil, err + } + + return nil, nil +} + +// Bytes returns the byte representation of [tx] +func (tx *CreateSubnetTx) Bytes() []byte { + if tx.bytes != nil { + return tx.bytes + } + var err error + tx.bytes, err = Codec.Marshal(tx) + 
if err != nil { + tx.vm.Ctx.Log.Error("problem marshaling tx: %v", err) + } + return tx.bytes +} + +// initialize sets [tx.vm] to [vm] +func (tx *CreateSubnetTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. of the signed tx + if err != nil { + return err + } + tx.bytes = txBytes + tx.ID = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return nil +} + +func (vm *VM) newCreateSubnetTx(networkID uint32, nonce uint64, controlKeys []ids.ShortID, + threshold uint16, payerKey *crypto.PrivateKeySECP256K1R, +) (*CreateSubnetTx, error) { + + tx := &CreateSubnetTx{ + UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + vm: vm, + NetworkID: networkID, + Nonce: nonce, + ControlKeys: controlKeys, + Threshold: threshold, + }, + } + + unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, err + } + + sig, err := payerKey.Sign(unsignedBytes) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} + +// CreateSubnetTxList is a list of *CreateSubnetTx +type CreateSubnetTxList []*CreateSubnetTx + +// Bytes returns the binary representation of [lst] +func (lst CreateSubnetTxList) Bytes() []byte { + bytes, _ := Codec.Marshal(lst) + return bytes +} diff --git a/vms/platformvm/event_heap.go b/vms/platformvm/event_heap.go new file mode 100644 index 0000000..3381cfd --- /dev/null +++ b/vms/platformvm/event_heap.go @@ -0,0 +1,98 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "bytes" + "container/heap" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" +) + +// TimedTx ... 
+type TimedTx interface { + ProposalTx + + Vdr() validators.Validator + + ID() ids.ID + StartTime() time.Time + EndTime() time.Time +} + +// EventHeap is a collection of timedTxs where elements are ordered by either +// their startTime or their endTime. If SortByStartTime == true, the first +// element of [Txs] is the tx in the heap with the earliest startTime. Otherwise +// the first element is the tx with earliest endTime. The default value of this +// struct will order transactions by endTime. This struct implements the heap +// interface. +type EventHeap struct { + SortByStartTime bool `serialize:"true"` + Txs []TimedTx `serialize:"true"` +} + +func (h *EventHeap) Len() int { return len(h.Txs) } +func (h *EventHeap) Less(i, j int) bool { + iTx := h.Txs[i] + jTx := h.Txs[j] + + iTime := iTx.EndTime() + jTime := jTx.EndTime() + if h.SortByStartTime { + iTime = iTx.StartTime() + jTime = jTx.StartTime() + } + + switch { + case iTime.Unix() < jTime.Unix(): + return true + case iTime == jTime: + _, iOk := iTx.(*addDefaultSubnetValidatorTx) + _, jOk := jTx.(*addDefaultSubnetValidatorTx) + + if iOk != jOk { + return iOk == h.SortByStartTime + } + return bytes.Compare(iTx.ID().Bytes(), jTx.ID().Bytes()) == -1 + default: + return false + } +} +func (h *EventHeap) Swap(i, j int) { h.Txs[i], h.Txs[j] = h.Txs[j], h.Txs[i] } + +// Timestamp returns the timestamp on the top transaction on the heap +func (h *EventHeap) Timestamp() time.Time { + if h.SortByStartTime { + return h.Txs[0].StartTime() + } + return h.Txs[0].EndTime() +} + +// Add ... +func (h *EventHeap) Add(tx TimedTx) { heap.Push(h, tx) } + +// Peek ... +func (h *EventHeap) Peek() TimedTx { return h.Txs[0] } + +// Remove ... 
+func (h *EventHeap) Remove() TimedTx { return heap.Pop(h).(TimedTx) } + +// Push implements the heap interface +func (h *EventHeap) Push(x interface{}) { h.Txs = append(h.Txs, x.(TimedTx)) } + +// Pop implements the heap interface +func (h *EventHeap) Pop() interface{} { + newLen := len(h.Txs) - 1 + val := h.Txs[newLen] + h.Txs = h.Txs[:newLen] + return val +} + +// Bytes returns the byte representation of this heap +func (h *EventHeap) Bytes() []byte { + bytes, _ := Codec.Marshal(h) + return bytes +} diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go new file mode 100644 index 0000000..1a045ad --- /dev/null +++ b/vms/platformvm/event_heap_test.go @@ -0,0 +1,249 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestTxHeapStart(t *testing.T) { + vm := defaultVM() + txHeap := EventHeap{SortByStartTime: true} + + validator0, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + validator1, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + validator2, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 2, // startTime + 4, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + 
txHeap.Add(validator2) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator2.StartTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, validator2.StartTime()) + } + + txHeap.Add(validator1) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator1.StartTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, validator1.StartTime()) + } + + txHeap.Add(validator0) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator0.StartTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, validator0.StartTime()) + } else if top := txHeap.Peek(); !top.ID().Equals(validator0.ID()) { + t.Fatalf("TxHeap prioritized %s, expected %s", top.ID(), validator0.ID()) + } +} + +func TestTxHeapStop(t *testing.T) { + vm := defaultVM() + txHeap := EventHeap{} + + validator0, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + validator1, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + validator2, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 2, // startTime + 4, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + txHeap.Add(validator2) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator2.EndTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, 
validator2.EndTime()) + } + + txHeap.Add(validator1) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator1.EndTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, validator1.EndTime()) + } + + txHeap.Add(validator0) + if timestamp := txHeap.Timestamp(); !timestamp.Equal(validator0.EndTime()) { + t.Fatalf("TxHeap.Timestamp returned %s, expected %s", timestamp, validator0.EndTime()) + } else if top := txHeap.Txs[0]; !top.ID().Equals(validator0.ID()) { + t.Fatalf("TxHeap prioritized %s, expected %s", top.ID(), validator0.ID()) + } +} + +func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { + vm := defaultVM() + txHeap := EventHeap{SortByStartTime: true} + + validator, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + delegator, err := vm.newAddDefaultSubnetDelegatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + txHeap.Add(validator) + txHeap.Add(delegator) + + if top := txHeap.Txs[0]; !top.ID().Equals(validator.ID()) { + t.Fatalf("TxHeap prioritized %s, expected %s", top.ID(), validator.ID()) + } +} + +func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { + vm := defaultVM() + txHeap := EventHeap{} + + validator, err := vm.newAddDefaultSubnetValidatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // shares + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + delegator, err := 
vm.newAddDefaultSubnetDelegatorTx( + 5, // nonce + 123, // stake amount + 1, // startTime + 3, // endTime + ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination + 0, // network ID + keys[0], // key + ) + if err != nil { + t.Fatal(err) + } + + txHeap.Add(validator) + txHeap.Add(delegator) + + if top := txHeap.Txs[0]; !top.ID().Equals(delegator.ID()) { + t.Fatalf("TxHeap prioritized %s, expected %s", top.ID(), delegator.ID()) + } +} + +// Ensure *AddValidatorTxHeap are marshaled/unmarshaled correctly +func TestMarshalAddValidatorTxHeap(t *testing.T) { + validators := GenesisCurrentValidators() + + bytes, err := Codec.Marshal(validators) + if err != nil { + t.Fatal("err") + } + + stakersUnmarshaled := EventHeap{} + if err := Codec.Unmarshal(bytes, &stakersUnmarshaled); err != nil { + t.Fatal(err) + } + + for i, originalTx := range validators.Txs { + unmarshaledTx := stakersUnmarshaled.Txs[i] + unmarshaledTx.initialize(nil) + if !originalTx.ID().Equals(unmarshaledTx.ID()) { + t.Fatalf("Wrong IDs returned") + } + } +} diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go new file mode 100644 index 0000000..25f9786 --- /dev/null +++ b/vms/platformvm/factory.go @@ -0,0 +1,29 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" +) + +// ID of the platform VM +var ( + ID = ids.NewID([32]byte{'p', 'l', 'a', 't', 'f', 'o', 'r', 'm', 'v', 'm'}) +) + +// Factory can create new instances of the Platform Chain +type Factory struct { + ChainManager chains.Manager + Validators validators.Manager +} + +// New returns a new instance of the Platform Chain +func (f *Factory) New() interface{} { + return &VM{ + ChainManager: f.ChainManager, + Validators: f.Validators, + } +} diff --git a/vms/platformvm/proposal_block.go b/vms/platformvm/proposal_block.go new file mode 100644 index 0000000..afa3f93 --- /dev/null +++ b/vms/platformvm/proposal_block.go @@ -0,0 +1,151 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/vms/components/core" +) + +// ProposalTx is an operation that can be proposed +type ProposalTx interface { + initialize(vm *VM) error + // Attempts to verify this transaction with the provided state. + SemanticVerify(database.Database) (onCommitDB *versiondb.Database, onAbortDB *versiondb.Database, onCommitFunc func(), onAbortFunc func(), err error) + InitiallyPrefersCommit() bool +} + +// ProposalBlock is a proposal to change the chain's state. +// A proposal may be to: +// 1. Advance the chain's timestamp (*AdvanceTimeTx) +// 2. Remove a staker from the staker set (*RewardStakerTx) +// 3. 
Add a new staker to the set of pending (future) stakers (*AddStakerTx) +// The proposal will be enacted (change the chain's state) if the proposal block +// is accepted and followed by an accepted Commit block +type ProposalBlock struct { + CommonBlock `serialize:"true"` + + Tx ProposalTx `serialize:"true"` + + // The database that the chain will have if this block's proposal is committed + onCommitDB *versiondb.Database + // The database that the chain will have if this block's proposal is aborted + onAbortDB *versiondb.Database + // The function to execute if this block's proposal is committed + onCommitFunc func() + // The function to execute if this block's proposal is aborted + onAbortFunc func() +} + +// Initialize this block. +// Sets [pb.vm] to [vm] and populates non-serialized fields +// This method should be called when a block is unmarshaled from bytes +func (pb *ProposalBlock) initialize(vm *VM, bytes []byte) error { + pb.vm = vm + pb.Block.Initialize(bytes, vm.SnowmanVM) + return pb.Tx.initialize(vm) +} + +// setBaseDatabase sets this block's base database to [db] +func (pb *ProposalBlock) setBaseDatabase(db database.Database) { + if err := pb.onCommitDB.SetDatabase(db); err != nil { + pb.vm.Ctx.Log.Error("problem while setting base database: %s", err) + } + if err := pb.onAbortDB.SetDatabase(db); err != nil { + pb.vm.Ctx.Log.Error("problem while setting base database: %s", err) + } +} + +// onCommit should only be called after Verify is called. +// onCommit returns: +// 1. A database that contains the state of the chain assuming this proposal +// is enacted. (That is, if this block is accepted and followed by an +// accepted Commit block.) +// 2. A function be be executed when this block's proposal is committed. +// This function should not write to state. +func (pb *ProposalBlock) onCommit() (*versiondb.Database, func()) { + return pb.onCommitDB, pb.onCommitFunc +} + +// onAbort should only be called after Verify is called. 
+// onAbort returns a database that contains the state of the chain assuming this +// block's proposal is rejected. (That is, if this block is accepted and +// followed by an accepted Abort block.) +func (pb *ProposalBlock) onAbort() (*versiondb.Database, func()) { + return pb.onAbortDB, pb.onAbortFunc +} + +// Verify this block is valid. +// +// The parent block must either be a Commit or an Abort block. +// +// If this block is valid, this function also sets pas.onCommit and pas.onAbort. +func (pb *ProposalBlock) Verify() error { + // pdb is the database if this block's parent is accepted + var pdb database.Database + parent := pb.parentBlock() + // The parent of a proposal block (ie this block) must be a decision block + if parent, ok := parent.(decision); ok { + pdb = parent.onAccept() + } else { + return errInvalidBlockType + } + + var err error + pb.onCommitDB, pb.onAbortDB, pb.onCommitFunc, pb.onAbortFunc, err = pb.Tx.SemanticVerify(pdb) + if err != nil { + return err + } + + pb.vm.currentBlocks[pb.ID().Key()] = pb + parent.addChild(pb) + return nil +} + +// Options returns the possible children of this block in preferential order. +func (pb *ProposalBlock) Options() [2]snowman.Block { + blockID := pb.ID() + + commit := pb.vm.newCommitBlock(blockID) + abort := pb.vm.newAbortBlock(blockID) + + if err := pb.vm.State.PutBlock(pb.vm.DB, commit); err != nil { + pb.vm.Ctx.Log.Warn(errDBPutBlock.Error()) + } + if err := pb.vm.State.PutBlock(pb.vm.DB, abort); err != nil { + pb.vm.Ctx.Log.Warn(errDBPutBlock.Error()) + } + pb.vm.DB.Commit() + + if pb.Tx.InitiallyPrefersCommit() { + return [2]snowman.Block{commit, abort} + } + return [2]snowman.Block{abort, commit} +} + +// newProposalBlock creates a new block that proposes to issue a transaction. +// The parent of this block has ID [parentID]. The parent must be a decision block. 
+// Returns nil if there's an error while creating this block +func (vm *VM) newProposalBlock(parentID ids.ID, tx ProposalTx) (*ProposalBlock, error) { + pb := &ProposalBlock{ + CommonBlock: CommonBlock{ + Block: core.NewBlock(parentID), + vm: vm, + }, + Tx: tx, + } + + // We marshal the block in this way (as a Block) so that we can unmarshal + // it into a Block (rather than a *ProposalBlock) + block := Block(pb) + bytes, err := Codec.Marshal(&block) + if err != nil { + return nil, err + } + pb.Initialize(bytes, vm.SnowmanVM) + return pb, nil +} diff --git a/vms/platformvm/reward.go b/vms/platformvm/reward.go new file mode 100644 index 0000000..7cd4263 --- /dev/null +++ b/vms/platformvm/reward.go @@ -0,0 +1,25 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "math" + "time" +) + +// reward returns the amount of $AVA to reward the staker with +func reward(duration time.Duration, amount uint64, inflationRate float64) uint64 { + // TODO: Can't use floats here. Need to figure out how to do some integer + // approximations + + years := duration.Hours() / (365. * 24.) + + // Total value of this transaction + value := float64(amount) * math.Pow(inflationRate, years) + + // Amount of the reward in $AVA + reward := value - float64(amount) + + return uint64(reward) +} diff --git a/vms/platformvm/reward_validator_tx.go b/vms/platformvm/reward_validator_tx.go new file mode 100644 index 0000000..fa1d309 --- /dev/null +++ b/vms/platformvm/reward_validator_tx.go @@ -0,0 +1,262 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "container/heap" + "errors" + "fmt" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/math" +) + +var ( + errShouldBeDSValidator = errors.New("expected validator to be in the default subnet") +) + +// rewardValidatorTx is a transaction that represents a proposal to remove a +// validator that is currently validating from the validator set. +// +// If this transaction is accepted and the next block accepted is a *Commit +// block, the validator is removed and the account that the validator specified +// receives the staked $AVA as well as a validating reward. +// +// If this transaction is accepted and the next block accepted is an *Abort +// block, the validator is removed and the account that the validator specified +// receives the staked $AVA but no reward. +type rewardValidatorTx struct { + // ID of the tx that created the delegator/validator being removed/rewarded + TxID ids.ID `serialize:"true"` + + vm *VM +} + +func (tx *rewardValidatorTx) initialize(vm *VM) error { + tx.vm = vm + return nil +} + +// SyntacticVerify that this transaction is well formed +func (tx *rewardValidatorTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.TxID.IsZero(): + return errInvalidID + default: + return nil + } +} + +// SemanticVerify this transaction performs a valid state transition. +// +// The current validating set must have at least one member. +// The next validator to be removed must be the validator specified in this block. +// The next validator to be removed must be have an end time equal to the current +// chain timestamp. 
+func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) { + if err := tx.SyntacticVerify(); err != nil { + return nil, nil, nil, nil, err + } + if db == nil { + return nil, nil, nil, nil, errDbNil + } + + currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID) + if err != nil { + return nil, nil, nil, nil, errDBCurrentValidators + } + if currentEvents.Len() == 0 { // there is no validator to remove + return nil, nil, nil, nil, errEmptyValidatingSet + } + + vdrTx := currentEvents.Peek() + + if txID := vdrTx.ID(); !txID.Equals(tx.TxID) { + return nil, nil, nil, nil, fmt.Errorf("attempting to remove TxID: %s. Should be removing %s", + tx.TxID, + txID) + } + + // Verify that the chain's timestamp is the validator's end time + currentTime, err := tx.vm.getTimestamp(db) + if err != nil { + return nil, nil, nil, nil, err + } + if endTime := vdrTx.EndTime(); !endTime.Equal(currentTime) { + return nil, nil, nil, nil, fmt.Errorf("attempting to remove TxID: %s before their end time %s", + tx.TxID, + endTime) + } + + heap.Pop(currentEvents) // Remove validator from the validator set + + onCommitDB := versiondb.New(db) + // If this tx's proposal is committed, remove the validator from the validator set and update the + // account balance to reflect the return of staked $AVA and their reward. + if err := tx.vm.putCurrentValidators(onCommitDB, currentEvents, DefaultSubnetID); err != nil { + return nil, nil, nil, nil, errDBPutCurrentValidators + } + + onAbortDB := versiondb.New(db) + // If this tx's proposal is aborted, remove the validator from the validator set and update the + // account balance to reflect the return of staked $AVA. The validator receives no reward. 
+ if err := tx.vm.putCurrentValidators(onAbortDB, currentEvents, DefaultSubnetID); err != nil { + return nil, nil, nil, nil, errDBPutCurrentValidators + } + + switch vdrTx := vdrTx.(type) { + case *addDefaultSubnetValidatorTx: + duration := vdrTx.Duration() + amount := vdrTx.Wght + reward := reward(duration, amount, InflationRate) + amountWithReward, err := math.Add64(amount, reward) + if err != nil { + amountWithReward = amount + tx.vm.Ctx.Log.Error("error while calculating balance with reward: %s", err) + } + + accountID := vdrTx.Destination + account, err := tx.vm.getAccount(db, accountID) // account receiving staked $AVA (and, if applicable, reward) + // Error is likely because the staked $AVA is being sent to a new + // account that isn't in the platform chain's state yet. + // Create the account + // TODO: We should have a keyNotFound error to distinguish this case from others + if err != nil { + account = newAccount(accountID, 0, 0) + } + + accountWithReward := account // The state of the account if the validator earned a validating reward + accountNoReward := account // The state of the account if the validator didn't earn a validating reward + if newAccount, err := account.Add(amountWithReward); err == nil { + accountWithReward = newAccount + } else { + tx.vm.Ctx.Log.Error("error while calculating account balance: %v", err) + } + if newAccount, err := account.Add(amount); err == nil { + accountNoReward = newAccount + } else { + tx.vm.Ctx.Log.Error("error while calculating account balance: %v", err) + } + + if err := tx.vm.putAccount(onCommitDB, accountWithReward); err != nil { + return nil, nil, nil, nil, errDBPutAccount + } + if err := tx.vm.putAccount(onAbortDB, accountNoReward); err != nil { + return nil, nil, nil, nil, errDBPutAccount + } + case *addDefaultSubnetDelegatorTx: + parentTx, err := currentEvents.getDefaultSubnetStaker(vdrTx.NodeID) + if err != nil { + return nil, nil, nil, nil, err + } + + duration := vdrTx.Duration() + amount := vdrTx.Wght 
+ reward := reward(duration, amount, InflationRate) + + // Because parentTx.Shares <= NumberOfShares this will never underflow + delegatorShares := NumberOfShares - uint64(parentTx.Shares) + // Because delegatorShares <= NumberOfShares this will never overflow + delegatorReward := delegatorShares * (reward / NumberOfShares) + // Delay rounding as long as possible for small numbers + if optimisticReward, err := math.Mul64(delegatorShares, reward); err == nil { + delegatorReward = optimisticReward / NumberOfShares + } + + // Because delegatorReward <= reward this will never underflow + validatorReward := reward - delegatorReward + + delegatorAmountWithReward, err := math.Add64(amount, delegatorReward) + if err != nil { + delegatorAmountWithReward = amount + tx.vm.Ctx.Log.Error("error while calculating balance with reward: %s", err) + } + + delegatorAccountID := vdrTx.Destination + delegatorAccount, err := tx.vm.getAccount(db, delegatorAccountID) // account receiving staked $AVA (and, if applicable, reward) + // Error is likely because the staked $AVA is being sent to a new + // account that isn't in the platform chain's state yet. 
+ // Create the account + // TODO: We should have a keyNotFound error to distinguish this case from others + if err != nil { + delegatorAccount = newAccount(delegatorAccountID, 0, 0) + } + + delegatorAccountWithReward := delegatorAccount // The state of the account if the validator earned a validating reward + delegatorAccountNoReward := delegatorAccount // The state of the account if the validator didn't earn a validating reward + if newAccount, err := delegatorAccount.Add(delegatorAmountWithReward); err == nil { + delegatorAccountWithReward = newAccount + } else { + tx.vm.Ctx.Log.Error("error while calculating account balance: %v", err) + } + if newAccount, err := delegatorAccount.Add(amount); err == nil { + delegatorAccountNoReward = newAccount + } else { + tx.vm.Ctx.Log.Error("error while calculating account balance: %v", err) + } + + if err := tx.vm.putAccount(onCommitDB, delegatorAccountWithReward); err != nil { + return nil, nil, nil, nil, errDBPutAccount + } + if err := tx.vm.putAccount(onAbortDB, delegatorAccountNoReward); err != nil { + return nil, nil, nil, nil, errDBPutAccount + } + + validatorAccountID := parentTx.Destination + validatorAccount, err := tx.vm.getAccount(onCommitDB, validatorAccountID) // account receiving staked $AVA (and, if applicable, reward) + // Error is likely because the staked $AVA is being sent to a new + // account that isn't in the platform chain's state yet. 
+		// Create the account
+		// TODO: We should have a keyNotFound error to distinguish this case from others
+		if err != nil {
+			validatorAccount = newAccount(validatorAccountID, 0, 0)
+		}
+
+		validatorAccountWithReward := validatorAccount // The state of the account if the validator earned a validating reward
+		if newAccount, err := validatorAccount.Add(validatorReward); err == nil {
+			validatorAccountWithReward = newAccount
+		} else {
+			tx.vm.Ctx.Log.Error("error while calculating account balance: %v", err)
+		}
+
+		if err := tx.vm.putAccount(onCommitDB, validatorAccountWithReward); err != nil {
+			return nil, nil, nil, nil, errDBPutAccount
+		}
+	default:
+		return nil, nil, nil, nil, errShouldBeDSValidator
+	}
+
+	// Regardless of whether this tx is committed or aborted, update the
+	// validator set to remove the staker. onAbortDB or onCommitDB should commit
+	// (flush to vm.DB) before this is called
+	updateValidators := func() {
+		if err := tx.vm.updateValidators(DefaultSubnetID); err != nil {
+			tx.vm.Ctx.Log.Fatal("failed to update validators on the default subnet: %s", err)
+		}
+	}
+
+	return onCommitDB, onAbortDB, updateValidators, updateValidators, nil
+}
+
+// InitiallyPrefersCommit returns true.
+//
+// Right now, *Commit (that is, remove the validator and reward them) is always
+// preferred over *Abort (remove the validator but don't reward them.)
+//
+// TODO: A validator should receive a reward only if they are sufficiently
+// responsive and correct during the time they are validating.
+func (tx *rewardValidatorTx) InitiallyPrefersCommit() bool { return true }
+
+// newRewardValidatorTx creates a new transaction that proposes to remove the staker
+// [validatorID] from the default validator set.
+func (vm *VM) newRewardValidatorTx(txID ids.ID) (*rewardValidatorTx, error) { + tx := &rewardValidatorTx{ + TxID: txID, + } + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/reward_validator_tx_test.go b/vms/platformvm/reward_validator_tx_test.go new file mode 100644 index 0000000..b3563a2 --- /dev/null +++ b/vms/platformvm/reward_validator_tx_test.go @@ -0,0 +1,233 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" +) + +func TestRewardValidatorTxSyntacticVerify(t *testing.T) { + type test struct { + tx *rewardValidatorTx + shouldErr bool + } + + vm := defaultVM() + txID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7}) + + tests := []test{ + { + tx: nil, + shouldErr: true, + }, + { + tx: &rewardValidatorTx{ + vm: vm, + TxID: txID, + }, + shouldErr: false, + }, + { + tx: &rewardValidatorTx{ + vm: vm, + TxID: ids.ID{}, + }, + shouldErr: true, + }, + } + + for _, test := range tests { + err := test.tx.SyntacticVerify() + if err != nil && !test.shouldErr { + t.Fatalf("expected nil error but got: %v", err) + } + if err == nil && test.shouldErr { + t.Fatalf("expected error but got nil") + } + } +} + +func TestRewardValidatorTxSemanticVerify(t *testing.T) { + vm := defaultVM() + var nextToRemove *addDefaultSubnetValidatorTx + currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + // ID of validator that should leave DS validator set next + nextToRemove = currentValidators.Peek().(*addDefaultSubnetValidatorTx) + + // Case 1: Chain timestamp is wrong + tx, err := vm.newRewardValidatorTx(nextToRemove.ID()) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + t.Log(err) + if err == nil { + t.Fatalf("should have failed because validator end time doesn't match chain timestamp") + } + + // Case 2: 
Wrong validator + tx, err = vm.newRewardValidatorTx(ids.Empty) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + t.Log(err) + if err == nil { + t.Fatalf("should have failed because validator ID is wrong") + } + + // Case 3: Happy path + // Advance chain timestamp to time that genesis validators leave + if err := vm.putTimestamp(vm.DB, defaultValidateEndTime); err != nil { + t.Fatal(err) + } + tx, err = vm.newRewardValidatorTx(nextToRemove.ID()) + if err != nil { + t.Fatal(err) + } + onCommitDB, onAbortDB, _, _, err := tx.SemanticVerify(vm.DB) + t.Log(err) + if err != nil { + t.Fatal(err) + } + + // there should be no validators of default subnet in [onCommitDB] or [onAbortDB] + // (as specified in defaultVM's init) + currentValidators, err = vm.getCurrentValidators(onCommitDB, DefaultSubnetID) + t.Log(currentValidators) + if err != nil { + t.Fatal(err) + } + if numValidators := currentValidators.Len(); numValidators != len(keys)-1 { + t.Fatalf("Should be %d validators but are %d", len(keys)-1, numValidators) + } + + currentValidators, err = vm.getCurrentValidators(onAbortDB, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + if numValidators := currentValidators.Len(); numValidators != len(keys)-1 { + t.Fatalf("Should be %d validators but there are %d", len(keys)-1, numValidators) + } + + // account should have gotten validator reward + account, err := vm.getAccount(onCommitDB, nextToRemove.Destination) + if err != nil { + t.Fatal(err) + } + if account.Balance <= defaultBalance-txFee { + t.Fatal("expected account balance to have increased due to receiving validator reward") + } +} + +func TestRewardDelegatorTxSemanticVerify(t *testing.T) { + vm := defaultVM() + + keyIntf1, err := vm.factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + key1 := keyIntf1.(*crypto.PrivateKeySECP256K1R) + + keyIntf2, err := vm.factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + key2 := keyIntf2.(*crypto.PrivateKeySECP256K1R) + 
+ vdrTx, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, // nonce + defaultStakeAmount, // stakeAmt + uint64(defaultValidateEndTime.Add(-365*24*time.Hour).Unix())-1, + uint64(defaultValidateEndTime.Unix())-1, + key1.PublicKey().Address(), // node ID + key1.PublicKey().Address(), // destination + NumberOfShares/4, + testNetworkID, + key1, + ) + if err != nil { + t.Fatal(err) + } + + delTx, err := vm.newAddDefaultSubnetDelegatorTx( + defaultNonce+1, // nonce + defaultStakeAmount, // stakeAmt + uint64(defaultValidateEndTime.Add(-365*24*time.Hour).Unix())-1, + uint64(defaultValidateEndTime.Unix())-1, + key1.PublicKey().Address(), // node ID + key2.PublicKey().Address(), // destination + testNetworkID, + key2, + ) + if err != nil { + t.Fatal(err) + } + + currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID) + if err != nil { + t.Fatal(err) + } + currentValidators.Add(vdrTx) + currentValidators.Add(delTx) + vm.putCurrentValidators(vm.DB, currentValidators, DefaultSubnetID) + + if err := vm.putTimestamp(vm.DB, defaultValidateEndTime.Add(-time.Second)); err != nil { + t.Fatal(err) + } + + tx, err := vm.newRewardValidatorTx(delTx.ID()) + if err != nil { + t.Fatal(err) + } + onCommitDB, _, _, _, err := tx.SemanticVerify(vm.DB) + t.Log(err) + if err != nil { + t.Fatal(err) + } + + // account should have gotten validator reward + account, err := vm.getAccount(onCommitDB, vdrTx.Destination) + if err != nil { + t.Fatal(err) + } + if expectedBalance := defaultStakeAmount / 100; account.Balance != expectedBalance { + t.Fatalf("expected account balance to be %d was %d", expectedBalance, account.Balance) + } + + // account should have gotten validator reward + account, err = vm.getAccount(onCommitDB, delTx.Destination) + if err != nil { + t.Fatal(err) + } + if expectedBalance := (defaultStakeAmount * 103) / 100; account.Balance != expectedBalance { + t.Fatalf("expected account balance to be %d was %d", expectedBalance, account.Balance) + } + + tx, err 
= vm.newRewardValidatorTx(vdrTx.ID()) + if err != nil { + t.Fatal(err) + } + onCommitDB, _, _, _, err = tx.SemanticVerify(onCommitDB) + t.Log(err) + if err != nil { + t.Fatal(err) + } + + // account should have gotten validator reward + account, err = vm.getAccount(onCommitDB, vdrTx.Destination) + if err != nil { + t.Fatal(err) + } + if expectedBalance := (defaultStakeAmount * 21) / 20; account.Balance != expectedBalance { + t.Fatalf("expected account balance to be %d was %d", expectedBalance, account.Balance) + } +} diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go new file mode 100644 index 0000000..4842ff1 --- /dev/null +++ b/vms/platformvm/service.go @@ -0,0 +1,1028 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "net/http/httptest" + + "github.com/gorilla/rpc/v2/json2" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +var ( + errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") + errParsingID = errors.New("error parsing ID") + errGetAccount = errors.New("error retrieving account information") + errGetAccounts = errors.New("error getting accounts controlled by specified user") + errGetUser = errors.New("error while getting user. 
Does user exist?") + errNoMethodWithGenesis = errors.New("no method was provided but genesis data was provided") + errCreatingTransaction = errors.New("problem while creating transaction") + errNoDestination = errors.New("call is missing field 'stakeDestination'") + errNoSource = errors.New("call is missing field 'stakeSource'") + errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") +) + +var key *crypto.PrivateKeySECP256K1R + +func init() { + cb58 := formatting.CB58{} + err := cb58.FromString("24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5") + if err != nil { + panic(err) + } + factory := crypto.FactorySECP256K1R{} + pk, err := factory.ToPrivateKey(cb58.Bytes) + if err != nil { + panic(err) + } + key = pk.(*crypto.PrivateKeySECP256K1R) +} + +// Service defines the API calls that can be made to the platform chain +type Service struct{ vm *VM } + +/* + ****************************************************** + ******************* Get Subnets ********************** + ****************************************************** + */ + +// APISubnet is a representation of a subnet used in API calls +type APISubnet struct { + // ID of the subnet + ID ids.ID `json:"id"` + + // Each element of [ControlKeys] the address of a public key. + // A transaction to add a validator to this subnet requires + // signatures from [Threshold] of these keys to be valid. 
+ ControlKeys []ids.ShortID `json:"controlKeys"` + Threshold json.Uint16 `json:"threshold"` +} + +// GetSubnetsArgs are the arguments to GetSubnet +type GetSubnetsArgs struct { + // IDs of the subnets to retrieve information about + // If omitted, gets all subnets + IDs []ids.ID `json:"ids"` +} + +// GetSubnetsResponse is the response from calling GetSubnets +type GetSubnetsResponse struct { + // Each element is a subnet that exists + // Null if there are no subnets other than the default subnet + Subnets []APISubnet `json:"subnets"` +} + +// GetSubnets returns the subnets whose ID are in [args.IDs] +// The response will not contain the default subnet +func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error { + subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets + if err != nil { + return fmt.Errorf("error getting subnets from database: %v", err) + } + + getAll := len(args.IDs) == 0 + + if getAll { + response.Subnets = make([]APISubnet, len(subnets)) + for i, subnet := range subnets { + response.Subnets[i] = APISubnet{ + ID: subnet.ID, + ControlKeys: subnet.ControlKeys, + Threshold: json.Uint16(subnet.Threshold), + } + } + return nil + } + + idsSet := ids.Set{} + idsSet.Add(args.IDs...) 
+ for _, subnet := range subnets { + if idsSet.Contains(subnet.ID) { + response.Subnets = append(response.Subnets, + APISubnet{ + ID: subnet.ID, + ControlKeys: subnet.ControlKeys, + Threshold: json.Uint16(subnet.Threshold), + }, + ) + } + } + return nil +} + +/* + ****************************************************** + **************** Get/Sample Validators *************** + ****************************************************** + */ + +// GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators +type GetCurrentValidatorsArgs struct { + // Subnet we're listing the validators of + // If omitted, defaults to default subnet + SubnetID ids.ID `json:"subnetID"` +} + +// GetCurrentValidatorsReply are the results from calling GetCurrentValidators +type GetCurrentValidatorsReply struct { + Validators []APIValidator `json:"validators"` +} + +// GetCurrentValidators returns the list of current validators +func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { + service.vm.Ctx.Log.Debug("GetCurrentValidators called") + + if args.SubnetID.IsZero() { + args.SubnetID = DefaultSubnetID + } + + validators, err := service.vm.getCurrentValidators(service.vm.DB, args.SubnetID) + if err != nil { + return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) + } + + reply.Validators = make([]APIValidator, validators.Len()) + for i, tx := range validators.Txs { + vdr := tx.Vdr() + weight := json.Uint64(vdr.Weight()) + if args.SubnetID.Equals(DefaultSubnetID) { + reply.Validators[i] = APIValidator{ + ID: vdr.ID(), + StartTime: json.Uint64(tx.StartTime().Unix()), + EndTime: json.Uint64(tx.EndTime().Unix()), + StakeAmount: &weight, + } + } else { + reply.Validators[i] = APIValidator{ + ID: vdr.ID(), + StartTime: json.Uint64(tx.StartTime().Unix()), + EndTime: json.Uint64(tx.EndTime().Unix()), + Weight: &weight, + } + } + } + + return nil +} + +// GetPendingValidatorsArgs are the arguments for calling GetPendingValidators +type GetPendingValidatorsArgs struct { + // Subnet we're getting the pending validators of + // If omitted, defaults to default subnet + SubnetID ids.ID `json:"subnetID"` +} + +// GetPendingValidatorsReply are the results from calling GetPendingValidators +type GetPendingValidatorsReply struct { + Validators []APIValidator `json:"validators"` +} + +// GetPendingValidators returns the list of current validators +func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { + service.vm.Ctx.Log.Debug("GetPendingValidators called") + + if args.SubnetID.IsZero() { + args.SubnetID = DefaultSubnetID + } + + validators, err := service.vm.getPendingValidators(service.vm.DB, args.SubnetID) + if err != nil { + return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) + } + + reply.Validators = make([]APIValidator, validators.Len()) + for i, tx := range validators.Txs { + vdr := tx.Vdr() + weight := json.Uint64(vdr.Weight()) + if args.SubnetID.Equals(DefaultSubnetID) { + reply.Validators[i] = APIValidator{ + ID: vdr.ID(), + StartTime: json.Uint64(tx.StartTime().Unix()), + EndTime: json.Uint64(tx.EndTime().Unix()), + StakeAmount: &weight, + } + } else { + reply.Validators[i] = APIValidator{ + ID: vdr.ID(), + StartTime: json.Uint64(tx.StartTime().Unix()), + EndTime: json.Uint64(tx.EndTime().Unix()), + Weight: &weight, + } + } + } + + return nil +} + +// SampleValidatorsArgs are the arguments for calling SampleValidators +type SampleValidatorsArgs struct { + // Number of validators in the sample + Size json.Uint16 `json:"size"` + + // ID of subnet to sample validators from + // If omitted, defaults to the default subnet + SubnetID ids.ID `json:"subnetID"` +} + +// SampleValidatorsReply are the results from calling Sample +type SampleValidatorsReply struct { + Validators []ids.ShortID `json:"validators"` +} + +// SampleValidators returns a sampling of the list of current validators +func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { + service.vm.Ctx.Log.Debug("Sample called with {Size = %d}", args.Size) + + if args.SubnetID.IsZero() { + args.SubnetID = DefaultSubnetID + } + + validators, ok := service.vm.Validators.GetValidatorSet(args.SubnetID) + if !ok { + return fmt.Errorf("couldn't get validators of subnet with ID %s. 
Does it exist?", args.SubnetID) + } + + sample := validators.Sample(int(args.Size)) + if setLen := len(sample); setLen != int(args.Size) { + return fmt.Errorf("current number of validators (%d) is insufficient to sample %d validators", setLen, args.Size) + } + + reply.Validators = make([]ids.ShortID, int(args.Size)) + for i, vdr := range sample { + reply.Validators[i] = vdr.ID() + } + + ids.SortShortIDs(reply.Validators) + return nil +} + +/* + ****************************************************** + *************** Get/Create Accounts ****************** + ****************************************************** + */ + +// GetAccountArgs are the arguments for calling GetAccount +type GetAccountArgs struct { + // Address of the account we want the information about + Address ids.ShortID `json:"address"` +} + +// GetAccountReply is the response from calling GetAccount +type GetAccountReply struct { + Address ids.ShortID `json:"address"` + Nonce json.Uint64 `json:"nonce"` + Balance json.Uint64 `json:"balance"` +} + +// GetAccount details given account ID +func (service *Service) GetAccount(_ *http.Request, args *GetAccountArgs, reply *GetAccountReply) error { + account, err := service.vm.getAccount(service.vm.DB, args.Address) + if err != nil && err != database.ErrNotFound { + return errGetAccount + } else if err == database.ErrNotFound { + account = newAccount(args.Address, 0, 0) + } + + reply.Address = account.Address + reply.Balance = json.Uint64(account.Balance) + reply.Nonce = json.Uint64(account.Nonce) + return nil +} + +// ListAccountsArgs are the arguments to ListAccounts +type ListAccountsArgs struct { + // List all of the accounts controlled by this user + Username string `json:"username"` + Password string `json:"password"` +} + +// ListAccountsReply is the reply from ListAccounts +type ListAccountsReply struct { + Accounts []APIAccount `json:"accounts"` +} + +// ListAccounts lists all of the accounts controlled by [args.Username] +func (service *Service) 
ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { + service.vm.Ctx.Log.Debug("platform.listAccounts called for user '%s'", args.Username) + + // db holds the user's info that pertains to the Platform Chain + userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return errGetUser + } + + // The user + user := user{ + db: userDB, + } + + // IDs of accounts controlled by this user + accountIDs, err := user.getAccountIDs() + if err != nil { + return errGetAccounts + } + + var accounts []APIAccount + for _, accountID := range accountIDs { + account, err := service.vm.getAccount(service.vm.DB, accountID) // Get account whose ID is [accountID] + if err != nil && err != database.ErrNotFound { + service.vm.Ctx.Log.Error("couldn't get account from database: %v", err) + continue + } else if err == database.ErrNotFound { + account = newAccount(accountID, 0, 0) + } + accounts = append(accounts, APIAccount{ + Address: accountID, + Nonce: json.Uint64(account.Nonce), + Balance: json.Uint64(account.Balance), + }) + } + reply.Accounts = accounts + return nil +} + +// CreateAccountArgs are the arguments for calling CreateAccount +type CreateAccountArgs struct { + // User that will control the newly created account + Username string `json:"username"` + + // That user's password + Password string `json:"password"` + + // The private key that controls the new account. + // If omitted, will generate a new private key belonging + // to the user. + PrivateKey string `json:"privateKey"` +} + +// CreateAccountReply are the response from calling CreateAccount +type CreateAccountReply struct { + // Address of the newly created account + Address ids.ShortID `json:"address"` +} + +// CreateAccount creates a new account on the Platform Chain +// The account is controlled by [args.Username] +// The account's ID is [privKey].PublicKey().Address(), where [privKey] is a +// private key controlled by the user. 
+func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { + service.vm.Ctx.Log.Debug("platform.createAccount called for user '%s'", args.Username) + + // userDB holds the user's info that pertains to the Platform Chain + userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return errGetUser + } + + // The user creating a new account + user := user{ + db: userDB, + } + + // private key that controls the new account + var privKey *crypto.PrivateKeySECP256K1R + // If no private key supplied in args, create a new one + if args.PrivateKey == "" { + privKeyInt, err := service.vm.factory.NewPrivateKey() // The private key that controls the new account + if err != nil { // The account ID is [private key].PublicKey().Address() + return errors.New("problem generating private key") + } + privKey = privKeyInt.(*crypto.PrivateKeySECP256K1R) + } else { // parse provided private key + byteFormatter := formatting.CB58{} + err := byteFormatter.FromString(args.PrivateKey) + if err != nil { + return errors.New("problem while parsing privateKey") + } + pk, err := service.vm.factory.ToPrivateKey(byteFormatter.Bytes) + if err != nil { + return errors.New("problem while parsing privateKey") + } + privKey = pk.(*crypto.PrivateKeySECP256K1R) + } + + if err := user.putAccount(privKey); err != nil { // Save the private key + return errors.New("problem saving account") + } + + reply.Address = privKey.PublicKey().Address() + + return nil +} + +type genericTx struct { + Tx interface{} `serialize:"true"` +} + +/* + ****************************************************** + ************ Add Validators to Subnets *************** + ****************************************************** + */ + +// AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator +type AddDefaultSubnetValidatorArgs struct { + APIDefaultSubnetValidator + + // Next unused nonce of the account the staked 
$AVA and tx fee are paid from + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// AddDefaultSubnetValidatorResponse is the response from a call to AddDefaultSubnetValidator +type AddDefaultSubnetValidatorResponse struct { + // The unsigned transaction + UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + +// AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet +// The returned unsigned transaction should be signed using Sign() +func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *AddDefaultSubnetValidatorResponse) error { + service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetValidator called") + + if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID + args.ID = service.vm.Ctx.NodeID + } + + // Create the transaction + tx := addDefaultSubnetValidatorTx{UnsignedAddDefaultSubnetValidatorTx: UnsignedAddDefaultSubnetValidatorTx{ + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: args.ID, + Wght: args.weight(), + }, + Start: uint64(args.StartTime), + End: uint64(args.EndTime), + }, + Nonce: uint64(args.PayerNonce), + Destination: args.Destination, + NetworkID: service.vm.Ctx.NetworkID, + Shares: uint32(args.DelegationFeeRate), + }} + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return fmt.Errorf("problem while creating transaction: %w", err) + } + + reply.UnsignedTx.Bytes = txBytes + return nil +} + +// AddDefaultSubnetDelegatorArgs are the arguments to AddDefaultSubnetDelegator +type AddDefaultSubnetDelegatorArgs struct { + APIValidator + + Destination ids.ShortID `json:"destination"` + + // Next unused nonce of the account the staked $AVA and tx fee are paid from + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// AddDefaultSubnetDelegatorResponse is the response from a call to AddDefaultSubnetDelegator +type AddDefaultSubnetDelegatorResponse struct { + // The unsigned transaction + 
UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + +// AddDefaultSubnetDelegator returns an unsigned transaction to add a delegator +// to the default subnet +// The returned unsigned transaction should be signed using Sign() +func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *AddDefaultSubnetDelegatorResponse) error { + service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetDelegator called") + + if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID + args.ID = service.vm.Ctx.NodeID + } + + // Create the transaction + tx := addDefaultSubnetDelegatorTx{UnsignedAddDefaultSubnetDelegatorTx: UnsignedAddDefaultSubnetDelegatorTx{ + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: args.ID, + Wght: args.weight(), + }, + Start: uint64(args.StartTime), + End: uint64(args.EndTime), + }, + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + Destination: args.Destination, + }} + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return fmt.Errorf("problem while creating transaction: %w", err) + } + + reply.UnsignedTx.Bytes = txBytes + return nil +} + +// AddNonDefaultSubnetValidatorArgs are the arguments to AddNonDefaultSubnetValidator +type AddNonDefaultSubnetValidatorArgs struct { + APIValidator + + // ID of subnet to validate + SubnetID ids.ID `json:"subnetID"` + + // Next unused nonce of the account the tx fee is paid from + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// AddNonDefaultSubnetValidatorResponse is the response from a call to AddNonDefaultSubnetValidator +type AddNonDefaultSubnetValidatorResponse struct { + // The unsigned transaction + UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + +// AddNonDefaultSubnetValidator adds a validator to a subnet other than the default subnet +// Returns the unsigned transaction, which must be signed using Sign +func (service *Service) AddNonDefaultSubnetValidator(_ 
*http.Request, args *AddNonDefaultSubnetValidatorArgs, response *AddNonDefaultSubnetValidatorResponse) error { + tx := addNonDefaultSubnetValidatorTx{ + UnsignedAddNonDefaultSubnetValidatorTx: UnsignedAddNonDefaultSubnetValidatorTx{ + SubnetValidator: SubnetValidator{ + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: args.APIValidator.ID, + Wght: args.weight(), + }, + Start: uint64(args.StartTime), + End: uint64(args.EndTime), + }, + Subnet: args.SubnetID, + }, + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + }, + ControlSigs: nil, + PayerSig: [crypto.SECP256K1RSigLen]byte{}, + vm: nil, + id: ids.ID{}, + senderID: ids.ShortID{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil +} + +/* + ****************************************************** + **************** Sign/Issue Txs ********************** + ****************************************************** + */ + +// SignArgs are the arguments to Sign +type SignArgs struct { + // The bytes to sign + // Must be the output of AddDefaultSubnetValidator + Tx formatting.CB58 `json:"tx"` + + // The address of the key signing the bytes + Signer ids.ShortID `json:"signer"` + + // User that controls Signer + Username string `json:"username"` + Password string `json:"password"` +} + +// SignResponse is the response from Sign +type SignResponse struct { + // The signed bytes + Tx formatting.CB58 +} + +// Sign [args.bytes] +func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { + service.vm.Ctx.Log.Debug("platform.sign called") + + // Get the key of the Signer + db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("couldn't get data for user '%s'. 
Does user exist?", args.Username) + } + user := user{db: db} + + key, err := user.getKey(args.Signer) // Key of [args.Signer] + if err != nil { + return errDB + } + if !bytes.Equal(key.PublicKey().Address().Bytes(), args.Signer.Bytes()) { // sanity check + return errors.New("got unexpected key from database") + } + + genTx := genericTx{} + if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { + return err + } + + switch tx := genTx.Tx.(type) { + case *addDefaultSubnetValidatorTx: + genTx.Tx, err = service.signAddDefaultSubnetValidatorTx(tx, key) + case *addDefaultSubnetDelegatorTx: + genTx.Tx, err = service.signAddDefaultSubnetDelegatorTx(tx, key) + case *addNonDefaultSubnetValidatorTx: + genTx.Tx, err = service.signAddNonDefaultSubnetValidatorTx(tx, key) + case *CreateSubnetTx: + genTx.Tx, err = service.signCreateSubnetTx(tx, key) + default: + err = errors.New("Could not parse given tx. Must be one of: addDefaultSubnetValidatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") + } + if err != nil { + return err + } + + reply.Tx.Bytes, err = Codec.Marshal(genTx) + return err +} + +// Sign [unsigned] with [key] +func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { + service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + + // TODO: Should we check if tx is already signed? 
+ unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + copy(tx.Sig[:], sig) + + return tx, nil +} + +// Sign [unsigned] with [key] +func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { + service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + + // TODO: Should we check if tx is already signed? + unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + copy(tx.Sig[:], sig) + + return tx, nil +} + +// Sign [xt] with [key] +func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { + service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + + // TODO: Should we check if tx is already signed? 
	// Serialize the unsigned portion of the tx; this is what gets signed
	unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx)
	unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return nil, fmt.Errorf("error serializing unsigned tx: %v", err)
	}

	sig, err := key.Sign(unsignedTxBytes)
	if err != nil {
		return nil, errors.New("error while signing")
	}
	// Defensive check: the signature must be exactly the fixed SECP256K1R length
	if len(sig) != crypto.SECP256K1RSigLen {
		return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig))
	}
	copy(tx.Sig[:], sig)

	return tx, nil
}

// Signs an unsigned or partially signed addNonDefaultSubnetValidatorTx with [key]
// If [key] is a control key for the subnet and there is an empty spot in tx.ControlSigs, signs there
// If [key] is a control key for the subnet and there is no empty spot in tx.ControlSigs, signs as payer
// If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee)
// Sorts tx.ControlSigs before returning
// Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes
// NOTE(review): the sorting of tx.ControlSigs promised above is not visible in
// this function's body — confirm it happens before the tx is issued.
func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) {
	service.vm.Ctx.Log.Debug("platform.signAddNonDefaultSubnetValidatorTx called")

	// Compute the byte repr. of the unsigned tx and the signature of [key] over it
	unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx)
	unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return nil, fmt.Errorf("error serializing unsigned tx: %v", err)
	}
	sig, err := key.Sign(unsignedTxBytes)
	if err != nil {
		return nil, errors.New("error while signing")
	}
	if len(sig) != crypto.SECP256K1RSigLen {
		return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig))
	}

	// Get information about the subnet so we know its control keys and threshold
	subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID())
	if err != nil {
		return nil, fmt.Errorf("problem getting subnet information: %v", err)
	}

	// Find the location at which [key] should put its signature.
	// If [key] is a control key for this subnet and there is an empty spot in tx.ControlSigs, sign there
	// If [key] is a control key for this subnet and there is no empty spot in tx.ControlSigs, sign as payer
	// If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee)
	controlKeySet := ids.ShortSet{}
	controlKeySet.Add(subnet.ControlKeys...)
+ isControlKey := controlKeySet.Contains(key.PublicKey().Address()) + + payerSigEmpty := tx.PayerSig == [crypto.SECP256K1RSigLen]byte{} // true if no key has signed to pay the tx fee + + if isControlKey && len(tx.ControlSigs) != int(subnet.Threshold) { // Sign as controlSig + tx.ControlSigs = append(tx.ControlSigs, [crypto.SECP256K1RSigLen]byte{}) + copy(tx.ControlSigs[len(tx.ControlSigs)-1][:], sig) + } else if payerSigEmpty { // sign as payer + copy(tx.PayerSig[:], sig) + } else { + return nil, errors.New("no place for key to sign") + } + + return tx, nil +} + +// IssueTxArgs are the arguments to IssueTx +type IssueTxArgs struct { + // Tx being sent to the network + Tx formatting.CB58 `json:"tx"` +} + +// IssueTxResponse is the response from IssueTx +type IssueTxResponse struct { + // ID of transaction being sent to network + TxID ids.ID `json:"txID"` +} + +// IssueTx issues the transaction [args.Tx] to the network +func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { + genTx := genericTx{} + if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { + return err + } + + switch tx := genTx.Tx.(type) { + case TimedTx: + if err := tx.initialize(service.vm); err != nil { + return fmt.Errorf("error initializing tx: %s", err) + } + service.vm.unissuedEvents.Push(tx) + defer service.vm.resetTimer() + response.TxID = tx.ID() + return nil + case *CreateSubnetTx: + if err := tx.initialize(service.vm); err != nil { + return fmt.Errorf("error initializing tx: %s", err) + } + service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) + defer service.vm.resetTimer() + response.TxID = tx.ID + return nil + default: + return errors.New("Could not parse given tx. 
Must be one of: addDefaultSubnetValidatorTx, addDefaultSubnetDelegatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") + } +} + +/* + ****************************************************** + **************** Create a Subnet ********************* + ****************************************************** + */ + +// CreateSubnetArgs are the arguments to CreateSubnet +type CreateSubnetArgs struct { + // The ID member of APISubnet is ignored + APISubnet + + // Nonce of the account that pays the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// CreateSubnetResponse is the response from a call to CreateSubnet +type CreateSubnetResponse struct { + // Byte representation of the unsigned transaction to create a new subnet + UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + +// CreateSubnet returns an unsigned transaction to create a new subnet. +// The unsigned transaction must be signed with the key of [args.Payer] +func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateSubnetResponse) error { + service.vm.Ctx.Log.Debug("platform.createSubnet called") + + // Create the transaction + tx := CreateSubnetTx{ + UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + ControlKeys: args.ControlKeys, + Threshold: uint16(args.Threshold), + }, + key: nil, + Sig: [65]byte{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil + +} + +/* + ****************************************************** + ******** Create/get status of a blockchain *********** + ****************************************************** + */ + +// CreateBlockchainArgs is the arguments for calling CreateBlockchain +type CreateBlockchainArgs struct { + // ID of the VM the new blockchain is running + VMID string `json:"vmID"` + + // IDs of the FXs the VM is running + 
FxIDs []string `json:"fxIDs"` + + // Human-readable name for the new blockchain, not necessarily unique + Name string `json:"name"` + + // To generate the byte representation of the genesis data for this blockchain, + // a POST request with body [GenesisData] is made to the API method whose name is [Method], whose + // endpoint is [Endpoint]. See Platform Chain documentation for more info and examples. + Method string `json:"method"` + Endpoint string `json:"endpoint"` + GenesisData interface{} `json:"genesisData"` +} + +// CreateGenesisReply is the reply from a call to CreateGenesis +type CreateGenesisReply struct { + Bytes formatting.CB58 `json:"bytes"` +} + +// CreateBlockchainReply is the reply from calling CreateBlockchain +type CreateBlockchainReply struct { + BlockchainID ids.ID `json:"blockchainID"` +} + +// CreateBlockchain issues a transaction to the network to create a new blockchain +func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, reply *CreateBlockchainReply) error { + vmID, err := service.vm.ChainManager.LookupVM(args.VMID) + if err != nil { + return fmt.Errorf("no VM with ID '%s' found", args.VMID) + } + + fxIDs := []ids.ID(nil) + for _, fxIDStr := range args.FxIDs { + fxID, err := service.vm.ChainManager.LookupVM(fxIDStr) + if err != nil { + return fmt.Errorf("no FX with ID '%s' found", fxIDStr) + } + fxIDs = append(fxIDs, fxID) + } + + genesisBytes := []byte(nil) + if args.Method != "" { + buf, err := json2.EncodeClientRequest(args.Method, args.GenesisData) + if err != nil { + return fmt.Errorf("problem building blockchain genesis state: %w", err) + } + + writer := httptest.NewRecorder() + service.vm.Ctx.HTTP.Call( + /*writer=*/ writer, + /*method=*/ "POST", + /*base=*/ args.VMID, + /*endpoint=*/ args.Endpoint, + /*body=*/ bytes.NewBuffer(buf), + /*headers=*/ map[string]string{ + "Content-Type": "application/json", + }, + ) + + result := CreateGenesisReply{} + if err := json2.DecodeClientResponse(writer.Body, 
&result); err != nil { + return fmt.Errorf("problem building blockchain genesis state: %w", err) + } + genesisBytes = result.Bytes.Bytes + } else if args.GenesisData != nil { + return errNoMethodWithGenesis + } + + // TODO: Should use the key store to sign this transaction. + // TODO: Nonce shouldn't always be 0 + tx, err := service.vm.newCreateChainTx(0, genesisBytes, vmID, fxIDs, args.Name, service.vm.Ctx.NetworkID, key) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + // Add this tx to the set of unissued txs + service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) + service.vm.resetTimer() + + reply.BlockchainID = tx.ID() + + return nil +} + +// GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus +// [BlockchainID] is the blockchain to get the status of. +type GetBlockchainStatusArgs struct { + BlockchainID string `json:"blockchainID"` +} + +// GetBlockchainStatusReply is the reply from calling GetBlockchainStatus +// [Status] is the blockchain's status. +type GetBlockchainStatusReply struct { + Status Status `json:"status"` +} + +// GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. 
+func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { + _, err := service.vm.ChainManager.Lookup(args.BlockchainID) + if err == nil { + reply.Status = Validating + return nil + } + + bID, err := ids.FromString(args.BlockchainID) + if err != nil { + return fmt.Errorf("problem parsing blockchainID '%s': %w", args.BlockchainID, err) + } + + lastAcceptedID := service.vm.LastAccepted() + if exists, err := service.chainExists(lastAcceptedID, bID); err != nil { + return fmt.Errorf("problem looking up blockchain: %w", err) + } else if exists { + reply.Status = Created + return nil + } + + preferred := service.vm.Preferred() + if exists, err := service.chainExists(preferred, bID); err != nil { + return fmt.Errorf("problem looking up blockchain: %w", err) + } else if exists { + reply.Status = Preferred + return nil + } + + return nil +} + +func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error) { + blockIntf, err := service.vm.getBlock(blockID) + if err != nil { + return false, err + } + + block, ok := blockIntf.(decision) + if !ok { + block, ok = blockIntf.Parent().(decision) + if !ok { + return false, errMissingDecisionBlock + } + } + db := block.onAccept() + + chains, err := service.vm.getChains(db) + for _, chain := range chains { + if chain.ID().Equals(chainID) { + return true, nil + } + } + + return false, nil +} diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go new file mode 100644 index 0000000..e1ece30 --- /dev/null +++ b/vms/platformvm/service_test.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "encoding/json" + "testing" +) + +func TestAddDefaultSubnetValidator(t *testing.T) { + expectedJSONString := `{"startTime":"0","endtime":"0","id":null,"destination":null,"delegationFeeRate":"0","payerNonce":"0"}` + args := AddDefaultSubnetValidatorArgs{} + bytes, err := json.Marshal(&args) + if err != nil { + t.Fatal(err) + } + jsonString := string(bytes) + if jsonString != expectedJSONString { + t.Fatalf("Expected: %s\nResult: %s", expectedJSONString, jsonString) + } +} + +func TestCreateBlockchainArgsParsing(t *testing.T) { + jsonString := `{"vmID":"lol","chainName":"awesome","genesisData":{"key":"value"}}` + args := CreateBlockchainArgs{} + err := json.Unmarshal([]byte(jsonString), &args) + if err != nil { + t.Fatal(err) + } + if _, err = json.Marshal(args.GenesisData); err != nil { + t.Fatal(err) + } +} diff --git a/vms/platformvm/standard_block.go b/vms/platformvm/standard_block.go new file mode 100644 index 0000000..847f5c9 --- /dev/null +++ b/vms/platformvm/standard_block.go @@ -0,0 +1,108 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/core" +) + +// DecisionTx is an operation that can be decided without being proposed +type DecisionTx interface { + initialize(vm *VM) error + + // Attempt to verify this transaction with the provided state. The provided + // database can be modified arbitrarily. If a nil error is returned, it is + // assumped onAccept is non-nil. + SemanticVerify(database.Database) (onAccept func(), err error) +} + +// StandardBlock being accepted results in the transactions contained in the +// block to be accepted and committed to the chain. 
+type StandardBlock struct { + CommonDecisionBlock `serialize:"true"` + + Txs []DecisionTx `serialize:"true"` +} + +// initialize this block +func (sb *StandardBlock) initialize(vm *VM, bytes []byte) error { + if err := sb.CommonDecisionBlock.initialize(vm, bytes); err != nil { + return err + } + for _, tx := range sb.Txs { + if err := tx.initialize(vm); err != nil { + return err + } + } + return nil +} + +// Verify this block performs a valid state transition. +// +// The parent block must be a proposal +// +// This function also sets onAcceptDB database if the verification passes. +func (sb *StandardBlock) Verify() error { + // StandardBlock is not a modifier on a proposal block, so its parent must + // be a decision. + parent, ok := sb.parentBlock().(decision) + if !ok { + return errInvalidBlockType + } + + pdb := parent.onAccept() + + sb.onAcceptDB = versiondb.New(pdb) + funcs := []func(){} + for _, tx := range sb.Txs { + onAccept, err := tx.SemanticVerify(sb.onAcceptDB) + if err != nil { + return err + } + if onAccept != nil { + funcs = append(funcs, onAccept) + } + } + + if numFuncs := len(funcs); numFuncs == 1 { + sb.onAcceptFunc = funcs[0] + } else if numFuncs > 1 { + sb.onAcceptFunc = func() { + for _, f := range funcs { + f() + } + } + } + + sb.vm.currentBlocks[sb.ID().Key()] = sb + sb.parentBlock().addChild(sb) + return nil +} + +// newStandardBlock returns a new *StandardBlock where the block's parent, a +// decision block, has ID [parentID]. 
+func (vm *VM) newStandardBlock(parentID ids.ID, txs []DecisionTx) (*StandardBlock, error) { + sb := &StandardBlock{ + CommonDecisionBlock: CommonDecisionBlock{ + CommonBlock: CommonBlock{ + Block: core.NewBlock(parentID), + vm: vm, + }, + }, + Txs: txs, + } + + // We serialize this block as a Block so that it can be deserialized into a + // Block + blk := Block(sb) + bytes, err := Codec.Marshal(&blk) + if err != nil { + return nil, err + } + sb.Block.Initialize(bytes, vm.SnowmanVM) + return sb, nil +} diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go new file mode 100644 index 0000000..632abcc --- /dev/null +++ b/vms/platformvm/state.go @@ -0,0 +1,299 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + "fmt" + "time" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/snowman" +) + +// This file contains methods of VM that deal with getting/putting values from database + +// TODO: Cache prefixed IDs or use different way of keying into database +const ( + currentValidatorsPrefix uint64 = iota + pendingValidatorsPrefix +) + +// get the validators currently validating the specified subnet +func (vm *VM) getCurrentValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { + // if current validators aren't specified in database, return empty validator set + key := subnetID.Prefix(currentValidatorsPrefix) + has, err := vm.State.Has(db, validatorsTypeID, key) + if err != nil { + return nil, err + } + if !has { + return &EventHeap{ + SortByStartTime: false, + Txs: make([]TimedTx, 0), + }, nil + } + currentValidatorsInterface, err := vm.State.Get(db, validatorsTypeID, key) + if err != nil { + return nil, err + } + currentValidators, ok := currentValidatorsInterface.(*EventHeap) + if !ok { + vm.Ctx.Log.Warn("expected to retrieve *AddStakerHeap from database but got different type") + 
return nil, err + } + for _, validator := range currentValidators.Txs { + if err := validator.initialize(vm); err != nil { + return nil, err + } + } + return currentValidators, nil +} + +// put the validators currently validating the specified subnet +func (vm *VM) putCurrentValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { + err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(currentValidatorsPrefix), validators) + if err != nil { + return errDBPutCurrentValidators + } + return nil +} + +// get the validators that are slated to validate the specified subnet in the future +func (vm *VM) getPendingValidators(db database.Database, subnetID ids.ID) (*EventHeap, error) { + // if pending validators aren't specified in database, return empty validator set + key := subnetID.Prefix(pendingValidatorsPrefix) + has, err := vm.State.Has(db, validatorsTypeID, key) + if err != nil { + return nil, err + } + if !has { + return &EventHeap{ + SortByStartTime: true, + Txs: make([]TimedTx, 0), + }, nil + } + pendingValidatorHeapInterface, err := vm.State.Get(db, validatorsTypeID, key) + if err != nil { + return nil, errDBPendingValidators + } + pendingValidatorHeap, ok := pendingValidatorHeapInterface.(*EventHeap) + if !ok { + vm.Ctx.Log.Error("expected to retrieve *EventHeap from database but got different type") + return nil, errDBPendingValidators + } + return pendingValidatorHeap, nil +} + +// put the validators that are slated to validate the specified subnet in the future +func (vm *VM) putPendingValidators(db database.Database, validators *EventHeap, subnetID ids.ID) error { + if !validators.SortByStartTime { + return errors.New("pending validators should be sorted by start time") + } + err := vm.State.Put(db, validatorsTypeID, subnetID.Prefix(pendingValidatorsPrefix), validators) + if err != nil { + return errDBPutPendingValidators + } + return nil +} + +// get the account with the specified Address +// If account does not exist in 
database, return new account +func (vm *VM) getAccount(db database.Database, address ids.ShortID) (Account, error) { + longID := address.LongID() + + // see if account exists + exists, err := vm.State.Has(db, accountTypeID, longID) + if err != nil { + return Account{}, err + } + if !exists { // account doesn't exist so return new, empty account + return Account{ + Address: address, + Nonce: 0, + Balance: 0, + }, nil + } + + accountInterface, err := vm.State.Get(db, accountTypeID, longID) + if err != nil { + return Account{}, err + } + account, ok := accountInterface.(Account) + if !ok { + vm.Ctx.Log.Warn("expected to retrieve Account from database but got different type") + return Account{}, errDBAccount + } + return account, nil +} + +// put an account in [db] +func (vm *VM) putAccount(db database.Database, account Account) error { + err := vm.State.Put(db, accountTypeID, account.Address.LongID(), account) + if err != nil { + return errDBPutAccount + } + return nil +} + +// get the blockchains that exist +func (vm *VM) getChains(db database.Database) ([]*CreateChainTx, error) { + chainsInterface, err := vm.State.Get(db, chainsTypeID, chainsKey) + if err != nil { + return nil, err + } + chains, ok := chainsInterface.([]*CreateChainTx) + if !ok { + vm.Ctx.Log.Warn("expected to retrieve []*CreateChainTx from database but got different type") + return nil, errDBChains + } + return chains, nil +} + +// put the list of blockchains that exist to database +func (vm *VM) putChains(db database.Database, chains createChainList) error { + if err := vm.State.Put(db, chainsTypeID, chainsKey, chains); err != nil { + return errDBPutChains + } + return nil +} + +// get the platfrom chain's timestamp from [db] +func (vm *VM) getTimestamp(db database.Database) (time.Time, error) { + timestamp, err := vm.State.GetTime(db, timestampKey) + if err != nil { + return time.Time{}, err + } + return timestamp, nil +} + +// put the platform chain's timestamp in [db] +func (vm *VM) 
putTimestamp(db database.Database, timestamp time.Time) error { + if err := vm.State.PutTime(db, timestampKey, timestamp); err != nil { + return err + } + return nil +} + +// put the subnets that exist to [db] +func (vm *VM) putSubnets(db database.Database, subnets CreateSubnetTxList) error { + if err := vm.State.Put(db, subnetsTypeID, subnetsKey, subnets); err != nil { + return err + } + return nil +} + +// get the subnets that exist in [db] +func (vm *VM) getSubnets(db database.Database) ([]*CreateSubnetTx, error) { + subnetsIntf, err := vm.State.Get(db, subnetsTypeID, subnetsKey) + if err != nil { + return nil, err + } + subnets, ok := subnetsIntf.([]*CreateSubnetTx) + if !ok { + vm.Ctx.Log.Warn("expected to retrieve []*CreateSubnetTx from database but got different type") + return nil, errDB + } + for _, subnet := range subnets { + subnet.vm = vm + } + return subnets, nil +} + +// get the subnet with the specified ID +func (vm *VM) getSubnet(db database.Database, ID ids.ID) (*CreateSubnetTx, error) { + subnets, err := vm.getSubnets(db) + if err != nil { + return nil, err + } + + for _, subnet := range subnets { + if subnet.ID.Equals(ID) { + return subnet, nil + } + } + return nil, fmt.Errorf("couldn't find subnet with ID %s", ID) +} + +// register each type that we'll be storing in the database +// so that [vm.State] knows how to unmarshal these types from bytes +func (vm *VM) registerDBTypes() { + unmarshalValidatorsFunc := func(bytes []byte) (interface{}, error) { + stakers := EventHeap{} + if err := Codec.Unmarshal(bytes, &stakers); err != nil { + return nil, err + } + for _, tx := range stakers.Txs { + if err := tx.initialize(vm); err != nil { + return nil, err + } + } + return &stakers, nil + } + if err := vm.State.RegisterType(validatorsTypeID, unmarshalValidatorsFunc); err != nil { + vm.Ctx.Log.Warn(errRegisteringType.Error()) + } + + unmarshalAccountFunc := func(bytes []byte) (interface{}, error) { + var account Account + if err := 
Codec.Unmarshal(bytes, &account); err != nil { + return nil, err + } + return account, nil + } + if err := vm.State.RegisterType(accountTypeID, unmarshalAccountFunc); err != nil { + vm.Ctx.Log.Warn(errRegisteringType.Error()) + } + + unmarshalChainsFunc := func(bytes []byte) (interface{}, error) { + var chains []*CreateChainTx + if err := Codec.Unmarshal(bytes, &chains); err != nil { + return nil, err + } + for _, chain := range chains { + if err := chain.initialize(vm); err != nil { + return nil, err + } + } + return chains, nil + } + if err := vm.State.RegisterType(chainsTypeID, unmarshalChainsFunc); err != nil { + vm.Ctx.Log.Warn(errRegisteringType.Error()) + } + + unmarshalSubnetsFunc := func(bytes []byte) (interface{}, error) { + var subnets []*CreateSubnetTx + if err := Codec.Unmarshal(bytes, &subnets); err != nil { + return nil, err + } + for _, subnet := range subnets { + if err := subnet.initialize(vm); err != nil { + return nil, err + } + } + return subnets, nil + } + if err := vm.State.RegisterType(subnetsTypeID, unmarshalSubnetsFunc); err != nil { + vm.Ctx.Log.Warn(errRegisteringType.Error()) + } +} + +// Unmarshal a Block from bytes and initialize it +// The Block being unmarshaled must have had static type Block when it was marshaled +// i.e. 
don't do: +// block := &Abort{} (or some other type that implements block) +// bytes := codec.Marshal(block) +// instead do: +// var block Block = &Abort{} (or some other type that implements block) +// bytes := codec.Marshal(&block) (need to do &block, not block, because its an interface) +func (vm *VM) unmarshalBlockFunc(bytes []byte) (snowman.Block, error) { + // Parse the serialized fields from bytes into a new block + var block Block + if err := Codec.Unmarshal(bytes, &block); err != nil { + return nil, err + } + // Populate the un-serialized fields of the block + return block, block.initialize(vm, bytes) +} diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go new file mode 100644 index 0000000..cdfd1d7 --- /dev/null +++ b/vms/platformvm/static_service.go @@ -0,0 +1,210 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "container/heap" + "errors" + "net/http" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +// Note that since an AVA network has exactly one Platform Chain, +// and the Platform Chain defines the genesis state of the network +// (who is staking, which chains exist, etc.), defining the genesis +// state of the Platform Chain is the same as defining the genesis +// state of the network. + +var ( + errAccountHasNoValue = errors.New("account has no value") + errValidatorAddsNoValue = errors.New("validator would have already unstaked") +) + +// StaticService defines the static API methods exposed by the platform VM +type StaticService struct{} + +// APIAccount is an account on the Platform Chain +// that exists at the chain's genesis. +type APIAccount struct { + Address ids.ShortID `json:"address"` + Nonce json.Uint64 `json:"nonce"` + Balance json.Uint64 `json:"balance"` +} + +// APIValidator is a validator. +// [Amount] is the amount of $AVA being staked. 
+// [Endtime] is the Unix time repr. of when they are done staking +// [ID] is the node ID of the staker +// [Destination] is the address where the staked $AVA (and, if applicable, reward) +// is sent when this staker is done staking. +type APIValidator struct { + StartTime json.Uint64 `json:"startTime"` + EndTime json.Uint64 `json:"endtime"` + Weight *json.Uint64 `json:"weight,omitempty"` + StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"` + ID ids.ShortID `json:"id"` +} + +func (v *APIValidator) weight() uint64 { + switch { + case v.Weight != nil: + return uint64(*v.Weight) + case v.StakeAmount != nil: + return uint64(*v.StakeAmount) + default: + return 0 + } +} + +// APIDefaultSubnetValidator is a validator of the default subnet +type APIDefaultSubnetValidator struct { + APIValidator + + Destination ids.ShortID `json:"destination"` + DelegationFeeRate json.Uint32 `json:"delegationFeeRate"` +} + +// APIChain defines a chain that exists +// at the network's genesis. +// [GenesisData] is the initial state of the chain. +// [VMID] is the ID of the VM this chain runs. +// [FxIDs] are the IDs of the Fxs the chain supports. +// [Name] is a human-readable, non-unique name for the chain. +type APIChain struct { + GenesisData formatting.CB58 `json:"genesisData"` + VMID ids.ID `json:"vmID"` + FxIDs []ids.ID `json:"fxIDs"` + Name string `json:"name"` +} + +// BuildGenesisArgs are the arguments used to create +// the genesis data of the Platform Chain. +// [NetworkID] is the ID of the network +// [Accounts] are the accounts on the Platform Chain +// that exists at genesis. +// [Validators] are the validators of the default subnet at genesis. +// [Chains] are the chains that exist at genesis. +// [Time] is the Platform Chain's time at network genesis. 
+type BuildGenesisArgs struct { + NetworkID json.Uint32 `json:"address"` + Accounts []APIAccount `json:"accounts"` + Validators []APIDefaultSubnetValidator `json:"defaultSubnetValidators"` + Chains []APIChain `json:"chains"` + Time json.Uint64 `json:"time"` +} + +// BuildGenesisReply is the reply from BuildGenesis +type BuildGenesisReply struct { + Bytes formatting.CB58 `json:"bytes"` +} + +// Genesis represents a genesis state of the platform chain +type Genesis struct { + Accounts []Account `serialize:"true"` + Validators *EventHeap `serialize:"true"` + Chains []*CreateChainTx `serialize:"true"` + Timestamp uint64 `serialize:"true"` +} + +// Initialize ... +func (g *Genesis) Initialize() error { + for _, tx := range g.Validators.Txs { + if err := tx.initialize(nil); err != nil { + return err + } + } + for _, chain := range g.Chains { + if err := chain.initialize(nil); err != nil { + return err + } + } + return nil +} + +// BuildGenesis build the genesis state of the Platform Chain (and thereby the AVA network.) +func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + // Specify the accounts on the Platform chain that exist at genesis. + accounts := []Account(nil) + for _, account := range args.Accounts { + if account.Balance == 0 { + return errAccountHasNoValue + } + accounts = append(accounts, newAccount( + account.Address, // ID + 0, // nonce + uint64(account.Balance), // balance + )) + } + + // Specify the validators that are validating the default subnet at genesis. 
+ validators := &EventHeap{} + for _, validator := range args.Validators { + weight := validator.weight() + if weight == 0 { + return errValidatorAddsNoValue + } + if uint64(validator.EndTime) <= uint64(args.Time) { + return errValidatorAddsNoValue + } + + tx := &addDefaultSubnetValidatorTx{ + UnsignedAddDefaultSubnetValidatorTx: UnsignedAddDefaultSubnetValidatorTx{ + DurationValidator: DurationValidator{ + Validator: Validator{ + NodeID: validator.ID, + Wght: weight, + }, + Start: uint64(args.Time), + End: uint64(validator.EndTime), + }, + NetworkID: uint32(args.NetworkID), + Nonce: 0, + Destination: validator.Destination, + }, + } + if err := tx.initialize(nil); err != nil { + return err + } + + heap.Push(validators, tx) + } + + // Specify the chains that exist at genesis. + chains := []*CreateChainTx{} + for _, chain := range args.Chains { + // Ordinarily we sign a createChainTx. For genesis, there is no key. + // We generate the ID of this tx by hashing the bytes of the unsigned transaction + // TODO: Should we just sign this tx with a private key that we share publicly? + tx := &CreateChainTx{ + UnsignedCreateChainTx: UnsignedCreateChainTx{ + NetworkID: uint32(args.NetworkID), + Nonce: 0, + ChainName: chain.Name, + VMID: chain.VMID, + FxIDs: chain.FxIDs, + GenesisData: chain.GenesisData.Bytes, + }, + } + if err := tx.initialize(nil); err != nil { + return err + } + + chains = append(chains, tx) + } + + // genesis holds the genesis state + genesis := Genesis{ + Accounts: accounts, + Validators: validators, + Chains: chains, + Timestamp: uint64(args.Time), + } + // Marshal genesis to bytes + bytes, err := Codec.Marshal(genesis) + reply.Bytes.Bytes = bytes + return err +} diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go new file mode 100644 index 0000000..d1bdc4e --- /dev/null +++ b/vms/platformvm/static_service_test.go @@ -0,0 +1,215 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +func TestBuildGenesis(t *testing.T) { + expected := []byte{ + 0x00, 0x00, 0x00, 0x01, 0x01, 0x5c, 0xce, 0x6c, + 0x55, 0xd6, 0xb5, 0x09, 0x84, 0x5c, 0x8c, 0x4e, + 0x30, 0xbe, 0xd9, 0x8d, 0x39, 0x1a, 0xe7, 0xf0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x05, 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, + 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, + 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, + 0x00, 0x3a, 0xde, 0x68, 0xb1, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, + 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, + 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x4d, 0x79, 0x20, 0x46, + 0x61, 0x76, 0x6f, 0x72, 0x69, 0x74, 0x65, 0x20, + 0x45, 0x70, 0x69, 0x73, 0x6f, 0x64, 0x65, 0x53, + 0x6f, 0x75, 0x74, 0x68, 0x20, 0x50, 0x61, 0x72, + 0x6b, 0x20, 0x65, 0x70, 0x69, 0x73, 0x6f, 0x64, + 0x65, 0x20, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x53, + 0x63, 
0x6f, 0x74, 0x74, 0x20, 0x54, 0x65, 0x6e, + 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x20, 0x6d, 0x75, + 0x73, 0x74, 0x20, 0x64, 0x69, 0x65, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + } + + addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + genesisData := formatting.CB58{} + genesisData.FromString("CGgRrQ3nws7RRMGyDV59cetJBAwmsmDyCSgku") + vmID, _ := ids.FromString("dkFD29iYU9e9jah2nrnksTWJUy2VVpg5Lnqd7nQqvCJgR26H4") + + account := APIAccount{ + Address: addr, + Balance: 123456789, + } + weight := json.Uint64(987654321) + validator := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + EndTime: 15, + Weight: &weight, + ID: addr, + }, + Destination: addr, + } + chains := APIChain{ + GenesisData: genesisData, + VMID: vmID, + Name: "My Favorite Episode", + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + Validators: []APIDefaultSubnetValidator{ + validator, + }, + Chains: []APIChain{ + chains, + }, + Time: 5, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err != nil { + t.Fatal(err) + } + + if !bytes.Equal(reply.Bytes.Bytes, expected) { + t.Fatalf("StaticService.BuildGenesis:\nReturned:\n%s\nExpected:\n%s", + formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, + formatting.DumpBytes{Bytes: expected}) + } +} + +func TestBuildGenesisInvalidAccountBalance(t *testing.T) { + id, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + account := APIAccount{ + Address: id, + Balance: 0, + } + weight := json.Uint64(987654321) + validator := 
APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + EndTime: 15, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + Validators: []APIDefaultSubnetValidator{ + validator, + }, + Time: 5, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err == nil { + t.Fatalf("Should have errored due to an invalid balance") + } +} + +func TestBuildGenesisInvalidAmount(t *testing.T) { + id, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + account := APIAccount{ + Address: id, + Balance: 123456789, + } + weight := json.Uint64(0) + validator := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + StartTime: 0, + EndTime: 15, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + Validators: []APIDefaultSubnetValidator{ + validator, + }, + Time: 5, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err == nil { + t.Fatalf("Should have errored due to an invalid amount") + } +} + +func TestBuildGenesisInvalidEndtime(t *testing.T) { + id, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + account := APIAccount{ + Address: id, + Balance: 123456789, + } + + weight := json.Uint64(987654321) + validator := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + StartTime: 0, + EndTime: 5, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + Validators: []APIDefaultSubnetValidator{ + validator, + }, + Time: 5, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err == nil { + t.Fatalf("Should have errored due to an invalid end time") + } +} diff --git a/vms/platformvm/status.go b/vms/platformvm/status.go new file mode 100644 index 0000000..33ff9ea 
--- /dev/null +++ b/vms/platformvm/status.go @@ -0,0 +1,81 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" +) + +var ( + errUnknownStatus = errors.New("unknown status") +) + +// Status ... +type Status uint32 + +// List of possible status values +// [Unknown] Zero value, means the status is not known +// [Preferred] means the operation is known and preferred, but hasn't been decided yet +// [Created] means the operation occurred, but isn't managed locally +// [Validating] means the operation was accepted and is managed locally +const ( + Unknown Status = iota + Preferred + Created + Validating +) + +// MarshalJSON ... +func (s Status) MarshalJSON() ([]byte, error) { + if err := s.Valid(); err != nil { + return nil, err + } + return []byte("\"" + s.String() + "\""), nil +} + +// UnmarshalJSON ... +func (s *Status) UnmarshalJSON(b []byte) error { + str := string(b) + if str == "null" { + return nil + } + switch str { + case "\"Unknown\"": + *s = Unknown + case "\"Preferred\"": + *s = Preferred + case "\"Created\"": + *s = Created + case "\"Validating\"": + *s = Validating + default: + return errUnknownStatus + } + return nil +} + +// Valid returns nil if the status is a valid status. +func (s Status) Valid() error { + switch s { + case Unknown, Preferred, Created, Validating: + return nil + default: + return errUnknownStatus + } +} + +func (s Status) String() string { + switch s { + case Unknown: + return "Unknown" + case Preferred: + return "Preferred" + case Created: + return "Created" + case Validating: + return "Validating" + default: + return "Invalid status" + } +} diff --git a/vms/platformvm/status_test.go b/vms/platformvm/status_test.go new file mode 100644 index 0000000..ea112cf --- /dev/null +++ b/vms/platformvm/status_test.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "math" + "testing" +) + +func TestStatusValid(t *testing.T) { + if err := Validating.Valid(); err != nil { + t.Fatalf("%s failed verification", Validating) + } else if err := Created.Valid(); err != nil { + t.Fatalf("%s failed verification", Created) + } else if err := Preferred.Valid(); err != nil { + t.Fatalf("%s failed verification", Preferred) + } else if err := Unknown.Valid(); err != nil { + t.Fatalf("%s failed verification", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.Valid() == nil { + t.Fatalf("%s passed verification", badStatus) + } +} + +func TestStatusString(t *testing.T) { + if Validating.String() != "Validating" { + t.Fatalf("%s failed printing", Validating) + } else if Created.String() != "Created" { + t.Fatalf("%s failed printing", Created) + } else if Preferred.String() != "Preferred" { + t.Fatalf("%s failed printing", Preferred) + } else if Unknown.String() != "Unknown" { + t.Fatalf("%s failed printing", Unknown) + } else if badStatus := Status(math.MaxInt32); badStatus.String() != "Invalid status" { + t.Fatalf("%s failed printing", badStatus) + } +} diff --git a/vms/platformvm/subnet.go b/vms/platformvm/subnet.go new file mode 100644 index 0000000..e8e505e --- /dev/null +++ b/vms/platformvm/subnet.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" +) + +// A Subnet is a set of validators that are validating a set of blockchains +// Each blockchain is validated by one subnet; one subnet may validate many blockchains +type Subnet interface { + // ID returns this subnet's ID + ID() ids.ID + + // Validators returns the validators that compose this subnet + Validators() []validators.Validator +} diff --git a/vms/platformvm/user.go b/vms/platformvm/user.go new file mode 100644 index 0000000..e8d9c7e --- /dev/null +++ b/vms/platformvm/user.go @@ -0,0 +1,121 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" +) + +// Key in the database whose corresponding value is the list of +// account IDs this user controls +var accountIDsKey = ids.Empty.Bytes() + +var errDbNil = errors.New("db uninitialized") + +type user struct { + // This user's database, acquired from the keystore + db database.Database +} + +// Get the IDs of the accounts controlled by this user +func (u *user) getAccountIDs() ([]ids.ShortID, error) { + if u.db == nil { + return nil, errDbNil + } + + // If user has no accounts, return empty list + hasAccounts, err := u.db.Has(accountIDsKey) + if err != nil { + return nil, errDB + } + if !hasAccounts { + return make([]ids.ShortID, 0), nil + } + // User has accounts. Get them. 
+ bytes, err := u.db.Get(accountIDsKey) + if err != nil { + return nil, errDB + } + accountIDs := []ids.ShortID{} + if err := Codec.Unmarshal(bytes, &accountIDs); err != nil { + return nil, err + } + return accountIDs, nil +} + +// controlsAccount returns true iff this user controls the account +// with the specified ID +func (u *user) controlsAccount(ID ids.ShortID) (bool, error) { + if u.db == nil { + return false, errDbNil + } + + if _, err := u.db.Get(ID.Bytes()); err == nil { + return true, nil + } + return false, nil +} + +// putAccount persists that this user controls the account whose ID is +// [privKey].PublicKey().Address() +func (u *user) putAccount(privKey *crypto.PrivateKeySECP256K1R) error { + newAccountID := privKey.PublicKey().Address() // Account thie privKey controls + controlsAccount, err := u.controlsAccount(newAccountID) + if err != nil { + return err + } + if controlsAccount { // user already controls this account. Do nothing. + return nil + } + + err = u.db.Put(newAccountID.Bytes(), privKey.Bytes()) // Account ID --> private key + if err != nil { + return errDB + } + + accountIDs := make([]ids.ShortID, 0) // Add account to list of accounts user controls + userHasAccounts, err := u.db.Has(accountIDsKey) + if err != nil { + return errDB + } + if userHasAccounts { // Get accountIDs this user already controls, if they exist + if accountIDs, err = u.getAccountIDs(); err != nil { + return errDB + } + } + accountIDs = append(accountIDs, newAccountID) + bytes, err := Codec.Marshal(accountIDs) + if err != nil { + return err + } + if err := u.db.Put(accountIDsKey, bytes); err != nil { + return errDB + } + return nil +} + +// Key returns the private key that controls the account with the specified ID +func (u *user) getKey(accountID ids.ShortID) (*crypto.PrivateKeySECP256K1R, error) { + if u.db == nil { + return nil, errDbNil + } + + factory := crypto.FactorySECP256K1R{} + bytes, err := u.db.Get(accountID.Bytes()) + if err != nil { + return nil, err + } 
+ sk, err := factory.ToPrivateKey(bytes) + if err != nil { + return nil, err + } + if sk, ok := sk.(*crypto.PrivateKeySECP256K1R); ok { + return sk, nil + } + return nil, errDB +} diff --git a/vms/platformvm/validator.go b/vms/platformvm/validator.go new file mode 100644 index 0000000..b511037 --- /dev/null +++ b/vms/platformvm/validator.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" +) + +// Validator ... +type Validator struct { + // Node ID of the staker + NodeID ids.ShortID `serialize:"true"` + + // Weight of this validator used when sampling + Wght uint64 `serialize:"true"` +} + +// ID returns the node ID of the staker +func (v *Validator) ID() ids.ShortID { return v.NodeID } + +// Weight is this validator's weight when sampling +func (v *Validator) Weight() uint64 { return v.Wght } + +// Vdr returns this validator +func (v *Validator) Vdr() validators.Validator { return v } + +// DurationValidator ... +type DurationValidator struct { + Validator `serialize:"true"` + + // Unix time this staker starts validating + Start uint64 `serialize:"true"` + + // Unix time this staker stops validating + End uint64 `serialize:"true"` +} + +// StartTime is the time that this staker will enter the validator set +func (v *DurationValidator) StartTime() time.Time { return time.Unix(int64(v.Start), 0) } + +// EndTime is the time that this staker will leave the validator set +func (v *DurationValidator) EndTime() time.Time { return time.Unix(int64(v.End), 0) } + +// Duration is the amount of time that this staker will be in the validator set +func (v *DurationValidator) Duration() time.Duration { return v.EndTime().Sub(v.StartTime()) } + +// BoundedBy returns true iff the period that [validator] validates is a +// (non-strict) subset of the time that [other] validates. 
+// Namely, startTime <= v.StartTime() <= v.EndTime() <= endTime +func (v *DurationValidator) BoundedBy(startTime, endTime time.Time) bool { + return !v.StartTime().Before(startTime) && !v.EndTime().After(endTime) +} + +// SubnetValidator validates a blockchain on the AVA network. +type SubnetValidator struct { + DurationValidator `serialize:"true"` + + // ID of the subnet this validator is validating + Subnet ids.ID `serialize:"true"` +} + +// SubnetID is the ID of the subnet this validator is validating +func (v *SubnetValidator) SubnetID() ids.ID { return v.Subnet } diff --git a/vms/platformvm/validator_test.go b/vms/platformvm/validator_test.go new file mode 100644 index 0000000..6d0d850 --- /dev/null +++ b/vms/platformvm/validator_test.go @@ -0,0 +1,96 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + "testing" +) + +var ( + errCalculatedSubsetWrong = errors.New("incorrectly calculated whether one duration was subset of other") +) + +func TestValidatorBoundedBy(t *testing.T) { + // case 1: a starts, a finishes, b starts, b finishes + aStartTime := uint64(0) + aEndTIme := uint64(1) + a := &DurationValidator{ + Validator: Validator{ + NodeID: defaultKey.PublicKey().Address(), + Wght: defaultWeight, + }, + Start: aStartTime, + End: aEndTIme, + } + + bStartTime := uint64(2) + bEndTime := uint64(3) + b := &DurationValidator{ + Validator: Validator{ + NodeID: defaultKey.PublicKey().Address(), + Wght: defaultWeight, + }, + Start: bStartTime, + End: bEndTime, + } + + if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 2: a starts, b starts, a finishes, b finishes + a.Start = 0 + b.Start = 1 + a.End = 2 + b.End = 3 + if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 3: a starts, b starts, b 
finishes, a finishes + a.Start = 0 + b.Start = 1 + b.End = 2 + a.End = 3 + if a.BoundedBy(b.StartTime(), b.EndTime()) || !b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 4: b starts, a starts, a finishes, b finishes + b.Start = 0 + a.Start = 1 + a.End = 2 + b.End = 3 + if !a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 5: b starts, b finishes, a starts, a finishes + b.Start = 0 + b.End = 1 + a.Start = 2 + a.End = 3 + if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 6: b starts, a starts, b finishes, a finishes + b.Start = 0 + a.Start = 1 + b.End = 2 + a.End = 3 + if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } + + // case 3: a starts, b starts, b finishes, a finishes + a.Start = 0 + b.Start = 0 + b.End = 1 + a.End = 1 + if !a.BoundedBy(b.StartTime(), b.EndTime()) || !b.BoundedBy(a.StartTime(), a.EndTime()) { + t.Fatal(errCalculatedSubsetWrong) + } +} diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go new file mode 100644 index 0000000..20eed9d --- /dev/null +++ b/vms/platformvm/vm.go @@ -0,0 +1,688 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "container/heap" + "errors" + "fmt" + "time" + + stdmath "math" + + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/core" +) + +const ( + // For putting/getting values from state + accountTypeID uint64 = iota + validatorsTypeID + chainsTypeID + blockTypeID + subnetsTypeID + + // Delta is the synchrony bound used for safe decision making + Delta = 10 * time.Second // TODO change to longer period (2 minutes?) before release + + // InflationRate is the maximum inflation rate of AVA from staking + InflationRate = 1.04 + + // BatchSize is the number of decision transaction to place into a block + BatchSize = 30 + + // TODO: Incorporate these constants + turn them into governable parameters + + // MinimumStakeAmount is the minimum amount of $AVA one must bond to be a staker + MinimumStakeAmount = 10 * units.MicroAva + + // MinimumStakingDuration is the shortest amount of time a staker can bond + // their funds for. + MinimumStakingDuration = 24 * time.Hour + + // MaximumStakingDuration is the longest amount of time a staker can bond + // their funds for. 
+ MaximumStakingDuration = 365 * 24 * time.Hour + + // NumberOfShares is the number of shares that a delegator is + // rewarded + NumberOfShares = 1000000 +) + +var ( + // taken from https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go/32620397#32620397 + maxTime = time.Unix(1<<63-62135596801, 0) // 0 is used because we drop the nano-seconds + + // DefaultSubnetID is the ID of the default subnet + DefaultSubnetID = ids.Empty + + timestampKey = ids.NewID([32]byte{'t', 'i', 'm', 'e'}) + currentValidatorsKey = ids.NewID([32]byte{'c', 'u', 'r', 'r', 'e', 'n', 't'}) + pendingValidatorsKey = ids.NewID([32]byte{'p', 'e', 'n', 'd', 'i', 'n', 'g'}) + chainsKey = ids.NewID([32]byte{'c', 'h', 'a', 'i', 'n', 's'}) + subnetsKey = ids.NewID([32]byte{'s', 'u', 'b', 'n', 'e', 't', 's'}) +) + +var ( + errEndOfTime = errors.New("program time is suspiciously far in the future. Either this codebase was way more successful than expected, or a critical error has occurred") + errTimeTooAdvanced = errors.New("this is proposing a time too far in the future") + errNoPendingBlocks = errors.New("no pending blocks") + errUnsupportedFXs = errors.New("unsupported feature extensions") + errDB = errors.New("problem retrieving/putting value from/in database") + errDBCurrentValidators = errors.New("couldn't retrieve current validators from database") + errDBPutCurrentValidators = errors.New("couldn't put current validators in database") + errDBPendingValidators = errors.New("couldn't retrieve pending validators from database") + errDBPutPendingValidators = errors.New("couldn't put pending validators in database") + errDBAccount = errors.New("couldn't retrieve account from database") + errDBPutAccount = errors.New("couldn't put account in database") + errDBChains = errors.New("couldn't retrieve chain list from database") + errDBPutChains = errors.New("couldn't put chain list in database") + errDBPutBlock = errors.New("couldn't put block in database") + errRegisteringType = 
errors.New("error registering type with database") + errMissingBlock = errors.New("missing block") +) + +// Codec does serialization and deserialization +var Codec codec.Codec + +func init() { + Codec = codec.NewDefault() + + errs := wrappers.Errs{} + errs.Add( + Codec.RegisterType(&ProposalBlock{}), + Codec.RegisterType(&Abort{}), + Codec.RegisterType(&Commit{}), + Codec.RegisterType(&StandardBlock{}), + + Codec.RegisterType(&UnsignedAddDefaultSubnetValidatorTx{}), + Codec.RegisterType(&addDefaultSubnetValidatorTx{}), + + Codec.RegisterType(&UnsignedAddNonDefaultSubnetValidatorTx{}), + Codec.RegisterType(&addNonDefaultSubnetValidatorTx{}), + + Codec.RegisterType(&UnsignedAddDefaultSubnetDelegatorTx{}), + Codec.RegisterType(&addDefaultSubnetDelegatorTx{}), + + Codec.RegisterType(&UnsignedCreateChainTx{}), + Codec.RegisterType(&CreateChainTx{}), + + Codec.RegisterType(&UnsignedCreateSubnetTx{}), + Codec.RegisterType(&CreateSubnetTx{}), + + Codec.RegisterType(&advanceTimeTx{}), + Codec.RegisterType(&rewardValidatorTx{}), + ) + if errs.Errored() { + panic(errs.Err) + } +} + +// VM implements the snowman.ChainVM interface +type VM struct { + *core.SnowmanVM + + Validators validators.Manager + + // The node's chain manager + ChainManager chains.Manager + + // Used to create and use keys. + factory crypto.FactorySECP256K1R + + // Used to get time. Useful for faking time during tests. + clock timer.Clock + + // Key: block ID + // Value: the block + currentBlocks map[[32]byte]Block + + // Transactions that have not been put into blocks yet + unissuedEvents *EventHeap + unissuedDecisionTxs []DecisionTx + + // This timer goes off when it is time for the next validator to add/leave the validator set + // When it goes off resetTimer() is called, triggering creation of a new block + timer *timer.Timer +} + +// Initialize this blockchain. +// [vm.ChainManager] and [vm.Validators] must be set before this function is called. 
+func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + msgs chan<- common.Message, + fxs []*common.Fx, +) error { + ctx.Log.Verbo("initializing platform chain") + + if len(fxs) != 0 { + return errUnsupportedFXs + } + + // Initialize the inner VM, which has a lot of boiler-plate logic + vm.SnowmanVM = &core.SnowmanVM{} + if err := vm.SnowmanVM.Initialize(ctx, db, vm.unmarshalBlockFunc, msgs); err != nil { + return err + } + + // Register this VM's types with the database so we can get/put structs to/from it + vm.registerDBTypes() + + // If the database is empty, create the platform chain anew using + // the provided genesis state + if !vm.DBInitialized() { + genesis := &Genesis{} + if err := Codec.Unmarshal(genesisBytes, genesis); err != nil { + return err + } + if err := genesis.Initialize(); err != nil { + return err + } + + // Persist accounts that exist at genesis + for _, account := range genesis.Accounts { + if err := vm.putAccount(vm.DB, account); err != nil { + return errDBPutAccount + } + } + + // Persist default subnet validator set at genesis + if err := vm.putCurrentValidators(vm.DB, genesis.Validators, DefaultSubnetID); err != nil { + return errDBPutCurrentValidators + } + + // Persist the subnets that exist at genesis (none do) + if err := vm.putSubnets(vm.DB, []*CreateSubnetTx{}); err != nil { + return fmt.Errorf("error putting genesis subnets: %v", err) + } + + // Ensure all chains that the genesis bytes say to create + // have the right network ID + filteredChains := []*CreateChainTx{} + for _, chain := range genesis.Chains { + if chain.NetworkID == vm.Ctx.NetworkID { + filteredChains = append(filteredChains, chain) + } else { + vm.Ctx.Log.Warn("chain has networkID %d, expected %d", chain.NetworkID, vm.Ctx.NetworkID) + } + } + + // Persist the chains that exist at genesis + if err := vm.putChains(vm.DB, filteredChains); err != nil { + return errDBPutChains + } + + // Persist the platform chain's timestamp at 
genesis + time := time.Unix(int64(genesis.Timestamp), 0) + if err := vm.State.PutTime(vm.DB, timestampKey, time); err != nil { + return errDB + } + + // There are no pending stakers at genesis + if err := vm.putPendingValidators(vm.DB, &EventHeap{SortByStartTime: true}, DefaultSubnetID); err != nil { + return errDBPutPendingValidators + } + + // Create the genesis block and save it as being accepted + // (We don't just do genesisBlock.Accept() because then it'd look for genesisBlock's + // non-existent parent) + genesisBlock := vm.newCommitBlock(ids.Empty) + if err := vm.State.PutBlock(vm.DB, genesisBlock); err != nil { + return errDB + } + genesisBlock.onAcceptDB = versiondb.New(vm.DB) + genesisBlock.CommonBlock.Accept() + + vm.SetDBInitialized() + } + + // Transactions from clients that have not yet been put into blocks + // and added to consensus + vm.unissuedEvents = &EventHeap{SortByStartTime: true} + + vm.currentBlocks = make(map[[32]byte]Block) + vm.timer = timer.NewTimer(func() { + vm.Ctx.Lock.Lock() + defer vm.Ctx.Lock.Unlock() + + vm.resetTimer() + }) + go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) + + if err := vm.updateValidators(DefaultSubnetID); err != nil { + ctx.Log.Error("failed to initialize the current validator set: %s", err) + return err + } + + // Create all of the chains that the database says exist + if err := vm.initBlockchains(); err != nil { + vm.Ctx.Log.Warn("could not retrieve existing chains from database: %s", err) + return err + } + + // Build off the most recently accepted block + vm.SetPreference(vm.LastAccepted()) + + return nil +} + +// Create all of the chains that the database says should exist +func (vm *VM) initBlockchains() error { + vm.Ctx.Log.Verbo("platform chain initializing existing blockchains") + existingChains, err := vm.getChains(vm.DB) + if err != nil { + return err + } + for _, chain := range existingChains { // Create each blockchain + chainParams := chains.ChainParameters{ + ID: chain.ID(), + GenesisData: 
chain.GenesisData, + VMAlias: chain.VMID.String(), + } + for _, fxID := range chain.FxIDs { + chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) + } + vm.ChainManager.CreateChain(chainParams) + } + return nil +} + +// Shutdown this blockchain +func (vm *VM) Shutdown() { + vm.timer.Stop() + if err := vm.DB.Close(); err != nil { + vm.Ctx.Log.Error("Closing the database failed with %s", err) + } +} + +// BuildBlock builds a block to be added to consensus +func (vm *VM) BuildBlock() (snowman.Block, error) { + vm.Ctx.Log.Debug("in BuildBlock") + preferredID := vm.Preferred() + + // If there are pending decision txs, build a block with a batch of them + if len(vm.unissuedDecisionTxs) > 0 { + numTxs := BatchSize + if numTxs > len(vm.unissuedDecisionTxs) { + numTxs = len(vm.unissuedDecisionTxs) + } + var txs []DecisionTx + txs, vm.unissuedDecisionTxs = vm.unissuedDecisionTxs[:numTxs], vm.unissuedDecisionTxs[numTxs:] + blk, err := vm.newStandardBlock(preferredID, txs) + if err != nil { + return nil, err + } + if err := blk.Verify(); err != nil { + vm.resetTimer() + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + + // Get the preferred block (which we want to build off) + preferred, err := vm.getBlock(preferredID) + vm.Ctx.Log.AssertNoError(err) + + // The database if the preferred block were to be accepted + var db database.Database + // The preferred block should always be a decision block + if preferred, ok := preferred.(decision); ok { + db = preferred.onAccept() + } else { + return nil, errInvalidBlockType + } + + // The chain time if the preferred block were to be committed + currentChainTimestamp, err := vm.getTimestamp(db) + if err != nil { + return nil, err + } + if !currentChainTimestamp.Before(maxTime) { + return nil, errEndOfTime + } + + // If the chain time would be the time for the next default subnet validator to leave, + // then we create a block that 
removes the validator and proposes they receive a validator reward + currentValidators, err := vm.getCurrentValidators(db, DefaultSubnetID) + if err != nil { + return nil, errDBCurrentValidators + } + nextValidatorEndtime := maxTime + if currentValidators.Len() > 0 { + nextValidatorEndtime = currentValidators.Peek().EndTime() + } + if currentChainTimestamp.Equal(nextValidatorEndtime) { + stakerTx := currentValidators.Peek() + rewardValidatorTx, err := vm.newRewardValidatorTx(stakerTx.ID()) + if err != nil { + return nil, err + } + blk, err := vm.newProposalBlock(preferredID, rewardValidatorTx) + if err != nil { + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + + // If local time is >= time of the next validator set change, + // propose moving the chain time forward + nextValidatorStartTime := vm.nextValidatorChangeTime(db /*start=*/, true) + nextValidatorEndTime := vm.nextValidatorChangeTime(db /*start=*/, false) + + nextValidatorSetChangeTime := nextValidatorStartTime + if nextValidatorEndTime.Before(nextValidatorStartTime) { + nextValidatorSetChangeTime = nextValidatorEndTime + } + + localTime := vm.clock.Time() + if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave + advanceTimeTx, err := vm.newAdvanceTimeTx(nextValidatorSetChangeTime) + if err != nil { + return nil, err + } + blk, err := vm.newProposalBlock(preferredID, advanceTimeTx) + if err != nil { + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + + // Propose adding a new validator but only if their start time is in the + // future relative to local time (plus Delta) + syncTime := localTime.Add(Delta) + for vm.unissuedEvents.Len() > 0 { + tx := vm.unissuedEvents.Remove() + if !syncTime.After(tx.StartTime()) { + blk, err := vm.newProposalBlock(preferredID, tx) + if err != 
nil { + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + vm.Ctx.Log.Debug("dropping tx to add validator because start time too late") + } + + vm.Ctx.Log.Debug("BuildBlock returning error (no blocks)") + return nil, errNoPendingBlocks +} + +// ParseBlock implements the snowman.ChainVM interface +func (vm *VM) ParseBlock(bytes []byte) (snowman.Block, error) { + blockInterface, err := vm.unmarshalBlockFunc(bytes) + if err != nil { + return nil, errors.New("problem parsing block") + } + block, ok := blockInterface.(snowman.Block) + if !ok { // in practice should never happen because unmarshalBlockFunc returns a snowman.Block + return nil, errors.New("problem parsing block") + } + // If we have seen this block before, return it with the most up-to-date info + if block, err := vm.GetBlock(block.ID()); err == nil { + return block, nil + } + vm.State.PutBlock(vm.DB, block) + vm.DB.Commit() + return block, nil +} + +// GetBlock implements the snowman.ChainVM interface +func (vm *VM) GetBlock(blkID ids.ID) (snowman.Block, error) { return vm.getBlock(blkID) } + +func (vm *VM) getBlock(blkID ids.ID) (Block, error) { + // If block is in memory, return it. + if blk, exists := vm.currentBlocks[blkID.Key()]; exists { + return blk, nil + } + // Block isn't in memory. If block is in database, return it. 
+ blkInterface, err := vm.State.GetBlock(vm.DB, blkID) + if err != nil { + return nil, err + } + if block, ok := blkInterface.(Block); ok { + return block, nil + } + return nil, errors.New("block not found") +} + +// SetPreference sets the preferred block to be the one with ID [blkID] +func (vm *VM) SetPreference(blkID ids.ID) { + if !blkID.Equals(vm.Preferred()) { + vm.SnowmanVM.SetPreference(blkID) + vm.resetTimer() + } +} + +// CreateHandlers returns a map where: +// * keys are API endpoint extensions +// * values are API handlers +// See API documentation for more information +func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { + // Create a service with name "platform" + handler := vm.SnowmanVM.NewHandler("platform", &Service{vm: vm}) + return map[string]*common.HTTPHandler{"": handler} +} + +// CreateStaticHandlers implements the snowman.ChainVM interface +func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { + // Static service's name is platform + handler := vm.SnowmanVM.NewHandler("platform", &StaticService{}) + return map[string]*common.HTTPHandler{ + "": handler, + } +} + +// Check if there is a block ready to be added to consensus +// If so, notify the consensus engine +func (vm *VM) resetTimer() { + // If there is a pending CreateChainTx, trigger building of a block + // with that transaction + if len(vm.unissuedDecisionTxs) > 0 { + vm.SnowmanVM.NotifyBlockReady() + return + } + + // Get the preferred block + preferred, err := vm.getBlock(vm.Preferred()) + vm.Ctx.Log.AssertNoError(err) + + // The database if the preferred block were to be committed + var db database.Database + // The preferred block should always be a decision block + if preferred, ok := preferred.(decision); ok { + db = preferred.onAccept() + } else { + vm.Ctx.Log.Error("The preferred block should always be a decision block") + return + } + + // The chain time if the preferred block were to be committed + timestamp, err := vm.getTimestamp(db) + if err != 
nil { + vm.Ctx.Log.Error("could not retrieve timestamp from database") + return + } + if timestamp.Equal(maxTime) { + vm.Ctx.Log.Error("Program time is suspiciously far in the future. Either this codebase was way more successful than expected, or a critical error has occurred.") + return + } + + nextDSValidatorEndTime := vm.nextSubnetValidatorChangeTime(db, DefaultSubnetID, false) + if timestamp.Equal(nextDSValidatorEndTime) { + vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeRewardValidator + return + } + + // If local time is >= time of the next change in the validator set, + // propose moving forward the chain timestamp + nextValidatorStartTime := vm.nextValidatorChangeTime(db, true) + nextValidatorEndTime := vm.nextValidatorChangeTime(db, false) + + nextValidatorSetChangeTime := nextValidatorStartTime + if nextValidatorEndTime.Before(nextValidatorStartTime) { + nextValidatorSetChangeTime = nextValidatorEndTime + } + + localTime := vm.clock.Time() + if !localTime.Before(nextValidatorSetChangeTime) { // time is at or after the time for the next validator to join/leave + vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeTimestamp + return + } + + syncTime := localTime.Add(Delta) + for vm.unissuedEvents.Len() > 0 { + if !syncTime.After(vm.unissuedEvents.Peek().StartTime()) { + vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeAddValidator + return + } + // If the tx doesn't meet the syncrony bound, drop it + vm.unissuedEvents.Remove() + vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed") + } + + waitTime := nextValidatorSetChangeTime.Sub(localTime) + vm.Ctx.Log.Info("next scheduled event is at %s (%s in the future)", nextValidatorSetChangeTime, waitTime) + + // Wake up when it's time to add/remove the next validator + vm.timer.SetTimeoutIn(waitTime) +} + +// If [start], returns the time at which the next validator (of any subnet) in the pending set starts validating +// Otherwise, returns the time at which the 
next validator (of any subnet) stops validating +// If no such validator is found, returns maxTime +func (vm *VM) nextValidatorChangeTime(db database.Database, start bool) time.Time { + earliest := vm.nextSubnetValidatorChangeTime(db, DefaultSubnetID, start) + subnets, err := vm.getSubnets(db) + if err != nil { + return earliest + } + for _, subnet := range subnets { + t := vm.nextSubnetValidatorChangeTime(db, subnet.ID, start) + if t.Before(earliest) { + earliest = t + } + } + return earliest +} + +func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.ID, start bool) time.Time { + var validators *EventHeap + var err error + if start { + validators, err = vm.getPendingValidators(db, subnetID) + } else { + validators, err = vm.getCurrentValidators(db, subnetID) + } + if err != nil { + vm.Ctx.Log.Error("couldn't get validators of subnet with ID %s: %v", subnetID, err) + return maxTime + } + if validators.Len() == 0 { + vm.Ctx.Log.Verbo("subnet, %s, has no validators", subnetID) + return maxTime + } + return validators.Timestamp() +} + +// Returns: +// 1) The validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] +// 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] +// Note that this method will not remove validators from the current validator set of the default subnet. +// That happens in reward blocks. 
+func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, pending *EventHeap, err error) { + // remove validators whose end time <= [timestamp] + current, err = vm.getCurrentValidators(db, subnetID) + if err != nil { + return nil, nil, err + } + if !subnetID.Equals(DefaultSubnetID) { // validators of default subnet removed in rewardValidatorTxs, not here + for current.Len() > 0 { + next := current.Peek() // current validator with earliest end time + if timestamp.Before(next.EndTime()) { + break + } + current.Remove() + } + } + pending, err = vm.getPendingValidators(db, subnetID) + if err != nil { + return nil, nil, err + } + for pending.Len() > 0 { + nextTx := pending.Peek() // pending staker with earliest start time + if timestamp.Before(nextTx.StartTime()) { + break + } + heap.Push(current, nextTx) + heap.Pop(pending) + } + return current, pending, nil +} + +func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { + vdrMap := make(map[[20]byte]*Validator, validatorEvents.Len()) + for _, event := range validatorEvents.Txs { + vdr := event.Vdr() + vdrID := vdr.ID() + vdrKey := vdrID.Key() + validator, exists := vdrMap[vdrKey] + if !exists { + validator = &Validator{NodeID: vdrID} + vdrMap[vdrKey] = validator + } + weight, err := math.Add64(validator.Wght, vdr.Weight()) + if err != nil { + weight = stdmath.MaxUint64 + } + validator.Wght = weight + } + + vdrList := make([]validators.Validator, len(vdrMap))[:0] + for _, validator := range vdrMap { + vdrList = append(vdrList, validator) + } + return vdrList +} + +func (vm *VM) updateValidators(subnetID ids.ID) error { + validatorSet, ok := vm.Validators.GetValidatorSet(subnetID) + if !ok { + return fmt.Errorf("couldn't get the validator sampler of the %s subnet", subnetID) + } + + currentValidators, err := vm.getCurrentValidators(vm.DB, subnetID) + if err != nil { + return err + } + + validators := vm.getValidators(currentValidators) + 
validatorSet.Set(validators) + return nil +} diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go new file mode 100644 index 0000000..67c0084 --- /dev/null +++ b/vms/platformvm/vm_test.go @@ -0,0 +1,1062 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "bytes" + "container/heap" + "errors" + "testing" + "time" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/core" + "github.com/ava-labs/gecko/vms/timestampvm" +) + +var ( + // chain timestamp at genesis + defaultGenesisTime = time.Now().Round(time.Second) + + // time that genesis validators start validating + defaultValidateStartTime = defaultGenesisTime.Add(1 * time.Second) + + // time that genesis validators stop validating + defaultValidateEndTime = defaultValidateStartTime.Add(10 * MinimumStakingDuration) + + // each key corresponds to an account that has $AVA and a genesis validator + keys []*crypto.PrivateKeySECP256K1R + + // amount all genesis validators stake + defaultStakeAmount uint64 + + // balance of accounts that exist at genesis + defaultBalance = 100 * MinimumStakeAmount + + // At genesis this account has AVA and is validating the default subnet + defaultKey *crypto.PrivateKeySECP256K1R + + // non-default subnet that exists at genesis in defaultVM + testSubnet1 *CreateSubnetTx + testSubnet1ControlKeys []*crypto.PrivateKeySECP256K1R +) + +var ( + errShouldNotifyEngine = errors.New("should have notified engine of block ready") + errShouldPrefCommit = errors.New("should prefer to commit proposal") + errShouldPrefAbort = errors.New("should prefer to abort proposal") +) 
const (
	testNetworkID = 10 // To be used in tests

	defaultNonce  = 1
	defaultWeight = 1
)

// init populates [keys] from hard-coded CB58-encoded private keys and derives
// the stake amount / default key / control keys used throughout these tests.
func init() {
	ctx := defaultContext()
	byteFormatter := formatting.CB58{}
	factory := crypto.FactorySECP256K1R{}

	for _, key := range []string{
		"24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5",
		"2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY",
		"cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35",
		"ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN",
		"2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV",
	} {
		ctx.Log.AssertNoError(byteFormatter.FromString(key))
		pk, err := factory.ToPrivateKey(byteFormatter.Bytes)
		ctx.Log.AssertNoError(err)
		keys = append(keys, pk.(*crypto.PrivateKeySECP256K1R))
	}

	defaultStakeAmount = defaultBalance - txFee

	defaultKey = keys[0]

	testSubnet1ControlKeys = keys[0:3]

}

// defaultContext returns a fresh test context on the test network.
func defaultContext() *snow.Context {
	ctx := snow.DefaultContextTest()
	ctx.NetworkID = testNetworkID
	return ctx
}

// defaultVM returns a VM initialized from the default genesis state, with one
// additional non-default subnet (stored in package var testSubnet1).
// Panics on any setup failure.
func defaultVM() *VM {
	genesisAccounts := GenesisAccounts()
	genesisValidators := GenesisCurrentValidators()
	genesisChains := make([]*CreateChainTx, 0)

	genesisState := Genesis{
		Accounts:   genesisAccounts,
		Validators: genesisValidators,
		Chains:     genesisChains,
		Timestamp:  uint64(defaultGenesisTime.Unix()),
	}

	genesisBytes, err := Codec.Marshal(genesisState)
	if err != nil {
		panic(err)
	}

	vm := &VM{
		SnowmanVM: &core.SnowmanVM{},
	}

	defaultSubnet := validators.NewSet()
	vm.Validators = validators.NewManager()
	vm.Validators.PutValidatorSet(DefaultSubnetID, defaultSubnet)

	vm.clock.Set(defaultGenesisTime)
	db := memdb.New()
	msgChan := make(chan common.Message, 1)
	ctx := defaultContext()
	if err := vm.Initialize(ctx, db, genesisBytes, msgChan, nil); err != nil {
		panic(err)
	}

	// Create 1 non-default subnet and store it in testSubnet1
	tx, err := vm.newCreateSubnetTx(
		testNetworkID,
		0,
		[]ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, // control keys are keys[0], keys[1], keys[2]
		2, // 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet
		keys[0],
	)
	if err != nil {
		panic(err)
	}
	if testSubnet1 == nil {
		testSubnet1 = tx
	}
	if err := vm.putSubnets(vm.DB, []*CreateSubnetTx{tx}); err != nil {
		panic(err)
	}
	// The new subnet starts with empty current and pending validator sets
	err = vm.putCurrentValidators(
		vm.DB,
		&EventHeap{
			SortByStartTime: false,
		},
		tx.ID,
	)
	if err != nil {
		panic(err)
	}
	err = vm.putPendingValidators(
		vm.DB,
		&EventHeap{
			SortByStartTime: true,
		},
		tx.ID,
	)
	if err != nil {
		panic(err)
	}

	// Sanity check that the subnet was persisted
	// NOTE(review): "end delete" below suggests this check is leftover debug
	// code — confirm before removing
	subnets, err := vm.getSubnets(vm.DB)
	if err != nil {
		panic(err)
	}
	if len(subnets) == 0 {
		panic("no subnets found")
	} // end delete

	return vm
}

// The returned accounts have nil for their vm field
func GenesisAccounts() []Account {
	accounts := []Account(nil)
	for _, key := range keys {
		accounts = append(accounts,
			newAccount(
				key.PublicKey().Address(), // address
				defaultNonce,              // nonce
				defaultBalance,            // balance
			))
	}
	return accounts
}

// Returns the validators validating at genesis in tests
func GenesisCurrentValidators() *EventHeap {
	vm := &VM{}
	validators := &EventHeap{SortByStartTime: false}
	for _, key := range keys {
		validator, _ := vm.newAddDefaultSubnetValidatorTx(
			defaultNonce,                            // nonce
			defaultStakeAmount,                      // weight
			uint64(defaultValidateStartTime.Unix()), // start time
			uint64(defaultValidateEndTime.Unix()),   // end time
			key.PublicKey().Address(),               // nodeID
			key.PublicKey().Address(),               // destination
			NumberOfShares,                          // shares
			testNetworkID,                           // network ID
			key,                                     // key paying tx fee and stake
		)
		heap.Push(validators, validator)
	}
	return validators
}

// Ensure genesis state is parsed from bytes and stored correctly
func TestGenesis(t *testing.T) {
	vm := defaultVM()

	// Ensure the genesis block has been accepted and stored
	genesisBlockID := vm.LastAccepted() // lastAccepted should be ID of genesis block
	genesisBlock, err := vm.getBlock(genesisBlockID)
	if err != nil {
		t.Fatalf("couldn't get genesis block: %v", err)
	}
	if genesisBlock.Status() != choices.Accepted {
		t.Fatal("genesis block should be accepted")
	}

	// Ensure all the genesis accounts are stored
	for _, account := range GenesisAccounts() {
		vmAccount, err := vm.getAccount(vm.DB, account.Address)
		if err != nil {
			t.Fatal("couldn't find account in vm's db")
		}
		if !vmAccount.Address.Equals(account.Address) {
			t.Fatal("account IDs should match")
		}
		if vmAccount.Balance != account.Balance {
			t.Fatal("balances should match")
		}
		if vmAccount.Nonce != account.Nonce {
			t.Fatal("nonces should match")
		}
	}

	// Ensure current validator set of default subnet is correct
	currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	if len(currentValidators.Txs) != len(keys) {
		t.Fatal("vm's current validator set is wrong")
	}
	if currentValidators.SortByStartTime == true {
		t.Fatal("vm's current validators should be sorted by end time")
	}
	currentSampler := validators.NewSet()
	currentSampler.Set(vm.getValidators(currentValidators))
	for _, key := range keys {
		if addr := key.PublicKey().Address(); !currentSampler.Contains(addr) {
			t.Fatalf("should have had validator with NodeID %s", addr)
		}
	}

	// Ensure pending validator set is correct (empty)
	pendingValidators, err := vm.getPendingValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	if pendingValidators.Len() != 0 {
		t.Fatal("vm's pending validator set should be empty")
	}

	// Ensure genesis timestamp is correct
	time, err := vm.getTimestamp(vm.DB)
	if err != nil {
		t.Fatal(err)
	}
	if !time.Equal(defaultGenesisTime) {
		t.Fatalf("vm's time is incorrect. Expected %s got %s", defaultGenesisTime, time)
	}
}

// accept proposal to add validator to default subnet
func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
	vm := defaultVM()
	startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second)
	endTime := startTime.Add(MinimumStakingDuration)
	key, _ := vm.factory.NewPrivateKey()
	ID := key.PublicKey().Address()

	// create valid tx
	tx, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(startTime.Unix()),
		uint64(endTime.Unix()),
		ID,
		ID,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	// trigger block creation
	vm.unissuedEvents.Add(tx)
	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock()
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	_, ok = options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	commit.Accept() // commit the proposal

	// Verify that new validator now in pending validator set
	pendingValidators, err := vm.getPendingValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	pendingSampler := validators.NewSet()
	pendingSampler.Set(vm.getValidators(pendingValidators))
	if !pendingSampler.Contains(ID) {
		t.Fatalf("pending validator should have validator with ID %s", ID)
	}
}

// Reject proposal to add validator to default subnet
func TestAddDefaultSubnetValidatorReject(t *testing.T) {
	vm := defaultVM()
	startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second)
	endTime := startTime.Add(MinimumStakingDuration)
	key, _ := vm.factory.NewPrivateKey()
	ID := key.PublicKey().Address()

	// create valid tx
	tx, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(startTime.Unix()),
		uint64(endTime.Unix()),
		ID,
		ID,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	// trigger block creation
	vm.unissuedEvents.Add(tx)
	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock()
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok := options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil { // should pass verification
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil { // should pass verification
		t.Fatal(err)
	}

	abort.Accept() // reject the proposal

	// Verify that new validator NOT in pending validator set
	pendingValidators, err := vm.getPendingValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	pendingSampler := validators.NewSet()
	pendingSampler.Set(vm.getValidators(pendingValidators))
	if pendingSampler.Contains(ID) {
		t.Fatalf("should not have added validator to pending validator set")
	}
}

// Accept proposal to add validator to non-default subnet
func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) {
	vm := defaultVM()
	startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second)
	endTime := startTime.Add(MinimumStakingDuration)

	// create valid tx
	// note that [startTime, endTime] is a subset of time that keys[0]
	// validates default subnet ([defaultValidateStartTime, defaultValidateEndTime])
	tx, err := vm.newAddNonDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultWeight,
		uint64(startTime.Unix()),
		uint64(endTime.Unix()),
		keys[0].PublicKey().Address(),
		testSubnet1.ID,
		testNetworkID,
		[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]},
		keys[0],
	)
	if err != nil {
		t.Fatal(err)
	}

	// trigger block creation
	vm.unissuedEvents.Add(tx)
	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock()
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok := options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	commit.Accept() // accept the proposal

	// Verify that new validator is in pending validator set
	pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID)
	if err != nil {
		t.Fatal(err)
	}
	pendingSampler := validators.NewSet()
	pendingSampler.Set(vm.getValidators(pendingValidators))
	if !pendingSampler.Contains(keys[0].PublicKey().Address()) {
		t.Fatalf("should have added validator to pending validator set")
	}
}

// Reject proposal to add validator to non-default subnet
func TestAddNonDefaultSubnetValidatorReject(t *testing.T) {
	vm := defaultVM()
	startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second)
	endTime := startTime.Add(MinimumStakingDuration)
	key, _ := vm.factory.NewPrivateKey()
	ID := key.PublicKey().Address()

	// create valid tx
	// note that [startTime, endTime] is a subset of time that keys[0]
	// validates default subnet ([defaultValidateStartTime, defaultValidateEndTime])
	tx, err := vm.newAddNonDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultWeight,
		uint64(startTime.Unix()),
		uint64(endTime.Unix()),
		keys[0].PublicKey().Address(),
		testSubnet1.ID,
		testNetworkID,
		[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]},
		keys[0],
	)
	if err != nil {
		t.Fatal(err)
	}

	// trigger block creation
	vm.unissuedEvents.Add(tx)
	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock()
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok := options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	abort.Accept() // reject the proposal

	// Verify that new validator NOT in pending validator set
	pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID)
	if err != nil {
		t.Fatal(err)
	}
	pendingSampler := validators.NewSet()
	pendingSampler.Set(vm.getValidators(pendingValidators))
	if pendingSampler.Contains(ID) {
		t.Fatalf("should not have added validator to pending validator set")
	}
}

// Test case where default subnet validator rewarded
func TestRewardValidatorAccept(t *testing.T) {
	vm := defaultVM()

	// Fast forward clock to time for genesis validators to leave
	vm.clock.Set(defaultValidateEndTime)

	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock() // should contain proposal to advance time
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok := options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	commit.Accept() // advance the timestamp

	// Verify that chain's timestamp has advanced
	timestamp, err := vm.getTimestamp(vm.DB)
	if err != nil {
		t.Fatal(err)
	}
	if !timestamp.Equal(defaultValidateEndTime) {
		t.Fatal("expected timestamp to have advanced")
	}

	vm.Ctx.Lock.Lock()
	blk, err = vm.BuildBlock() // should contain proposal to reward genesis validator
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block = blk.(*ProposalBlock)
	options = block.Options()
	commit, ok = blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok = options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	commit.Accept() // reward the genesis validator

	// Verify that genesis validator was rewarded and removed from current validator set
	currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	if currentValidators.Len() != len(keys)-1 {
		t.Fatal("should have removed a genesis validator")
	}
}

// Test case where default subnet validator not rewarded
func TestRewardValidatorReject(t *testing.T) {
	vm := defaultVM()

	// Fast forward clock to time for genesis validators to leave
	vm.clock.Set(defaultValidateEndTime)

	vm.Ctx.Lock.Lock()
	blk, err := vm.BuildBlock() // should contain proposal to advance time
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block := blk.(*ProposalBlock)
	options := block.Options()
	commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok := options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	commit.Accept() // advance the timestamp

	// Verify that chain's timestamp has advanced
	timestamp, err := vm.getTimestamp(vm.DB)
	if err != nil {
		t.Fatal(err)
	}
	if !timestamp.Equal(defaultValidateEndTime) {
		t.Fatal("expected timestamp to have advanced")
	}

	vm.Ctx.Lock.Lock()
	blk, err = vm.BuildBlock() // should contain proposal to reward genesis validator
	if err != nil {
		t.Fatal(err)
	}
	vm.Ctx.Lock.Unlock()

	// Assert preferences are correct
	block = blk.(*ProposalBlock)
	options = block.Options()
	commit, ok = blk.(*ProposalBlock).Options()[0].(*Commit)
	if !ok {
		t.Fatal(errShouldPrefCommit)
	}
	abort, ok = options[1].(*Abort)
	if !ok {
		t.Fatal(errShouldPrefAbort)
	}

	if err := block.Verify(); err != nil {
		t.Fatal(err)
	}
	block.Accept()

	if err := commit.Verify(); err != nil {
		t.Fatal(err)
	}
	if err := abort.Verify(); err != nil {
		t.Fatal(err)
	}

	abort.Accept() // do not reward the genesis validator

	// Verify that genesis validator was removed from current validator set
	currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID)
	if err != nil {
		t.Fatal(err)
	}
	if currentValidators.Len() != len(keys)-1 {
		t.Fatal("should have removed a genesis validator")
	}
}

// Ensure BuildBlock errors when there is no block to build
func TestUnneededBuildBlock(t *testing.T) {
	vm := defaultVM()

	if _, err := vm.BuildBlock(); err == nil {
		t.Fatalf("Should have errored on BuildBlock")
	}
}

// test acceptance of proposal to create a new chain
func TestCreateChain(t *testing.T) {
	vm := defaultVM()

	tx, err := vm.newCreateChainTx(
		defaultNonce+1,
		nil,
+ timestampvm.ID, + nil, + "name ", + testNetworkID, + keys[0], + ) + if err != nil { + t.Fatal(err) + } + + vm.Ctx.Lock.Lock() + vm.unissuedDecisionTxs = append(vm.unissuedDecisionTxs, tx) + blk, err := vm.BuildBlock() // should contain proposal to create chain + if err != nil { + t.Fatal(err) + } + vm.Ctx.Lock.Unlock() + + if err := blk.Verify(); err != nil { + t.Fatal(err) + } + + blk.Accept() + + // Verify chain was created + chains, err := vm.getChains(vm.DB) + if err != nil { + t.Fatal(err) + } + foundNewChain := false + for _, chain := range chains { + if bytes.Equal(chain.Bytes(), tx.Bytes()) { + foundNewChain = true + } + } + if !foundNewChain { + t.Fatal("should've created new chain but didn't") + } + + // Verify tx fee was deducted + account, err := vm.getAccount(vm.DB, tx.Key().Address()) + if err != nil { + t.Fatal(err) + } + if account.Balance != defaultBalance-txFee { + t.Fatal("should have deducted txFee from balance") + } +} + +// test where we: +// 1) Create a subnet +// 2) Add a validator to the subnet's pending validator set +// 3) Advance timestamp to validator's start time (moving the validator from pending to current) +// 4) Advance timestamp to validator's end time (removing validator from current) +func TestCreateSubnet(t *testing.T) { + vm := defaultVM() + + createSubnetTx, err := vm.newCreateSubnetTx( + testNetworkID, + defaultNonce+1, + []ids.ShortID{ + keys[0].PublicKey().Address(), + keys[1].PublicKey().Address(), + }, + 1, // threshold + keys[0], // payer + ) + if err != nil { + t.Fatal(err) + } + + vm.Ctx.Lock.Lock() + vm.unissuedDecisionTxs = append(vm.unissuedDecisionTxs, createSubnetTx) + blk, err := vm.BuildBlock() // should contain proposal to create subnet + if err != nil { + t.Fatal(err) + } + vm.Ctx.Lock.Unlock() + + if err := blk.Verify(); err != nil { + t.Fatal(err) + } + + blk.Accept() + + // Verify new subnet was created + subnets, err := vm.getSubnets(vm.DB) + if err != nil { + t.Fatal(err) + } + foundNewSubnet := false 
+ for _, subnet := range subnets { + if bytes.Equal(subnet.Bytes(), createSubnetTx.Bytes()) { + foundNewSubnet = true + } + } + if !foundNewSubnet { + t.Fatal("should've created new subnet but didn't") + } + + // Verify tx fee was deducted + account, err := vm.getAccount(vm.DB, createSubnetTx.key.Address()) + if err != nil { + t.Fatal(err) + } + if account.Balance != defaultBalance-txFee { + t.Fatal("should have deducted txFee from balance") + } + + // Now that we've created a new subnet, add a validator to that subnet + startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second) + endTime := startTime.Add(MinimumStakingDuration) + // [startTime, endTime] is subset of time keys[0] validates default subent so tx is valid + addValidatorTx, err := vm.newAddNonDefaultSubnetValidatorTx( + defaultNonce+2, + defaultWeight, + uint64(startTime.Unix()), + uint64(endTime.Unix()), + keys[0].PublicKey().Address(), + createSubnetTx.ID, + testNetworkID, + []*crypto.PrivateKeySECP256K1R{keys[0]}, + keys[0], + ) + if err != nil { + t.Fatal(err) + } + + // Verify tx is valid + _, _, _, _, err = addValidatorTx.SemanticVerify(vm.DB) + if err != nil { + t.Fatal(err) + } + + vm.Ctx.Lock.Lock() + vm.unissuedEvents.Push(addValidatorTx) + blk, err = vm.BuildBlock() // should add validator to the new subnet + if err != nil { + t.Fatal(err) + } + vm.Ctx.Lock.Unlock() + + // Assert preferences are correct + // and accept the proposal/commit + block := blk.(*ProposalBlock) + options := block.Options() + commit, ok := blk.(*ProposalBlock).Options()[0].(*Commit) + if !ok { + t.Fatal(errShouldPrefCommit) + } + abort, ok := options[1].(*Abort) + if !ok { + t.Fatal(errShouldPrefAbort) + } + + // Accept the block + if err := block.Verify(); err != nil { + t.Fatal(err) + } + block.Accept() + if err := commit.Verify(); err != nil { + t.Fatal(err) + } + if err := abort.Verify(); err != nil { + t.Fatal(err) + } + commit.Accept() // add the validator to pending validator set + + // Verify 
validator is in pending validator set + pendingValidators, err := vm.getPendingValidators(vm.DB, createSubnetTx.ID) + if err != nil { + t.Fatal(err) + } + foundNewValidator := false + for _, tx := range pendingValidators.Txs { + if tx.ID().Equals(addValidatorTx.ID()) { + foundNewValidator = true + } + } + if !foundNewValidator { + t.Fatal("didn't add validator to new subnet's pending validator set") + } + + // Advance time to when new validator should start validating + // Create a block with an advance time tx that moves validator + // from pending to current validator set + vm.clock.Set(startTime) + + vm.Ctx.Lock.Lock() + blk, err = vm.BuildBlock() // should be advance time tx + if err != nil { + t.Fatal(err) + } + vm.Ctx.Lock.Unlock() + + // Assert preferences are correct + // and accept the proposal/commit + block = blk.(*ProposalBlock) + options = block.Options() + commit, ok = blk.(*ProposalBlock).Options()[0].(*Commit) + if !ok { + t.Fatal(errShouldPrefCommit) + } + abort, ok = options[1].(*Abort) + if !ok { + t.Fatal(errShouldPrefAbort) + } + + // Accept the block + if err := block.Verify(); err != nil { + t.Fatal(err) + } + block.Accept() + if err := commit.Verify(); err != nil { + t.Fatal(err) + } + if err := abort.Verify(); err != nil { + t.Fatal(err) + } + commit.Accept() // move validator addValidatorTx from pending to current + + // Verify validator no longer in pending validator set + // Verify validator is in pending validator set + pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID) + if err != nil { + t.Fatal(err) + } + if pendingValidators.Len() != 0 { + t.Fatal("pending validator set should be empty") + } + + // Verify validator is in current validator set + currentValidators, err := vm.getCurrentValidators(vm.DB, createSubnetTx.ID) + if err != nil { + t.Fatal(err) + } + foundNewValidator = false + for _, tx := range currentValidators.Txs { + if tx.ID().Equals(addValidatorTx.ID()) { + foundNewValidator = true + } + } + if 
!foundNewValidator { + t.Fatal("didn't add validator to new subnet's current validator set") + } + + // fast forward clock to time validator should stop validating + vm.clock.Set(endTime) + vm.Ctx.Lock.Lock() + blk, err = vm.BuildBlock() // should be advance time tx + if err != nil { + t.Fatal(err) + } + vm.Ctx.Lock.Unlock() + + // Assert preferences are correct + // and accept the proposal/commit + block = blk.(*ProposalBlock) + options = block.Options() + commit, ok = blk.(*ProposalBlock).Options()[0].(*Commit) + if !ok { + t.Fatal(errShouldPrefCommit) + } + abort, ok = options[1].(*Abort) + if !ok { + t.Fatal(errShouldPrefAbort) + } + + // Accept the block + if err := block.Verify(); err != nil { + t.Fatal(err) + } + block.Accept() + if err := commit.Verify(); err != nil { + t.Fatal(err) + } + if err := abort.Verify(); err != nil { + t.Fatal(err) + } + commit.Accept() // remove validator from current validator set + + // pending validators and current validator should be empty + pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID) + if err != nil { + t.Fatal(err) + } + if pendingValidators.Len() != 0 { + t.Fatal("pending validator set should be empty") + } + currentValidators, err = vm.getCurrentValidators(vm.DB, createSubnetTx.ID) + if err != nil { + t.Fatal(err) + } + if currentValidators.Len() != 0 { + t.Fatal("pending validator set should be empty") + } + +} diff --git a/vms/secp256k1fx/credential.go b/vms/secp256k1fx/credential.go new file mode 100644 index 0000000..2b1cfc7 --- /dev/null +++ b/vms/secp256k1fx/credential.go @@ -0,0 +1,29 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + + "github.com/ava-labs/gecko/utils/crypto" +) + +var ( + errNilCredential = errors.New("nil credential") +) + +// Credential ... +type Credential struct { + Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` +} + +// Verify ... 
+func (cr *Credential) Verify() error {
+	// A credential is syntactically valid whenever it is non-nil; the
+	// signatures it carries are only checked later, against a concrete
+	// transaction hash, by the fx's credential verification.
+	switch {
+	case cr == nil:
+		return errNilCredential
+	default:
+		return nil
+	}
+}
diff --git a/vms/secp256k1fx/credential_test.go b/vms/secp256k1fx/credential_test.go
new file mode 100644
index 0000000..5157fab
--- /dev/null
+++ b/vms/secp256k1fx/credential_test.go
@@ -0,0 +1,94 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package secp256k1fx
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/ava-labs/gecko/utils/crypto"
+	"github.com/ava-labs/gecko/vms/components/codec"
+)
+
+// TestCredentialVerify checks that an empty (but non-nil) credential
+// passes syntactic verification.
+func TestCredentialVerify(t *testing.T) {
+	cred := Credential{}
+	err := cred.Verify()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestCredentialVerifyNil checks that a nil credential is rejected.
+func TestCredentialVerifyNil(t *testing.T) {
+	cred := (*Credential)(nil)
+	err := cred.Verify()
+	if err == nil {
+		t.Fatalf("Should have errored with a nil credential")
+	}
+}
+
+// TestCredentialSerialize checks that the codec marshals a credential as a
+// 4-byte length prefix followed by the fixed-size signatures, verbatim.
+func TestCredentialSerialize(t *testing.T) {
+	c := codec.NewDefault()
+
+	// NOTE(review): some byte pairs below (0x1e/0x1d, 0x2e/0x2d, 0x5e/0x5d,
+	// 0x6e/0x6d) deviate from a strictly ascending pattern, but the same
+	// bytes appear in both [expected] and [cred], so the test is
+	// self-consistent.
+	expected := []byte{
+		// length:
+		0x00, 0x00, 0x00, 0x02,
+		// sig[0]
+		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+		0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+		0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1e, 0x1d, 0x1f,
+		0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2e, 0x2d, 0x2f,
+		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+		0x00,
+		// sig[1]
+		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+		0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5e, 0x5d, 0x5f,
+		0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6e, 0x6d, 0x6f,
+		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+		0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+		0x00,
+	}
+	cred := Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{
+		[crypto.SECP256K1RSigLen]byte{
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1e, 0x1d, 0x1f,
+			0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+			0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2e, 0x2d, 0x2f,
+			0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+			0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+			0x00,
+		},
+		[crypto.SECP256K1RSigLen]byte{
+			0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+			0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+			0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+			0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5e, 0x5d, 0x5f,
+			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+			0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6e, 0x6d, 0x6f,
+			0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+			0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+			0x00,
+		},
+	}}
+	err := cred.Verify()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	result, err := c.Marshal(&cred)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(expected, result) {
+		t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result)
+	}
+}
diff --git a/vms/secp256k1fx/factory.go b/vms/secp256k1fx/factory.go
new file mode 100644
index 0000000..da2e022
--- /dev/null
+++ b/vms/secp256k1fx/factory.go
@@ -0,0 +1,19 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package secp256k1fx
+
+// ID that this Fx uses when labeled
+import (
+	"github.com/ava-labs/gecko/ids"
+)
+
+// ID that this Fx uses when labeled
+var (
+	ID = ids.NewID([32]byte{'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', 'f', 'x'})
+)
+
+// Factory creates new instances of this fx.
+type Factory struct{}
+
+// New returns a new, uninitialized instance of this fx.
+func (f *Factory) New() interface{} { return &Fx{} }
diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go
new file mode 100644
index 0000000..ff54b91
--- /dev/null
+++ b/vms/secp256k1fx/fx.go
@@ -0,0 +1,179 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package secp256k1fx
+
+import (
+	"errors"
+
+	"github.com/ava-labs/gecko/utils/crypto"
+	"github.com/ava-labs/gecko/utils/hashing"
+	"github.com/ava-labs/gecko/vms/components/verify"
+)
+
+var (
+	errWrongVMType         = errors.New("wrong vm type")
+	errWrongTxType         = errors.New("wrong tx type")
+	errWrongUTXOType       = errors.New("wrong utxo type")
+	errWrongOutputType     = errors.New("wrong output type")
+	errWrongInputType      = errors.New("wrong input type")
+	errWrongCredentialType = errors.New("wrong credential type")
+
+	errWrongNumberOfOutputs     = errors.New("wrong number of outputs for an operation")
+	errWrongNumberOfInputs      = errors.New("wrong number of inputs for an operation")
+	errWrongNumberOfCredentials = errors.New("wrong number of credentials for an operation")
+
+	errWrongMintCreated = errors.New("wrong mint output created from the operation")
+
+	errWrongAmounts                   = errors.New("input is consuming a different amount than expected")
+	errTimelocked                     = errors.New("output is time locked")
+	errTooManySigners                 = errors.New("input has more signers than expected")
+	errTooFewSigners                  = errors.New("input has less signers than expected")
+	errInputCredentialSignersMismatch = errors.New("input expected a different number of signers than provided in the credential")
+	errWrongSigner                    = errors.New("credential does not produce expected signer")
+)
+
+// Fx is a feature extension whose outputs are spent by providing secp256k1
+// recoverable signatures over the unsigned transaction bytes.
+type Fx struct {
+	vm          VM                       // set by Initialize; supplies the codec and clock
+	secpFactory crypto.FactorySECP256K1R // recovers public keys from signatures
+}
+
+// Initialize stores the wrapping VM and registers this fx's output, input,
+// and credential types with the VM's codec so they can be (un)marshaled.
+// Returns errWrongVMType if [vmIntf] does not implement the VM interface.
+func (fx *Fx) Initialize(vmIntf interface{}) error {
+	vm, ok := vmIntf.(VM)
+	if !ok {
+		return errWrongVMType
+	}
+
+	c := vm.Codec()
+	c.RegisterType(&MintOutput{})
+	c.RegisterType(&TransferOutput{})
+	c.RegisterType(&MintInput{})
+	c.RegisterType(&TransferInput{})
+	c.RegisterType(&Credential{})
+
+	fx.vm = vm
+	return nil
+}
+
+// VerifyOperation verifies a mint operation: exactly one MintOutput UTXO,
+// consumed by one MintInput with one Credential, producing a new MintOutput
+// and a TransferOutput.
+func (fx *Fx) VerifyOperation(txIntf interface{}, utxosIntf, insIntf, credsIntf, outsIntf []interface{}) error {
+	tx, ok := txIntf.(Tx)
+	if !ok {
+		return errWrongTxType
+	}
+
+	// A mint operation consumes exactly one UTXO with one input and one
+	// credential, and must produce exactly two outputs.
+	if len(outsIntf) != 2 {
+		return errWrongNumberOfOutputs
+	}
+	if len(utxosIntf) != 1 || len(insIntf) != 1 {
+		return errWrongNumberOfInputs
+	}
+	if len(credsIntf) != 1 {
+		return errWrongNumberOfCredentials
+	}
+
+	// Down-cast each component to its expected concrete type.
+	utxo, ok := utxosIntf[0].(*MintOutput)
+	if !ok {
+		return errWrongUTXOType
+	}
+	in, ok := insIntf[0].(*MintInput)
+	if !ok {
+		return errWrongInputType
+	}
+	cred, ok := credsIntf[0].(*Credential)
+	if !ok {
+		return errWrongCredentialType
+	}
+	// outs[0] must re-create the minting ability; outs[1] is the minted value.
+	newMint, ok := outsIntf[0].(*MintOutput)
+	if !ok {
+		return errWrongOutputType
+	}
+	newOutput, ok := outsIntf[1].(*TransferOutput)
+	if !ok {
+		return errWrongOutputType
+	}
+
+	return fx.verifyOperation(tx, utxo, in, cred, newMint, newOutput)
+}
+
+// verifyOperation checks the typed components of a mint operation: each
+// component must pass its own syntactic Verify, the new mint output must
+// preserve the owners of the consumed one, and the credential must satisfy
+// the consumed output's owners.
+func (fx *Fx) verifyOperation(tx Tx, utxo *MintOutput, in *MintInput, cred *Credential, newMint *MintOutput, newOutput *TransferOutput) error {
+	if err := verify.All(utxo, in, cred, newMint, newOutput); err != nil {
+		return err
+	}
+
+	// Minting ability must be carried forward to the same owners.
+	if !utxo.Equals(&newMint.OutputOwners) {
+		return errWrongMintCreated
+	}
+
+	return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred)
+}
+
+// VerifyTransfer verifies that a TransferInput spends a TransferOutput,
+// given a Credential over the transaction.
+func (fx *Fx) VerifyTransfer(txIntf, utxoIntf, inIntf, credIntf interface{}) error {
+	// Down-cast each component to its expected concrete type.
+	tx, ok := txIntf.(Tx)
+	if !ok {
+		return errWrongTxType
+	}
+	utxo, ok := utxoIntf.(*TransferOutput)
+	if !ok {
+		return errWrongUTXOType
+	}
+	in, ok := inIntf.(*TransferInput)
+	if !ok {
+		return errWrongInputType
+	}
+	cred, ok := credIntf.(*Credential)
+	if !ok {
+		return errWrongCredentialType
+	}
+	return fx.verifyTransfer(tx, utxo, in, cred)
+}
+
+// verifyTransfer checks that [in] spends [utxo]: the consumed amount must
+// equal the output's amount, the output must not be timelocked at the VM's
+// current clock time, and [cred] must satisfy the output's owners.
+func (fx *Fx) verifyTransfer(tx Tx, utxo *TransferOutput, in *TransferInput, cred *Credential) error {
+	if err := verify.All(utxo, in, cred); err != nil {
+		return err
+	}
+
+	clock := fx.vm.Clock()
+	switch {
+	case utxo.Amt != in.Amt:
+		return errWrongAmounts
+	case utxo.Locktime > clock.Unix():
+		// Output can't be spent until its locktime has passed.
+		return errTimelocked
+	}
+
+	return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred)
+}
+
+// verifyCredentials checks that [cred] gives [in] permission to spend an
+// output owned by [out]. The number of signature indices must exactly equal
+// the owners' threshold, and each signature must recover -- from the hash of
+// the unsigned tx bytes -- the public key of the address at the
+// corresponding index of [out.Addrs].
+func (fx *Fx) verifyCredentials(tx Tx, out *OutputOwners, in *Input, cred *Credential) error {
+	numSigs := len(in.SigIndices)
+	switch {
+	case out.Threshold < uint32(numSigs):
+		return errTooManySigners
+	case out.Threshold > uint32(numSigs):
+		return errTooFewSigners
+	case numSigs != len(cred.Sigs):
+		return errInputCredentialSignersMismatch
+	}
+
+	// Signatures are over the hash of the unsigned transaction bytes.
+	txBytes := tx.UnsignedBytes()
+	txHash := hashing.ComputeHash256(txBytes)
+
+	for i, index := range in.SigIndices {
+		sig := cred.Sigs[i]
+
+		// Recover the public key that produced this signature over txHash.
+		pk, err := fx.secpFactory.RecoverHashPublicKey(txHash, sig[:])
+		if err != nil {
+			return err
+		}
+
+		// NOTE(review): [index] is not bounds-checked against out.Addrs here;
+		// presumably Input.Verify / OutputOwners.Verify guarantees the indices
+		// are in range -- confirm before relying on this with untrusted input.
+		expectedAddress := out.Addrs[index]
+		if !expectedAddress.Equals(pk.Address()) {
+			return errWrongSigner
+		}
+	}
+
+	return nil
+}
diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go
new file mode 100644
index 0000000..0d0e9d0
--- /dev/null
+++ b/vms/secp256k1fx/fx_test.go
@@ -0,0 +1,1215 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package secp256k1fx + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + txBytes = []byte{0, 1, 2, 3, 4, 5} + sigBytes = [crypto.SECP256K1RSigLen]byte{ + 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, + 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, + 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, + 0xbc, 0xa3, 0xb2, 0xd2, 0x5d, 0x51, 0xd6, 0x9b, + 0x0f, 0x28, 0x5d, 0xcd, 0x3f, 0x71, 0x17, 0x0a, + 0xf9, 0xbf, 0x2d, 0xb1, 0x10, 0x26, 0x5c, 0xe9, + 0xdc, 0xc3, 0x9d, 0x7a, 0x01, 0x50, 0x9d, 0xe8, + 0x35, 0xbd, 0xcb, 0x29, 0x3a, 0xd1, 0x49, 0x32, + 0x00, + } + addrBytes = [hashing.AddrLen]byte{ + 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, 0x09, + 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, 0x8d, + 0x39, 0x1a, 0xe7, 0xf0, + } +) + +type testVM struct{ clock timer.Clock } + +func (vm *testVM) Codec() codec.Codec { return codec.NewDefault() } + +func (vm *testVM) Clock() *timer.Clock { return &vm.clock } + +type testCodec struct{} + +func (c *testCodec) RegisterStruct(interface{}) {} + +type testTx struct{ bytes []byte } + +func (tx *testTx) UnsignedBytes() []byte { return tx.bytes } + +func TestFxInitialize(t *testing.T) { + vm := testVM{} + fx := Fx{} + err := fx.Initialize(&vm) + if err != nil { + t.Fatal(err) + } +} + +func TestFxInitializeInvalid(t *testing.T) { + fx := Fx{} + err := fx.Initialize(nil) + if err == nil { + t.Fatalf("Should have returned an error") + } +} + +func TestFxVerifyTransfer(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + 
ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyTransferNilTx(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(nil, out, in, cred) + if err == nil { + t.Fatalf("Should have failed verification due to a nil tx") + } +} + +func TestFxVerifyTransferNilOutput(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, nil, in, cred) + if err == nil { + t.Fatalf("Should have failed verification due to a nil output") + } +} + +func TestFxVerifyTransferNilInput(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + 
ids.NewShortID(addrBytes), + }, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, nil, cred) + if err == nil { + t.Fatalf("Should have failed verification due to a nil input") + } +} + +func TestFxVerifyTransferNilCredential(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + + err := fx.VerifyTransfer(tx, out, in, nil) + if err == nil { + t.Fatalf("Should have failed verification due to a nil credential") + } +} + +func TestFxVerifyTransferInvalidOutput(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 0, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to an invalid output") + } +} + +func TestFxVerifyTransferWrongAmounts(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 
1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 2, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to different amounts") + } +} + +func TestFxVerifyTransferTimelocked(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: uint64(date.Add(time.Second).Unix()), + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to a timelocked output") + } +} + +func TestFxVerifyTransferTooManySigners(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0, 1}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + [crypto.SECP256K1RSigLen]byte{}, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to too many signers") + } 
+} + +func TestFxVerifyTransferTooFewSigners(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{}, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to too few signers") + } +} + +func TestFxVerifyTransferMismatchedSigners(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + [crypto.SECP256K1RSigLen]byte{}, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to too mismatched signers") + } +} + +func TestFxVerifyTransferInvalidSignature(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + 
Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to an invalid signature") + } +} + +func TestFxVerifyTransferWrongSigner(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + out := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + + err := fx.VerifyTransfer(tx, out, in, cred) + if err == nil { + t.Fatalf("Should have errored due to a wrong signer") + } +} + +func TestFxVerifyOperation(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := 
[]interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyOperationUnknownTx(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(nil, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to an invalid tx type") + } +} + +func TestFxVerifyOperationWrongNumberOfOutputs(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + 
ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong number of outputs") + } +} + +func TestFxVerifyOperationWrongNumberOfInputs(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, nil, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong number of inputs") + } +} + +func TestFxVerifyOperationWrongNumberOfCredentials(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, nil, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong number of credentials") + } +} + +func TestFxVerifyOperationWrongUTXOType(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong utxo type") + } +} + +func TestFxVerifyOperationWrongInputType(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := 
&MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong input type") + } +} + +func TestFxVerifyOperationWrongCredentialType(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{nil} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong credential type") + } 
+} + +func TestFxVerifyOperationWrongMintType(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong output type") + } +} + +func TestFxVerifyOperationWrongTransferType(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &MintOutput{ + OutputOwners: OutputOwners{ + 
Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a wrong output type") + } +} + +func TestFxVerifyOperationInvalid(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 0, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to an invalid output") + } +} + +func TestFxVerifyOperationMismatchedMintOutput(t *testing.T) { + vm := testVM{} + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.clock.Set(date) + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &testTx{ + bytes: txBytes, + } + utxo := &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + in := &MintInput{ + Input: Input{ + 
SigIndices: []uint32{0}, + }, + } + cred := &Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + } + mintOutput := &MintOutput{ + OutputOwners: OutputOwners{ + Addrs: []ids.ShortID{}, + }, + } + transferOutput := &TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + ins := []interface{}{in} + creds := []interface{}{cred} + outs := []interface{}{mintOutput, transferOutput} + err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + if err == nil { + t.Fatalf("Should have errored due to a mismatched mint output") + } +} diff --git a/vms/secp256k1fx/input.go b/vms/secp256k1fx/input.go new file mode 100644 index 0000000..0a6ce66 --- /dev/null +++ b/vms/secp256k1fx/input.go @@ -0,0 +1,32 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + + "github.com/ava-labs/gecko/utils" +) + +var ( + errNilInput = errors.New("nil input") + errNotSortedUnique = errors.New("signatures not sorted and unique") +) + +// Input ... +type Input struct { + SigIndices []uint32 `serialize:"true"` +} + +// Verify this input is syntactically valid +func (in *Input) Verify() error { + switch { + case in == nil: + return errNilInput + case !utils.IsSortedAndUniqueUint32(in.SigIndices): + return errNotSortedUnique + default: + return nil + } +} diff --git a/vms/secp256k1fx/input_test.go b/vms/secp256k1fx/input_test.go new file mode 100644 index 0000000..775f553 --- /dev/null +++ b/vms/secp256k1fx/input_test.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package secp256k1fx + +import ( + "testing" +) + +func TestInputVerifyNil(t *testing.T) { + in := (*Input)(nil) + if err := in.Verify(); err == nil { + t.Fatalf("Input.Verify should have returned an error due to an nil input") + } +} diff --git a/vms/secp256k1fx/keychain.go b/vms/secp256k1fx/keychain.go new file mode 100644 index 0000000..280e86b --- /dev/null +++ b/vms/secp256k1fx/keychain.go @@ -0,0 +1,132 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errLockedFunds = errors.New("funds currently locked") + errCantSpend = errors.New("utxo couldn't be spent") +) + +// Keychain is a collection of keys that can be used to spend outputs +type Keychain struct { + factory *crypto.FactorySECP256K1R + addrToKeyIndex map[[20]byte]int + + // These can be used to iterate over. However, they should not be modified externally. + Addrs ids.ShortSet + Keys []*crypto.PrivateKeySECP256K1R +} + +// NewKeychain returns a new, empty, keychain +func NewKeychain() *Keychain { + return &Keychain{ + factory: &crypto.FactorySECP256K1R{}, + addrToKeyIndex: make(map[[20]byte]int), + } +} + +// Add a new key to the key chain +func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { + addr := key.PublicKey().Address() + addrHash := addr.Key() + if _, ok := kc.addrToKeyIndex[addrHash]; !ok { + kc.addrToKeyIndex[addrHash] = len(kc.Keys) + kc.Keys = append(kc.Keys, key) + kc.Addrs.Add(addr) + } +} + +// Get a key from the keychain. 
If the key is unknown, the +func (kc Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { + if i, ok := kc.addrToKeyIndex[id.Key()]; ok { + return kc.Keys[i], true + } + return &crypto.PrivateKeySECP256K1R{}, false +} + +// Addresses returns a list of addresses this keychain manages +func (kc Keychain) Addresses() ids.ShortSet { return kc.Addrs } + +// New returns a newly generated private key +func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { + skGen, err := kc.factory.NewPrivateKey() + if err != nil { + return nil, err + } + + sk := skGen.(*crypto.PrivateKeySECP256K1R) + kc.Add(sk) + return sk, nil +} + +// Spend attempts to create an input +func (kc *Keychain) Spend(out verify.Verifiable, time uint64) (verify.Verifiable, []*crypto.PrivateKeySECP256K1R, error) { + switch out := out.(type) { + case *MintOutput: + if sigIndices, keys, able := kc.Match(&out.OutputOwners); able { + return &MintInput{ + Input: Input{ + SigIndices: sigIndices, + }, + }, keys, nil + } + case *TransferOutput: + if time < out.Locktime { + return nil, nil, errLockedFunds + } + if sigIndices, keys, able := kc.Match(&out.OutputOwners); able { + return &TransferInput{ + Amt: out.Amt, + Input: Input{ + SigIndices: sigIndices, + }, + }, keys, nil + } + } + return nil, nil, errCantSpend +} + +// Match attempts to match a list of addresses up to the provided threshold +func (kc *Keychain) Match(owners *OutputOwners) ([]uint32, []*crypto.PrivateKeySECP256K1R, bool) { + sigs := []uint32{} + keys := []*crypto.PrivateKeySECP256K1R{} + for i := uint32(0); i < uint32(len(owners.Addrs)) && uint32(len(keys)) < owners.Threshold; i++ { + if key, exists := kc.Get(owners.Addrs[i]); exists { + sigs = append(sigs, i) + keys = append(keys, key) + } + } + return sigs, keys, uint32(len(keys)) == owners.Threshold +} + +// PrefixedString returns the key chain as a string representation with [prefix] +// added before every line. 
+func (kc *Keychain) PrefixedString(prefix string) string { + s := strings.Builder{} + + format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", + formatting.IntFormat(len(kc.Keys)-1)) + for i, key := range kc.Keys { + s.WriteString(fmt.Sprintf(format, + prefix, + i, + formatting.CB58{Bytes: key.Bytes()}, + key.PublicKey().Address())) + } + + return strings.TrimSuffix(s.String(), "\n") +} + +func (kc *Keychain) String() string { return kc.PrefixedString("") } diff --git a/vms/secp256k1fx/keychain_test.go b/vms/secp256k1fx/keychain_test.go new file mode 100644 index 0000000..861f08e --- /dev/null +++ b/vms/secp256k1fx/keychain_test.go @@ -0,0 +1,353 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" +) + +var ( + keys = []string{ + "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", + "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", + "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", + } + addrs = []string{ + "B6D4v1VtPYLbiUvYXtW4Px8oE9imC2vGW", + "P5wdRuZeaDt28eHMP5S3w9ZdoBfo7wuzF", + "Q4MzFZZDPHRPAHFeDs3NiyyaZDvxHKivf", + } +) + +func TestNewKeychain(t *testing.T) { + kc := NewKeychain() + if kc == nil { + t.Fatalf("NewKeychain returned a nil keychain") + } +} + +func TestKeychainGetUnknownAddr(t *testing.T) { + kc := NewKeychain() + + addr, _ := ids.ShortFromString(addrs[0]) + if _, exists := kc.Get(addr); exists { + t.Fatalf("Shouldn't have returned a key from an empty keychain") + } +} + +func TestKeychainAdd(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + if err := cb58.FromString(keys[0]); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntff, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntff.(*crypto.PrivateKeySECP256K1R) + if !ok 
{ + t.Fatalf("Factory should have returned secp256k1r private key") + } + + kc.Add(sk) + + addr, _ := ids.ShortFromString(addrs[0]) + if rsk, exists := kc.Get(addr); !exists { + t.Fatalf("Should have returned the key from the keychain") + } else if !bytes.Equal(rsk.Bytes(), sk.Bytes()) { + t.Fatalf("Returned wrong key from the keychain") + } + + if addrs := kc.Addresses(); addrs.Len() != 1 { + t.Fatalf("Should have returned one address from the keychain") + } else if !addrs.Contains(addr) { + t.Fatalf("Keychain contains the wrong address") + } +} + +func TestKeychainNew(t *testing.T) { + kc := NewKeychain() + + if addrs := kc.Addresses(); addrs.Len() != 0 { + t.Fatalf("Shouldn't have returned any addresses from the empty keychain") + } + + sk, err := kc.New() + if err != nil { + t.Fatal(err) + } + + addr := sk.PublicKey().Address() + + if addrs := kc.Addresses(); addrs.Len() != 1 { + t.Fatalf("Should have returned one address from the keychain") + } else if !addrs.Contains(addr) { + t.Fatalf("Keychain contains the wrong address") + } +} + +func TestKeychainMatch(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + sks := []*crypto.PrivateKeySECP256K1R{} + for _, keyStr := range keys { + if err := cb58.FromString(keyStr); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntf, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) + if !ok { + t.Fatalf("Factory should have returned secp256k1r private key") + } + sks = append(sks, sk) + } + + kc.Add(sks[0]) + + owners := OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + sks[1].PublicKey().Address(), + sks[2].PublicKey().Address(), + }, + } + if err := owners.Verify(); err != nil { + t.Fatal(err) + } + + if _, _, ok := kc.Match(&owners); ok { + t.Fatalf("Shouldn't have been able to match with the owners") + } + + kc.Add(sks[1]) + + if indices, keys, ok := kc.Match(&owners); !ok { + t.Fatalf("Should have been able 
to match with the owners") + } else if numIndices := len(indices); numIndices != 1 { + t.Fatalf("Should have returned one index") + } else if numKeys := len(keys); numKeys != 1 { + t.Fatalf("Should have returned one key") + } else if index := indices[0]; index != 0 { + t.Fatalf("Should have returned index 0 for the key") + } else if key := keys[0]; !key.PublicKey().Address().Equals(sks[1].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } + + kc.Add(sks[2]) + + if indices, keys, ok := kc.Match(&owners); !ok { + t.Fatalf("Should have been able to match with the owners") + } else if numIndices := len(indices); numIndices != 1 { + t.Fatalf("Should have returned one index") + } else if numKeys := len(keys); numKeys != 1 { + t.Fatalf("Should have returned one key") + } else if index := indices[0]; index != 0 { + t.Fatalf("Should have returned index 0 for the key") + } else if key := keys[0]; !key.PublicKey().Address().Equals(sks[1].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } +} + +func TestKeychainSpendMint(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + sks := []*crypto.PrivateKeySECP256K1R{} + for _, keyStr := range keys { + if err := cb58.FromString(keyStr); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntf, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) + if !ok { + t.Fatalf("Factory should have returned secp256k1r private key") + } + sks = append(sks, sk) + } + + mint := MintOutput{OutputOwners: OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + sks[1].PublicKey().Address(), + sks[2].PublicKey().Address(), + }, + }} + if err := mint.Verify(); err != nil { + t.Fatal(err) + } + + if _, _, err := kc.Spend(&mint, 0); err == nil { + t.Fatalf("Shouldn't have been able to spend with no keys") + } + + kc.Add(sks[0]) + kc.Add(sks[1]) + kc.Add(sks[2]) + + if input, keys, err := kc.Spend(&mint, 0); err != nil { + t.Fatal(err) 
+ } else if input, ok := input.(*MintInput); !ok { + t.Fatalf("Wrong input type returned") + } else if err := input.Verify(); err != nil { + t.Fatal(err) + } else if numSigs := len(input.SigIndices); numSigs != 2 { + t.Fatalf("Should have returned two signers") + } else if sig := input.SigIndices[0]; sig != 0 { + t.Fatalf("Should have returned index of secret key 1") + } else if sig := input.SigIndices[1]; sig != 1 { + t.Fatalf("Should have returned index of secret key 2") + } else if numKeys := len(keys); numKeys != 2 { + t.Fatalf("Should have returned two keys") + } else if key := keys[0]; !key.PublicKey().Address().Equals(sks[1].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } else if key := keys[1]; !key.PublicKey().Address().Equals(sks[2].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } +} + +func TestKeychainSpendTransfer(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + sks := []*crypto.PrivateKeySECP256K1R{} + for _, keyStr := range keys { + if err := cb58.FromString(keyStr); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntf, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) + if !ok { + t.Fatalf("Factory should have returned secp256k1r private key") + } + sks = append(sks, sk) + } + + transfer := TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + sks[1].PublicKey().Address(), + sks[2].PublicKey().Address(), + }, + }, + } + if err := transfer.Verify(); err != nil { + t.Fatal(err) + } + + if _, _, err := kc.Spend(&transfer, 54321); err == nil { + t.Fatalf("Shouldn't have been able to spend with no keys") + } + + kc.Add(sks[0]) + kc.Add(sks[1]) + kc.Add(sks[2]) + + if _, _, err := kc.Spend(&transfer, 4321); err == nil { + t.Fatalf("Shouldn't have been able timelocked funds") + } + + if input, keys, err := kc.Spend(&transfer, 54321); err != nil { + 
t.Fatal(err) + } else if input, ok := input.(*TransferInput); !ok { + t.Fatalf("Wrong input type returned") + } else if err := input.Verify(); err != nil { + t.Fatal(err) + } else if amt := input.Amount(); amt != 12345 { + t.Fatalf("Wrong amount returned from input") + } else if numSigs := len(input.SigIndices); numSigs != 2 { + t.Fatalf("Should have returned two signers") + } else if sig := input.SigIndices[0]; sig != 0 { + t.Fatalf("Should have returned index of secret key 1") + } else if sig := input.SigIndices[1]; sig != 1 { + t.Fatalf("Should have returned index of secret key 2") + } else if numKeys := len(keys); numKeys != 2 { + t.Fatalf("Should have returned two keys") + } else if key := keys[0]; !key.PublicKey().Address().Equals(sks[1].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } else if key := keys[1]; !key.PublicKey().Address().Equals(sks[2].PublicKey().Address()) { + t.Fatalf("Returned wrong key") + } +} + +func TestKeychainString(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + if err := cb58.FromString(keys[0]); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntf, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) + if !ok { + t.Fatalf("Factory should have returned secp256k1r private key") + } + + kc.Add(sk) + + expected := "Key[0]: Key: 2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY Address: B6D4v1VtPYLbiUvYXtW4Px8oE9imC2vGW" + + if result := kc.String(); result != expected { + t.Fatalf("Keychain.String returned:\n%s\nexpected:\n%s", result, expected) + } +} + +func TestKeychainPrefixedString(t *testing.T) { + kc := NewKeychain() + + cb58 := formatting.CB58{} + if err := cb58.FromString(keys[0]); err != nil { + t.Fatal(err) + } + skBytes := cb58.Bytes + + skIntf, err := kc.factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) + if !ok { + t.Fatalf("Factory 
should have returned secp256k1r private key") + } + + kc.Add(sk) + + expected := "xDKey[0]: Key: 2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY Address: B6D4v1VtPYLbiUvYXtW4Px8oE9imC2vGW" + + if result := kc.PrefixedString("xD"); result != expected { + t.Fatalf(`Keychain.PrefixedString("xD") returned:\n%s\nexpected:\n%s`, result, expected) + } +} diff --git a/vms/secp256k1fx/mint_input.go b/vms/secp256k1fx/mint_input.go new file mode 100644 index 0000000..c16f38c --- /dev/null +++ b/vms/secp256k1fx/mint_input.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +// MintInput ... +type MintInput struct { + Input `serialize:"true"` +} diff --git a/vms/secp256k1fx/mint_output.go b/vms/secp256k1fx/mint_output.go new file mode 100644 index 0000000..4a3d62f --- /dev/null +++ b/vms/secp256k1fx/mint_output.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +// MintOutput ... +type MintOutput struct { + OutputOwners `serialize:"true"` +} + +// Verify ... +func (out *MintOutput) Verify() error { + switch { + case out == nil: + return errNilOutput + default: + return out.OutputOwners.Verify() + } +} diff --git a/vms/secp256k1fx/mint_output_test.go b/vms/secp256k1fx/mint_output_test.go new file mode 100644 index 0000000..219558e --- /dev/null +++ b/vms/secp256k1fx/mint_output_test.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package secp256k1fx + +import ( + "testing" +) + +func TestMintOutputVerifyNil(t *testing.T) { + out := (*MintOutput)(nil) + if err := out.Verify(); err == nil { + t.Fatalf("MintOutput.Verify should have returned an error due to an nil output") + } +} diff --git a/vms/secp256k1fx/output_owners.go b/vms/secp256k1fx/output_owners.go new file mode 100644 index 0000000..104a7a4 --- /dev/null +++ b/vms/secp256k1fx/output_owners.go @@ -0,0 +1,68 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" +) + +var ( + errNilOutput = errors.New("nil output") + errOutputUnspendable = errors.New("output is unspendable") + errOutputUnoptimized = errors.New("output representation should be optimized") + errAddrsNotSortedUnique = errors.New("addresses not sorted and unique") +) + +// OutputOwners ... +type OutputOwners struct { + Threshold uint32 `serialize:"true"` + Addrs []ids.ShortID `serialize:"true"` +} + +// Addresses returns the addresses that manage this output +func (out *OutputOwners) Addresses() [][]byte { + addrs := make([][]byte, len(out.Addrs)) + for i, addr := range out.Addrs { + addrs[i] = addr.Bytes() + } + return addrs +} + +// Equals returns true if the provided owners create the same condition +func (out *OutputOwners) Equals(other *OutputOwners) bool { + if out == other { + return true + } + if out == nil || other == nil || out.Threshold != other.Threshold || len(out.Addrs) != len(other.Addrs) { + return false + } + for i, addr := range out.Addrs { + otherAddr := other.Addrs[i] + if !addr.Equals(otherAddr) { + return false + } + } + return true +} + +// Verify ... 
+func (out *OutputOwners) Verify() error { + switch { + case out == nil: + return errNilOutput + case out.Threshold > uint32(len(out.Addrs)): + return errOutputUnspendable + case out.Threshold == 0 && len(out.Addrs) > 0: + return errOutputUnoptimized + case !ids.IsSortedAndUniqueShortIDs(out.Addrs): + return errAddrsNotSortedUnique + default: + return nil + } +} + +// Sort ... +func (out *OutputOwners) Sort() { ids.SortShortIDs(out.Addrs) } diff --git a/vms/secp256k1fx/output_owners_test.go b/vms/secp256k1fx/output_owners_test.go new file mode 100644 index 0000000..129f3ae --- /dev/null +++ b/vms/secp256k1fx/output_owners_test.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestMintOutputOwnersVerifyNil(t *testing.T) { + out := (*OutputOwners)(nil) + if err := out.Verify(); err == nil { + t.Fatalf("OutputOwners.Verify should have returned an error due to an nil output") + } +} + +func TestMintOutputOwnersExactEquals(t *testing.T) { + out0 := (*OutputOwners)(nil) + out1 := (*OutputOwners)(nil) + if !out0.Equals(out1) { + t.Fatalf("Outputs should have equaled") + } +} + +func TestMintOutputOwnersNotEqual(t *testing.T) { + out0 := &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + } + out1 := &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{1}), + }, + } + if out0.Equals(out1) { + t.Fatalf("Outputs should not have equaled") + } +} + +func TestMintOutputOwnersNotSorted(t *testing.T) { + out := &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{1}), + ids.NewShortID([20]byte{0}), + }, + } + if err := out.Verify(); err == nil { + t.Fatalf("Verification should have failed due to unsorted addresses") + } + out.Sort() + if err := out.Verify(); err != nil { + t.Fatal(err) + } +} diff --git 
a/vms/secp256k1fx/transer_input_test.go b/vms/secp256k1fx/transer_input_test.go new file mode 100644 index 0000000..e954af0 --- /dev/null +++ b/vms/secp256k1fx/transer_input_test.go @@ -0,0 +1,117 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestTransferInputAmount(t *testing.T) { + in := TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0, 1}, + }, + } + if amount := in.Amount(); amount != 1 { + t.Fatalf("Input.Amount returned the wrong amount. Result: %d ; Expected: %d", amount, 1) + } +} + +func TestTransferInputVerify(t *testing.T) { + in := TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0, 1}, + }, + } + err := in.Verify() + if err != nil { + t.Fatal(err) + } +} + +func TestTransferInputVerifyNil(t *testing.T) { + in := (*TransferInput)(nil) + err := in.Verify() + if err == nil { + t.Fatalf("Should have errored with a nil input") + } +} + +func TestTransferInputVerifyNoValue(t *testing.T) { + in := TransferInput{ + Amt: 0, + Input: Input{ + SigIndices: []uint32{0, 1}, + }, + } + err := in.Verify() + if err == nil { + t.Fatalf("Should have errored with a no value input") + } +} + +func TestTransferInputVerifyDuplicated(t *testing.T) { + in := TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{0, 0}, + }, + } + err := in.Verify() + if err == nil { + t.Fatalf("Should have errored with duplicated indices") + } +} + +func TestTransferInputVerifyUnsorted(t *testing.T) { + in := TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{1, 0}, + }, + } + err := in.Verify() + if err == nil { + t.Fatalf("Should have errored with unsorted indices") + } +} + +func TestTransferInputSerialize(t *testing.T) { + c := codec.NewDefault() + + expected := []byte{ + // amount: + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + // length: + 0x00, 0x00, 
0x00, 0x02, + // sig[0] + 0x00, 0x00, 0x00, 0x03, + // sig[1] + 0x00, 0x00, 0x00, 0x07, + } + in := TransferInput{ + Amt: 123456789, + Input: Input{ + SigIndices: []uint32{3, 7}, + }, + } + err := in.Verify() + if err != nil { + t.Fatal(err) + } + + result, err := c.Marshal(&in) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} diff --git a/vms/secp256k1fx/transfer_input.go b/vms/secp256k1fx/transfer_input.go new file mode 100644 index 0000000..5e44f76 --- /dev/null +++ b/vms/secp256k1fx/transfer_input.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" +) + +var ( + errNoValueInput = errors.New("input has no value") +) + +// TransferInput ... +type TransferInput struct { + Amt uint64 `serialize:"true"` + Input `serialize:"true"` +} + +// Amount returns the quantity of the asset this input produces +func (in *TransferInput) Amount() uint64 { return in.Amt } + +// Verify this input is syntactically valid +func (in *TransferInput) Verify() error { + switch { + case in == nil: + return errNilInput + case in.Amt == 0: + return errNoValueInput + default: + return in.Input.Verify() + } +} diff --git a/vms/secp256k1fx/transfer_output.go b/vms/secp256k1fx/transfer_output.go new file mode 100644 index 0000000..69f2f20 --- /dev/null +++ b/vms/secp256k1fx/transfer_output.go @@ -0,0 +1,35 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" +) + +var ( + errNoValueOutput = errors.New("output has no value") +) + +// TransferOutput ... 
+type TransferOutput struct { + Amt uint64 `serialize:"true"` + Locktime uint64 `serialize:"true"` + + OutputOwners `serialize:"true"` +} + +// Amount returns the quantity of the asset this output consumes +func (out *TransferOutput) Amount() uint64 { return out.Amt } + +// Verify ... +func (out *TransferOutput) Verify() error { + switch { + case out == nil: + return errNilOutput + case out.Amt == 0: + return errNoValueInput + default: + return out.OutputOwners.Verify() + } +} diff --git a/vms/secp256k1fx/transfer_output_test.go b/vms/secp256k1fx/transfer_output_test.go new file mode 100644 index 0000000..7e87875 --- /dev/null +++ b/vms/secp256k1fx/transfer_output_test.go @@ -0,0 +1,233 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/codec" +) + +func TestOutputAmount(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + if amount := out.Amount(); amount != 1 { + t.Fatalf("Output.Amount returned the wrong amount. 
Result: %d ; Expected: %d", amount, 1) + } +} + +func TestOutputVerify(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + err := out.Verify() + if err != nil { + t.Fatal(err) + } +} + +func TestOutputVerifyNil(t *testing.T) { + out := (*TransferOutput)(nil) + err := out.Verify() + if err == nil { + t.Fatalf("Should have errored with a nil output") + } +} + +func TestOutputVerifyNoValue(t *testing.T) { + out := TransferOutput{ + Amt: 0, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + err := out.Verify() + if err == nil { + t.Fatalf("Should have errored with a no value output") + } +} + +func TestOutputVerifyUnspendable(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + err := out.Verify() + if err == nil { + t.Fatalf("Should have errored with an unspendable output") + } +} + +func TestOutputVerifyUnoptimized(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 0, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + } + err := out.Verify() + if err == nil { + t.Fatalf("Should have errored with an unoptimized output") + } +} + +func TestOutputVerifyUnsorted(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{1}), + ids.NewShortID([20]byte{0}), + }, + }, + } + err := out.Verify() + if err == nil { + t.Fatalf("Should have errored with an unsorted output") + } +} + +func TestOutputVerifyDuplicated(t *testing.T) { + out := TransferOutput{ + Amt: 1, + Locktime: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + ids.ShortEmpty, + }, + }, + } + err := out.Verify() + 
if err == nil { + t.Fatalf("Should have errored with a duplicated output") + } +} + +func TestOutputSerialize(t *testing.T) { + c := codec.NewDefault() + + expected := []byte{ + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses: + 0x00, 0x00, 0x00, 0x02, + // addrs[0]: + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + // addrs[1]: + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + out := TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), + }, + }, + } + err := out.Verify() + if err != nil { + t.Fatal(err) + } + + result, err := c.Marshal(&out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +func TestOutputAddresses(t *testing.T) { + out := TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, + }), + ids.NewShortID([20]byte{ + 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + }), + }, + }, + } + err := out.Verify() + if err != nil { + t.Fatal(err) + } + + addrs := out.Addresses() + if len(addrs) != 2 { 
+ t.Fatalf("Wrong number of addresses") + } + + if addr := addrs[0]; !bytes.Equal(addr, out.Addrs[0].Bytes()) { + t.Fatalf("Wrong address returned") + } + if addr := addrs[1]; !bytes.Equal(addr, out.Addrs[1].Bytes()) { + t.Fatalf("Wrong address returned") + } +} diff --git a/vms/secp256k1fx/tx.go b/vms/secp256k1fx/tx.go new file mode 100644 index 0000000..e2ac0f7 --- /dev/null +++ b/vms/secp256k1fx/tx.go @@ -0,0 +1,9 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +// Tx that this Fx is supporting +type Tx interface { + UnsignedBytes() []byte +} diff --git a/vms/secp256k1fx/vm.go b/vms/secp256k1fx/vm.go new file mode 100644 index 0000000..1083af7 --- /dev/null +++ b/vms/secp256k1fx/vm.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/components/codec" +) + +// VM that this Fx must be run by +type VM interface { + Codec() codec.Codec + Clock() *timer.Clock +} diff --git a/vms/spchainvm/account.go b/vms/spchainvm/account.go new file mode 100644 index 0000000..4dc6c31 --- /dev/null +++ b/vms/spchainvm/account.go @@ -0,0 +1,155 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "errors" + "fmt" + "math" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" +) + +var ( + errOutOfSpends = errors.New("ran out of spends") + errInsufficientFunds = errors.New("insufficient funds") + errOverflow = errors.New("math overflowed") + errInvalidID = errors.New("invalid ID") + errInvalidAddress = errors.New("invalid address") +) + +// Account represents the balance and nonce of a user's funds +type Account struct { + id ids.ShortID + nonce, balance uint64 +} + +// ID of this account +func (a Account) ID() ids.ShortID { return a.id } + +// Balance contained in this account +func (a Account) Balance() uint64 { return a.balance } + +// Nonce this account was last spent with +func (a Account) Nonce() uint64 { return a.nonce } + +// CreateTx creates a transaction from this account +// that sends [amount] to the address [destination] +func (a Account) CreateTx(amount uint64, destination ids.ShortID, ctx *snow.Context, key *crypto.PrivateKeySECP256K1R) (*Tx, Account, error) { + builder := Builder{ + NetworkID: ctx.NetworkID, + ChainID: ctx.ChainID, + } + // If nonce overflows, Send will return an error + tx, err := builder.NewTx(key, a.nonce+1, amount, destination) + if err != nil { + return nil, a, err + } + newAccount, err := a.Send(tx, ctx) + return tx, newAccount, err +} + +// Send generates a new account state from sending the transaction +func (a Account) Send(tx *Tx, ctx *snow.Context) (Account, error) { + return a.send(tx, ctx, &crypto.FactorySECP256K1R{}) +} + +// send generates the new account state from sending the transaction +func (a Account) send(tx *Tx, ctx *snow.Context, factory *crypto.FactorySECP256K1R) (Account, error) { + return Account{ + id: a.id, + // guaranteed not to overflow due to VerifySend + nonce: a.nonce + 1, + // guaranteed not to underflow due to VerifySend + balance: a.balance - tx.amount, + }, a.verifySend(tx, ctx, factory) +} + 
+// VerifySend returns if the provided transaction can send this transaction +func (a Account) VerifySend(tx *Tx, ctx *snow.Context) error { + return a.verifySend(tx, ctx, &crypto.FactorySECP256K1R{}) +} + +func (a Account) verifySend(tx *Tx, ctx *snow.Context, factory *crypto.FactorySECP256K1R) error { + // Verify the account is in a valid state and the transaction is valid + if err := a.Verify(); err != nil { + return err + } + if err := tx.verify(ctx, factory); err != nil { + return err + } + switch { + case a.nonce == math.MaxUint64: + // For this error to occur, a user would need to be issuing transactions + // at 10k tps for ~ 80 million years + return errOutOfSpends + case a.nonce+1 != tx.nonce: + return fmt.Errorf("wrong tx nonce used, %d != %d", a.nonce+1, tx.nonce) + case a.balance < tx.amount: + return fmt.Errorf("%s %d < %d", errInsufficientFunds, a.balance, tx.amount) + case a.nonce+1 == math.MaxUint64 && a.balance != tx.amount: + return errOutOfSpends + case !a.id.Equals(tx.key(ctx, factory).Address()): + return errInvalidAddress + default: + return nil + } +} + +// Receive generates a new account state from receiving the transaction +func (a Account) Receive(tx *Tx, ctx *snow.Context) (Account, error) { + return a.receive(tx, ctx, &crypto.FactorySECP256K1R{}) +} + +func (a Account) receive(tx *Tx, ctx *snow.Context, factory *crypto.FactorySECP256K1R) (Account, error) { + return Account{ + id: a.id, + nonce: a.nonce, + // guaranteed not to overflow due to VerifyReceive + balance: a.balance + tx.amount, + }, a.verifyReceive(tx, ctx, factory) +} + +// VerifyReceive returns if the provided transaction can receive this +// transaction +func (a Account) VerifyReceive(tx *Tx, ctx *snow.Context) error { + return a.verifyReceive(tx, ctx, &crypto.FactorySECP256K1R{}) +} + +func (a Account) verifyReceive(tx *Tx, ctx *snow.Context, factory *crypto.FactorySECP256K1R) error { + if err := a.Verify(); err != nil { + return err + } + if err := tx.verify(ctx, 
factory); err != nil { + return err + } + switch { + case a.nonce == math.MaxUint64: + // For this error to occur, a user would need to be issuing transactions + // at 10k tps for ~ 80 million years + return errOutOfSpends + case a.balance > math.MaxUint64-tx.amount: + return errOverflow + case !a.id.Equals(tx.to): + return errInvalidID + default: + return nil + } +} + +// Verify that this account is well formed +func (a Account) Verify() error { + switch { + case a.id.IsZero(): + return errInvalidID + default: + return nil + } +} + +func (a Account) String() string { + return fmt.Sprintf("Account[%s]: Balance=%d, Nonce=%d", a.ID(), a.Balance(), a.Nonce()) +} diff --git a/vms/spchainvm/account_test.go b/vms/spchainvm/account_test.go new file mode 100644 index 0000000..627498b --- /dev/null +++ b/vms/spchainvm/account_test.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +// TODO: Add package comment describing what spvm (simple payements vm means) + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestAccountSerialization(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + builder := Builder{ + NetworkID: 0, + ChainID: chainID, + } + account := builder.NewAccount(ids.ShortEmpty, 5, 25) + + codec := Codec{} + bytes, err := codec.MarshalAccount(account) + if err != nil { + t.Fatal(err) + } + + newAccount, err := codec.UnmarshalAccount(bytes) + if err != nil { + t.Fatal(err) + } + + if account.String() != newAccount.String() { + t.Fatalf("Expected %s got %s", account, newAccount) + } +} diff --git a/vms/spchainvm/block.go b/vms/spchainvm/block.go new file mode 100644 index 0000000..5db43b7 --- /dev/null +++ b/vms/spchainvm/block.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" +) + +var ( + errInvalidNil = errors.New("nil is invalid") +) + +// Block is a group of transactions +type Block struct { + id ids.ID + + parentID ids.ID + txs []*Tx + + bytes []byte +} + +// ID of this operation +func (b *Block) ID() ids.ID { return b.id } + +// ParentID of this operation +func (b *Block) ParentID() ids.ID { return b.parentID } + +// Txs contained in the operation +func (b *Block) Txs() []*Tx { return b.txs } + +// Bytes of this transaction +func (b *Block) Bytes() []byte { return b.bytes } + +func (b *Block) startVerify(ctx *snow.Context, factory *crypto.FactorySECP256K1R) { + if b != nil { + for _, tx := range b.txs { + tx.startVerify(ctx, factory) + } + } +} + +func (b *Block) verify(ctx *snow.Context, factory *crypto.FactorySECP256K1R) error { + switch { + case b == nil: + return errInvalidNil + case b.id.IsZero(): + return errInvalidID + case b.parentID.IsZero(): + return errInvalidID + } + + b.startVerify(ctx, factory) + + for _, tx := range b.txs { + if err := tx.verify(ctx, factory); err != nil { + return err + } + } + + return nil +} diff --git a/vms/spchainvm/block_benchmark_test.go b/vms/spchainvm/block_benchmark_test.go new file mode 100644 index 0000000..c7657e8 --- /dev/null +++ b/vms/spchainvm/block_benchmark_test.go @@ -0,0 +1,63 @@ +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" +) + +func genBlocks(numBlocks, numTxsPerBlock int, initialParent ids.ID, b *testing.B) []*Block { + ctx := snow.DefaultContextTest() + builder := Builder{ + NetworkID: ctx.NetworkID, + ChainID: ctx.ChainID, + } + + blocks := make([]*Block, numBlocks)[:0] + for j := 0; j < numBlocks; j++ { + txs := genTxs(numTxsPerBlock, uint64(j*numTxsPerBlock), b) + + blk, err := 
builder.NewBlock(initialParent, txs) + if err != nil { + b.Fatal(err) + } + blocks = append(blocks, blk) + initialParent = blk.ID() + } + return blocks +} + +func verifyBlocks(blocks []*Block, b *testing.B) { + ctx := snow.DefaultContextTest() + factory := crypto.FactorySECP256K1R{} + for _, blk := range blocks { + if err := blk.verify(ctx, &factory); err != nil { + b.Fatal(err) + } + + for _, tx := range blk.txs { + // reset the tx so that it won't be cached + tx.pubkey = nil + tx.startedVerification = false + tx.finishedVerification = false + tx.verificationErr = nil + } + } +} + +// BenchmarkBlockVerify runs the benchmark of verification of blocks +func BenchmarkBlockVerify(b *testing.B) { + blocks := genBlocks( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*initialParent=*/ ids.Empty, + /*testing=*/ b, + ) + b.ResetTimer() + + for n := 0; n < b.N; n++ { + verifyBlocks(blocks, b) + } +} diff --git a/vms/spchainvm/builder.go b/vms/spchainvm/builder.go new file mode 100644 index 0000000..1fbf68c --- /dev/null +++ b/vms/spchainvm/builder.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" +) + +var ( + errNilChainID = errors.New("nil chain id") +) + +// Builder defines the functionality for building payment objects. 
+type Builder struct { + NetworkID uint32 + ChainID ids.ID +} + +// NewAccount creates a new Account +func (b Builder) NewAccount(id ids.ShortID, nonce, balance uint64) Account { + return Account{ + id: id, + nonce: nonce, + balance: balance, + } +} + +// NewBlock creates a new block +func (b Builder) NewBlock(parentID ids.ID, txs []*Tx) (*Block, error) { + block := &Block{ + parentID: parentID, + txs: txs, + } + + codec := Codec{} + bytes, err := codec.MarshalBlock(block) + if err != nil { + return nil, err + } + + block.bytes = bytes + block.id = ids.NewID(hashing.ComputeHash256Array(block.bytes)) + return block, nil +} + +// NewTx creates a new transaction from [key|nonce] for [amount] to [destination] +func (b Builder) NewTx(key *crypto.PrivateKeySECP256K1R, nonce, amount uint64, destination ids.ShortID) (*Tx, error) { + if b.ChainID.IsZero() { + return nil, errNilChainID + } + + tx := &Tx{ + networkID: b.NetworkID, + chainID: b.ChainID, + nonce: nonce, + amount: amount, + to: destination, + verification: make(chan error, 1), + } + + codec := Codec{} + unsignedBytes, err := codec.MarshalUnsignedTx(tx) + if err != nil { + return nil, err + } + sig, err := key.Sign(unsignedBytes) + if err != nil { + return nil, err + } + + tx.sig = sig + bytes, err := codec.MarshalTx(tx) + if err != nil { + return nil, err + } + + tx.bytes = bytes + tx.id = ids.NewID(hashing.ComputeHash256Array(tx.bytes)) + return tx, nil +} diff --git a/vms/spchainvm/codec.go b/vms/spchainvm/codec.go new file mode 100644 index 0000000..7768ddc --- /dev/null +++ b/vms/spchainvm/codec.go @@ -0,0 +1,393 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errBadCodec = errors.New("wrong or unknown codec used") + errExtraSpace = errors.New("trailing buffer space") + errNil = errors.New("nil value is invalid") +) + +// CodecID is an identifier for a codec +type CodecID uint32 + +// Codec types +const ( + NoID CodecID = iota + GenericID + CustomID + // TODO: Utilize a standard serialization library. Must have a canonical + // serialization format. +) + +// Verify that the codec is a known codec value. Returns nil if the codec is +// valid. +func (c CodecID) Verify() error { + switch c { + case NoID, GenericID, CustomID: + return nil + default: + return errBadCodec + } +} + +func (c CodecID) String() string { + switch c { + case NoID: + return "No Codec" + case GenericID: + return "Generic Codec" + case CustomID: + return "Custom Codec" + default: + return "Unknown Codec" + } +} + +// Codec is used to serialize and de-serialize transaction objects +type Codec struct{} + +/* + ****************************************************************************** + *********************************** Genesis ********************************** + ****************************************************************************** + */ + +/* Genesis: + * Accounts | ? 
Bytes + */ + +// MarshalGenesis returns the byte representation of the genesis +func (c *Codec) MarshalGenesis(accounts []Account) ([]byte, error) { + return c.MarshalAccounts(accounts) +} + +// UnmarshalGenesis attempts to parse the genesis +func (c *Codec) UnmarshalGenesis(b []byte) ([]Account, error) { + return c.UnmarshalAccounts(b) +} + +/* + ****************************************************************************** + ************************************ Block *********************************** + ****************************************************************************** + */ + +/* Block: + * Codec | 04 Bytes + * ParentID | 32 Bytes + * NumTxs | 04 bytes + * Repeated (NumTxs): + * Tx | ? bytes + */ + +const baseOpSize = 40 + +// MarshalBlock returns the byte representation of the block +func (c *Codec) MarshalBlock(block *Block) ([]byte, error) { + p := wrappers.Packer{Bytes: make([]byte, baseOpSize+signedTxSize*len(block.txs))} + + c.marshalBlock(block, &p) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + return p.Bytes, p.Err +} + +func (c *Codec) marshalBlock(block *Block, p *wrappers.Packer) { + if block == nil { + p.Add(errNil) + return + } + + p.PackInt(uint32(CustomID)) + p.PackFixedBytes(block.parentID.Bytes()) + p.PackInt(uint32(len(block.txs))) + for _, tx := range block.txs { + if tx != nil { + p.PackFixedBytes(tx.Bytes()) + } else { + p.Add(errNil) + return + } + } +} + +// UnmarshalBlock attempts to parse an block from a byte array +func (c *Codec) UnmarshalBlock(b []byte) (*Block, error) { + p := wrappers.Packer{Bytes: b} + + block := c.unmarshalBlock(&p) + + if p.Offset != len(b) { + p.Add(errExtraSpace) + } + + return block, p.Err +} + +func (c *Codec) unmarshalBlock(p *wrappers.Packer) *Block { + start := p.Offset + + if codecID := CodecID(p.UnpackInt()); codecID != CustomID { + p.Add(errBadCodec) + } + + parentID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + + txs := []*Tx(nil) + for i := p.UnpackInt(); i > 0 
&& !p.Errored(); i-- { + txs = append(txs, c.unmarshalTx(p)) + } + + if p.Errored() { + return nil + } + + bytes := p.Bytes[start:p.Offset] + return &Block{ + id: ids.NewID(hashing.ComputeHash256Array(bytes)), + parentID: parentID, + txs: txs, + bytes: bytes, + } +} + +/* + ****************************************************************************** + ************************************* Tx ************************************* + ****************************************************************************** + */ + +/* Unsigned Tx: + * Codec | 04 Bytes + * Network ID | 04 bytes + * Chain ID | 32 bytes + * Nonce | 08 bytes + * Amount | 08 bytes + * Destination | 20 bytes + */ +const unsignedTxSize = 2*wrappers.IntLen + 2*wrappers.LongLen + hashing.AddrLen + hashing.HashLen + +/* Tx: + * Unsigned Tx | 76 bytes + * Signature | 65 bytes + */ +const signedTxSize = unsignedTxSize + crypto.SECP256K1RSigLen + +// MarshalUnsignedTx returns the byte representation of the unsigned tx +func (c *Codec) MarshalUnsignedTx(tx *Tx) ([]byte, error) { + p := wrappers.Packer{Bytes: make([]byte, unsignedTxSize)} + + c.marshalUnsignedTx(tx, &p) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + return p.Bytes, p.Err +} + +// MarshalTx returns the byte representation of the tx +func (c *Codec) MarshalTx(tx *Tx) ([]byte, error) { + p := wrappers.Packer{Bytes: make([]byte, signedTxSize)} + + c.marshalTx(tx, &p) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + return p.Bytes, p.Err +} + +func (c *Codec) marshalUnsignedTx(tx *Tx, p *wrappers.Packer) { + if tx == nil { + p.Add(errNil) + return + } + + p.PackInt(uint32(CustomID)) + p.PackInt(tx.networkID) + p.PackFixedBytes(tx.chainID.Bytes()) + p.PackLong(tx.nonce) + p.PackLong(tx.amount) + p.PackFixedBytes(tx.to.Bytes()) +} + +func (c *Codec) marshalTx(tx *Tx, p *wrappers.Packer) { + c.marshalUnsignedTx(tx, p) + + p.PackFixedBytes(tx.sig) +} + +// UnmarshalTx attempts to convert the stream of bytes into a 
representation +// of a tx +func (c *Codec) UnmarshalTx(b []byte) (*Tx, error) { + p := wrappers.Packer{Bytes: b} + + tx := c.unmarshalTx(&p) + + if p.Offset != len(b) { + p.Add(errExtraSpace) + } + + return tx, p.Err +} + +func (c *Codec) unmarshalTx(p *wrappers.Packer) *Tx { + start := p.Offset + + if codecID := CodecID(p.UnpackInt()); codecID != CustomID { + p.Add(errBadCodec) + } + + networkID := p.UnpackInt() + chainID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + nonce := p.UnpackLong() + amount := p.UnpackLong() + destination, _ := ids.ToShortID(p.UnpackFixedBytes(hashing.AddrLen)) + sig := p.UnpackFixedBytes(crypto.SECP256K1RSigLen) + + if p.Errored() { + return nil + } + + bytes := p.Bytes[start:p.Offset] + return &Tx{ + id: ids.NewID(hashing.ComputeHash256Array(bytes)), + networkID: networkID, + chainID: chainID, + nonce: nonce, + amount: amount, + to: destination, + sig: sig, + bytes: bytes, + verification: make(chan error, 1), + } +} + +/* + ****************************************************************************** + ********************************** Accounts ********************************** + ****************************************************************************** + */ + +/* Accounts: + * NumAccounts | 04 Bytes + * Repeated (NumAccounts): + * Account | 36 bytes + */ + +const baseAccountsSize = 4 + +// MarshalAccounts returns the byte representation of a list of accounts +func (c *Codec) MarshalAccounts(accounts []Account) ([]byte, error) { + p := wrappers.Packer{Bytes: make([]byte, baseAccountsSize+accountSize*len(accounts))} + + c.marshalAccounts(accounts, &p) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + return p.Bytes, p.Err +} + +func (c *Codec) marshalAccounts(accounts []Account, p *wrappers.Packer) { + p.PackInt(uint32(len(accounts))) + for _, account := range accounts { + c.marshalAccount(account, p) + } +} + +// UnmarshalAccounts attempts to parse a list of accounts from a byte array +func (c *Codec) 
UnmarshalAccounts(b []byte) ([]Account, error) { + p := wrappers.Packer{Bytes: b} + + account := c.unmarshalAccounts(&p) + + if p.Offset != len(b) { + p.Add(errExtraSpace) + } + + return account, p.Err +} + +func (c *Codec) unmarshalAccounts(p *wrappers.Packer) []Account { + accounts := []Account(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + accounts = append(accounts, c.unmarshalAccount(p)) + } + return accounts +} + +/* + ****************************************************************************** + *********************************** Account ********************************** + ****************************************************************************** + */ + +/* Account: + * ID | 20 bytes + * Nonce | 08 bytes + * Balance | 08 bytes + */ + +const accountSize = 36 + +// MarshalAccount returns the byte representation of the account +func (c *Codec) MarshalAccount(account Account) ([]byte, error) { + p := wrappers.Packer{Bytes: make([]byte, accountSize)} + + c.marshalAccount(account, &p) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + return p.Bytes, p.Err +} + +func (c *Codec) marshalAccount(account Account, p *wrappers.Packer) { + p.PackFixedBytes(account.id.Bytes()) + p.PackLong(account.nonce) + p.PackLong(account.balance) +} + +// UnmarshalAccount attempts to parse an account from a byte array +func (c *Codec) UnmarshalAccount(b []byte) (Account, error) { + p := wrappers.Packer{Bytes: b} + + account := c.unmarshalAccount(&p) + + if p.Offset != len(b) { + p.Add(errExtraSpace) + } + + return account, p.Err +} + +func (c *Codec) unmarshalAccount(p *wrappers.Packer) Account { + id, _ := ids.ToShortID(p.UnpackFixedBytes(hashing.AddrLen)) + nonce := p.UnpackLong() + balance := p.UnpackLong() + + return Account{ + id: id, + nonce: nonce, + balance: balance, + } +} diff --git a/vms/spchainvm/codec_benchmark_test.go b/vms/spchainvm/codec_benchmark_test.go new file mode 100644 index 0000000..88fe0b4 --- /dev/null +++ 
b/vms/spchainvm/codec_benchmark_test.go @@ -0,0 +1,33 @@ +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +// BenchmarkUnmarshalBlock runs the benchmark of block parsing +func BenchmarkUnmarshalBlock(b *testing.B) { + blocks := genBlocks( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*initialParent=*/ ids.Empty, + /*testing=*/ b, + ) + + blockBytes := make([][]byte, len(blocks)) + for i, block := range blocks { + blockBytes[i] = block.Bytes() + } + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + c := Codec{} + for _, block := range blockBytes { + if _, err := c.UnmarshalBlock(block); err != nil { + b.Fatal(err) + } + } + } +} diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go new file mode 100644 index 0000000..08c63ab --- /dev/null +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -0,0 +1,281 @@ +package spchainvm + +import ( + "sync" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" + "github.com/ava-labs/gecko/snow/networking/handler" + "github.com/ava-labs/gecko/snow/networking/router" + "github.com/ava-labs/gecko/snow/networking/sender" + "github.com/ava-labs/gecko/snow/networking/timeout" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/logging" + + smcon "github.com/ava-labs/gecko/snow/consensus/snowman" + smeng "github.com/ava-labs/gecko/snow/engine/snowman" +) + +// ConsensusLeader runs the leader consensus benchmark for blocks +func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { + b.StopTimer() + b.ResetTimer() + + ctx := 
snow.DefaultContextTest() + genesisData, blocks := genGenesisState( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*testing=*/ b, + ) + + maxBatchSize = numTxsPerBlock + for n := 0; n < b.N; n++ { + db := memdb.New() + vmDB := prefixdb.New([]byte("vm"), db) + bootstrappingDB := prefixdb.New([]byte("bootstrapping"), db) + + blocked, err := queue.New(bootstrappingDB) + if err != nil { + b.Fatal(err) + } + + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, 1000) + + vdrs := validators.NewSet() + vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + beacons := validators.NewSet() + + timeoutManager := timeout.Manager{} + timeoutManager.Initialize(2 * time.Second) + go timeoutManager.Dispatch() + + router := &router.ChainRouter{} + router.Initialize(logging.NoLog{}, &timeoutManager) + + // Initialize the VM + vm := &VM{} + ctx.Lock.Lock() + if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, nil); err != nil { + b.Fatal(err) + } + + externalSender := &sender.ExternalSenderTest{B: b} + + // Passes messages from the consensus engine to the network + sender := sender.Sender{} + + sender.Initialize(ctx, externalSender, router, &timeoutManager) + + // The engine handles consensus + engine := smeng.Transitive{} + engine.Initialize(smeng.Config{ + BootstrapConfig: smeng.BootstrapConfig{ + Config: common.Config{ + Context: ctx, + Validators: vdrs, + Beacons: beacons, + Alpha: (beacons.Len() + 1) / 2, + Sender: &sender, + }, + Blocked: blocked, + VM: vm, + }, + Params: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 20, + BetaRogue: 20, + }, + Consensus: &smcon.Topological{}, + }) + + // Asynchronously passes messages from the network to the consensus engine + handler := &handler.Handler{} + handler.Initialize(&engine, msgChan, 1000) + + // Allow incoming messages to be 
routed to the new chain + router.AddChain(handler) + go ctx.Log.RecoverAndPanic(handler.Dispatch) + + engine.Startup() + ctx.Lock.Unlock() + + wg := sync.WaitGroup{} + wg.Add(numBlocks * numTxsPerBlock) + + b.StartTimer() + for _, block := range blocks { + for _, tx := range block.txs { + ctx.Lock.Lock() + if _, err := vm.IssueTx(tx.Bytes(), func(choices.Status) { + wg.Done() + }); err != nil { + ctx.Lock.Unlock() + b.Fatal(err) + } + ctx.Lock.Unlock() + } + } + wg.Wait() + b.StopTimer() + } +} + +// BenchmarkConsensusLeader1 runs the leader consensus benchmark for 1 block +func BenchmarkConsensusLeader1(b *testing.B) { + ConsensusLeader( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// BenchmarkConsensusLeader10 runs the leader consensus benchmark for 10 blocks +func BenchmarkConsensusLeader10(b *testing.B) { + ConsensusLeader( + /*numBlocks=*/ 10, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// ConsensusFollower runs the follower consensus benchmark for blocks +func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { + b.StopTimer() + b.ResetTimer() + + ctx := snow.DefaultContextTest() + genesisData, blocks := genGenesisState( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*testing=*/ b, + ) + + maxBatchSize = 1 + for n := 0; n < b.N; n++ { + db := memdb.New() + vmDB := prefixdb.New([]byte("vm"), db) + bootstrappingDB := prefixdb.New([]byte("bootstrapping"), db) + + blocked, err := queue.New(bootstrappingDB) + if err != nil { + b.Fatal(err) + } + + // The channel through which a VM may send messages to the consensus engine + // VM uses this channel to notify engine that a block is ready to be made + msgChan := make(chan common.Message, 1000) + + vdrs := validators.NewSet() + vdrs.Add(validators.NewValidator(ctx.NodeID, 1)) + beacons := validators.NewSet() + + timeoutManager := timeout.Manager{} + timeoutManager.Initialize(2 * time.Second) + go timeoutManager.Dispatch() + + router := 
&router.ChainRouter{} + router.Initialize(logging.NoLog{}, &timeoutManager) + + wg := sync.WaitGroup{} + wg.Add(numBlocks) + + // Initialize the VM + vm := &VM{ + onAccept: func(ids.ID) { wg.Done() }, + } + ctx.Lock.Lock() + if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, nil); err != nil { + b.Fatal(err) + } + + externalSender := &sender.ExternalSenderTest{B: b} + + // Passes messages from the consensus engine to the network + sender := sender.Sender{} + + sender.Initialize(ctx, externalSender, router, &timeoutManager) + + // The engine handles consensus + engine := smeng.Transitive{} + engine.Initialize(smeng.Config{ + BootstrapConfig: smeng.BootstrapConfig{ + Config: common.Config{ + Context: ctx, + Validators: vdrs, + Beacons: beacons, + Alpha: (beacons.Len() + 1) / 2, + Sender: &sender, + }, + Blocked: blocked, + VM: vm, + }, + Params: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 20, + BetaRogue: 20, + }, + Consensus: &smcon.Topological{}, + }) + + // Asynchronously passes messages from the network to the consensus engine + handler := &handler.Handler{} + handler.Initialize(&engine, msgChan, 1000) + + // Allow incoming messages to be routed to the new chain + router.AddChain(handler) + go ctx.Log.RecoverAndPanic(handler.Dispatch) + + engine.Startup() + ctx.Lock.Unlock() + + b.StartTimer() + for _, block := range blocks { + router.Put(ctx.NodeID, ctx.ChainID, 0, block.ID(), block.Bytes()) + } + wg.Wait() + b.StopTimer() + } +} + +// BenchmarkConsensusFollower1 runs the follower consensus benchmark for 1 block +func BenchmarkConsensusFollower1(b *testing.B) { + ConsensusFollower( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// BenchmarkConsensusFollower10 runs the follower consensus benchmark for 10 blocks +func BenchmarkConsensusFollower10(b *testing.B) { + ConsensusFollower( + /*numBlocks=*/ 10, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// 
BenchmarkConsensusFollower100 runs the follower consensus benchmark for 100 blocks +func BenchmarkConsensusFollower100(b *testing.B) { + ConsensusFollower( + /*numBlocks=*/ 100, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} diff --git a/vms/spchainvm/factory.go b/vms/spchainvm/factory.go new file mode 100644 index 0000000..6cb6fe2 --- /dev/null +++ b/vms/spchainvm/factory.go @@ -0,0 +1,19 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID this VM should be referenced by +var ( + ID = ids.NewID([32]byte{'s', 'p', 'c', 'h', 'a', 'i', 'n', 'v', 'm'}) +) + +// Factory ... +type Factory struct{} + +// New ... +func (f *Factory) New() interface{} { return &VM{} } diff --git a/vms/spchainvm/key_chain.go b/vms/spchainvm/key_chain.go new file mode 100644 index 0000000..03c1879 --- /dev/null +++ b/vms/spchainvm/key_chain.go @@ -0,0 +1,104 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "errors" + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" +) + +var ( + errUnknownAccount = errors.New("unknown account") +) + +// KeyChain is a collection of keys that can be used to spend utxos +type KeyChain struct { + networkID uint32 + chainID ids.ID + // This can be used to iterate over. However, it should not be modified externally. 
+ keyMap map[[20]byte]int + Addrs ids.ShortSet + Keys []*crypto.PrivateKeySECP256K1R +} + +// NewKeyChain creates a new keychain for a chain +func NewKeyChain(networkID uint32, chainID ids.ID) *KeyChain { + return &KeyChain{ + chainID: chainID, + keyMap: make(map[[20]byte]int), + } +} + +// New returns a newly generated private key +func (kc *KeyChain) New() *crypto.PrivateKeySECP256K1R { + factory := &crypto.FactorySECP256K1R{} + + skGen, _ := factory.NewPrivateKey() + + sk := skGen.(*crypto.PrivateKeySECP256K1R) + kc.Add(sk) + return sk +} + +// Add a new key to the key chain +func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { + addr := key.PublicKey().Address() + addrHash := addr.Key() + if _, ok := kc.keyMap[addrHash]; !ok { + kc.keyMap[addrHash] = len(kc.Keys) + kc.Keys = append(kc.Keys, key) + kc.Addrs.Add(addr) + } +} + +// Get a key from the keychain. If the key is unknown, the +func (kc *KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { + if i, ok := kc.keyMap[id.Key()]; ok { + return kc.Keys[i], true + } + return &crypto.PrivateKeySECP256K1R{}, false +} + +// Addresses returns a list of addresses this keychain manages +func (kc *KeyChain) Addresses() ids.ShortSet { return kc.Addrs } + +// Spend attempts to create a new transaction +func (kc *KeyChain) Spend(account Account, amount uint64, destination ids.ShortID) (*Tx, Account, error) { + key, exists := kc.Get(account.ID()) + if !exists { + return nil, Account{}, errUnknownAccount + } + ctx := snow.DefaultContextTest() + ctx.NetworkID = kc.networkID + ctx.ChainID = kc.chainID + return account.CreateTx(amount, destination, ctx, key) +} + +// PrefixedString returns a string representation of this keychain with each +// line prepended with [prefix] +func (kc *KeyChain) PrefixedString(prefix string) string { + s := strings.Builder{} + + format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", + formatting.IntFormat(len(kc.Keys)-1)) + for i, key := range kc.Keys { + 
s.WriteString(fmt.Sprintf(format, + prefix, + i, + formatting.CB58{Bytes: key.Bytes()}, + key.PublicKey().Address())) + } + + return strings.TrimSuffix(s.String(), "\n") +} + +func (kc *KeyChain) String() string { + return kc.PrefixedString("") +} diff --git a/vms/spchainvm/live_block.go b/vms/spchainvm/live_block.go new file mode 100644 index 0000000..b0f0457 --- /dev/null +++ b/vms/spchainvm/live_block.go @@ -0,0 +1,247 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/vms/components/missing" +) + +var ( + errRejected = errors.New("block is rejected") + errMissingBlock = errors.New("missing block") +) + +// LiveBlock implements snow/snowman.Block +type LiveBlock struct { + // The VM this block exists within + vm *VM + + verifiedBlock, verifiedState bool + validity error + + // This block's parent + parent *LiveBlock + + // This block's children + children []*LiveBlock + + // The status of this block + status choices.Status + + db *versiondb.Database + + // Contains the actual transactions + block *Block +} + +// ID returns the blkID +func (lb *LiveBlock) ID() ids.ID { return lb.block.id } + +// Accept is called when this block is finalized as accepted by consensus +func (lb *LiveBlock) Accept() { + bID := lb.ID() + lb.vm.ctx.Log.Debug("Accepted block %s", bID) + + lb.status = choices.Accepted + lb.vm.lastAccepted = bID + + if err := lb.db.Commit(); err != nil { + lb.vm.ctx.Log.Debug("Failed to accept block %s due to %s", bID, err) + return + } + + for _, child := range lb.children { + child.setBaseDatabase(lb.vm.baseDB) + } + + delete(lb.vm.currentBlocks, bID.Key()) + lb.parent = nil + lb.children = nil + + for _, tx := 
range lb.block.txs { + if tx.onDecide != nil { + tx.onDecide(choices.Accepted) + } + } + if lb.vm.onAccept != nil { + lb.vm.onAccept(bID) + } +} + +// Reject is called when this block is finalized as rejected by consensus +func (lb *LiveBlock) Reject() { + lb.vm.ctx.Log.Debug("Rejected block %s", lb.ID()) + + if err := lb.vm.state.SetStatus(lb.vm.baseDB, lb.ID(), choices.Rejected); err != nil { + lb.vm.ctx.Log.Debug("Failed to reject block %s due to %s", lb.ID(), err) + return + } + + lb.status = choices.Rejected + + delete(lb.vm.currentBlocks, lb.ID().Key()) + lb.parent = nil + lb.children = nil + + for _, tx := range lb.block.txs { + if tx.onDecide != nil { + tx.onDecide(choices.Rejected) + } + } +} + +// Status returns the current status of this block +func (lb *LiveBlock) Status() choices.Status { + if lb.status == choices.Unknown { + lb.status = choices.Processing + if status, err := lb.vm.state.Status(lb.vm.baseDB, lb.block.ID()); err == nil { + lb.status = status + } + } + return lb.status +} + +// Parent returns the parent of this block +func (lb *LiveBlock) Parent() snowman.Block { + parent := lb.parentBlock() + if parent != nil { + return parent + } + return &missing.Block{BlkID: lb.block.ParentID()} +} + +func (lb *LiveBlock) parentBlock() *LiveBlock { + // If [lb]'s parent field is already non-nil, return the value in that field + if lb.parent != nil { + return lb.parent + } + // Check to see if [lb]'s parent is in currentBlocks + if parent, exists := lb.vm.currentBlocks[lb.block.ParentID().Key()]; exists { + lb.parent = parent + return parent + } + // Check to see if [lb]'s parent is in the vm database + if parent, err := lb.vm.state.Block(lb.vm.baseDB, lb.block.ParentID()); err == nil { + return &LiveBlock{ + vm: lb.vm, + block: parent, + } + } + // Parent could not be found + return nil +} + +// Bytes returns the binary representation of this transaction +func (lb *LiveBlock) Bytes() []byte { return lb.block.Bytes() } + +// Verify the validity of 
this block +func (lb *LiveBlock) Verify() error { + switch status := lb.Status(); status { + case choices.Accepted: + return nil + case choices.Rejected: + return errRejected + default: + return lb.VerifyState() + } +} + +// VerifyBlock the validity of this block +func (lb *LiveBlock) VerifyBlock() error { + if lb.verifiedBlock { + return lb.validity + } + + lb.verifiedBlock = true + lb.validity = lb.block.verify(lb.vm.ctx, &lb.vm.factory) + return lb.validity +} + +// VerifyState the validity of this block +func (lb *LiveBlock) VerifyState() error { + if err := lb.VerifyBlock(); err != nil { + return err + } + + parent := lb.parentBlock() + if parent == nil { + return errMissingBlock + } + + if err := parent.Verify(); err != nil { + return err + } + + if lb.verifiedState { + return lb.validity + } + lb.verifiedState = true + + // The database if this block were to be accepted + lb.db = versiondb.New(parent.database()) + + // Go through each transaction in this block. + // Validate each and apply its state transitions to [lb.db]. + // Verify that taken together, these transactions are valid + // (e.g. they don't result in a negative account balance, etc.) 
+ for _, tx := range lb.block.txs { + from := tx.key(lb.vm.ctx, &lb.vm.factory).Address() + fromAccount := lb.vm.GetAccount(lb.db, from) + newFromAccount, err := fromAccount.send(tx, lb.vm.ctx, &lb.vm.factory) + if err != nil { + lb.validity = err + break + } + + if err := lb.vm.state.SetAccount(lb.db, from.LongID(), newFromAccount); err != nil { + lb.validity = err + break + } + + to := tx.To() + toAccount := lb.vm.GetAccount(lb.db, to) + newToAccount, err := toAccount.receive(tx, lb.vm.ctx, &lb.vm.factory) + if err != nil { + lb.validity = err + break + } + + if err := lb.vm.state.SetAccount(lb.db, to.LongID(), newToAccount); err != nil { + lb.validity = err + break + } + } + + if err := lb.vm.state.SetStatus(lb.db, lb.ID(), choices.Accepted); err != nil { + lb.validity = err + } + + if err := lb.vm.state.SetLastAccepted(lb.db, lb.ID()); err != nil { + lb.validity = err + } + + // If this block is valid, add it as a child of its parent + // and add this block to currentBlocks + if lb.validity == nil { + parent.children = append(parent.children, lb) + lb.vm.currentBlocks[lb.block.ID().Key()] = lb + } + return lb.validity +} + +func (lb *LiveBlock) database() database.Database { + if lb.Status().Decided() { + return lb.vm.baseDB + } + return lb.db +} + +func (lb *LiveBlock) setBaseDatabase(db database.Database) { lb.db.SetDatabase(db) } diff --git a/vms/spchainvm/prefixed_state.go b/vms/spchainvm/prefixed_state.go new file mode 100644 index 0000000..7537b9a --- /dev/null +++ b/vms/spchainvm/prefixed_state.go @@ -0,0 +1,91 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" +) + +const ( + blockID uint64 = iota + accountID + statusID + lastAcceptedID + dbInitializedID +) + +var ( + lastAccepted = ids.Empty.Prefix(lastAcceptedID) + dbInitialized = ids.Empty.Prefix(dbInitializedID) +) + +// prefixedState wraps a state object. By prefixing the state, there will be no +// collisions between different types of objects that have the same hash. +type prefixedState struct { + state state + block, account, status cache.Cacher +} + +// Block attempts to load a block from storage. +func (s *prefixedState) Block(db database.Database, id ids.ID) (*Block, error) { + return s.state.Block(db, s.uniqueID(id, blockID, s.block)) +} + +// SetBlock saves a block to the database +func (s *prefixedState) SetBlock(db database.Database, id ids.ID, block *Block) error { + return s.state.SetBlock(db, s.uniqueID(id, blockID, s.block), block) +} + +// Account attempts to load an account from storage. +func (s *prefixedState) Account(db database.Database, id ids.ID) (Account, error) { + return s.state.Account(db, s.uniqueID(id, accountID, s.account)) +} + +// SetAccount saves an account to the database +func (s *prefixedState) SetAccount(db database.Database, id ids.ID, account Account) error { + return s.state.SetAccount(db, s.uniqueID(id, accountID, s.account), account) +} + +// Status returns the status of the provided block id from storage. +func (s *prefixedState) Status(db database.Database, id ids.ID) (choices.Status, error) { + return s.state.Status(db, s.uniqueID(id, statusID, s.status)) +} + +// SetStatus saves the provided status to storage. 
+func (s *prefixedState) SetStatus(db database.Database, id ids.ID, status choices.Status) error { + return s.state.SetStatus(db, s.uniqueID(id, statusID, s.status), status) +} + +// LastAccepted returns the last accepted blockID from storage. +func (s *prefixedState) LastAccepted(db database.Database) (ids.ID, error) { + return s.state.Alias(db, lastAccepted) +} + +// SetLastAccepted saves the last accepted blockID to storage. +func (s *prefixedState) SetLastAccepted(db database.Database, id ids.ID) error { + return s.state.SetAlias(db, lastAccepted, id) +} + +// DBInitialized returns the status of this database. If the database is +// uninitialized, the status will be unknown. +func (s *prefixedState) DBInitialized(db database.Database) (choices.Status, error) { + return s.state.Status(db, dbInitialized) +} + +// SetDBInitialized saves the provided status of the database. +func (s *prefixedState) SetDBInitialized(db database.Database, status choices.Status) error { + return s.state.SetStatus(db, dbInitialized, status) +} + +func (s *prefixedState) uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} diff --git a/vms/spchainvm/service.go b/vms/spchainvm/service.go new file mode 100644 index 0000000..479d532 --- /dev/null +++ b/vms/spchainvm/service.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "net/http" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +// Service defines the API exposed by the payments vm +type Service struct{ vm *VM } + +// IssueTxArgs are the arguments for IssueTx. 
+// [Tx] is the string representation of the transaction being issued +type IssueTxArgs struct { + Tx formatting.CB58 `json:"tx"` +} + +// IssueTxReply is the reply from IssueTx +// [TxID] is the ID of the issued transaction. +type IssueTxReply struct { + TxID ids.ID `json:"txID"` +} + +// IssueTx issues the transaction specified in [args] to this service +func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { + service.vm.ctx.Log.Verbo("IssueTx called with args: %s", args.Tx) + + // Issue the tx + txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) + if err != nil { + return err + } + + reply.TxID = txID + return nil +} + +// GetAccountArgs is the arguments for calling GetAccount +// [Address] is the string repr. of the address we want to know the nonce and balance of +type GetAccountArgs struct { + Address ids.ShortID `json:"address"` +} + +// GetAccountReply is the reply from calling GetAccount +// [nonce] is the nonce of the address specified in the arguments. +// [balance] is the balance of the address specified in the arguments. +type GetAccountReply struct { + Balance json.Uint64 `json:"balance"` + Nonce json.Uint64 `json:"nonce"` +} + +// GetAccount gets the nonce and balance of the account specified in [args] +func (service *Service) GetAccount(_ *http.Request, args *GetAccountArgs, reply *GetAccountReply) error { + if args.Address.IsZero() { + return errInvalidAddress + } + + account := service.vm.GetAccount(service.vm.baseDB, args.Address) + reply.Nonce = json.Uint64(account.nonce) + reply.Balance = json.Uint64(account.balance) + return nil +} diff --git a/vms/spchainvm/state.go b/vms/spchainvm/state.go new file mode 100644 index 0000000..4fe14d0 --- /dev/null +++ b/vms/spchainvm/state.go @@ -0,0 +1,117 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// state is a thin wrapper around a database to provide, caching, serialization, +// and de-serialization. +type state struct{} + +// Block attempts to load a block from storage. +func (s *state) Block(db database.Database, id ids.ID) (*Block, error) { + bytes, err := db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + c := Codec{} + return c.UnmarshalBlock(bytes) +} + +// SetBlock saves the provided block to storage. +func (s *state) SetBlock(db database.Database, id ids.ID, block *Block) error { + if block == nil { + return db.Delete(id.Bytes()) + } + return db.Put(id.Bytes(), block.bytes) +} + +// Account attempts to load an account from storage. +func (s *state) Account(db database.Database, id ids.ID) (Account, error) { + bytes, err := db.Get(id.Bytes()) + if err != nil { + return Account{}, err + } + + // The key was in the database + codec := Codec{} + return codec.UnmarshalAccount(bytes) +} + +// SetAccount saves the provided account to storage. +func (s *state) SetAccount(db database.Database, id ids.ID, account Account) error { + codec := Codec{} + bytes, err := codec.MarshalAccount(account) + if err != nil { + return err + } + return db.Put(id.Bytes(), bytes) +} + +// Status returns a status from storage. +func (s *state) Status(db database.Database, id ids.ID) (choices.Status, error) { + bytes, err := db.Get(id.Bytes()) + if err != nil { + return choices.Unknown, err + } + + // The key was in the database + p := wrappers.Packer{Bytes: bytes} + status := choices.Status(p.UnpackInt()) + + if p.Offset != len(bytes) { + p.Add(errExtraSpace) + } + if p.Errored() { + return choices.Unknown, p.Err + } + + return status, nil +} + +// SetStatus saves a status in storage. 
+func (s *state) SetStatus(db database.Database, id ids.ID, status choices.Status) error { + if status == choices.Unknown { + return db.Delete(id.Bytes()) + } + + p := wrappers.Packer{Bytes: make([]byte, 4)} + + p.PackInt(uint32(status)) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + if p.Errored() { + return p.Err + } + + return db.Put(id.Bytes(), p.Bytes) +} + +// Alias returns an ID from storage. +func (s *state) Alias(db database.Database, id ids.ID) (ids.ID, error) { + bytes, err := db.Get(id.Bytes()) + if err != nil { + return ids.ID{}, err + } + + // The key was in the database + return ids.ToID(bytes) +} + +// SetAlias saves an id in storage. +func (s *state) SetAlias(db database.Database, id ids.ID, alias ids.ID) error { + if alias.IsZero() { + return db.Delete(id.Bytes()) + } + return db.Put(id.Bytes(), alias.Bytes()) +} diff --git a/vms/spchainvm/static_service.go b/vms/spchainvm/static_service.go new file mode 100644 index 0000000..095afa1 --- /dev/null +++ b/vms/spchainvm/static_service.go @@ -0,0 +1,56 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "errors" + "net/http" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +var ( + errAccountHasNoValue = errors.New("account has no value") +) + +// StaticService defines the static API exposed by the payments vm +type StaticService struct{} + +// APIAccount ... 
+type APIAccount struct { + Address ids.ShortID `json:"address"` + Balance json.Uint64 `json:"balance"` +} + +// BuildGenesisArgs are arguments for BuildGenesis +type BuildGenesisArgs struct { + Accounts []APIAccount `json:"accounts"` +} + +// BuildGenesisReply is the reply from BuildGenesis +type BuildGenesisReply struct { + Bytes formatting.CB58 `json:"bytes"` +} + +// BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is +// referenced in the UTXO. +func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + b := Builder{} + + accounts := []Account(nil) + for _, account := range args.Accounts { + if account.Balance == 0 { + return errAccountHasNoValue + } + + accounts = append(accounts, b.NewAccount(account.Address, 0, uint64(account.Balance))) + } + + c := Codec{} + bytes, err := c.MarshalGenesis(accounts) + reply.Bytes.Bytes = bytes + return err +} diff --git a/vms/spchainvm/static_service_test.go b/vms/spchainvm/static_service_test.go new file mode 100644 index 0000000..1109160 --- /dev/null +++ b/vms/spchainvm/static_service_test.go @@ -0,0 +1,58 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestBuildGenesis(t *testing.T) { + expected := "111DngowbGtZTAwG9sRhy3EA1NeavNNa7AyDkAdo8N43M5ZYq3bJwmm9Ls" + + addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + + account := APIAccount{ + Address: addr, + Balance: 123456789, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err != nil { + t.Fatal(err) + } + + if reply.Bytes.String() != expected { + t.Fatalf("StaticService.BuildGenesis:\nReturned: %s\nExpected: %s", reply.Bytes, expected) + } +} + +func TestBuildGenesisInvalidAmount(t *testing.T) { + addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + + account := APIAccount{ + Address: addr, + Balance: 0, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err == nil { + t.Fatalf("Should have errored due to an invlaid amount") + } +} diff --git a/vms/spchainvm/tx.go b/vms/spchainvm/tx.go new file mode 100644 index 0000000..fb7347c --- /dev/null +++ b/vms/spchainvm/tx.go @@ -0,0 +1,143 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/crypto" +) + +var ( + errNilTx = errors.New("nil tx") + errTxHasNoValue = errors.New("tx has no value") + errWrongNetworkID = errors.New("tx has wrong network ID") + errWrongChainID = errors.New("tx has wrong chain ID") +) + +// Tx is a monetary transfer +type Tx struct { + // The ID of this transaction + id ids.ID + + networkID uint32 + + // The ID of the chain this transaction was issued on. 
+ // Used to prevent replay attacks. Without this field, an attacker could + // re-issue a transaction sent on another chain running the same vm. + chainID ids.ID + + // The recipient of the transfered funds + to ids.ShortID + + // The nonce of the transaction + nonce uint64 + + // The amount to be transfered + amount uint64 + + // The signature on this transaction (namely, on [bytes]) + sig []byte + + // The public key that authorized this transaction + pubkey crypto.PublicKey + + // The byte representation of this transaction + bytes []byte + + // Called when this transaction is decided + onDecide func(choices.Status) + + startedVerification, finishedVerification bool + verificationErr error + verification chan error +} + +// ID of this transaction +func (tx *Tx) ID() ids.ID { return tx.id } + +// Nonce is the new nonce of the account this transaction is being sent from +func (tx *Tx) Nonce() uint64 { return tx.nonce } + +// Amount is the number of units to transfer to the recipient +func (tx *Tx) Amount() uint64 { return tx.amount } + +// To is the address this transaction is sending to +func (tx *Tx) To() ids.ShortID { return tx.to } + +// Bytes is the byte representation of this transaction +func (tx *Tx) Bytes() []byte { return tx.bytes } + +// Key returns the public key used to authorize this transaction +// This function also sets [tx]'s public key +func (tx *Tx) Key(ctx *snow.Context) crypto.PublicKey { return tx.key(ctx, &crypto.FactorySECP256K1R{}) } + +func (tx *Tx) key(ctx *snow.Context, factory *crypto.FactorySECP256K1R) crypto.PublicKey { + tx.verify(ctx, factory) // Sets the public key + return tx.pubkey +} + +// Verify that this transaction is well formed +func (tx *Tx) Verify(ctx *snow.Context) error { return tx.verify(ctx, &crypto.FactorySECP256K1R{}) } + +func (tx *Tx) verify(ctx *snow.Context, factory *crypto.FactorySECP256K1R) error { + // Check if tx has already been verified + if tx.finishedVerification { + return tx.verificationErr + } + + 
// past this point, we know verification has neither passed nor failed in the past + tx.startVerify(ctx, factory) + + // Wait for verification to complete + tx.verificationErr = <-tx.verification + tx.finishedVerification = true + return tx.verificationErr +} + +func (tx *Tx) startVerify(ctx *snow.Context, factory *crypto.FactorySECP256K1R) { + // See if verification has been started. + // If not, start verification + if !tx.startedVerification { + tx.startedVerification = true + go func(tx *Tx, ctx *snow.Context, factory *crypto.FactorySECP256K1R) { + tx.verification <- tx.syncVerify(ctx, factory) + }(tx, ctx, factory) + } +} + +func (tx *Tx) syncVerify(ctx *snow.Context, factory *crypto.FactorySECP256K1R) error { + switch { + case tx == nil: + return errNilTx + case tx.pubkey != nil: + return nil + case tx.amount == 0: + return errTxHasNoValue + case tx.networkID != ctx.NetworkID: + return errWrongNetworkID + case !tx.chainID.Equals(ctx.ChainID): + return errWrongChainID + } + + codec := Codec{} + // The byte repr. 
of this transaction, unsigned + unsignedBytes, err := codec.MarshalUnsignedTx(tx) + if err != nil { + return err + } + + // Using [unsignedBytes] and [tx.sig], derive the public key + // that authorized this transaction + key, err := factory.RecoverPublicKey(unsignedBytes, tx.sig) + if err != nil { + return err + } + + tx.pubkey = key + return nil +} diff --git a/vms/spchainvm/tx_benchmark_test.go b/vms/spchainvm/tx_benchmark_test.go new file mode 100644 index 0000000..f7ef638 --- /dev/null +++ b/vms/spchainvm/tx_benchmark_test.go @@ -0,0 +1,69 @@ +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" +) + +func genTxs(numTxs int, offset uint64, b *testing.B) []*Tx { + ctx := snow.DefaultContextTest() + builder := Builder{ + NetworkID: ctx.NetworkID, + ChainID: ctx.ChainID, + } + factory := crypto.FactorySECP256K1R{} + + destKey, err := factory.NewPrivateKey() + if err != nil { + b.Fatal(err) + } + dest := destKey.PublicKey().Address() + + txs := make([]*Tx, numTxs)[:0] + for i := 1; i <= numTxs; i++ { + keyIntf, err := factory.NewPrivateKey() + if err != nil { + b.Fatal(err) + } + sk := keyIntf.(*crypto.PrivateKeySECP256K1R) + + tx, err := builder.NewTx(sk, uint64(i)+offset, uint64(i)+offset, dest) + if err != nil { + b.Fatal(err) + } + txs = append(txs, tx) + } + return txs +} + +func verifyTxs(txs []*Tx, b *testing.B) { + ctx := snow.DefaultContextTest() + factory := crypto.FactorySECP256K1R{} + for _, tx := range txs { + if err := tx.verify(ctx, &factory); err != nil { + b.Fatal(err) + } + + // reset the tx so that it won't be cached + tx.pubkey = nil + tx.startedVerification = false + tx.finishedVerification = false + tx.verificationErr = nil + } +} + +// BenchmarkTxVerify runs the benchmark of transaction verification +func BenchmarkTxVerify(b *testing.B) { + txs := genTxs( + /*numTxs=*/ 1, + /*initialOffset=*/ 0, + /*testing=*/ b, + ) + b.ResetTimer() + + for n := 0; n < b.N; n++ { + 
verifyTxs(txs, b) + } +} diff --git a/vms/spchainvm/tx_test.go b/vms/spchainvm/tx_test.go new file mode 100644 index 0000000..108b35b --- /dev/null +++ b/vms/spchainvm/tx_test.go @@ -0,0 +1,52 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" +) + +// Ensure transaction verification fails when the transaction has the wrong +func TestVerifyTxWrongChainID(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5}) + // Create a tx with chainID [chainID] + builder := Builder{ + NetworkID: 0, + ChainID: chainID, + } + tx, err := builder.NewTx(keys[0], 0, 1, keys[1].PublicKey().Address()) + if err != nil { + t.Fatal(err) + } + + ctx := snow.DefaultContextTest() + + // Ensure that it fails verification when we try to verify it using + // a different chain ID + if err := tx.Verify(ctx); err != errWrongChainID { + t.Fatalf("Should have failed with errWrongChainID") + } +} + +// Ensure transaction verification fails when the transaction has the wrong +func TestVerifyTxCorrectChainID(t *testing.T) { + ctx := snow.DefaultContextTest() + // Create a tx with chainID [chainID] + builder := Builder{ + NetworkID: 0, + ChainID: ctx.ChainID, + } + tx, err := builder.NewTx(keys[0], 0, 1, keys[1].PublicKey().Address()) + if err != nil { + t.Fatal(err) + } + + // Ensure it passes verification when we use correct chain ID + if err := tx.Verify(ctx); err != nil { + t.Fatalf("Should have passed verification") + } +} diff --git a/vms/spchainvm/vm.go b/vms/spchainvm/vm.go new file mode 100644 index 0000000..6b71ce6 --- /dev/null +++ b/vms/spchainvm/vm.go @@ -0,0 +1,308 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spchainvm + +import ( + "errors" + "time" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/wrappers" + + jsoncodec "github.com/ava-labs/gecko/utils/json" +) + +const ( + batchTimeout = time.Second + idCacheSize = 10000 + sigCache = 10000 +) + +var ( + maxBatchSize = 30 +) + +var ( + errNoTxs = errors.New("no transactions") + errUnknownBlock = errors.New("unknown block") + errUnsupportedFXs = errors.New("unsupported feature extensions") +) + +// VM implements the snowman.ChainVM interface +type VM struct { + ctx *snow.Context + + // State management + state *prefixedState + baseDB database.Database + + factory crypto.FactorySECP256K1R + + // The ID of the last accepted block + lastAccepted ids.ID + + // Transaction issuing + preferred ids.ID + toEngine chan<- common.Message + timer *timer.Timer + txs []*Tx + + currentBlocks map[[32]byte]*LiveBlock + + onAccept func(ids.ID) +} + +/* + ****************************************************************************** + ********************************* Snowman API ******************************** + ****************************************************************************** + */ + +// Initialize implements the snowman.ChainVM interface +func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, +) error { + if len(fxs) != 0 { + return errUnsupportedFXs + } + vm.ctx = ctx + vm.state = &prefixedState{ + block: &cache.LRU{Size: idCacheSize}, + 
account: &cache.LRU{Size: idCacheSize}, + status: &cache.LRU{Size: idCacheSize}, + } + vm.baseDB = db + vm.factory.Cache.Size = sigCache + + if dbStatus, err := vm.state.DBInitialized(db); err != nil || dbStatus == choices.Unknown { + if err := vm.initState(genesisBytes); err != nil { + return err + } + } + + vm.timer = timer.NewTimer(func() { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.FlushTxs() + }) + go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) + vm.toEngine = toEngine + + lastAccepted, err := vm.state.LastAccepted(db) + if err != nil { + return err + } + vm.lastAccepted = lastAccepted + + vm.currentBlocks = make(map[[32]byte]*LiveBlock) + return nil +} + +// Shutdown implements the snowman.ChainVM interface +func (vm *VM) Shutdown() { + vm.timer.Stop() + if err := vm.baseDB.Close(); err != nil { + vm.ctx.Log.Error("Closing the database failed with %s", err) + } +} + +// BuildBlock implements the snowman.ChainVM interface +func (vm *VM) BuildBlock() (snowman.Block, error) { + vm.timer.Cancel() + + if len(vm.txs) == 0 { + return nil, errNoTxs + } + + defer vm.FlushTxs() + + txs := vm.txs + if len(txs) > maxBatchSize { + txs = txs[:maxBatchSize] + } + vm.txs = vm.txs[len(txs):] + + builder := Builder{ + NetworkID: 0, + ChainID: vm.ctx.ChainID, + } + rawBlock, err := builder.NewBlock(vm.preferred, txs) + if err != nil { + vm.ctx.Log.Warn("Dropping transactions due to %s", err) + return nil, err + } + rawBlock.startVerify(vm.ctx, &vm.factory) + block := &LiveBlock{ + vm: vm, + block: rawBlock, + } + return block, vm.state.SetBlock(vm.baseDB, rawBlock.ID(), rawBlock) +} + +// ParseBlock implements the snowman.ChainVM interface +func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { + c := Codec{} + rawBlock, err := c.UnmarshalBlock(b) + if err != nil { + return nil, err + } + rawBlock.startVerify(vm.ctx, &vm.factory) + block := &LiveBlock{ + vm: vm, + block: rawBlock, + } + return block, vm.state.SetBlock(vm.baseDB, rawBlock.ID(), rawBlock) +} + +// 
GetBlock implements the snowman.ChainVM interface +func (vm *VM) GetBlock(id ids.ID) (snowman.Block, error) { + blk, err := vm.state.Block(vm.baseDB, id) + if err != nil { + return nil, err + } + blk.startVerify(vm.ctx, &vm.factory) + return &LiveBlock{ + vm: vm, + block: blk, + }, nil +} + +// SetPreference sets what the current tail of the chain is +func (vm *VM) SetPreference(preferred ids.ID) { vm.preferred = preferred } + +// LastAccepted returns the last accepted block ID +func (vm *VM) LastAccepted() ids.ID { return vm.lastAccepted } + +// CreateHandlers makes new service objects with references to the vm +func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { + newServer := rpc.NewServer() + codec := jsoncodec.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&Service{vm: vm}, "spchain") // Name the API service "spchain" + return map[string]*common.HTTPHandler{ + "": &common.HTTPHandler{LockOptions: common.WriteLock, Handler: newServer}, + } +} + +// CreateStaticHandlers makes new service objects without references to the vm +func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { + newServer := rpc.NewServer() + codec := jsoncodec.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&StaticService{}, "spchain") // Name the API service "spchain" + return map[string]*common.HTTPHandler{ + "": &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer}, + } +} + +// IssueTx ... 
+// TODO: Remove this +func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) { + codec := Codec{} + tx, err := codec.UnmarshalTx(b) + if err != nil { + return ids.ID{}, err + } + tx.startVerify(vm.ctx, &vm.factory) + tx.onDecide = onDecide + vm.issueTx(tx) + return tx.id, nil +} + +// GetAccount returns the account with the specified address +func (vm *VM) GetAccount(db database.Database, address ids.ShortID) Account { + if account, err := vm.state.Account(db, address.LongID()); err == nil { + return account + } + builder := Builder{ + NetworkID: 0, + ChainID: vm.ctx.ChainID, + } + return builder.NewAccount(address, 0, 0) +} + +/* + ****************************************************************************** + ********************************** Timer API ********************************* + ****************************************************************************** + */ + +// FlushTxs into consensus +func (vm *VM) FlushTxs() { + vm.timer.Cancel() + if len(vm.txs) > 0 { + select { + case vm.toEngine <- common.PendingTxs: + default: + vm.ctx.Log.Warn("Dropping block due to too frequent issuance") + vm.timer.SetTimeoutIn(batchTimeout) + } + } +} + +/* + ****************************************************************************** + ******************************* Implementation ******************************* + ****************************************************************************** + */ + +// Consensus: + +func (vm *VM) initState(genesisBytes []byte) error { + errs := wrappers.Errs{} + + vdb := versiondb.New(vm.baseDB) + + b := Builder{} + block, err := b.NewBlock(ids.Empty, nil) + errs.Add(err) + + c := Codec{} + accounts, err := c.UnmarshalGenesis(genesisBytes) + errs.Add(err) + errs.Add(vm.state.SetBlock(vdb, block.ID(), block)) + errs.Add(vm.state.SetStatus(vdb, block.ID(), choices.Accepted)) + errs.Add(vm.state.SetLastAccepted(vdb, block.ID())) + for _, account := range accounts { + errs.Add(vm.state.SetAccount(vdb, 
account.ID().LongID(), account)) + } + errs.Add(vm.state.SetDBInitialized(vdb, choices.Processing)) + + if errs.Errored() { + return errs.Err + } + + return vdb.Commit() +} + +func (vm *VM) issueTx(tx *Tx) { + vm.ctx.Log.Verbo("Issuing tx:\n%s", formatting.DumpBytes{Bytes: tx.Bytes()}) + + vm.txs = append(vm.txs, tx) + switch { + case len(vm.txs) == maxBatchSize: + vm.FlushTxs() + case len(vm.txs) == 1: + vm.timer.SetTimeoutIn(batchTimeout) + } +} diff --git a/vms/spchainvm/vm_benchmark_test.go b/vms/spchainvm/vm_benchmark_test.go new file mode 100644 index 0000000..ac4ca13 --- /dev/null +++ b/vms/spchainvm/vm_benchmark_test.go @@ -0,0 +1,342 @@ +package spchainvm + +import ( + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" +) + +func genGenesisState(numBlocks, numTxsPerBlock int, b *testing.B) ([]byte, []*Block) { + ctx := snow.DefaultContextTest() + builder := Builder{ + NetworkID: ctx.NetworkID, + ChainID: ctx.ChainID, + } + + genesisBlock, err := builder.NewBlock(ids.Empty, nil) + if err != nil { + b.Fatal(err) + } + + blocks := genBlocks( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*initialParent=*/ genesisBlock.ID(), + /*testing=*/ b, + ) + + genesisAccounts := []Account{} + for _, block := range blocks { + for _, tx := range block.txs { + genesisAccounts = append( + genesisAccounts, + Account{ + id: tx.Key(ctx).Address(), + nonce: tx.nonce - 1, + balance: tx.amount, + }, + ) + } + } + + codec := Codec{} + genesisData, err := codec.MarshalGenesis(genesisAccounts) + if err != nil { + b.Fatal(err) + } + + return genesisData, blocks +} + +func genGenesisStateBytes(numBlocks, numTxsPerBlock int, b *testing.B) ([]byte, [][]byte) { + genesisData, blocks := genGenesisState(numBlocks, numTxsPerBlock, b) + blockBytes := make([][]byte, numBlocks) + for i, block := 
range blocks { + blockBytes[i] = block.Bytes() + } + return genesisData, blockBytes +} + +// BenchmarkParseBlock runs the benchmark of parsing blocks +func BenchmarkParseBlock(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + ctx := snow.DefaultContextTest() + genesisBytes, blocks := genGenesisStateBytes( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) + vm := &VM{} + vm.Initialize( + /*ctx=*/ ctx, + /*db=*/ memdb.New(), + /*genesis=*/ genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + for n := 0; n < b.N; n++ { + for _, blockBytes := range blocks { + vm.state.block.Flush() + + b.StartTimer() + if _, err := vm.ParseBlock(blockBytes); err != nil { + b.Fatal(err) + } + b.StopTimer() + } + } +} + +// BenchmarkParseAndVerify runs the benchmark of parsing blocks and verifying them +func BenchmarkParseAndVerify(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + genesisBytes, blocks := genGenesisStateBytes( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) + + for n := 0; n < b.N; n++ { + vm := &VM{} + vm.Initialize( + /*ctx=*/ snow.DefaultContextTest(), + /*db=*/ memdb.New(), + /*genesis=*/ genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + + b.StartTimer() + for _, blockBytes := range blocks { + blk, err := vm.ParseBlock(blockBytes) + if err != nil { + b.Fatal(err) + } + if err := blk.Verify(); err != nil { + b.Fatal(err) + } + } + b.StopTimer() + } +} + +// BenchmarkAccept runs the benchmark of accepting blocks +func BenchmarkAccept(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + genesisBytes, blocks := genGenesisStateBytes( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) + + for n := 0; n < b.N; n++ { + vm := &VM{} + vm.Initialize( + /*ctx=*/ snow.DefaultContextTest(), + /*db=*/ memdb.New(), + /*genesis=*/ genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + + for _, blockBytes := range blocks { + blk, err := 
vm.ParseBlock(blockBytes) + if err != nil { + b.Fatal(err) + } + if err := blk.Verify(); err != nil { + b.Fatal(err) + } + + b.StartTimer() + blk.Accept() + b.StopTimer() + } + } +} + +// ParseAndVerifyAndAccept runs the benchmark of parsing, verifying, and accepting blocks +func ParseAndVerifyAndAccept(numBlocks, numTxsPerBlock int, b *testing.B) { + b.StopTimer() + b.ResetTimer() + + genesisBytes, blocks := genGenesisStateBytes( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*testing=*/ b, + ) + + for n := 0; n < b.N; n++ { + vm := &VM{} + vm.Initialize( + /*ctx=*/ snow.DefaultContextTest(), + /*db=*/ memdb.New(), + /*genesis=*/ genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + + b.StartTimer() + for _, blockBytes := range blocks { + blk, err := vm.ParseBlock(blockBytes) + if err != nil { + b.Fatal(err) + } + if err := blk.Verify(); err != nil { + b.Fatal(err) + } + blk.Accept() + } + b.StopTimer() + } +} + +// BenchmarkParseAndVerifyAndAccept1 runs the benchmark of parsing, verifying, and accepting 1 block +func BenchmarkParseAndVerifyAndAccept1(b *testing.B) { + ParseAndVerifyAndAccept( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// BenchmarkParseAndVerifyAndAccept10 runs the benchmark of parsing, verifying, and accepting 10 blocks +func BenchmarkParseAndVerifyAndAccept10(b *testing.B) { + ParseAndVerifyAndAccept( + /*numBlocks=*/ 10, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// ParseThenVerifyThenAccept runs the benchmark of parsing then verifying and then accepting blocks +func ParseThenVerifyThenAccept(numBlocks, numTxsPerBlock int, b *testing.B) { + b.StopTimer() + b.ResetTimer() + + genesisBytes, blocks := genGenesisStateBytes( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*testing=*/ b, + ) + + for n := 0; n < b.N; n++ { + vm := &VM{} + vm.Initialize( + /*ctx=*/ snow.DefaultContextTest(), + /*db=*/ memdb.New(), + /*genesis=*/ 
genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + + b.StartTimer() + parsedBlocks := make([]snowman.Block, len(blocks)) + for i, blockBytes := range blocks { + blk, err := vm.ParseBlock(blockBytes) + if err != nil { + b.Fatal(err) + } + parsedBlocks[i] = blk + } + for _, blk := range parsedBlocks { + if err := blk.Verify(); err != nil { + b.Fatal(err) + } + } + for _, blk := range parsedBlocks { + blk.Accept() + } + b.StopTimer() + } +} + +// BenchmarkParseThenVerifyThenAccept1 runs the benchmark of parsing then verifying and then accepting 1 block +func BenchmarkParseThenVerifyThenAccept1(b *testing.B) { + ParseThenVerifyThenAccept( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// BenchmarkParseThenVerifyThenAccept10 runs the benchmark of parsing then verifying and then accepting 10 blocks +func BenchmarkParseThenVerifyThenAccept10(b *testing.B) { + ParseThenVerifyThenAccept( + /*numBlocks=*/ 10, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// IssueAndVerifyAndAccept runs the benchmark of issuing, verifying, and accepting blocks +func IssueAndVerifyAndAccept(numBlocks, numTxsPerBlock int, b *testing.B) { + b.StopTimer() + b.ResetTimer() + + genesisBytes, blocks := genGenesisState( + /*numBlocks=*/ numBlocks, + /*numTxsPerBlock=*/ numTxsPerBlock, + /*testing=*/ b, + ) + + for n := 0; n < b.N; n++ { + vm := &VM{} + vm.Initialize( + /*ctx=*/ snow.DefaultContextTest(), + /*db=*/ memdb.New(), + /*genesis=*/ genesisBytes, + /*engineChan=*/ make(chan common.Message, 1), + /*fxs=*/ nil, + ) + vm.SetPreference(vm.LastAccepted()) + + b.StartTimer() + for _, block := range blocks { + for _, tx := range block.txs { + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + b.Fatal(err) + } + } + + blk, err := vm.BuildBlock() + if err != nil { + b.Fatal(err) + } + if err := blk.Verify(); err != nil { + b.Fatal(err) + } + vm.SetPreference(blk.ID()) + blk.Accept() + } + b.StopTimer() + } +} + +// 
BenchmarkIssueAndVerifyAndAccept1 runs the benchmark of issuing, verifying, and accepting 1 block +func BenchmarkIssueAndVerifyAndAccept1(b *testing.B) { + IssueAndVerifyAndAccept( + /*numBlocks=*/ 1, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} + +// BenchmarkIssueAndVerifyAndAccept10 runs the benchmark of issuing, verifying, and accepting 10 blocks +func BenchmarkIssueAndVerifyAndAccept10(b *testing.B) { + IssueAndVerifyAndAccept( + /*numBlocks=*/ 10, + /*numTxsPerBlock=*/ 1, + /*testing=*/ b, + ) +} diff --git a/vms/spchainvm/vm_test.go b/vms/spchainvm/vm_test.go new file mode 100644 index 0000000..4432d1e --- /dev/null +++ b/vms/spchainvm/vm_test.go @@ -0,0 +1,146 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spchainvm + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowball" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/common/queue" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/units" + + smcon "github.com/ava-labs/gecko/snow/consensus/snowman" + smeng "github.com/ava-labs/gecko/snow/engine/snowman" +) + +var keys []*crypto.PrivateKeySECP256K1R + +var ctx = snow.DefaultContextTest() + +func init() { + cb58 := formatting.CB58{} + factory := crypto.FactorySECP256K1R{} + + for _, key := range []string{ + "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", + "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", + "cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35", + } { + ctx.Log.AssertNoError(cb58.FromString(key)) + pk, err := factory.ToPrivateKey(cb58.Bytes) + ctx.Log.AssertNoError(err) + keys = append(keys, 
pk.(*crypto.PrivateKeySECP256K1R)) + } +} + +func GenesisAccounts() []Account { + accounts := []Account(nil) + for _, key := range keys { + accounts = append(accounts, + Account{ + id: key.PublicKey().Address(), + balance: 20 * units.KiloAva, + }) + } + return accounts +} + +func TestPayments(t *testing.T) { + genesisAccounts := GenesisAccounts() + + codec := Codec{} + genesisData, _ := codec.MarshalGenesis(genesisAccounts) + db := memdb.New() + bootstrappingDB := memdb.New() + + msgChan := make(chan common.Message, 1) + blocker, _ := queue.New(bootstrappingDB) + + vm := &VM{} + vm.Initialize(ctx, db, genesisData, msgChan, nil) + + sender := &common.SenderTest{} + sender.T = t + sender.Default(true) + + vdrs := validators.NewSet() + vdr := validators.GenerateRandomValidator(1) + vdrs.Add(vdr) + + ctx.Lock.Lock() + consensus := smeng.Transitive{} + consensus.Initialize(smeng.Config{ + BootstrapConfig: smeng.BootstrapConfig{ + Config: common.Config{ + Context: ctx, + Validators: vdrs, + Beacons: validators.NewSet(), + Sender: sender, + }, + Blocked: blocker, + VM: vm, + }, + Params: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Consensus: &smcon.Topological{}, + }) + consensus.Startup() + + account := vm.GetAccount(vm.baseDB, keys[0].PublicKey().Address()) + + tx, _, err := account.CreateTx(200, keys[1].PublicKey().Address(), ctx, keys[0]) + if err != nil { + t.Fatal(err) + } + + vm.issueTx(tx) + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + queriedVtxID := new(ids.ID) + queried := new(int) + queryRequestID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, vtxID ids.ID, _ []byte) { + *queriedVtxID = vtxID + *queried++ + *queryRequestID = requestID + } + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + consensus.Notify(common.PendingTxs) + + sender.PushQueryF = nil + if *queried != 1 { + t.Fatalf("Should have 
launched one query for the vertex") + } + + queriedVtxIDSet := ids.Set{} + queriedVtxIDSet.Add(*queriedVtxID) + consensus.Chits(vdr.ID(), *queryRequestID, queriedVtxIDSet) + + if account := vm.GetAccount(vm.baseDB, keys[0].PublicKey().Address()); account.Balance() != 20*units.KiloAva-200 { + t.Fatalf("Wrong Balance") + } else if account := vm.GetAccount(vm.baseDB, keys[1].PublicKey().Address()); account.Balance() != 20*units.KiloAva+200 { + t.Fatalf("Wrong Balance") + } +} diff --git a/vms/spdagvm/builder.go b/vms/spdagvm/builder.go new file mode 100644 index 0000000..5087b36 --- /dev/null +++ b/vms/spdagvm/builder.go @@ -0,0 +1,195 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" +) + +var ( + errInputSignerMismatch = errors.New("wrong number of signers for the inputs") + errNilSigner = errors.New("nil signer") + errNilChainID = errors.New("nil chain id") +) + +// Builder defines the functionality for building payment objects. +type Builder struct { + NetworkID uint32 + ChainID ids.ID +} + +// NewInputPayment returns a new input that consumes a UTXO. +func (b Builder) NewInputPayment(sourceID ids.ID, sourceIndex uint32, amount uint64, sigs []*Sig) Input { + SortTxSig(sigs) + + return &InputPayment{ + sourceID: sourceID, + sourceIndex: sourceIndex, + amount: amount, + sigs: sigs, + } +} + +// NewOutputPayment returns a new output that generates a standard UTXO. 
+func (b Builder) NewOutputPayment( + amount, + locktime uint64, + threshold uint32, + addresses []ids.ShortID, +) Output { + ids.SortShortIDs(addresses) + + return &OutputPayment{ + amount: amount, + locktime: locktime, + threshold: threshold, + addresses: addresses, + } +} + +// NewOutputTakeOrLeave returns a new output that generates a UTXO with all the +// features of the standard UTXO, plus the ability to have a different set of +// addresses spend the asset after the specified time. +func (b Builder) NewOutputTakeOrLeave( + amount, + locktime1 uint64, + threshold1 uint32, + addresses1 []ids.ShortID, + locktime2 uint64, + threshold2 uint32, + addresses2 []ids.ShortID, +) Output { + ids.SortShortIDs(addresses1) + ids.SortShortIDs(addresses2) + + return &OutputTakeOrLeave{ + amount: amount, + locktime1: locktime1, + threshold1: threshold1, + addresses1: addresses1, + locktime2: locktime2, + threshold2: threshold2, + addresses2: addresses2, + } +} + +// NewSig returns a new signature object. This object will specify the address +// in the UTXO that will be used to authorize the wrapping input. +func (b Builder) NewSig(index uint32) *Sig { return &Sig{index: index} } + +// NewTxFromUTXOs returns a new transaction where: +// * One of the outputs is an Output with [amount] ava that is controlled by [toAddr]. +// * This output can't be spent until at least [locktime]. +// * If there is any "change" there is another output controlled by [changeAddr] with the change. +// * The UTXOs consumed to make this transaction are a subset of [utxos]. 
+// * The keys controlling [utxos] are in [keyChain] +func (b Builder) NewTxFromUTXOs(keyChain *KeyChain, utxos []*UTXO, amount, txFee, locktime uint64, + threshold uint32, toAddrs []ids.ShortID, changeAddr ids.ShortID, currentTime uint64) (*Tx, error) { + + ins := []Input{} // Consumed by this transaction + signers := []*InputSigner{} // Each corresponds to an input consumed by this transaction + + amountPlusTxFee, err := math.Add64(amount, txFee) + if err != nil { + return nil, errAmountOverflow + } + + spent := uint64(0) // The sum of the UTXOs consumed in this transaction + for i := 0; i < len(utxos) && amountPlusTxFee > spent; i++ { + utxo := utxos[i] + if in, signer, err := keyChain.Spend(utxo, currentTime); err == nil { + ins = append(ins, in) + amount := in.(*InputPayment).Amount() + spent += amount + signers = append(signers, signer) + } + } + + if spent < amountPlusTxFee { + return nil, errInsufficientFunds + } + + outs := []Output{ // List of outputs + b.NewOutputPayment(amount, locktime, threshold, toAddrs), // The primary output + } + + // If there is "change", add another output + if spent > amountPlusTxFee { + outs = append(outs, + b.NewOutputPayment(spent-amountPlusTxFee, 0, 1, []ids.ShortID{changeAddr}), + ) + } + + return b.NewTx(ins, outs, signers) // Sort, marshal, sign, build the transaction +} + +// NewTx returns a new transaction where: +// * The inputs to the Tx are [ins], sorted. +// * The outputs of the Tx are [outs], sorted +// * The ith signer will be used to sign the ith input. This means that len([inputs]) must be == len([signers]). 
+// TODO: Should the signer be part of the input +func (b Builder) NewTx(ins []Input, outs []Output, signers []*InputSigner) (*Tx, error) { + if b.ChainID.IsZero() { + return nil, errNilChainID + } + if len(ins) != len(signers) { + return nil, errInputSignerMismatch + } + + SortOuts(outs) + SortIns(ins, signers) + + t := &Tx{ + networkID: b.NetworkID, + chainID: b.ChainID, + ins: ins, + outs: outs, + } + + c := Codec{} + unsignedBytes, err := c.MarshalUnsignedTx(t) + if err != nil { + return nil, err + } + unsignedHash := hashing.ComputeHash256(unsignedBytes) + + for i, rawIn := range t.ins { + switch in := rawIn.(type) { + case *InputPayment: + signer := signers[i] + if signer == nil { + return nil, errNilSigner + } + + for j, key := range signer.Keys { + if !crypto.EnableCrypto { + in.sigs[j].sig = make([]byte, crypto.SECP256K1RSigLen) + continue + } + + sig, err := key.SignHash(unsignedHash) + if err != nil { + return nil, err + } + + in.sigs[j].sig = sig + } + } + } + + bytes, err := c.MarshalTx(t) + if err != nil { + return nil, err + } + + t.bytes = bytes + t.id = ids.NewID(hashing.ComputeHash256Array(t.bytes)) + + return t, nil +} diff --git a/vms/spdagvm/codec.go b/vms/spdagvm/codec.go new file mode 100644 index 0000000..c4ac9c9 --- /dev/null +++ b/vms/spdagvm/codec.go @@ -0,0 +1,530 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errBadCodec = errors.New("wrong or unknown codec used") + errExtraSpace = errors.New("trailing buffer space") + errOutputType = errors.New("unknown output type") + errInputType = errors.New("unknown input type") + errNil = errors.New("nil value is invalid") +) + +// CodecID is an identifier for a codec +type CodecID uint32 + +// Codec types +const ( + NoID CodecID = iota + GenericID + CustomID + // TODO: Utilize a standard serialization library. Must have a canonical + // serialization format. +) + +// Verify that the codec is a known codec value. Returns nil if the codec is +// valid. +func (c CodecID) Verify() error { + switch c { + case NoID, GenericID, CustomID: + return nil + default: + return errBadCodec + } +} + +func (c CodecID) String() string { + switch c { + case NoID: + return "No Codec" + case GenericID: + return "Generic Codec" + case CustomID: + return "Custom Codec" + default: + return "Unknown Codec" + } +} + +// MaxSize is the maximum allowed tx size. It is necessary to deter DoS. +const MaxSize = 1 << 18 + +// Output types +const ( + OutputPaymentID uint32 = iota + OutputTakeOrLeaveID +) + +// Input types +const ( + InputID uint32 = iota +) + +// Codec is used to serialize and de-serialize transaction objects +type Codec struct{} + +/* + ****************************************************************************** + ************************************* Tx ************************************* + ****************************************************************************** + */ + +/* Unsigned Tx: + * Codec | 04 bytes + * Network ID | 04 bytes + * Chain ID | 32 bytes + * NumOuts | 04 bytes + * Repeated (NumOuts): + * Out | ? bytes + * NumIns | 04 bytes + * Repeated (NumIns): + * In | ? 
bytes + */ + +/* Tx: + * Unsigned Tx | ? bytes + * Repeated (NumIns): + * Sig | ? bytes + */ + +// MarshalUnsignedTx returns the byte representation of the unsigned tx +func (c *Codec) MarshalUnsignedTx(tx *Tx) ([]byte, error) { + p := wrappers.Packer{MaxSize: MaxSize} + + c.marshalUnsignedTx(tx, &p) + + return p.Bytes, p.Err +} + +// MarshalTx returns the byte representation of the tx +func (c *Codec) MarshalTx(tx *Tx) ([]byte, error) { + p := wrappers.Packer{MaxSize: MaxSize} + + c.marshalUnsignedTx(tx, &p) + + if tx != nil { + for _, in := range tx.ins { + c.marshalSigs(in, &p) + } + } + + return p.Bytes, p.Err +} + +func (c *Codec) marshalUnsignedTx(tx *Tx, p *wrappers.Packer) { + if tx == nil { + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errNil, p.Offset)) + return + } + + p.PackInt(uint32(CustomID)) + p.PackInt(tx.networkID) + p.PackFixedBytes(tx.chainID.Bytes()) + + outs := tx.outs + p.PackInt(uint32(len(outs))) + for _, out := range outs { + c.marshalOutput(out, p) + } + + ins := tx.ins + p.PackInt(uint32(len(ins))) + for _, in := range ins { + c.marshalInput(in, p) + } +} + +// UnmarshalTx attempts to convert the stream of bytes into a representation +// of a tx +func (c *Codec) UnmarshalTx(b []byte) (*Tx, error) { + p := wrappers.Packer{Bytes: b} + + tx := c.unmarshalTx(&p) + + if p.Offset != len(b) { + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errExtraSpace, p.Offset)) + } + + return tx, p.Err +} + +func (c *Codec) unmarshalTx(p *wrappers.Packer) *Tx { + start := p.Offset + + if codecID := CodecID(p.UnpackInt()); codecID != CustomID { + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errBadCodec, p.Offset)) + } + + networkID := p.UnpackInt() + chainID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + + outs := []Output(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + outs = append(outs, c.unmarshalOutput(p)) + } + + ins := []Input(nil) + for i := p.UnpackInt(); i > 0 && 
!p.Errored(); i-- { + ins = append(ins, c.unmarshalInput(p)) + } + + for _, in := range ins { + c.unmarshalSigs(in, p) + } + + if p.Errored() { + return nil + } + + bytes := p.Bytes[start:p.Offset] + return &Tx{ + id: ids.NewID(hashing.ComputeHash256Array(bytes)), + networkID: networkID, + chainID: chainID, + ins: ins, + outs: outs, + bytes: bytes, + } +} + +/* + ****************************************************************************** + *********************************** UTXOs ************************************ + ****************************************************************************** + */ + +/* UTXOs: + * NumUTXOs | 4 bytes + * Repeated (NumUTXOs): + * UTXO | ? bytes + */ + +// MarshalUTXOs returns the byte representation of the utxos +func (c *Codec) MarshalUTXOs(utxos []*UTXO) ([]byte, error) { + p := wrappers.Packer{MaxSize: MaxSize} + + p.PackInt(uint32(len(utxos))) + for _, utxo := range utxos { + if utxo == nil { + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errNil, p.Offset)) + break + } else { + p.PackFixedBytes(utxo.Bytes()) + } + } + + return p.Bytes, p.Err +} + +// UnmarshalUTXOs attempts to convert the stream of bytes into a representation +// of a slice of utxos +func (c *Codec) UnmarshalUTXOs(b []byte) ([]*UTXO, error) { + p := wrappers.Packer{Bytes: b} + + utxos := []*UTXO(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + utxos = append(utxos, c.unmarshalUTXO(&p)) + } + + if p.Offset != len(b) { + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errExtraSpace, p.Offset)) + } + return utxos, p.Err +} + +/* + ****************************************************************************** + ************************************ UTXO ************************************ + ****************************************************************************** + */ + +/* UTXO: + * TxID | 32 Bytes + * TxIndex | 04 bytes + * Output | ?? 
bytes + */ + +// MarshalUTXO returns the byte representation of the utxo +func (c *Codec) MarshalUTXO(utxo *UTXO) ([]byte, error) { + p := wrappers.Packer{MaxSize: MaxSize} + + c.marshalUTXO(utxo, &p) + + return p.Bytes, p.Err +} + +func (c *Codec) marshalUTXO(utxo *UTXO, p *wrappers.Packer) { + if utxo == nil { + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errNil, p.Offset)) + return + } + + txID, txIndex := utxo.Source() + p.PackFixedBytes(txID.Bytes()) + p.PackInt(txIndex) + c.marshalOutput(utxo.Out(), p) +} + +// UnmarshalUTXO attempts to convert the stream of bytes into a representation +// of an utxo +func (c *Codec) UnmarshalUTXO(b []byte) (*UTXO, error) { + p := wrappers.Packer{Bytes: b} + + utxo := c.unmarshalUTXO(&p) + + if p.Offset != len(b) { + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errExtraSpace, p.Offset)) + } + + return utxo, p.Err +} + +func (c *Codec) unmarshalUTXO(p *wrappers.Packer) *UTXO { + start := p.Offset + + sourceID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + sourceIndex := p.UnpackInt() + out := c.unmarshalOutput(p) + + return &UTXO{ + sourceID: sourceID, + sourceIndex: sourceIndex, + id: sourceID.Prefix(uint64(sourceIndex)), + out: out, + bytes: p.Bytes[start:p.Offset], + } +} + +/* + ****************************************************************************** + *********************************** Output *********************************** + ****************************************************************************** + */ + +/* Output Payment: + * OutputID | 04 Bytes + * Amount | 08 bytes + * Locktime | 08 bytes + * Threshold | 04 bytes + * NumAddrs | 04 bytes + * Repeated (NumAddrs): + * Addr | 20 bytes + */ + +/* Output Take-or-Leave: + * OutputID | 04 Bytes + * Amount | 08 bytes + * Locktime | 08 bytes + * Threshold | 04 bytes + * NumAddrs | 04 bytes + * Repeated (NumAddrs): + * Addr | 20 bytes + * FallLocktime | 08 bytes + * FallThreshold | 04 bytes + * NumFallAddrs | 04 
bytes + * Repeated (NumFallAddrs): + * Addr | 20 bytes + */ + +// MarshalOutput returns the byte representation of the output +func (c *Codec) MarshalOutput(out Output) ([]byte, error) { + p := wrappers.Packer{MaxSize: MaxSize} + + c.marshalOutput(out, &p) + + return p.Bytes, p.Err +} + +func (c *Codec) marshalOutput(out Output, p *wrappers.Packer) { + switch o := out.(type) { + case *OutputPayment: + p.PackInt(OutputPaymentID) + p.PackLong(o.amount) + p.PackLong(o.locktime) + p.PackInt(o.threshold) + p.PackInt(uint32(len(o.addresses))) + for _, addr := range o.addresses { + p.PackFixedBytes(addr.Bytes()) + } + case *OutputTakeOrLeave: + p.PackInt(OutputTakeOrLeaveID) + p.PackLong(o.amount) + p.PackLong(o.locktime1) + p.PackInt(o.threshold1) + p.PackInt(uint32(len(o.addresses1))) + for _, addr := range o.addresses1 { + p.PackFixedBytes(addr.Bytes()) + } + p.PackLong(o.locktime2) + p.PackInt(o.threshold2) + p.PackInt(uint32(len(o.addresses2))) + for _, addr := range o.addresses2 { + p.PackFixedBytes(addr.Bytes()) + } + default: + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errOutputType, p.Offset)) + } +} + +func (c *Codec) unmarshalOutput(p *wrappers.Packer) Output { + switch p.UnpackInt() { + case OutputPaymentID: + amount := p.UnpackLong() + locktime := p.UnpackLong() + threshold := p.UnpackInt() + + addresses := []ids.ShortID(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + addr, _ := ids.ToShortID(p.UnpackFixedBytes(hashing.AddrLen)) + addresses = append(addresses, addr) + } + + if p.Errored() { + return nil + } + + return &OutputPayment{ + amount: amount, + locktime: locktime, + threshold: threshold, + addresses: addresses, + } + case OutputTakeOrLeaveID: + amount := p.UnpackLong() + locktime1 := p.UnpackLong() + threshold1 := p.UnpackInt() + + addresses1 := []ids.ShortID(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + addr, _ := ids.ToShortID(p.UnpackFixedBytes(hashing.AddrLen)) + addresses1 = 
append(addresses1, addr) + } + + locktime2 := p.UnpackLong() + threshold2 := p.UnpackInt() + + addresses2 := []ids.ShortID(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + addr, _ := ids.ToShortID(p.UnpackFixedBytes(hashing.AddrLen)) + addresses2 = append(addresses2, addr) + } + + if p.Errored() { + return nil + } + + return &OutputTakeOrLeave{ + amount: amount, + locktime1: locktime1, + threshold1: threshold1, + addresses1: addresses1, + locktime2: locktime2, + threshold2: threshold2, + addresses2: addresses2, + } + default: + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errOutputType, p.Offset)) + return nil + } +} + +/* + ****************************************************************************** + *********************************** Input ************************************ + ****************************************************************************** + */ + +/* Input: + * ObjectID | 04 Bytes + * TxID | 32 bytes + * TxIndex | 04 bytes + * Amount | 08 bytes + * NumSigs | 04 bytes + * Repeated (NumSigs): + * Sig | 04 bytes + */ + +func (c *Codec) marshalInput(rawInput Input, p *wrappers.Packer) { + switch in := rawInput.(type) { + case *InputPayment: + p.PackInt(InputID) + p.PackFixedBytes(in.sourceID.Bytes()) + p.PackInt(in.sourceIndex) + p.PackLong(in.amount) + + p.PackInt(uint32(len(in.sigs))) + for _, sig := range in.sigs { + p.PackInt(sig.index) + } + default: + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errInputType, p.Offset)) + } +} + +func (c *Codec) unmarshalInput(p *wrappers.Packer) Input { + switch inputID := p.UnpackInt(); inputID { + case InputID: + txID, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + index := p.UnpackInt() + amount := p.UnpackLong() + + sigs := []*Sig(nil) + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + sigs = append(sigs, &Sig{index: p.UnpackInt()}) + } + + return &InputPayment{ + sourceID: txID, + sourceIndex: index, + amount: amount, + sigs: sigs, + } + 
default: + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errInputType, p.Offset)) + return nil + } +} + +/* + ****************************************************************************** + ************************************ Sig ************************************* + ****************************************************************************** + */ + +/* Sig: + * Repeated (NumSigs): + * Sig | 65 bytes + */ + +func (c *Codec) marshalSigs(rawInput Input, p *wrappers.Packer) { + switch in := rawInput.(type) { + case *InputPayment: + for _, sig := range in.sigs { + p.PackFixedBytes(sig.sig) + } + default: + p.Add(fmt.Errorf("serialization error occurred, Error:%w, Index=%d", errInputType, p.Offset)) + } +} + +func (c *Codec) unmarshalSigs(rawInput Input, p *wrappers.Packer) { + switch in := rawInput.(type) { + case *InputPayment: + for _, sig := range in.sigs { + sig.sig = p.UnpackFixedBytes(crypto.SECP256K1RSigLen) + } + default: + p.Add(fmt.Errorf("parse error occurred, Error:%w, Index=%d", errInputType, p.Offset)) + } +} diff --git a/vms/spdagvm/factory.go b/vms/spdagvm/factory.go new file mode 100644 index 0000000..7e6e263 --- /dev/null +++ b/vms/spdagvm/factory.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID this VM should be referenced with +var ( + ID = ids.NewID([32]byte{'s', 'p', 'd', 'a', 'g', 'v', 'm'}) +) + +// Factory ... +type Factory struct{ TxFee uint64 } + +// New ... +func (f *Factory) New() interface{} { + return &VM{TxFee: f.TxFee} // Use the tx fee from the config +} diff --git a/vms/spdagvm/input.go b/vms/spdagvm/input.go new file mode 100644 index 0000000..9d95e1c --- /dev/null +++ b/vms/spdagvm/input.go @@ -0,0 +1,148 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" +) + +// Input describes the interface all inputs must implement +type Input interface { + formatting.PrefixedStringer + + InputSource() (ids.ID, uint32) + InputID() ids.ID + + Verify() error +} + +// InputPayment is an input that consumes an output +// InputPayment implements Input +type InputPayment struct { + // The ID of the transaction that produced the UTXO this input consumes + sourceID ids.ID + + // The index within that transaction of the UTXO this input consumes + sourceIndex uint32 + + // The ID of the UTXO this input consumes + utxoID ids.ID + + // The amount of the UTXO this input consumes + amount uint64 + + // The signatures used to spend the UTXOs this input consumes + sigs []*Sig +} + +// InputSource returns: +// 1) The ID of the transaction that produced the UTXO this input consumes +// 2) The index within that transaction of the UTXO this input consumes +func (in *InputPayment) InputSource() (ids.ID, uint32) { return in.sourceID, in.sourceIndex } + +// InputID returns the ID of the UTXO this input consumes +func (in *InputPayment) InputID() ids.ID { + if in.utxoID.IsZero() { + in.utxoID = in.sourceID.Prefix(uint64(in.sourceIndex)) + } + return in.utxoID +} + +// Amount this input will produce for the tx +func (in *InputPayment) Amount() uint64 { return in.amount } + +// Verify this input is syntactically valid +func (in *InputPayment) Verify() error { + switch { + case in == nil: + return errNilInput + case in.amount == 0: + return errInputHasNoValue + } + // Verify the signatures are well-formed + for _, sig := range in.sigs { + switch { + case sig == nil: + return errNilSig + case len(sig.sig) != crypto.SECP256K1RSigLen: + return errInvalidSigLen + } + } + // Verify in.sigs are sorted and unique + if 
!isSortedAndUniqueTxSig(in.sigs) { + return errSigsNotSortedUnique + } + return nil +} + +// PrefixedString converts this input to a string representation with a prefix +// for each newline +func (in *InputPayment) PrefixedString(prefix string) string { + s := strings.Builder{} + + s.WriteString(fmt.Sprintf("InputPayment(\n"+ + "%s Source ID = %s\n"+ + "%s Source Index = %d\n"+ + "%s Amount = %d\n"+ + "%s NumSigs = %d\n", + prefix, in.sourceID, + prefix, in.sourceIndex, + prefix, in.amount, + prefix, len(in.sigs))) + + sigFormat := fmt.Sprintf("%%s Sig[%s]: Index = %%d, Signature = %%s\n", + formatting.IntFormat(len(in.sigs)-1)) + for i, sig := range in.sigs { + s.WriteString(fmt.Sprintf(sigFormat, + prefix, i, sig.index, formatting.CB58{Bytes: sig.sig}, + )) + } + + s.WriteString(fmt.Sprintf("%s)", prefix)) + + return s.String() +} + +func (in *InputPayment) String() string { return in.PrefixedString("") } + +type sortInsData struct { + ins []Input + signers []*InputSigner +} + +func (ins sortInsData) Less(i, j int) bool { + iID, iIndex := ins.ins[i].InputSource() + jID, jIndex := ins.ins[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} + +func (ins sortInsData) Len() int { return len(ins.ins) } + +func (ins sortInsData) Swap(i, j int) { + ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] + if ins.signers != nil { + ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] + } +} + +// SortIns sorts the tx input list by inputID | inputIndex +func SortIns(ins []Input, signers []*InputSigner) { sort.Sort(sortInsData{ins: ins, signers: signers}) } + +func isSortedAndUniqueIns(ins []Input) bool { return utils.IsSortedAndUnique(sortInsData{ins: ins}) } diff --git a/vms/spdagvm/input_test.go b/vms/spdagvm/input_test.go new file mode 100644 index 0000000..2ad9f2f --- /dev/null +++ b/vms/spdagvm/input_test.go @@ -0,0 +1,62 @@ +// (c) 2019-2020, Ava Labs, 
Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "bytes" + "math" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/wrappers" +) + +func TestInputPayment(t *testing.T) { + txID := ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }) + + b := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + input := b.NewInputPayment( + /*sourceID=*/ txID, + /*sourceIndex=*/ 9, + /*amount=*/ 123456789, + /*sigs=*/ []*Sig{b.NewSig(7)}, + ) + + c := Codec{} + p := wrappers.Packer{MaxSize: math.MaxInt32} + c.marshalInput(input, &p) + if p.Errored() { + t.Fatal(p.Err) + } + inputBytes := p.Bytes + + expected := []byte{ + // input type: + 0x00, 0x00, 0x00, 0x00, + // txID + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // output index: + 0x00, 0x00, 0x00, 0x09, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + // number of signatures: + 0x00, 0x00, 0x00, 0x01, + // signature index[0]: + 0x00, 0x00, 0x00, 0x07, + } + if !bytes.Equal(inputBytes, expected) { + t.Fatalf("Codec.marshalInput returned:\n0x%x\nExpected:\n0x%x", inputBytes, expected) + } +} diff --git a/vms/spdagvm/keychain.go b/vms/spdagvm/keychain.go new file mode 100644 index 0000000..5740cfa --- /dev/null +++ b/vms/spdagvm/keychain.go @@ -0,0 +1,181 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "errors" + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" +) + +var ( + errLockedFunds = errors.New("funds currently locked") + errCantSpend = errors.New("utxo couldn't be spent") +) + +// KeyChain is a collection of keys that can be used to spend utxos +type KeyChain struct { + // This can be used to iterate over. However, it should not be modified externally. + // Key: The id of a private key (namely, [privKey].PublicKey().Address().Key()) + // Value: The index in Keys of that private key + keyMap map[[20]byte]int + + // Each element is an address controlled by a key in [Keys] + Addrs ids.ShortSet + + // List of keys this keychain manages + Keys []*crypto.PrivateKeySECP256K1R +} + +func (kc *KeyChain) init() { + if kc.keyMap == nil { + kc.keyMap = make(map[[20]byte]int) + } +} + +// Add a new key to the key chain. +// If [key] is already in the keychain, does nothing. +func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { + kc.init() + + addr := key.PublicKey().Address() // The address controlled by [key] + addrHash := addr.Key() + if _, ok := kc.keyMap[addrHash]; !ok { + kc.keyMap[addrHash] = len(kc.Keys) + kc.Keys = append(kc.Keys, key) + kc.Addrs.Add(addr) + } +} + +// Get a key from the keychain. If the key is unknown, the second return value is false. +func (kc KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { + kc.init() + + if i, ok := kc.keyMap[id.Key()]; ok { + return kc.Keys[i], true + } + return &crypto.PrivateKeySECP256K1R{}, false +} + +// Addresses returns a list of addresses this keychain manages +func (kc KeyChain) Addresses() ids.ShortSet { return kc.Addrs } + +// New returns a newly generated private key. 
+// The key and the address it controls are added to +// [kc.Keys] and [kc.Addrs], respectively +func (kc *KeyChain) New() (*crypto.PrivateKeySECP256K1R, error) { + factory := crypto.FactorySECP256K1R{} + + skGen, err := factory.NewPrivateKey() + if err != nil { + return nil, err + } + + sk := skGen.(*crypto.PrivateKeySECP256K1R) + kc.Add(sk) + return sk, nil +} + +// Spend attempts to create an input +func (kc *KeyChain) Spend(utxo *UTXO, time uint64) (Input, *InputSigner, error) { + builder := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + + switch out := utxo.Out().(type) { + case *OutputPayment: + if time < out.Locktime() { // [UTXO] may not be spent yet + return nil, nil, errLockedFunds + } + // Get [threshold] of the keys needed to spend [UTXO] + if sigs, keys, able := kc.GetSigsAndKeys(out.Addresses(), int(out.Threshold())); able { + sourceID, sourceIndex := utxo.Source() + return builder.NewInputPayment( + sourceID, + sourceIndex, + out.Amount(), + sigs, + ), + &InputSigner{Keys: keys}, + nil + } + case *OutputTakeOrLeave: + if time < out.Locktime1() { + return nil, nil, errLockedFunds + } + if sigs, keys, able := kc.GetSigsAndKeys(out.Addresses1(), int(out.Threshold1())); able { + sourceID, sourceIndex := utxo.Source() + return builder.NewInputPayment( + sourceID, + sourceIndex, + out.Amount(), + sigs, + ), + &InputSigner{Keys: keys}, + nil + } + if time < out.Locktime2() { + return nil, nil, errLockedFunds + } + if sigs, keys, able := kc.GetSigsAndKeys(out.Addresses2(), int(out.Threshold2())); able { + sourceID, sourceIndex := utxo.Source() + return builder.NewInputPayment( + sourceID, + sourceIndex, + out.Amount(), + sigs, + ), + &InputSigner{Keys: keys}, + nil + } + } + return nil, nil, errCantSpend +} + +// GetSigsAndKeys returns: +// 1) A list of *Sig where [Sig].Index is the index of an address in [addresses] +// such that a key in this keychain that controls the address +// 2) A list of private keys such that each key controls an address in 
[addresses] +// 3) true iff this keychain contains at least [threshold] keys that control an address +// in [addresses] +func (kc *KeyChain) GetSigsAndKeys(addresses []ids.ShortID, threshold int) ([]*Sig, []*crypto.PrivateKeySECP256K1R, bool) { + sigs := []*Sig{} + keys := []*crypto.PrivateKeySECP256K1R{} + builder := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + for i := uint32(0); i < uint32(len(addresses)) && len(keys) < threshold; i++ { + if key, exists := kc.Get(addresses[i]); exists { + sigs = append(sigs, builder.NewSig(i)) + keys = append(keys, key) + } + } + return sigs, keys, len(keys) == threshold +} + +// PrefixedString returns the key chain as a string representation with [prefix] +// added before every line. +func (kc *KeyChain) PrefixedString(prefix string) string { + s := strings.Builder{} + + format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", + formatting.IntFormat(len(kc.Keys)-1)) + for i, key := range kc.Keys { + s.WriteString(fmt.Sprintf(format, + prefix, + i, + formatting.CB58{Bytes: key.Bytes()}, + key.PublicKey().Address())) + } + + return strings.TrimSuffix(s.String(), "\n") +} + +func (kc *KeyChain) String() string { return kc.PrefixedString("") } diff --git a/vms/spdagvm/output.go b/vms/spdagvm/output.go new file mode 100644 index 0000000..3997428 --- /dev/null +++ b/vms/spdagvm/output.go @@ -0,0 +1,308 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" +) + +// Output describes what functions every output must implement +type Output interface { + formatting.PrefixedStringer + + Unlock(Input, uint64) error + Verify() error +} + +// OutputPayment represents an output that transfers value +// OutputPayment implements Output +type OutputPayment struct { + // The amount of this output + amount uint64 + + // The earliest time at which this output may be spent + // Measured in Unix time + locktime uint64 + + // The number of signatures required to spend this output + threshold uint32 + + // The addresses that can produce signatures to spend this output + addresses []ids.ShortID +} + +// Amount of value this output creates +func (op *OutputPayment) Amount() uint64 { return op.amount } + +// Locktime is the time that this output should be able to be spent +func (op *OutputPayment) Locktime() uint64 { return op.locktime } + +// Threshold is the number of signatures this output will require to be spent +func (op *OutputPayment) Threshold() uint32 { return op.threshold } + +// Addresses are the representations of keys that can produce signatures to +// spend this output +func (op *OutputPayment) Addresses() []ids.ShortID { return op.addresses } + +// Unlock returns true if the input has the correct signatures to spend this +// output +func (op *OutputPayment) Unlock(in Input, time uint64) error { + if op.locktime > time { + return errTimelocked + } + switch i := in.(type) { + case *InputPayment: + switch { + case op.amount != i.amount: + return errInvalidAmount + case !checkRawAddresses(op.threshold, op.addresses, i.sigs): + return errSpendFailed + } + default: + return errTypeMismatch + } + return nil +} + +// Verify that this output is syntactically correct +func (op *OutputPayment) 
Verify() error { + switch { + case op == nil: + return errNilOutput + case op.amount == 0: + return errOutputHasNoValue + case op.threshold > uint32(len(op.addresses)): + return errOutputUnspendable + case op.threshold == 0 && len(op.addresses) > 0: + return errOutputUnoptimized + case !ids.IsSortedAndUniqueShortIDs(op.addresses): // TODO: Should we allow duplicated addresses + return errAddrsNotSortedUnique + default: + return nil + } +} + +// PrefixedString converts this input to a string representation with a prefix +// for each newline +func (op *OutputPayment) PrefixedString(prefix string) string { + s := strings.Builder{} + + s.WriteString(fmt.Sprintf("OutputPayment(\n"+ + "%s Amount = %d\n"+ + "%s Locktime = %d\n"+ + "%s Threshold = %d\n"+ + "%s NumAddrs = %d\n", + prefix, op.amount, + prefix, op.locktime, + prefix, op.threshold, + prefix, len(op.addresses))) + + addrFormat := fmt.Sprintf("%%s Addrs[%s]: %%s\n", + formatting.IntFormat(len(op.addresses)-1)) + for i, addr := range op.addresses { + s.WriteString(fmt.Sprintf(addrFormat, + prefix, i, addr, + )) + } + + s.WriteString(fmt.Sprintf("%s)", prefix)) + + return s.String() +} + +func (op *OutputPayment) String() string { return op.PrefixedString("") } + +// OutputTakeOrLeave is a take-or-leave transaction. It implements Output. 
+// After time [locktime1], it can be spent using [threshold1] signatures, where each +// signature is from [addresses1] +// After time [locktime2], it can also be spent using [threshold2] signatures, where each +// signature is from [addresses2] +type OutputTakeOrLeave struct { + // The amount of this output + amount uint64 + + // The time (Unix time) after which this output may be spent + // using [threshold1] signatures from [addresses1] + locktime1 uint64 + + // The time (Unix time) after which this output may be spent + // using [threshold1] signatures from [addresses1] + // Must be greater than [locktime1] + locktime2 uint64 + + // The number of signatures from [addresses1] required to spend + // this output + threshold1 uint32 + + // The number of signatures from [addresses2] required to spend + // this output + threshold2 uint32 + + // The addresses that may spend this output after [locktime1] + addresses1 []ids.ShortID + + // The addresses that may spend this output after [locktime2] + addresses2 []ids.ShortID +} + +// Amount returns the value this output produces +func (otol *OutputTakeOrLeave) Amount() uint64 { return otol.amount } + +// Locktime1 returns the time after which the first set of addresses +// may spend this output are unlocked +func (otol *OutputTakeOrLeave) Locktime1() uint64 { return otol.locktime1 } + +// Threshold1 returns the number of signatures the first set of addresses need to +// produce to be able to spend this output +func (otol *OutputTakeOrLeave) Threshold1() uint32 { return otol.threshold1 } + +// Addresses1 are the addresses controlled by keys that can produce signatures to +// spend this output +func (otol *OutputTakeOrLeave) Addresses1() []ids.ShortID { return otol.addresses1 } + +// Locktime2 returns when the second set of addresses are unlocked +func (otol *OutputTakeOrLeave) Locktime2() uint64 { return otol.locktime2 } + +// Threshold2 returns the number of signatures the second set of addresses +// need to produce to 
be able to spend this output +func (otol *OutputTakeOrLeave) Threshold2() uint32 { return otol.threshold2 } + +// Addresses2 are the addresses controlled by keys that can produce signatures to +// spend this output +func (otol *OutputTakeOrLeave) Addresses2() []ids.ShortID { return otol.addresses2 } + +// Unlock returns true if the input has the correct signatures to spend this +// output at time [time] +func (otol *OutputTakeOrLeave) Unlock(in Input, time uint64) error { + switch i := in.(type) { + case *InputPayment: + switch { + case otol.amount != i.amount: + return errInvalidAmount + case otol.locktime2 > time: + return errTimelocked + case (otol.locktime1 > time || + !checkRawAddresses(otol.threshold1, otol.addresses1, i.sigs)) && + !checkRawAddresses(otol.threshold2, otol.addresses2, i.sigs): + return errSpendFailed + } + default: + return errTypeMismatch + } + return nil +} + +// Verify that this output is syntactically correct +func (otol *OutputTakeOrLeave) Verify() error { + switch { + case otol == nil: + return errNilOutput + case otol.amount == 0: + return errOutputHasNoValue + case otol.threshold1 > uint32(len(otol.addresses1)) || + otol.threshold2 > uint32(len(otol.addresses2)): + return errOutputUnspendable + case (otol.threshold1 == 0 && len(otol.addresses1) > 0) || + (otol.threshold2 == 0 && len(otol.addresses2) > 0): + return errOutputUnoptimized + case otol.locktime1 >= otol.locktime2: + return errTimesNotSortedUnique + case !ids.IsSortedAndUniqueShortIDs(otol.addresses1) || // TODO: Should we allow duplicated addresses + !ids.IsSortedAndUniqueShortIDs(otol.addresses2): + return errAddrsNotSortedUnique + default: + return nil + } +} + +// PrefixedString converts this input to a string representation with a prefix +// for each newline +func (otol *OutputTakeOrLeave) PrefixedString(prefix string) string { + s := strings.Builder{} + + s.WriteString(fmt.Sprintf("OutputTakeOrLeave(\n"+ + "%s Amount = %d\n"+ + "%s Locktime = %d\n"+ + "%s Threshold = 
%d\n"+ + "%s NumAddrs = %d\n", + prefix, otol.amount, + prefix, otol.locktime1, + prefix, otol.threshold1, + prefix, len(otol.addresses1))) + + addrFormat := fmt.Sprintf("%%s Addrs[%s]: %%s\n", + formatting.IntFormat(len(otol.addresses1)-1)) + for i, addr := range otol.addresses1 { + s.WriteString(fmt.Sprintf(addrFormat, + prefix, i, addr, + )) + } + + s.WriteString(fmt.Sprintf("%s FallLocktime = %d\n"+ + "%s FallThreshold = %d\n"+ + "%s FallNumAddrs = %d\n", + prefix, otol.locktime2, + prefix, otol.threshold2, + prefix, len(otol.addresses2))) + + fallAddrFormat := fmt.Sprintf("%%s FallAddrs[%s]: %%s\n", + formatting.IntFormat(len(otol.addresses2)-1)) + for i, addr := range otol.addresses2 { + s.WriteString(fmt.Sprintf(fallAddrFormat, + prefix, i, addr, + )) + } + + s.WriteString(fmt.Sprintf("%s)", prefix)) + + return s.String() +} + +func (otol *OutputTakeOrLeave) String() string { return otol.PrefixedString("") } + +// checkRange returns true if [index] is in the range [l, u). +func checkRange(index, l, u int) bool { + return l <= index && index < u +} + +// checkRawAddresses checks that the signatures match with the addresses and +// that the threshold is the expected value. 
+func checkRawAddresses(threshold uint32, addrs []ids.ShortID, sigs []*Sig) bool { + if !crypto.EnableCrypto { + return true + } + if uint32(len(sigs)) != threshold { + return false + } + for _, sig := range sigs { + i := int(sig.index) + if !checkRange(i, 0, len(addrs)) || !bytes.Equal(addrs[i].Bytes(), hashing.PubkeyBytesToAddress(sig.parsedPubKey)) { + return false + } + } + return true +} + +type sortOutsData []Output + +func (outs sortOutsData) Less(i, j int) bool { + c := Codec{} + iBytes, _ := c.MarshalOutput(outs[i]) + jBytes, _ := c.MarshalOutput(outs[j]) + return bytes.Compare(iBytes, jBytes) == -1 +} +func (outs sortOutsData) Len() int { return len(outs) } +func (outs sortOutsData) Swap(i, j int) { outs[j], outs[i] = outs[i], outs[j] } + +// SortOuts sorts the tx output list by byte representation +func SortOuts(outs []Output) { sort.Sort(sortOutsData(outs)) } +func isSortedOuts(outs []Output) bool { return sort.IsSorted(sortOutsData(outs)) } diff --git a/vms/spdagvm/output_test.go b/vms/spdagvm/output_test.go new file mode 100644 index 0000000..5748299 --- /dev/null +++ b/vms/spdagvm/output_test.go @@ -0,0 +1,128 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestOutputPayment(t *testing.T) { + addr0 := ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }) + addr1 := ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }) + + b := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + output := b.NewOutputPayment( + /*amount=*/ 12345, + /*locktime=*/ 54321, + /*threshold=*/ 1, + /*addresses=*/ []ids.ShortID{addr0, addr1}, + ) + + c := Codec{} + outputBytes, err := c.MarshalOutput(output) + if err != nil { + t.Fatal(err) + } + + expected := []byte{ + // output type + 0x00, 0x00, 0x00, 0x00, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // locktime: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + // threshold: + 0x00, 0x00, 0x00, 0x01, + // number of addresses: + 0x00, 0x00, 0x00, 0x02, + // addr0: + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + // addr1: + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + if !bytes.Equal(outputBytes, expected) { + t.Fatalf("Codec.MarshalOutput returned:\n0x%x\nExpected:\n0x%x", outputBytes, expected) + } +} + +func TestOutputTakeOrLeave(t *testing.T) { + addr0 := ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }) + addr1 := ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }) + + b := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + output := b.NewOutputTakeOrLeave( + /*amount=*/ 12345, + /*locktime1=*/ 
54321, + /*threshold1=*/ 1, + /*addresses1=*/ []ids.ShortID{addr0}, + /*locktime2=*/ 56789, + /*threshold2=*/ 1, + /*addresses2=*/ []ids.ShortID{addr1}, + ) + + c := Codec{} + outputBytes, err := c.MarshalOutput(output) + if err != nil { + t.Fatal(err) + } + + expected := []byte{ + // output type + 0x00, 0x00, 0x00, 0x01, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + // locktime1: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + // threshold1: + 0x00, 0x00, 0x00, 0x01, + // number of addresses1: + 0x00, 0x00, 0x00, 0x01, + // addr0: + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, + // locktime2: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + // threshold2: + 0x00, 0x00, 0x00, 0x01, + // number of addresses2: + 0x00, 0x00, 0x00, 0x01, + // addr1: + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + if !bytes.Equal(outputBytes, expected) { + t.Fatalf("Codec.MarshalOutput returned:\n0x%x\nExpected:\n0x%x", outputBytes, expected) + } +} diff --git a/vms/spdagvm/prefixed_state.go b/vms/spdagvm/prefixed_state.go new file mode 100644 index 0000000..01d37b9 --- /dev/null +++ b/vms/spdagvm/prefixed_state.go @@ -0,0 +1,179 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/wrappers" +) + +const ( + txID uint64 = iota + utxoID + txStatusID + fundsID + dbInitializedID +) + +var ( + dbInitialized = ids.Empty.Prefix(dbInitializedID) +) + +// prefixedState wraps a state object. By prefixing the state, there will be no +// collisions between different types of objects that have the same hash. 
+type prefixedState struct { + state *state + + tx, utxo, txStatus, funds cache.Cacher + uniqueTx cache.Deduplicator + + generatedStatus ids.ID +} + +// UniqueTx de-duplicates the transaction. +func (s *prefixedState) UniqueTx(tx *UniqueTx) *UniqueTx { + return s.uniqueTx.Deduplicate(tx).(*UniqueTx) +} + +// Tx loads the transaction whose ID is [id] from storage. +func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(s.uniqueID(id, txID, s.tx)) } + +// SetTx saves transaction [tx], whose ID is [id], to storage. +func (s *prefixedState) SetTx(id ids.ID, tx *Tx) error { + return s.state.SetTx(s.uniqueID(id, txID, s.tx), tx) +} + +// UTXO loads a UTXO from storage. +func (s *prefixedState) UTXO(id ids.ID) (*UTXO, error) { + return s.state.UTXO(s.uniqueID(id, utxoID, s.utxo)) +} + +// SetUTXO saves the provided utxo to storage. +func (s *prefixedState) SetUTXO(id ids.ID, utxo *UTXO) error { + return s.state.SetUTXO(s.uniqueID(id, utxoID, s.utxo), utxo) +} + +// Status returns the status of the transaction whose ID is [id] from storage. +func (s *prefixedState) Status(id ids.ID) (choices.Status, error) { + return s.state.Status(s.uniqueID(id, txStatusID, s.txStatus)) +} + +// SetStatus saves the status of [id] as [status] +func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { + return s.state.SetStatus(s.uniqueID(id, txStatusID, s.txStatus), status) +} + +// DBInitialized returns the status of this database. If the database is +// uninitialized, the status will be unknown. +func (s *prefixedState) DBInitialized() (choices.Status, error) { return s.state.Status(dbInitialized) } + +// SetDBInitialized saves the provided status of the database. 
+func (s *prefixedState) SetDBInitialized(status choices.Status) error { + return s.state.SetStatus(dbInitialized, status) +} + +// Funds returns the IDs of unspent UTXOs that reference address [addr] +func (s *prefixedState) Funds(addr ids.ID) ([]ids.ID, error) { + return s.state.IDs(s.uniqueID(addr, fundsID, s.funds)) +} + +// SetFunds saves the mapping from address [addr] to the IDs of unspent UTXOs +// that reference [addr] +func (s *prefixedState) SetFunds(addr ids.ID, idSlice []ids.ID) error { + return s.state.SetIDs(s.uniqueID(addr, fundsID, s.funds), idSlice) +} + +// Make [id] unique by prefixing [prefix] to it +func (s *prefixedState) uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + +// SpendUTXO consumes the utxo whose ID is [utxoID] +func (s *prefixedState) SpendUTXO(utxoID ids.ID) error { + utxo, err := s.UTXO(utxoID) + if err != nil { + return err + } + if err := s.SetUTXO(utxoID, nil); err != nil { + return err + } + + // Update funds + // TODO: Clean this up. More into the output object? + switch out := utxo.Out().(type) { + case *OutputPayment: + return s.removeUTXO(out.Addresses(), utxoID) + case *OutputTakeOrLeave: + errs := wrappers.Errs{} + errs.Add(s.removeUTXO(out.Addresses1(), utxoID)) + errs.Add(s.removeUTXO(out.Addresses2(), utxoID)) + return errs.Err + default: + return errOutputType + } +} + +// For each address in [addrs], persist that the UTXO whose ID is [utxoID] +// has been spent and can no longer be spent by the address +func (s *prefixedState) removeUTXO(addrs []ids.ShortID, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := addr.LongID() + utxos := ids.Set{} // IDs of unspent UTXOs referencing [addr] + if funds, err := s.Funds(addrID); err == nil { + utxos.Add(funds...) 
+ } + utxos.Remove(utxoID) // Remove [utxoID] from this set + if err := s.SetFunds(addrID, utxos.List()); err != nil { // Persist + return err + } + } + return nil +} + +// FundUTXO persists [utxo]. +// For each address referenced in [utxo]'s output, persists +// that the address is referenced by [utxo] +func (s *prefixedState) FundUTXO(utxo *UTXO) error { + utxoID := utxo.ID() + s.SetUTXO(utxoID, utxo) // Save [utxo] + + switch out := utxo.Out().(type) { + case *OutputPayment: + return s.addUTXO(out.Addresses(), utxoID) + case *OutputTakeOrLeave: + errs := wrappers.Errs{} + errs.Add(s.addUTXO(out.Addresses1(), utxoID)) + errs.Add(s.addUTXO(out.Addresses2(), utxoID)) + return errs.Err + default: + return errOutputType + } +} + +// Persist that each address in [addrs] is referenced in the UTXO whose ID is [utxoID] +func (s *prefixedState) addUTXO(addrs []ids.ShortID, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := addr.LongID() + utxos := ids.Set{} + // Get the set of UTXO IDs such that [addr] is referenced in + // the UTXO + if funds, err := s.Funds(addrID); err == nil { + utxos.Add(funds...) + } + // Add [utxoID] to that set + utxos.Add(utxoID) + // Persist the new set + if err := s.SetFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} diff --git a/vms/spdagvm/service.go b/vms/spdagvm/service.go new file mode 100644 index 0000000..1d474ba --- /dev/null +++ b/vms/spdagvm/service.go @@ -0,0 +1,109 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "errors" + "net/http" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/formatting" +) + +var ( + errNilID = errors.New("nil ID is not valid") +) + +// Service defines the API services exposed by the ava vm +type Service struct{ vm *VM } + +// IssueTxArgs are arguments for passing into IssueTx requests +type IssueTxArgs struct { + Tx formatting.CB58 `json:"tx"` +} + +// IssueTxReply defines the IssueTx replies returned from the API +type IssueTxReply struct { + TxID ids.ID `json:"txID"` +} + +// IssueTx attempts to issue a transaction into consensus +func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { + service.vm.ctx.Log.Verbo("IssueTx called with %s", args.Tx) + + txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) + if err != nil { + service.vm.ctx.Log.Debug("IssueTx failed to issue due to %s", err) + return err + } + + reply.TxID = txID + return nil +} + +// GetTxStatusArgs are arguments for GetTxStatus +type GetTxStatusArgs struct { + TxID ids.ID `json:"txID"` +} + +// GetTxStatusReply is the reply from GetTxStatus +type GetTxStatusReply struct { + // Status of the returned transaction + Status choices.Status `json:"status"` +} + +// GetTxStatus returns the status of the transaction whose ID is [args.TxID] +func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { + service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID) + + if args.TxID.IsZero() { + return errNilID + } + + tx := UniqueTx{ + vm: service.vm, + txID: args.TxID, + } + + reply.Status = tx.Status() + return nil +} + +// GetUTXOsArgs are arguments for GetUTXOs +type GetUTXOsArgs struct { + Addresses []ids.ShortID `json:"addresses"` +} + +// GetUTXOsReply is the reply from GetUTXOs +type GetUTXOsReply struct { + // Each element is the string repr. 
of an unspent UTXO that + // references an address in the arguments + UTXOs []formatting.CB58 `json:"utxos"` +} + +// GetUTXOs returns the UTXOs such that at least one address in [args.Addresses] +// is referenced in the UTXO. +func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *GetUTXOsReply) error { + service.vm.ctx.Log.Verbo("GetUTXOs called with %s", args.Addresses) + + addrSet := ids.ShortSet{} + for _, addr := range args.Addresses { + if addr.IsZero() { + return errNilID + } + } + addrSet.Add(args.Addresses...) + + utxos, err := service.vm.GetUTXOs(addrSet) + if err != nil { + return err + } + + reply.UTXOs = []formatting.CB58{} + for _, utxo := range utxos { + reply.UTXOs = append(reply.UTXOs, formatting.CB58{Bytes: utxo.Bytes()}) + } + return nil +} diff --git a/vms/spdagvm/signature.go b/vms/spdagvm/signature.go new file mode 100644 index 0000000..6e4104c --- /dev/null +++ b/vms/spdagvm/signature.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "sort" + + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/crypto" +) + +// Sig is a signature on a transaction +type Sig struct { + index uint32 + sig []byte + parsedPubKey []byte +} + +// InputSigner stores the keys used to sign an input +type InputSigner struct { + Keys []*crypto.PrivateKeySECP256K1R +} + +type sortTxSig []*Sig + +func (tsp sortTxSig) Less(i, j int) bool { return tsp[i].index < tsp[j].index } +func (tsp sortTxSig) Len() int { return len(tsp) } +func (tsp sortTxSig) Swap(i, j int) { tsp[j], tsp[i] = tsp[i], tsp[j] } + +// SortTxSig sorts the tx signature list by index +func SortTxSig(sigs []*Sig) { sort.Sort(sortTxSig(sigs)) } +func isSortedAndUniqueTxSig(sig []*Sig) bool { return utils.IsSortedAndUnique(sortTxSig(sig)) } diff --git a/vms/spdagvm/state.go b/vms/spdagvm/state.go new file mode 100644 index 0000000..74c12c9 --- /dev/null +++ b/vms/spdagvm/state.go @@ -0,0 +1,207 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "errors" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") +) + +// state is a thin wrapper around a database to provide, caching, serialization, +// and de-serialization. +type state struct { + c cache.Cacher + vm *VM +} + +// Tx attempts to load a transaction from storage. 
+func (s *state) Tx(id ids.ID) (*Tx, error) { + if txIntf, found := s.c.Get(id); found { + if tx, ok := txIntf.(*Tx); ok { + return tx, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + c := Codec{} + tx, err := c.UnmarshalTx(bytes) + if err != nil { + return nil, err + } + + s.c.Put(id, tx) + return tx, nil +} + +// SetTx saves the provided transaction to storage. +func (s *state) SetTx(id ids.ID, tx *Tx) error { + if tx == nil { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + s.c.Put(id, tx) + return s.vm.db.Put(id.Bytes(), tx.bytes) +} + +// UTXO attempts to load a utxo from storage. +func (s *state) UTXO(id ids.ID) (*UTXO, error) { + if utxoIntf, found := s.c.Get(id); found { + if utxo, ok := utxoIntf.(*UTXO); ok { + return utxo, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + c := Codec{} + utxo, err := c.UnmarshalUTXO(bytes) + if err != nil { + return nil, err + } + + s.c.Put(id, utxo) + return utxo, nil +} + +// SetUTXO saves the provided utxo to storage. +func (s *state) SetUTXO(id ids.ID, utxo *UTXO) error { + if utxo == nil { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + s.c.Put(id, utxo) + return s.vm.db.Put(id.Bytes(), utxo.Bytes()) +} + +// Status returns a status from storage. 
+func (s *state) Status(id ids.ID) (choices.Status, error) { + if statusIntf, found := s.c.Get(id); found { + if status, ok := statusIntf.(choices.Status); ok { + return status, nil + } + return choices.Unknown, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return choices.Unknown, err + } + + // The key was in the database + p := wrappers.Packer{Bytes: bytes} + status := choices.Status(p.UnpackInt()) + + if p.Offset != len(bytes) { + p.Add(errExtraSpace) + } + if p.Errored() { + return choices.Unknown, p.Err + } + + s.c.Put(id, status) + return status, nil +} + +// SetStatus saves a status in storage. +func (s *state) SetStatus(id ids.ID, status choices.Status) error { + if status == choices.Unknown { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + s.c.Put(id, status) + + p := wrappers.Packer{Bytes: make([]byte, 4)} + + p.PackInt(uint32(status)) + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + if p.Errored() { + return p.Err + } + return s.vm.db.Put(id.Bytes(), p.Bytes) +} + +// IDs returns a slice of IDs from storage +func (s *state) IDs(id ids.ID) ([]ids.ID, error) { + if idsIntf, found := s.c.Get(id); found { + if idSlice, ok := idsIntf.([]ids.ID); ok { + return idSlice, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.vm.db.Get(id.Bytes()) + if err != nil { + return nil, err + } + + p := wrappers.Packer{Bytes: bytes} + + idSlice := []ids.ID{} + for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- { + id, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) + idSlice = append(idSlice, id) + } + + if p.Offset != len(bytes) { + p.Add(errExtraSpace) + } + if p.Errored() { + return nil, p.Err + } + + s.c.Put(id, idSlice) + return idSlice, nil +} + +// SetIDs saves a slice of IDs to the database. 
+func (s *state) SetIDs(id ids.ID, idSlice []ids.ID) error { + if len(idSlice) == 0 { + s.c.Evict(id) + return s.vm.db.Delete(id.Bytes()) + } + + s.c.Put(id, idSlice) + + size := wrappers.IntLen + hashing.HashLen*len(idSlice) + p := wrappers.Packer{Bytes: make([]byte, size)} + + p.PackInt(uint32(len(idSlice))) + for _, id := range idSlice { + p.PackFixedBytes(id.Bytes()) + } + + if p.Offset != len(p.Bytes) { + p.Add(errExtraSpace) + } + + if p.Errored() { + return p.Err + } + return s.vm.db.Put(id.Bytes(), p.Bytes) +} diff --git a/vms/spdagvm/static_service.go b/vms/spdagvm/static_service.go new file mode 100644 index 0000000..1ef5e08 --- /dev/null +++ b/vms/spdagvm/static_service.go @@ -0,0 +1,77 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "net/http" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" +) + +// StaticService defines the static API exposed by the AVA VM +type StaticService struct{} + +// APIOutput ... +type APIOutput struct { + Amount json.Uint64 `json:"amount"` + Locktime json.Uint64 `json:"locktime"` + Threshold json.Uint32 `json:"threshold"` + Addresses []ids.ShortID `json:"addresses"` + Locktime2 json.Uint64 `json:"locktime2"` + Threshold2 json.Uint32 `json:"threshold2"` + Addresses2 []ids.ShortID `json:"addresses2"` +} + +// BuildGenesisArgs are arguments for BuildGenesis +type BuildGenesisArgs struct { + Outputs []APIOutput `json:"outputs"` +} + +// BuildGenesisReply is the reply from BuildGenesis +type BuildGenesisReply struct { + Bytes formatting.CB58 `json:"bytes"` +} + +// BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is +// referenced in the UTXO. 
+func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + builder := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + outs := []Output{} + for _, output := range args.Outputs { + if output.Locktime2 == 0 && output.Threshold2 == 0 && len(output.Addresses2) == 0 { + outs = append(outs, builder.NewOutputPayment( + uint64(output.Amount), + uint64(output.Locktime), + uint32(output.Threshold), + output.Addresses, + )) + } else { + outs = append(outs, builder.NewOutputTakeOrLeave( + uint64(output.Amount), + uint64(output.Locktime), + uint32(output.Threshold), + output.Addresses, + uint64(output.Locktime2), + uint32(output.Threshold2), + output.Addresses2, + )) + } + } + tx, err := builder.NewTx( + /*ins=*/ nil, + /*outs=*/ outs, + /*signers=*/ nil, + ) + if err := tx.verifyOuts(); err != nil { + return err + } + + reply.Bytes.Bytes = tx.Bytes() + return err +} diff --git a/vms/spdagvm/static_service_test.go b/vms/spdagvm/static_service_test.go new file mode 100644 index 0000000..830cf64 --- /dev/null +++ b/vms/spdagvm/static_service_test.go @@ -0,0 +1,74 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestBuildGenesis(t *testing.T) { + expected := "111GZiNYug8np6hdorSEF5daDtep3Zc1BxWNc9UoxNkXhKK9xcvTbAbe3DX5bbAZ34BS4cHcKsQZ8SmDfi1CEYRaQVHf3ishkzbEsde67GM3KVfhwKMmyz33Ax8e1iwGcWftnsNPgRSGNkvAX9mdDgRszhXJG9Vp6RPRgW14hcufkQjq8ZGV1CajkgHLMvscex7yDsVRikwM2swra3Hrdmp32Ut8jR" + + addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + + outputPayment := APIOutput{ + Amount: 1000000000, + Locktime: 0, + Threshold: 1, + Addresses: []ids.ShortID{ + addr, + }, + } + outputTakeOrLeave := APIOutput{ + Amount: 1000000000, + Locktime: 0, + Threshold: 1, + Addresses: []ids.ShortID{ + addr, + }, + Locktime2: 32503679940, + Threshold2: 0, + Addresses2: []ids.ShortID{}, + } + + args := BuildGenesisArgs{ + Outputs: []APIOutput{ + outputPayment, + outputTakeOrLeave, + }, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err != nil { + t.Fatal(err) + } + + if reply.Bytes.String() != expected { + t.Fatalf("StaticService.BuildGenesis:\nReturned: %s\nExpected: %s", reply.Bytes, expected) + } +} + +func TestBuildGenesisInvalidOutput(t *testing.T) { + output := APIOutput{ + Amount: 0, + Locktime: 0, + Threshold: 0, + Addresses: []ids.ShortID{}, + } + + args := BuildGenesisArgs{ + Outputs: []APIOutput{ + output, + }, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err == nil { + t.Fatalf("Should have failed with an invalid output") + } +} diff --git a/vms/spdagvm/tx.go b/vms/spdagvm/tx.go new file mode 100644 index 0000000..77a98e9 --- /dev/null +++ b/vms/spdagvm/tx.go @@ -0,0 +1,273 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "errors" + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" +) + +var ( + errNilTx = errors.New("nil tx") + errNilInput = errors.New("nil input") + errNilOutput = errors.New("nil output") + errNilSig = errors.New("nil signature") + errWrongNetworkID = errors.New("transaction has wrong network ID") + errWrongChainID = errors.New("transaction has wrong chain ID") + + errOutputsNotSorted = errors.New("outputs not sorted") + errInputsNotSortedUnique = errors.New("inputs not sorted and unique") + errSigsNotSortedUnique = errors.New("sigs not sorted and unique") + errAddrsNotSortedUnique = errors.New("addresses not sorted and unique") + errTimesNotSortedUnique = errors.New("times not sorted and unique") + + errUnknownInputType = errors.New("unknown input type") + errUnknownOutputType = errors.New("unknown output type") + + errInputHasNoValue = errors.New("input has no value") + errOutputHasNoValue = errors.New("output has no value") + errOutputUnspendable = errors.New("output is unspendable") + errOutputUnoptimized = errors.New("output could be optimized") + + errInputOverflow = errors.New("inputs overflowed uint64") + errOutputOverflow = errors.New("outputs (plus transaction fee) overflowed uint64") + + errInvalidAmount = errors.New("amount mismatch") + errInsufficientFunds = errors.New("insufficient funds (includes transaction fee)") + errTimelocked = errors.New("time locked") + errTypeMismatch = errors.New("input and output types don't match") + errSpendFailed = errors.New("input does not satisfy output's spend requirements") + + errInvalidSigLen = errors.New("signature is not the correct length") +) + +// Tx is the core operation that can be performed. The tx uses the UTXO model. 
+// That is, a tx's inputs will consume previous tx's outputs. A tx will be +// valid if the inputs have the authority to consume the outputs they are +// attempting to consume and the inputs consume sufficient state to produce the +// outputs. +type Tx struct { + id ids.ID // ID of this transaction + networkID uint32 // The network this transaction was issued to + chainID ids.ID // ID of the chain this transaction exists on + ins []Input // Inputs to this transaction + outs []Output // Outputs of this transaction + bytes []byte // Byte representation of this transaction +} + +// ID of this transaction +func (t *Tx) ID() ids.ID { return t.id } + +// Ins returns the ins of this tx +func (t *Tx) Ins() []Input { return t.ins } + +// Outs returns the outs of this tx +func (t *Tx) Outs() []Output { return t.outs } + +// UTXOs returns the UTXOs that this transaction will produce if accepted. +func (t *Tx) UTXOs() []*UTXO { + txID := t.ID() + utxos := []*UTXO(nil) + c := Codec{} + for i, out := range t.outs { + utxo := &UTXO{ + sourceID: txID, + sourceIndex: uint32(i), + id: txID.Prefix(uint64(i)), + out: out, + } + b, _ := c.MarshalUTXO(utxo) + utxo.bytes = b + utxos = append(utxos, utxo) + } + return utxos +} + +// Bytes of this transaction +func (t *Tx) Bytes() []byte { return t.bytes } + +// Verify that this transaction is well formed +func (t *Tx) Verify(ctx *snow.Context, txFee uint64) error { + switch { + case t == nil: + return errNilTx + case ctx.NetworkID != t.networkID: + return errWrongNetworkID + case !ctx.ChainID.Equals(t.chainID): + return errWrongChainID + } + + if err := t.verifyIns(); err != nil { + return err + } + if err := t.verifyOuts(); err != nil { + return err + } + if err := t.verifyFunds(txFee); err != nil { + return err + } + if err := t.verifySigs(); err != nil { + return err + } + return nil +} + +// verify that inputs are well-formed +func (t *Tx) verifyIns() error { + for _, in := range t.ins { + if in == nil { + return errNilInput + } + if 
err := in.Verify(); err != nil { + return err + } + } + if !isSortedAndUniqueIns(t.ins) { + return errInputsNotSortedUnique + } + return nil +} + +// verify outputs are well-formed +func (t *Tx) verifyOuts() error { + for _, out := range t.outs { + if out == nil { + return errNilOutput + } + if err := out.Verify(); err != nil { + return err + } + } + if !isSortedOuts(t.outs) { + return errOutputsNotSorted + } + return nil +} + +// Ensure that the sum of the input amounts +// is at least: +// [the sum of the output amounts] + [txFee] +func (t *Tx) verifyFunds(txFee uint64) error { + inFunds := uint64(0) + for _, in := range t.ins { + err := error(nil) + + switch i := in.(type) { + case *InputPayment: + inFunds, err = math.Add64(inFunds, i.amount) + default: + return errUnknownInputType + } + + if err != nil { + return errInputOverflow + } + } + outFunds := uint64(0) + for _, out := range t.outs { + err := error(nil) + + switch o := out.(type) { + case *OutputPayment: + outFunds, err = math.Add64(outFunds, o.amount) + case *OutputTakeOrLeave: + outFunds, err = math.Add64(outFunds, o.amount) + default: + return errUnknownOutputType + } + + if err != nil { + return errOutputOverflow + } + } + outFundsPlusFee, err := math.Add64(outFunds, txFee) + if err != nil { + return errOutputOverflow + } + if outFundsPlusFee > inFunds { + return errInsufficientFunds + } + return nil +} + +// verify the signatures +func (t *Tx) verifySigs() error { + if !crypto.EnableCrypto { + return nil + } + + c := Codec{} + txBytes, err := c.MarshalUnsignedTx(t) + if err != nil { + return err + } + txHash := hashing.ComputeHash256(txBytes) + + factory := crypto.FactorySECP256K1R{} + for _, in := range t.ins { + switch i := in.(type) { + case *InputPayment: + for _, sig := range i.sigs { + key, err := factory.RecoverHashPublicKey(txHash, sig.sig) + if err != nil { + return err + } + sig.parsedPubKey = key.Bytes() + } + } + } + return nil +} + +// PrefixedString converts this tx to a string 
representation with a prefix for +// each newline +func (t *Tx) PrefixedString(prefix string) string { + s := strings.Builder{} + + nestedPrefix := fmt.Sprintf("%s ", prefix) + + ins := t.Ins() + + s.WriteString(fmt.Sprintf("Tx(\n"+ + "%s ID = %s\n"+ + "%s NumIns = %d\n", + prefix, t.ID(), + prefix, len(ins))) + + inFormat := fmt.Sprintf("%%s In[%s]: %%s\n", + formatting.IntFormat(len(ins)-1)) + for i, in := range ins { + s.WriteString(fmt.Sprintf(inFormat, + prefix, i, + in.PrefixedString(nestedPrefix), + )) + } + + outs := t.Outs() + + s.WriteString(fmt.Sprintf("%s NumOuts = %d\n", + prefix, len(outs))) + + outFormat := fmt.Sprintf("%%s Out[%s]: %%s\n", + formatting.IntFormat(len(outs)-1)) + for i, out := range outs { + s.WriteString(fmt.Sprintf(outFormat, + prefix, i, + out.PrefixedString(nestedPrefix), + )) + } + s.WriteString(fmt.Sprintf("%s)", prefix)) + + return s.String() +} + +func (t *Tx) String() string { return t.PrefixedString("") } diff --git a/vms/spdagvm/tx_test.go b/vms/spdagvm/tx_test.go new file mode 100644 index 0000000..f286b42 --- /dev/null +++ b/vms/spdagvm/tx_test.go @@ -0,0 +1,357 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/utils/wrappers" +) + +// Ensure transaction verification fails when a transaction has +// the wrong chain ID +func TestTxVerifyBadChainID(t *testing.T) { + genesisTx := GenesisTx(defaultInitBalances) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = 15 + ctx.ChainID = avaChainID + + builder := Builder{ + NetworkID: ctx.NetworkID, + ChainID: ctx.ChainID, + } + tx, err := builder.NewTx( //valid transaction + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 0, + /*amount=*/ 5*units.Ava, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 3*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], // reference to vm_test.go + }}, + }, + ) + if err != nil { + t.Fatal(err) + } + + // Should pass verification when chain ID is correct + if err := tx.Verify(ctx, txFeeTest); err != nil { + t.Fatal("Should have passed verification") + } + + ctx.ChainID = ctx.ChainID.Prefix() + + // Should pass verification when chain ID is wrong + if err := tx.Verify(ctx, txFeeTest); err != errWrongChainID { + t.Fatal("Should have failed with errWrongChainID") + } +} + +func TestUnsignedTx(t *testing.T) { + skBytes := []byte{ + 0x98, 0xcb, 0x07, 0x7f, 0x97, 0x2f, 0xeb, 0x04, + 0x81, 0xf1, 0xd8, 0x94, 0xf2, 0x72, 0xc6, 0xa1, + 0xe3, 0xc1, 0x5e, 0x27, 0x2a, 0x16, 0x58, 0xff, + 0x71, 0x64, 0x44, 0xf4, 0x65, 0x20, 0x00, 0x70, + } + outputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 
0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + outputTakeOrLeaveBytes := []byte{ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + inputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, + } + chainID := ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }) + + f := crypto.FactorySECP256K1R{} + sk, err := f.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + + c := Codec{} + p := wrappers.Packer{Bytes: outputPaymentBytes} + outputPayment := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: outputTakeOrLeaveBytes} + outputTakeOrLeave := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: inputPaymentBytes} + inputPayment := c.unmarshalInput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + inputPaymentSigner 
:= &InputSigner{ + Keys: []*crypto.PrivateKeySECP256K1R{ + sk.(*crypto.PrivateKeySECP256K1R), + }, + } + + b := Builder{ + NetworkID: 0, + ChainID: chainID, + } + tx, err := b.NewTx( + /*inputs=*/ []Input{inputPayment}, + /*outputs=*/ []Output{outputPayment, outputTakeOrLeave}, + /*signers=*/ []*InputSigner{inputPaymentSigner}, + ) + if err != nil { + t.Fatal(err) + } + + unsignedTxBytes, err := c.MarshalUnsignedTx(tx) + if err != nil { + t.Fatal(err) + } + + expected := []byte{ + // codec: + 0x00, 0x00, 0x00, 0x02, + // networkID: + 0x00, 0x00, 0x00, 0x00, + // chainID: + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + // number of outputs: + 0x00, 0x00, 0x00, 0x02, + // output payment: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + // output take-or-leave: + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // input payment: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, + } + if !bytes.Equal(unsignedTxBytes, expected) { + t.Fatalf("Codec.MarshalUnsignedTx returned:\n0x%x\nExpected:\n0x%x", unsignedTxBytes, expected) + } +} + +func TestSignedTx(t *testing.T) { + skBytes := []byte{ + 0x98, 0xcb, 0x07, 0x7f, 0x97, 0x2f, 0xeb, 0x04, + 0x81, 0xf1, 0xd8, 0x94, 0xf2, 0x72, 0xc6, 0xa1, + 0xe3, 0xc1, 0x5e, 0x27, 0x2a, 0x16, 0x58, 0xff, + 0x71, 0x64, 0x44, 0xf4, 0x65, 0x20, 0x00, 0x70, + } + outputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + outputTakeOrLeaveBytes := []byte{ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + inputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 
0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, + } + chainID := ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }) + + f := crypto.FactorySECP256K1R{} + sk, err := f.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + + c := Codec{} + p := wrappers.Packer{Bytes: outputPaymentBytes} + outputPayment := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: outputTakeOrLeaveBytes} + outputTakeOrLeave := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: inputPaymentBytes} + inputPayment := c.unmarshalInput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + inputPaymentSigner := &InputSigner{ + Keys: []*crypto.PrivateKeySECP256K1R{ + sk.(*crypto.PrivateKeySECP256K1R), + }, + } + + b := Builder{ + NetworkID: 0, + ChainID: chainID, + } + tx, err := b.NewTx( + /*inputs=*/ []Input{inputPayment}, + /*outputs=*/ []Output{outputPayment, outputTakeOrLeave}, + /*signers=*/ []*InputSigner{inputPaymentSigner}, + ) + if err != nil { + t.Fatal(err) + } + signedTxBytes := tx.Bytes() + + expected := []byte{ + // unsigned transaction: + 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, + 0x6d, 0x55, 0xa9, 0x55, 0xc3, 0x34, 0x41, 0x28, + 0xe0, 0x60, 0x12, 0x8e, 0xde, 0x35, 0x23, 0xa2, + 0x4a, 0x46, 0x1c, 0x89, 
0x43, 0xab, 0x08, 0x59, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, + // signature: + 0x41, 0x3a, 0x8e, 0x30, 0x72, 0x1b, 0xd3, 0xdd, + 0x0f, 0x49, 0x3d, 0x0d, 0xed, 0x82, 0x8b, 0x90, + 0x8c, 0xfb, 0x5d, 0xd5, 0x4b, 0x63, 0x76, 0x42, + 0x99, 0x66, 0xda, 0x10, 0x14, 0x81, 0x89, 0x2d, + 0x22, 0x4b, 0x6c, 0x95, 0x6b, 0x93, 0x05, 0x13, + 0x83, 0x5d, 0xea, 0xa4, 0x44, 0x8f, 0x46, 0xb1, + 0x23, 0x45, 0x47, 0x05, 0xe9, 0xa5, 0x3b, 0xfc, + 0x27, 0x09, 0x21, 0x1a, 0x5c, 0x5a, 0x58, 0xec, + 0x01, + } + + if !bytes.Equal(signedTxBytes, expected) { + t.Fatalf("Codec.MarshalTx returned:\n0x%x\nExpected:\n0x%x", signedTxBytes, expected) + } +} diff --git a/vms/spdagvm/unique_tx.go b/vms/spdagvm/unique_tx.go new file mode 100644 index 0000000..f775377 --- /dev/null +++ b/vms/spdagvm/unique_tx.go @@ -0,0 +1,273 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" +) + +var ( + errInvalidUTXO = errors.New("utxo doesn't exist") + errMissingUTXO = errors.New("missing utxo") + errUnknownTx = errors.New("transaction is unknown") + errRejectedTx = errors.New("transaction is rejected") +) + +// UniqueTx provides a de-duplication service for txs. This only provides a +// performance boost +type UniqueTx struct { + vm *VM + txID ids.ID + t *txState +} + +func (tx *UniqueTx) refresh() { + if tx.t == nil { + tx.t = &txState{} + } + if !tx.t.unique { + unique := tx.vm.state.UniqueTx(tx) + prevTx := tx.t.tx + if unique == tx { + // If no one was in the cache, make sure that there wasn't an + // intermediate object whose state I must reflect + if status, err := tx.vm.state.Status(tx.ID()); err == nil { + tx.t.status = status + tx.t.unique = true + } + } else { + // If someone is in the cache, they must be up to date + *tx = *unique + } + switch { + case tx.t.tx == nil && prevTx == nil: + if innerTx, err := tx.vm.state.Tx(tx.ID()); err == nil { + tx.t.tx = innerTx + } + case tx.t.tx == nil: + tx.t.tx = prevTx + } + } +} + +// Evict is called when this UniqueTx will no longer be returned from a cache +// lookup +func (tx *UniqueTx) Evict() { tx.t.unique = false } // Lock is already held here + +func (tx *UniqueTx) setStatus(status choices.Status) error { + tx.refresh() + if tx.t.status != status { + tx.t.status = status + return tx.vm.state.SetStatus(tx.ID(), status) + } + return nil +} + +func (tx *UniqueTx) addEvents(finalized func(choices.Status)) { + tx.refresh() + + if finalized != nil { + tx.t.finalized = append(tx.t.finalized, finalized) + } +} + +// ID returns the wrapped txID +func (tx *UniqueTx) ID() ids.ID { return tx.txID } + +// Accept is called when the transaction was finalized as accepted by consensus +func (tx *UniqueTx) Accept() { + if err := 
tx.setStatus(choices.Accepted); err != nil { + tx.vm.ctx.Log.Error("Failed to accept tx %s due to %s", tx.txID, err) + return + } + + // Remove spent UTXOs + for _, utxoID := range tx.InputIDs().List() { + if err := tx.vm.state.SpendUTXO(utxoID); err != nil { + tx.vm.ctx.Log.Error("Failed to spend utxo %s due to %s", utxoID, err) + return + } + } + + // Add new UTXOs + for _, utxo := range tx.utxos() { + if err := tx.vm.state.FundUTXO(utxo); err != nil { + tx.vm.ctx.Log.Error("Failed to fund utxo %s due to %s", utxoID, err) + return + } + } + + for _, finalized := range tx.t.finalized { + if finalized != nil { + finalized(choices.Accepted) + } + } + + if err := tx.vm.db.Commit(); err != nil { + tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", tx.txID, err) + } + + tx.t.deps = nil // Needed to prevent a memory leak +} + +// Reject is called when the transaction was finalized as rejected by consensus +func (tx *UniqueTx) Reject() { + if err := tx.setStatus(choices.Rejected); err != nil { + tx.vm.ctx.Log.Error("Failed to reject tx %s due to %s", tx.txID, err) + return + } + + tx.vm.ctx.Log.Debug("Rejecting Tx: %s", tx.ID()) + + for _, finalized := range tx.t.finalized { + if finalized != nil { + finalized(choices.Rejected) + } + } + + if err := tx.vm.db.Commit(); err != nil { + tx.vm.ctx.Log.Error("Failed to commit reject %s due to %s", tx.txID, err) + } + + tx.t.deps = nil // Needed to prevent a memory leak +} + +// Status returns the current status of this transaction +func (tx *UniqueTx) Status() choices.Status { + tx.refresh() + return tx.t.status +} + +// Dependencies returns the set of transactions this transaction builds on +func (tx *UniqueTx) Dependencies() []snowstorm.Tx { + tx.refresh() + if tx.t.tx != nil && len(tx.t.deps) == 0 { + txIDs := ids.Set{} + for _, in := range tx.t.tx.ins { + txID, _ := in.InputSource() + if !txIDs.Contains(txID) { + txIDs.Add(txID) + tx.t.deps = append(tx.t.deps, &UniqueTx{ + vm: tx.vm, + txID: txID, + }) + } + } + } 
+ return tx.t.deps +} + +// InputIDs returns the set of utxoIDs this transaction consumes +func (tx *UniqueTx) InputIDs() ids.Set { + tx.refresh() + if tx.t.tx != nil && tx.t.inputs.Len() == 0 { + for _, in := range tx.t.tx.ins { + tx.t.inputs.Add(in.InputID()) + } + } + return tx.t.inputs +} + +func (tx *UniqueTx) utxos() []*UTXO { + tx.refresh() + if tx.t.tx != nil && len(tx.t.tx.outs) != len(tx.t.outputs) { + tx.t.outputs = tx.t.tx.UTXOs() + } + return tx.t.outputs +} + +// Bytes returns the binary representation of this transaction +func (tx *UniqueTx) Bytes() []byte { + tx.refresh() + return tx.t.tx.Bytes() +} + +// Verify the validity of this transaction +func (tx *UniqueTx) Verify() error { + switch status := tx.Status(); status { + case choices.Unknown: + return errUnknownTx + case choices.Accepted: + return nil + case choices.Rejected: + return errRejectedTx + default: + return tx.VerifyState() + } +} + +// VerifyTx the validity of this transaction +func (tx *UniqueTx) VerifyTx() error { + tx.refresh() + + if tx.t.verifiedTx { + return tx.t.validity + } + + tx.t.verifiedTx = true + tx.t.validity = tx.t.tx.Verify(tx.vm.ctx, tx.vm.TxFee) + return tx.t.validity +} + +// VerifyState the validity of this transaction +func (tx *UniqueTx) VerifyState() error { + tx.VerifyTx() + + if tx.t.validity != nil || tx.t.verifiedState { + return tx.t.validity + } + + tx.t.verifiedState = true + + time := tx.vm.clock.Unix() + for _, in := range tx.t.tx.ins { + // Tx is spending spent/non-existent utxo + // Tx input doesn't unlock output + if utxo, err := tx.vm.state.UTXO(in.InputID()); err == nil { + if err := utxo.Out().Unlock(in, time); err != nil { + tx.t.validity = err + break + } + continue + } + inputTx, inputIndex := in.InputSource() + parent := &UniqueTx{ + vm: tx.vm, + txID: inputTx, + } + + // TODO: Replace with a switch? 
+ if err := parent.Verify(); err != nil { + tx.t.validity = errMissingUTXO + } else if status := parent.Status(); status.Decided() { + tx.t.validity = errMissingUTXO + } else if uint32(len(parent.t.tx.outs)) <= inputIndex { + tx.t.validity = errInvalidUTXO + } else if err := parent.t.tx.outs[int(inputIndex)].Unlock(in, time); err != nil { + tx.t.validity = err + } else { + continue + } + break + } + return tx.t.validity +} + +type txState struct { + unique, verifiedTx, verifiedState bool + validity error + + tx *Tx + inputs ids.Set + outputs []*UTXO + deps []snowstorm.Tx + + status choices.Status + + finalized []func(choices.Status) +} diff --git a/vms/spdagvm/utxo.go b/vms/spdagvm/utxo.go new file mode 100644 index 0000000..ac3171b --- /dev/null +++ b/vms/spdagvm/utxo.go @@ -0,0 +1,59 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// UTXO represents an unspent transaction output +type UTXO struct { + // The ID of the transaction that produced this UTXO + sourceID ids.ID + + // This UTXOs index within the transaction [Tx] that created this UTXO + // e.g. if this UTXO is the first entry in [Tx.outs] then sourceIndex is 0 + sourceIndex uint32 + + // The ID of this UTXO + id ids.ID + + // The output this UTXO wraps + out Output + + // The binary representation of this UTXO + bytes []byte +} + +// Source returns the origin of this utxo. 
Specifically the txID and the +// outputIndex this utxo represents +func (u *UTXO) Source() (ids.ID, uint32) { return u.sourceID, u.sourceIndex } + +// ID returns a unique identifier for this utxo +func (u *UTXO) ID() ids.ID { return u.id } + +// Out returns the output this utxo wraps +func (u *UTXO) Out() Output { return u.out } + +// Bytes returns a binary representation of this utxo +func (u *UTXO) Bytes() []byte { return u.bytes } + +// PrefixedString converts this utxo to a string representation with a prefix +// for each newline +func (u *UTXO) PrefixedString(prefix string) string { + return fmt.Sprintf("UTXO(\n"+ + "%s Source ID = %s\n"+ + "%s Source Index = %d\n"+ + "%s Output = %s\n"+ + "%s)", + prefix, u.sourceID, + prefix, u.sourceIndex, + prefix, u.out.PrefixedString(fmt.Sprintf("%s ", prefix)), + prefix, + ) +} + +func (u *UTXO) String() string { return u.PrefixedString("") } diff --git a/vms/spdagvm/utxo_test.go b/vms/spdagvm/utxo_test.go new file mode 100644 index 0000000..f7ffdea --- /dev/null +++ b/vms/spdagvm/utxo_test.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package spdagvm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/wrappers" +) + +func TestUTXO(t *testing.T) { + skBytes := []byte{ + 0x98, 0xcb, 0x07, 0x7f, 0x97, 0x2f, 0xeb, 0x04, + 0x81, 0xf1, 0xd8, 0x94, 0xf2, 0x72, 0xc6, 0xa1, + 0xe3, 0xc1, 0x5e, 0x27, 0x2a, 0x16, 0x58, 0xff, + 0x71, 0x64, 0x44, 0xf4, 0x65, 0x20, 0x00, 0x70, + } + outputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + outputTakeOrLeaveBytes := []byte{ + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + inputPaymentBytes := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, + } + + f := crypto.FactorySECP256K1R{} + sk, err := f.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + + c := Codec{} + p := 
wrappers.Packer{Bytes: outputPaymentBytes} + outputPayment := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: outputTakeOrLeaveBytes} + outputTakeOrLeave := c.unmarshalOutput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + p = wrappers.Packer{Bytes: inputPaymentBytes} + inputPayment := c.unmarshalInput(&p) + if p.Errored() { + t.Fatal(p.Err) + } + + inputPaymentSigner := &InputSigner{ + Keys: []*crypto.PrivateKeySECP256K1R{ + sk.(*crypto.PrivateKeySECP256K1R), + }, + } + + b := Builder{ + NetworkID: 0, + ChainID: ids.Empty, + } + tx, err := b.NewTx( + /*inputs=*/ []Input{inputPayment}, + /*outputs=*/ []Output{outputPayment, outputTakeOrLeave}, + /*signers=*/ []*InputSigner{inputPaymentSigner}, + ) + if err != nil { + t.Fatal(err) + } + utxos := tx.UTXOs() + if len(utxos) != 2 { + t.Fatalf("Produced %d UTXOs", len(utxos)) + } + utxo := utxos[1] + utxoBytes, err := c.MarshalUTXO(utxo) + if err != nil { + t.Fatal(err) + } + + expected := []byte{ + // txID: + 0xc7, 0xe6, 0xe2, 0xd3, 0x83, 0xa6, 0xd8, 0xac, + 0x54, 0xdc, 0xc8, 0x4e, 0x32, 0x7c, 0x22, 0xa5, + 0x6a, 0xf1, 0x25, 0x88, 0x33, 0x75, 0x0f, 0x94, + 0x54, 0x1b, 0xdb, 0xa2, 0xc0, 0xac, 0x67, 0x38, + // output index: + 0x00, 0x00, 0x00, 0x01, + // output: + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x01, 0x51, 0x02, 0x5c, 0x61, + 0xfb, 0xcf, 0xc0, 0x78, 0xf6, 0x93, 0x34, 0xf8, + 0x34, 0xbe, 0x6d, 0xd2, 0x6d, 0x55, 0xa9, 0x55, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xd5, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + } + + if !bytes.Equal(utxoBytes, expected) { + t.Fatalf("Codec.MarshalUTXO returned:\n0x%x\nExpected:\n0x%x", utxoBytes, expected) + } +} diff --git a/vms/spdagvm/vm.go b/vms/spdagvm/vm.go new file 
mode 100644 index 0000000..c5a78d8 --- /dev/null +++ b/vms/spdagvm/vm.go @@ -0,0 +1,551 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package spdagvm + +import ( + "errors" + "fmt" + "strconv" + "time" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/utils/timer" + + jsoncodec "github.com/ava-labs/gecko/utils/json" +) + +const ( + batchTimeout = time.Second + batchSize = 30 + stateCacheSize = 10000 + idCacheSize = 10000 + txCacheSize = 10000 +) + +var ( + errNoKeys = errors.New("no private keys were provided") + errUnknownUTXOType = errors.New("utxo has unknown output type") + errAsset = errors.New("assetID must be blank") + errAmountOverflow = errors.New("the amount of this transaction plus the transaction fee overflows") + errUnsupportedFXs = errors.New("unsupported feature extensions") +) + +// VM implements the avalanche.DAGVM interface +type VM struct { + // The context of this vm + ctx *snow.Context + + // Used to check local time + clock timer.Clock + + // State management + state *prefixedState + + // Transaction issuing + timer *timer.Timer + + // Transactions will be sent to consensus after at most [batchTimeout] + batchTimeout time.Duration + + // Transactions that have not yet been sent to consensus + txs []snowstorm.Tx + + // Channel through which the vm notifies the consensus engine + // that there are transactions to add to consensus + toEngine chan<- common.Message + + // The transaction fee, which the sender 
pays. The fee is burned. + TxFee uint64 + + baseDB database.Database + db *versiondb.Database +} + +/* + ****************************************************************************** + ******************************** Avalanche API ******************************* + ****************************************************************************** + */ + +// Initialize implements the avalanche.DAGVM interface +func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, +) error { + if len(fxs) != 0 { + return errUnsupportedFXs + } + vm.ctx = ctx + vm.baseDB = db + vm.db = versiondb.New(db) + vm.state = &prefixedState{ + state: &state{ + c: &cache.LRU{Size: stateCacheSize}, + vm: vm, + }, + + tx: &cache.LRU{Size: idCacheSize}, + utxo: &cache.LRU{Size: idCacheSize}, + txStatus: &cache.LRU{Size: idCacheSize}, + funds: &cache.LRU{Size: idCacheSize}, + + uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, + } + + // Initialize the database if it has not already been initialized + if dbStatus, err := vm.state.DBInitialized(); err != nil || dbStatus == choices.Unknown { + if err := vm.initState(genesisBytes); err != nil { + return err + } + } + + vm.timer = timer.NewTimer(func() { + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm.FlushTxs() + }) + go vm.ctx.Log.RecoverAndPanic(vm.timer.Dispatch) + vm.batchTimeout = batchTimeout + vm.toEngine = toEngine + + return vm.db.Commit() +} + +// Shutdown implements the avalanche.DAGVM interface +func (vm *VM) Shutdown() { + vm.timer.Stop() + if err := vm.baseDB.Close(); err != nil { + vm.ctx.Log.Error("Closing the database failed with %s", err) + } +} + +// CreateHandlers makes new service objects with references to the vm +func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { + newServer := rpc.NewServer() + codec := jsoncodec.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, 
"application/json;charset=UTF-8") + newServer.RegisterService(&Service{vm: vm}, "spdag") // name this service "spdag" + return map[string]*common.HTTPHandler{ + "": &common.HTTPHandler{Handler: newServer}, + } +} + +// CreateStaticHandlers makes new service objects without references to the vm +func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { + newServer := rpc.NewServer() + codec := jsoncodec.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&StaticService{}, "spdag") // name this service "spdag" + return map[string]*common.HTTPHandler{ + // NoLock because the static functions probably wont be stateful (i.e. no + // write operations) + "": &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer}, + } +} + +// PendingTxs returns the transactions that have not yet +// been added to consensus +func (vm *VM) PendingTxs() []snowstorm.Tx { + txs := vm.txs + + vm.txs = nil + vm.timer.Cancel() + + return txs +} + +// ParseTx parses bytes to a *UniqueTx +func (vm *VM) ParseTx(b []byte) (snowstorm.Tx, error) { return vm.parseTx(b, nil) } + +// GetTx returns the transaction whose ID is [txID] +func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) { + rawTx, err := vm.state.Tx(txID) + if err != nil { + return nil, err + } + + tx := &UniqueTx{ + vm: vm, + txID: rawTx.ID(), + t: &txState{ + tx: rawTx, + }, + } + // Verify must be called in the case the that tx was flushed from the unique + // cache. 
+ if err := tx.VerifyState(); err != nil { + vm.ctx.Log.Debug("GetTx resulted in fetching a tx that failed verification: %s", err) + tx.setStatus(choices.Rejected) + } + + return tx, nil +} + +/* + ****************************************************************************** + ******************************** Wallet API ********************************** + ****************************************************************************** + */ + +// CreateKey returns a new base58-encoded private key +func (vm *VM) CreateKey() (string, error) { + factory := crypto.FactorySECP256K1R{} + pk, err := factory.NewPrivateKey() + if err != nil { + return "", err + } + cb58 := formatting.CB58{Bytes: pk.Bytes()} + return cb58.String(), nil +} + +// GetAddress returns the string repr. of the address +// controlled by a base58-encoded private key +func (vm *VM) GetAddress(privKeyStr string) (string, error) { + cb58 := formatting.CB58{} + if err := cb58.FromString(privKeyStr); err != nil { + return "", err + } + factory := crypto.FactorySECP256K1R{} + pk, err := factory.ToPrivateKey(cb58.Bytes) + if err != nil { + return "", err + } + return pk.PublicKey().Address().String(), nil +} + +// GetBalance returns [address]'s balance of the asset whose +// ID is [assetID] +func (vm *VM) GetBalance(address, assetID string) (uint64, error) { + if assetID != "" { + return 0, errAsset + } + + // Parse the string repr. of the address to an ids.ShortID + addr, err := ids.ShortFromString(address) + if err != nil { + return 0, err + } + + addrSet := ids.ShortSet{addr.Key(): true} // Note this set contains only [addr] + utxos, err := vm.GetUTXOs(addrSet) // The UTXOs that reference [addr] + if err != nil { + return 0, err + } + + // Go through each UTXO that references [addr]. 
+ // If the private key that controls [addr] may spend the UTXO, + // add its amount to [balance] + balance := uint64(0) + currentTime := vm.clock.Unix() + for _, utxo := range utxos { + switch out := utxo.Out().(type) { + case *OutputPayment: + // Because [addrSet] has size 1, we know [addr] is + // referenced in [out] + if currentTime > out.Locktime() && out.Threshold() == 1 { + amount, err := math.Add64(balance, out.Amount()) + if err != nil { + return 0, err + } + balance = amount // This is not a mistake. It should _not_ be +=. The adding is done by math.Add64 a few lines above. + } + case *OutputTakeOrLeave: + addresses := ids.ShortSet{} + addresses.Add(out.Addresses1()...) + if addresses.Contains(addr) && currentTime > out.Locktime1() && out.Threshold1() == 1 { + amount, err := math.Add64(balance, out.Amount()) + if err != nil { + return 0, err + } + balance = amount + } + + addresses.Clear() + addresses.Add(out.Addresses2()...) + if addresses.Contains(addr) && currentTime > out.Locktime2() && out.Threshold2() == 1 { + amount, err := math.Add64(balance, out.Amount()) + if err != nil { + return 0, err + } + balance = amount + } + default: // TODO: Should this error? Or should we just ignore outputs we don't recognize? + return 0, errUnknownOutputType + } + } + return balance, nil +} + +// ListAssets returns the IDs of assets such that [address] has +// a non-zero balance of that asset +func (vm *VM) ListAssets(address string) ([]string, error) { + balance, err := vm.GetBalance(address, "") + if balance > 0 && err == nil { + return []string{""}, nil + } + return []string{}, err +} + +// Send issues a transaction that sends [amount] from the addresses controlled +// by [fromPKs] to [toAddrStr]. Send returns the transaction's ID. Any "change" +// will be sent to the address controlled by the first element of [fromPKs]. 
+func (vm *VM) Send(amount uint64, assetID, toAddrStr string, fromPKs []string) (string, error) { + // The assetID must be empty + if assetID != "" { + return "", errAsset + } + + // Add all of the keys in [fromPKs] to a keychain + keychain := KeyChain{} + factory := crypto.FactorySECP256K1R{} + cb58 := formatting.CB58{} + for _, fpk := range fromPKs { + // Parse the string repr. of the private key to bytes + if err := cb58.FromString(fpk); err != nil { + return "", err + } + // Parse the byte repr. to a crypto.PrivateKey + pk, err := factory.ToPrivateKey(cb58.Bytes) + if err != nil { + return "", err + } + // Parse the crpyo.PrivateKey repr. to a crypto.PrivateKeySECP256K1R + keychain.Add(pk.(*crypto.PrivateKeySECP256K1R)) + } + + // Parse [toAddrStr] to an ids.ShortID + toAddr, err := ids.ShortFromString(toAddrStr) + if err != nil { + return "", err + } + toAddrs := []ids.ShortID{toAddr} + outAddrStr, err := vm.GetAddress(fromPKs[0]) + if err != nil { + return "", err + } + outAddr, err := ids.ShortFromString(outAddrStr) + if err != nil { + return "", err + } + + // Get the UTXOs controlled by the keys in [fromPKs] + utxos, err := vm.GetUTXOs(keychain.Addresses()) + if err != nil { + return "", err + } + + // Build the transaction + builder := Builder{ + NetworkID: vm.ctx.NetworkID, + ChainID: vm.ctx.ChainID, + } + currentTime := vm.clock.Unix() + tx, err := builder.NewTxFromUTXOs(&keychain, utxos, amount, vm.TxFee, 0, 1, toAddrs, outAddr, currentTime) + if err != nil { + return "", err + } + + // Wrap the *Tx to make it a snowstorm.Tx + wrappedTx, err := vm.wrapTx(tx, nil) + if err != nil { + return "", err + } + + // Issue the transaction + vm.issueTx(wrappedTx) + return tx.ID().String(), nil +} + +// GetTxHistory takes an address and returns an ordered list of known records containing +// key-value pairs of data. 
// GetTxHistory returns display metadata for the UTXOs currently referencing
// [address]: a title, the ordered field keys, a map from field key to display
// name, and one record (field key -> value) per UTXO.
// NOTE(review): despite the name, this reports the address's current UTXOs,
// not a historical transaction list — confirm intended semantics.
func (vm *VM) GetTxHistory(address string) (string, []string, map[string]string, []map[string]string, error) {
	// Parse the string repr. of the address
	addr, err := ids.ShortFromString(address)
	if err != nil {
		return "", nil, nil, nil, err
	}
	addrSet := ids.ShortSet{addr.Key(): true}

	// All UTXOs referencing [address]
	utxos, err := vm.GetUTXOs(addrSet)
	if err != nil {
		return "", nil, nil, nil, err
	}

	// Build one record per UTXO
	result := []map[string]string{}
	for _, utxo := range utxos {
		r := map[string]string{
			"TxID":    utxo.sourceID.String(),
			"TxIndex": fmt.Sprint(utxo.sourceIndex),
		}
		// Amount/Locktime depend on the concrete output type
		switch v := utxo.out.(type) {
		case *OutputPayment:
			r["Amount"] = strconv.FormatUint(v.Amount(), 10)
			r["Locktime"] = strconv.FormatUint(v.Locktime(), 10)
		case *OutputTakeOrLeave:
			r["Amount"] = strconv.FormatUint(v.Amount(), 10)
			// Only the first locktime is reported for take-or-leave outputs
			r["Locktime"] = strconv.FormatUint(v.Locktime1(), 10)
		default:
			return "", nil, nil, nil, errUnknownUTXOType
		}
		result = append(result, r)
	}
	title := "UTXO Data"
	fieldKeys := []string{"TxID", "TxIndex", "Amount", "Locktime"}
	fieldNames := map[string]string{
		"TxID":     "TxID",
		"TxIndex":  "TxIndex",
		"Amount":   "Amount",
		"Locktime": "Locktime",
	}
	return title, fieldKeys, fieldNames, result, nil
}

// GetUTXOs returns the UTXOs such that at least one address in [addrs] is
// referenced in the UTXO.
func (vm *VM) GetUTXOs(addrs ids.ShortSet) ([]*UTXO, error) {
	// Collect the IDs of the UTXOs funding each address.
	// NOTE(review): a state.Funds error is treated as "no funds for this
	// address" and silently skipped — confirm this is intentional.
	utxoIDs := ids.Set{}
	for _, addr := range addrs.List() {
		if utxos, err := vm.state.Funds(addr.LongID()); err == nil {
			utxoIDs.Add(utxos...)
		}
	}

	// Fetch each referenced UTXO from state
	utxos := []*UTXO{}
	for _, utxoID := range utxoIDs.List() {
		utxo, err := vm.state.UTXO(utxoID)
		if err != nil {
			return nil, err
		}
		utxos = append(utxos, utxo)
	}
	return utxos, nil
}

/*
 ******************************************************************************
 ********************************** Timer API *********************************
 ******************************************************************************
 */

// FlushTxs into consensus.
// Notifies the consensus engine that transactions are pending; if the engine
// channel is full, retries after [vm.batchTimeout].
func (vm *VM) FlushTxs() {
	vm.timer.Cancel()
	if len(vm.txs) != 0 {
		select {
		case vm.toEngine <- common.PendingTxs:
		default:
			// Engine channel is full; reschedule instead of blocking
			vm.ctx.Log.Warn("Delaying issuance of transactions due to contention")
			vm.timer.SetTimeoutIn(vm.batchTimeout)
		}
	}
}

/*
 ******************************************************************************
 ******************************* Deprecated API *******************************
 ******************************************************************************
 */

// IssueTx implements the avalanche.DAGVM interface.
// Parses [b] into a transaction, verifies it, and queues it for issuance;
// returns the transaction's ID.
func (vm *VM) IssueTx(b []byte, finalized func(choices.Status)) (ids.ID, error) {
	tx, err := vm.parseTx(b, finalized)
	if err != nil {
		return ids.ID{}, err
	}
	if err := tx.Verify(); err != nil {
		return ids.ID{}, err
	}
	vm.issueTx(tx)
	return tx.ID(), nil
}

/*
 ******************************************************************************
 ******************************* Implementation *******************************
 ******************************************************************************
 */

// Initialize state using [genesisBytes] as the genesis data.
// The genesis tx is stored with status Accepted and each of its UTXOs is
// funded, then the database is marked initialized.
func (vm *VM) initState(genesisBytes []byte) error {
	c := Codec{}
	tx, err := c.UnmarshalTx(genesisBytes)
	if err != nil {
		return err
	}
	if err := vm.state.SetTx(tx.ID(), tx); err != nil {
		return err
	}
	// The genesis tx is accepted by definition
	if err := vm.state.SetStatus(tx.ID(), choices.Accepted); err != nil {
		return err
	}
	// Make the genesis tx's outputs spendable
	for _, utxo := range tx.UTXOs() {
		if err := vm.state.FundUTXO(utxo); err != nil {
			return err
		}
	}

	return vm.state.SetDBInitialized(choices.Processing)
}

// parseTx unmarshals [b] and wraps the result in a *UniqueTx
// TODO: Remove the callback from this function
func (vm *VM) parseTx(b []byte, finalized func(choices.Status)) (*UniqueTx, error) {
	c := Codec{}
	rawTx, err := c.UnmarshalTx(b)
	if err != nil {
		return nil, err
	}
	return vm.wrapTx(rawTx, finalized)
}

// wrapTx wraps [rawTx] in a *UniqueTx; if the tx has not been seen before
// it is persisted with status Processing
// TODO: Remove the callback from this function
func (vm *VM) wrapTx(rawTx *Tx, finalized func(choices.Status)) (*UniqueTx, error) {
	tx := &UniqueTx{
		vm:   vm,
		txID: rawTx.ID(),
		t: &txState{
			tx: rawTx,
		},
	}
	if err := tx.VerifyTx(); err != nil {
		return nil, err
	}

	if tx.Status() == choices.Unknown {
		// First time we've seen this tx; persist it and mark it in-flight
		if err := vm.state.SetTx(tx.ID(), tx.t.tx); err != nil {
			return nil, err
		}
		tx.setStatus(choices.Processing)
	}

	tx.addEvents(finalized)
	return tx, nil
}

// issueTx queues [tx] for issuance into consensus
func (vm *VM) issueTx(tx snowstorm.Tx) {
	vm.txs = append(vm.txs, tx)
	switch {
	// Flush the transactions if enough transactions are waiting
	case len(vm.txs) == batchSize:
		vm.FlushTxs()
	// Set timeout so we flush this transaction after at most [p.batchTimeout]
	case len(vm.txs) == 1:
		vm.timer.SetTimeoutIn(vm.batchTimeout)
	}
}
diff --git a/vms/spdagvm/vm_test.go b/vms/spdagvm/vm_test.go
new file mode 100644
index 0000000..03a3b8e
--- /dev/null
+++ b/vms/spdagvm/vm_test.go
@@ -0,0 +1,810 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package spdagvm

import (
	"math"
	"testing"

	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/consensus/snowstorm"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/utils/units"
)

// Shared test fixtures, populated once by init() below
var keys []*crypto.PrivateKeySECP256K1R                  // test private keys (copied from genesis.go)
var ctx *snow.Context                                    // shared test context
var avaChainID = ids.NewID([32]byte{'y', 'e', 'e', 't'}) // chain ID used by all tests
var defaultInitBalances = make(map[string]uint64)        // address string -> genesis balance

const txFeeTest = 0 // Tx fee to use for tests

const (
	defaultInitBalance = uint64(5000000000) // Measured in NanoAva
)

// init builds the shared test context and parses the hard-coded private
// keys, giving each corresponding address [defaultInitBalance] at genesis
func init() {
	ctx = snow.DefaultContextTest()
	ctx.ChainID = avaChainID
	cb58 := formatting.CB58{}
	factory := crypto.FactorySECP256K1R{}

	// String reprs. of private keys. Copy-pasted from genesis.go.
	for _, key := range []string{
		"24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5",
		"2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY",
		"cxb7KpGWhDMALTjNNSJ7UQkkomPesyWAPUaWRGdyeBNzR6f35",
	} {
		ctx.Log.AssertNoError(cb58.FromString(key))
		pk, err := factory.ToPrivateKey(cb58.Bytes)
		ctx.Log.AssertNoError(err)
		keys = append(keys, pk.(*crypto.PrivateKeySECP256K1R))

		defaultInitBalances[pk.PublicKey().Address().String()] = defaultInitBalance
	}
}

// GenesisTx is the genesis transaction
// The amount given to each address is determined by [initBalances]
// [initBalances] keys are string reprs.
of addresses +// [initBalances] values are the amount of NanoAva they have at genesis +func GenesisTx(initBalances map[string]uint64) *Tx { + builder := Builder{ + NetworkID: 0, + ChainID: avaChainID, + } + + outputs := []Output(nil) + for _, key := range keys { + addr := key.PublicKey().Address() + if balance, ok := initBalances[addr.String()]; ok { + outputs = append(outputs, + builder.NewOutputPayment( + /*amount=*/ balance, + /*locktime=*/ 0, + /*threshold=*/ 1, + /*addresses=*/ []ids.ShortID{addr}, + ), + ) + } + + } + + result, _ := builder.NewTx( + /*ins=*/ nil, + /*outs=*/ outputs, + /*signers=*/ nil, + ) + return result +} + +func TestAva(t *testing.T) { + // Give + genesisTx := GenesisTx(defaultInitBalances) + + vmDB := memdb.New() + + msgChan := make(chan common.Message, 1) + + vm := &VM{} + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + builder := Builder{ + NetworkID: 0, + ChainID: avaChainID, + } + tx1, err := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 0, + /*amount=*/ 5*units.Ava, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 3*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], + }}, + }, + ) + ctx.Log.AssertNoError(err) + tx1Bytes := tx1.Bytes() + + ctx.Lock.Lock() + vm.IssueTx(tx1Bytes, nil) + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("Should have returned a tx") + } else if tx := txs[0]; !tx.ID().Equals(tx1.ID()) { + t.Fatalf("Should have returned %s", tx1.ID()) + } + ctx.Lock.Unlock() + + tx2, err := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ tx1.ID(), + /*txIndex=*/ 0, + /*amount=*/ 
3*units.Ava, + /*sigs=*/ []*Sig{}, + ), + }, + /*outs=*/ nil, + /*signers=*/ []*InputSigner{&InputSigner{}}, + ) + ctx.Log.AssertNoError(err) + tx2Bytes := tx2.Bytes() + + ctx.Lock.Lock() + vm.IssueTx(tx2Bytes, nil) + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } +} + +func TestInvalidSpentTx(t *testing.T) { + genesisTx := GenesisTx(defaultInitBalances) + + vmDB := memdb.New() + + msgChan := make(chan common.Message, 1) + + vm := &VM{} + + ctx.Lock.Lock() + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + builder := Builder{ + NetworkID: 0, + ChainID: avaChainID, + } + tx1, _ := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 0, + /*amount=*/ 5*units.Ava, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 3*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], + }}, + }, + ) + tx2, _ := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 0, + /*amount=*/ 5*units.Ava, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 2*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], + }}, + }, + ) + + wrappedTx1, err := vm.wrapTx(tx1, nil) + if err != nil { + t.Fatal(err) + } + + if err := wrappedTx1.Verify(); err != nil { + t.Fatal(err) + } + + wrappedTx1.Accept() + + wrappedTx2, err := vm.wrapTx(tx2, nil) + if err != nil { + t.Fatal(err) + } + + if err := wrappedTx2.Verify(); err == nil { + t.Fatalf("Should have failed verification") + } + ctx.Lock.Unlock() +} + +func 
TestInvalidTxVerification(t *testing.T) { + genesisTx := GenesisTx(defaultInitBalances) + + vmDB := memdb.New() + + msgChan := make(chan common.Message, 1) + + vm := &VM{} + + ctx.Lock.Lock() + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + builder := Builder{ + NetworkID: 0, + ChainID: avaChainID, + } + tx, _ := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 2345, + /*amount=*/ 50000+txFeeTest, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 50000, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], + }}, + }, + ) + + wrappedTx, err := vm.wrapTx(tx, nil) + if err != nil { + t.Fatal(err) + } + + if err := wrappedTx.Verify(); err == nil { + t.Fatalf("Should have failed verification") + } + + vm.state.uniqueTx.Flush() + + wrappedTx2, err := vm.wrapTx(tx, nil) + if err != nil { + t.Fatal(err) + } + + if err := wrappedTx2.Verify(); err == nil { + t.Fatalf("Should have failed verification") + } + ctx.Lock.Unlock() +} + +func TestRPCAPI(t *testing.T) { + // Initialize ava vm with the genesis transaction + genesisTx := GenesisTx(defaultInitBalances) + vmDB := memdb.New() + msgChan := make(chan common.Message, 1) + vm := &VM{} + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + // Key: string repr. of an address + // Value: string repr. 
of the private key that controls the address + addrToPK := map[string]string{} + + // Inverse of the above map + pkToAddr := map[string]string{} + + pks := []string{} // List of private keys + addresses := []string{} // List of addresses controlled by the private keys + + // Populate the above data structures using [keys] + for _, v := range keys { + cb58 := formatting.CB58{Bytes: v.Bytes()} + pk := cb58.String() + + address := v.PublicKey().Address().String() + + addrToPK[address] = pk + pkToAddr[pk] = address + + pks = append(pks, pk) + addresses = append(addresses, address) + } + + // Ensure GetAddress and GetBalance return the correct values for the + // addresses in the genesis transactions + for addr, pk := range addrToPK { + ctx.Lock.Lock() + if a, err := vm.GetAddress(pk); err != nil { + t.Fatalf("GetAddress(%q): %s", pk, err) + } else if a != addr { + t.Fatalf("GetAddress(%q): Addresses Not Equal(%q,%q)", pk, addr, a) + } else if balance, err := vm.GetBalance(addr, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr, err) + } else if balance != defaultInitBalance { + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", addr, "", defaultInitBalance, balance) + } + ctx.Lock.Unlock() + } + + // Create a new key + ctx.Lock.Lock() + addr1PrivKey, err := vm.CreateKey() + if err != nil { + t.Fatalf("CreateKey(): %s", err) + } + + // The address of the key we just created + addr1, err := vm.GetAddress(addr1PrivKey) + if err != nil { + t.Fatalf("GetAddress(%q): %s", addr1PrivKey, err) + } + + send1Amt := uint64(10000) + // Ensure the balance of the new address is 0 + if testbal, err := vm.GetBalance(addr1, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr1, err) + } else if testbal != 0 { + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", addr1, "", 0, testbal) + // The only valid asset ID is ava + } else if _, err = vm.GetBalance(addr1, "thisshouldfail"); err == nil { + t.Fatalf("GetBalance(%q): passed when it should have failed on bad 
assetID", addr1) + } else if _, err = vm.Send(100, "thisshouldfail", addr1, pks); err == nil || err != errAsset { + t.Fatalf("Send(%d,%q,%q,%v): passed when it should have failed on bad assetID", 100, "thisshouldfail", addr1, pks) + // Ensure we can't send more funds from this address than the address has + } else if _, err = vm.Send(4000000000000000, "", addr1, pks); err == nil || err != errInsufficientFunds { + t.Fatalf("Send(%d,%q,%q,%v): passed when it should have failed on insufficient funds", 4000000000000000, "", addr1, pks) + // Send [send1Amt] NanoAva from [pks[0]] to [addr1] + } else if _, err = vm.Send(send1Amt, "", addr1, []string{pks[0]}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send1Amt, "", addr1, []string{pks[0]}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + // There should be one pending transaction (the send we just did). + // Accept that transaction. + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d ; returned: %d", 1, len(txs)) + } else { + txs[0].Accept() + } + if txs := vm.PendingTxs(); len(txs) != 0 { + t.Fatalf("PendingTxs(): there should not have been any pending transactions") + } + + send2Amt := uint64(10000) + // Ensure that the balance of the address we sent [send1Amt] to is [send1Amt] + if testbal, err := vm.GetBalance(addr1, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr1, err) + } else if testbal != send1Amt { + t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d ; returned: %d", addr1, send1Amt, testbal) + // Send [send2Amt] from [pks[0]] to [addr1] + } else if _, err = vm.Send(send1Amt, "", addr1, []string{pks[0]}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send2Amt, "", addr1, []string{pks[0]}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong 
message") + } + + // There should be one pending transaction (the send we just did). + // Accept that transaction. + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("PendingTxs: returned wrong number of transactions - expected: %d ; returned: %d", 1, len(txs)) + } else { + txs[0].Accept() + } + if txs := vm.PendingTxs(); len(txs) != 0 { + t.Fatalf("PendingTxs(): there should not have been any pending transactions") + } + + // Ensure [addr1] has [send1Amt+send2Amt] + if testbal, err := vm.GetBalance(addr1, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr1, err) + } else if testbal != send1Amt+send2Amt { + t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d; returned: %d", addr1, send1Amt+send2Amt, testbal) + } + + send3Amt := uint64(10000) + // Ensure the balance of the address controlled by [pks[0]] is the initial amount + // it had (from genesis) minus the 2 amounts it sent to [addr1] minus 2 tx fees + if testbal, err := vm.GetBalance(pkToAddr[pks[0]], ""); err != nil { + t.Fatalf("GetBalance(%q): %s", pkToAddr[pks[0]], err) + } else if testbal != defaultInitBalance-send1Amt-send2Amt-2*txFeeTest { // TODO generalize + t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d; returned: %d", pkToAddr[pks[0]], defaultInitBalance-send1Amt-send2Amt-2*txFeeTest, testbal) + // Send [send3Amt] from [addr1] to the address controlled by [pks[0]] + } else if _, err = vm.Send(send3Amt, "", pkToAddr[pks[0]], []string{addr1PrivKey}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send3Amt-txFeeTest, "", pkToAddr[pks[0]], []string{addr1PrivKey}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d; returned: %d", 1, len(txs)) + } else { + txs[0].Accept() + } + if txs := vm.PendingTxs(); len(txs) != 0 { + 
t.Fatalf("PendingTxs(): there should not have been any pending transactions") + } + + send4Amt := uint64(30000) + // Ensure the balance of the address controlled by [pk[0]] is: + // [initial balance] - [send1Amt] - [send2Amt] + [send3Amt] - 2 * [txFeeTest] + if testbal, err := vm.GetBalance(pkToAddr[pks[0]], ""); err != nil { + t.Fatalf("GetBalance(%q): %s", pkToAddr[pks[0]], err) + } else if testbal != defaultInitBalance-send1Amt-send2Amt+send3Amt-2*txFeeTest { + t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d; returned: %d", pkToAddr[pks[0]], defaultInitBalance-send1Amt-send2Amt+send3Amt-2*txFeeTest, testbal) + // Send [send4Amt] to [addr1] from addresses controlled by [pks[1]] and [pks[2]] + } else if _, err = vm.Send(send4Amt, "", addr1, []string{pks[1], pks[2]}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send4Amt, "", addr1, []string{pks[1], pks[2]}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("<-txChan: returned wrong number of transactions - expected: %d ; returned: %d", 1, len(txs)) + } else { + txs[0].Accept() + } + if txs := vm.PendingTxs(); len(txs) != 0 { + t.Fatalf("PendingTxs(): there should not have been any pending transactions") + } + + // Ensure the balance of [addr1] is: + // [send1Amt] + [send2Amt] - [send3Amt] + [send4Amt] - [txFeeTest] + if testbal, err := vm.GetBalance(addr1, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr1, err) + } else if testbal != send1Amt+send2Amt-send3Amt+send4Amt-txFeeTest { + t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d; returned: %d", addr1, send1Amt+send2Amt-send3Amt+send4Amt-txFeeTest, testbal) + } + + // Ensure the sum of the balances of the addresses controlled by [pks[1]] and [pks[2]] is: + // [sum of their initial balances] - [send4Amt] - [txFeeTest] + if testbal1, err := 
vm.GetBalance(pkToAddr[pks[1]], ""); err != nil { + t.Fatalf("GetBalance(%q): %s", pkToAddr[pks[1]], err) + } else if testbal2, err := vm.GetBalance(pkToAddr[pks[2]], ""); err != nil { + t.Fatalf("GetBalance(%q): %s", pkToAddr[pks[2]], err) + } else if testbal1+testbal2 != defaultInitBalance*2-send4Amt-txFeeTest { + t.Fatalf("GetBalance(%q) + GetBalance(%q): returned wrong balance - expected: %d ; returned: %d", pkToAddr[pks[1]], pkToAddr[pks[2]], defaultInitBalance*2-send4Amt-txFeeTest, testbal1+testbal2) + } + ctx.Lock.Unlock() +} + +func TestMultipleSend(t *testing.T) { + // Initialize the vm + genesisTx := GenesisTx(defaultInitBalances) + vmDB := memdb.New() + msgChan := make(chan common.Message, 1) + vm := &VM{} + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + + // Initialize these data structures + addrToPK := map[string]string{} + pkToAddr := map[string]string{} + pks := []string{} + addresses := []string{} + for _, v := range keys { + cb58 := formatting.CB58{Bytes: v.Bytes()} + pk := cb58.String() + + address := v.PublicKey().Address().String() + + addrToPK[address] = pk + pkToAddr[pk] = address + + pks = append(pks, pk) + addresses = append(addresses, address) + } + + ctx.Lock.Lock() + // Ensure GetAddress and GetBalance return the correct values for + // the addresses mentioned in the genesis tx + for addr, pk := range addrToPK { + if a, err := vm.GetAddress(pk); err != nil { + t.Fatalf("GetAddress(%q): %s", pk, err) + } else if a != addr { + t.Fatalf("GetAddress(%q): Addresses Not Equal(%q,%q)", pk, addr, a) + // Ensure the balances of the addresses are [initAddrBalance] + } else if balance, err := vm.GetBalance(addr, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", addr, err) + } else if balance != defaultInitBalance { + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", addr, "", defaultInitBalance, balance) + } + } + + // Create a new private key + testPK, err := vm.CreateKey() + if err != nil { + t.Fatalf("CreateKey(): %s", err) + 
} + // Get the address controlled by the new private key + testaddr, err := vm.GetAddress(testPK) + if err != nil { + t.Fatalf("GetAddress(%q): %s", testPK, err) + } + + if testbal, err := vm.GetBalance(testaddr, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", testaddr, err) + } else if testbal != 0 { + // Balance of new address should be 0 + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", testaddr, "", 0, testbal) + } + if _, err = vm.GetBalance(testaddr, "thisshouldfail"); err == nil { + t.Fatalf("GetBalance(%q): passed when it should have failed on bad assetID", testaddr) + } + if _, err = vm.Send(100, "thisshouldfail", testaddr, pks); err == nil || err != errAsset { + t.Fatalf("Send(%d,%q,%q,%v): passed when it should have failed on bad assetID", 100, "thisshouldfail", testaddr, pks) + } + if _, err = vm.Send(4000000000000000, "", testaddr, pks); err == nil || err != errInsufficientFunds { + t.Fatalf("Send(%d,%q,%q,%v): passed when it should have failed on insufficient funds", 4000000000000000, "", testaddr, pks) + } + + // Send [send1Amt] and [send2Amt] from address controlled by [pks[0]] to [testAddr] + send1Amt := uint64(10000) + if _, err = vm.Send(send1Amt, "", testaddr, []string{pks[0]}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send1Amt, "", testaddr, []string{pks[0]}, err) + } + send2Amt := uint64(10000) + if _, err = vm.Send(send2Amt, "", testaddr, []string{pks[0]}); err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", send2Amt, "", testaddr, []string{pks[0]}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 2 { + t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d ; returned: %d", 2, len(txs)) + } else if inputs1 := txs[0].InputIDs(); inputs1.Len() != 1 { + t.Fatalf("inputs1: returned wrong number of inputs - expected: %d ; returned: %d", 1, 
inputs1.Len()) + } else if inputs2 := txs[1].InputIDs(); inputs2.Len() != 1 { + t.Fatalf("inputs2: returned wrong number of inputs - expected: %d ; returned: %d", 1, inputs2.Len()) + } else if !inputs1.Overlaps(inputs2) { + t.Fatalf("inputs1 doesn't conflict with inputs2 but it should") + } + + _, _, _, _, err = vm.GetTxHistory(testaddr) + if err != nil { + t.Fatalf("GetTxHistory(%s): %s", testaddr, err) + } + + _, _, _, _, err = vm.GetTxHistory(pkToAddr[pks[0]]) + if err != nil { + t.Fatalf("GetTxHistory(%s): %s", pkToAddr[pks[0]], err) + } + ctx.Lock.Unlock() +} + +func TestIssuePendingDependency(t *testing.T) { + // Initialize vm with genesis info + genesisTx := GenesisTx(defaultInitBalances) + vmDB := memdb.New() + msgChan := make(chan common.Message, 1) + + ctx.Lock.Lock() + vm := &VM{} + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + builder := Builder{ + NetworkID: 0, + ChainID: avaChainID, + } + tx1, _ := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ genesisTx.ID(), + /*txIndex=*/ 0, + /*amount=*/ 5*units.Ava, + /*sigs=*/ []*Sig{builder.NewSig(0 /*=index*/)}, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 3*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{Keys: []*crypto.PrivateKeySECP256K1R{ + keys[1], + }}, + }, + ) + tx1Bytes := tx1.Bytes() + + tx2, _ := builder.NewTx( + /*ins=*/ []Input{ + builder.NewInputPayment( + /*txID=*/ tx1.ID(), + /*txIndex=*/ 0, + /*amount=*/ 3*units.Ava, + /*sigs=*/ nil, + ), + }, + /*outs=*/ []Output{ + builder.NewOutputPayment( + /*amount=*/ 1*units.Ava, + /*locktime=*/ 0, + /*threshold=*/ 0, + /*addresses=*/ nil, + ), + }, + /*signers=*/ []*InputSigner{ + &InputSigner{}, + }, + ) + tx2Bytes := tx2.Bytes() + + vm.IssueTx(tx1Bytes, nil) + vm.IssueTx(tx2Bytes, nil) + + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong 
message") + } + + ctx.Lock.Lock() + + txs := vm.PendingTxs() + + var avlTx1 snowstorm.Tx + var avlTx2 snowstorm.Tx + + if txs[0].ID().Equals(tx1.ID()) { + avlTx1 = txs[0] + avlTx2 = txs[1] + } else { + avlTx1 = txs[1] + avlTx2 = txs[0] + } + + if err := avlTx1.Verify(); err != nil { + t.Fatal(err) + } + if err := avlTx2.Verify(); err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() +} + +// Ensure that an error is returned if an address will have more than +// math.MaxUint64 NanoAva +func TestTxOutputOverflow(t *testing.T) { + // Modify the genesis tx so the address controlled by [keys[0]] + // has math.MaxUint64 NanoAva + initBalances := map[string]uint64{ + keys[0].PublicKey().Address().String(): math.MaxUint64, + keys[1].PublicKey().Address().String(): defaultInitBalance, + keys[2].PublicKey().Address().String(): defaultInitBalance, + } + genesisTx := GenesisTx(initBalances) + + // Initialize vm + vmDB := memdb.New() + msgChan := make(chan common.Message, 1) + ctx.Lock.Lock() + vm := &VM{} + vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) + vm.batchTimeout = 0 + + // Create a new private key + testPK, err := vm.CreateKey() + if err != nil { + t.Fatalf("CreateKey(): %s", err) + } + // Get the address controlled by the new private key + testAddr, err := vm.GetAddress(testPK) + if err != nil { + t.Fatalf("GetAddress(%q): %s", testPK, err) + } + + // Get string repr. 
of keys[0] + cb58 := formatting.CB58{Bytes: keys[0].Bytes()} + privKey0 := cb58.String() + + // Send [math.MaxUint64 - txFeeTest] NanoAva from [privKey0] to [testAddr] + _, err = vm.Send(math.MaxUint64-txFeeTest, "", testAddr, []string{privKey0}) + if err != nil { + t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", uint64(math.MaxUint64-txFeeTest), "", testAddr, []string{privKey0}, err) + } + ctx.Lock.Unlock() + + if msg := <-msgChan; msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + // Accept the transaction + ctx.Lock.Lock() + if txs := vm.PendingTxs(); len(txs) != 1 { + t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d; returned: %d", 1, len(txs)) + } else { + txs[0].Accept() + } + if txs := vm.PendingTxs(); len(txs) != 0 { + t.Fatalf("PendingTxs(): there should not have been any pending transactions") + } + + // Ensure that [testAddr] has balance [math.MaxUint64 - txFeeTest] + if testbal, err := vm.GetBalance(testAddr, ""); err != nil { + t.Fatalf("GetBalance(%q): %s", testAddr, err) + } else if testbal != math.MaxUint64-txFeeTest { + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", testAddr, "", uint64(math.MaxUint64-txFeeTest), testbal) + } + + // Ensure that the address controlled by [keys[0]] has balance 0 + if testbal, err := vm.GetBalance(keys[0].PublicKey().Address().String(), ""); err != nil { + t.Fatalf("GetBalance(%q): %s", keys[0].PublicKey().Address().String(), err) + } else if testbal != 0 { + // Balance of new address should be 0 + t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", keys[0].PublicKey().Address().String(), "", 0, testbal) + } + + cb58.Bytes = keys[1].Bytes() + privKey1 := cb58.String() + + // Send [2*txFeeTest+1] NanoAva from [key1Str] to [testAddr] + // Should overflow [testAddr] by 1 + _, err = vm.Send(2*txFeeTest+1, "", testAddr, []string{privKey1}) + if err == errOutputOverflow { + t.Fatalf("Expected output to overflow but it did not") + } + ctx.Lock.Unlock() +} diff --git 
a/vms/timestampvm/block.go b/vms/timestampvm/block.go new file mode 100644 index 0000000..67cd56d --- /dev/null +++ b/vms/timestampvm/block.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timestampvm + +import ( + "errors" + "time" + + "github.com/ava-labs/gecko/vms/components/core" +) + +var ( + errTimestampTooEarly = errors.New("block's timestamp is later than its parent's timestamp") + errDatabase = errors.New("error while retrieving data from database") + errTimestampTooLate = errors.New("block's timestamp is more than 1 hour ahead of local time") +) + +// Block is a block on the chain. +// Each block contains: +// 1) A piece of data (a string) +// 2) A timestamp +type Block struct { + *core.Block `serialize:"true"` + Data [dataLen]byte `serialize:"true"` + Timestamp int64 `serialize:"true"` +} + +// Verify returns nil iff this block is valid. +// To be valid, it must be that: +// b.parent.Timestamp < b.Timestamp <= [local time] + 1 hour +func (b *Block) Verify() error { + if accepted, err := b.Block.Verify(); err != nil || accepted { + return err + } + + // Get [b]'s parent + parent, ok := b.Parent().(*Block) + if !ok { + return errDatabase + } + + if b.Timestamp < time.Unix(parent.Timestamp, 0).Unix() { + return errTimestampTooEarly + } + + if b.Timestamp >= time.Now().Add(time.Hour).Unix() { + return errTimestampTooLate + } + + // Persist the block + b.VM.SaveBlock(b.VM.DB, b) + return b.VM.DB.Commit() +} diff --git a/vms/timestampvm/factory.go b/vms/timestampvm/factory.go new file mode 100644 index 0000000..0800395 --- /dev/null +++ b/vms/timestampvm/factory.go @@ -0,0 +1,17 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
package timestampvm

import "github.com/ava-labs/gecko/ids"

// ID is a unique identifier for this VM
var (
	ID = ids.NewID([32]byte{'t', 'i', 'm', 'e', 's', 't', 'a', 'm', 'p'})
)

// Factory creates new instances of the timestamp VM
type Factory struct{}

// New returns a new timestamp VM instance
func (f *Factory) New() interface{} { return &VM{} }
diff --git a/vms/timestampvm/service.go b/vms/timestampvm/service.go
new file mode 100644
index 0000000..e872d80
--- /dev/null
+++ b/vms/timestampvm/service.go
@@ -0,0 +1,102 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package timestampvm

import (
	"errors"
	"net/http"

	"github.com/ava-labs/gecko/ids"

	"github.com/ava-labs/gecko/utils/formatting"
)

var (
	// NOTE(review): errDBError and errNoSuchBlock are not referenced in this
	// file's visible code — confirm they are still needed
	errDBError     = errors.New("error getting data from database")
	errBadData     = errors.New("data must be base 58 repr. of 32 bytes")
	errNoSuchBlock = errors.New("couldn't get block from database. Does it exist?")
)

// Service is the API service for this VM
type Service struct{ vm *VM }

// ProposeBlockArgs are the arguments to function ProposeBlock
// (the original comment said "ProposeValue", which does not exist)
type ProposeBlockArgs struct {
	// Data in the block. Must be base 58 encoding of 32 bytes.
	Data string `json:"data"`
}

// ProposeBlockReply is the reply from function ProposeBlock
type ProposeBlockReply struct{ Success bool }

// ProposeBlock is an API method to propose a new block whose data is [args].Data.
// [args].Data must be a string repr. of a 32 byte array
func (s *Service) ProposeBlock(_ *http.Request, args *ProposeBlockArgs, reply *ProposeBlockReply) error {
	// Decode the base 58 payload and check its length
	byteFormatter := formatting.CB58{}
	if err := byteFormatter.FromString(args.Data); err != nil {
		return errBadData
	}
	dataSlice := byteFormatter.Bytes
	if len(dataSlice) != dataLen {
		return errBadData
	}
	var data [dataLen]byte             // The data as an array of bytes
	copy(data[:], dataSlice[:dataLen]) // Copy the bytes in dataSlice to data
	s.vm.proposeBlock(data)
	reply.Success = true
	return nil
}

// APIBlock is the API representation of a block
type APIBlock struct {
	Timestamp int64  `json:"timestamp"` // Timestamp of most recent block
	Data      string `json:"data"`      // Data in the most recent block. Base 58 repr. of [dataLen] (32) bytes.
	ID        string `json:"id"`        // String repr. of ID of the most recent block
	ParentID  string `json:"parentID"`  // String repr. of ID of the most recent block's parent
}

// GetBlockArgs are the arguments to GetBlock
type GetBlockArgs struct {
	// ID of the block we're getting.
	// If left blank, gets the latest block
	ID string
}

// GetBlockReply is the reply from GetBlock
type GetBlockReply struct {
	APIBlock
}

// GetBlock gets the block whose ID is [args.ID]
// If [args.ID] is empty, get the latest block
func (s *Service) GetBlock(_ *http.Request, args *GetBlockArgs, reply *GetBlockReply) error {
	var ID ids.ID
	var err error
	if args.ID == "" {
		// No ID given; use the last accepted block
		ID = s.vm.LastAccepted()
	} else {
		ID, err = ids.FromString(args.ID)
		if err != nil {
			return errors.New("problem parsing ID")
		}
	}

	blockInterface, err := s.vm.GetBlock(ID)
	if err != nil {
		// errDatabase is declared in block.go (same package)
		return errDatabase
	}

	block, ok := blockInterface.(*Block)
	if !ok {
		// NOTE(review): returning errBadData for a failed type assertion is
		// surprising — confirm the intended error value
		return errBadData
	}

	reply.APIBlock.ID = block.ID().String()
	reply.APIBlock.Timestamp = block.Timestamp
	reply.APIBlock.ParentID = block.ParentID().String()
	byteFormatter := formatting.CB58{Bytes: block.Data[:]}
	reply.Data = byteFormatter.String()

	return nil
}
diff --git a/vms/timestampvm/vm.go b/vms/timestampvm/vm.go
new file mode 100644
index 0000000..c571d9a
--- /dev/null
+++ b/vms/timestampvm/vm.go
@@ -0,0 +1,172 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
+ +package timestampvm + +import ( + "errors" + "time" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/core" +) + +const dataLen = 32 + +var ( + errNoPendingBlocks = errors.New("there is no block to propose") + errBadGenesisBytes = errors.New("genesis data should be bytes (max length 32)") +) + +// VM implements the snowman.VM interface +// Each block in this chain contains a Unix timestamp +// and a piece of data (a string) +type VM struct { + core.SnowmanVM + codec codec.Codec + // Proposed pieces of data that haven't been put into a block and proposed yet + mempool [][dataLen]byte +} + +// Initialize this vm +// [ctx] is this vm's context +// [db] is this vm's database +// [toEngine] is used to notify the consensus engine that new blocks are +// ready to be added to consensus +// The data in the genesis block is [genesisData] +func (vm *VM) Initialize( + ctx *snow.Context, + db database.Database, + genesisData []byte, + toEngine chan<- common.Message, + _ []*common.Fx, +) error { + if err := vm.SnowmanVM.Initialize(ctx, db, vm.ParseBlock, toEngine); err != nil { + ctx.Log.Error("error initializing SnowmanVM: %v", err) + return err + } + vm.codec = codec.NewDefault() + + // If database is empty, create it using the provided genesis data + if !vm.DBInitialized() { + if len(genesisData) > dataLen { + return errBadGenesisBytes + } + + // genesisData is a byte slice but each block contains an byte array + // Take the first [dataLen] bytes from genesisData and put them in an array + var genesisDataArr [dataLen]byte + copy(genesisDataArr[:], genesisData) + + // Create the genesis block + // Timestamp of genesis block is 0. It has no parent. 
+ genesisBlock, err := vm.NewBlock(ids.Empty, genesisDataArr, time.Unix(0, 0)) + if err != nil { + vm.Ctx.Log.Error("error while creating genesis block: %v", err) + return err + } + + if err := vm.SaveBlock(vm.DB, genesisBlock); err != nil { + vm.Ctx.Log.Error("error while saving genesis block: %v", err) + return err + } + + // Accept the genesis block + // Sets [vm.lastAccepted] and [vm.preferred] + genesisBlock.Accept() + + vm.SetDBInitialized() + + // Flush VM's database to underlying db + if err := vm.DB.Commit(); err != nil { + vm.Ctx.Log.Error("error while commiting db: %v", err) + return err + } + } + return nil +} + +// CreateHandlers returns a map where: +// Keys: The path extension for this VM's API (empty in this case) +// Values: The handler for the API +func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { + handler := vm.NewHandler("timestamp", &Service{vm}) + return map[string]*common.HTTPHandler{ + "": handler, + } +} + +// CreateStaticHandlers returns a map where: +// Keys: The path extension for this VM's static API +// Values: The handler for that static API +// We return nil because this VM has no static API +func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { return nil } + +// BuildBlock returns a block that this vm wants to add to consensus +func (vm *VM) BuildBlock() (snowman.Block, error) { + if len(vm.mempool) == 0 { // There is no block to be built + return nil, errNoPendingBlocks + } + + // Get the value to put in the new block + value := vm.mempool[0] + vm.mempool = vm.mempool[1:] + + // Notify consensus engine that there are more pending data for blocks + // (if that is the case) when done building this block + if len(vm.mempool) > 0 { + defer vm.NotifyBlockReady() + } + + // Build the block + block, err := vm.NewBlock(vm.Preferred(), value, time.Now()) + if err != nil { + return nil, err + } + return block, nil +} + +// proposeBlock appends [data] to [p.mempool]. 
+// Then it notifies the consensus engine +// that a new block is ready to be added to consensus +// (namely, a block with data [data]) +func (vm *VM) proposeBlock(data [dataLen]byte) { + vm.mempool = append(vm.mempool, data) + vm.NotifyBlockReady() +} + +// ParseBlock parses [bytes] to a snowman.Block +// This function is used by the vm's state to unmarshal blocks saved in state +func (vm *VM) ParseBlock(bytes []byte) (snowman.Block, error) { + block := &Block{} + err := vm.codec.Unmarshal(bytes, block) + block.Initialize(bytes, &vm.SnowmanVM) + return block, err +} + +// NewBlock returns a new Block where: +// - the block's parent is [parentID] +// - the block's data is [data] +// - the block's timestamp is [timestamp] +// The block is persisted in storage +func (vm *VM) NewBlock(parentID ids.ID, data [dataLen]byte, timestamp time.Time) (*Block, error) { + block := &Block{ + Block: core.NewBlock(parentID), + Data: data, + Timestamp: timestamp.Unix(), + } + + blockBytes, err := vm.codec.Marshal(block) + if err != nil { + return nil, err + } + + block.Initialize(blockBytes, &vm.SnowmanVM) + + return block, nil +} diff --git a/vms/timestampvm/vm_test.go b/vms/timestampvm/vm_test.go new file mode 100644 index 0000000..caf3573 --- /dev/null +++ b/vms/timestampvm/vm_test.go @@ -0,0 +1,209 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package timestampvm + +import ( + "fmt" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/formatting" +) + +var blockchainID = ids.NewID([32]byte{1, 2, 3}) + +// Utility function to assert that [block] has: +// * Parent with ID [parentID] +// * Data [expectedData] +// * Verify() returns nil iff passesVerify == true +func assertBlock(block *Block, parentID ids.ID, expectedData [dataLen]byte, passesVerify bool) error { + if !block.ParentID().Equals(parentID) { + return fmt.Errorf("expect parent ID to be %s but was %s", parentID, block.ParentID()) + } + if block.Data != expectedData { + return fmt.Errorf("expected data to be %v but was %v", expectedData, block.Data) + } + if block.Verify() != nil && passesVerify { + return fmt.Errorf("expected block to pass verification but it fails") + } + if block.Verify() == nil && !passesVerify { + return fmt.Errorf("expected block to fail verification but it passes") + } + return nil +} + +// Assert that after initialization, the vm has the state we expect +func TestGenesis(t *testing.T) { + // Initialize the vm + db := memdb.New() + msgChan := make(chan common.Message, 1) + vm := &VM{} + ctx := snow.DefaultContextTest() + ctx.ChainID = blockchainID + vm.Initialize(ctx, db, []byte{0, 0, 0, 0, 0}, msgChan, nil) + + // Verify that the db is initialized + if !vm.DBInitialized() { + t.Fatal("db should be initialized") + } + + // Get lastAccepted + lastAccepted := vm.LastAccepted() + if lastAccepted.IsZero() { + t.Fatal("lastAccepted should not be empty") + } + + // Verify that getBlock returns the genesis block, and the genesis block + // is the type we expect + genesisSnowmanBlock, err := vm.GetBlock(lastAccepted) // genesisBlock as snowman.Block + if err != nil { + t.Fatalf("couldn't get genesisBlock: %s", err) + } + genesisBlock, ok := genesisSnowmanBlock.(*Block) // 
type assert that genesisBlock is a *Block + if !ok { + t.Fatal("type of genesisBlock should be *Block") + } + + // Verify that the genesis block has the data we expect + if err := assertBlock(genesisBlock, ids.Empty, [32]byte{0, 0, 0, 0, 0}, true); err != nil { + t.Fatal(err) + } +} + +func TestHappyPath(t *testing.T) { + // Initialize the vm + db := memdb.New() + msgChan := make(chan common.Message, 1) + vm := &VM{} + ctx := snow.DefaultContextTest() + ctx.ChainID = blockchainID + if err := vm.Initialize(ctx, db, []byte{0, 0, 0, 0, 0}, msgChan, nil); err != nil { + t.Fatal(err) + } + + genesisBlock, err := vm.GetBlock(vm.LastAccepted()) + if err != nil { + t.Fatal("could not get genesis block") + } + // in an actual execution, the engine would set the preference + vm.SetPreference(genesisBlock.ID()) + + ctx.Lock.Lock() + vm.proposeBlock([dataLen]byte{0, 0, 0, 0, 1}) // propose a value + ctx.Lock.Unlock() + + select { // assert there is a pending tx message to the engine + case msg := <-msgChan: + if msg != common.PendingTxs { + t.Fatal("Wrong message") + } + default: + t.Fatal("should have been pendingTxs message on channel") + } + + // build the block + ctx.Lock.Lock() + snowmanBlock2, err := vm.BuildBlock() + if err != nil { + t.Fatalf("problem building block: %s", err) + } + if err := snowmanBlock2.Verify(); err != nil { + t.Fatal(err) + } + snowmanBlock2.Accept() // accept the block + vm.SetPreference(snowmanBlock2.ID()) + + // Should be the block we just accepted + snowmanBlock2, err = vm.GetBlock(vm.LastAccepted()) + if err != nil { + t.Fatal("couldn't get block") + } + block2, ok := snowmanBlock2.(*Block) + if !ok { + t.Fatal("genesis block should be type *Block") + } + // Assert the block we accepted has the data we expect + if err := assertBlock(block2, genesisBlock.ID(), [dataLen]byte{0, 0, 0, 0, 1}, true); err != nil { + t.Fatal(err) + } + + vm.proposeBlock([dataLen]byte{0, 0, 0, 0, 2}) // propose a block + ctx.Lock.Unlock() + + select { // verify there 
is a pending tx message to the engine + case msg := <-msgChan: + if msg != common.PendingTxs { + t.Fatal("Wrong message") + } + default: + t.Fatal("should have been pendingTxs message on channel") + } + + ctx.Lock.Lock() + + // build the block + if block, err := vm.BuildBlock(); err != nil { + t.Fatalf("problem building block: %s", err) + } else { + if err := block.Verify(); err != nil { + t.Fatal(err) + } + block.Accept() // accept the block + vm.SetPreference(block.ID()) + } + + // The block we just accepted + snowmanBlock3, err := vm.GetBlock(vm.LastAccepted()) + if err != nil { + t.Fatal("couldn't get block") + } + block3, ok := snowmanBlock3.(*Block) + if !ok { + t.Fatal("genesis block should be type *Block") + } + // Assert the block we accepted has the data we expect + if err := assertBlock(block3, snowmanBlock2.ID(), [dataLen]byte{0, 0, 0, 0, 2}, true); err != nil { + t.Fatal(err) + } + + // Next, check the blocks we added are there + if block2FromState, err := vm.GetBlock(block2.ID()); err != nil { + t.Fatal(err) + } else if !block2FromState.ID().Equals(block2.ID()) { + t.Fatal("expected IDs to match but they don't") + } + if block3FromState, err := vm.GetBlock(block3.ID()); err != nil { + t.Fatal(err) + } else if !block3FromState.ID().Equals(block3.ID()) { + t.Fatal("expected IDs to match but they don't") + } + + ctx.Lock.Unlock() +} + +func TestMakeStringFrom32Bytes(t *testing.T) { + bytes := [32]byte{'w', 'o', 'o'} + bytesFormatter := formatting.CB58{Bytes: bytes[:]} + t.Log(bytesFormatter.String()) +} + +func TestService(t *testing.T) { + // Initialize the vm + db := memdb.New() + msgChan := make(chan common.Message, 1) + vm := &VM{} + ctx := snow.DefaultContextTest() + ctx.ChainID = blockchainID + if err := vm.Initialize(ctx, db, []byte{0, 0, 0, 0, 0}, msgChan, nil); err != nil { + t.Fatal(err) + } + + service := Service{vm} + if err := service.GetBlock(nil, &GetBlockArgs{}, &GetBlockReply{}); err != nil { + t.Fatal(err) + } +} diff --git 
a/xputtest/chainwallet/wallet.go b/xputtest/chainwallet/wallet.go new file mode 100644 index 0000000..9f4b92b --- /dev/null +++ b/xputtest/chainwallet/wallet.go @@ -0,0 +1,133 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chainwallet + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/vms/spchainvm" +) + +// The max number of transactions this wallet can send as part of the throughput tests +// lower --> low startup time but test has shorter duration +// higher --> high startup time but test has longer duration +const ( + MaxNumTxs = 25000 +) + +// Wallet is a holder for keys and UTXOs. +type Wallet struct { + networkID uint32 + chainID ids.ID + keyChain *spchainvm.KeyChain // Mapping from public address to the SigningKeys + accountSet map[[20]byte]spchainvm.Account // Mapping from addresses to accounts + balance uint64 + TxsSent int32 + txs [MaxNumTxs]*spchainvm.Tx +} + +// NewWallet ... +func NewWallet(networkID uint32, chainID ids.ID) Wallet { + return Wallet{ + networkID: networkID, + chainID: chainID, + keyChain: spchainvm.NewKeyChain(networkID, chainID), + accountSet: make(map[[20]byte]spchainvm.Account), + } +} + +// CreateAddress returns a brand new address! Ready to receive funds! +func (w *Wallet) CreateAddress() ids.ShortID { return w.keyChain.New().PublicKey().Address() } + +// ImportKey imports a private key into this wallet +func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keyChain.Add(sk) } + +// AddAccount adds a new account to this wallet, if this wallet can spend it. 
+func (w *Wallet) AddAccount(account spchainvm.Account) { + if account.Balance() > 0 { + w.accountSet[account.ID().Key()] = account + w.balance += account.Balance() + } +} + +// Balance returns the amount of the assets in this wallet +func (w *Wallet) Balance() uint64 { return w.balance } + +// GenerateTxs generates the transactions that will be sent +// during the test +// Generate them all on test initialization so tx generation is not bottleneck +// in testing +func (w *Wallet) GenerateTxs() { + ctx := snow.DefaultContextTest() + ctx.NetworkID = w.networkID + ctx.ChainID = w.chainID + + for i := 0; i < MaxNumTxs; i++ { + if i%1000 == 0 { + fmt.Printf("generated %d transactions\n", i) + } + for _, account := range w.accountSet { + accountID := account.ID() + if key, exists := w.keyChain.Get(accountID); exists { + amount := uint64(1) + if tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key); err == nil { + newAccount, err := sendAccount.Receive(tx, ctx) + if err != nil { + panic("shouldn't error") + } + w.accountSet[accountID.Key()] = newAccount + w.txs[i] = tx + continue + } else { + panic("shouldn't error here either: " + err.Error()) + } + } else { + panic("shouldn't not exist") + } + } + } +} + +/* +// Send a new transaction +func (w *Wallet) Send() *spchainvm.Tx { + ctx := snow.DefaultContextTest() + ctx.NetworkID = w.networkID + ctx.ChainID = w.chainID + + for _, account := range w.accountSet { + accountID := account.ID() + if key, exists := w.keyChain.Get(accountID); exists { + amount := uint64(1) + if tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key); err == nil { + newAccount, err := sendAccount.Receive(tx, ctx) + if err == nil { + w.accountSet[accountID.Key()] = newAccount + return tx + } + } + } + } + return nil +} +*/ + +// NextTx returns the next tx to be sent as part of xput test +func (w *Wallet) NextTx() *spchainvm.Tx { + if w.TxsSent >= MaxNumTxs { + return nil + } + w.TxsSent++ + return w.txs[w.TxsSent-1] +} + 
+func (w Wallet) String() string { + return fmt.Sprintf( + "KeyChain:\n"+ + "%s", + w.keyChain.PrefixedString(" ")) +} diff --git a/xputtest/config.go b/xputtest/config.go new file mode 100644 index 0000000..5066069 --- /dev/null +++ b/xputtest/config.go @@ -0,0 +1,29 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/logging" +) + +// Config contains all of the configurations of an Ava client. +type Config struct { + // Networking configurations + RemoteIP utils.IPDesc // Which Ava node to connect to + + // ID of the network that this client will be issuing transactions to + NetworkID uint32 + + // Transaction fee + AvaTxFee uint64 + + EnableCrypto bool + LoggingConfig logging.Config + + // Key describes which key to use to issue transactions + // MaxOutstandingTxs describes how many txs to pipeline + Key, MaxOutstandingTxs int + Chain ChainType +} diff --git a/xputtest/dagwallet/utxo_set.go b/xputtest/dagwallet/utxo_set.go new file mode 100644 index 0000000..ae513e8 --- /dev/null +++ b/xputtest/dagwallet/utxo_set.go @@ -0,0 +1,88 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package dagwallet + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/spdagvm" +) + +// UtxoSet ... +type UtxoSet struct { + // This can be used to iterate over. However, it should not be modified externally. + utxoMap map[[32]byte]int + Utxos []*spdagvm.UTXO +} + +// Put ... +func (us *UtxoSet) Put(utxo *spdagvm.UTXO) { + if us.utxoMap == nil { + us.utxoMap = make(map[[32]byte]int) + } + if _, ok := us.utxoMap[utxo.ID().Key()]; !ok { + us.utxoMap[utxo.ID().Key()] = len(us.Utxos) + us.Utxos = append(us.Utxos, utxo) + } +} + +// Get ... 
+func (us *UtxoSet) Get(id ids.ID) *spdagvm.UTXO { + if us.utxoMap == nil { + return nil + } + if i, ok := us.utxoMap[id.Key()]; ok { + utxo := us.Utxos[i] + return utxo + } + return nil +} + +// Remove ... +func (us *UtxoSet) Remove(id ids.ID) *spdagvm.UTXO { + i, ok := us.utxoMap[id.Key()] + if !ok { + return nil + } + utxoI := us.Utxos[i] + + j := len(us.Utxos) - 1 + utxoJ := us.Utxos[j] + + us.Utxos[i] = us.Utxos[j] + us.Utxos = us.Utxos[:j] + + us.utxoMap[utxoJ.ID().Key()] = i + delete(us.utxoMap, utxoI.ID().Key()) + + return utxoI +} + +func (us *UtxoSet) string(prefix string) string { + s := strings.Builder{} + + for i, utxo := range us.Utxos { + out := utxo.Out().(*spdagvm.OutputPayment) + sourceID, sourceIndex := utxo.Source() + + s.WriteString(fmt.Sprintf("%sUtxo[%d]:"+ + "\n%s InputID: %s"+ + "\n%s InputIndex: %d"+ + "\n%s Locktime: %d"+ + "\n%s Amount: %d\n", + prefix, i, + prefix, sourceID, + prefix, sourceIndex, + prefix, out.Locktime(), + prefix, out.Amount())) + } + + return strings.TrimSuffix(s.String(), "\n") +} + +func (us *UtxoSet) String() string { + return us.string("") +} diff --git a/xputtest/dagwallet/wallet.go b/xputtest/dagwallet/wallet.go new file mode 100644 index 0000000..df308a2 --- /dev/null +++ b/xputtest/dagwallet/wallet.go @@ -0,0 +1,122 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package dagwallet + +import ( + "fmt" + "math" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/spdagvm" +) + +// Wallet is a holder for keys and UTXOs for the Ava DAG. 
+type Wallet struct { + networkID uint32 + chainID ids.ID + clock timer.Clock + keyChain *spdagvm.KeyChain // Mapping from public address to the SigningKeys + utxoSet *UtxoSet // Mapping from utxoIDs to Utxos + balance uint64 + txFee uint64 +} + +// NewWallet returns a new Wallet +func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) Wallet { + return Wallet{ + networkID: networkID, + chainID: chainID, + keyChain: &spdagvm.KeyChain{}, + utxoSet: &UtxoSet{}, + txFee: txFee, + } +} + +// GetAddress returns one of the addresses this wallet manages. If no address +// exists, one will be created. +func (w *Wallet) GetAddress() ids.ShortID { + if w.keyChain.Addrs.Len() == 0 { + return w.CreateAddress() + } + return w.keyChain.Addrs.CappedList(1)[0] +} + +// CreateAddress returns a new address. +// It also saves the address and the private key that controls it +// so the address can be used later +func (w *Wallet) CreateAddress() ids.ShortID { + privKey, _ := w.keyChain.New() + return privKey.PublicKey().Address() +} + +// ImportKey imports a private key into this wallet +func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keyChain.Add(sk) } + +// AddUtxo adds a new UTXO to this wallet if this wallet may spend it +// The UTXO's output must be an OutputPayment +func (w *Wallet) AddUtxo(utxo *spdagvm.UTXO) { + out, ok := utxo.Out().(*spdagvm.OutputPayment) + if !ok { + return + } + + if _, _, err := w.keyChain.Spend(utxo, math.MaxUint64); err == nil { + w.utxoSet.Put(utxo) + w.balance += out.Amount() + } +} + +// Balance returns the amount of the assets in this wallet +func (w *Wallet) Balance() uint64 { return w.balance } + +// Send sends [amount] to address [destAddr] +// The output of this transaction may be spent after [locktime] +func (w *Wallet) Send(amount uint64, locktime uint64, destAddr ids.ShortID) *spdagvm.Tx { + builder := spdagvm.Builder{ + NetworkID: w.networkID, + ChainID: w.chainID, + } + currentTime := w.clock.Unix() + + // Send any 
change to an address this wallet controls + changeAddr := ids.ShortID{} + if w.keyChain.Addrs.Len() < 1000 { + changeAddr = w.CreateAddress() + } else { + changeAddr = w.GetAddress() + } + + utxoList := w.utxoSet.Utxos // List of UTXOs this wallet may spend + + destAddrs := []ids.ShortID{destAddr} + + // Build the transaction + tx, err := builder.NewTxFromUTXOs(w.keyChain, utxoList, amount, w.txFee, locktime, 1, destAddrs, changeAddr, currentTime) + if err != nil { + panic(err) + } + + // Remove from [w.utxoSet] any UTXOs used to fund [tx] + for _, in := range tx.Ins() { + if in, ok := in.(*spdagvm.InputPayment); ok { + inUtxoID := in.InputID() + w.utxoSet.Remove(inUtxoID) + w.balance -= in.Amount() // Deduct from [w.balance] the amount sent + } + } + + return tx +} + +func (w Wallet) String() string { + return fmt.Sprintf( + "KeyChain:\n"+ + "%s\n"+ + "UtxoSet:\n"+ + "%s", + w.keyChain.PrefixedString(" "), + w.utxoSet.string(" ")) +} diff --git a/xputtest/main.go b/xputtest/main.go new file mode 100644 index 0000000..10b634f --- /dev/null +++ b/xputtest/main.go @@ -0,0 +1,326 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +// #include "salticidae/network.h" +// void onTerm(int sig, void *); +// void decidedTx(msg_t *, msgnetwork_conn_t *, void *); +import "C" + +import ( + "fmt" + "os" + "path" + "runtime" + "runtime/pprof" + "time" + "unsafe" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/xputtest/chainwallet" + "github.com/ava-labs/gecko/xputtest/dagwallet" +) + +// tp stores the persistent data needed when running the test. +type tp struct { + ec salticidae.EventContext + build networking.Builder + + conn salticidae.MsgNetworkConn + + log logging.Logger + decided chan ids.ID + + networkID uint32 +} + +var t = tp{} + +//export onTerm +func onTerm(C.int, unsafe.Pointer) { + t.log.Info("Terminate signal received") + t.ec.Stop() +} + +// decidedTx handles the recept of a decidedTx message +//export decidedTx +func decidedTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + msg := salticidae.MsgFromC(salticidae.CMsg(_msg)) + + pMsg, err := t.build.Parse(networking.DecidedTx, msg.GetPayloadByMove()) + if err != nil { + t.log.Warn("Failed to parse DecidedTx message") + return + } + + txID, err := ids.ToID(pMsg.Get(networking.TxID).([]byte)) + t.log.AssertNoError(err) // Length is checked in message parsing + + t.log.Debug("Decided %s", txID) + t.decided <- txID +} + +func main() { + if err != nil { + fmt.Printf("Failed to parse arguments: %s\n", err) + } + + config.LoggingConfig.Directory = path.Join(config.LoggingConfig.Directory, "client") + log, err := logging.New(config.LoggingConfig) + if err != nil 
{ + fmt.Printf("Failed to start the logger: %s\n", err) + return + } + + defer log.Stop() + + t.log = log + crypto.EnableCrypto = config.EnableCrypto + t.decided = make(chan ids.ID, config.MaxOutstandingTxs) + + if config.Key >= len(genesis.Keys) || config.Key < 0 { + log.Fatal("Unknown key specified") + return + } + + t.ec = salticidae.NewEventContext() + evInt := salticidae.NewSigEvent(t.ec, salticidae.SigEventCallback(C.onTerm), nil) + evInt.Add(salticidae.SIGINT) + evTerm := salticidae.NewSigEvent(t.ec, salticidae.SigEventCallback(C.onTerm), nil) + evTerm.Add(salticidae.SIGTERM) + + serr := salticidae.NewError() + netconfig := salticidae.NewMsgNetworkConfig() + net := salticidae.NewMsgNetwork(t.ec, netconfig, &serr) + if serr.GetCode() != 0 { + log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) + return + } + + net.RegHandler(networking.DecidedTx, salticidae.MsgNetworkMsgCallback(C.decidedTx), nil) + + net.Start() + defer net.Stop() + + remoteIP := salticidae.NewNetAddrFromIPPortString(config.RemoteIP.String(), true, &serr) + if code := serr.GetCode(); code != 0 { + log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) + return + } + + t.conn = net.ConnectSync(remoteIP, true, &serr) + if serr.GetCode() != 0 { + log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) + return + } + + file, gErr := os.Create("cpu_client.profile") + log.AssertNoError(gErr) + gErr = pprof.StartCPUProfile(file) + log.AssertNoError(gErr) + runtime.SetMutexProfileFraction(1) + + defer file.Close() + defer pprof.StopCPUProfile() + + t.networkID = config.NetworkID + + switch config.Chain { + case ChainChain: + t.benchmarkSnowman() + case DagChain: + t.benchmarkAvalanche() + default: + t.log.Fatal("did not specify whether to test dag or chain. 
Exiting") + return + } + + t.ec.Dispatch() +} + +func (t *tp) benchmarkAvalanche() { + platformGenesisBytes := genesis.Genesis(t.networkID) + genesisState := &platformvm.Genesis{} + err := platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState) + t.log.AssertNoError(err) + t.log.AssertNoError(genesisState.Initialize()) + + spDAGChain := genesisState.Chains[2] + if name := spDAGChain.ChainName; name != "Simple DAG Payments" { + panic("Wrong chain name") + } + genesisBytes := spDAGChain.GenesisData + + wallet := dagwallet.NewWallet(t.networkID, spDAGChain.ID(), config.AvaTxFee) + + codec := spdagvm.Codec{} + tx, err := codec.UnmarshalTx(genesisBytes) + t.log.AssertNoError(err) + + cb58 := formatting.CB58{} + keyStr := genesis.Keys[config.Key] + t.log.AssertNoError(cb58.FromString(keyStr)) + factory := crypto.FactorySECP256K1R{} + skGen, err := factory.ToPrivateKey(cb58.Bytes) + t.log.AssertNoError(err) + sk := skGen.(*crypto.PrivateKeySECP256K1R) + wallet.ImportKey(sk) + + for _, utxo := range tx.UTXOs() { + wallet.AddUtxo(utxo) + } + + go t.log.RecoverAndPanic(func() { t.IssueAvalanche(spDAGChain.ID(), wallet) }) +} + +func (t *tp) IssueAvalanche(chainID ids.ID, wallet dagwallet.Wallet) { + t.log.Info("starting avalanche benchmark") + pending := make(map[[32]byte]*spdagvm.Tx) + canAdd := []*spdagvm.Tx{} + numAccepted := 0 + + t.decided <- ids.ID{} + meter := timer.TimedMeter{Duration: time.Second} + for d := range t.decided { + if numAccepted%1000 == 0 { + t.log.Info("TPS: %d", meter.Ticks()) + } + if !d.IsZero() { + meter.Tick() + key := d.Key() + if tx := pending[key]; tx != nil { + canAdd = append(canAdd, tx) + + t.log.Debug("Finalized %s", d) + delete(pending, key) + numAccepted++ + } + } + + for len(pending) < config.MaxOutstandingTxs && (wallet.Balance() > 0 || len(canAdd) > 0) { + if wallet.Balance() == 0 { + tx := canAdd[0] + canAdd = canAdd[1:] + + for _, utxo := range tx.UTXOs() { + wallet.AddUtxo(utxo) + } + } + + tx := wallet.Send(1, 0, 
wallet.GetAddress()) + t.log.AssertTrue(tx != nil, "Tx creation failed") + + it, err := t.build.IssueTx(chainID, tx.Bytes()) + t.log.AssertNoError(err) + ds := it.DataStream() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) + + t.conn.GetNet().SendMsg(newMsg, t.conn) + + ds.Free() + ba.Free() + newMsg.Free() + + pending[tx.ID().Key()] = tx + t.log.Debug("Sent tx, pending = %d, accepted = %d", len(pending), numAccepted) + } + } +} + +func (t *tp) benchmarkSnowman() { + platformGenesisBytes := genesis.Genesis(t.networkID) + genesisState := &platformvm.Genesis{} + err := platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState) + t.log.AssertNoError(err) + t.log.AssertNoError(genesisState.Initialize()) + + spchainChain := genesisState.Chains[3] + if name := spchainChain.ChainName; name != "Simple Chain Payments" { + panic("Wrong chain name") + } + genesisBytes := spchainChain.GenesisData + + wallet := chainwallet.NewWallet(t.networkID, spchainChain.ID()) + + codec := spchainvm.Codec{} + accounts, err := codec.UnmarshalGenesis(genesisBytes) + t.log.AssertNoError(err) + + cb58 := formatting.CB58{} + factory := crypto.FactorySECP256K1R{} + for _, keyStr := range genesis.Keys { + t.log.AssertNoError(cb58.FromString(keyStr)) + skGen, err := factory.ToPrivateKey(cb58.Bytes) + t.log.AssertNoError(err) + sk := skGen.(*crypto.PrivateKeySECP256K1R) + wallet.ImportKey(sk) + } + + for _, account := range accounts { + wallet.AddAccount(account) + break + } + + wallet.GenerateTxs() + + go t.log.RecoverAndPanic(func() { t.IssueSnowman(spchainChain.ID(), wallet) }) +} + +func (t *tp) IssueSnowman(chainID ids.ID, wallet chainwallet.Wallet) { + t.log.Debug("Issuing with %d", wallet.Balance()) + numAccepted := 0 + numPending := 0 + + t.decided <- ids.ID{} + + meter := timer.TimedMeter{Duration: time.Second} + for d := range t.decided { + if numAccepted%1000 == 0 { + t.log.Info("TPS: %d", 
meter.Ticks()) + } + if !d.IsZero() { + meter.Tick() + t.log.Debug("Finalized %s", d) + numAccepted++ + numPending-- + } + + for numPending < config.MaxOutstandingTxs && wallet.Balance() > 0 && wallet.TxsSent < chainwallet.MaxNumTxs { + tx := wallet.NextTx() + t.log.AssertTrue(tx != nil, "Tx creation failed") + + it, err := t.build.IssueTx(chainID, tx.Bytes()) + t.log.AssertNoError(err) + ds := it.DataStream() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) + + t.conn.GetNet().SendMsg(newMsg, t.conn) + + ds.Free() + ba.Free() + newMsg.Free() + + numPending++ + t.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted) + } + if wallet.TxsSent >= chainwallet.MaxNumTxs { + fmt.Println("done with test") + return + } + } + +} diff --git a/xputtest/params.go b/xputtest/params.go new file mode 100644 index 0000000..cf9b132 --- /dev/null +++ b/xputtest/params.go @@ -0,0 +1,96 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "flag" + "fmt" + "net" + + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + config Config + err error +) + +// Parse the CLI arguments +func init() { + errs := &wrappers.Errs{} + defer func() { err = errs.Err }() + + loggingConfig, err := logging.DefaultConfig() + errs.Add(err) + + // NetworkID: + networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + + // Ava fees: + flag.Uint64Var(&config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") + + // Assertions: + flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + + // Crypto: + flag.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + + // Remote Server: + ip := flag.String("ip", "127.0.0.1", "IP address of the remote server socket") + port := flag.Uint("port", 9652, "Port of the remote server socket") + + // Logging: + logsDir := flag.String("log-dir", "", "Logging directory for Ava") + logLevel := flag.String("log-level", "info", "The log level. 
Should be one of {all, debug, info, warn, error, fatal, off}") + + // Test Variables: + chain := flag.Bool("chain", false, "Execute chain transactions") + dag := flag.Bool("dag", false, "Execute dag transactions") + flag.IntVar(&config.Key, "key", 0, "Index of the genesis key list to use") + flag.IntVar(&config.MaxOutstandingTxs, "max_outstanding", 1000, "Maximum number of transactions to leave outstanding") + + flag.Parse() + + networkID, err := genesis.NetworkID(*networkName) + errs.Add(err) + + if networkID != genesis.LocalID { + errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) + } + + config.NetworkID = networkID + + // Remote: + parsedIP := net.ParseIP(*ip) + if parsedIP == nil { + errs.Add(fmt.Errorf("invalid IP Address %s", *ip)) + } + config.RemoteIP = utils.IPDesc{ + IP: parsedIP, + Port: uint16(*port), + } + + // Logging: + if *logsDir != "" { + loggingConfig.Directory = *logsDir + } + level, err := logging.ToLevel(*logLevel) + errs.Add(err) + loggingConfig.LogLevel = level + loggingConfig.DisplayLevel = level + config.LoggingConfig = loggingConfig + + // Test Variables: + switch { + case *chain: + config.Chain = ChainChain + case *dag: + config.Chain = DagChain + default: + config.Chain = UnknownChain + } +} diff --git a/xputtest/subnets.go b/xputtest/subnets.go new file mode 100644 index 0000000..aeb01dc --- /dev/null +++ b/xputtest/subnets.go @@ -0,0 +1,14 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +// ChainType ... +type ChainType int + +// Chain types +const ( + UnknownChain ChainType = iota + ChainChain + DagChain +)