Merge pull request #8 from adityapk00/lightd_compat

LightwalletD Compatibility
This commit is contained in:
adityapk00 2021-04-22 12:00:54 -07:00 committed by GitHub
commit f0152f09b9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1243 changed files with 273277 additions and 349132 deletions

5
.codecov.yml Normal file

@ -0,0 +1,5 @@
# used to disable false alarms in the codecov/patch GitHub status check
coverage:
status:
patch: off

12
.env.template Normal file

@ -0,0 +1,12 @@
ZCASHD_RPCUSER=zcashrpc
ZCASHD_RPCPASSWORD=${PASSWORD_ZCASHD}
ZCASHD_RPCPORT=38232
ZCASHD_ALLOWIP=0.0.0.0/0
ZCASHD_DATADIR=/srv/zcashd/.zcash
ZCASHD_PARMDIR=/srv/zcashd/.zcash-params
ZCASHD_GEN=0
GF_SECURITY_ADMIN_USER=admin
GF_SECURITY_ADMIN_PASSWORD=${PASSWORD_GRAFANA}
LWD_GRPC_PORT=9067
LWD_HTTP_PORT=9068
ZCASHD_CONF_PATH=/srv/lightwalletd/zcash.conf
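Note: `buildenv.sh` (added later in this change) fills in the `${PASSWORD_ZCASHD}` and `${PASSWORD_GRAFANA}` placeholders; it generates both passwords with `openssl rand -base64 32` and evaluates each line of this template to produce the final environment values.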

17
.github/ISSUE_TEMPLATE/bug-report.md vendored Normal file

@ -0,0 +1,17 @@
---
name: Bug report
about: Tell us what's wrong.
title: ''
labels: 'bug'
assignees: ''
---
**What is the bug?**
What happened, and why was it unexpected? If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here. Device, OS, App versions, and other details appreciated.
**Solution**
Add fix suggestions here, or what you'd like to see.


@ -0,0 +1,14 @@
---
name: Feature request
about: Suggest an idea.
title: ''
labels: 'use case'
assignees: ''
---
**What is your feature request?**
A description of your feature request. References or screenshots appreciated.
**How would this feature help you?**
A clear and concise description of what this feature would help with, or do.

5
.github/pull_request_template.md vendored Normal file

@ -0,0 +1,5 @@
Please ensure this checklist is followed for any pull requests for this repo. This checklist must be checked by both the PR creator and by anyone who reviews the PR.
* [ ] Relevant documentation for this PR has to be completed and reviewed by @lindanlee before the PR can be merged
* [ ] A test plan for the PR must be documented in the PR notes and included in the test plan for the next regular release
As a note, all CI tests need to be passing and all appropriate code reviews need to be done before this PR can be merged

15
.gitignore vendored

@ -1,6 +1,11 @@
-main
-grpcfrontend*
-cert.pem
-key.pem
-db/
+first-make-timestamp
+lightwalletd
+lightwalletd.exe
+server.log
+coverage.out
+test-log
+lwd-api.html
+*.orig
+__debug_bin
 .vscode
+lightwalletd.log

130
.gitlab-ci.yml Normal file

@ -0,0 +1,130 @@
# /************************************************************************
# File: .gitlab-ci.yml
# Author: mdr0id
# Date: 7/16/2019
# Description: Used to setup runners/jobs for lightwalletd
# Usage: Commit source and the pipeline will trigger the according jobs.
#
# Known bugs/missing features:
#
# IMPORTANT NOTE: any job with a preceding '.' is ignored in the pipeline
# ************************************************************************/
image: golang:1.11-alpine
stages:
- build
- test
- deploy
- monitor
before_script:
- apk update && apk add make git gcc musl-dev curl bash
# ************************************************************************/
# BUILD
# ************************************************************************/
.lint-check:
stage: build
script:
- make lint
.build-docs:
stage: build
script:
- make docs
build:build-linux:
stage: build
script:
- make
artifacts:
paths:
- ./lightwalletd
.build-windows:
stage: build
script:
- make
.build-mac:
stage: build
script:
- make
# Build against latest Golang
.build-latest:
stage: build
image: golang:latest-alpine
script:
- make
allow_failure: true
# ************************************************************************/
# TEST
# ************************************************************************/
test:test-unittest:
stage: test
dependencies:
- build:build-linux
script:
- make test
after_script:
- bash <(curl -s https://codecov.io/bash) -t $CODECOV_TOKEN
.test:test-race-conditions:
stage: test
dependencies:
- build:build-linux
script:
- make race
allow_failure: true
.test:test-coverage:
stage: test
dependencies:
- build:build-linux
script:
- make coverage
- make coverage_report
- make coverage_html
artifacts:
paths:
- ./coverage.html
# ************************************************************************/
# DEPLOY
# ************************************************************************/
.deploy_staging:
stage: deploy
script:
- make
- make test
environment:
name: staging
only:
- master
after_script:
- bash <(curl -s https://codecov.io/bash) -t $CODECOV_TOKEN
.release-candidate:
stage: deploy
script:
- echo "Generating v0.0.1-rc"
when: manual
.release-production:
stage: deploy
script:
- echo "Generating v0.0.1"
when: manual
# ************************************************************************/
# MONITOR
# ************************************************************************/
.monitor-release:
stage: deploy
script:
- echo "Building docker image for v0.0.0"
- make image
when: manual

53
CODE_OF_CONDUCT.md Normal file

@ -0,0 +1,53 @@
# Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic
addresses, without explicit permission
* Other unethical or unprofessional conduct
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
By adopting this Code of Conduct, project maintainers commit themselves to
fairly and consistently applying these principles to every aspect of managing
this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer (see below). All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.
If you wish to contact specific maintainers directly, the following have made
themselves available for conduct issues:
- Marshall Gaucher (marshall@z.cash)
- Larry Ruane (larry@z.cash)
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at https://www.contributor-covenant.org/version/1/3/0/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org

207
CONTRIBUTING.md Normal file

@ -0,0 +1,207 @@
# Development Workflow
This document describes the standard workflows and terminology for developers at Zcash. It is intended to provide procedures that will allow users to contribute to the open-source code base. Below are common workflows users will encounter:
1. Fork lightwalletd Repository
2. Create Branch
3. Make & Commit Changes
4. Create Pull Request
5. Discuss / Review PR
6. Deploy / Merge PR
Before continuing, please ensure you have an existing GitHub or GitLab account. If not, visit [GitHub](https://github.com) or [GitLab](https://gitlab.com) to create an account.
## Fork Repository
This step assumes you are starting with a new GitHub/GitLab environment. If you have already forked the lightwalletd repository, please continue to the [Create Branch] section. Otherwise, open a terminal and issue the commands below:
Note: Please replace `your_username` with your actual GitHub username
```bash
git clone git@github.com:your_username/lightwalletd.git
cd lightwalletd
git remote set-url origin git@github.com:your_username/lightwalletd.git
git remote add upstream git@github.com:zcash/lightwalletd.git
git remote set-url --push upstream DISABLED
git fetch upstream
git branch -u upstream/master master
```
After issuing the above commands, your `.git/config` file should look similar to the following:
```bash
[core]
repositoryformatversion = 0
filemode = true
bare = false
logallrefupdates = true
[remote "origin"]
url = git@github.com:your_username/lightwalletd.git
fetch = +refs/heads/*:refs/remotes/origin/*
[branch "master"]
remote = upstream
merge = refs/heads/master
[remote "upstream"]
url = git@github.com:zcash/lightwalletd.git
fetch = +refs/heads/*:refs/remotes/upstream/*
pushurl = DISABLED
```
This setup provides a single cloned environment to develop for lightwalletd. There are alternative methods using multiple clones, but this document does not cover that process.
## Create Branch
While working on the lightwalletd project, you are going to have bugs, features, and ideas to work on. Branching exists to aid these different tasks while you write code. Below are some conventions of branching at Zcash:
1. `master` branch is **ALWAYS** deployable
2. Branch names **MUST** be descriptive:
* General format: `issue#_short_description`
To create a new branch (assuming you are in `lightwalletd` directory):
```bash
git checkout -b [new_branch_name]
```
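For example, a branch addressing a hypothetical issue #123 about reorg handling could be created with `git checkout -b 123_fix_reorg_handling` (the issue number and description here are only illustrative).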
Note: Even though you have created a new branch, until you `git push` this local branch, it will not show up in your lightwalletd fork on GitHub (e.g. https://github.com/your_username/lightwalletd)
To checkout an existing branch (assuming you are in `lightwalletd` directory):
```bash
git checkout [existing_branch_name]
```
If you are fixing a bug or implementing a new feature, you likely will want to create a new branch. If you are reviewing code or working on existing branches, you likely will checkout an existing branch. To view the list of current lightwalletd GitHub issues, click [here](https://github.com/zcash/lightwalletd/issues).
## Make & Commit Changes
If you have created a new branch or checked out an existing one, it is time to make changes to your local source code. Below are some formalities for commits:
1. Commit messages **MUST** be clear
2. Commit messages **MUST** be descriptive
3. Commit messages **MUST** be clean (see squashing commits for details)
While continuing to do development on a branch, keep in mind that other approved commits are getting merged into `master`. In order to ensure there are minimal to no merge conflicts, we need to `rebase` with `master`.
If you are new to this process, please sanity check your remotes:
```
git remote -v
```
```bash
origin git@github.com:your_username/lightwalletd.git (fetch)
origin git@github.com:your_username/lightwalletd.git (push)
upstream git@github.com:zcash/lightwalletd.git (fetch)
upstream DISABLED (push)
```
This output should be consistent with your `.git/config`:
```bash
[branch "master"]
remote = upstream
merge = refs/heads/master
[remote "origin"]
url = git@github.com:your_username/lightwalletd.git
fetch = +refs/heads/*:refs/remotes/origin/*
[remote "upstream"]
url = git@github.com:zcash/lightwalletd.git
fetch = +refs/heads/*:refs/remotes/upstream/*
pushurl = DISABLED
```
Once you have confirmed your branch/remote is valid, issue the following commands (assumes you have **NO** existing uncommitted changes):
```bash
git fetch upstream
git rebase upstream/master
git push -f
```
If you have uncommitted changes, use `git stash` to preserve them:
```bash
git stash
git fetch upstream
git rebase upstream/master
git push -f
git stash pop
```
Using `git stash` allows you to temporarily store your changes while you rebase with `master`. Without this, you will rebase with master and lose your local changes.
Before committing changes, ensure your commit messages follow these guidelines:
1. Separate subject from body with a blank line
2. Limit the subject line to 50 characters
3. Capitalize the subject line
4. Do not end the subject line with a period
5. Wrap the body at 72 characters
6. Use the body to explain *what* and *why* vs. *how*
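For example (an illustrative message, not a real commit): a subject line such as `Add reorg detection to block cache`, then a blank line, then a body wrapped at 72 characters that explains why the change was needed rather than restating the diff.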
Once synced with `master`, let's commit our changes:
```bash
git add [files...] # default is all files, be careful not to add unintended files
git commit -m 'Message describing commit'
git push
```
Now that the changed files have been committed, let's continue to the Create Pull Request section.
## Create Pull Request
On your GitHub page (e.g. https://github.com/your_username/lightwalletd), you will notice a newly created banner containing your recent commit and a big green `Compare & pull request` button. Click on it.
First, write a brief summary comment for your PR -- this first comment should be no more than a few lines because it ends up in the merge commit message. This comment should mention the issue number preceded by a hash symbol (for example, #2984).
Add a second comment if more explanation is needed. It's important to explain why this pull request should be accepted. State whether the proposed change fixes part of the problem or all of it; if the change is temporary (a workaround) or permanent; if the problem also exists upstream (Bitcoin) and, if so, if and how it was fixed there.
If you click on `Commits`, you should see the diff of that commit; it's advisable to verify it's what you expect. You can also click on the small plus signs that appear when you hover over the lines on either the left or right side and add a comment specific to that part of the code. This is very helpful, as you don't have to tell the reviewers (in a general comment) that you're referring to a certain line in a certain file.
Add comments **before** adding reviewers, otherwise they will get a separate email for each comment you add. Once you're happy with the documentation you've added to your PR, select reviewers along the right side. For a trivial change (like the example here), one reviewer is enough, but generally you should have at least two reviewers, at least one of whom should be experienced. It may be good to add one less experienced engineer as a learning experience for that person.
## Discuss / Review PR
In order to merge your PR with `master`, you will need to convince the reviewers of the intentions of your code.
**IMPORTANT:** If your PR introduces code that does not have existing tests to ensure it operates gracefully, you **MUST** also create these tests to accompany your PR.
Reviewers will investigate your PR and provide feedback. Generally the comments are explicitly requesting code changes or clarifying implementations. Otherwise Reviewers will reply with PR terminology:
> **Concept ACK** - Agree with the idea and overall direction, but have neither reviewed nor tested the code changes.
> **utACK (untested ACK)** - Reviewed and agree with the code changes but haven't actually tested them.
> **Tested ACK** - Reviewed the code changes and have verified the functionality or bug fix.
> **ACK** - A loose ACK can be confusing. It's best to avoid them unless it's a documentation/comment only change in which case there is nothing to test/verify; therefore the tested/untested distinction is not there.
> **NACK** - Disagree with the code changes/concept. Should be accompanied by an explanation.
### Squashing Commits
Before your PR is accepted, you might be requested to squash your commits to clean up the logs. This can be done using the following approach:
```bash
git checkout branch_name
git rebase -i HEAD~4
```
The integer value after `~` represents the number of commits you would like to interactively rebase. You can pick a value that makes sense for your situation. A template will pop up in your terminal asking you to specify what to do with each prior commit:
```bash
Commands:
p, pick = use commit
r, reword = use commit, but edit the commit message
e, edit = use commit, but stop for amending
s, squash = use commit, but meld into previous commit
f, fixup = like "squash", but discard this commit's log message
x, exec = run command (the rest of the line) using shell
```
Modify each line with the appropriate command, followed by the hash of the commit. For example, to squash the last 4 commits into a single commit for this PR:
```bash
p 1fc6c95 Final commit message
s 6b2481b Third commit message
s dd1475d Second commit message
s c619268 First commit message
```
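After saving the rebase instructions and resolving any conflicts, force-push the rewritten branch to your fork: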
```bash
git push origin branch-name --force
```
## Deploy / Merge PR
Once your PR/MR has been properly reviewed, it will be run through the build pipeline to ensure it is valid to merge into `master`.
There will be times when your PR is waiting on some portion of the above process. If you are asked to rebase your PR so that it merges cleanly into `master`, please do the following:
```bash
git checkout branch_name
git fetch upstream
git rebase upstream/master
git push -f
```

38
COPYING Normal file

@ -0,0 +1,38 @@
Copyright (c) 2020 The Zcash developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
The MIT software license (https://www.opensource.org/licenses/mit-license.php)
above applies to the code directly included in this source distribution, with
the exception of certain Autoconf macros. Dependencies downloaded as part of
the build process may be covered by other open-source licenses. The MIT-licensed
source code is not considered a derived work of these Autoconf macros or of the
dependencies. For further details see 'contrib/debian/copyright'.
This product includes software developed by the OpenSSL Project for use in the
OpenSSL Toolkit (https://www.openssl.org/). This product includes cryptographic
software written by Eric Young (eay@cryptsoft.com).
Although almost all of the Zcash code is licensed under "permissive" open source
licenses, users and distributors should note that when built using the default
build options, Zcash depends on Oracle Berkeley DB 6.2.x, which is licensed
under the GNU Affero General Public License.

65
Dockerfile Normal file

@ -0,0 +1,65 @@
# /************************************************************************
# File: Dockerfile
# Author: mdr0id
# Date: 9/3/2019
# Description: Used for devs that have not built zcashd or lightwalletd on
#              an existing system
# USAGE:
#
# To build image: make docker_img
# To run container: make docker_image_run
#
# This will place you into the container where you can run zcashd, zcash-cli,
# lightwalletd server etc..
#
# First you need to get zcashd synced to the current height on testnet; from outside the container:
# make docker_img_run_zcashd
#
# Sometimes you need to manually start zcashd for the first time, from inside the container:
# zcashd -printtoconsole
#
# Once the block height is at least 280,000 you can go ahead and start lightwalletd
# make docker_img_run_lightwalletd_insecure_server
#
# If you need a random bash session in the container, use:
# make docker_img_bash
#
# If you get kicked out of docker or it locks up...
# To restart, check which container you want to restart via docker ps -a
# Then restart it: docker restart <container id>
# Then reattach to it: docker attach <container id>
#
# Known bugs/missing features/todos:
#
# *** DO NOT USE IN PRODUCTION ***
#
# - Create docker-compose with according .env scaffolding
# - Determine librustzcash bug that breaks zcashd alpine builds at runtime
# - Once versioning is stable add config flags for images
# - Add mainnet config once lightwalletd stack supports it
#
# ************************************************************************/
# Create layer in case you want to modify local lightwalletd code
FROM golang:1.13 AS lightwalletd_base
ADD . /go/src/github.com/zcash/lightwalletd
WORKDIR /go/src/github.com/zcash/lightwalletd
RUN make \
&& /usr/bin/install -c ./lightwalletd /usr/local/bin/ \
&& mkdir -p /var/lib/lightwalletd/db \
&& chown 2002:2002 /var/lib/lightwalletd/db
ARG LWD_USER=lightwalletd
ARG LWD_UID=2002
RUN useradd --home-dir /srv/$LWD_USER \
--shell /bin/bash \
--create-home \
--uid $LWD_UID\
$LWD_USER
USER $LWD_USER
WORKDIR /srv/$LWD_USER
ENTRYPOINT ["lightwalletd"]
CMD ["--help"]

21
LICENSE Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Electric Coin Company
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

160
Makefile Normal file

@ -0,0 +1,160 @@
# /************************************************************************
# File: Makefile
# Author: mdr0id
# Date: 7/16/2019
# Description: Used for local and container dev in CI deployments
# Usage: make <target_name>
#
# Copyright (c) 2020 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# Known bugs/missing features:
# 1. make msan is not stable as of 9/20/2019
#
# ************************************************************************/
PROJECT_NAME := "lightwalletd"
GO_FILES := $(shell find . -name '*.go' | grep -v /vendor/ | grep -v '*_test.go')
GO_TEST_FILES := $(shell find . -name '*_test.go' -type f | rev | cut -d "/" -f2- | rev | sort -u)
GO_BUILD_FILES := $(shell find . -name 'main.go')
VERSION := `git describe --tags`
GITCOMMIT := `git rev-parse HEAD`
BUILDDATE := `date +%Y-%m-%d`
BUILDUSER := `whoami`
LDFLAGSSTRING :=-X github.com/adityapk00/lightwalletd/common.Version=$(VERSION)
LDFLAGSSTRING +=-X github.com/adityapk00/lightwalletd/common.GitCommit=$(GITCOMMIT)
LDFLAGSSTRING +=-X github.com/adityapk00/lightwalletd/common.Branch=$(BRANCH)
LDFLAGSSTRING +=-X github.com/adityapk00/lightwalletd/common.BuildDate=$(BUILDDATE)
LDFLAGSSTRING +=-X github.com/adityapk00/lightwalletd/common.BuildUser=$(BUILDUSER)
LDFLAGS :=-ldflags "$(LDFLAGSSTRING)"
# There are some files that are generated but are also in source control
# (so that an average clone-and-build doesn't need the required tools)
GENERATED_FILES := docs/rtd/index.html walletrpc/compact_formats.pb.go walletrpc/service.pb.go walletrpc/darkside.proto
PWD := $(shell pwd)
.PHONY: all dep build clean test coverage lint doc simpledoc proto
all: first-make-timestamp build $(GENERATED_FILES)
# Ensure that the generated files that are also in git source control are
# initially more recent than the files they're generated from (so we don't try
# to rebuild them); this isn't perfect because it depends on doing a make before
# editing a .proto file; also, "make -jn" may trigger remake if n > 1.
first-make-timestamp:
touch $(GENERATED_FILES) $@
# Lint golang files
lint:
golint -set_exit_status
show_tests:
@echo ${GO_TEST_FILES}
# Run unittests
test:
go test -v ./...
# Run data race detector
race:
GO111MODULE=on CGO_ENABLED=1 go test -v -race -short ./...
# Run memory sanitizer (need to ensure proper build flag is set)
msan:
go test -v -msan -short ${GO_TEST_FILES}
# Generate global code coverage report, ignore generated *.pb.go files
coverage:
go test -coverprofile=coverage.out ./...
sed -i '/\.pb\.go/d' coverage.out
# Generate code coverage report
coverage_report: coverage
go tool cover -func=coverage.out
# Generate code coverage report in HTML
coverage_html: coverage
go tool cover -html=coverage.out
# Generate documents, requires docker, see https://github.com/pseudomuto/protoc-gen-doc
doc: docs/rtd/index.html
docs/rtd/index.html: walletrpc/compact_formats.proto walletrpc/service.proto walletrpc/darkside.proto
docker run --rm -v $(PWD)/docs/rtd:/out -v $(PWD)/walletrpc:/protos pseudomuto/protoc-gen-doc
proto: walletrpc/service.pb.go walletrpc/darkside.pb.go walletrpc/compact_formats.pb.go
walletrpc/service.pb.go: walletrpc/service.proto
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative service.proto
walletrpc/darkside.pb.go: walletrpc/darkside.proto
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative darkside.proto
walletrpc/compact_formats.pb.go: walletrpc/compact_formats.proto
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative compact_formats.proto
# Generate documents using a very simple wrap-in-html approach (not ideal)
simpledoc: lwd-api.html
lwd-api.html: walletrpc/compact_formats.proto walletrpc/service.proto
./docgen.sh $^ >lwd-api.html
# Generate docker image
docker_img:
docker build -t zcash_lwd_base .
# Run the above docker image in a container
docker_img_run:
docker run -i --name zcashdlwd zcash_lwd_base
# Execute a bash process in the zcashdlwd container
docker_img_bash:
docker exec -it zcashdlwd bash
# Start the zcashd process in the zcashdlwd container
docker_img_run_zcashd:
docker exec -i zcashdlwd zcashd -printtoconsole
# Stop the zcashd process in the zcashdlwd container
docker_img_stop_zcashd:
docker exec -i zcashdlwd zcash-cli stop
# Start the lightwalletd server in the zcashdlwd container
docker_img_run_lightwalletd_insecure_server:
docker exec -i zcashdlwd server --no-tls-very-insecure=true --conf-file /home/zcash/.zcash/zcash.conf --log-file /logs/server.log --bind-addr 127.0.0.1:18232
# Remove and delete ALL images and containers in Docker; assumes containers are stopped
docker_remove_all:
docker system prune -f
# Get dependencies
dep:
@go get -v -d ./...
# Build binary
build:
GO111MODULE=on go build $(LDFLAGS)
build_rel:
GO111MODULE=on GOOS=linux go build $(LDFLAGS)
# Install binaries into Go path
install:
go install ./...
# Update your protoc, protobufs, grpc, .pb.go files
update-grpc:
go get -u github.com/golang/protobuf/proto
go get -u github.com/golang/protobuf/protoc-gen-go
go get -u google.golang.org/grpc
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative service.proto
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative darkside.proto
cd walletrpc && protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative compact_formats.proto
go mod tidy && go mod vendor
clean:
@echo "clean project..."
#rm -f $(PROJECT_NAME)


@ -1,84 +1,10 @@
 # Overview
-Zecwallet lightwalletd is a fork of [lightwalletd](https://github.com/adityapk00/lightwalletd) from the ECC.
-It is a backend service that provides a bandwidth-efficient interface to the Zcash blockchain for the [Zecwallet light wallet](https://github.com/adityapk00/zecwallet-lite-lib).
+Zecwallet lightwalletd is an API-compatible fork of [lightwalletd](https://github.com/zcash/lightwalletd) from the ECC. Since the API is compatible, Zecwallet Lite will work with any LightwalletD. This fork is maintained by Zecwallet to ease integrations and experiments with the Lightwallet protocol.
+LightwalletD is a backend service that provides a bandwidth-efficient interface to the Zcash blockchain for the [Zecwallet light wallet](https://github.com/adityapk00/zecwallet-lite-lib).
-## Changes from upstream lightwalletd
-This version of Zecwallet lightwalletd extends lightwalletd and:
-* Adds support for transparent addresses
-* Adds several new RPC calls for lightclients
-* Lots of perf improvements
-* Replaces SQLite with an in-memory cache for Compact Blocks
-* Replaces the local Txstore, delegating Tx lookups to Zcashd
-* Removes the need for a separate ingestor
 ## Running your own zeclite lightwalletd
+Please see instructions at the [LightwalletD Readme](https://github.com/zcash/lightwalletd/blob/master/README.md)
-#### 0. First, install [Go >= 1.11](https://golang.org/dl/#stable).
-#### 1. Run a zcash node.
-Start `zcashd` with the following options:
-```
-server=1
-rpcuser=user
-rpcpassword=password
-rpcbind=127.0.0.1
-rpcport=8232
-experimentalfeatures=1
-txindex=1
-insightexplorer=1
-```
-You might need to run with `-reindex` the first time you enable the `txindex` or `insightexplorer` options; the reindex might take a while. If you are using it on testnet, please also include `testnet=1`.
-#### 2. Get a TLS certificate
-##### a. "Let's Encrypt" certificate using NGINX as a reverse proxy
-If you are running a public-facing server, the easiest way to obtain a certificate is to use an NGINX reverse proxy and get a Let's Encrypt certificate. [Instructions are here](https://www.nginx.com/blog/using-free-ssltls-certificates-from-lets-encrypt-with-nginx/)
-Create a new section for the NGINX reverse proxy:
-```
-server {
-    listen 443 ssl http2;
-    ssl_certificate ssl/cert.pem;     # From certbot
-    ssl_certificate_key ssl/key.pem;  # From certbot
-    location / {
-        # Replace localhost:9067 with the address and port of your gRPC server if using a custom port
-        grpc_pass grpc://localhost:9067;
-    }
-}
-```
-##### b. Use without TLS certificate
-You can run lightwalletd without TLS and serve traffic over `http`. This is recommended only for local testing.
-#### 3. Run the frontend:
-You can run the gRPC server with or without TLS, depending on how you configured step 2. If you are using NGINX as a reverse proxy and are letting NGINX handle the TLS authentication, then run the frontend with `-no-tls`:
-```
-go run cmd/server/main.go -bind-addr 127.0.0.1:9067 -conf-file ~/.zcash/zcash.conf -no-tls
-```
-If you have a certificate that you want to use (either self-signed or from a certificate authority), pass the certificate to the frontend:
-```
-go run cmd/server/main.go -bind-addr 127.0.0.1:443 -conf-file ~/.zcash/zcash.conf -tls-cert cert.pem -tls-key key.pem
-```
-You should start seeing the frontend ingest and cache the zcash blocks after ~15 seconds.
-#### 4. Point the `zecwallet-cli` to this server
-Connect to your server!
-```
-./zecwallet-cli -server https://mylightwalletd.server.com:443
-```
-If you are using your own server running without TLS, you can also connect over `http`:
-```
-./zecwallet-cli --server http://127.0.0.1:9067
-```

42
buildenv.sh Executable file

@ -0,0 +1,42 @@
#!/bin/bash
for i in "$@"
do
case $i in
-h|--help)
echo HELP
exit 0
;;
-n=*|--network=*)
NETWORK="${i#*=}"
shift
;;
*)
echo Unknown option. Use -h for help.
exit -1
;;
esac
done
if [ "$NETWORK" == "" ]
then
echo ZCASHD_NETWORK=testnet
else
echo ZCASHD_NETWORK=$NETWORK
fi
# sanity check openssl first...
if [ `openssl rand -base64 32 | wc -c` != 45 ]
then
echo Openssl password generation failed.
exit 1
fi
PASSWORD_GRAFANA=`openssl rand -base64 32`
PASSWORD_ZCASHD=`openssl rand -base64 32`
while read TEMPLATE
do
eval echo $TEMPLATE
done < .env.template

438
cmd/root.go Normal file

@ -0,0 +1,438 @@
package cmd
import (
"fmt"
"net"
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/btcsuite/btcd/rpcclient"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
"github.com/adityapk00/lightwalletd/common"
"github.com/adityapk00/lightwalletd/common/logging"
"github.com/adityapk00/lightwalletd/frontend"
"github.com/adityapk00/lightwalletd/walletrpc"
)
var (
promRegistry = prometheus.NewRegistry()
)
var cfgFile string
var logger = logrus.New()
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "lightwalletd",
Short: "Lightwalletd is a backend service to the Zcash blockchain",
Long: `Lightwalletd is a backend service that provides a
bandwidth-efficient interface to the Zcash blockchain`,
Run: func(cmd *cobra.Command, args []string) {
opts := &common.Options{
GRPCBindAddr: viper.GetString("grpc-bind-addr"),
GRPCLogging: viper.GetBool("grpc-logging-insecure"),
HTTPBindAddr: viper.GetString("http-bind-addr"),
TLSCertPath: viper.GetString("tls-cert"),
TLSKeyPath: viper.GetString("tls-key"),
LogLevel: viper.GetUint64("log-level"),
LogFile: viper.GetString("log-file"),
ZcashConfPath: viper.GetString("zcash-conf-path"),
RPCUser: viper.GetString("rpcuser"),
RPCPassword: viper.GetString("rpcpassword"),
RPCHost: viper.GetString("rpchost"),
RPCPort: viper.GetString("rpcport"),
NoTLSVeryInsecure: viper.GetBool("no-tls-very-insecure"),
GenCertVeryInsecure: viper.GetBool("gen-cert-very-insecure"),
DataDir: viper.GetString("data-dir"),
Redownload: viper.GetBool("redownload"),
PingEnable: viper.GetBool("ping-very-insecure"),
Darkside: viper.GetBool("darkside-very-insecure"),
DarksideTimeout: viper.GetUint64("darkside-timeout"),
}
common.Log.Debugf("Options: %#v\n", opts)
filesThatShouldExist := []string{
opts.LogFile,
}
if !fileExists(opts.LogFile) {
os.OpenFile(opts.LogFile, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}
if !opts.Darkside && (opts.RPCUser == "" || opts.RPCPassword == "" || opts.RPCHost == "" || opts.RPCPort == "") {
filesThatShouldExist = append(filesThatShouldExist, opts.ZcashConfPath)
}
if !opts.NoTLSVeryInsecure && !opts.GenCertVeryInsecure {
filesThatShouldExist = append(filesThatShouldExist,
opts.TLSCertPath, opts.TLSKeyPath)
}
for _, filename := range filesThatShouldExist {
if !fileExists(filename) {
os.Stderr.WriteString(fmt.Sprintf("\n ** File does not exist: %s\n\n", filename))
common.Log.Fatal("required file ", filename, " does not exist")
}
}
// Start server and block, or exit
if err := startServer(opts); err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("couldn't create server")
}
},
}
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func startServer(opts *common.Options) error {
if opts.LogFile != "" {
// instead write parsable logs for logstash/splunk/etc
output, err := os.OpenFile(opts.LogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
"path": opts.LogFile,
}).Fatal("couldn't open log file")
}
defer output.Close()
logger.SetOutput(output)
logger.SetFormatter(&logrus.JSONFormatter{})
}
promRegistry.MustRegister(common.Metrics.LatestBlockCounter)
promRegistry.MustRegister(common.Metrics.TotalErrors)
promRegistry.MustRegister(common.Metrics.TotalBlocksServedConter)
promRegistry.MustRegister(common.Metrics.SendTransactionsCounter)
promRegistry.MustRegister(common.Metrics.TotalSaplingParamsCounter)
promRegistry.MustRegister(common.Metrics.TotalSproutParamsCounter)
logger.SetLevel(logrus.Level(opts.LogLevel))
common.Log.WithFields(logrus.Fields{
"gitCommit": common.GitCommit,
"buildDate": common.BuildDate,
"buildUser": common.BuildUser,
}).Infof("Starting gRPC server version %s on %s", common.Version, opts.GRPCBindAddr)
logging.LogToStderr = opts.GRPCLogging
// gRPC initialization
var server *grpc.Server
if opts.NoTLSVeryInsecure {
common.Log.Warningln("Starting insecure no-TLS (plaintext) server")
fmt.Println("Starting insecure server")
server = grpc.NewServer(
grpc.StreamInterceptor(
grpc_middleware.ChainStreamServer(
grpc_prometheus.StreamServerInterceptor),
),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
logging.LogInterceptor,
grpc_prometheus.UnaryServerInterceptor),
))
} else {
var transportCreds credentials.TransportCredentials
if opts.GenCertVeryInsecure {
common.Log.Warning("Certificate and key not provided, generating self signed values")
fmt.Println("Starting insecure self-certificate server")
tlsCert := common.GenerateCerts()
transportCreds = credentials.NewServerTLSFromCert(tlsCert)
} else {
var err error
transportCreds, err = credentials.NewServerTLSFromFile(opts.TLSCertPath, opts.TLSKeyPath)
if err != nil {
common.Log.WithFields(logrus.Fields{
"cert_file": opts.TLSCertPath,
"key_path": opts.TLSKeyPath,
"error": err,
}).Fatal("couldn't load TLS credentials")
}
}
server = grpc.NewServer(
grpc.Creds(transportCreds),
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_prometheus.StreamServerInterceptor),
),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
logging.LogInterceptor,
grpc_prometheus.UnaryServerInterceptor),
))
}
grpc_prometheus.EnableHandlingTimeHistogram()
grpc_prometheus.Register(server)
go startHTTPServer(opts)
// Enable reflection for debugging
if opts.LogLevel >= uint64(logrus.WarnLevel) {
reflection.Register(server)
}
// Initialize Zcash RPC client. Right now (Jan 2018) this is only for
// sending transactions, but in the future it could back a different type
// of block streamer.
var saplingHeight int
var chainName string
var rpcClient *rpcclient.Client
var err error
if opts.Darkside {
chainName = "darkside"
} else {
if opts.RPCUser != "" && opts.RPCPassword != "" && opts.RPCHost != "" && opts.RPCPort != "" {
rpcClient, err = frontend.NewZRPCFromFlags(opts)
} else {
rpcClient, err = frontend.NewZRPCFromConf(opts.ZcashConfPath)
}
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("setting up RPC connection to zcashd")
}
// Indirect function for test mocking (so unit tests can talk to stub functions).
common.RawRequest = rpcClient.RawRequest
// Ensure that we can communicate with zcashd
common.FirstRPC()
getLightdInfo, err := common.GetLightdInfo()
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("getting initial information from zcashd")
}
common.Log.Info("Got sapling height ", getLightdInfo.SaplingActivationHeight,
" block height ", getLightdInfo.BlockHeight,
" chain ", getLightdInfo.ChainName,
" branchID ", getLightdInfo.ConsensusBranchId)
saplingHeight = int(getLightdInfo.SaplingActivationHeight)
chainName = getLightdInfo.ChainName
}
dbPath := filepath.Join(opts.DataDir, "db")
if opts.Darkside {
os.RemoveAll(filepath.Join(dbPath, chainName))
}
// Temporary, because PR 320 put the db files in the wrong place
// (one level too high, directly in "db/" instead of "db/chainname"),
// so delete them if they're present. This can be removed sometime.
os.Remove(filepath.Join(dbPath, "blocks"))
os.Remove(filepath.Join(dbPath, "blocks-corrupted"))
os.Remove(filepath.Join(dbPath, "lengths"))
os.Remove(filepath.Join(dbPath, "lengths-corrupted"))
if err := os.MkdirAll(opts.DataDir, 0755); err != nil {
os.Stderr.WriteString(fmt.Sprintf("\n ** Can't create data directory: %s\n\n", opts.DataDir))
os.Exit(1)
}
if err := os.MkdirAll(dbPath, 0755); err != nil {
os.Stderr.WriteString(fmt.Sprintf("\n ** Can't create db directory: %s\n\n", dbPath))
os.Exit(1)
}
cache := common.NewBlockCache(dbPath, chainName, saplingHeight, opts.Redownload)
if !opts.Darkside {
go common.BlockIngestor(cache, 0 /*loop forever*/)
} else {
// Darkside wants to control starting the block ingestor.
common.DarksideInit(cache, int(opts.DarksideTimeout))
}
// Compact transaction service initialization
{
service, err := frontend.NewLwdStreamer(cache, chainName, opts.PingEnable)
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("couldn't create backend")
}
walletrpc.RegisterCompactTxStreamerServer(server, service)
}
if opts.Darkside {
service, err := frontend.NewDarksideStreamer(cache)
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("couldn't create backend")
}
walletrpc.RegisterDarksideStreamerServer(server, service)
}
// Start listening
listener, err := net.Listen("tcp", opts.GRPCBindAddr)
if err != nil {
common.Log.WithFields(logrus.Fields{
"bind_addr": opts.GRPCBindAddr,
"error": err,
}).Fatal("couldn't create listener")
}
// Signal handler for graceful stops
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
s := <-signals
cache.Sync()
common.Log.WithFields(logrus.Fields{
"signal": s.String(),
}).Info("caught signal, stopping gRPC server")
os.Exit(1)
}()
err = server.Serve(listener)
if err != nil {
common.Log.WithFields(logrus.Fields{
"error": err,
}).Fatal("gRPC server exited")
}
return nil
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func init() {
rootCmd.AddCommand(versionCmd)
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is current directory, lightwalletd.yaml)")
rootCmd.Flags().String("http-bind-addr", "127.0.0.1:9068", "the address to listen for http on")
rootCmd.Flags().String("grpc-bind-addr", "127.0.0.1:9067", "the address to listen for grpc on")
rootCmd.Flags().Bool("grpc-logging-insecure", false, "enable grpc logging to stderr")
rootCmd.Flags().String("tls-cert", "./cert.pem", "the path to a TLS certificate")
rootCmd.Flags().String("tls-key", "./cert.key", "the path to a TLS key file")
rootCmd.Flags().Int("log-level", int(logrus.InfoLevel), "log level (logrus 1-7)")
rootCmd.Flags().String("log-file", "./server.log", "log file to write to")
rootCmd.Flags().String("zcash-conf-path", "./zcash.conf", "conf file to pull RPC creds from")
rootCmd.Flags().String("rpcuser", "", "RPC user name")
rootCmd.Flags().String("rpcpassword", "", "RPC password")
rootCmd.Flags().String("rpchost", "", "RPC host")
rootCmd.Flags().String("rpcport", "", "RPC host port")
rootCmd.Flags().Bool("no-tls-very-insecure", false, "run without the required TLS certificate, only for debugging, DO NOT use in production")
rootCmd.Flags().Bool("gen-cert-very-insecure", false, "run with self-signed TLS certificate, only for debugging, DO NOT use in production")
rootCmd.Flags().Bool("redownload", false, "re-fetch all blocks from zcashd; reinitialize local cache files")
rootCmd.Flags().String("data-dir", "/var/lib/lightwalletd", "data directory (such as db)")
rootCmd.Flags().Bool("ping-very-insecure", false, "allow Ping GRPC for testing")
rootCmd.Flags().Bool("darkside-very-insecure", false, "run with GRPC-controllable mock zcashd for integration testing (shuts down after 30 minutes)")
rootCmd.Flags().Int("darkside-timeout", 30, "override 30 minute default darkside timeout")
viper.BindPFlag("grpc-bind-addr", rootCmd.Flags().Lookup("grpc-bind-addr"))
viper.SetDefault("grpc-bind-addr", "127.0.0.1:9067")
viper.BindPFlag("grpc-logging-insecure", rootCmd.Flags().Lookup("grpc-logging-insecure"))
viper.SetDefault("grpc-logging-insecure", false)
viper.BindPFlag("http-bind-addr", rootCmd.Flags().Lookup("http-bind-addr"))
viper.SetDefault("http-bind-addr", "127.0.0.1:9068")
viper.BindPFlag("tls-cert", rootCmd.Flags().Lookup("tls-cert"))
viper.SetDefault("tls-cert", "./cert.pem")
viper.BindPFlag("tls-key", rootCmd.Flags().Lookup("tls-key"))
viper.SetDefault("tls-key", "./cert.key")
viper.BindPFlag("log-level", rootCmd.Flags().Lookup("log-level"))
viper.SetDefault("log-level", int(logrus.InfoLevel))
viper.BindPFlag("log-file", rootCmd.Flags().Lookup("log-file"))
viper.SetDefault("log-file", "./server.log")
viper.BindPFlag("zcash-conf-path", rootCmd.Flags().Lookup("zcash-conf-path"))
viper.SetDefault("zcash-conf-path", "./zcash.conf")
viper.BindPFlag("rpcuser", rootCmd.Flags().Lookup("rpcuser"))
viper.BindPFlag("rpcpassword", rootCmd.Flags().Lookup("rpcpassword"))
viper.BindPFlag("rpchost", rootCmd.Flags().Lookup("rpchost"))
viper.BindPFlag("rpcport", rootCmd.Flags().Lookup("rpcport"))
viper.BindPFlag("no-tls-very-insecure", rootCmd.Flags().Lookup("no-tls-very-insecure"))
viper.SetDefault("no-tls-very-insecure", false)
viper.BindPFlag("gen-cert-very-insecure", rootCmd.Flags().Lookup("gen-cert-very-insecure"))
viper.SetDefault("gen-cert-very-insecure", false)
viper.BindPFlag("redownload", rootCmd.Flags().Lookup("redownload"))
viper.SetDefault("redownload", false)
viper.BindPFlag("data-dir", rootCmd.Flags().Lookup("data-dir"))
viper.SetDefault("data-dir", "/var/lib/lightwalletd")
viper.BindPFlag("ping-very-insecure", rootCmd.Flags().Lookup("ping-very-insecure"))
viper.SetDefault("ping-very-insecure", false)
viper.BindPFlag("darkside-very-insecure", rootCmd.Flags().Lookup("darkside-very-insecure"))
viper.SetDefault("darkside-very-insecure", false)
viper.BindPFlag("darkside-timeout", rootCmd.Flags().Lookup("darkside-timeout"))
viper.SetDefault("darkside-timeout", 30)
logger.SetFormatter(&logrus.TextFormatter{
//DisableColors: true,
FullTimestamp: true,
DisableLevelTruncation: true,
})
onexit := func() {
fmt.Printf("Lightwalletd died with a Fatal error. Check logfile for details.\n")
}
common.Log = logger.WithFields(logrus.Fields{
"app": "lightwalletd",
})
// Metrics
common.Metrics = common.GetPrometheusMetrics()
logrus.RegisterExitHandler(onexit)
// Indirect function for test mocking (so unit tests can talk to stub functions)
common.Sleep = time.Sleep
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
} else {
// Look in the current directory for a configuration file
viper.AddConfigPath(".")
// Viper automatically appends the extension to this config name
// For example, lightwalletd.yml
viper.SetConfigName("lightwalletd")
}
// Replace `-` in config options with `_` for ENV keys
replacer := strings.NewReplacer("-", "_")
viper.SetEnvKeyReplacer(replacer)
viper.AutomaticEnv() // read in environment variables that match
// If a config file is found, read it in.
var err error
if err = viper.ReadInConfig(); err == nil {
fmt.Println("Using config file:", viper.ConfigFileUsed())
}
}
func startHTTPServer(opts *common.Options) {
http.Handle("/metrics", promhttp.HandlerFor(
promRegistry,
promhttp.HandlerOpts{},
))
// Add the params download handler
http.HandleFunc("/params/", common.ParamsHandler)
http.ListenAndServe(opts.HTTPBindAddr, nil)
}

22
cmd/root_test.go Normal file

@ -0,0 +1,22 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package cmd
import (
"testing"
)
func TestFileExists(t *testing.T) {
if fileExists("nonexistent-file") {
t.Fatal("fileExists unexpected success")
}
// If the path exists but is a directory, should return false
if fileExists(".") {
t.Fatal("fileExists unexpected success")
}
// The following file should exist, it's what's being tested
if !fileExists("root.go") {
t.Fatal("fileExists failed")
}
}


@ -1,288 +0,0 @@
package main
import (
"context"
"flag"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
"github.com/adityapk00/lightwalletd/common"
"github.com/adityapk00/lightwalletd/frontend"
"github.com/adityapk00/lightwalletd/walletrpc"
)
var log *logrus.Entry
var logger = logrus.New()
var (
promRegistry = prometheus.NewRegistry()
)
var metrics = common.GetPrometheusMetrics()
func init() {
logger.SetFormatter(&logrus.TextFormatter{
//DisableColors: true,
FullTimestamp: true,
DisableLevelTruncation: true,
})
log = logger.WithFields(logrus.Fields{
"app": "frontend-grpc",
})
promRegistry.MustRegister(metrics.LatestBlockCounter)
promRegistry.MustRegister(metrics.TotalErrors)
promRegistry.MustRegister(metrics.TotalBlocksServedConter)
promRegistry.MustRegister(metrics.SendTransactionsCounter)
promRegistry.MustRegister(metrics.TotalSaplingParamsCounter)
promRegistry.MustRegister(metrics.TotalSproutParamsCounter)
}
// TODO stream logging
func LoggingInterceptor() grpc.ServerOption {
return grpc.UnaryInterceptor(logInterceptor)
}
func logInterceptor(
ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler,
) (interface{}, error) {
reqLog := loggerFromContext(ctx)
start := time.Now()
resp, err := handler(ctx, req)
entry := reqLog.WithFields(logrus.Fields{
"method": info.FullMethod,
"duration": time.Since(start),
"error": err,
})
if err != nil {
entry.Error("call failed")
} else {
entry.Info("method called")
}
return resp, err
}
func loggerFromContext(ctx context.Context) *logrus.Entry {
if xRealIP, ok := metadata.FromIncomingContext(ctx); ok {
realIP := xRealIP.Get("x-real-ip")
if len(realIP) > 0 {
return log.WithFields(logrus.Fields{"peer_addr": realIP[0]})
}
}
if peerInfo, ok := peer.FromContext(ctx); ok {
return log.WithFields(logrus.Fields{"peer_addr": peerInfo.Addr})
}
return log.WithFields(logrus.Fields{"peer_addr": "unknown"})
}
type Options struct {
bindAddr string
tlsCertPath string
tlsKeyPath string
noTLS bool
logLevel uint64
logPath string
zcashConfPath string
cacheSize int
metricsPort uint
paramsPort uint
}
func main() {
opts := &Options{}
flag.StringVar(&opts.bindAddr, "bind-addr", "127.0.0.1:9067", "the address to listen on")
flag.StringVar(&opts.tlsCertPath, "tls-cert", "", "the path to a TLS certificate (optional)")
flag.StringVar(&opts.tlsKeyPath, "tls-key", "", "the path to a TLS key file (optional)")
flag.BoolVar(&opts.noTLS, "no-tls", false, "Disable TLS, serve un-encrypted traffic.")
flag.Uint64Var(&opts.logLevel, "log-level", uint64(logrus.InfoLevel), "log level (logrus 1-7)")
flag.StringVar(&opts.logPath, "log-file", "", "log file to write to")
flag.StringVar(&opts.zcashConfPath, "conf-file", "", "conf file to pull RPC creds from")
flag.IntVar(&opts.cacheSize, "cache-size", 40000, "number of blocks to hold in the cache")
flag.UintVar(&opts.paramsPort, "params-port", 8090, "the port on which the params server listens")
flag.UintVar(&opts.metricsPort, "metrics-port", 2234, "the port on which to run the prometheus metrics exported")
// TODO prod metrics
// TODO support config from file and env vars
flag.Parse()
if opts.zcashConfPath == "" {
flag.Usage()
os.Exit(1)
}
if !opts.noTLS && (opts.tlsCertPath == "" || opts.tlsKeyPath == "") {
println("Please specify a TLS certificate/key to use. You can use a self-signed certificate.")
println("See 'https://github.com/adityapk00/lightwalletd/blob/master/README.md#running-your-own-zeclite-lightwalletd'")
os.Exit(1)
}
if opts.logPath != "" {
// instead write parsable logs for logstash/splunk/etc
output, err := os.OpenFile(opts.logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
"path": opts.logPath,
}).Fatal("couldn't open log file")
}
defer output.Close()
logger.SetOutput(output)
logger.SetFormatter(&logrus.JSONFormatter{})
}
logger.SetLevel(logrus.Level(opts.logLevel))
// gRPC initialization
var server *grpc.Server
if !opts.noTLS && (opts.tlsCertPath != "" && opts.tlsKeyPath != "") {
transportCreds, err := credentials.NewServerTLSFromFile(opts.tlsCertPath, opts.tlsKeyPath)
if err != nil {
log.WithFields(logrus.Fields{
"cert_file": opts.tlsCertPath,
"key_path": opts.tlsKeyPath,
"error": err,
}).Fatal("couldn't load TLS credentials")
}
server = grpc.NewServer(grpc.Creds(transportCreds), LoggingInterceptor())
} else {
server = grpc.NewServer(LoggingInterceptor())
}
// Enable reflection for debugging
if opts.logLevel >= uint64(logrus.WarnLevel) {
reflection.Register(server)
}
// Initialize Zcash RPC client. Right now (Jan 2018) this is only for
// sending transactions, but in the future it could back a different type
// of block streamer.
rpcClient, err := frontend.NewZRPCFromConf(opts.zcashConfPath)
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
}).Warn("zcash.conf failed, will try empty credentials for rpc")
rpcClient, err = frontend.NewZRPCFromCreds("127.0.0.1:8232", "", "")
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
}).Warn("couldn't start rpc conn. won't be able to send transactions")
}
}
// Get the sapling activation height from the RPC
saplingHeight, blockHeight, chainName, branchID, err := common.GetSaplingInfo(rpcClient)
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
}).Warn("Unable to get sapling activation height")
}
log.Info("Got sapling height ", saplingHeight, " chain ", chainName, " branchID ", branchID)
// Initialize the cache
cache := common.NewBlockCache(opts.cacheSize, log)
stopChan := make(chan bool, 1)
// Start the block cache importer at 100 blocks, so that the server is ready immediately.
// The remaining blocks are added historically
cacheStart := blockHeight - 100
if cacheStart < saplingHeight {
cacheStart = saplingHeight
}
// Start the ingestor
go common.BlockIngestor(rpcClient, cache, log, stopChan, cacheStart)
// Add historical blocks also
go common.HistoricalBlockIngestor(rpcClient, cache, log, cacheStart-1, opts.cacheSize, saplingHeight)
// Signal handler for graceful stops
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
go func() {
s := <-signals
log.WithFields(logrus.Fields{
"signal": s.String(),
}).Info("caught signal, stopping gRPC server")
// Stop the server
server.GracefulStop()
// Stop the block ingestor
stopChan <- true
}()
// Start the metrics server
go func() {
http.Handle("/metrics", promhttp.HandlerFor(
promRegistry,
promhttp.HandlerOpts{},
))
metricsport := fmt.Sprintf(":%d", opts.metricsPort)
log.Fatal(http.ListenAndServe(metricsport, nil))
}()
// Start the download params handler
log.Infof("Starting params handler")
paramsport := fmt.Sprintf(":%d", opts.paramsPort)
go common.ParamsDownloadHandler(metrics, log, paramsport)
// Start the GRPC server
log.Infof("Starting gRPC server on %s", opts.bindAddr)
// Compact transaction service initialization
service, err := frontend.NewSQLiteStreamer(rpcClient, cache, log, metrics)
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
}).Fatal("couldn't create SQL backend")
}
defer service.(*frontend.SqlStreamer).GracefulStop()
// Register service
walletrpc.RegisterCompactTxStreamerServer(server, service)
// Start listening
listener, err := net.Listen("tcp", opts.bindAddr)
if err != nil {
log.WithFields(logrus.Fields{
"bind_addr": opts.bindAddr,
"error": err,
}).Fatal("couldn't create listener")
}
err = server.Serve(listener)
if err != nil {
log.WithFields(logrus.Fields{
"error": err,
}).Fatal("gRPC server exited")
}
}

22
cmd/version.go Normal file

@ -0,0 +1,22 @@
package cmd
import (
"fmt"
"github.com/adityapk00/lightwalletd/common"
"github.com/spf13/cobra"
)
// versionCmd represents the version command
var versionCmd = &cobra.Command{
Use: "version",
Short: "Display lightwalletd version",
Long: `Display lightwalletd version.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("lightwalletd version: ", common.Version)
fmt.Println("from commit: ", common.GitCommit)
fmt.Println("on: ", common.BuildDate)
fmt.Println("by: ", common.BuildUser)
},
}

View File

@ -1,164 +1,405 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// Package common contains utilities that are shared by other packages.
package common
import (
"bytes"
"encoding/binary"
"hash/fnv"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/adityapk00/lightwalletd/walletrpc"
"github.com/golang/protobuf/proto"
)
// BlockCache contains a consecutive set of recent compact blocks in marshalled form.
type BlockCache struct {
lengthsName, blocksName string // pathnames
lengthsFile, blocksFile *os.File
starts []int64 // Starting offset of each block within blocksFile
firstBlock int // height of the first block in the cache (usually Sapling activation)
nextBlock int // height of the first block not in the cache
latestHash []byte // hash of the most recent (highest height) block, for detecting reorgs.
mutex sync.RWMutex
}
// GetNextHeight returns the height of the lowest unobtained block.
func (c *BlockCache) GetNextHeight() int {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.nextBlock
}
// GetFirstHeight returns the height of the lowest block (usually Sapling activation).
func (c *BlockCache) GetFirstHeight() int {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.firstBlock
}
// GetLatestHash returns the hash (block ID) of the most recent (highest) known block.
func (c *BlockCache) GetLatestHash() []byte {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.latestHash
}
// HashMismatch indicates if the given prev-hash doesn't match the most recent block's hash,
// so reorgs can be detected.
func (c *BlockCache) HashMismatch(prevhash []byte) bool {
c.mutex.RLock()
defer c.mutex.RUnlock()
return c.latestHash != nil && !bytes.Equal(c.latestHash, prevhash)
}
// Make the block at the given height the lowest height that we don't have.
// In other words, wipe out this height and beyond.
// This should never increase the size of the cache, only decrease.
// Caller should hold c.mutex.Lock().
func (c *BlockCache) setDbFiles(height int) {
if height <= c.nextBlock {
if height < c.firstBlock {
height = c.firstBlock
}
index := height - c.firstBlock
if err := c.lengthsFile.Truncate(int64(index * 4)); err != nil {
Log.Fatal("truncate lengths file failed: ", err)
}
if err := c.blocksFile.Truncate(c.starts[index]); err != nil {
Log.Fatal("truncate blocks file failed: ", err)
}
c.Sync()
c.starts = c.starts[:index+1]
c.nextBlock = height
c.setLatestHash()
}
}
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return out.Close()
}
// Caller should hold c.mutex.Lock().
func (c *BlockCache) recoverFromCorruption(height int) {
Log.Warning("CORRUPTION detected in db blocks-cache files, height ", height, " redownloading")
// Save the corrupted files for post-mortem analysis.
save := c.lengthsName + "-corrupted"
if err := copyFile(c.lengthsName, save); err != nil {
Log.Warning("Could not copy db lengths file: ", err)
}
save = c.blocksName + "-corrupted"
if err := copyFile(c.blocksName, save); err != nil {
Log.Warning("Could not copy db lengths file: ", err)
}
c.setDbFiles(height)
}
// blockLength returns the length of the block at the given height, not including the checksum.
func (c *BlockCache) blockLength(height int) int {
index := height - c.firstBlock
return int(c.starts[index+1] - c.starts[index] - 8)
}
// Calculate the 8-byte checksum that precedes each block in the blocks file.
func checksum(height int, b []byte) []byte {
h := make([]byte, 8)
binary.LittleEndian.PutUint64(h, uint64(height))
cs := fnv.New64a()
cs.Write(h)
cs.Write(b)
return cs.Sum(nil)
}
// Caller should hold (at least) c.mutex.RLock().
func (c *BlockCache) readBlock(height int) *walletrpc.CompactBlock {
blockLen := c.blockLength(height)
b := make([]byte, blockLen+8)
offset := c.starts[height-c.firstBlock]
n, err := c.blocksFile.ReadAt(b, offset)
if err != nil || n != len(b) {
Log.Warning("blocks read offset: ", offset, " failed: ", n, err)
return nil
}
diskcs := b[:8]
b = b[8 : blockLen+8]
if !bytes.Equal(checksum(height, b), diskcs) {
Log.Warning("bad block checksum at height: ", height, " offset: ", offset)
return nil
}
block := &walletrpc.CompactBlock{}
err = proto.Unmarshal(b, block)
if err != nil {
// Could be file corruption.
Log.Warning("blocks unmarshal at offset: ", offset, " failed: ", err)
return nil
}
if int(block.Height) != height {
// Could be file corruption.
Log.Warning("block unexpected height at height ", height, " offset: ", offset)
return nil
}
return block
}
// Caller should hold c.mutex.Lock().
func (c *BlockCache) setLatestHash() {
c.latestHash = nil
// If at least one block remains, get the last (highest) block's hash.
if c.nextBlock > c.firstBlock {
block := c.readBlock(c.nextBlock - 1)
if block == nil {
c.recoverFromCorruption(c.nextBlock - 10000)
return
}
c.latestHash = make([]byte, len(block.Hash))
copy(c.latestHash, block.Hash)
}
}
// Reset is used only for darkside testing.
func (c *BlockCache) Reset(startHeight int) {
c.setDbFiles(c.firstBlock) // empty the cache
c.firstBlock = startHeight
c.nextBlock = startHeight
}
// NewBlockCache returns an instance of a block cache object.
// (No locking here, we assume this is single-threaded.)
func NewBlockCache(dbPath string, chainName string, startHeight int, redownload bool) *BlockCache {
c := &BlockCache{}
c.firstBlock = startHeight
c.nextBlock = startHeight
c.lengthsName, c.blocksName = dbFileNames(dbPath, chainName)
var err error
if err := os.MkdirAll(filepath.Join(dbPath, chainName), 0755); err != nil {
Log.Fatal("mkdir ", dbPath, " failed: ", err)
}
c.blocksFile, err = os.OpenFile(c.blocksName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
Log.Fatal("open ", c.blocksName, " failed: ", err)
}
c.lengthsFile, err = os.OpenFile(c.lengthsName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
if err != nil {
Log.Fatal("open ", c.lengthsName, " failed: ", err)
}
if redownload {
if err := c.lengthsFile.Truncate(0); err != nil {
Log.Fatal("truncate lengths file failed: ", err)
}
if err := c.blocksFile.Truncate(0); err != nil {
Log.Fatal("truncate blocks file failed: ", err)
}
}
lengths, err := ioutil.ReadFile(c.lengthsName)
if err != nil {
Log.Fatal("read ", c.lengthsName, " failed: ", err)
}
// The last entry in starts[] is where to write the next block.
var offset int64
c.starts = nil
c.starts = append(c.starts, 0)
for i := 0; i < len(lengths)/4; i++ {
if len(lengths[:4]) < 4 {
Log.Warning("lengths file has a partial entry")
c.recoverFromCorruption(c.nextBlock)
break
}
length := binary.LittleEndian.Uint32(lengths[i*4 : (i+1)*4])
if length < 74 || length > 4*1000*1000 {
Log.Warning("lengths file has impossible value ", length)
c.recoverFromCorruption(c.nextBlock)
break
}
offset += int64(length) + 8
c.starts = append(c.starts, offset)
// Check for corruption.
block := c.readBlock(c.nextBlock)
if block == nil {
Log.Warning("error reading block")
c.recoverFromCorruption(c.nextBlock)
break
}
c.nextBlock++
}
c.setDbFiles(c.nextBlock)
Log.Info("Found ", c.nextBlock-c.firstBlock, " blocks in cache")
return c
}
func dbFileNames(dbPath string, chainName string) (string, string) {
return filepath.Join(dbPath, chainName, "lengths"),
filepath.Join(dbPath, chainName, "blocks")
}
// Add adds the given block to the cache at the given height. It returns a non-nil
// error only if the block can't be marshalled; db file write failures are fatal.
func (c *BlockCache) Add(height int, block *walletrpc.CompactBlock) error {
// Invariant: m[firstBlock..nextBlock) are valid.
c.mutex.Lock()
defer c.mutex.Unlock()
if height > c.nextBlock {
// Cache has been reset (for example, checksum error)
return nil
}
if height < c.firstBlock {
// Should never try to add a block before Sapling activation height
Log.Fatal("cache.Add height below Sapling: ", height)
return nil
}
if height < c.nextBlock {
// Should never try to "backup" (call Reorg() instead).
Log.Fatal("cache.Add height going backwards: ", height)
return nil
}
bheight := int(block.Height)
if bheight != height {
// This could only happen if zcashd returned the wrong
// block (not the height we requested).
Log.Fatal("cache.Add wrong height: ", bheight, " expecting: ", height)
return nil
}
// Add the new block and its length to the db files.
data, err := proto.Marshal(block)
if err != nil {
return err
}
b := append(checksum(height, data), data...)
n, err := c.blocksFile.Write(b)
if err != nil {
Log.Fatal("blocks write failed: ", err)
}
if n != len(b) {
Log.Fatal("blocks write incorrect length: expected: ", len(b), "written: ", n)
}
b = make([]byte, 4)
binary.LittleEndian.PutUint32(b, uint32(len(data)))
n, err = c.lengthsFile.Write(b)
if err != nil {
Log.Fatal("lengths write failed: ", err)
}
if n != len(b) {
Log.Fatal("lengths write incorrect length: expected: ", len(b), "written: ", n)
}
// update the in-memory variables
offset := c.starts[len(c.starts)-1]
c.starts = append(c.starts, offset+int64(len(data)+8))
if c.latestHash == nil {
c.latestHash = make([]byte, len(block.Hash))
}
copy(c.latestHash, block.Hash)
c.nextBlock++
// Invariant: m[firstBlock..nextBlock) are valid.
return nil
}
// Reorg resets nextBlock (the block that should be Add()ed next)
// downward to the given height.
func (c *BlockCache) Reorg(height int) {
c.mutex.Lock()
defer c.mutex.Unlock()
// Allow the caller not to have to worry about Sapling start height.
if height < c.firstBlock {
height = c.firstBlock
}
if height >= c.nextBlock {
// Timing window, ignore this request
return
}
// Remove the end of the cache.
c.nextBlock = height
newCacheLen := height - c.firstBlock
c.starts = c.starts[:newCacheLen+1]
if err := c.lengthsFile.Truncate(int64(4 * newCacheLen)); err != nil {
Log.Fatal("truncate failed: ", err)
}
if err := c.blocksFile.Truncate(c.starts[newCacheLen]); err != nil {
Log.Fatal("truncate failed: ", err)
}
c.setLatestHash()
}
// Get returns the compact block at the requested height if it's
// in the cache, else nil.
func (c *BlockCache) Get(height int) *walletrpc.CompactBlock {
c.mutex.RLock()
defer c.mutex.RUnlock()
if height < c.firstBlock || height >= c.nextBlock {
return nil
}
block := c.readBlock(height)
if block == nil {
go func() {
// We hold only the read lock, need the exclusive lock.
c.mutex.Lock()
c.recoverFromCorruption(height - 10000)
c.mutex.Unlock()
}()
return nil
}
return block
}
// GetLatestHeight returns the height of the most recent block, or -1
// if the cache is empty.
func (c *BlockCache) GetLatestHeight() int {
c.mutex.RLock()
defer c.mutex.RUnlock()
if c.firstBlock == c.nextBlock {
return -1
}
return c.nextBlock - 1
}
// Sync ensures that the db files are flushed to disk, can be called unnecessarily.
func (c *BlockCache) Sync() {
c.lengthsFile.Sync()
c.blocksFile.Sync()
}
// Close is currently used only for testing.
func (c *BlockCache) Close() {
// Some operating systems require you to close files before you can remove them.
if c.lengthsFile != nil {
c.lengthsFile.Close()
c.lengthsFile = nil
}
if c.blocksFile != nil {
c.blocksFile.Close()
c.blocksFile = nil
}
} }
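To make the on-disk layout used by BlockCache above concrete: the lengths file is a sequence of 4-byte little-endian block lengths, and the blocks file holds, for each block, an 8-byte FNV-1a checksum followed by that many bytes of marshalled CompactBlock. A standalone sketch (not part of this commit; the db path and chain name are assumptions) that walks the lengths file and prints each record's offset:

package main

import (
    "encoding/binary"
    "fmt"
    "io/ioutil"
    "log"
)

func main() {
    // Assumed path: <data_dir>/<chain>/lengths, e.g. a mainnet cache.
    lengths, err := ioutil.ReadFile("db/main/lengths")
    if err != nil {
        log.Fatal(err)
    }
    var offset int64
    for i := 0; i+4 <= len(lengths); i += 4 {
        length := binary.LittleEndian.Uint32(lengths[i : i+4])
        fmt.Printf("block #%d: offset %d, 8-byte checksum + %d data bytes\n", i/4, offset, length)
        offset += int64(length) + 8 // matches how cache.go builds starts[]
    }
    fmt.Println("expected blocks file size:", offset)
}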

184
common/cache_test.go Normal file
View File

@ -0,0 +1,184 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package common
import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"os"
"testing"
"github.com/adityapk00/lightwalletd/parser"
"github.com/adityapk00/lightwalletd/walletrpc"
)
var compacts []*walletrpc.CompactBlock
var cache *BlockCache
const (
unitTestPath = "unittestcache"
unitTestChain = "unittestnet"
)
func TestCache(t *testing.T) {
type compactTest struct {
BlockHeight int `json:"block"`
BlockHash string `json:"hash"`
PrevHash string `json:"prev"`
Full string `json:"full"`
Compact string `json:"compact"`
}
var compactTests []compactTest
blockJSON, err := ioutil.ReadFile("../testdata/compact_blocks.json")
if err != nil {
t.Fatal(err)
}
err = json.Unmarshal(blockJSON, &compactTests)
if err != nil {
t.Fatal(err)
}
// Derive compact blocks from file data (setup, not part of the test).
for _, test := range compactTests {
blockData, _ := hex.DecodeString(test.Full)
block := parser.NewBlock()
blockData, err = block.ParseFromSlice(blockData)
if err != nil {
t.Fatal(err)
}
if len(blockData) > 0 {
t.Error("Extra data remaining")
}
compacts = append(compacts, block.ToCompact())
}
// Pretend Sapling starts at 289460.
os.RemoveAll(unitTestPath)
cache = NewBlockCache(unitTestPath, unitTestChain, 289460, true)
// Initially cache is empty.
if cache.GetLatestHeight() != -1 {
t.Fatal("unexpected GetLatestHeight")
}
if cache.firstBlock != 289460 {
t.Fatal("unexpected initial firstBlock")
}
if cache.nextBlock != 289460 {
t.Fatal("unexpected initial nextBlock")
}
fillCache(t)
reorgCache(t)
fillCache(t)
// Simulate a restart to ensure the db files are read correctly.
cache = NewBlockCache(unitTestPath, unitTestChain, 289460, false)
// Should still be 6 blocks.
if cache.nextBlock != 289466 {
t.Fatal("unexpected nextBlock height")
}
reorgCache(t)
// A reorg to before the first block empties the cache entirely.
cache.Reorg(289459)
if cache.latestHash != nil {
t.Fatal("unexpected latestHash, should be nil")
}
if cache.nextBlock != 289460 {
t.Fatal("unexpected nextBlock: ", cache.nextBlock)
}
// Clean up the test files.
cache.Close()
os.RemoveAll(unitTestPath)
}
func reorgCache(t *testing.T) {
// Simulate a reorg by adding a block whose height is lower than the latest;
// we're replacing the second block, so there should be only two blocks.
cache.Reorg(289461)
err := cache.Add(289461, compacts[1])
if err != nil {
t.Fatal(err)
}
if cache.firstBlock != 289460 {
t.Fatal("unexpected firstBlock height")
}
if cache.nextBlock != 289462 {
t.Fatal("unexpected nextBlock height")
}
if len(cache.starts) != 3 {
t.Fatal("unexpected len(cache.starts)")
}
// some "black-box" tests (using exported interfaces)
if cache.GetLatestHeight() != 289461 {
t.Fatal("unexpected GetLatestHeight")
}
if int(cache.Get(289461).Height) != 289461 {
t.Fatal("unexpected block contents")
}
// Make sure we can go forward from here
err = cache.Add(289462, compacts[2])
if err != nil {
t.Fatal(err)
}
if cache.firstBlock != 289460 {
t.Fatal("unexpected firstBlock height")
}
if cache.nextBlock != 289463 {
t.Fatal("unexpected nextBlock height")
}
if len(cache.starts) != 4 {
t.Fatal("unexpected len(cache.starts)")
}
if cache.GetLatestHeight() != 289462 {
t.Fatal("unexpected GetLatestHeight")
}
if int(cache.Get(289462).Height) != 289462 {
t.Fatal("unexpected block contents")
}
}
// Whatever the state of the cache, add 6 blocks starting at the
// pretend Sapling height, 289460 (this could cause a reorg).
func fillCache(t *testing.T) {
next := 289460
cache.Reorg(next)
for i, compact := range compacts {
err := cache.Add(next, compact)
if err != nil {
t.Fatal(err)
}
next++
// some "white-box" checks
if cache.firstBlock != 289460 {
t.Fatal("unexpected firstBlock height")
}
if cache.nextBlock != 289460+i+1 {
t.Fatal("unexpected nextBlock height")
}
if len(cache.starts) != i+2 {
t.Fatal("unexpected len(cache.starts)")
}
// some "black-box" tests (using exported interfaces)
if cache.GetLatestHeight() != 289460+i {
t.Fatal("unexpected GetLatestHeight")
}
b := cache.Get(289460 + i)
if b == nil {
t.Fatal("unexpected Get failure")
}
if int(b.Height) != 289460+i {
t.Fatal("unexpected block contents")
}
}
}

View File

@ -1,72 +1,236 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package common
import (
"encoding/hex"
"encoding/json"
"strconv"
"strings"
"time"
"github.com/adityapk00/lightwalletd/parser"
"github.com/adityapk00/lightwalletd/walletrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// 'make build' will overwrite this string with the output of git-describe (tag)
var (
Version = "v0.0.0.0-dev"
GitCommit = ""
Branch = ""
BuildDate = ""
BuildUser = ""
)
type Options struct {
GRPCBindAddr string `json:"grpc_bind_address,omitempty"`
GRPCLogging bool `json:"grpc_logging_insecure,omitempty"`
HTTPBindAddr string `json:"http_bind_address,omitempty"`
TLSCertPath string `json:"tls_cert_path,omitempty"`
TLSKeyPath string `json:"tls_cert_key,omitempty"`
LogLevel uint64 `json:"log_level,omitempty"`
LogFile string `json:"log_file,omitempty"`
ZcashConfPath string `json:"zcash_conf,omitempty"`
RPCUser string `json:"rpcuser"`
RPCPassword string `json:"rpcpassword"`
RPCHost string `json:"rpchost"`
RPCPort string `json:"rpcport"`
NoTLSVeryInsecure bool `json:"no_tls_very_insecure,omitempty"`
GenCertVeryInsecure bool `json:"gen_cert_very_insecure,omitempty"`
Redownload bool `json:"redownload"`
DataDir string `json:"data_dir"`
PingEnable bool `json:"ping_enable"`
Darkside bool `json:"darkside"`
DarksideTimeout uint64 `json:"darkside_timeout"`
}
// RawRequest points to the function to send an RPC request to zcashd;
// in production, it points to btcsuite/btcd/rpcclient/rawrequest.go:RawRequest();
// in unit tests it points to a function to mock RPCs to zcashd.
var RawRequest func(method string, params []json.RawMessage) (json.RawMessage, error)
// Sleep allows a request to time.Sleep() to be mocked for testing;
// in production, it points to the standard library time.Sleep();
// in unit tests it points to a mock function.
var Sleep func(d time.Duration)
// Log as a global variable simplifies logging
var Log *logrus.Entry
// Metrics as a global object to simplify things
var Metrics *PrometheusMetrics
// The following are JSON zcashd rpc requests and replies.
type (
// zcashd rpc "getblockchaininfo"
Upgradeinfo struct {
// unneeded fields can be omitted
ActivationHeight int
Status string // "active"
}
ConsensusInfo struct { // consensus branch IDs
Nextblock string // example: "e9ff75a6" (canopy)
Chaintip string // example: "e9ff75a6" (canopy)
}
ZcashdRpcReplyGetblockchaininfo struct {
Chain string
Upgrades map[string]Upgradeinfo
Blocks int
Consensus ConsensusInfo
EstimatedHeight int
}
// zcashd rpc "getinfo"
ZcashdRpcReplyGetinfo struct {
Build string
Subversion string
}
// zcashd rpc "getaddresstxids"
ZcashdRpcRequestGetaddresstxids struct {
Addresses []string `json:"addresses"`
Start uint64 `json:"start"`
End uint64 `json:"end"`
}
// zcashd rpc "z_gettreestate"
ZcashdRpcReplyGettreestate struct {
Height int
Hash string
Time uint32
Sapling struct {
Commitments struct {
FinalState string
}
SkipHash string
}
}
// zcashd rpc "getrawtransaction"
ZcashdRpcReplyGetrawtransaction struct {
Hex string
Height int
}
// zcashd rpc "getaddressbalance"
ZcashdRpcRequestGetaddressbalance struct {
Addresses []string `json:"addresses"`
}
ZcashdRpcReplyGetaddressbalance struct {
Balance int64
}
// zcashd rpc "getaddressutxos"
ZcashdRpcRequestGetaddressutxos struct {
Addresses []string `json:"addresses"`
}
ZcashdRpcReplyGetaddressutxos []struct {
Address string
Txid string
OutputIndex int64
Script string
Satoshis uint64
Height int
}
)
// FirstRPC tests that we can successfully reach zcashd through the RPC
// interface. The specific RPC used here is not important.
func FirstRPC() {
retryCount := 0
for {
result, rpcErr := RawRequest("getblockchaininfo", []json.RawMessage{})
if rpcErr == nil {
if retryCount > 0 {
Log.Warn("getblockchaininfo RPC successful")
}
var getblockchaininfo ZcashdRpcReplyGetblockchaininfo
err := json.Unmarshal(result, &getblockchaininfo)
if err != nil {
Log.Fatalf("error parsing JSON getblockchaininfo response: %v", err)
}
break
}
retryCount++
if retryCount > 10 {
Log.WithFields(logrus.Fields{
"timeouts": retryCount,
}).Fatal("unable to issue getblockchaininfo RPC call to zcashd node")
}
Log.WithFields(logrus.Fields{
"error": rpcErr.Error(),
"retry": retryCount,
}).Warn("error with getblockchaininfo rpc, retrying...")
Sleep(time.Duration(10+retryCount*5) * time.Second) // backoff
}
}
func GetLightdInfo() (*walletrpc.LightdInfo, error) {
result, rpcErr := RawRequest("getinfo", []json.RawMessage{})
if rpcErr != nil {
return nil, rpcErr
}
var getinfoReply ZcashdRpcReplyGetinfo
err := json.Unmarshal(result, &getinfoReply)
if err != nil {
return nil, errors.Wrap(err, "error parsing JSON getinfo reply")
}
result, rpcErr = RawRequest("getblockchaininfo", []json.RawMessage{})
if rpcErr != nil {
return nil, rpcErr
}
var getblockchaininfoReply ZcashdRpcReplyGetblockchaininfo
err = json.Unmarshal(result, &getblockchaininfoReply)
if err != nil {
return nil, errors.Wrap(err, "error parsing JSON getblockchaininfo reply")
}
// If the sapling consensus branch doesn't exist, it must be regtest
var saplingHeight int
if saplingJSON, ok := getblockchaininfoReply.Upgrades["76b809bb"]; ok { // Sapling ID
saplingHeight = saplingJSON.ActivationHeight
}
vendor := "Zecwallet LightWalletD"
if DarksideEnabled {
vendor = "Zecwallet DarksideWalletD"
}
return &walletrpc.LightdInfo{
Version: Version,
Vendor: vendor,
TaddrSupport: true,
ChainName: getblockchaininfoReply.Chain,
SaplingActivationHeight: uint64(saplingHeight),
ConsensusBranchId: getblockchaininfoReply.Consensus.Chaintip,
BlockHeight: uint64(getblockchaininfoReply.Blocks),
GitCommit: GitCommit,
Branch: Branch,
BuildDate: BuildDate,
BuildUser: BuildUser,
EstimatedHeight: uint64(getblockchaininfoReply.EstimatedHeight),
ZcashdBuild: getinfoReply.Build,
ZcashdSubversion: getinfoReply.Subversion,
}, nil
}
func getBlockFromRPC(height int) (*walletrpc.CompactBlock, error) {
params := make([]json.RawMessage, 2)
heightJSON, err := json.Marshal(strconv.Itoa(height))
if err != nil {
return nil, errors.Wrap(err, "error marshaling height")
}
params[0] = heightJSON
params[1] = json.RawMessage("0") // non-verbose (raw hex)
result, rpcErr := RawRequest("getblock", params)
// For some reason, the error responses are not JSON
if rpcErr != nil {
// Check to see if we are requesting a height the zcashd doesn't have yet
if (strings.Split(rpcErr.Error(), ":"))[0] == "-8" {
return nil, nil
}
return nil, errors.Wrap(rpcErr, "error requesting block")
}
@ -92,194 +256,170 @@ func getBlockFromRPC(rpcClient *rpcclient.Client, height int) (*walletrpc.Compac
return nil, errors.New("received overlong message")
}
if block.GetHeight() != height {
return nil, errors.New("received unexpected height block")
}
return block.ToCompact(), nil
}
var (
ingestorRunning bool
stopIngestorChan = make(chan struct{})
)
func startIngestor(c *BlockCache) {
if !ingestorRunning {
ingestorRunning = true
go BlockIngestor(c, 0)
}
}
func stopIngestor() {
if ingestorRunning {
ingestorRunning = false
stopIngestorChan <- struct{}{}
}
}
// BlockIngestor runs as a goroutine and polls zcashd for new blocks, adding them
// to the cache. The repetition count, rep, is nonzero only for unit-testing.
func BlockIngestor(c *BlockCache, rep int) {
lastLog := time.Now()
reorgCount := 0
lastHeightLogged := 0
retryCount := 0
wait := true
// Start listening for new blocks
for i := 0; rep == 0 || i < rep; i++ {
// stop if requested
select {
case <-stopIngestorChan:
return
default:
}
height := c.GetNextHeight()
block, err := getBlockFromRPC(height)
if err != nil {
Log.WithFields(logrus.Fields{
"height": height,
"error": err,
}).Warn("error zcashd getblock rpc")
retryCount++
if retryCount > 10 {
Log.WithFields(logrus.Fields{
"timeouts": retryCount,
}).Fatal("unable to issue RPC call to zcashd node")
}
// Delay then retry the same height.
c.Sync()
Sleep(10 * time.Second)
wait = true
continue
}
retryCount = 0
if block == nil {
// No block at this height.
if height == c.GetFirstHeight() {
Log.Info("Waiting for zcashd height to reach Sapling activation height ",
"(", c.GetFirstHeight(), ")...")
reorgCount = 0
Sleep(20 * time.Second)
continue
}
if wait {
// Wait a bit then retry the same height.
c.Sync()
if lastHeightLogged+1 != height {
Log.Info("Ingestor waiting for block: ", height)
lastHeightLogged = height - 1
}
Sleep(2 * time.Second)
wait = false
continue
}
}
if block == nil || c.HashMismatch(block.PrevHash) {
// This may not be a reorg; it may be we're at the tip
// and there's no new block yet, but we want to back up
// so we detect a reorg in which the new chain is the
// same length or shorter.
reorgCount++
if reorgCount > 100 {
Log.Fatal("Reorg exceeded max of 100 blocks! Help!")
}
// Print the hash of the block that is getting reorg-ed away
// as 'phash', not the prevhash of the block we just received.
if block != nil {
Log.WithFields(logrus.Fields{
"height": height,
"hash": displayHash(block.Hash),
"phash": displayHash(c.GetLatestHash()),
"reorg": reorgCount,
}).Warn("REORG")
} else if reorgCount > 1 {
Log.WithFields(logrus.Fields{
"height": height,
"phash": displayHash(c.GetLatestHash()),
"reorg": reorgCount,
}).Warn("REORG")
}
// Try backing up
c.Reorg(height - 1)
continue
}
// We have a valid block to add.
wait = true
reorgCount = 0
if err := c.Add(height, block); err != nil {
Log.Fatal("Cache add failed:", err)
}
// Don't log these too often.
if time.Now().Sub(lastLog).Seconds() >= 4 && c.GetNextHeight() == height+1 && height != lastHeightLogged {
lastLog = time.Now()
lastHeightLogged = height
Log.Info("Ingestor adding block to cache: ", height)
}
}
}
// GetBlock returns the compact block at the requested height, first by querying
// the cache, then, if not found, will request the block from zcashd. It returns
// nil if no block exists at this height.
func GetBlock(cache *BlockCache, height int) (*walletrpc.CompactBlock, error) {
// First, check the cache to see if we have the block
block := cache.Get(height)
if block != nil {
return block, nil
}
// Not in the cache, ask zcashd
block, err := getBlockFromRPC(height)
if err != nil {
return nil, err
}
if block == nil {
// Block height is too large
return nil, errors.New("block requested is newer than latest block")
}
return block, nil
}
// GetBlockRange returns a sequence of consecutive blocks in the given range.
func GetBlockRange(cache *BlockCache, blockOut chan<- *walletrpc.CompactBlock, errOut chan<- error, start, end int) {
// Go over [start, end] inclusive
for i := start; i <= end; i++ {
block, err := GetBlock(cache, i)
if err != nil {
errOut <- err
return
}
blockOut <- block
}
errOut <- nil
}
func displayHash(hash []byte) string {
return hex.EncodeToString(parser.Reverse(hash))
}
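The RawRequest and Sleep function variables above are what let the unit tests below swap in stubs for zcashd and for real sleeping. A sketch of how the production wiring might look (the exact wiring location in this commit is not shown here, and frontend.NewZRPCFromConf returning a btcd rpcclient connection is an assumption based on its use in main()):

package main

import (
    "encoding/json"
    "time"

    "github.com/adityapk00/lightwalletd/common"
    "github.com/adityapk00/lightwalletd/frontend"
)

// wireZcashdRPC points common.RawRequest at a real zcashd connection and
// common.Sleep at the standard library; unit tests assign stubs instead.
func wireZcashdRPC(confPath string) error {
    rpcClient, err := frontend.NewZRPCFromConf(confPath)
    if err != nil {
        return err
    }
    // The btcd rpcclient RawRequest method already has the signature that
    // common.RawRequest expects.
    common.RawRequest = func(method string, params []json.RawMessage) (json.RawMessage, error) {
        return rpcClient.RawRequest(method, params)
    }
    common.Sleep = time.Sleep
    return nil
}

func main() {
    // "zcash.conf" is a placeholder path for illustration.
    if err := wireZcashdRPC("zcash.conf"); err != nil {
        panic(err)
    }
}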

341
common/common_test.go Normal file
View File

@ -0,0 +1,341 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package common
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"time"
"github.com/adityapk00/lightwalletd/walletrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ------------------------------------------ Setup
//
// This section does some setup things that may (even if not currently)
// be useful across multiple tests.
var (
testT *testing.T
// The various stub callbacks need to sequence through states
step int
getblockchaininfoReply []byte
logger = logrus.New()
blocks [][]byte // four test blocks
)
// TestMain does common setup that's shared across multiple tests
func TestMain(m *testing.M) {
output, err := os.OpenFile("test-log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
os.Stderr.WriteString(fmt.Sprintf("Cannot open test-log: %v", err))
os.Exit(1)
}
logger.SetOutput(output)
Log = logger.WithFields(logrus.Fields{
"app": "test",
})
// Several tests need test blocks; read all 4 into memory just once
// (for efficiency).
testBlocks, err := os.Open("../testdata/blocks")
if err != nil {
os.Stderr.WriteString(fmt.Sprintf("Cannot open testdata/blocks: %v", err))
os.Exit(1)
}
scan := bufio.NewScanner(testBlocks)
for scan.Scan() { // each line (block)
blockJSON, _ := json.Marshal(scan.Text())
blocks = append(blocks, blockJSON)
}
// Setup is done; run all tests.
exitcode := m.Run()
// cleanup
os.Remove("test-log")
os.Exit(exitcode)
}
// Allow tests to verify that sleep has been called (for retries)
var sleepCount int
var sleepDuration time.Duration
func sleepStub(d time.Duration) {
sleepCount++
sleepDuration += d
}
// ------------------------------------------ GetLightdInfo()
func getLightdInfoStub(method string, params []json.RawMessage) (json.RawMessage, error) {
step++
switch method {
case "getinfo":
r, _ := json.Marshal(&ZcashdRpcReplyGetinfo{})
return r, nil
case "getblockchaininfo":
// Test retry logic (for the moment, it's very simple, just one retry).
switch step {
case 1:
return json.RawMessage{}, errors.New("first failure")
case 2:
if sleepCount != 1 || sleepDuration != 15*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
}
r, _ := json.Marshal(&ZcashdRpcReplyGetblockchaininfo{
Blocks: 9977,
Chain: "bugsbunny",
Consensus: ConsensusInfo{Chaintip: "someid"},
})
return r, nil
}
return nil, nil
}
func TestGetLightdInfo(t *testing.T) {
testT = t
RawRequest = getLightdInfoStub
Sleep = sleepStub
// This calls the getblockchaininfo rpc just to establish connectivity with zcashd
FirstRPC()
// Ensure the retry happened as expected
logFile, err := ioutil.ReadFile("test-log")
if err != nil {
t.Fatal("Cannot read test-log", err)
}
logStr := string(logFile)
if !strings.Contains(logStr, "retrying") {
t.Fatal("Cannot find retrying in test-log")
}
if !strings.Contains(logStr, "retry=1") {
t.Fatal("Cannot find retry=1 in test-log")
}
// Check the success case (second attempt)
getLightdInfo, err := GetLightdInfo()
if err != nil {
t.Fatal("GetLightdInfo failed")
}
if getLightdInfo.SaplingActivationHeight != 0 {
t.Error("unexpected saplingActivationHeight", getLightdInfo.SaplingActivationHeight)
}
if getLightdInfo.BlockHeight != 9977 {
t.Error("unexpected blockHeight", getLightdInfo.BlockHeight)
}
if getLightdInfo.ChainName != "bugsbunny" {
t.Error("unexpected chainName", getLightdInfo.ChainName)
}
if getLightdInfo.ConsensusBranchId != "someid" {
t.Error("unexpected ConsensusBranchId", getLightdInfo.ConsensusBranchId)
}
if sleepCount != 1 || sleepDuration != 15*time.Second {
t.Error("unexpected sleeps", sleepCount, sleepDuration)
}
step = 0
sleepCount = 0
sleepDuration = 0
}
// ------------------------------------------ BlockIngestor()
// There are four test blocks, 0..3
func getblockStub(method string, params []json.RawMessage) (json.RawMessage, error) {
var height string
err := json.Unmarshal(params[0], &height)
if err != nil {
testT.Fatal("could not unmarshal height")
}
step++
switch step {
case 1:
if height != "380640" {
testT.Error("unexpected height")
}
// Sunny-day
return blocks[0], nil
case 2:
if height != "380641" {
testT.Error("unexpected height")
}
// Sunny-day
return blocks[1], nil
case 3:
if height != "380642" {
testT.Error("unexpected height", height)
}
// Simulate that we're synced (caught up);
// this should cause one 2s sleep (then retry the same height).
return nil, errors.New("-8: Block height out of range")
case 4:
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380642" {
testT.Error("unexpected height", height)
}
// Simulate that we're still caught up; since the ingestor already waited once,
// it now backs up one block (no additional sleep) to check for a reorg to a shorter chain.
return nil, errors.New("-8: Block height out of range")
case 5:
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
// Back up to 41.
if height != "380641" {
testT.Error("unexpected height", height)
}
// Return the expected block (as normally happens, no actual reorg),
// ingestor will immediately re-request the next block (42).
return blocks[1], nil
case 6:
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380642" {
testT.Error("unexpected height", height)
}
// Block 42 has now finally appeared, it will immediately ask for 43.
return blocks[2], nil
case 7:
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380643" {
testT.Error("unexpected height", height)
}
// Simulate a reorg by modifying the block's hash temporarily,
// this causes the ingestor to back up one block (to 42); no sleep occurs.
blocks[3][9]++ // first byte of the prevhash
return blocks[3], nil
case 8:
blocks[3][9]-- // repair first byte of the prevhash
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380642" {
testT.Error("unexpected height ", height)
}
return blocks[2], nil
case 9:
if sleepCount != 1 || sleepDuration != 2*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380643" {
testT.Error("unexpected height ", height)
}
// Instead of returning expected (43), simulate block unmarshal
// failure, should cause 10s sleep, retry
return nil, nil
case 10:
if sleepCount != 2 || sleepDuration != 12*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380643" {
testT.Error("unexpected height ", height)
}
// Back to sunny-day
return blocks[3], nil
case 11:
if sleepCount != 2 || sleepDuration != 12*time.Second {
testT.Error("unexpected sleeps", sleepCount, sleepDuration)
}
if height != "380644" {
testT.Error("unexpected height ", height)
}
// next block not ready
return nil, nil
}
testT.Error("getblockStub called too many times")
return nil, nil
}
func TestBlockIngestor(t *testing.T) {
testT = t
RawRequest = getblockStub
Sleep = sleepStub
os.RemoveAll(unitTestPath)
testcache := NewBlockCache(unitTestPath, unitTestChain, 380640, false)
BlockIngestor(testcache, 11)
if step != 11 {
t.Error("unexpected final step", step)
}
step = 0
sleepCount = 0
sleepDuration = 0
os.RemoveAll(unitTestPath)
}
func TestGetBlockRange(t *testing.T) {
testT = t
RawRequest = getblockStub
os.RemoveAll(unitTestPath)
testcache := NewBlockCache(unitTestPath, unitTestChain, 380640, true)
blockChan := make(chan *walletrpc.CompactBlock)
errChan := make(chan error)
go GetBlockRange(testcache, blockChan, errChan, 380640, 380642)
// read in block 380640
select {
case err := <-errChan:
// this will also catch context.DeadlineExceeded from the timeout
t.Fatal("unexpected error:", err)
case cBlock := <-blockChan:
if cBlock.Height != 380640 {
t.Fatal("unexpected Height:", cBlock.Height)
}
}
// read in block 380641
select {
case err := <-errChan:
// this will also catch context.DeadlineExceeded from the timeout
t.Fatal("unexpected error:", err)
case cBlock := <-blockChan:
if cBlock.Height != 380641 {
t.Fatal("unexpected Height:", cBlock.Height)
}
}
// try to read in block 380642, but this will fail (see case 3 above)
select {
case err := <-errChan:
// this will also catch context.DeadlineExceeded from the timeout
if err.Error() != "block requested is newer than latest block" {
t.Fatal("unexpected error:", err)
}
case _ = <-blockChan:
t.Fatal("reading height 22 should have failed")
}
// check goroutine GetBlockRange() reaching the end of the range (and exiting)
go GetBlockRange(testcache, blockChan, errChan, 1, 0)
err := <-errChan
if err != nil {
t.Fatal("unexpected err return")
}
os.RemoveAll(unitTestPath)
}
func TestGenerateCerts(t *testing.T) {
if GenerateCerts() == nil {
t.Fatal("GenerateCerts returned nil")
}
}

587
common/darkside.go Normal file
View File

@ -0,0 +1,587 @@
package common
import (
"bufio"
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/adityapk00/lightwalletd/parser"
)
type darksideState struct {
resetted bool
startHeight int // activeBlocks[0] corresponds to this height
branchID string
chainName string
cache *BlockCache
mutex sync.RWMutex
// This is the highest (latest) block height currently being presented
// by the mock zcashd.
latestHeight int
// These blocks (up to and including tip) are presented by mock zcashd.
// activeBlocks[0] is the block at height startHeight.
activeBlocks [][]byte // full blocks, binary, as from zcashd getblock rpc
// Staged blocks are waiting to be applied (by ApplyStaged()) to activeBlocks.
// They are in order of arrival (not necessarily sorted by height), and are
// applied in arrival order.
stagedBlocks [][]byte // full blocks, binary
// These are full transactions as received from the wallet by SendTransaction().
// They are conceptually in the mempool. They are not yet available to be fetched
// by GetTransaction(). They can be fetched by darkside GetIncomingTransaction().
incomingTransactions [][]byte
// These transactions come from StageTransactions(); they will be merged into
// activeBlocks by ApplyStaged() (and this list then cleared).
stagedTransactions []stagedTx
}
var state darksideState
type stagedTx struct {
height int
bytes []byte
}
// DarksideEnabled is true if --darkside-very-insecure was given on
// the command line.
var DarksideEnabled bool
// DarksideInit should be called once at startup in darksidewalletd mode.
func DarksideInit(c *BlockCache, timeout int) {
Log.Info("Darkside mode running")
DarksideEnabled = true
state.cache = c
RawRequest = darksideRawRequest
go func() {
time.Sleep(time.Duration(timeout) * time.Minute)
Log.Fatal("Shutting down darksidewalletd to prevent accidental deployment in production.")
}()
}
// DarksideReset allows the wallet test code to specify values
// that are returned by GetLightdInfo().
func DarksideReset(sa int, bi, cn string) error {
Log.Info("Reset(saplingActivation=", sa, ")")
stopIngestor()
state = darksideState{
resetted: true,
startHeight: sa,
latestHeight: -1,
branchID: bi,
chainName: cn,
cache: state.cache,
activeBlocks: make([][]byte, 0),
stagedBlocks: make([][]byte, 0),
incomingTransactions: make([][]byte, 0),
stagedTransactions: make([]stagedTx, 0),
}
state.cache.Reset(sa)
return nil
}
// addBlockActive adds a single block to the active blocks list.
func addBlockActive(blockBytes []byte) error {
block := parser.NewBlock()
rest, err := block.ParseFromSlice(blockBytes)
if err != nil {
return err
}
if len(rest) != 0 {
return errors.New("block serialization is too long")
}
blockHeight := block.GetHeight()
// The new block must extend or overwrite the active chain without leaving a gap.
if blockHeight > state.startHeight+len(state.activeBlocks) {
return errors.New(fmt.Sprint("adding block at height ", blockHeight,
" would create a gap in the blockchain"))
}
if blockHeight < state.startHeight {
return errors.New(fmt.Sprint("adding block at height ", blockHeight,
" is lower than Sapling activation height ", state.startHeight))
}
// Drop the block that will be overwritten, and its children, then add block.
state.activeBlocks = state.activeBlocks[:blockHeight-state.startHeight]
state.activeBlocks = append(state.activeBlocks, blockBytes)
return nil
}
// Set missing prev hashes of the blocks in the active chain
func setPrevhash() {
var prevhash []byte
for _, blockBytes := range state.activeBlocks {
// Set this block's prevhash.
block := parser.NewBlock()
rest, err := block.ParseFromSlice(blockBytes)
if err != nil {
Log.Fatal(err)
}
if len(rest) != 0 {
Log.Fatal(errors.New("block is too long"))
}
if prevhash != nil {
copy(blockBytes[4:4+32], prevhash)
}
prevhash = block.GetEncodableHash()
Log.Info("active block height ", block.GetHeight(), " hash ",
hex.EncodeToString(block.GetDisplayHash()),
" txcount ", block.GetTxCount())
}
}
// DarksideApplyStaged moves the staging area to the active block list.
// If this returns an error, the state could be weird; perhaps it may
// be better to simply crash.
func DarksideApplyStaged(height int) error {
state.mutex.Lock()
defer state.mutex.Unlock()
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("ApplyStaged(height=", height, ")")
if height < state.startHeight {
return errors.New(fmt.Sprint("height ", height,
" is less than sapling activation height ", state.startHeight))
}
// Move the staged blocks into active list
stagedBlocks := state.stagedBlocks
state.stagedBlocks = nil
for _, blockBytes := range stagedBlocks {
if err := addBlockActive(blockBytes); err != nil {
return err
}
}
if len(state.activeBlocks) == 0 {
return errors.New("No active blocks after applying staged blocks")
}
// Add staged transactions into blocks. Note we're not trying to
// recover to the initial state; maybe it's better to just crash
// on errors.
stagedTransactions := state.stagedTransactions
state.stagedTransactions = nil
for _, tx := range stagedTransactions {
if tx.height < state.startHeight {
return errors.New("transaction height too low")
}
if tx.height >= state.startHeight+len(state.activeBlocks) {
return errors.New("transaction height too high")
}
block := state.activeBlocks[tx.height-state.startHeight]
// The byte at offset 1487 (just past the block header) begins the Bitcoin-style
// compactSize transaction count: one byte for counts up to 252, or the marker
// byte 253 followed by a 2-byte little-endian count.
nTxFirstByte := block[1487]
switch {
case nTxFirstByte < 252:
block[1487]++
case nTxFirstByte == 252:
// incrementing to 253, requires "253" followed by 2-byte length,
// extend the block by two bytes, shift existing transaction bytes
block = append(block, 0, 0)
copy(block[1490:], block[1488:len(block)-2])
block[1487] = 253
block[1488] = 253
block[1489] = 0
case nTxFirstByte == 253:
block[1488]++
if block[1488] == 0 {
// wrapped around
block[1489]++
}
default:
// no need to worry about more than 64k transactions
Log.Fatal("unexpected compact transaction count ", nTxFirstByte,
", can't support more than 64k transactions in a block")
}
block[68]++ // hack HashFinalSaplingRoot to mod the block hash
block = append(block, tx.bytes...)
state.activeBlocks[tx.height-state.startHeight] = block
}
setPrevhash()
state.latestHeight = height
Log.Info("active blocks from ", state.startHeight,
" to ", state.startHeight+len(state.activeBlocks)-1,
", latest presented height ", state.latestHeight)
// The block ingestor can only run if there are blocks
if len(state.activeBlocks) > 0 {
startIngestor(state.cache)
} else {
stopIngestor()
}
return nil
}
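DarksideApplyStaged above patches the compactSize transaction count that sits at byte offset 1487, immediately after the 1487-byte block header. A small standalone sketch of that encoding for the two cases the code handles (illustrative only, not part of this commit):

package main

import "fmt"

// encodeTxCount returns the Bitcoin-style compactSize encoding handled above:
// a single byte for counts up to 252, or the marker byte 253 followed by a
// 2-byte little-endian count. (The 254/255 markers for larger counts are not
// supported by the darkside code.)
func encodeTxCount(n int) []byte {
    if n < 253 {
        return []byte{byte(n)}
    }
    return []byte{253, byte(n & 0xff), byte(n >> 8)}
}

func main() {
    fmt.Printf("% x\n", encodeTxCount(5))   // 05
    fmt.Printf("% x\n", encodeTxCount(253)) // fd fd 00
    fmt.Printf("% x\n", encodeTxCount(300)) // fd 2c 01
}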
// DarksideGetIncomingTransactions returns all transactions that have been
// received from the wallet via SendTransaction().
func DarksideGetIncomingTransactions() [][]byte {
return state.incomingTransactions
}
// Add the serialized block to the staging list, but do some sanity checks first.
func darksideStageBlock(caller string, b []byte) error {
block := parser.NewBlock()
rest, err := block.ParseFromSlice(b)
if err != nil {
Log.Error("stage block error: ", err)
return err
}
if len(rest) != 0 {
return errors.New("block serialization is too long")
}
Log.Info(caller, "(height=", block.GetHeight(), ")")
if block.GetHeight() < state.startHeight {
return errors.New(fmt.Sprint("block height ", block.GetHeight(),
" is less than sapling activation height ", state.startHeight))
}
state.stagedBlocks = append(state.stagedBlocks, b)
return nil
}
// DarksideStageBlocks opens and reads blocks from the given URL and
// adds them to the staging area.
func DarksideStageBlocks(url string) error {
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("StageBlocks(url=", url, ")")
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
// some blocks are too large, especially when encoded in hex, for the
// default buffer size, so set up a larger one; 8mb should be enough.
scan := bufio.NewScanner(resp.Body)
var scanbuf []byte
scan.Buffer(scanbuf, 8*1000*1000)
for scan.Scan() { // each line (block)
blockHex := scan.Text()
if blockHex == "404: Not Found" {
// special case error (http resource not found, bad pathname)
return errors.New(blockHex)
}
blockBytes, err := hex.DecodeString(blockHex)
if err != nil {
return err
}
if err = darksideStageBlock("DarksideStageBlocks", blockBytes); err != nil {
return err
}
}
return scan.Err()
}
// DarksideStageBlockStream adds the block to the staging area
func DarksideStageBlockStream(blockHex string) error {
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("StageBlocksStream()")
blockBytes, err := hex.DecodeString(blockHex)
if err != nil {
return err
}
if err = darksideStageBlock("DarksideStageBlockStream", blockBytes); err != nil {
return err
}
return nil
}
// DarksideStageBlocksCreate creates empty blocks and adds them to the staging area.
func DarksideStageBlocksCreate(height int32, nonce int32, count int32) error {
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("StageBlocksCreate(height=", height, ", nonce=", nonce, ", count=", count, ")")
for i := 0; i < int(count); i++ {
fakeCoinbase := "0400008085202f890100000000000000000000000000000000000000000000000000" +
"00000000000000ffffffff2a03d12c0c00043855975e464b8896790758f824ceac97836" +
"22c17ed38f1669b8a45ce1da857dbbe7950e2ffffffff02a0ebce1d000000001976a914" +
"7ed15946ec14ae0cd8fa8991eb6084452eb3f77c88ac405973070000000017a914e445cf" +
"a944b6f2bdacefbda904a81d5fdd26d77f8700000000000000000000000000000000000000"
// This coinbase transaction was pulled from block 797905, whose
// little-endian encoding is 0xD12C0C00. Replace it with the block
// number we want.
fakeCoinbase = strings.Replace(fakeCoinbase, "d12c0c00",
fmt.Sprintf("%02x", height&0xFF)+
fmt.Sprintf("%02x", (height>>8)&0xFF)+
fmt.Sprintf("%02x", (height>>16)&0xFF)+
fmt.Sprintf("%02x", (height>>24)&0xFF), 1)
fakeCoinbaseBytes, err := hex.DecodeString(fakeCoinbase)
if err != nil {
Log.Fatal(err)
}
hashOfTxnsAndHeight := sha256.Sum256([]byte(string(nonce) + "#" + string(height)))
blockHeader := &parser.BlockHeader{
RawBlockHeader: &parser.RawBlockHeader{
Version: 4, // start: 0
HashPrevBlock: make([]byte, 32), // start: 4
HashMerkleRoot: hashOfTxnsAndHeight[:], // start: 36
HashFinalSaplingRoot: make([]byte, 32), // start: 68
Time: 1, // start: 100
NBitsBytes: make([]byte, 4), // start: 104
Nonce: make([]byte, 32), // start: 108
Solution: make([]byte, 1344), // starts: 140, 143
}, // length: 1487
}
headerBytes, err := blockHeader.MarshalBinary()
if err != nil {
Log.Fatal(err)
}
blockBytes := make([]byte, 0)
blockBytes = append(blockBytes, headerBytes...)
blockBytes = append(blockBytes, byte(1))
blockBytes = append(blockBytes, fakeCoinbaseBytes...)
if err = darksideStageBlock("DarksideStageBlockCreate", blockBytes); err != nil {
// This should never fail since we created the block ourselves.
return err
}
height++
}
return nil
}
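As a worked example of the height substitution above (illustration only): the template coinbase encodes block 797905, which is 0x000C2CD1 and is serialized little-endian as the bytes d1 2c 0c 00, hence the substring "d12c0c00" in the hex; staging at height 1000000 (0x000F4240) would replace it with "40420f00".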
// DarksideClearIncomingTransactions empties the incoming transaction list.
func DarksideClearIncomingTransactions() {
state.incomingTransactions = make([][]byte, 0)
}
func darksideRawRequest(method string, params []json.RawMessage) (json.RawMessage, error) {
switch method {
case "getblockchaininfo":
blockchaininfo := &ZcashdRpcReplyGetblockchaininfo{
Chain: state.chainName,
Upgrades: map[string]Upgradeinfo{
"76b809bb": {ActivationHeight: state.startHeight},
},
Blocks: state.latestHeight,
Consensus: ConsensusInfo{state.branchID, state.branchID},
}
return json.Marshal(blockchaininfo)
case "getinfo":
info := &ZcashdRpcReplyGetinfo{}
return json.Marshal(info)
case "getblock":
var heightStr string
err := json.Unmarshal(params[0], &heightStr)
if err != nil {
return nil, errors.New("failed to parse getblock request")
}
height, err := strconv.Atoi(heightStr)
if err != nil {
return nil, errors.New("error parsing height as integer")
}
state.mutex.RLock()
defer state.mutex.RUnlock()
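// The "-8:" prefix mirrors zcashd's RPC error code for an unknown block height; callers treat such a reply as "block not available (yet)".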
const notFoundErr = "-8:"
if len(state.activeBlocks) == 0 {
return nil, errors.New(notFoundErr)
}
if height > state.latestHeight {
return nil, errors.New(notFoundErr)
}
if height < state.startHeight {
return nil, errors.New(fmt.Sprint("getblock: requesting height ", height,
" is less than sapling activation height"))
}
index := height - state.startHeight
if index >= len(state.activeBlocks) {
return nil, errors.New(notFoundErr)
}
return json.Marshal(hex.EncodeToString(state.activeBlocks[index]))
case "getaddresstxids":
// Not required for minimal reorg testing.
return nil, errors.New("not implemented yet")
case "getrawtransaction":
return darksideGetRawTransaction(params)
case "sendrawtransaction":
var rawtx string
err := json.Unmarshal(params[0], &rawtx)
if err != nil {
return nil, errors.New("failed to parse sendrawtransaction JSON")
}
txBytes, err := hex.DecodeString(rawtx)
if err != nil {
return nil, errors.New("failed to parse sendrawtransaction value as a hex string")
}
// Parse the transaction to get its hash (txid).
tx := parser.NewTransaction()
rest, err := tx.ParseFromSlice(txBytes)
if err != nil {
return nil, err
}
if len(rest) != 0 {
return nil, errors.New("transaction serialization is too long")
}
state.incomingTransactions = append(state.incomingTransactions, txBytes)
return []byte(hex.EncodeToString(tx.GetDisplayHash())), nil
case "getrawmempool":
reply := make([]string, 0)
addTxToReply := func(txBytes []byte) {
ctx := parser.NewTransaction()
ctx.ParseFromSlice(txBytes)
reply = append(reply, hex.EncodeToString(ctx.GetDisplayHash()))
}
for _, blockBytes := range state.stagedBlocks {
block := parser.NewBlock()
block.ParseFromSlice(blockBytes)
for _, tx := range block.Transactions() {
addTxToReply(tx.Bytes())
}
}
for _, tx := range state.stagedTransactions {
addTxToReply(tx.bytes)
}
return json.Marshal(reply)
default:
return nil, errors.New("there was an attempt to call an unsupported RPC")
}
}
func darksideGetRawTransaction(params []json.RawMessage) (json.RawMessage, error) {
if !state.resetted {
return nil, errors.New("please call Reset first")
}
var rawtx string
err := json.Unmarshal(params[0], &rawtx)
if err != nil {
return nil, errors.New("failed to parse getrawtransaction JSON")
}
txid, err := hex.DecodeString(rawtx)
if err != nil {
return nil, errors.New("-9: " + err.Error())
}
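// marshalReply formats the getrawtransaction response: verbose "0" returns just the hex-encoded transaction, verbose "1" also includes the height it was mined at (0 if it is only staged).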
marshalReply := func(tx *parser.Transaction, height int) []byte {
switch string(params[1]) {
case "0":
txJSON, _ := json.Marshal(hex.EncodeToString(tx.Bytes()))
return txJSON
case "1":
reply := struct {
Hex string
Height int
}{hex.EncodeToString(tx.Bytes()), height}
txVerboseJSON, _ := json.Marshal(reply)
return txVerboseJSON
default:
Log.Fatal("darkside only recognizes verbose 0 or 1")
return nil
}
}
// Linear search for the tx, somewhat inefficient but this is test code
// and there aren't many blocks. If this becomes a performance problem,
// we can maintain a map of transactions indexed by txid.
findTxInBlocks := func(blocks [][]byte) json.RawMessage {
for _, b := range blocks {
block := parser.NewBlock()
_, _ = block.ParseFromSlice(b)
for _, tx := range block.Transactions() {
if bytes.Equal(tx.GetDisplayHash(), txid) {
return marshalReply(tx, block.GetHeight())
}
}
}
return nil
}
// Search for the transaction (by txid) in the 3 places it could be.
reply := findTxInBlocks(state.activeBlocks)
if reply != nil {
return reply, nil
}
reply = findTxInBlocks(state.stagedBlocks)
if reply != nil {
return reply, nil
}
for _, stx := range state.stagedTransactions {
tx := parser.NewTransaction()
_, _ = tx.ParseFromSlice(stx.bytes)
if bytes.Equal(tx.GetDisplayHash(), txid) {
return marshalReply(tx, 0), nil
}
}
return nil, errors.New("-5: No information available about transaction")
}
// DarksideStageTransaction adds the given transaction to the staging area.
func DarksideStageTransaction(height int, txBytes []byte) error {
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("DarksideStageTransaction(height=", height, ")")
tx := parser.NewTransaction()
rest, err := tx.ParseFromSlice(txBytes)
if err != nil {
return err
}
if len(rest) != 0 {
return errors.New("transaction serialization is too long")
}
state.stagedTransactions = append(state.stagedTransactions,
stagedTx{
height: height,
bytes: txBytes,
})
return nil
}
// DarksideStageTransactionsURL reads a list of transactions (hex-encoded, one
// per line) from the given URL, and associates them with the given height.
func DarksideStageTransactionsURL(height int, url string) error {
if !state.resetted {
return errors.New("please call Reset first")
}
Log.Info("StageTransactionsURL(height=", height, ", url=", url, ")")
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
// some blocks are too large, especially when encoded in hex, for the
// default buffer size, so set up a larger one; 8mb should be enough.
scan := bufio.NewScanner(resp.Body)
var scanbuf []byte
scan.Buffer(scanbuf, 8*1000*1000)
for scan.Scan() { // each line (transaction)
transactionHex := scan.Text()
if transactionHex == "404: Not Found" {
// special case error (http resource not found, bad pathname)
return errors.New(transactionHex)
}
transactionBytes, err := hex.DecodeString(transactionHex)
if err != nil {
return err
}
if err = DarksideStageTransaction(height, transactionBytes); err != nil {
return err
}
}
return scan.Err()
}

View File

@ -7,16 +7,11 @@ import (
"github.com/sirupsen/logrus"
)
- var (
- metrics *PrometheusMetrics
- log *logrus.Entry
- )
// Handle http(s) downloads for zcash params
- func paramsHandler(w http.ResponseWriter, req *http.Request) {
+ func ParamsHandler(w http.ResponseWriter, req *http.Request) {
if strings.HasSuffix(req.URL.Path, "sapling-output.params") {
- metrics.TotalSaplingParamsCounter.Inc()
- log.WithFields(logrus.Fields{
+ Metrics.TotalSaplingParamsCounter.Inc()
+ Log.WithFields(logrus.Fields{
"method": "params",
"param": "sapling-output",
}).Info("ParamsHandler")
@ -26,7 +21,7 @@ func paramsHandler(w http.ResponseWriter, req *http.Request) {
}
if strings.HasSuffix(req.URL.Path, "sapling-spend.params") {
- log.WithFields(logrus.Fields{
+ Log.WithFields(logrus.Fields{
"method": "params",
"param": "sapling-spend",
}).Info("ParamsHandler")
@ -36,11 +31,11 @@ func paramsHandler(w http.ResponseWriter, req *http.Request) {
}
if strings.HasSuffix(req.URL.Path, "sprout-groth16.params") {
- log.WithFields(logrus.Fields{
+ Log.WithFields(logrus.Fields{
"method": "params",
"param": "sprout",
}).Info("ParamsHandler")
- metrics.TotalSproutParamsCounter.Inc()
+ Metrics.TotalSproutParamsCounter.Inc()
http.Redirect(w, req, "https://z.cash/downloads/sprout-groth16.params", 301)
return
@ -48,13 +43,3 @@ func paramsHandler(w http.ResponseWriter, req *http.Request) {
http.Error(w, "Not Found", 404)
}
- // ParamsDownloadHandler Listens on port 8090 for download requests for params
- func ParamsDownloadHandler(prommetrics *PrometheusMetrics, logger *logrus.Entry, port string) {
- metrics = prommetrics
- log = logger
- http.HandleFunc("/params/", paramsHandler)
- http.ListenAndServe(port, nil)
- }

71
common/generatecerts.go Normal file
View File

@ -0,0 +1,71 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package common
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"time"
)
// GenerateCerts create self signed certificate for local development use
// (and, if using grpcurl, specify the -insecure argument option)
func GenerateCerts() *tls.Certificate {
privKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
Log.Fatal("Failed to generate private key:", err)
}
publicKey := &privKey.PublicKey
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
Log.Fatal("Failed to generate serial number:", err)
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Lighwalletd developer"},
},
NotBefore: time.Now(),
NotAfter: time.Now().Local().Add(time.Hour * 24 * 365),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
// List of hostnames and IPs for the cert
template.DNSNames = append(template.DNSNames, "localhost")
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey, privKey)
if err != nil {
Log.Fatal("Failed to create certificate:", err)
}
// PEM encode the certificate (this is a standard TLS encoding)
b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
certPEM := pem.EncodeToMemory(&b)
// PEM encode the private key
privBytes, err := x509.MarshalPKCS8PrivateKey(privKey)
if err != nil {
Log.Fatal("Unable to marshal private key:", err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY", Bytes: privBytes,
})
// Create a TLS cert using the private key and certificate
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
Log.Fatal("invalid key pair:", err)
}
return &tlsCert
}

53
common/logging/logging.go Normal file
View File

@ -0,0 +1,53 @@
package logging
import (
"context"
"time"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
)
var LogToStderr bool
func LoggingInterceptor() grpc.ServerOption {
return grpc.UnaryInterceptor(LogInterceptor)
}
func loggerFromContext(ctx context.Context) *logrus.Entry {
// TODO: anonymize the addresses. cryptopan?
if peerInfo, ok := peer.FromContext(ctx); ok {
return log.WithFields(logrus.Fields{"peer_addr": peerInfo.Addr})
}
return log.WithFields(logrus.Fields{"peer_addr": "unknown"})
}
func LogInterceptor(
ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler,
) (interface{}, error) {
reqLog := loggerFromContext(ctx)
start := time.Now()
resp, err := handler(ctx, req)
if LogToStderr {
entry := reqLog.WithFields(logrus.Fields{
"method": info.FullMethod,
"duration": time.Since(start),
"error": err,
})
if err != nil {
entry.Error("call failed")
} else {
entry.Info("method called")
}
}
return resp, err
}
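As a usage sketch only (not the repository's actual server wiring; the listen address is a placeholder and no services are registered), the interceptor is passed to `grpc.NewServer` like any other server option:
```
package main

import (
	"log"
	"net"

	"github.com/adityapk00/lightwalletd/common/logging"
	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:9067") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	// Emit the per-call entries produced by LogInterceptor above.
	logging.LogToStderr = true
	// LoggingInterceptor() returns a grpc.ServerOption that wraps every unary RPC.
	server := grpc.NewServer(logging.LoggingInterceptor())
	// Register CompactTxStreamer / DarksideStreamer implementations here.
	log.Fatal(server.Serve(lis))
}
```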

View File

@ -0,0 +1,62 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package logging
import (
"context"
"fmt"
"os"
"testing"
"errors"
"github.com/adityapk00/lightwalletd/common"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
)
var step int
func testhandler(ctx context.Context, req interface{}) (interface{}, error) {
step++
switch step {
case 1:
return nil, errors.New("test error")
case 2:
return nil, nil
}
return nil, nil
}
func TestLogInterceptor(t *testing.T) {
output, err := os.OpenFile("test-log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
os.Stderr.WriteString(fmt.Sprint("Cannot open test-log:", err))
os.Exit(1)
}
logger := logrus.New()
logger.SetOutput(output)
common.Log = logger.WithFields(logrus.Fields{
"app": "test",
})
var req interface{}
resp, err := LogInterceptor(peer.NewContext(context.Background(), &peer.Peer{}),
&req, &grpc.UnaryServerInfo{}, testhandler)
if err == nil {
t.Fatal("unexpected success")
}
if resp != nil {
t.Fatal("unexpected response", resp)
}
resp, err = LogInterceptor(context.Background(), &req, &grpc.UnaryServerInfo{}, testhandler)
if err != nil {
t.Fatal("unexpected error", err)
}
if resp != nil {
t.Fatal("unexpected response", resp)
}
os.Remove("test-log")
step = 0
}

View File

@ -13,6 +13,11 @@ type PrometheusMetrics struct {
}
func GetPrometheusMetrics() *PrometheusMetrics {
+ if Metrics != nil {
+ return Metrics
+ }
+ // Create the metrics container
m := &PrometheusMetrics{}
m.LatestBlockCounter = prometheus.NewCounter(prometheus.CounterOpts{
Name: "lightwalletd_get_latest_block",

23
docgen.sh Executable file
View File

@ -0,0 +1,23 @@
#!/bin/bash
#
# read argument files, construct simple html
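# example usage (assumed invocation; adjust file names as needed):
#   ./docgen.sh walletrpc/*.proto > lwd-api.html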
echo '<html>'
echo '<head>'
echo '<title>Lightwalletd reference API</title>'
echo '</head>'
echo '<body>'
echo '<h1>Lightwalletd API reference</h1>'
for f
do
echo "<h2>$f</h2>"
echo '<pre>'
# list of reserved words https://developers.google.com/protocol-buffers/docs/proto3
sed <$f '
s/\/\/.*/<font color="grey">&<\/font>/
s/\(^\|[^a-zA-Z_.]\)\(message\|service\|enum\)\($\|[^a-zA-Z_0-9]\)/\1<font color="red">\2<\/font>\3/
s/\(^\|[^a-zA-Z_.]\)\(rpc\|reserved\|repeated\|enum\|stream\)\($\|[^a-zA-Z_0-9]\)/\1<font color="green">\2<\/font>\3/
s/\(^\|[^a-zA-Z_.]\)\(double\|float\|int32\|int64\|uint32\|uint64\|sint32\|sint64\|fixed32\|fixed64\|sfixed32\|sfixed64\|bool\|string\|bytes\)\($\|[^a-zA-Z_0-9]\)/\1<font color="blue">\2<\/font>\3/'
echo '</pre>'
done
echo '</body>'
echo '</html>'

99
docker-compose.yml Normal file
View File

@ -0,0 +1,99 @@
---
version: '2'
services:
lightwalletd:
build: .
env_file:
- .env
#entrypoint: ["/bin/bash", "-c", "sleep infinity"]
command:
- --grpc-bind-addr=0.0.0.0:$LWD_GRPC_PORT
- --http-bind-addr=0.0.0.0:$LWD_HTTP_PORT
- --zcash-conf-path=$ZCASHD_CONF_PATH
- --log-file=/dev/stdout
- --log-level=7
ports:
- "127.0.0.1:$LWD_GRPC_PORT:$LWD_GRPC_PORT"
- "127.0.0.1:$LWD_HTTP_PORT:$LWD_HTTP_PORT"
volumes:
- ./docker/:/srv/lightwalletd
- lightwalletd_cache:/var/lib/lightwalletd
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
zcashd:
image: electriccoinco/zcashd:latest
volumes:
- $ZCASHD_DATADIR:/srv/zcashd/.zcash
- $ZCASHD_PARMDIR:/srv/zcashd/.zcash-params
env_file:
- .env
mem_limit: 4G
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
zcashd_exporter:
image: electriccoinco/zcashd_exporter:latest
environment:
- ZCASHD_RPCUSER=$ZCASHD_RPCUSER
- ZCASHD_RPCPASSWORD=$ZCASHD_RPCPASSWORD
command:
- --rpc.host=zcashd
- --rpc.port=$ZCASHD_RPCPORT
- --rpc.user=$ZCASHD_RPCUSER
- --rpc.password=$ZCASHD_RPCPASSWORD
ports:
- "127.0.0.1:9100:9100"
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
grafana:
image: grafana/grafana:6.4.3
entrypoint:
- bash
- -c
- grafana-cli plugins install grafana-piechart-panel && /run.sh
ports:
- "127.0.0.1:3000:3000"
env_file:
- .env
volumes:
- ./docker/grafana/provisioning/:/etc/grafana/provisioning/
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
prometheus:
image: prom/prometheus:v2.13.1
ports:
- "127.0.0.1:9090:9090"
volumes:
- ./docker/prometheus/config.yml:/etc/prometheus/prometheus.yml
- promethus_data:/promethus_data
mem_limit: 2G
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
loki:
image: grafana/loki:master
ports:
- "127.0.0.1:3100:3100"
command: -config.file=/etc/loki/local-config.yaml
logging:
driver: loki
options:
loki-url: 'http://localhost:3100/api/prom/push'
volumes:
promethus_data:
lightwalletd_cache:

View File

@ -1,5 +0,0 @@
FROM scratch
COPY main /
ENTRYPOINT ["/main"]

145
docs/architecture.md Normal file
View File

@ -0,0 +1,145 @@
# Definitions
A **light wallet** is not a full participant in the network of Zcash peers. It can send and receive payments, but does not store or validate a copy of the blockchain.
A **compact transaction** is a representation of a Zcash Sapling transaction that contains only the information necessary to detect that a given Sapling payment output is for you and to spend a note.
A **compact block** is a collection of compact transactions along with certain metadata (such as the block header) from their source block.
# Architecture
```
+----------+
| zcashd | +----------+ +-------+
+----+-----+ +------->+ frontend +--->+ |
| | +----------+ | L +<----Client
| raw blocks +----+----+ | O B |
v | | | A A |
+----+-----+ | | +----------+ | D L +<---Client
| ingester +-------->+ storage +-->+ frontend +--->+ A |
+----------+ compact | | +----------+ | N +<-------Client
blocks | | | C |
+----+----+ | E +<----Client
| +----------+ | R |
+------->+ frontend +--->+ +<------Client
+----------+ +-------+
```
## Ingester
The ingester is the component responsible for transforming raw Zcash block data into a compact block.
The ingester is a modular component. Anything that can retrieve the necessary data and put it into storage can fulfill this role. Currently, the only ingester available communicates with zcashd through RPCs and parses the raw block data.
**How do I run it?**
⚠️ This section literally describes how to execute the binaries from source code. This is suitable only for testing, not production deployment. See section Production for cleaner instructions.
⚠️ Bringing up a fresh compact block database can take several hours of uninterrupted runtime.
First, install [Go >= 1.11](https://golang.org/dl/#stable). Older versions of Go may work but are not actively supported at this time. Note that the version of Go packaged by Debian stable (or anything prior to Buster) is far too old to work.
Now clone this repo and start the ingester. The first run will start slow as Go builds the sqlite C interface:
```
$ git clone https://github.com/adityapk00/lightwalletd
$ cd lightwalletd
$ go run cmd/ingest/main.go --conf-file <path_to_zcash.conf> --db-path <path_to_sqllightdb>
```
To see the other command line options, run `go run cmd/ingest/main.go --help`.
## Frontend
The frontend is the component that talks to clients.
It exposes an API that allows a client to query for current blockheight, request ranges of compact block data, request specific transaction details, and send new Zcash transactions.
The API is specified in [Protocol Buffers](https://developers.google.com/protocol-buffers/) and implemented using [gRPC](https://grpc.io). You can find the exact details in [these files](https://github.com/adityapk00/lightwalletd/tree/master/walletrpc).
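As a quick illustration, a minimal Go client for this API might look like the sketch below. It assumes a lightwalletd instance listening on `localhost:9067` with TLS disabled (for local testing only) and uses the generated `walletrpc` stubs:
```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/adityapk00/lightwalletd/walletrpc"
	"google.golang.org/grpc"
)

func main() {
	// Plaintext connection; production clients should use TLS credentials instead.
	conn, err := grpc.Dial("localhost:9067", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := walletrpc.NewCompactTxStreamerClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// ChainSpec is currently an empty placeholder argument.
	tip, err := client.GetLatestBlock(ctx, &walletrpc.ChainSpec{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest block height:", tip.Height)
}
```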
**How do I run it?**
⚠️ This section literally describes how to execute the binaries from source code. This is suitable only for testing, not production deployment. See section Production for cleaner instructions.
First, install [Go >= 1.11](https://golang.org/dl/#stable). Older versions of Go may work but are not actively supported at this time. Note that the version of Go packaged by Debian stable (or anything prior to Buster) is far too old to work.
Now clone this repo and start the frontend. The first run will start slow as Go builds the sqlite C interface:
```
$ git clone https://github.com/adityapk00/lightwalletd
$ cd lightwalletd
$ go run cmd/server/main.go --db-path <path to the same sqlite db> --bind-addr 0.0.0.0:9067
```
To see the other command line options, run `go run cmd/server/main.go --help`.
**What should I watch out for?**
x509 Certificates! This software relies on the confidentiality and integrity of a modern TLS connection between incoming clients and the front-end. Without an x509 certificate that incoming clients accurately authenticate, the security properties of this software are lost.
Otherwise, not much! This is a very simple piece of software. Make sure you point it at the same storage as the ingester. See the "Production" section for some caveats.
Support for users sending transactions will require the ability to make JSON-RPC calls to a zcashd instance. By default the frontend tries to pull RPC credentials from your zcashd.conf file, but you can specify other credentials via command line flag. In the future, it should be possible to do this with environment variables [(#2)](https://github.com/adityapk00/lightwalletd/issues/2).
## Storage
The storage provider is the component that caches compact blocks and their metadata for the frontend to retrieve and serve to clients.
It currently assumes a SQL database. The schema can be found [here](https://github.com/adityapk00/lightwalletd/blob/d53507cc39e8da52e14d08d9c63fee96d3bd16c3/storage/sqlite3.go#L15), but it is extremely provisional. We expect that anyone deploying lightwalletd at scale will adapt it to their own existing data infrastructure.
**How do I run it?**
It's not necessary to explicitly run anything. Both the ingester and the frontend code know how to use a generic SQL database via Go's [database/sql](https://golang.org/pkg/database/sql/) package. It should be possible to swap out for MySQL or Postgres by changing the driver import and connection string.
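For example, a sketch of what that swap looks like (the connection strings and both driver imports here are illustrative assumptions, not paths or names this document prescribes):
```
package main

import (
	"database/sql"
	"log"

	// A common cgo sqlite driver; the repo's actual driver import may differ.
	_ "github.com/mattn/go-sqlite3"
	// To switch to Postgres, import its driver instead, e.g.:
	// _ "github.com/lib/pq"
)

func main() {
	// sqlite: a single file on disk.
	db, err := sql.Open("sqlite3", "./compact-blocks.db")
	// Postgres would change only the driver name and connection string:
	// db, err := sql.Open("postgres", "host=localhost dbname=lightwalletd sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mirror the caveat below: sqlite's global write lock means at most
	// one open connection should be used.
	db.SetMaxOpenConns(1)

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```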
**What should I watch out for?**
sqlite is extremely reliable for what it is, but it isn't good at high concurrency. Because sqlite uses a global write lock, the code limits the number of open database connections to *one* and currently makes no distinction between read-only (frontend) and read/write (ingester) connections. It will probably begin to exhibit lock contention at low user counts, and should be improved or replaced with your own data store in production.
## Production
⚠️ This is informational documentation about a piece of alpha software. It has not yet undergone audits or been subject to rigorous testing. It lacks some affordances necessary for production-level reliability. We do not recommend using it to handle customer funds at this time (March 2019).
**x509 Certificates**
You will need to supply an x509 certificate that connecting clients will have good reason to trust (hint: do not use a self-signed one, our SDK will reject those unless you distribute them to the client out-of-band). We suggest that you be sure to buy a reputable one from a supplier that uses a modern hashing algorithm (NOT md5 or sha1) and that uses Certificate Transparency (OID 1.3.6.1.4.1.11129.2.4.2 will be present in the certificate).
To check a given certificate's (cert.pem) hashing algorithm:
```
openssl x509 -text -in cert.pem | grep "Signature Algorithm"
```
To check if a given certificate (cert.pem) contains a Certificate Transparency OID:
```
echo "1.3.6.1.4.1.11129.2.4.2 certTransparency Certificate Transparency" > oid.txt
openssl asn1parse -in cert.pem -oid ./oid.txt | grep 'Certificate Transparency'
```
To use Let's Encrypt to generate a free certificate for your frontend, one method is to:
1) Install certbot
2) Open port 80 to your host
3) Point some forward dns to that host (some.forward.dns.com)
4) Run
```
certbot certonly --standalone --preferred-challenges http -d some.forward.dns.com
```
5) Pass the resulting certificate and key to frontend using the -tls-cert and -tls-key options.
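For step 5, the certificate and key end up backing the gRPC listener's TLS credentials. A minimal Go sketch of that wiring (the certbot paths and port are placeholders, and no services are registered):
```
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// The files produced by certbot in step 4 (paths are illustrative).
	creds, err := credentials.NewServerTLSFromFile(
		"/etc/letsencrypt/live/some.forward.dns.com/fullchain.pem",
		"/etc/letsencrypt/live/some.forward.dns.com/privkey.pem")
	if err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", "0.0.0.0:9067")
	if err != nil {
		log.Fatal(err)
	}
	// Attach the TLS credentials to the gRPC server.
	server := grpc.NewServer(grpc.Creds(creds))
	// Register the CompactTxStreamer service here before serving.
	log.Fatal(server.Serve(lis))
}
```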
**Dependencies**
The first-order dependencies of this code are:
- Go (>= 1.11 suggested; older versions are currently unsupported)
- libsqlite3-dev (used by our sqlite interface library; optional with another datastore)
**Containers**
This software was designed to be container-friendly! We highly recommend that you package and deploy the software in this manner. We've created an example Docker environment that is likewise new and minimally tested, but it's functional.
**What's missing?**
lightwalletd currently lacks several things that you'll want in production. Caveats include:
- There are no monitoring / metrics endpoints yet. You're on your own to notice if it goes down or check on its performance.
- Logging coverage is patchy and inconsistent. However, what exists emits structured JSON compatible with various collectors.
- Logging may capture identifiable user data. It hasn't received any privacy analysis yet and makes no attempt at sanitization.
- The only storage provider we've implemented is sqlite. sqlite is [likely not appropriate](https://sqlite.org/whentouse.html) for the number of concurrent requests we expect to handle. Because sqlite uses a global write lock, the code limits the number of open database connections to *one* and currently makes no distinction between read-only (frontend) and read/write (ingester) connections. It will probably begin to exhibit lock contention at low user counts, and should be improved or replaced with your own data store in production.
- [Load-balancing with gRPC](https://grpc.io/blog/loadbalancing) may not work quite like you're used to. A full explanation is beyond the scope of this document, but we recommend looking into [Envoy](https://www.envoyproxy.io/), [nginx](https://nginx.com), or [haproxy](https://www.haproxy.org) depending on your existing infrastructure.

356
docs/darksidewalletd.md Normal file
View File

@ -0,0 +1,356 @@
# Intro to darksidewalletd
Darksidewalletd is a feature included in lightwalletd, enabled by the
`--darkside-very-insecure` flag, which can serve arbitrary blocks to a Zcash
light client wallet. This is useful for security and reorg testing. It includes
a minimally-functional mock zcashd which comes with a gRPC API for controlling
which blocks it will serve.
This means that you can use darksidewalletd to control the blocks and
transactions that are exposed to any light wallets that connect, to see how
they behave under different circumstances. Multiple wallets can connect to
the same darksidewalletd at the same time. Darksidewalletd should only be
used for testing, and therefore is hard-coded to shut down after 30 minutes
of operation to prevent accidental deployment as a server.
## Security warning
Leaving darksidewalletd running puts your machine at greater risk because (a)
it may be possible to use file: paths with `StageBlocks` to read arbitrary
files on your system, and (b) also using `StageBlocks`, someone can force
your system to make a web request to an arbitrary URL (which could have your
system download questionable material, perform attacks on other systems,
etc.). The maximum 30-minute run time limit built into darksidewalletd
mitigates these risks, but users should still be cautious.
## Dependencies
Darksidewalletd needs lightwalletd and most of lightwalletd's dependencies, including Go version 1.11 or later, but not zcashd. Since darksidewalletd mocks zcashd, it can run standalone and does not use zcashd to get blocks or to send and receive transactions.
For the tutorial the `grpcurl` tool is needed to call the `darksidewalletd`
gRPC API.
## Overview
### How Darksidewalletd Works
Lightwalletd and the wallets themselves dont actually perform any validation
of the blocks (beyond checking the blocks prevhashes, which is used to
detect reorgs). That means the blocks we give darksidewalletd dont need to
be fully valid, see table:
Block component|Must be valid|Must be partially valid|Not checked for validity
:-----|:-----|:-----|:-----
nVersion|x| |
hashPrevBlock|x| |
hashMerkleRoot| | |x
hashFinalSaplingRoot| | |x
nTime| | |x
nBits| | |x
nNonce| | |x
Equihash solution| | |x
Transaction Data*| |x|
\*Transactions in blocks must conform to the transaction format, but need not
have valid zero-knowledge proofs, etc.
For more information about block headers, see the Zcash protocol specification.
Lightwalletd provides us with a gRPC API for generating these
minimally-acceptable fake blocks. The API allows us to "stage" blocks and
transactions and later "apply" the staged objects so that they become visible
to lightwalletd and the wallets. How this is done is illustrated in the
tutorial below, but first we must start darksidewalletd.
### Running darksidewalletd
To start darksidewalletd, you run lightwalletd with the
`--darkside-very-insecure` flag:
```
./lightwalletd --darkside-very-insecure --no-tls-very-insecure --data-dir . --log-file /dev/stdout
```
To prevent accidental deployment in production, it will automatically shut off
after 30 minutes.
Now that `darksidewalletd` is running, you can control it by calling various
gRPCs to reset its state, stage blocks, stage transactions, and apply the
staged objects so that they become visible to the wallet. Examples of using
these gRPCs are given in the following tutorial.
## Tutorial
This tutorial is intended to illustrate basic control of `darksidewalletd`
using the `grpcurl` tool. You can use any gRPC library of your choice in
order to implement similar tests in your apps' test suite.
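You can also drive the same endpoints from Go through the generated `walletrpc` stubs instead of `grpcurl`. The snippet below is only a sketch: it assumes darksidewalletd is listening on `localhost:9067` with TLS disabled, and that the generated identifiers are named `NewDarksideStreamerClient` and `DarksideMetaState` (check `walletrpc/darkside.pb.go` for the exact names):
```
package main

import (
	"context"
	"log"
	"time"

	"github.com/adityapk00/lightwalletd/walletrpc"
	"google.golang.org/grpc"
)

func main() {
	// Plaintext connection, matching --no-tls-very-insecure above.
	conn, err := grpc.Dial("localhost:9067", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumed generated names; see walletrpc/darkside.pb.go.
	darkside := walletrpc.NewDarksideStreamerClient(conn)
	_, err = darkside.Reset(ctx, &walletrpc.DarksideMetaState{
		SaplingActivation: 663150,
		BranchID:          "bad",
		ChainName:         "x",
	})
	if err != nil {
		log.Fatal("Reset failed: ", err)
	}
}
```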
### Simulating a reorg that moves a transaction
In this example, we will simulate a reorg that moves a transaction from one
block height to another. This happens in two parts, first we create and apply
the "before reorg" state. Then we create the "after reorg" stage and apply
it, which makes the reorg happen.
#### Creating the Before-Reorg State
If you haven't already started darksidewalletd, please start it:
```
./lightwalletd --darkside-very-insecure --no-tls-very-insecure --data-dir . --log-file /dev/stdout
```
First, we need to reset darksidewalletd, specifying the sapling activation
height, branch ID, and chain name that will be told to wallets when they ask:
```
grpcurl -plaintext -d '{"saplingActivation": 663150,"branchID": "bad", "chainName":"x"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/Reset
```
Next, we will stage the real mainnet block 663150. In ECC's example wallets, this block is used as a checkpoint so we need to use the real block to pass that check.
```
grpcurl -plaintext -d '{"url": "https://raw.githubusercontent.com/zcash-hackworks/darksidewalletd-test-data/master/basic-reorg/663150.txt"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageBlocks
```
This has put block 663150 into darksidewalletd's staging area. The block has
not yet been exposed to the internal block-processing mechanism in
lightwalletd, and thus any wallets connected will have no idea it exists yet.
Next, we will use the `StageBlocksCreate` gRPC to generate 100 fake blocks on top of 663150 in darksidewalletd's staging area:
```
grpcurl -plaintext -d '{"height":663151,"count":100}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageBlocksCreate
```
Still, everything is in darksidewalletd's staging area, nothing has been
shown to any connected wallets yet. The staging area now contains the real
mainnet block 663150 and 100 fake blocks from 663151 to 663250.
Next we'll stage a transaction to go into block 663190. 663190 is within the
range of blocks we've staged; when we "apply" the staging area later on
darksidewalletd will merge this transaction into the fake 663190 block.
```
grpcurl -plaintext -d '{"height":663190,"url":"https://raw.githubusercontent.com/zcash-hackworks/darksidewalletd-test-data/master/transactions/recv/0821a89be7f2fc1311792c3fa1dd2171a8cdfb2effd98590cbd5ebcdcfcf491f.txt"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageTransactions
```
We have now finished filling darksidewalletd's staging area with the "before
reorg" state blocks. In darksidewalletd's staging area, we have blocks from
663150 to 663250, with a transaction staged to go in block 663190. All that's
left to do is "apply" the staging area, which will reveal the blocks to
lightwalletd's internal block processor and then on to any wallets that are
connected. We will apply the staged blocks up to height 663210 (any higher
staged blocks will remain in the staging area):
```
grpcurl -plaintext -d '{"height":663210}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/ApplyStaged
```
Note that we could have done this in the opposite order, it would have been
okay to stage the transaction first, and then stage the blocks later. All
that matters is that the transactions we stage get staged into block heights
that will have blocks staged for them before we "apply".
Now we can check that the transaction is in block 663190:
```
$ grpcurl -plaintext -d '{"height":663190}' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock
{
"height": "663190",
"hash": "Ax/AHLeTfnDuXWX3ZiYo+nWvh24lyMjvR0e2CAfqEok=",
"prevHash": "m5/epQ9d3wl4Z8bctOB/ZCuSl8Uko4DeIpKtKZayK4U=",
"time": 1,
"vtx": [
{
"index": "1",
"hash": "H0nPz83r1cuQhdn/LvvNqHEh3aE/LHkRE/zy55uoIQg=",
"spends": [
{
"nf": "xrZLCu+Kbv6PXo8cqM+f25Hp55L2cm95bM68JwUnDHg="
}
],
"outputs": [
{
"cmu": "pe/G9q13FyE6vAhrTPzIGpU5Dht5DvJTuc9zmTEx0gU=",
"epk": "qw5MPsRoe8aOnvZ/VB3r1Ja/WkHb52TVU1vyHjGEOqc=",
"ciphertext": "R2uN3CHagj7Oo+6O9VeBrE6x4dQ07Jl18rVM27vGhl1Io75lFYCHA1SrV72Zu+bgwMilTA=="
},
{
"cmu": "3rQ9DMmk7RaWGf9q0uOYQ7FieHL/TE8Z+QCcS/IJfkA=",
"epk": "U1NCOlTzIF1qlprAjuGUUj591GpO5Vs5WTsmCW35Pio=",
"ciphertext": "2MbBHjPbkDT/GVsXgDHhihFQizxvizHINXKVbXKnv3Ih1P4c1f3By+TLH2g1yAG3lSARuQ=="
}
]
}
]
}
$
```
#### Creating the After-Reorg State
Now, we can stage that same transaction into a different height, and force a
reorg.
First, stage 100 fake blocks starting at height 663180. This stages empty
blocks for heights 663180 through 663279. These are the blocks that will
change after the reorg.
```
grpcurl -plaintext -d '{"height":663180,"count":100}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageBlocksCreate
```
Now, stage that same transaction as before, but this time to height 663195
(previously we had put it in 663190):
```
grpcurl -plaintext -d '{"height":663195,"url":"https://raw.githubusercontent.com/zcash-hackworks/darksidewalletd-test-data/master/transactions/recv/0821a89be7f2fc1311792c3fa1dd2171a8cdfb2effd98590cbd5ebcdcfcf491f.txt"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageTransactions
```
Finally, we can apply the staged blocks and transactions to trigger a reorg:
```
grpcurl -plaintext -d '{"height":663210}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/ApplyStaged
```
This will simulate a reorg back to 663180 (new versions of 663180 and
beyond, same 663179), and the transaction will now be included in 663195
and will _not_ be in 663190.
After a moment you should see some "reorg" messages in the lightwalletd log
output to indicate that lightwalletd's internal block processor detected and
handled the reorg. If a wallet were connected to the lightwalletd instance,
it should also detect a reorg too.
Now we can check that the transaction is no longer in 663190:
```
$ grpcurl -plaintext -d '{"height":663190}' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock
{
"height": "663190",
"hash": "btosPfiJBX9m3nNSCP+vjAxWpEDS7Kfut9H7FY+mSYo=",
"prevHash": "m5/epQ9d3wl4Z8bctOB/ZCuSl8Uko4DeIpKtKZayK4U=",
"time": 1
}
$
```
Instead, it has "moved" to 663195:
```
$ grpcurl -plaintext -d '{"height":663195}' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetBlock
{
"height": "663195",
"hash": "CmcEQ/NZ9nSk+VdNfCEHvKu9MTNeWKoF1dZ7cWUTnCc=",
"prevHash": "04i1neRIgx7vgtDkrydYJu3KWjbY5g7QvUygNBfu6ug=",
"time": 1,
"vtx": [
{
"index": "1",
"hash": "H0nPz83r1cuQhdn/LvvNqHEh3aE/LHkRE/zy55uoIQg=",
"spends": [
{
"nf": "xrZLCu+Kbv6PXo8cqM+f25Hp55L2cm95bM68JwUnDHg="
}
],
"outputs": [
{
"cmu": "pe/G9q13FyE6vAhrTPzIGpU5Dht5DvJTuc9zmTEx0gU=",
"epk": "qw5MPsRoe8aOnvZ/VB3r1Ja/WkHb52TVU1vyHjGEOqc=",
"ciphertext": "R2uN3CHagj7Oo+6O9VeBrE6x4dQ07Jl18rVM27vGhl1Io75lFYCHA1SrV72Zu+bgwMilTA=="
},
{
"cmu": "3rQ9DMmk7RaWGf9q0uOYQ7FieHL/TE8Z+QCcS/IJfkA=",
"epk": "U1NCOlTzIF1qlprAjuGUUj591GpO5Vs5WTsmCW35Pio=",
"ciphertext": "2MbBHjPbkDT/GVsXgDHhihFQizxvizHINXKVbXKnv3Ih1P4c1f3By+TLH2g1yAG3lSARuQ=="
}
]
}
]
}
$
```
Just to illustrate a little more about how `ApplyStaged` works, we can check
that the current height is 663210 just like we specified in our last call to
`ApplyStaged`:
```
$ grpcurl -plaintext -d '' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock
{
"height": "663210"
}
```
Then apply 10 more of the blocks that are still in the staging area:
```
grpcurl -plaintext -d '{"height":663220}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/ApplyStaged
```
And confirm that the current height has increased:
```
$ grpcurl -plaintext -d '' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetLatestBlock
{
"height": "663220"
}
```
That concludes the tutorial. You should now know how to stage blocks from a
URL using `StageBlocks`, stage synthetic empty blocks using
`StageBlocksCreate`, stage transactions from a URL to go into particular
blocks using `StageTransactions`, and then make the staged blocks and
transactions live using `ApplyStaged`.
On top of what we covered in the tutorial, you can also...
- Stage blocks and transactions directly (without them having to be
accessible at a URL) using `StageBlocksStream` and `StageTransactionsStream`.
- Get all of the transactions sent by connected wallets using
`GetIncomingTransactions` (and clear the buffer that holds them using
`ClearIncomingTransactions`).
See [darkside.proto](/walletrpc/darkside.proto) for a complete definition of
all the gRPCs that darksidewalletd supports.
## Generating Fake Block Sets
Theres a tool to help with generating these fake just-barely-valid-enough
blocks; its called genblocks. To use it, you create a directory of text files,
one file per block, and each line in the file is a hex-encoded transaction that
should go into that block:
```
mkdir blocksA
touch blocksA/{1000,1001,1002,1003,1004,1005}.txt
echo “some hex-encoded transaction you want to put in block 1003” > blocksA/1003.txt
```
Running genblocks on this directory will output the blocks, one hex-encoded block per line. This is the
format that will be accepted by `StageBlocks`.
Tip: Because nothing is checking the full validity of transactions, you can get
any hex-encoded transaction you want from a block explorer and put those in the
block files. The sochain block explorer makes it easy to obtain the raw
transaction hex, by viewing the transaction (example), clicking “Raw Data”, then
copying the “tx_hex” field.
### Simulating the mempool
The `GetMempoolTx` gRPC will return staged transactions that are either within
staged blocks or that have been staged separately. Here is an example:
```
grpcurl -plaintext -d '{"saplingActivation": 663150,"branchID": "bad", "chainName":"x"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/Reset
grpcurl -plaintext -d '{"url": "https://raw.githubusercontent.com/zcash-hackworks/darksidewalletd-test-data/master/tx-incoming/blocks.txt"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageBlocks
grpcurl -plaintext -d '{"txid":["qg=="]}' localhost:9067 cash.z.wallet.sdk.rpc.CompactTxStreamer/GetMempoolTx
```
## Use cases
Check out some of the potential security test cases here: [wallet <->
lightwalletd integration
tests](https://github.com/adityapk00/lightwalletd/blob/master/docs/integration-tests.md)

View File

@ -0,0 +1,100 @@
# Installation and setup
## Install requirements
- [docker](https://docs.docker.com/install/)
- [docker-compose](https://docs.docker.com/compose/install/)
- loki plugin for docker logs
```
docker plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
```
## Setup .env file
Copy `.env.example` to `.env` and change any required parameters.
| Variable | Usage |
| ------------- |:-------------:|
| `GF_SECURITY_ADMIN_USER` | Grafana admin user name |
| `ZCASHD_RPCUSER` | zcashd rpc user |
| `ZCASHD_RPCPORT` | zcashd rpc port |
| `ZCASHD_ALLOWIP` | zcashd rpc allowed IPs (don't change unless you know what you're doing) |
| `ZCASHD_DATADIR` | local location of the zcashd data directory. `uid` 2001 needs write access |
| `ZCASHD_PARMDIR` | local location of the zcashd params directory. `uid` 2001 needs read access |
| `ZCASHD_NETWORK` | zcashd network to use, `testnet` or `mainnet` |
| `ZCASHD_GEN` | should zcashd mine? `0` or `1` |
| `LWD_PORT` | port for lightwalletd to bind to |
| `ZCASHD_CONF_PATH` | path for lightwalletd to pick up configuration |
## Populate secret env vars with random values
```
./buildenv.sh | tee .env
```
## Build initial local docker image
`docker-compose build`
## Start the project
```
docker-compose up -d
```
# Setup and use Grafana
Open a browser to http://localhost:3000
![grafana-login](./images/grafana-login.png)
Login with the user (`GF_SECURITY_ADMIN_USER`) and password (`GF_SECURITY_ADMIN_PASSWORD`).
The values can be found in your `.env` file
Open the `Dashboard Manage` menu on the left
![grafana-manage](./images/grafana-manage.png)
Select `Import`
![grafana-import](./images/grafana-import-1.png)
Enter `11325` for the `Grafana.com Dashboard`
![grafana-import](./images/grafana-import-2.png)
On the next screen, select the `Prometheus` and `Loki` values (there should only be 1 to select)
![grafana-configure](./images/grafana-configure.png)
Click `Import`
You should then be taken to the `Zcashd node exporter` dashboard.
![grafana-zcashd-dashboard](./images/grafana-zcashd-dashboard.png)
If all goes as planned, the dashboard should start populating data from the container services.
If there are any issues, you can view all the `docker-compose` services under the `Explore` section.
# Viewing container logs
Open the `Explore` menu entry
![grafana-explore.png](./images/grafana-explore.png)
Make sure `Loki` is selected as the datasource at the top.
![grafana-explore2](./images/grafana-explore-2.png)
Then choose the container to view its logs.
![grafana-explore3](./images/grafana-explore-3.png)
Loki has a rich query syntax to help with logs in many ways, for example combining two containers' log entries:
![grafana-explore4](./images/grafana-explore-4.png)
See more here: https://github.com/grafana/loki/blob/master/docs/logql.md

81
docs/docker-run.md Normal file
View File

@ -0,0 +1,81 @@
# Docker images
Docker images are available on Docker Hub at [electriccoinco/lightwalletd](https://hub.docker.com/repository/docker/electriccoinco/lightwalletd).
## Using command line options
Already have a Zcash node running with an exposed RPC endpoint?
Try the docker container with command lines flags like:
```
docker run --rm -p 9067:9067 \
electriccoinco/lightwalletd:v0.4.2 \
--grpc-bind-addr 0.0.0.0:9067 \
--no-tls-very-insecure \
--rpchost 192.168.86.46 \
--rpcport 38237 \
--rpcuser zcashrpc \
--rpcpassword notsecure \
--log-file /dev/stdout
```
## Preserve the compactblocks database between runs
Like the first example, but this will preserve the lightwalletd compactblocks database for use between runs.
Create a directory somewhere and change the `uid` to `2002`.
This is the id of the restricted lightwalletd user inside the container.
```
mkdir ./lightwalletd_db_volume
sudo chown 2002 ./lightwalletd_db_volume
```
Now add a `--volume` mapping from the local file path to where we want it to show up inside the container.
Then, add `--data-dir` to the lightwalletd command, with its value set to the mapped path as viewed from inside the container.
```
docker run --rm -p 9067:9067 \
--volume $(pwd)/lightwalletd_db_volume:/srv/lightwalletd/db_volume \
electriccoinco/lightwalletd:v0.4.2 \
--grpc-bind-addr 0.0.0.0:9067 \
--no-tls-very-insecure \
--rpchost 192.168.86.46 \
--rpcport 38237 \
--rpcuser zcashrpc \
--rpcpassword notsecure \
--data-dir /srv/lightwalletd/db_volume \
--log-file /dev/stdout
```
## Using a YAML config file
When using a configuration file with the docker image, you must create the configuration file and then map it into the container. Finally, provide a command line option referencing the mapped file location.
Create a configuration file:
```
cat <<EOF >lightwalletd.yml
no-tls-very-insecure: true
log-file: /dev/stdout
rpcuser: zcashrpc
rpcpassword: notsecure
rpchost: 192.168.86.46
rpcport: 38237
grpc-bind-addr: 0.0.0.0:9067
EOF
```
Use it with the docker container
```
docker run --rm \
-p 9067:9067 \
-v $(pwd)/lightwalletd.yml:/tmp/lightwalletd.yml \
electriccoinco/lightwalletd:v0.4.2 \
--config /tmp/lightwalletd.yml
```
## Using docker-compose for a full stack
Don't have an existing Zcash node? Check out the [docker-compose](./docker-compose-setup.md) for examples of multi-container usage.

Binary image files added (the Grafana screenshots referenced in the docs above); contents not shown.

71
docs/integration-tests.md Normal file
View File

@ -0,0 +1,71 @@
# Wallet ⟷ Lightwalletd Integration Tests
## High-priority tests
Funds are at risk if these tests fail.
**Reorged-Away Transaction**
A transparent/shielded transaction is sent to the wallet in block N containing value v. There's a reorg to height N-k for some k >= 1, and after the reorg the original transaction is no longer there but there is a new transaction with a different value u. Before the reorg, the wallet should detect the transaction and show unconfirmed balance v. After the reorg, the wallet should show unconfirmed balance u. Some number of blocks later, the balance is marked as confirmed.
Consequences if this test fails: An attacker could take advantage of regular/accidental reorgs to confuse the wallet about its balance.
**Dropped from Mempool**
Similar to the reorged-away transaction test, except the transaction only enters the mempool and is never mined.
Consequences: An attacker could confuse wallets about their balance by arranging for a transaction to enter the mempool but not be mined.
**Transparent TXID Malleated**
The wallet sends a transparent transaction. The transaction is malleated to change its transaction ID, and the malleated version is then mined. After sending the transaction, the wallets balance should be reduced by the value of the transaction. 100 blocks after the malleated transaction was mined, the wallets balance should still be reduced by that amount.
Consequences if this test fails: An attacker could malleate one of the wallets transparent transactions, and if it times out thinking it was never mined, the wallet would think it has balance when it doesnt.
**Transaction Never Mined**
The wallet broadcasts a transparent/shielded transaction optionally with an expiry height. For 100 blocks (or at least until the expiry height), the transaction is never mined. After sending the transaction, the wallets balance should be reduced by the value of the transaction, and it should stay reduced by that amount until the expiry height (if any).
Consequences if this test fails: If the wallet concludes the transaction will never be mined before the expiry height, then an attacker can delay mining the transaction to cause the wallet to think it has more funds than it does.
**Transaction Created By Other Wallet**
A seed is imported into three wallets with transparent/shielded funds. Wallet A sends a payment to some address. At the same time, Wallet B sends a payment of a different amount using some of the same UTXOs or notes. Wallet C does not send any payments. Wallet Bs transaction gets mined instead of Wallet As. The balances of all three wallets are decreased by the value of Wallet Bs transaction.
Consequences if this test fails: A user importing their seed into multiple wallets and making simultaneous transactions could lead them to be confused about their balance.
**Anchor Invalidation**
A wallet broadcasts a sapling transaction using a recent anchor. A reorg occurs which invalidates that anchor, i.e. some of the previous shielded transactions changed. (Depending on how we want to handle this) the wallet either detects this and re-broadcasts the transaction or marks the transaction as failed and the funds become spendable again.
Consequences if this test fails: Wallets might get confused about their balance if this ever occurs.
**Secret Transactions**
Lightwalletd has some shielded/transparent funds. It creates a real transaction sending these funds to the wallet, such that if the transaction were broadcast on the Zcash network, the wallet really would get the funds. However, instead of broadcasting the transaction, the lightwalletd operator includes the transaction in a compact block, but does not broadcast the transaction to the actual network. The wallet should detect that the transaction has not really been mined by miners on the Zcash network, and not show an increased balance.
(Currently, this test will fail, since the wallet is not checking the PoW or block headers at all. Worse, even with PoW/header checks, lightwalletd could mine down the difficulty to have its wallets follow an invalid chain. To combat this, wallets would need to reach out to multiple independent lightwalletds to verify they have the highest-PoW chain. An alternative I like a lot better is Larrys idea: warn when the PoW is below a certain threshold, chosen to be above what most attackers could do but low enough that wed legitimately want to warn users if it drops that low.)
Consequences if this test fails: lightwalletd can make it appear as though a wallet received funds when it didnt.
## Medium-priority tests
Funds arent at risk if these fail but theres a severe problem.
**Normal Payments**
Wallet A sends a shielded/transparent transaction to Wallet B. Wallet B receives the transaction and sends half back to wallet A. Wallet A receives the transaction, and B receives change. All of the balances end up as expected.
Consequences if this test fails: Normal functionality of the wallet is broken.
**Mempool DoS**
The transactions in the mempool constantly churn. The wallet should limit its bandwidth used to fetch new transactions in the mempool, rather than using an unlimited amount.
Consequences if this test fails: Its possible to run up the bandwidth bills of wallet users.
## Low-priority tests
These wont occur unless lightwalletd is evil.
**High Block Number**
Lightwalletd announces that the latest block is some very large number, much larger than the actual block height. The wallet syncs up to that point (with lightwalletd providing fake blocks all the way). Lightwalletd then stops lying about the block height and blocks. This should trigger the wallets reorg limit and the wallet should be unusable.
**Repeated Note**
A shielded transaction is sent to the wallet. Lightwalletd simply repeats the transaction in a compact block sent to the wallet. The wallet should not think it has twice as much money. From this point, no shielded transactions the wallet sends can be mined, since they will use invalid anchors.
**Invalid Note**
Same as repeated note above, but random data. The results should be exactly the same.
**Omitted Note**
A shielded transaction is sent to the wallet. Lightwalletd simply does not send the transaction to the wallet (omits it from the compact block). From this point, no shielded transactions the wallet sends can be mined, since they will use invalid anchors.

View File

@ -0,0 +1,69 @@
0.4.1 Release Notes
===============================
Lightwalletd version 0.4.1 is now available from:
<https://github.com/zcash/lightwalletd/releases/tag/v0.4.1>
Or cloned from:
<https://github.com/zcash/lightwalletd/tree/v0.4.1>
Lightwalletd must be built from source code (there are no binary releases
at this time).
This minor release includes various bug fixes, performance
improvements, and test code improvements.
Please report bugs using the issue tracker at GitHub:
<https://github.com/zcash/lightwalletd/issues>
How to Upgrade
==============
If you are running an older version, shut it down. Run `make` to generate
the `./lightwalletd` executable. Run `./lightwalletd version` to verify
that you're running the correct version (v0.4.1). Some of the command-line
arguments (options) have changed since the previous release; please
run `./lightwalletd help` to view them.
Compatibility
==============
Lightwalletd is supported and extensively tested on operating systems using
the Linux kernel, and to a lesser degree macOS. It is not recommended
to use Lightwalletd on unsupported systems.
0.4.1 change log
=================
### Infrastructure
- #161 Add docker-compose
- #227 Added tekton for Docker image build
- #236 Add http endpoint and prometheus metrics framework
### Tests and QA
- #234 darksidewalletd
### Documentation
- #107 Reorg documents for updates and upcoming new details
- #188 add documentation for lightwalletd APIs and data types
- #195 add simple gRPC test client
- #270 add issue and PR templates
Credits
=======
Thanks to everyone who directly contributed to this release:
- adityapk00
- Marshall Gaucher
- Kevin Gorhan
- Taylor Hornby
- Linda Lee
- Brad Miller
- Charlie O'Keefe
- Larry Ruane
- Za Wilcox
- Ben Wilson

1810
docs/rtd/index.html Normal file

File diff suppressed because it is too large Load Diff

594
frontend/frontend_test.go Normal file
View File

@ -0,0 +1,594 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package frontend
import (
"bufio"
"bytes"
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"strings"
"testing"
"github.com/adityapk00/lightwalletd/common"
"github.com/adityapk00/lightwalletd/walletrpc"
"github.com/sirupsen/logrus"
)
var (
testT *testing.T
logger = logrus.New()
step int
blocks [][]byte // four test blocks
rawTxData [][]byte
)
const (
unitTestPath = "unittestcache"
unitTestChain = "unittestnet"
)
func testsetup() (walletrpc.CompactTxStreamerServer, *common.BlockCache) {
os.RemoveAll(unitTestPath)
cache := common.NewBlockCache(unitTestPath, unitTestChain, 380640, true)
lwd, err := NewLwdStreamer(cache, "main", false /* enablePing */)
if err != nil {
os.Stderr.WriteString(fmt.Sprint("NewLwdStreamer failed:", err))
os.Exit(1)
}
return lwd, cache
}
func TestMain(m *testing.M) {
output, err := os.OpenFile("test-log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
os.Stderr.WriteString(fmt.Sprint("Cannot open test-log:", err))
os.Exit(1)
}
logger.SetOutput(output)
common.Log = logger.WithFields(logrus.Fields{
"app": "test",
})
// Several tests need test blocks; read all 4 into memory just once
// (for efficiency).
testBlocks, err := os.Open("../testdata/blocks")
if err != nil {
os.Stderr.WriteString(fmt.Sprint("Error:", err))
os.Exit(1)
}
defer testBlocks.Close()
scan := bufio.NewScanner(testBlocks)
for scan.Scan() { // each line (block)
blockJSON, _ := json.Marshal(scan.Text())
blocks = append(blocks, blockJSON)
}
testData, err := os.Open("../testdata/zip243_raw_tx")
if err != nil {
os.Stderr.WriteString(fmt.Sprint("Error:", err))
os.Exit(1)
}
defer testData.Close()
// Parse the raw transactions file
rawTxData = [][]byte{}
scan = bufio.NewScanner(testData)
for scan.Scan() {
dataLine := scan.Text()
// Skip the comments
if strings.HasPrefix(dataLine, "#") {
continue
}
txData, err := hex.DecodeString(dataLine)
if err != nil {
os.Stderr.WriteString(fmt.Sprint("Error:", err))
os.Exit(1)
}
rawTxData = append(rawTxData, txData)
}
// Setup is done; run all tests.
exitcode := m.Run()
// cleanup
os.Remove("test-log")
os.RemoveAll(unitTestPath)
os.Exit(exitcode)
}
func TestGetTransaction(t *testing.T) {
// GetTransaction() will mostly be tested below via TestGetTaddressTxids
testT = t
lwd, _ := testsetup()
rawtx, err := lwd.GetTransaction(context.Background(),
&walletrpc.TxFilter{})
if err == nil {
testT.Fatal("GetTransaction unexpectedly succeeded")
}
if err.Error() != "Please call GetTransaction with txid" {
testT.Fatal("GetTransaction unexpected error message")
}
if rawtx != nil {
testT.Fatal("GetTransaction non-nil rawtx returned")
}
rawtx, err = lwd.GetTransaction(context.Background(),
&walletrpc.TxFilter{Block: &walletrpc.BlockID{Hash: []byte{}}})
if err == nil {
testT.Fatal("GetTransaction unexpectedly succeeded")
}
if err.Error() != "Can't GetTransaction with a blockhash+num. Please call GetTransaction with txid" {
testT.Fatal("GetTransaction unexpected error message")
}
if rawtx != nil {
testT.Fatal("GetTransaction non-nil rawtx returned")
}
}
func getblockStub(method string, params []json.RawMessage) (json.RawMessage, error) {
step++
var height string
err := json.Unmarshal(params[0], &height)
if err != nil {
testT.Fatal("could not unmarshal height")
}
if height != "380640" {
testT.Fatal("unexpected getblock height", height)
}
// Test retry logic (for the moment, it's very simple, just one retry).
switch step {
case 1:
return blocks[0], nil
case 2:
return nil, errors.New("getblock test error")
}
testT.Fatal("unexpected call to getblockStub")
return nil, nil
}
func TestGetLatestBlock(t *testing.T) {
testT = t
common.RawRequest = getblockStub
lwd, cache := testsetup()
// This argument is not used (it may be used in the future)
req := &walletrpc.ChainSpec{}
blockID, err := lwd.GetLatestBlock(context.Background(), req)
if err == nil {
t.Fatal("GetLatestBlock should have failed, empty cache")
}
if err.Error() != "Cache is empty. Server is probably not yet ready" {
t.Fatal("GetLatestBlock incorrect error", err)
}
if blockID != nil {
t.Fatal("unexpected blockID", blockID)
}
// This does zcashd rpc "getblock", calls getblockStub() above
block, err := common.GetBlock(cache, 380640)
if err != nil {
t.Fatal("getBlockFromRPC failed", err)
}
if err = cache.Add(380640, block); err != nil {
t.Fatal("cache.Add failed:", err)
}
blockID, err = lwd.GetLatestBlock(context.Background(), req)
if err != nil {
t.Fatal("lwd.GetLatestBlock failed", err)
}
if blockID.Height != 380640 {
t.Fatal("unexpected blockID.height")
}
step = 0
}
// A valid transparent address is "t" followed by 34 alphanumeric characters;
// every test string below should be detected as invalid.
var addressTests = []string{
"", // too short
"a", // too short
"t123456789012345678901234567890123", // one byte too short
"t12345678901234567890123456789012345", // one byte too long
"t123456789012345678901234567890123*", // invalid "*"
"s1234567890123456789012345678901234", // doesn't start with "t"
" t1234567890123456789012345678901234", // extra stuff before
"t1234567890123456789012345678901234 ", // extra stuff after
"\nt1234567890123456789012345678901234", // newline before
"t1234567890123456789012345678901234\n", // newline after
}
func zcashdrpcStub(method string, params []json.RawMessage) (json.RawMessage, error) {
step++
switch method {
case "getaddresstxids":
var filter common.ZcashdRpcRequestGetaddresstxids
err := json.Unmarshal(params[0], &filter)
if err != nil {
testT.Fatal("could not unmarshal block filter")
}
if len(filter.Addresses) != 1 {
testT.Fatal("wrong number of addresses")
}
if filter.Addresses[0] != "t1234567890123456789012345678901234" {
testT.Fatal("wrong address")
}
if filter.Start != 20 {
testT.Fatal("wrong start")
}
if filter.End != 30 {
testT.Fatal("wrong end")
}
return []byte("[\"6732cf8d67aac5b82a2a0f0217a7d4aa245b2adb0b97fd2d923dfc674415e221\"]"), nil
case "getrawtransaction":
switch step {
case 2:
tx := &common.ZcashdRpcReplyGetrawtransaction{
Hex: hex.EncodeToString(rawTxData[0]),
Height: 1234567,
}
return json.Marshal(tx)
case 4:
// return an error along with an empty value; this should be handled cleanly
return []byte(""), errors.New("-5: test getrawtransaction error")
}
}
testT.Fatal("unexpected call to zcashdrpcStub")
return nil, nil
}
type testgettx struct {
walletrpc.CompactTxStreamer_GetTaddressTxidsServer
}
func (tg *testgettx) Context() context.Context {
return context.Background()
}
func (tg *testgettx) Send(tx *walletrpc.RawTransaction) error {
if !bytes.Equal(tx.Data, rawTxData[0]) {
testT.Fatal("mismatch transaction data")
}
if tx.Height != 1234567 {
testT.Fatal("unexpected transaction height", tx.Height)
}
return nil
}
func TestGetTaddressTxids(t *testing.T) {
testT = t
common.RawRequest = zcashdrpcStub
lwd, _ := testsetup()
addressBlockFilter := &walletrpc.TransparentAddressBlockFilter{
Range: &walletrpc.BlockRange{
Start: &walletrpc.BlockID{Height: 20},
End: &walletrpc.BlockID{Height: 30},
},
}
// Ensure that a bad address is detected
for i, addressTest := range addressTests {
addressBlockFilter.Address = addressTest
err := lwd.GetTaddressTxids(addressBlockFilter, &testgettx{})
if err == nil {
t.Fatal("GetTaddressTxids should have failed on bad address, case", i)
}
if err.Error() != "Invalid address" {
t.Fatal("GetTaddressTxids incorrect error on bad address, case", i)
}
}
// valid address
addressBlockFilter.Address = "t1234567890123456789012345678901234"
err := lwd.GetTaddressTxids(addressBlockFilter, &testgettx{})
if err != nil {
t.Fatal("GetTaddressTxids failed", err)
}
// this time GetTransaction() will return an error
err = lwd.GetTaddressTxids(addressBlockFilter, &testgettx{})
if err == nil {
t.Fatal("GetTaddressTxids succeeded")
}
step = 0
}
func TestGetTaddressTxidsNilArgs(t *testing.T) {
lwd, _ := testsetup()
{
noRange := &walletrpc.TransparentAddressBlockFilter{
Range: nil,
}
err := lwd.GetTaddressTxids(noRange, &testgettx{})
if err == nil {
t.Fatal("GetBlockRange nil range argument should fail")
}
}
{
noStart := &walletrpc.TransparentAddressBlockFilter{
Range: &walletrpc.BlockRange{
Start: nil,
End: &walletrpc.BlockID{Height: 20},
},
}
err := lwd.GetTaddressTxids(noStart, &testgettx{})
if err == nil {
t.Fatal("GetBlockRange nil range argument should fail")
}
}
{
noEnd := &walletrpc.TransparentAddressBlockFilter{
Range: &walletrpc.BlockRange{
Start: &walletrpc.BlockID{Height: 30},
End: nil,
},
}
err := lwd.GetTaddressTxids(noEnd, &testgettx{})
if err == nil {
t.Fatal("GetBlockRange nil range argument should fail")
}
}
}
func TestGetBlock(t *testing.T) {
testT = t
common.RawRequest = getblockStub
lwd, _ := testsetup()
_, err := lwd.GetBlock(context.Background(), &walletrpc.BlockID{})
if err == nil {
t.Fatal("GetBlock should have failed")
}
_, err = lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 0})
if err == nil {
t.Fatal("GetBlock should have failed")
}
_, err = lwd.GetBlock(context.Background(), &walletrpc.BlockID{Hash: []byte{0}})
if err == nil {
t.Fatal("GetBlock should have failed")
}
if err.Error() != "GetBlock by Hash is not yet implemented" {
t.Fatal("GetBlock hash unimplemented error message failed")
}
// getblockStub() case 1: return the block (success)
block, err := lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 380640})
if err != nil {
t.Fatal("GetBlock failed:", err)
}
if block.Height != 380640 {
t.Fatal("GetBlock returned unexpected block:", err)
}
// getblockStub() case 2: return error
block, err = lwd.GetBlock(context.Background(), &walletrpc.BlockID{Height: 380640})
if err == nil {
t.Fatal("GetBlock should have failed")
}
if block != nil {
t.Fatal("GetBlock returned unexpected non-nil block")
}
step = 0
}
type testgetbrange struct {
walletrpc.CompactTxStreamer_GetBlockRangeServer
}
func (tg *testgetbrange) Context() context.Context {
return context.Background()
}
func (tg *testgetbrange) Send(cb *walletrpc.CompactBlock) error {
return nil
}
func TestGetBlockRange(t *testing.T) {
testT = t
common.RawRequest = getblockStub
lwd, _ := testsetup()
blockrange := &walletrpc.BlockRange{
Start: &walletrpc.BlockID{Height: 380640},
End: &walletrpc.BlockID{Height: 380640},
}
// getblockStub() case 1 (success)
err := lwd.GetBlockRange(blockrange, &testgetbrange{})
if err != nil {
t.Fatal("GetBlockRange failed", err)
}
// getblockStub() case 2 (failure)
err = lwd.GetBlockRange(blockrange, &testgetbrange{})
if err == nil {
t.Fatal("GetBlockRange should have failed")
}
step = 0
}
func TestGetBlockRangeNilArgs(t *testing.T) {
lwd, _ := testsetup()
{
noEnd := &walletrpc.BlockRange{
Start: &walletrpc.BlockID{Height: 380640},
End: nil,
}
err := lwd.GetBlockRange(noEnd, &testgetbrange{})
if err == nil {
t.Fatal("GetBlockRange nil argument should fail")
}
}
{
noStart := &walletrpc.BlockRange{
Start: nil,
End: &walletrpc.BlockID{Height: 380640},
}
err := lwd.GetBlockRange(noStart, &testgetbrange{})
if err == nil {
t.Fatal("GetBlockRange nil argument should fail")
}
}
}
func sendrawtransactionStub(method string, params []json.RawMessage) (json.RawMessage, error) {
step++
if method != "sendrawtransaction" {
testT.Fatal("unexpected method")
}
if string(params[0]) != "\"07\"" {
testT.Fatal("unexpected tx data")
}
switch step {
case 1:
return []byte("sendtxresult"), nil
case 2:
return nil, errors.New("-17: some error")
}
testT.Fatal("unexpected call to sendrawtransactionStub")
return nil, nil
}
func TestSendTransaction(t *testing.T) {
testT = t
lwd, _ := testsetup()
common.RawRequest = sendrawtransactionStub
rawtx := walletrpc.RawTransaction{Data: []byte{7}}
sendresult, err := lwd.SendTransaction(context.Background(), &rawtx)
if err != nil {
t.Fatal("SendTransaction failed", err)
}
if sendresult.ErrorCode != 0 {
t.Fatal("SendTransaction unexpected ErrorCode return")
}
if sendresult.ErrorMessage != "sendtxresult" {
t.Fatal("SendTransaction unexpected ErrorMessage return")
}
// sendrawtransactionStub case 2 (error);
// note that the error is returned within the response, not as a gRPC error
sendresult, err = lwd.SendTransaction(context.Background(), &rawtx)
if err != nil {
t.Fatal("SendTransaction failed:", err)
}
if sendresult.ErrorCode != -17 {
t.Fatal("SendTransaction unexpected ErrorCode return")
}
if sendresult.ErrorMessage != "some error" {
t.Fatal("SendTransaction unexpected ErrorMessage return")
}
step = 0
}
var sampleconf = `
testnet = 1
rpcport = 18232
rpcbind = 127.0.0.1
rpcuser = testlightwduser
rpcpassword = testlightwdpassword
`
func TestNewZRPCFromConf(t *testing.T) {
connCfg, err := connFromConf([]byte(sampleconf))
if err != nil {
t.Fatal("connFromConf failed")
}
if connCfg.Host != "127.0.0.1:18232" {
t.Fatal("connFromConf returned unexpected Host")
}
if connCfg.User != "testlightwduser" {
t.Fatal("connFromConf returned unexpected User")
}
if connCfg.Pass != "testlightwdpassword" {
t.Fatal("connFromConf returned unexpected User")
}
if !connCfg.HTTPPostMode {
t.Fatal("connFromConf returned unexpected HTTPPostMode")
}
if !connCfg.DisableTLS {
t.Fatal("connFromConf returned unexpected DisableTLS")
}
// can't pass an integer
connCfg, err = connFromConf(10)
if err == nil {
t.Fatal("connFromConf unexpected success")
}
// Can't verify returned values, but at least run it
_, err = NewZRPCFromConf([]byte(sampleconf))
if err != nil {
t.Fatal("NewZRPCFromClient failed")
}
_, err = NewZRPCFromConf(10)
if err == nil {
t.Fatal("NewZRPCFromClient unexpected success")
}
}
func TestMempoolFilter(t *testing.T) {
txidlist := []string{
"2e819d0bab5c819dc7d5f92d1bfb4127ce321daf847f6602",
"29e594c312eee49bc2c9ad37367ba58f857c4a7387ec9715",
"d4d090e60bf9141c6573f0598b84cc1f9817543e55a4d84d",
"d4714779c6dd32a72077bd79d4a70cb2153b552d7addec15",
"9839c1d4deca000656caff57c1f720f4fbd114b52239edde",
"ce5a28854a509ab309faa433542e73414fef6e903a3d52f5",
}
exclude := []string{
"98aa", // common prefix (98) but no match
"19", // no match
"29", // one match (should not appear)
"d4", // 2 matches (both should appear in result)
"ce5a28854a509ab309faa433542e73414fef6e903a3d52f5", // exact match
"ce5a28854a509ab309faa433542e73414fef6e903a3d52f500", // extra stuff ignored
}
expected := []string{
"2e819d0bab5c819dc7d5f92d1bfb4127ce321daf847f6602",
"9839c1d4deca000656caff57c1f720f4fbd114b52239edde",
"d4714779c6dd32a72077bd79d4a70cb2153b552d7addec15",
"d4d090e60bf9141c6573f0598b84cc1f9817543e55a4d84d",
}
actual := MempoolFilter(txidlist, exclude)
if len(actual) != len(expected) {
t.Fatal("mempool: wrong number of filter results")
}
for i := 0; i < len(actual); i++ {
if actual[i] != expected[i] {
t.Fatalf("mempool: expected: %s actual: %s",
expected[i], actual[i])
}
}
// If the exclude list is empty, return the entire mempool.
actual = MempoolFilter(txidlist, []string{})
expected = []string{
"29e594c312eee49bc2c9ad37367ba58f857c4a7387ec9715",
"2e819d0bab5c819dc7d5f92d1bfb4127ce321daf847f6602",
"9839c1d4deca000656caff57c1f720f4fbd114b52239edde",
"ce5a28854a509ab309faa433542e73414fef6e903a3d52f5",
"d4714779c6dd32a72077bd79d4a70cb2153b552d7addec15",
"d4d090e60bf9141c6573f0598b84cc1f9817543e55a4d84d",
}
if len(actual) != len(expected) {
t.Fatal("mempool: wrong number of filter results")
}
for i := 0; i < len(actual); i++ {
if actual[i] != expected[i] {
t.Fatalf("mempool: expected: %s actual: %s",
expected[i], actual[i])
}
}
}
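The TestMempoolFilter cases above pin down the exclude-list semantics: each exclude entry is a txid prefix (extra trailing characters are ignored), a txid is dropped only when the prefix identifies exactly one mempool txid, and the result comes back in ascending order. The following is a minimal sketch consistent with those cases; MempoolFilterSketch is a hypothetical name used for illustration and is not the MempoolFilter implementation the test exercises.

package frontend

import (
	"sort"
	"strings"
)

// MempoolFilterSketch returns the txids in mempool that are not uniquely
// identified by any entry of exclude, sorted in ascending order.
func MempoolFilterSketch(mempool, exclude []string) []string {
	// Work on a sorted copy so the caller's slice is left untouched.
	sorted := append([]string(nil), mempool...)
	sort.Strings(sorted)
	result := make([]string, 0, len(sorted))
	for _, txid := range sorted {
		excluded := false
		for _, e := range exclude {
			if len(e) > len(txid) {
				e = e[:len(txid)] // extra trailing characters are ignored
			}
			if !strings.HasPrefix(txid, e) {
				continue
			}
			// Exclude only if this prefix matches exactly one mempool txid;
			// an ambiguous prefix leaves all of its matches in the result.
			matches := 0
			for _, other := range sorted {
				if strings.HasPrefix(other, e) {
					matches++
				}
			}
			if matches == 1 {
				excluded = true
				break
			}
		}
		if !excluded {
			result = append(result, txid)
		}
	}
	return result
}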

View File

@ -1,31 +1,67 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package frontend

import (
	"net"

	"github.com/adityapk00/lightwalletd/common"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/pkg/errors"
	ini "gopkg.in/ini.v1"
)

// NewZRPCFromConf reads the zcashd configuration file.
func NewZRPCFromConf(confPath interface{}) (*rpcclient.Client, error) {
	connCfg, err := connFromConf(confPath)
	if err != nil {
		return nil, err
	}
	return rpcclient.New(connCfg, nil)
}

// NewZRPCFromFlags gets zcashd rpc connection information from provided flags.
func NewZRPCFromFlags(opts *common.Options) (*rpcclient.Client, error) {
	// Connect to local Zcash RPC server using HTTP POST mode.
	connCfg := &rpcclient.ConnConfig{
		Host:         net.JoinHostPort(opts.RPCHost, opts.RPCPort),
		User:         opts.RPCUser,
		Pass:         opts.RPCPassword,
		HTTPPostMode: true, // Zcash only supports HTTP POST mode
		DisableTLS:   true, // Zcash does not provide TLS by default
	}
	return rpcclient.New(connCfg, nil)
}

// If passed a string, interpret as a path, open and read; if passed
// a byte slice, interpret as the config file content (used in testing).
func connFromConf(confPath interface{}) (*rpcclient.ConnConfig, error) {
	cfg, err := ini.Load(confPath)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read config file")
	}
	rpcaddr := cfg.Section("").Key("rpcbind").String()
	if rpcaddr == "" {
		rpcaddr = "127.0.0.1"
	}
	rpcport := cfg.Section("").Key("rpcport").String()
	if rpcport == "" {
		rpcport = "8232" // default mainnet
		testnet, _ := cfg.Section("").Key("testnet").Int()
		regtest, _ := cfg.Section("").Key("regtest").Int()
		if testnet > 0 || regtest > 0 {
			rpcport = "18232"
		}
	}
	username := cfg.Section("").Key("rpcuser").String()
	password := cfg.Section("").Key("rpcpassword").String()
	// Connect to local Zcash RPC server using HTTP POST mode.
	connCfg := &rpcclient.ConnConfig{
		Host:         net.JoinHostPort(rpcaddr, rpcport),
		User:         username,
		Pass:         password,
		HTTPPostMode: true, // Zcash only supports HTTP POST mode
		DisableTLS:   true, // Zcash does not provide TLS by default
	}
	// Notice the notification parameter is nil since notifications are
	// not supported in HTTP POST mode.
	return connCfg, nil
}
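Per the connFromConf comment above, NewZRPCFromConf accepts either a path to zcash.conf or the file's contents as a byte slice (the form the unit tests use). Below is a hedged usage sketch: the frontend import path is assumed from the module name, and the config path and credentials are placeholders rather than values taken from this PR.

package main

import (
	"log"

	"github.com/adityapk00/lightwalletd/frontend"
)

func main() {
	// Form 1: a string is treated as a path; ini.Load opens and reads the file.
	// The path here is only a placeholder.
	if _, err := frontend.NewZRPCFromConf("/srv/lightwalletd/zcash.conf"); err != nil {
		log.Fatalln("reading zcash.conf by path:", err)
	}

	// Form 2: a byte slice is treated as the config file content itself,
	// which is how TestNewZRPCFromConf drives connFromConf.
	conf := []byte("testnet = 1\nrpcport = 18232\nrpcbind = 127.0.0.1\nrpcuser = u\nrpcpassword = p\n")
	if _, err := frontend.NewZRPCFromConf(conf); err != nil {
		log.Fatalln("reading in-memory config:", err)
	}
}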

File diff suppressed because it is too large

57
go.mod
View File

@ -3,44 +3,29 @@ module github.com/adityapk00/lightwalletd
go 1.12

require (
	github.com/btcsuite/btcd v0.20.1-beta
	github.com/golang/protobuf v1.5.2
	github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de // indirect
	github.com/grpc-ecosystem/go-grpc-middleware v1.0.0
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.5.1
	github.com/sirupsen/logrus v1.4.2
	github.com/smartystreets/assertions v1.0.1 // indirect
	github.com/spf13/afero v1.5.1 // indirect
	github.com/spf13/cobra v1.0.0
	github.com/spf13/viper v1.6.2
	github.com/stretchr/testify v1.6.1 // indirect
	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
	golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 // indirect
	golang.org/x/text v0.3.6 // indirect
	google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46 // indirect
	google.golang.org/grpc v1.37.0
	google.golang.org/protobuf v1.26.0
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
	gopkg.in/ini.v1 v1.51.0
	gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8 // indirect
)

495
go.sum
View File

@ -1,398 +1,433 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 h1:A/EVblehb75cUgXA5njHPn0kLAsykn6mJGz7rnmW5W0=
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.0.0-20190926002857-ba530c4abb35 h1:o2mPiVrkVzpBg/Q+lSfuf/92pEgsSIJvsQ13DyHs/3A=
github.com/btcsuite/btcd v0.0.0-20190926002857-ba530c4abb35/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190908185732-236ed259b199/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190930153522-6ce02741cba3/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gopherjs/gopherjs v0.0.0-20190915194858-d3ddacdb130f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0 h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pebbe/zmq4 v1.0.0/go.mod h1:7N4y5R18zBiu3l0vajMUWQgZyjv464prE8RCyBcmnZM= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/rogpeppe/go-internal v1.3.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=
github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.5.1 h1:VHu76Lk0LSP1x254maIu2bplkWpfBWI+B+6fdoZprcg=
github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=
github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E=
github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/zcash-hackworks/lightwalletd v0.0.0-20190906012100-a91acdbdecfb h1:0vTMQHSoogL5ZI3Z3MUjpeEMx5RzHzUHScTAA3KiSks= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/zcash-hackworks/lightwalletd v0.0.0-20190906012100-a91acdbdecfb/go.mod h1:2114E67+iUQVth+Gi4K2pxJw9KWyIBODRYoDn5mFxlY= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/zcash-hackworks/lightwalletd v0.0.0-20190925163758-a4578219497f/go.mod h1:hSyp0zSIqYe3SdSRkKchLeccOW7vgOgZy1/Igldmr8o= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/zcash-hackworks/lightwalletd v0.0.0-20191007195656-ac5aa8e42f09/go.mod h1:hSyp0zSIqYe3SdSRkKchLeccOW7vgOgZy1/Igldmr8o= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4=
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20190912063710-ac5d2bfcbfe0/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20190919035709-81c71964d733/go.mod h1:lopKMxgphN5jWNwrkPRQU99WV/Hs5LrdgRBxZ5ELgOQ=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20190902063713-cb417be4ba39/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mobile v0.0.0-20190830201351-c6da95954960/go.mod h1:mJOp/i0LXPxJZ9weeIadcPqKVfS05Ai7m6/t9z1Hs/Y=
golang.org/x/mobile v0.0.0-20190910184405-b558ed863381/go.mod h1:p895TfNkDgPEmEQrNiOtIl3j98d/tGU95djDj7NfyjQ=
golang.org/x/mobile v0.0.0-20190923204409-d3ece3b6da5f/go.mod h1:p895TfNkDgPEmEQrNiOtIl3j98d/tGU95djDj7NfyjQ=
golang.org/x/mobile v0.0.0-20191002175909-6d0d39b2ca82/go.mod h1:p895TfNkDgPEmEQrNiOtIl3j98d/tGU95djDj7NfyjQ=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc=
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191007182048-72f939374954 h1:JGZucVF/L/TotR719NbujzadOZ2AgnYlqphQGHDCKaU=
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI=
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8 h1:41hwlulw1prEMBxLQSlMSux1zxJf07B3WPsdjJlKZxE=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 h1:F5Gozwx4I1xtr/sr/8CFbb57iKi3297KFs0QDbGN60A=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.0.0-20190905173453-6b3d1c9ba8bf/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190909214602-067311248421/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190912152909-b0a6c2aa3ffa/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190925164712-ae58c0ff6b32/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191007185444-6536af71d98a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.3/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46 h1:f4STrQZf8jaowsiUitigvrqMCCM4QJH1A2JCSI7U1ow=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 h1:Phvl0+G5t5k/EUFUi0wPdUUeTL2HydMQUXHnunWgSb0=
google.golang.org/genproto v0.0.0-20190916214212-f660b8655731/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191007204434-a023cd5227bd h1:84VQPzup3IpKLxuIAZjHMhVjJ8fZ4/i3yUnj3k6fUdw=
google.golang.org/genproto v0.0.0-20191007204434-a023cd5227bd/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag= gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/ini.v1 v1.48.0 h1:URjZc+8ugRY5mL5uUeQH/a63JcHwdX9xZaWvmNWD7z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/ini.v1 v1.48.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8 h1:tH9C0MON9YI3/KuD+u5+tQrQQ8px0MrcJ/avzeALw7o=
gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

7
lightwalletd-example.yml Normal file
View File

@ -0,0 +1,7 @@
bind-addr: 0.0.0.0:9067
cache-size: 10
log-file: /dev/stdout
log-level: 10
tls-cert: /secrets/lightwalletd/cert.pem
tls-key: /secrets/lightwalletd/cert.key
zcash-conf-path: /srv/zcashd/zcash.conf
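The keys above (bind-addr, cache-size, log-file, log-level, tls-cert, tls-key, zcash-conf-path) are consumed at startup by lightwalletd's cmd package, which is not part of this excerpt. As a rough, hypothetical sketch of reading such a YAML file with viper (which this PR pins in go.sum):
package main
import (
	"fmt"
	"log"
	"github.com/spf13/viper"
)
func main() {
	// Path and key names mirror the example file above; this is a sketch,
	// not lightwalletd's actual startup code.
	viper.SetConfigFile("lightwalletd-example.yml")
	if err := viper.ReadInConfig(); err != nil {
		log.Fatalf("cannot read config: %v", err)
	}
	fmt.Println("bind-addr:", viper.GetString("bind-addr"))
	fmt.Println("cache-size:", viper.GetInt("cache-size"))
	fmt.Println("zcash-conf-path:", viper.GetString("zcash-conf-path"))
}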

7
main.go Normal file
View File

@ -0,0 +1,7 @@
package main
import "github.com/adityapk00/lightwalletd/cmd"
func main() {
cmd.Execute()
}
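main.go only delegates to cmd.Execute(); the cmd package itself is not shown in this diff. Assuming a cobra-based CLI (cobra and viper both appear in go.sum), a minimal sketch of such an Execute, illustrative only and not the repository's actual implementation, could be:
package cmd
import (
	"fmt"
	"os"
	"github.com/spf13/cobra"
)
// rootCmd is a hypothetical root command for illustration.
var rootCmd = &cobra.Command{
	Use:   "lightwalletd",
	Short: "Backend service for Zcash light clients",
	Run: func(cmd *cobra.Command, args []string) {
		// server startup would go here
	},
}
// Execute runs the root command; main() calls this.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}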

View File

@ -1,31 +1,42 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// Package parser deserializes blocks from zcashd.
package parser package parser
import ( import (
"fmt" "fmt"
"github.com/pkg/errors"
"github.com/adityapk00/lightwalletd/parser/internal/bytestring" "github.com/adityapk00/lightwalletd/parser/internal/bytestring"
"github.com/adityapk00/lightwalletd/walletrpc" "github.com/adityapk00/lightwalletd/walletrpc"
"github.com/pkg/errors"
) )
// Block represents a full block (not a compact block).
type Block struct { type Block struct {
hdr *blockHeader hdr *BlockHeader
vtx []*Transaction vtx []*Transaction
height int height int
} }
// NewBlock constructs a block instance.
func NewBlock() *Block { func NewBlock() *Block {
return &Block{height: -1} return &Block{height: -1}
} }
// GetVersion returns a block's version number (currently 4)
func (b *Block) GetVersion() int { func (b *Block) GetVersion() int {
return int(b.hdr.Version) return int(b.hdr.Version)
} }
// GetTxCount returns the number of transactions in the block,
// including the coinbase transaction (minimum 1).
func (b *Block) GetTxCount() int { func (b *Block) GetTxCount() int {
return len(b.vtx) return len(b.vtx)
} }
// Transactions returns the list of the block's transactions.
func (b *Block) Transactions() []*Transaction { func (b *Block) Transactions() []*Transaction {
// TODO: these should NOT be mutable // TODO: these should NOT be mutable
return b.vtx return b.vtx
@ -43,20 +54,15 @@ func (b *Block) GetEncodableHash() []byte {
return b.hdr.GetEncodableHash() return b.hdr.GetEncodableHash()
} }
// GetDisplayPrevHash returns the block's previous hash in big-endian format.
func (b *Block) GetDisplayPrevHash() []byte { func (b *Block) GetDisplayPrevHash() []byte {
rhash := make([]byte, len(b.hdr.HashPrevBlock)) return b.hdr.GetDisplayPrevHash()
copy(rhash, b.hdr.HashPrevBlock)
// Reverse byte order
for i := 0; i < len(rhash)/2; i++ {
j := len(rhash) - 1 - i
rhash[i], rhash[j] = rhash[j], rhash[i]
}
return rhash
} }
// HasSaplingTransactions indicates if the block contains any Sapling tx.
func (b *Block) HasSaplingTransactions() bool { func (b *Block) HasSaplingTransactions() bool {
for _, tx := range b.vtx { for _, tx := range b.vtx {
if tx.HasSaplingTransactions() { if tx.HasSaplingElements() {
return true return true
} }
} }
@ -66,7 +72,7 @@ func (b *Block) HasSaplingTransactions() bool {
// see https://github.com/adityapk00/lightwalletd/issues/17#issuecomment-467110828 // see https://github.com/adityapk00/lightwalletd/issues/17#issuecomment-467110828
const genesisTargetDifficulty = 520617983 const genesisTargetDifficulty = 520617983
// GetHeight() extracts the block height from the coinbase transaction. See // GetHeight extracts the block height from the coinbase transaction. See
// BIP34. Returns block height on success, or -1 on error. // BIP34. Returns block height on success, or -1 on error.
func (b *Block) GetHeight() int { func (b *Block) GetHeight() int {
if b.height != -1 { if b.height != -1 {
@ -74,7 +80,7 @@ func (b *Block) GetHeight() int {
} }
coinbaseScript := bytestring.String(b.vtx[0].transparentInputs[0].ScriptSig) coinbaseScript := bytestring.String(b.vtx[0].transparentInputs[0].ScriptSig)
var heightNum int64 var heightNum int64
if ok := coinbaseScript.ReadScriptInt64(&heightNum); !ok { if !coinbaseScript.ReadScriptInt64(&heightNum) {
return -1 return -1
} }
if heightNum < 0 { if heightNum < 0 {
@ -94,23 +100,25 @@ func (b *Block) GetHeight() int {
return int(blockHeight) return int(blockHeight)
} }
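For reference, BIP 34 stores the height as the first push of the coinbase scriptSig: one length byte followed by that many little-endian bytes. A simplified standalone decoder, not the parser's ReadScriptInt64 and with minimal-encoding and sign handling omitted, might look like:
// decodeBIP34Height is an illustrative, simplified decoder: the first byte
// is the push length (1..8 here), followed by the height in little-endian.
func decodeBIP34Height(scriptSig []byte) int64 {
	if len(scriptSig) < 1 {
		return -1
	}
	n := int(scriptSig[0])
	if n < 1 || n > 8 || len(scriptSig) < 1+n {
		return -1
	}
	var height int64
	for i := 0; i < n; i++ {
		height |= int64(scriptSig[1+i]) << (8 * uint(i))
	}
	return height
}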
// GetPrevHash returns the hash of the block's previous block (little-endian).
func (b *Block) GetPrevHash() []byte { func (b *Block) GetPrevHash() []byte {
return b.hdr.HashPrevBlock return b.hdr.HashPrevBlock
} }
// ToCompact returns the compact representation of the full block.
func (b *Block) ToCompact() *walletrpc.CompactBlock { func (b *Block) ToCompact() *walletrpc.CompactBlock {
compactBlock := &walletrpc.CompactBlock{ compactBlock := &walletrpc.CompactBlock{
//TODO ProtoVersion: 1, //TODO ProtoVersion: 1,
Height: uint64(b.GetHeight()), Height: uint64(b.GetHeight()),
PrevHash: b.hdr.HashPrevBlock, PrevHash: b.hdr.HashPrevBlock,
Hash: b.GetEncodableHash(), Hash: b.GetEncodableHash(),
Time: b.hdr.Time, Time: b.hdr.Time,
} }
// Only Sapling transactions have a meaningful compact encoding // Only Sapling transactions have a meaningful compact encoding
saplingTxns := make([]*walletrpc.CompactTx, 0, len(b.vtx)) saplingTxns := make([]*walletrpc.CompactTx, 0, len(b.vtx))
for idx, tx := range b.vtx { for idx, tx := range b.vtx {
if tx.HasSaplingTransactions() { if tx.HasSaplingElements() {
saplingTxns = append(saplingTxns, tx.ToCompact(idx)) saplingTxns = append(saplingTxns, tx.ToCompact(idx))
} }
} }
@ -118,6 +126,9 @@ func (b *Block) ToCompact() *walletrpc.CompactBlock {
return compactBlock return compactBlock
} }
// ParseFromSlice deserializes a block from the given data stream
// and returns a slice of the remaining data. The caller should verify // and returns a slice of the remaining data. The caller should verify
// there is no remaining data if none is expected.
func (b *Block) ParseFromSlice(data []byte) (rest []byte, err error) { func (b *Block) ParseFromSlice(data []byte) (rest []byte, err error) {
hdr := NewBlockHeader() hdr := NewBlockHeader()
data, err = hdr.ParseFromSlice(data) data, err = hdr.ParseFromSlice(data)
@ -127,13 +138,14 @@ func (b *Block) ParseFromSlice(data []byte) (rest []byte, err error) {
s := bytestring.String(data) s := bytestring.String(data)
var txCount int var txCount int
if ok := s.ReadCompactSize(&txCount); !ok { if !s.ReadCompactSize(&txCount) {
return nil, errors.New("could not read tx_count") return nil, errors.New("could not read tx_count")
} }
data = []byte(s) data = []byte(s)
vtx := make([]*Transaction, 0, txCount) vtx := make([]*Transaction, 0, txCount)
for i := 0; len(data) > 0; i++ { var i int
for i = 0; i < txCount && len(data) > 0; i++ {
tx := NewTransaction() tx := NewTransaction()
data, err = tx.ParseFromSlice(data) data, err = tx.ParseFromSlice(data)
if err != nil { if err != nil {
@ -141,9 +153,10 @@ func (b *Block) ParseFromSlice(data []byte) (rest []byte, err error) {
} }
vtx = append(vtx, tx) vtx = append(vtx, tx)
} }
if i < txCount {
return nil, errors.New("parsing block transactions: not enough data")
}
b.hdr = hdr b.hdr = hdr
b.vtx = vtx b.vtx = vtx
return data, nil return data, nil
} }
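Putting this file's pieces together, a hypothetical caller could turn a raw hex-encoded block into its compact form roughly as follows (helper name and error handling are illustrative):
package main
import (
	"encoding/hex"
	"fmt"
	"github.com/adityapk00/lightwalletd/parser"
	"github.com/adityapk00/lightwalletd/walletrpc"
)
// compactFromRawBlock is an illustrative helper (not part of this PR):
// parse a full hex-encoded block and return its compact representation.
func compactFromRawBlock(blockHex string) (*walletrpc.CompactBlock, error) {
	raw, err := hex.DecodeString(blockHex)
	if err != nil {
		return nil, err
	}
	block := parser.NewBlock()
	rest, err := block.ParseFromSlice(raw)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, fmt.Errorf("unexpected %d trailing bytes after block", len(rest))
	}
	return block.ToCompact(), nil
}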

View File

@ -1,14 +1,18 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// Package parser deserializes the block header from zcashd.
package parser package parser
import ( import (
"bytes" "bytes"
"crypto/sha256" "crypto/sha256"
"encoding/binary" "encoding/binary"
"log"
"math/big" "math/big"
"github.com/pkg/errors"
"github.com/adityapk00/lightwalletd/parser/internal/bytestring" "github.com/adityapk00/lightwalletd/parser/internal/bytestring"
"github.com/pkg/errors"
) )
const ( const (
@ -16,8 +20,9 @@ const (
equihashSizeMainnet = 1344 // size of a mainnet / testnet Equihash solution in bytes equihashSizeMainnet = 1344 // size of a mainnet / testnet Equihash solution in bytes
) )
// A block header as defined in version 2018.0-beta-29 of the Zcash Protocol Spec. // RawBlockHeader implements the block header as defined in version
type rawBlockHeader struct { // 2018.0-beta-29 of the Zcash Protocol Spec.
type RawBlockHeader struct {
// The block version number indicates which set of block validation rules // The block version number indicates which set of block validation rules
// to follow. The current and only defined block version number for Zcash // to follow. The current and only defined block version number for Zcash
// is 4. // is 4.
@ -56,14 +61,15 @@ type rawBlockHeader struct {
Solution []byte Solution []byte
} }
type blockHeader struct { // BlockHeader extends RawBlockHeader by adding a cache for the block hash.
*rawBlockHeader type BlockHeader struct {
cachedHash []byte *RawBlockHeader
targetThreshold *big.Int cachedHash []byte
} }
func CompactLengthPrefixedLen(val []byte) int { // CompactLengthPrefixedLen calculates the total number of bytes needed to
length := len(val) // encode 'length' bytes.
func CompactLengthPrefixedLen(length int) int {
if length < 253 { if length < 253 {
return 1 + length return 1 + length
} else if length <= 0xffff { } else if length <= 0xffff {
@ -75,33 +81,34 @@ func CompactLengthPrefixedLen(val []byte) int {
} }
} }
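These sizes follow the Bitcoin-style CompactSize encoding; a few worked examples of what the function returns (prefix bytes plus payload bytes):
//   CompactLengthPrefixedLen(100)     == 1 + 100     // single-byte prefix, length < 253
//   CompactLengthPrefixedLen(1000)    == 3 + 1000    // 0xfd marker + uint16 length
//   CompactLengthPrefixedLen(100000)  == 5 + 100000  // 0xfe marker + uint32 length
//   CompactLengthPrefixedLen(1 << 33) == 9 + (1<<33) // 0xff marker + uint64 length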
func WriteCompactLengthPrefixed(buf *bytes.Buffer, val []byte) error { // WriteCompactLengthPrefixedLen writes the given length to the stream in CompactSize form.
length := len(val) func WriteCompactLengthPrefixedLen(buf *bytes.Buffer, length int) {
if length < 253 { if length < 253 {
binary.Write(buf, binary.LittleEndian, uint8(length)) binary.Write(buf, binary.LittleEndian, uint8(length))
binary.Write(buf, binary.LittleEndian, val)
} else if length <= 0xffff { } else if length <= 0xffff {
binary.Write(buf, binary.LittleEndian, byte(253)) binary.Write(buf, binary.LittleEndian, byte(253))
binary.Write(buf, binary.LittleEndian, uint16(length)) binary.Write(buf, binary.LittleEndian, uint16(length))
binary.Write(buf, binary.LittleEndian, val)
} else if length <= 0xffffffff { } else if length <= 0xffffffff {
binary.Write(buf, binary.LittleEndian, byte(254)) binary.Write(buf, binary.LittleEndian, byte(254))
binary.Write(buf, binary.LittleEndian, uint32(length)) binary.Write(buf, binary.LittleEndian, uint32(length))
binary.Write(buf, binary.LittleEndian, val)
} else { } else {
binary.Write(buf, binary.LittleEndian, byte(255)) binary.Write(buf, binary.LittleEndian, byte(255))
binary.Write(buf, binary.LittleEndian, uint64(length)) binary.Write(buf, binary.LittleEndian, uint64(length))
binary.Write(buf, binary.LittleEndian, val)
} }
return nil
} }
func (hdr *rawBlockHeader) GetSize() int { func writeCompactLengthPrefixed(buf *bytes.Buffer, val []byte) {
return serBlockHeaderMinusEquihashSize + CompactLengthPrefixedLen(hdr.Solution) WriteCompactLengthPrefixedLen(buf, len(val))
binary.Write(buf, binary.LittleEndian, val)
} }
func (hdr *rawBlockHeader) MarshalBinary() ([]byte, error) { func (hdr *RawBlockHeader) getSize() int {
headerSize := hdr.GetSize() return serBlockHeaderMinusEquihashSize + CompactLengthPrefixedLen(len(hdr.Solution))
}
// MarshalBinary returns the block header in serialized form
func (hdr *RawBlockHeader) MarshalBinary() ([]byte, error) {
headerSize := hdr.getSize()
backing := make([]byte, 0, headerSize) backing := make([]byte, 0, headerSize)
buf := bytes.NewBuffer(backing) buf := bytes.NewBuffer(backing)
binary.Write(buf, binary.LittleEndian, hdr.Version) binary.Write(buf, binary.LittleEndian, hdr.Version)
@ -111,53 +118,54 @@ func (hdr *rawBlockHeader) MarshalBinary() ([]byte, error) {
binary.Write(buf, binary.LittleEndian, hdr.Time) binary.Write(buf, binary.LittleEndian, hdr.Time)
binary.Write(buf, binary.LittleEndian, hdr.NBitsBytes) binary.Write(buf, binary.LittleEndian, hdr.NBitsBytes)
binary.Write(buf, binary.LittleEndian, hdr.Nonce) binary.Write(buf, binary.LittleEndian, hdr.Nonce)
WriteCompactLengthPrefixed(buf, hdr.Solution) writeCompactLengthPrefixed(buf, hdr.Solution)
return backing[:headerSize], nil return backing[:headerSize], nil
} }
func NewBlockHeader() *blockHeader { // NewBlockHeader returns a pointer to a new block header instance.
return &blockHeader{ func NewBlockHeader() *BlockHeader {
rawBlockHeader: new(rawBlockHeader), return &BlockHeader{
RawBlockHeader: new(RawBlockHeader),
} }
} }
// ParseFromSlice parses the block header struct from the provided byte slice, // ParseFromSlice parses the block header struct from the provided byte slice,
// advancing over the bytes read. If successful it returns the rest of the // advancing over the bytes read. If successful it returns the rest of the
// slice, otherwise it returns the input slice unaltered along with an error. // slice, otherwise it returns the input slice unaltered along with an error.
func (hdr *blockHeader) ParseFromSlice(in []byte) (rest []byte, err error) { func (hdr *BlockHeader) ParseFromSlice(in []byte) (rest []byte, err error) {
s := bytestring.String(in) s := bytestring.String(in)
// Primary parsing layer: sort the bytes into things // Primary parsing layer: sort the bytes into things
if ok := s.ReadInt32(&hdr.Version); !ok { if !s.ReadInt32(&hdr.Version) {
return in, errors.New("could not read header version") return in, errors.New("could not read header version")
} }
if ok := s.ReadBytes(&hdr.HashPrevBlock, 32); !ok { if !s.ReadBytes(&hdr.HashPrevBlock, 32) {
return in, errors.New("could not read HashPrevBlock") return in, errors.New("could not read HashPrevBlock")
} }
if ok := s.ReadBytes(&hdr.HashMerkleRoot, 32); !ok { if !s.ReadBytes(&hdr.HashMerkleRoot, 32) {
return in, errors.New("could not read HashMerkleRoot") return in, errors.New("could not read HashMerkleRoot")
} }
if ok := s.ReadBytes(&hdr.HashFinalSaplingRoot, 32); !ok { if !s.ReadBytes(&hdr.HashFinalSaplingRoot, 32) {
return in, errors.New("could not read HashFinalSaplingRoot") return in, errors.New("could not read HashFinalSaplingRoot")
} }
if ok := s.ReadUint32(&hdr.Time); !ok { if !s.ReadUint32(&hdr.Time) {
return in, errors.New("could not read timestamp") return in, errors.New("could not read timestamp")
} }
if ok := s.ReadBytes(&hdr.NBitsBytes, 4); !ok { if !s.ReadBytes(&hdr.NBitsBytes, 4) {
return in, errors.New("could not read NBits bytes") return in, errors.New("could not read NBits bytes")
} }
if ok := s.ReadBytes(&hdr.Nonce, 32); !ok { if !s.ReadBytes(&hdr.Nonce, 32) {
return in, errors.New("could not read Nonce bytes") return in, errors.New("could not read Nonce bytes")
} }
if ok := s.ReadCompactLengthPrefixed((*bytestring.String)(&hdr.Solution)); !ok { if !s.ReadCompactLengthPrefixed((*bytestring.String)(&hdr.Solution)) {
return in, errors.New("could not read CompactSize-prefixed Equihash solution") return in, errors.New("could not read CompactSize-prefixed Equihash solution")
} }
@ -187,14 +195,13 @@ func parseNBits(b []byte) *big.Int {
} }
// GetDisplayHash returns the bytes of a block hash in big-endian order. // GetDisplayHash returns the bytes of a block hash in big-endian order.
func (hdr *blockHeader) GetDisplayHash() []byte { func (hdr *BlockHeader) GetDisplayHash() []byte {
if hdr.cachedHash != nil { if hdr.cachedHash != nil {
return hdr.cachedHash return hdr.cachedHash
} }
serializedHeader, err := hdr.MarshalBinary() serializedHeader, err := hdr.MarshalBinary()
if err != nil { if err != nil {
log.Fatalf("error marshaling block header: %v", err)
return nil return nil
} }
@ -202,22 +209,16 @@ func (hdr *blockHeader) GetDisplayHash() []byte {
digest := sha256.Sum256(serializedHeader) digest := sha256.Sum256(serializedHeader)
digest = sha256.Sum256(digest[:]) digest = sha256.Sum256(digest[:])
// Reverse byte order // Convert to big-endian
for i := 0; i < len(digest)/2; i++ { hdr.cachedHash = Reverse(digest[:])
j := len(digest) - 1 - i
digest[i], digest[j] = digest[j], digest[i]
}
hdr.cachedHash = digest[:]
return hdr.cachedHash return hdr.cachedHash
} }
// GetEncodableHash returns the bytes of a block hash in little-endian wire order. // GetEncodableHash returns the bytes of a block hash in little-endian wire order.
func (hdr *blockHeader) GetEncodableHash() []byte { func (hdr *BlockHeader) GetEncodableHash() []byte {
serializedHeader, err := hdr.MarshalBinary() serializedHeader, err := hdr.MarshalBinary()
if err != nil { if err != nil {
log.Fatalf("error marshaling block header: %v", err)
return nil return nil
} }
@ -227,3 +228,8 @@ func (hdr *blockHeader) GetEncodableHash() []byte {
return digest[:] return digest[:]
} }
// GetDisplayPrevHash returns the block hash in big-endian order.
func (hdr *BlockHeader) GetDisplayPrevHash() []byte {
return Reverse(hdr.HashPrevBlock)
}


@ -1,3 +1,6 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package parser package parser
import ( import (
@ -64,6 +67,7 @@ func TestBlockHeader(t *testing.T) {
lastBlockTime := uint32(0) lastBlockTime := uint32(0)
scan := bufio.NewScanner(testBlocks) scan := bufio.NewScanner(testBlocks)
var prevHash []byte
for scan.Scan() { for scan.Scan() {
blockDataHex := scan.Text() blockDataHex := scan.Text()
blockData, err := hex.DecodeString(blockDataHex) blockData, err := hex.DecodeString(blockDataHex)
@ -124,6 +128,10 @@ func TestBlockHeader(t *testing.T) {
} }
hash := blockHeader.GetDisplayHash() hash := blockHeader.GetDisplayHash()
// test caching
if !bytes.Equal(hash, blockHeader.GetDisplayHash()) {
t.Error("caching is broken")
}
// This is not necessarily true for anything but our current test cases. // This is not necessarily true for anything but our current test cases.
for _, b := range hash[:4] { for _, b := range hash[:4] {
@ -131,5 +139,121 @@ func TestBlockHeader(t *testing.T) {
t.Errorf("Hash lacked leading zeros: %x", hash) t.Errorf("Hash lacked leading zeros: %x", hash)
} }
} }
if prevHash != nil && !bytes.Equal(blockHeader.GetDisplayPrevHash(), prevHash) {
t.Errorf("Previous hash mismatch")
}
prevHash = hash
}
}
func TestBadBlockHeader(t *testing.T) {
testBlocks, err := os.Open("../testdata/badblocks")
if err != nil {
t.Fatal(err)
}
defer testBlocks.Close()
scan := bufio.NewScanner(testBlocks)
// the first "block" contains an illegal hex character
{
scan.Scan()
blockDataHex := scan.Text()
_, err := hex.DecodeString(blockDataHex)
if err == nil {
t.Error("unexpected success parsing illegal hex bad block")
}
}
// these bad blocks are short in various ways
for i := 1; scan.Scan(); i++ {
blockDataHex := scan.Text()
blockData, err := hex.DecodeString(blockDataHex)
if err != nil {
t.Error(err)
continue
}
blockHeader := NewBlockHeader()
_, err = blockHeader.ParseFromSlice(blockData)
if err == nil {
t.Errorf("unexpected success parsing bad block %d", i)
}
}
}
var compactLengthPrefixedLenTests = []struct {
length int
returnLength int
}{
/* 00 */ {0, 1},
/* 01 */ {1, 1 + 1},
/* 02 */ {2, 1 + 2},
/* 03 */ {252, 1 + 252},
/* 04 */ {253, 1 + 2 + 253},
/* 05 */ {0xffff, 1 + 2 + 0xffff},
/* 06 */ {0x10000, 1 + 4 + 0x10000},
/* 07 */ {0x10001, 1 + 4 + 0x10001},
/* 08 */ {0xffffffff, 1 + 4 + 0xffffffff},
/* 09 */ {0x100000000, 1 + 8 + 0x100000000},
/* 10 */ {0x100000001, 1 + 8 + 0x100000001},
}
func TestCompactLengthPrefixedLen(t *testing.T) {
for i, tt := range compactLengthPrefixedLenTests {
returnLength := CompactLengthPrefixedLen(tt.length)
if returnLength != tt.returnLength {
t.Errorf("TestCompactLengthPrefixedLen case %d: want: %v have %v",
i, tt.returnLength, returnLength)
}
}
}
var writeCompactLengthPrefixedTests = []struct {
argLen int
returnLength int
header []byte
}{
/* 00 */ {0, 1, []byte{0}},
/* 01 */ {1, 1, []byte{1}},
/* 02 */ {2, 1, []byte{2}},
/* 03 */ {252, 1, []byte{252}},
/* 04 */ {253, 1 + 2, []byte{253, 253, 0}},
/* 05 */ {254, 1 + 2, []byte{253, 254, 0}},
/* 06 */ {0xffff, 1 + 2, []byte{253, 0xff, 0xff}},
/* 07 */ {0x10000, 1 + 4, []byte{254, 0x00, 0x00, 0x01, 0x00}},
/* 08 */ {0x10003, 1 + 4, []byte{254, 0x03, 0x00, 0x01, 0x00}},
/* 09 */ {0xffffffff, 1 + 4, []byte{254, 0xff, 0xff, 0xff, 0xff}},
/* 10 */ {0x100000000, 1 + 8, []byte{255, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00}},
/* 11 */ {0x100000007, 1 + 8, []byte{255, 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00}},
}
func TestWriteCompactLengthPrefixedLen(t *testing.T) {
for i, tt := range writeCompactLengthPrefixedTests {
var b bytes.Buffer
WriteCompactLengthPrefixedLen(&b, tt.argLen)
if b.Len() != tt.returnLength {
t.Fatalf("TestWriteCompactLengthPrefixed case %d: unexpected length", i)
}
// check the header (tag and length)
r := make([]byte, len(tt.header))
b.Read(r)
if !bytes.Equal(r, tt.header) {
t.Fatalf("TestWriteCompactLengthPrefixed case %d: incorrect header", i)
}
if b.Len() > 0 {
t.Fatalf("TestWriteCompactLengthPrefixed case %d: unexpected data remaining", i)
}
}
}
func TestWriteCompactLengthPrefixed(t *testing.T) {
var b bytes.Buffer
val := []byte{22, 33, 44}
writeCompactLengthPrefixed(&b, val)
r := make([]byte, 4)
b.Read(r)
expected := []byte{3, 22, 33, 44}
if !bytes.Equal(r, expected) {
t.Fatal("TestWriteCompactLengthPrefixed incorrect result")
} }
} }
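For readers skimming these vectors, here is a minimal sketch (hypothetical names, not the functions under test) of the CompactSize rules the two tables above exercise: a length below 253 fits in the tag byte itself, and the 253/254/255 tags introduce 2-, 4-, and 8-byte little-endian lengths. It assumes a 64-bit int.

// compactsize_sketch.go -- illustrative only; mirrors the test tables above.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// compactLen returns the serialized size of a length-prefixed payload of n
// bytes: the tag byte, any extra length bytes, plus the n payload bytes.
func compactLen(n int) int {
	switch {
	case n < 253:
		return 1 + n // the tag byte doubles as the length
	case n <= 0xffff:
		return 1 + 2 + n // tag 253, then a 2-byte little-endian length
	case n <= 0xffffffff:
		return 1 + 4 + n // tag 254, then a 4-byte little-endian length
	default:
		return 1 + 8 + n // tag 255, then an 8-byte little-endian length
	}
}

// writeCompactLen writes only the tag and length bytes for an n-byte payload.
func writeCompactLen(buf *bytes.Buffer, n int) {
	switch {
	case n < 253:
		buf.WriteByte(byte(n))
	case n <= 0xffff:
		buf.WriteByte(253)
		binary.Write(buf, binary.LittleEndian, uint16(n))
	case n <= 0xffffffff:
		buf.WriteByte(254)
		binary.Write(buf, binary.LittleEndian, uint32(n))
	default:
		buf.WriteByte(255)
		binary.Write(buf, binary.LittleEndian, uint64(n))
	}
}

func main() {
	var b bytes.Buffer
	writeCompactLen(&b, 0x10003)
	fmt.Printf("%d bytes total, header % x\n", compactLen(0x10003), b.Bytes())
	// prints: 65544 bytes total, header fe 03 00 01 00 (test case 08 above)
}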


@ -1,7 +1,11 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package parser package parser
import ( import (
"bufio" "bufio"
"bytes"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
@ -15,6 +19,21 @@ import (
) )
func TestBlockParser(t *testing.T) { func TestBlockParser(t *testing.T) {
// These txids (valid on testnet) correspond to the transactions in testdata/blocks;
// for each block, the hashes of the transactions within that block.
var txhashes = [][]string{
{
"81096ff101a4f01d25ffd34a446bee4368bd46c233a59ac0faf101e1861c6b22",
}, {
"921dc41bef3a0d887c615abac60a29979efc8b4bbd3d887caeb6bb93501bde8e",
}, {
"d8e4c336ffa69dacaa4e0b4eaf8e3ae46897f1930a573c10b53837a03318c980",
"4d5ccbfc6984680c481ff5ce145b8a93d59dfea90c150dfa45c938ab076ee5b2",
}, {
"df2b03619d441ce3d347e9278d87618e975079d0e235dfb3b3d8271510f707aa",
"8d2593edfc328fa637b4ac91c7d569ee922bb9a6fda7cea230e92deb3ae4b634",
},
}
testBlocks, err := os.Open("../testdata/blocks") testBlocks, err := os.Open("../testdata/blocks")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -22,6 +41,122 @@ func TestBlockParser(t *testing.T) {
defer testBlocks.Close() defer testBlocks.Close()
scan := bufio.NewScanner(testBlocks) scan := bufio.NewScanner(testBlocks)
for blockindex := 0; scan.Scan(); blockindex++ {
blockDataHex := scan.Text()
blockData, err := hex.DecodeString(blockDataHex)
if err != nil {
t.Error(err)
continue
}
// This is just a sanity check of the test:
if int(blockData[1487]) != len(txhashes[blockindex]) {
t.Error("wrong number of transactions, test broken?")
}
// Make a copy of just the transactions alone, which,
// for these blocks, start just beyond the header and
// the one-byte nTx value, which is offset 1488.
transactions := make([]byte, len(blockData[1488:]))
copy(transactions, blockData[1488:])
// Each iteration of this loop appends the block's original
// transactions, so we build an ever-larger block. The loop
// limit is arbitrary, but it pushes the transaction count past 252 so
// the CompactSize encoding needs its multi-byte form.
for i := 0; i < 264; i++ {
b := blockData
block := NewBlock()
b, err = block.ParseFromSlice(b)
if err != nil {
t.Error(errors.Wrap(err, fmt.Sprintf("parsing block %d", i)))
continue
}
if len(b) > 0 {
t.Error("Extra data remaining")
}
// Some basic sanity checks
if block.hdr.Version != 4 {
t.Error("Read wrong version in a test block.")
break
}
if block.GetVersion() != 4 {
t.Error("Read wrong version in a test block.")
break
}
if block.GetTxCount() < 1 {
t.Error("No transactions in block")
break
}
if len(block.Transactions()) != block.GetTxCount() {
t.Error("Number of transactions mismatch")
break
}
if block.GetTxCount() != len(txhashes[blockindex])*(i+1) {
t.Error("Unexpected number of transactions")
}
if block.HasSaplingTransactions() {
t.Error("Unexpected Sapling tx")
break
}
for txindex, tx := range block.Transactions() {
if tx.HasSaplingElements() {
t.Error("Unexpected Sapling tx")
break
}
expectedHash := txhashes[blockindex][txindex%len(txhashes[blockindex])]
if hex.EncodeToString(tx.GetDisplayHash()) != expectedHash {
t.Error("incorrect tx hash")
}
}
// Keep appending the original transactions, which is unrealistic
// because the coinbase is being replicated, but it works; first do
// some surgery to the transaction count (see DarksideApplyStaged()).
for j := 0; j < len(txhashes[blockindex]); j++ {
nTxFirstByte := blockData[1487]
switch {
case nTxFirstByte < 252:
blockData[1487]++
case nTxFirstByte == 252:
// incrementing to 253 requires the 253 marker followed by a 2-byte length,
// so extend the block by two bytes and shift the existing transaction bytes
blockData = append(blockData, 0, 0)
copy(blockData[1490:], blockData[1488:len(blockData)-2])
blockData[1487] = 253
blockData[1488] = 253
blockData[1489] = 0
case nTxFirstByte == 253:
blockData[1488]++
if blockData[1488] == 0 {
// wrapped around
blockData[1489]++
}
}
}
blockData = append(blockData, transactions...)
}
}
}
func TestBlockParserFail(t *testing.T) {
testBlocks, err := os.Open("../testdata/badblocks")
if err != nil {
t.Fatal(err)
}
defer testBlocks.Close()
scan := bufio.NewScanner(testBlocks)
// the first "block" contains an illegal hex character
{
scan.Scan()
blockDataHex := scan.Text()
_, err := hex.DecodeString(blockDataHex)
if err == nil {
t.Error("unexpected success parsing illegal hex bad block")
}
}
for i := 0; scan.Scan(); i++ { for i := 0; scan.Scan(); i++ {
blockDataHex := scan.Text() blockDataHex := scan.Text()
blockData, err := hex.DecodeString(blockDataHex) blockData, err := hex.DecodeString(blockDataHex)
@ -32,20 +167,8 @@ func TestBlockParser(t *testing.T) {
block := NewBlock() block := NewBlock()
blockData, err = block.ParseFromSlice(blockData) blockData, err = block.ParseFromSlice(blockData)
if err != nil { if err == nil {
t.Error(errors.Wrap(err, fmt.Sprintf("parsing block %d", i))) t.Error("unexpected success parsing bad block")
continue
}
// Some basic sanity checks
if block.hdr.Version != 4 {
t.Error("Read wrong version in a test block.")
break
}
if block.GetTxCount() < 1 {
t.Error("No transactions in block")
break
} }
} }
} }
@ -73,6 +196,9 @@ func TestGenesisBlockParser(t *testing.T) {
t.Error(err) t.Error(err)
continue continue
} }
if len(blockData) > 0 {
t.Error("Extra data remaining")
}
// Some basic sanity checks // Some basic sanity checks
if block.hdr.Version != 4 { if block.hdr.Version != 4 {
@ -90,6 +216,7 @@ func TestCompactBlocks(t *testing.T) {
type compactTest struct { type compactTest struct {
BlockHeight int `json:"block"` BlockHeight int `json:"block"`
BlockHash string `json:"hash"` BlockHash string `json:"hash"`
PrevHash string `json:"prev"`
Full string `json:"full"` Full string `json:"full"`
Compact string `json:"compact"` Compact string `json:"compact"`
} }
@ -113,6 +240,9 @@ func TestCompactBlocks(t *testing.T) {
t.Error(errors.Wrap(err, fmt.Sprintf("parsing testnet block %d", test.BlockHeight))) t.Error(errors.Wrap(err, fmt.Sprintf("parsing testnet block %d", test.BlockHeight)))
continue continue
} }
if len(blockData) > 0 {
t.Error("Extra data remaining")
}
if block.GetHeight() != test.BlockHeight { if block.GetHeight() != test.BlockHeight {
t.Errorf("incorrect block height in testnet block %d", test.BlockHeight) t.Errorf("incorrect block height in testnet block %d", test.BlockHeight)
continue continue
@ -121,6 +251,13 @@ func TestCompactBlocks(t *testing.T) {
t.Errorf("incorrect block hash in testnet block %x", test.BlockHash) t.Errorf("incorrect block hash in testnet block %x", test.BlockHash)
continue continue
} }
if hex.EncodeToString(block.GetDisplayPrevHash()) != test.PrevHash {
t.Errorf("incorrect block prevhash in testnet block %x", test.BlockHash)
continue
}
if !bytes.Equal(block.GetPrevHash(), block.hdr.HashPrevBlock) {
t.Error("block and block header prevhash don't match")
}
compact := block.ToCompact() compact := block.ToCompact()
marshaled, err := protobuf.Marshal(compact) marshaled, err := protobuf.Marshal(compact)


@ -1,3 +1,6 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// +build gofuzz // +build gofuzz
package parser package parser


@ -1,9 +1,12 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// Package bytestring provides a cryptobyte-inspired API specialized to the // Package bytestring provides a cryptobyte-inspired API specialized to the
// needs of parsing Zcash transactions. // needs of parsing Zcash transactions.
package bytestring package bytestring
import ( import (
"errors"
"io" "io"
) )
@ -45,9 +48,7 @@ func (s *String) Read(p []byte) (n int, err error) {
} }
n = copy(p, *s) n = copy(p, *s)
if !s.Skip(n) { s.Skip(n)
return 0, errors.New("unexpected end of bytestring read")
}
return n, nil return n, nil
} }
@ -58,7 +59,11 @@ func (s *String) Empty() bool {
// Skip advances the string by n bytes and reports whether it was successful. // Skip advances the string by n bytes and reports whether it was successful.
func (s *String) Skip(n int) bool { func (s *String) Skip(n int) bool {
return s.read(n) != nil if len(*s) < n {
return false
}
(*s) = (*s)[n:]
return true
} }
// ReadByte reads a single byte into out and advances over it. It reports if // ReadByte reads a single byte into out and advances over it. It reports if
@ -87,6 +92,7 @@ func (s *String) ReadBytes(out *[]byte, n int) bool {
// encoding used for length-prefixing and count values. If the values fall // encoding used for length-prefixing and count values. If the values fall
// outside the expected canonical ranges, it returns false. // outside the expected canonical ranges, it returns false.
func (s *String) ReadCompactSize(size *int) bool { func (s *String) ReadCompactSize(size *int) bool {
*size = 0
lenBytes := s.read(1) lenBytes := s.read(1)
if lenBytes == nil { if lenBytes == nil {
return false return false
@ -106,13 +112,18 @@ func (s *String) ReadCompactSize(size *int) bool {
lenLen = 4 lenLen = 4
minSize = 0x10000 minSize = 0x10000
case lenByte == 255: case lenByte == 255:
lenLen = 8 // this case is not currently usable, beyond maxCompactSize;
minSize = 0x100000000 // also, this is not possible if sizeof(int) is 4 bytes
// lenLen = 8; minSize = 0x100000000
return false
} }
if lenLen > 0 { if lenLen > 0 {
// expect little endian uint of varying size // expect little endian uint of varying size
lenBytes := s.read(lenLen) lenBytes := s.read(lenLen)
if len(lenBytes) < lenLen {
return false
}
for i := lenLen - 1; i >= 0; i-- { for i := lenLen - 1; i >= 0; i-- {
length <<= 8 length <<= 8
length = length | uint64(lenBytes[i]) length = length | uint64(lenBytes[i])
@ -122,7 +133,6 @@ func (s *String) ReadCompactSize(size *int) bool {
if length > maxCompactSize || length < minSize { if length > maxCompactSize || length < minSize {
return false return false
} }
*size = int(length) *size = int(length)
return true return true
} }
@ -131,7 +141,7 @@ func (s *String) ReadCompactSize(size *int) bool {
// length field into out. It reports whether the read was successful. // length field into out. It reports whether the read was successful.
func (s *String) ReadCompactLengthPrefixed(out *String) bool { func (s *String) ReadCompactLengthPrefixed(out *String) bool {
var length int var length int
if ok := s.ReadCompactSize(&length); !ok { if !s.ReadCompactSize(&length) {
return false return false
} }
@ -148,7 +158,7 @@ func (s *String) ReadCompactLengthPrefixed(out *String) bool {
// signed, and advances over it. It reports whether the read was successful. // signed, and advances over it. It reports whether the read was successful.
func (s *String) ReadInt32(out *int32) bool { func (s *String) ReadInt32(out *int32) bool {
var tmp uint32 var tmp uint32
if ok := s.ReadUint32(&tmp); !ok { if !s.ReadUint32(&tmp) {
return false return false
} }
@ -160,7 +170,7 @@ func (s *String) ReadInt32(out *int32) bool {
// signed, and advances over it. It reports whether the read was successful. // signed, and advances over it. It reports whether the read was successful.
func (s *String) ReadInt64(out *int64) bool { func (s *String) ReadInt64(out *int64) bool {
var tmp uint64 var tmp uint64
if ok := s.ReadUint64(&tmp); !ok { if !s.ReadUint64(&tmp) {
return false return false
} }
@ -175,7 +185,11 @@ func (s *String) ReadUint16(out *uint16) bool {
if v == nil { if v == nil {
return false return false
} }
*out = uint16(v[0]) | uint16(v[1])<<8 *out = 0
for i := 1; i >= 0; i-- {
*out <<= 8
*out |= uint16(v[i])
}
return true return true
} }
@ -186,7 +200,11 @@ func (s *String) ReadUint32(out *uint32) bool {
if v == nil { if v == nil {
return false return false
} }
*out = uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24 *out = 0
for i := 3; i >= 0; i-- {
*out <<= 8
*out |= uint32(v[i])
}
return true return true
} }
@ -197,8 +215,11 @@ func (s *String) ReadUint64(out *uint64) bool {
if v == nil { if v == nil {
return false return false
} }
*out = uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24 | *out = 0
uint64(v[4])<<32 | uint64(v[5])<<40 | uint64(v[6])<<48 | uint64(v[7])<<56 for i := 7; i >= 0; i-- {
*out <<= 8
*out |= uint64(v[i])
}
return true return true
} }
@ -213,6 +234,7 @@ func (s *String) ReadUint64(out *uint64) bool {
func (s *String) ReadScriptInt64(num *int64) bool { func (s *String) ReadScriptInt64(num *int64) bool {
// First byte is either an integer opcode, or the number of bytes in the // First byte is either an integer opcode, or the number of bytes in the
// number. // number.
*num = 0
firstBytes := s.read(1) firstBytes := s.read(1)
if firstBytes == nil { if firstBytes == nil {
return false return false


@ -1,3 +1,6 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package bytestring package bytestring
import ( import (
@ -182,6 +185,8 @@ func TestString_ReadBytes(t *testing.T) {
} }
} }
// compact sizes are little-endian (least significant byte first, lower memory addr),
// see https://en.bitcoin.it/wiki/Protocol_documentation#Variable_length_integer
var readCompactSizeTests = []struct { var readCompactSizeTests = []struct {
s String s String
ok bool ok bool
@ -193,13 +198,15 @@ var readCompactSizeTests = []struct {
/* 03 */ {String{253, 1, 0}, false, 0}, // 1 < minSize (253) /* 03 */ {String{253, 1, 0}, false, 0}, // 1 < minSize (253)
/* 04 */ {String{253, 252, 0}, false, 0}, // 252 < minSize (253) /* 04 */ {String{253, 252, 0}, false, 0}, // 252 < minSize (253)
/* 05 */ {String{253, 253, 0}, true, 253}, /* 05 */ {String{253, 253, 0}, true, 253},
/* 06 */ {String{253, 255, 255}, true, 0xffff}, /* 06 */ {String{253, 254, 0}, true, 254},
/* 07 */ {String{254, 0xff, 0xff, 0, 0}, false, 0}, // 0xffff < minSize /* 07 */ {String{253, 254}, false, 0}, // insufficient length bytes
/* 08 */ {String{254, 0, 0, 1, 0}, true, 0x00010000}, /* 08 */ {String{253, 255, 255}, true, 0xffff},
/* 09 */ {String{254, 7, 0, 1, 0}, true, 0x00010007}, /* 09 */ {String{254, 0xff, 0xff, 0, 0}, false, 0}, // 0xffff < minSize
/* 10 */ {String{254, 0, 0, 0, 2}, true, 0x02000000}, /* 10 */ {String{254, 0, 0, 1, 0}, true, 0x00010000},
/* 11 */ {String{254, 7, 0, 1, 0}, true, 0x00010007},
/* 12 */ {String{254, 0, 0, 0, 2}, true, 0x02000000},
/* 11 */ {String{254, 1, 0, 0, 2}, false, 0}, // > maxCompactSize /* 11 */ {String{254, 1, 0, 0, 2}, false, 0}, // > maxCompactSize
/* 12 */ {String{255, 0, 0, 0, 2, 0, 0, 0, 0}, false, 0}, /* 14 */ {String{255, 0, 0, 0, 2, 0, 0, 0, 0}, false, 0},
} }
func TestString_ReadCompactSize(t *testing.T) { func TestString_ReadCompactSize(t *testing.T) {
@ -207,10 +214,10 @@ func TestString_ReadCompactSize(t *testing.T) {
var expected int var expected int
ok := tt.s.ReadCompactSize(&expected) ok := tt.s.ReadCompactSize(&expected)
if ok != tt.ok { if ok != tt.ok {
t.Fatalf("ReadCompactSize case %d: want: %v, have: %v", i, tt.ok, ok) t.Errorf("ReadCompactSize case %d: want: %v, have: %v", i, tt.ok, ok)
} }
if expected != tt.expected { if expected != tt.expected {
t.Fatalf("ReadCompactSize case %d: want: %v, have: %v", i, tt.expected, expected) t.Errorf("ReadCompactSize case %d: want: %v, have: %v", i, tt.expected, expected)
} }
} }
} }
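As a companion to the table above, a hypothetical decoder sketch that mirrors the accept/reject behaviour of these vectors (illustrative names, not the bytestring API; the maxCompactSize value is inferred from the largest accepted case):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const maxCompactSize = 0x02000000

func readCompactSize(b []byte) (int, []byte, error) {
	if len(b) == 0 {
		return 0, b, errors.New("empty input")
	}
	tag, rest := b[0], b[1:]
	switch {
	case tag < 253:
		return int(tag), rest, nil // the tag byte is the value itself
	case tag == 253:
		if len(rest) < 2 {
			return 0, b, errors.New("insufficient length bytes")
		}
		v := int(binary.LittleEndian.Uint16(rest))
		if v < 253 {
			return 0, b, errors.New("non-canonical encoding")
		}
		return v, rest[2:], nil
	case tag == 254:
		if len(rest) < 4 {
			return 0, b, errors.New("insufficient length bytes")
		}
		v := int(binary.LittleEndian.Uint32(rest))
		if v < 0x10000 || v > maxCompactSize {
			return 0, b, errors.New("non-canonical or oversized length")
		}
		return v, rest[4:], nil
	default: // tag 255: always larger than maxCompactSize, so reject
		return 0, b, errors.New("8-byte lengths not accepted")
	}
}

func main() {
	v, _, err := readCompactSize([]byte{254, 0, 0, 1, 0})
	fmt.Println(v, err) // 65536 <nil>, matching the 0x00010000 case above
}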
@ -526,10 +533,10 @@ var readScriptInt64Tests = []struct {
/* 05 */ {String{0x5f}, true, 0x0f}, /* 05 */ {String{0x5f}, true, 0x0f},
/* 06 */ {String{0x60}, true, 0x10}, /* 06 */ {String{0x60}, true, 0x10},
/* 07 */ {String{0x01}, false, 0}, // should be one byte following count 0x01 /* 07 */ {String{0x01}, false, 0}, // should be one byte following count 0x01
/* 07 */ {String{0x01, 0xbd}, true, 0xbd}, /* 08 */ {String{0x01, 0xbd}, true, 0xbd},
/* 07 */ {String{0x02, 0xbd, 0xac}, true, 0xacbd}, /* 09 */ {String{0x02, 0xbd, 0xac}, true, 0xacbd},
/* 07 */ {String{0x08, 0xbd, 0xac, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x44}, true, 0x449a78563412acbd}, /* 10 */ {String{0x08, 0xbd, 0xac, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x44}, true, 0x449a78563412acbd},
/* 07 */ {String{0x08, 0xbd, 0xac, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x94}, true, -7738740698046616387}, /* 11 */ {String{0x08, 0xbd, 0xac, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x94}, true, -7738740698046616387},
} }
func TestString_ReadScriptInt64(t *testing.T) { func TestString_ReadScriptInt64(t *testing.T) {
@ -537,14 +544,14 @@ func TestString_ReadScriptInt64(t *testing.T) {
var v int64 var v int64
ok := tt.s.ReadScriptInt64(&v) ok := tt.s.ReadScriptInt64(&v)
if ok != tt.ok { if ok != tt.ok {
t.Fatalf("ReadScriptInt64 case %d: want: %v, have: %v", i, tt.ok, ok) t.Errorf("ReadScriptInt64 case %d: want: %v, have: %v", i, tt.ok, ok)
} }
if v != tt.expected { if v != tt.expected {
t.Fatalf("ReadScriptInt64 case %d: want: %v, have: %v", i, tt.expected, v) t.Errorf("ReadScriptInt64 case %d: want: %v, have: %v", i, tt.expected, v)
} }
// there should be no bytes remaining // there should be no bytes remaining
if ok && len(tt.s) != 0 { if ok && len(tt.s) != 0 {
t.Fatalf("ReadScriptInt64 case %d: stream mispositioned", i) t.Errorf("ReadScriptInt64 case %d: stream mispositioned", i)
} }
} }
} }
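For context, a hypothetical sketch consistent with the ReadScriptInt64 vectors visible here; it is not the bytestring implementation, and the real function may recognise additional opcodes not covered by this sketch.

package main

import (
	"errors"
	"fmt"
)

func readScriptInt64(b []byte) (int64, error) {
	if len(b) == 0 {
		return 0, errors.New("empty input")
	}
	first := b[0]
	// Small-integer opcodes OP_1..OP_16 (0x51..0x60) encode 1..16 directly.
	if first >= 0x51 && first <= 0x60 {
		return int64(first - 0x50), nil
	}
	// Otherwise the first byte is a count of little-endian bytes to follow.
	n := int(first)
	if n < 1 || n > 8 || len(b) < 1+n {
		return 0, errors.New("malformed script integer")
	}
	var v int64
	for i := n - 1; i >= 0; i-- {
		v <<= 8
		v |= int64(b[1+i])
	}
	return v, nil
}

func main() {
	v, _ := readScriptInt64([]byte{0x02, 0xbd, 0xac})
	fmt.Printf("%#x\n", v) // 0xacbd, matching case 09 above
}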


@ -1,3 +1,8 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
// Package parser deserializes (full) transactions (zcashd).
package parser package parser
import ( import (
@ -11,7 +16,7 @@ import (
type rawTransaction struct { type rawTransaction struct {
fOverwintered bool fOverwintered bool
version uint32 version uint32
nVersionGroupId uint32 nVersionGroupID uint32
transparentInputs []*txIn transparentInputs []*txIn
transparentOutputs []*txOut transparentOutputs []*txOut
nLockTime uint32 nLockTime uint32
@ -43,19 +48,19 @@ type txIn struct {
func (tx *txIn) ParseFromSlice(data []byte) ([]byte, error) { func (tx *txIn) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
if ok := s.ReadBytes(&tx.PrevTxHash, 32); !ok { if !s.ReadBytes(&tx.PrevTxHash, 32) {
return nil, errors.New("could not read PrevTxHash") return nil, errors.New("could not read PrevTxHash")
} }
if ok := s.ReadUint32(&tx.PrevTxOutIndex); !ok { if !s.ReadUint32(&tx.PrevTxOutIndex) {
return nil, errors.New("could not read PrevTxOutIndex") return nil, errors.New("could not read PrevTxOutIndex")
} }
if ok := s.ReadCompactLengthPrefixed((*bytestring.String)(&tx.ScriptSig)); !ok { if !s.ReadCompactLengthPrefixed((*bytestring.String)(&tx.ScriptSig)) {
return nil, errors.New("could not read ScriptSig") return nil, errors.New("could not read ScriptSig")
} }
if ok := s.ReadUint32(&tx.SequenceNumber); !ok { if !s.ReadUint32(&tx.SequenceNumber) {
return nil, errors.New("could not read SequenceNumber") return nil, errors.New("could not read SequenceNumber")
} }
@ -64,7 +69,7 @@ func (tx *txIn) ParseFromSlice(data []byte) ([]byte, error) {
// Txout format as described in https://en.bitcoin.it/wiki/Transaction // Txout format as described in https://en.bitcoin.it/wiki/Transaction
type txOut struct { type txOut struct {
// Non-negative int giving the number of Satoshis to be transferred // Non-negative int giving the number of zatoshis to be transferred
Value uint64 Value uint64
// Script. CompactSize-prefixed. // Script. CompactSize-prefixed.
@ -74,11 +79,11 @@ type txOut struct {
func (tx *txOut) ParseFromSlice(data []byte) ([]byte, error) { func (tx *txOut) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
if ok := s.ReadUint64(&tx.Value); !ok { if !s.ReadUint64(&tx.Value) {
return nil, errors.New("could not read txOut value") return nil, errors.New("could not read txOut value")
} }
if ok := s.ReadCompactLengthPrefixed((*bytestring.String)(&tx.Script)); !ok { if !s.ReadCompactLengthPrefixed((*bytestring.String)(&tx.Script)) {
return nil, errors.New("could not read txOut script") return nil, errors.New("could not read txOut script")
} }
@ -99,27 +104,27 @@ type spend struct {
func (p *spend) ParseFromSlice(data []byte) ([]byte, error) { func (p *spend) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
if ok := s.ReadBytes(&p.cv, 32); !ok { if !s.ReadBytes(&p.cv, 32) {
return nil, errors.New("could not read cv") return nil, errors.New("could not read cv")
} }
if ok := s.ReadBytes(&p.anchor, 32); !ok { if !s.ReadBytes(&p.anchor, 32) {
return nil, errors.New("could not read anchor") return nil, errors.New("could not read anchor")
} }
if ok := s.ReadBytes(&p.nullifier, 32); !ok { if !s.ReadBytes(&p.nullifier, 32) {
return nil, errors.New("could not read nullifier") return nil, errors.New("could not read nullifier")
} }
if ok := s.ReadBytes(&p.rk, 32); !ok { if !s.ReadBytes(&p.rk, 32) {
return nil, errors.New("could not read rk") return nil, errors.New("could not read rk")
} }
if ok := s.ReadBytes(&p.zkproof, 192); !ok { if !s.ReadBytes(&p.zkproof, 192) {
return nil, errors.New("could not read zkproof") return nil, errors.New("could not read zkproof")
} }
if ok := s.ReadBytes(&p.spendAuthSig, 64); !ok { if !s.ReadBytes(&p.spendAuthSig, 64) {
return nil, errors.New("could not read spendAuthSig") return nil, errors.New("could not read spendAuthSig")
} }
@ -146,27 +151,27 @@ type output struct {
func (p *output) ParseFromSlice(data []byte) ([]byte, error) { func (p *output) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
if ok := s.ReadBytes(&p.cv, 32); !ok { if !s.ReadBytes(&p.cv, 32) {
return nil, errors.New("could not read cv") return nil, errors.New("could not read cv")
} }
if ok := s.ReadBytes(&p.cmu, 32); !ok { if !s.ReadBytes(&p.cmu, 32) {
return nil, errors.New("could not read cmu") return nil, errors.New("could not read cmu")
} }
if ok := s.ReadBytes(&p.ephemeralKey, 32); !ok { if !s.ReadBytes(&p.ephemeralKey, 32) {
return nil, errors.New("could not read ephemeralKey") return nil, errors.New("could not read ephemeralKey")
} }
if ok := s.ReadBytes(&p.encCiphertext, 580); !ok { if !s.ReadBytes(&p.encCiphertext, 580) {
return nil, errors.New("could not read encCiphertext") return nil, errors.New("could not read encCiphertext")
} }
if ok := s.ReadBytes(&p.outCiphertext, 80); !ok { if !s.ReadBytes(&p.outCiphertext, 80) {
return nil, errors.New("could not read outCiphertext") return nil, errors.New("could not read outCiphertext")
} }
if ok := s.ReadBytes(&p.zkproof, 192); !ok { if !s.ReadBytes(&p.zkproof, 192) {
return nil, errors.New("could not read zkproof") return nil, errors.New("could not read zkproof")
} }
@ -204,50 +209,50 @@ type joinSplit struct {
func (p *joinSplit) ParseFromSlice(data []byte) ([]byte, error) { func (p *joinSplit) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
if ok := s.ReadUint64(&p.vpubOld); !ok { if !s.ReadUint64(&p.vpubOld) {
return nil, errors.New("could not read vpubOld") return nil, errors.New("could not read vpubOld")
} }
if ok := s.ReadUint64(&p.vpubNew); !ok { if !s.ReadUint64(&p.vpubNew) {
return nil, errors.New("could not read vpubNew") return nil, errors.New("could not read vpubNew")
} }
if ok := s.ReadBytes(&p.anchor, 32); !ok { if !s.ReadBytes(&p.anchor, 32) {
return nil, errors.New("could not read anchor") return nil, errors.New("could not read anchor")
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
if ok := s.ReadBytes(&p.nullifiers[i], 32); !ok { if !s.ReadBytes(&p.nullifiers[i], 32) {
return nil, errors.New("could not read a nullifier") return nil, errors.New("could not read a nullifier")
} }
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
if ok := s.ReadBytes(&p.commitments[i], 32); !ok { if !s.ReadBytes(&p.commitments[i], 32) {
return nil, errors.New("could not read a commitment") return nil, errors.New("could not read a commitment")
} }
} }
if ok := s.ReadBytes(&p.ephemeralKey, 32); !ok { if !s.ReadBytes(&p.ephemeralKey, 32) {
return nil, errors.New("could not read ephemeralKey") return nil, errors.New("could not read ephemeralKey")
} }
if ok := s.ReadBytes(&p.randomSeed, 32); !ok { if !s.ReadBytes(&p.randomSeed, 32) {
return nil, errors.New("could not read randomSeed") return nil, errors.New("could not read randomSeed")
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
if ok := s.ReadBytes(&p.vmacs[i], 32); !ok { if !s.ReadBytes(&p.vmacs[i], 32) {
return nil, errors.New("could not read a vmac") return nil, errors.New("could not read a vmac")
} }
} }
if p.version == 2 || p.version == 3 { if p.version == 2 || p.version == 3 {
if ok := s.ReadBytes(&p.proofPHGR13, 296); !ok { if !s.ReadBytes(&p.proofPHGR13, 296) {
return nil, errors.New("could not read PHGR13 proof") return nil, errors.New("could not read PHGR13 proof")
} }
} else if p.version >= 4 { } else if p.version >= 4 {
if ok := s.ReadBytes(&p.proofGroth16, 192); !ok { if !s.ReadBytes(&p.proofGroth16, 192) {
return nil, errors.New("could not read Groth16 proof") return nil, errors.New("could not read Groth16 proof")
} }
} else { } else {
@ -255,7 +260,7 @@ func (p *joinSplit) ParseFromSlice(data []byte) ([]byte, error) {
} }
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
if ok := s.ReadBytes(&p.encCiphertexts[i], 601); !ok { if !s.ReadBytes(&p.encCiphertexts[i], 601) {
return nil, errors.New("could not read an encCiphertext") return nil, errors.New("could not read an encCiphertext")
} }
} }
@ -263,30 +268,25 @@ func (p *joinSplit) ParseFromSlice(data []byte) ([]byte, error) {
return []byte(s), nil return []byte(s), nil
} }
// Transaction encodes a full (zcashd) transaction.
type Transaction struct { type Transaction struct {
*rawTransaction *rawTransaction
rawBytes []byte rawBytes []byte
txId []byte cachedTxID []byte // cached for performance
} }
// GetDisplayHash returns the transaction hash in big-endian display order. // GetDisplayHash returns the transaction hash in big-endian display order.
func (tx *Transaction) GetDisplayHash() []byte { func (tx *Transaction) GetDisplayHash() []byte {
if tx.txId != nil { if tx.cachedTxID != nil {
return tx.txId return tx.cachedTxID
} }
// SHA256d // SHA256d
digest := sha256.Sum256(tx.rawBytes) digest := sha256.Sum256(tx.rawBytes)
digest = sha256.Sum256(digest[:]) digest = sha256.Sum256(digest[:])
// Convert to big-endian
// Reverse byte order tx.cachedTxID = Reverse(digest[:])
for i := 0; i < len(digest)/2; i++ { return tx.cachedTxID
j := len(digest) - 1 - i
digest[i], digest[j] = digest[j], digest[i]
}
tx.txId = digest[:]
return tx.txId
} }
// GetEncodableHash returns the transaction hash in little-endian wire format order. // GetEncodableHash returns the transaction hash in little-endian wire format order.
@ -296,14 +296,18 @@ func (tx *Transaction) GetEncodableHash() []byte {
return digest[:] return digest[:]
} }
// Bytes returns a full transaction's raw bytes.
func (tx *Transaction) Bytes() []byte { func (tx *Transaction) Bytes() []byte {
return tx.rawBytes return tx.rawBytes
} }
func (tx *Transaction) HasSaplingTransactions() bool { // HasSaplingElements indicates whether a transaction has
// at least one shielded input or output.
func (tx *Transaction) HasSaplingElements() bool {
return tx.version >= 4 && (len(tx.shieldedSpends)+len(tx.shieldedOutputs)) > 0 return tx.version >= 4 && (len(tx.shieldedSpends)+len(tx.shieldedOutputs)) > 0
} }
// ToCompact converts the given (full) transaction to compact format.
func (tx *Transaction) ToCompact(index int) *walletrpc.CompactTx { func (tx *Transaction) ToCompact(index int) *walletrpc.CompactTx {
ctx := &walletrpc.CompactTx{ ctx := &walletrpc.CompactTx{
Index: uint64(index), // index is contextual Index: uint64(index), // index is contextual
@ -321,6 +325,7 @@ func (tx *Transaction) ToCompact(index int) *walletrpc.CompactTx {
return ctx return ctx
} }
// ParseFromSlice deserializes a single transaction from the given data.
func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) { func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
s := bytestring.String(data) s := bytestring.String(data)
@ -328,7 +333,7 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
var err error var err error
var header uint32 var header uint32
if ok := s.ReadUint32(&header); !ok { if !s.ReadUint32(&header) {
return nil, errors.New("could not read header") return nil, errors.New("could not read header")
} }
@ -336,13 +341,13 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
tx.version = header & 0x7FFFFFFF tx.version = header & 0x7FFFFFFF
if tx.version >= 3 { if tx.version >= 3 {
if ok := s.ReadUint32(&tx.nVersionGroupId); !ok { if !s.ReadUint32(&tx.nVersionGroupID) {
return nil, errors.New("could not read nVersionGroupId") return nil, errors.New("could not read nVersionGroupId")
} }
} }
var txInCount int var txInCount int
if ok := s.ReadCompactSize(&txInCount); !ok { if !s.ReadCompactSize(&txInCount) {
return nil, errors.New("could not read tx_in_count") return nil, errors.New("could not read tx_in_count")
} }
@ -363,7 +368,7 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
} }
var txOutCount int var txOutCount int
if ok := s.ReadCompactSize(&txOutCount); !ok { if !s.ReadCompactSize(&txOutCount) {
return nil, errors.New("could not read tx_out_count") return nil, errors.New("could not read tx_out_count")
} }
@ -379,12 +384,12 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
} }
} }
if ok := s.ReadUint32(&tx.nLockTime); !ok { if !s.ReadUint32(&tx.nLockTime) {
return nil, errors.New("could not read nLockTime") return nil, errors.New("could not read nLockTime")
} }
if tx.fOverwintered { if tx.fOverwintered {
if ok := s.ReadUint32(&tx.nExpiryHeight); !ok { if !s.ReadUint32(&tx.nExpiryHeight) {
return nil, errors.New("could not read nExpiryHeight") return nil, errors.New("could not read nExpiryHeight")
} }
} }
@ -392,11 +397,11 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
var spendCount, outputCount int var spendCount, outputCount int
if tx.version >= 4 { if tx.version >= 4 {
if ok := s.ReadInt64(&tx.valueBalance); !ok { if !s.ReadInt64(&tx.valueBalance) {
return nil, errors.New("could not read valueBalance") return nil, errors.New("could not read valueBalance")
} }
if ok := s.ReadCompactSize(&spendCount); !ok { if !s.ReadCompactSize(&spendCount) {
return nil, errors.New("could not read nShieldedSpend") return nil, errors.New("could not read nShieldedSpend")
} }
@ -412,7 +417,7 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
} }
} }
if ok := s.ReadCompactSize(&outputCount); !ok { if !s.ReadCompactSize(&outputCount) {
return nil, errors.New("could not read nShieldedOutput") return nil, errors.New("could not read nShieldedOutput")
} }
@ -431,7 +436,7 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
if tx.version >= 2 { if tx.version >= 2 {
var joinSplitCount int var joinSplitCount int
if ok := s.ReadCompactSize(&joinSplitCount); !ok { if !s.ReadCompactSize(&joinSplitCount) {
return nil, errors.New("could not read nJoinSplit") return nil, errors.New("could not read nJoinSplit")
} }
@ -446,18 +451,18 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
tx.joinSplits[i] = js tx.joinSplits[i] = js
} }
if ok := s.ReadBytes(&tx.joinSplitPubKey, 32); !ok { if !s.ReadBytes(&tx.joinSplitPubKey, 32) {
return nil, errors.New("could not read joinSplitPubKey") return nil, errors.New("could not read joinSplitPubKey")
} }
if ok := s.ReadBytes(&tx.joinSplitSig, 64); !ok { if !s.ReadBytes(&tx.joinSplitSig, 64) {
return nil, errors.New("could not read joinSplitSig") return nil, errors.New("could not read joinSplitSig")
} }
} }
} }
if tx.version >= 4 && (spendCount+outputCount > 0) { if tx.version >= 4 && (spendCount+outputCount > 0) {
if ok := s.ReadBytes(&tx.bindingSig, 64); !ok { if !s.ReadBytes(&tx.bindingSig, 64) {
return nil, errors.New("could not read bindingSig") return nil, errors.New("could not read bindingSig")
} }
} }
@ -469,6 +474,7 @@ func (tx *Transaction) ParseFromSlice(data []byte) ([]byte, error) {
return []byte(s), nil return []byte(s), nil
} }
// NewTransaction is the constructor for a full transaction.
func NewTransaction() *Transaction { func NewTransaction() *Transaction {
return &Transaction{ return &Transaction{
rawTransaction: new(rawTransaction), rawTransaction: new(rawTransaction),


@ -1,3 +1,6 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package parser package parser
import ( import (
@ -48,10 +51,10 @@ type outputTestVector struct {
type txTestVector struct { type txTestVector struct {
// Sprout and Sapling // Sprout and Sapling
header, nVersionGroupId, nLockTime, nExpiryHeight string txid, header, nVersionGroupID, nLockTime, nExpiryHeight string
vin, vout [][]string vin, vout [][]string
vJoinSplits []joinSplitTestVector vJoinSplits []joinSplitTestVector
joinSplitPubKey, joinSplitSig string joinSplitPubKey, joinSplitSig string
// Sapling-only // Sapling-only
valueBalance string // encoded int64 valueBalance string // encoded int64
@ -64,8 +67,9 @@ type txTestVector struct {
var zip143tests = []txTestVector{ var zip143tests = []txTestVector{
{ {
// Test vector 1 // Test vector 1
txid: "f0b22277ac851b5f4df590fe6a128aad9d0ce8063235eb2b328c2dc6a23c1ec5",
header: "03000080", header: "03000080",
nVersionGroupId: "7082c403", nVersionGroupID: "7082c403",
nLockTime: "481cdd86", nLockTime: "481cdd86",
nExpiryHeight: "b3cc4318", nExpiryHeight: "b3cc4318",
vin: nil, vin: nil,
@ -77,8 +81,9 @@ var zip143tests = []txTestVector{
{ {
// Test vector 2 // Test vector 2
//raw: "we have some raw data for this tx, which this comment is too small to contain", //raw: "we have some raw data for this tx, which this comment is too small to contain",
txid: "39fe585a56b005f568c3171d22afa916e946e2a8aff5971d58ee8a6fc1482059",
header: "03000080", header: "03000080",
nVersionGroupId: "7082c403", nVersionGroupID: "7082c403",
nLockTime: "97b0e4e4", nLockTime: "97b0e4e4",
nExpiryHeight: "c705fc05", nExpiryHeight: "c705fc05",
vin: [][]string{ vin: [][]string{
@ -159,9 +164,8 @@ func TestSproutTransactionParser(t *testing.T) {
defer testData.Close() defer testData.Close()
// Parse the raw transactions file // Parse the raw transactions file
rawTxData := make([][]byte, len(zip143tests)) rawTxData := [][]byte{}
scan := bufio.NewScanner(testData) scan := bufio.NewScanner(testData)
count := 0
for scan.Scan() { for scan.Scan() {
dataLine := scan.Text() dataLine := scan.Text()
// Skip the comments // Skip the comments
@ -173,9 +177,7 @@ func TestSproutTransactionParser(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
rawTxData = append(rawTxData, txData)
rawTxData[count] = txData
count++
} }
for i, tt := range zip143tests { for i, tt := range zip143tests {
@ -193,21 +195,21 @@ func TestSproutTransactionParser(t *testing.T) {
} }
// Transaction metadata // Transaction metadata
if ok := subTestCommonBlockMeta(&tt, tx, t, i); !ok { if !subTestCommonBlockMeta(&tt, tx, t, i) {
continue continue
} }
// Transparent inputs and outputs // Transparent inputs and outputs
if ok := subTestTransparentInputs(tt.vin, tx.transparentInputs, t, i); !ok { if !subTestTransparentInputs(tt.vin, tx.transparentInputs, t, i) {
continue continue
} }
if ok := subTestTransparentOutputs(tt.vout, tx.transparentOutputs, t, i); !ok { if !subTestTransparentOutputs(tt.vout, tx.transparentOutputs, t, i) {
continue continue
} }
// JoinSplits // JoinSplits
if ok := subTestJoinSplits(tt.vJoinSplits, tx.joinSplits, t, i); !ok { if !subTestJoinSplits(tt.vJoinSplits, tx.joinSplits, t, i) {
continue continue
} }
@ -222,6 +224,9 @@ func TestSproutTransactionParser(t *testing.T) {
t.Errorf("Test %d: jsSig mismatch %x %x", i, testJSSig, tx.joinSplitSig) t.Errorf("Test %d: jsSig mismatch %x %x", i, testJSSig, tx.joinSplitSig)
continue continue
} }
if hex.EncodeToString(tx.GetDisplayHash()) != tt.txid {
t.Errorf("Test %d: incorrect txid", i)
}
} }
} }
@ -237,9 +242,9 @@ func subTestCommonBlockMeta(tt *txTestVector, tx *Transaction, t *testing.T, cas
return false return false
} }
versionGroupBytes, _ := hex.DecodeString(tt.nVersionGroupId) versionGroupBytes, _ := hex.DecodeString(tt.nVersionGroupID)
versionGroup := binary.LittleEndian.Uint32(versionGroupBytes) versionGroup := binary.LittleEndian.Uint32(versionGroupBytes)
if versionGroup != tx.nVersionGroupId { if versionGroup != tx.nVersionGroupID {
t.Errorf("Test %d: unexpected versionGroupId", caseNum) t.Errorf("Test %d: unexpected versionGroupId", caseNum)
return false return false
} }
@ -507,8 +512,9 @@ func subTestTransparentOutputs(testOutputs [][]string, txOutputs []*txOut, t *te
var zip243tests = []txTestVector{ var zip243tests = []txTestVector{
// Test vector 1 // Test vector 1
{ {
txid: "5fc4867a1b8bd5ab709799adf322a85d10607e053726d5f5ab4b1c9ab897e6bc",
header: "04000080", header: "04000080",
nVersionGroupId: "85202f89", nVersionGroupID: "85202f89",
vin: nil, vin: nil,
vout: [][]string{ vout: [][]string{
{"e7719811893e0000", "095200ac6551ac636565"}, {"e7719811893e0000", "095200ac6551ac636565"},
@ -609,8 +615,9 @@ var zip243tests = []txTestVector{
}, },
// Test vector 2 // Test vector 2
{ {
txid: "6732cf8d67aac5b82a2a0f0217a7d4aa245b2adb0b97fd2d923dfc674415e221",
header: "04000080", header: "04000080",
nVersionGroupId: "85202f89", nVersionGroupID: "85202f89",
vin: [][]string{ vin: [][]string{
{"56e551406a7ee8355656a21e43e38ce129fdadb759eddfa08f00fc8e567cef93", "c6792d01", "0763656300ac63ac", "8df04245"}, {"56e551406a7ee8355656a21e43e38ce129fdadb759eddfa08f00fc8e567cef93", "c6792d01", "0763656300ac63ac", "8df04245"},
{"1a33590d3e8cf49b2627218f0c292fa66ada945fa55bb23548e33a83a562957a", "3149a993", "086a5352516a65006a", "78d97ce4"}, {"1a33590d3e8cf49b2627218f0c292fa66ada945fa55bb23548e33a83a562957a", "3149a993", "086a5352516a65006a", "78d97ce4"},
@ -665,9 +672,8 @@ func TestSaplingTransactionParser(t *testing.T) {
defer testData.Close() defer testData.Close()
// Parse the raw transactions file // Parse the raw transactions file
rawTxData := make([][]byte, len(zip243tests)) rawTxData := [][]byte{}
scan := bufio.NewScanner(testData) scan := bufio.NewScanner(testData)
count := 0
for scan.Scan() { for scan.Scan() {
dataLine := scan.Text() dataLine := scan.Text()
// Skip the comments // Skip the comments
@ -679,9 +685,7 @@ func TestSaplingTransactionParser(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
rawTxData = append(rawTxData, txData)
rawTxData[count] = txData
count++
} }
for i, tt := range zip243tests { for i, tt := range zip243tests {
@ -698,21 +702,35 @@ func TestSaplingTransactionParser(t *testing.T) {
continue continue
} }
// If the transaction is shorter than it should be, parsing
// should fail gracefully
for j := 0; j < len(rawTxData[i]); j++ {
_, err := tx.ParseFromSlice(rawTxData[i][0:j])
if err == nil {
t.Errorf("Test %d: Parsing transaction unexpected succeeded", i)
break
}
if len(rest) > 0 {
t.Errorf("Test %d: Parsing transaction unexpected rest", i)
break
}
}
// Transaction metadata // Transaction metadata
if ok := subTestCommonBlockMeta(&tt, tx, t, i); !ok { if !subTestCommonBlockMeta(&tt, tx, t, i) {
continue continue
} }
// Transparent inputs and outputs // Transparent inputs and outputs
if ok := subTestTransparentInputs(tt.vin, tx.transparentInputs, t, i); !ok { if !subTestTransparentInputs(tt.vin, tx.transparentInputs, t, i) {
continue continue
} }
if ok := subTestTransparentOutputs(tt.vout, tx.transparentOutputs, t, i); !ok { if !subTestTransparentOutputs(tt.vout, tx.transparentOutputs, t, i) {
continue continue
} }
// JoinSplits // JoinSplits
if ok := subTestJoinSplits(tt.vJoinSplits, tx.joinSplits, t, i); !ok { if !subTestJoinSplits(tt.vJoinSplits, tx.joinSplits, t, i) {
continue continue
} }
@ -736,11 +754,11 @@ func TestSaplingTransactionParser(t *testing.T) {
continue continue
} }
if ok := subTestShieldedSpends(tt.spends, tx.shieldedSpends, t, i); !ok { if !subTestShieldedSpends(tt.spends, tx.shieldedSpends, t, i) {
continue continue
} }
if ok := subTestShieldedOutputs(tt.outputs, tx.shieldedOutputs, t, i); !ok { if !subTestShieldedOutputs(tt.outputs, tx.shieldedOutputs, t, i) {
continue continue
} }
@ -749,6 +767,14 @@ func TestSaplingTransactionParser(t *testing.T) {
t.Errorf("Test %d: bindingSig %x %x", i, testBinding, tx.bindingSig) t.Errorf("Test %d: bindingSig %x %x", i, testBinding, tx.bindingSig)
continue continue
} }
if hex.EncodeToString(tx.GetDisplayHash()) != tt.txid {
t.Errorf("Test %d: incorrect txid", i)
}
// test caching
if hex.EncodeToString(tx.GetDisplayHash()) != tt.txid {
t.Errorf("Test %d: incorrect cached txid", i)
}
} }
} }

15
parser/util.go Normal file

@ -0,0 +1,15 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package parser
// Reverse the given byte slice, returning a slice pointing to new data;
// the input slice is unchanged.
func Reverse(a []byte) []byte {
r := make([]byte, len(a), len(a))
for left, right := 0, len(a)-1; left <= right; left, right = left+1, right-1 {
r[left], r[right] = a[right], a[left]
}
return r
}
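A small illustrative use of Reverse, mirroring how GetDisplayHash and GetDisplayPrevHash convert a little-endian wire-order hash into display order. The bytes are made up; the import path follows the module path used elsewhere in this change.

package main

import (
	"fmt"

	"github.com/adityapk00/lightwalletd/parser"
)

func main() {
	wire := []byte{0xbd, 0xac, 0x12, 0x34}   // little-endian wire order
	fmt.Printf("%x\n", parser.Reverse(wire)) // prints 3412acbd (display order)
}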

37
parser/util_test.go Normal file

@ -0,0 +1,37 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
package parser
import (
"testing"
)
func TestReverse(t *testing.T) {
s := make([]byte, 32, 32)
for i := 0; i < 32; i++ {
s[i] = byte(i)
}
r := Reverse(s)
for i := 0; i < 32; i++ {
if r[i] != byte(32-1-i) {
t.Fatal("mismatch")
}
}
}
// Currently, Reverse() isn't called for odd-length slices, but
// it should work.
func TestReverseOdd(t *testing.T) {
s := make([]byte, 5, 5)
for i := 0; i < 5; i++ {
s[i] = byte(i)
}
r := Reverse(s)
for i := 0; i < 5; i++ {
if r[i] != byte(5-1-i) {
t.Fatal("mismatch")
}
}
}

22
tekton/resources.yml Normal file

@ -0,0 +1,22 @@
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: lightwalletd-image
spec:
type: image
params:
- name: url
value: electriccoinco/lightwalletd
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: lightwalletd-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: https://github.com/zcash/lightwalletd.git

27
tekton/taskruns.yml Normal file

@ -0,0 +1,27 @@
---
apiVersion: tekton.dev/v1alpha1
kind: TaskRun
metadata:
generateName: lightwalletd-dockerbuild-
spec:
serviceAccountName: zcashsysadmin-service
taskRef:
name: build-docker-image-from-git-source
inputs:
resources:
- name: docker-source
resourceRef:
name: lightwalletd-git
params:
- name: pathToDockerFile
value: /workspace/docker-source/Dockerfile
- name: pathToContext
value: /workspace/docker-source/
outputs:
resources:
- name: builtImage
resourceRef:
name: lightwalletd-image
- name: notification
resourceRef:
name: event-to-cloudlog

45
tekton/triggerbinding.yml Normal file

@ -0,0 +1,45 @@
---
apiVersion: tekton.dev/v1alpha1
kind: TriggerBinding
metadata:
name: lightwalletd-master-binding
spec:
params:
- name: newSHA
value: $(body.after)
---
apiVersion: tekton.dev/v1alpha1
kind: TriggerTemplate
metadata:
name: lightwalletd-dockerbuild-template
spec:
params:
- name: newSHA
description: The git repository HEAD sha
resourcetemplates:
- apiVersion: tekton.dev/v1alpha1
kind: TaskRun
metadata:
generateName: lightwalletd-dockerbuild-
spec:
serviceAccountName: zcashsysadmin-service
taskRef:
name: build-docker-image-from-git-source
inputs:
resources:
- name: docker-source
resourceRef:
name: lightwalletd-git
params:
- name: pathToDockerFile
value: /workspace/docker-source/Dockerfile
- name: pathToContext
value: /workspace/docker-source/
outputs:
resources:
- name: builtImage
resourceRef:
name: lightwalletd-image
- name: notification
resourceRef:
name: event-to-cloudlog

151
testclient/main.go Normal file

@ -0,0 +1,151 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package main implements a gRPC test client for lightwalletd.
// This file adapted from:
// https://github.com/grpc/grpc-go/blob/master/examples/helloworld/greeter_client/main.go
// For now at least, all it does is generate a load for performance and stress testing.
package main
import (
"context"
"flag"
"io"
"log"
"strconv"
"sync"
"time"
pb "github.com/adityapk00/lightwalletd/walletrpc"
"google.golang.org/grpc"
)
const (
address = "localhost:9067"
)
type Options struct {
concurrency int `json:"concurrency"`
iterations int `json:"iterations"`
op string `json:"op"`
verbose *bool `json:"v"`
}
func main() {
opts := &Options{}
flag.IntVar(&opts.concurrency, "concurrency", 1, "number of threads")
flag.IntVar(&opts.iterations, "iterations", 1, "number of iterations")
flag.StringVar(&opts.op, "op", "ping", "operation(ping|getlightdinfo|getblock|getblockrange)")
opts.verbose = flag.Bool("v", false, "verbose (print operation results)")
flag.Parse()
// Remaining args are all integers (at least for now)
args := make([]int64, flag.NArg())
for i := 0; i < flag.NArg(); i++ {
var err error
if args[i], err = strconv.ParseInt(flag.Arg(i), 10, 64); err != nil {
log.Fatalf("argument %v is not an int64: %v", flag.Arg(i), err)
}
}
// Set up a connection to the server.
conn, err := grpc.Dial(address, grpc.WithInsecure(),
grpc.WithConnectParams(grpc.ConnectParams{MinConnectTimeout: 30 * time.Second}))
if err != nil {
log.Fatalf("did not connect: %v", err)
}
defer conn.Close()
c := pb.NewCompactTxStreamerClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), 100000*time.Second)
defer cancel()
var wg sync.WaitGroup
wg.Add(opts.concurrency)
for i := 0; i < opts.concurrency; i++ {
go func(i int) {
for j := 0; j < opts.iterations; j++ {
switch opts.op {
case "ping":
var a pb.Duration
a.IntervalUs = 8 * 1000 * 1000 // default 8 seconds
if len(args) > 0 {
a.IntervalUs = args[0]
}
r, err := c.Ping(ctx, &a)
if err != nil {
log.Fatalf("Ping failed: %v", err)
}
if *opts.verbose {
log.Println("thr:", i, "entry:", r.Entry, "exit:", r.Exit)
}
case "getlightdinfo":
r, err := c.GetLightdInfo(ctx, &pb.Empty{})
if err != nil {
log.Fatalf("GetLightwalletdInfo failed: %v", err)
}
if *opts.verbose {
log.Println("thr:", i, r)
}
case "getblock":
blockid := &pb.BlockID{Height: 748400} // default (arbitrary)
if len(args) > 0 {
blockid.Height = uint64(args[0])
}
r, err := c.GetBlock(ctx, blockid)
if err != nil {
log.Fatalf("GetLightwalletdInfo failed: %v", err)
}
// Height is enough to see if it's working
if *opts.verbose {
log.Println("thr:", i, r.Height)
}
case "getblockrange":
blockrange := &pb.BlockRange{ // defaults (arbitrary)
Start: &pb.BlockID{Height: 738100},
End: &pb.BlockID{Height: 738199},
}
if len(args) > 0 {
blockrange.Start.Height = uint64(args[0])
blockrange.End.Height = uint64(args[1])
}
stream, err := c.GetBlockRange(ctx, blockrange)
if err != nil {
log.Fatalf("GetLightwalletdInfo failed: %v", err)
}
for {
// each call to Recv returns a compact block
r, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatal(err)
}
// Height is enough to see if it's working
if *opts.verbose {
log.Println("thr:", i, r.Height)
}
}
default:
log.Fatalf("unknown op %s", opts.op)
}
}
wg.Done()
}(i)
}
wg.Wait()
}
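As a usage note (assuming a lightwalletd instance is serving gRPC on localhost:9067, the hard-coded address above): running `go run testclient/main.go -concurrency 10 -iterations 5 -op getblockrange -v 738100 738199` starts ten goroutines that each stream the given 100-block range five times, printing the height of every compact block received.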

25
testclient/stress.sh Executable file

@ -0,0 +1,25 @@
#!/bin/bash
#
# Create a CSV file with various performance measurements
#
set -e
test $# -eq 0 && { echo "usage: $0 iterations op(getlightdinfo|getblock|getblockrange)";exit 1;}
iterations=$1
op=$2
export p=`pidof server`
test -z $p && { echo 'is the server running?';exit 1;}
set -- $p
test $# -ne 1 && { echo 'server pid is not unique';exit 1;}
echo "concurrency,iterations per thread,utime before (ticks),stime before (ticks),memory before (pages),time (sec),utime after (ticks),stime after (ticks),memory after (pages)"
for i in 1 200 400 600 800 1000
do
csv="$i,$iterations"
csv="$csv,`cat /proc/$p/stat|field 14`" # utime in 10ms ticks
csv="$csv,`cat /proc/$p/stat|field 15`" # stime in 10ms ticks
csv="$csv,`cat /proc/$p/statm|field 2`" # resident size in pages (8k)
csv="$csv,`/usr/bin/time -f '%e' testclient/main -concurrency $i -iterations $iterations -op $op 2>&1`"
csv="$csv,`cat /proc/$p/stat|field 14`" # utime in 10ms ticks
csv="$csv,`cat /proc/$p/stat|field 15`" # stime in 10ms ticks
csv="$csv,`cat /proc/$p/statm|field 2`"
echo $csv
done
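For context, a rough Go sketch of the same /proc sampling the script performs; fields 14 (utime) and 15 (stime) of /proc/<pid>/stat follow proc(5), and the helper is illustrative rather than part of this repo.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
)

// procTicks returns the utime and stime tick counters for a process, read
// from /proc/<pid>/stat (fields 14 and 15, 1-based, as documented in proc(5)).
func procTicks(pid int) (utime, stime uint64, err error) {
	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
	if err != nil {
		return 0, 0, err
	}
	s := string(data)
	// Field 2 (the command name) is parenthesized and may contain spaces,
	// so split on the text after the closing parenthesis.
	rest := s[strings.LastIndexByte(s, ')')+1:]
	fields := strings.Fields(rest) // fields[0] is field 3 ("state")
	utime, err = strconv.ParseUint(fields[11], 10, 64) // field 14
	if err != nil {
		return 0, 0, err
	}
	stime, err = strconv.ParseUint(fields[12], 10, 64) // field 15
	return utime, stime, err
}

func main() {
	u, s, err := procTicks(os.Getpid())
	if err != nil {
		panic(err)
	}
	fmt.Println("utime ticks:", u, "stime ticks:", s)
}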

10
testdata/badblocks vendored Normal file

@ -0,0 +1,10 @@
0400x0
040000
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e016090000
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000000069cb
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000000069cb7d5b0f1d0a
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000000069cb7d5b0f1d0a1c000000
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000000069cb7d5b0f1d0a1c0000000000000000004d6cdd939a4900000000000000000000000000310c50b3fd
040000008a024cebb99e30ff83d5b9f50cc5303351923da95a8dc7fda3e0160900000000226b1c86e101f1fac09aa533c246bd6843ee6b444ad3ff251df0a401f16f0981000000000000000000000000000000000000000000000000000000000000000069cb7d5b0f1d0a1c0000000000000000004d6cdd939a4900000000000000000000000000310c50b3fd40
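Each line above is a deliberately truncated or malformed serialized block. A hedged sketch of how a test might feed them through the block parser and expect every one to fail; parser.NewBlock and ParseFromSlice are assumptions about this repo's parser API.

package main

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"os"

	// Assumed: parser.NewBlock and (*Block).ParseFromSlice from this repo's
	// parser package; adjust if the actual API differs.
	"github.com/adityapk00/lightwalletd/parser"
)

func main() {
	f, err := os.Open("testdata/badblocks")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scan := bufio.NewScanner(f)
	for lineNo := 1; scan.Scan(); lineNo++ {
		raw, err := hex.DecodeString(scan.Text())
		if err != nil {
			// The first entry is intentionally not even valid hex.
			fmt.Printf("line %d: invalid hex (expected): %v\n", lineNo, err)
			continue
		}
		block := parser.NewBlock()
		if _, err := block.ParseFromSlice(raw); err == nil {
			fmt.Printf("line %d: unexpectedly parsed as a block\n", lineNo)
		}
	}
}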

File diff suppressed because one or more lines are too long

BIN
testdata/test.db vendored

Binary file not shown.

126
testtools/genblocks/main.go Normal file

@ -0,0 +1,126 @@
// Copyright (c) 2019-2020 The Zcash developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or https://www.opensource.org/licenses/mit-license.php .
//
// This tool reads a set of files, each containing a list of transactions
// (one per line, can be empty), and writes to stdout a list of blocks,
// one per input file, in hex format (same as zcash-cli getblock 12345 0),
// each on a separate line. Each fake block contains a fake coinbase
// transaction and all of the transactions in the corresponding file.
// The default start height is 1000, so the program expects to find
// files blocks/1000.txt, blocks/1001.txt, ...
//
// Typical way to run this program to create 6 blocks, all empty except
// for the fifth, which contains one transaction:
// $ mkdir blocks
// $ touch blocks/{1000,1001,1002,1003,1004,1005}.txt
// $ echo "0400008085202f8901950521a79e89ed418a4b506f42e9829739b1ca516d4c590bddb4465b4b347bb2000000006a4730440220142920f2a9240c5c64406668c9a16d223bd01db33a773beada7f9c9b930cf02b0220171cbee9232f9c5684eb918db70918e701b86813732871e1bec6fbfb38194f53012102975c020dd223263d2a9bfff2fa6004df4c07db9f01c531967546ef941e2fcfbffeffffff026daf9b00000000001976a91461af073e7679f06677c83aa48f205e4b98feb8d188ac61760356100000001976a91406f6b9a7e1525ee12fd77af9b94a54179785011b88ac4c880b007f880b000000000000000000000000" > blocks/1004.txt
// $ go run testtools/genblocks/main.go >testdata/default-darkside-blocks
//
// Alternative way to create the empty files:
// $ seq 1000 1005 | while read i; do touch blocks/$i.txt; done
package main
import (
"bufio"
"crypto/sha256"
"encoding/hex"
"flag"
"fmt"
"os"
"path"
"strconv"
"strings"
"github.com/adityapk00/lightwalletd/parser"
)
type options struct {
startHeight int
blocksDir string
}
func main() {
opts := &options{}
flag.IntVar(&opts.startHeight, "start-height", 1000, "generated blocks start at this height")
flag.StringVar(&opts.blocksDir, "blocks-dir", "./blocks", "directory containing <N>.txt for each block height <N>, with one hex-encoded transaction per line")
flag.Parse()
prevhash := make([]byte, 32)
curHeight := opts.startHeight
// Keep opening <curHeight>.txt and incrementing until the file doesn't exist.
for {
testBlocks, err := os.Open(path.Join(opts.blocksDir, strconv.Itoa(curHeight)+".txt"))
if err != nil {
break
}
scan := bufio.NewScanner(testBlocks)
fakeCoinbase := "0400008085202f890100000000000000000000000000000000000000000000000000" +
"00000000000000ffffffff2a03d12c0c00043855975e464b8896790758f824ceac97836" +
"22c17ed38f1669b8a45ce1da857dbbe7950e2ffffffff02a0ebce1d000000001976a914" +
"7ed15946ec14ae0cd8fa8991eb6084452eb3f77c88ac405973070000000017a914e445cf" +
"a944b6f2bdacefbda904a81d5fdd26d77f8700000000000000000000000000000000000000"
// This coinbase transaction was pulled from block 797905, whose
// little-endian encoding is 0xD12C0C00. Replace it with the block
// number we want.
fakeCoinbase = strings.Replace(fakeCoinbase, "d12c0c00",
fmt.Sprintf("%02x", curHeight&0xFF)+
fmt.Sprintf("%02x", (curHeight>>8)&0xFF)+
fmt.Sprintf("%02x", (curHeight>>16)&0xFF)+
fmt.Sprintf("%02x", (curHeight>>24)&0xFF), 1)
var numTransactions uint = 1 // coinbase
allTransactionsHex := ""
for scan.Scan() { // each line (hex-encoded transaction)
allTransactionsHex += scan.Text()
numTransactions++
}
if err = scan.Err(); err != nil {
panic("line too long!")
}
if numTransactions > 65535 {
panic(fmt.Sprint("too many transactions ", numTransactions,
" maximum 65535"))
}
hashOfTxnsAndHeight := sha256.Sum256([]byte(allTransactionsHex + "#" + string(rune(curHeight))))
// These fields do not need to be valid for the lightwalletd/wallet stack to work.
// The lightwalletd/wallet stack relies on the miners to validate these.
// Make the block header depend on height + all transactions (in an incorrect way)
blockHeader := &parser.BlockHeader{
RawBlockHeader: &parser.RawBlockHeader{
Version: 4,
HashPrevBlock: prevhash,
HashMerkleRoot: hashOfTxnsAndHeight[:],
HashFinalSaplingRoot: make([]byte, 32),
Time: 1,
NBitsBytes: make([]byte, 4),
Nonce: make([]byte, 32),
Solution: make([]byte, 1344),
},
}
headerBytes, err := blockHeader.MarshalBinary()
if err != nil {
panic(fmt.Sprint("Cannot marshal block header: ", err))
}
fmt.Print(hex.EncodeToString(headerBytes))
// After the header, there's a compactsize representation of the number of transactions.
if numTransactions < 253 {
fmt.Printf("%02x", numTransactions)
} else {
fmt.Printf("%02x%02x%02x", 253, numTransactions%256, numTransactions/256)
}
fmt.Printf("%s%s\n", fakeCoinbase, allTransactionsHex)
curHeight++
prevhash = blockHeader.GetEncodableHash()
}
}
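The transaction-count prefix written just above is the Bitcoin/Zcash compactsize encoding; a small illustrative helper covering the two ranges this tool can emit (not part of the tool itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// compactSize encodes a transaction count the way the loop above does: a
// single byte below 253, otherwise the 0xfd marker followed by the count as
// a little-endian uint16 (larger encodings are never needed by this tool).
func compactSize(n uint16) []byte {
	if n < 253 {
		return []byte{byte(n)}
	}
	out := []byte{0xfd, 0, 0}
	binary.LittleEndian.PutUint16(out[1:], n)
	return out
}

func main() {
	fmt.Printf("%x\n", compactSize(1))   // 01
	fmt.Printf("%x\n", compactSize(300)) // fd2c01
}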

36
testtools/zap/main.go Normal file

@ -0,0 +1,36 @@
// This program increments a given byte of a given file,
// to test data corruption detection -- BE CAREFUL!
package main
import (
"fmt"
"os"
"strconv"
)
func main() {
if len(os.Args) != 3 {
fmt.Println("usage:", os.Args[0], "file offset")
os.Exit(1)
}
f, err := os.OpenFile(os.Args[1], os.O_RDWR, 0644)
if err != nil {
fmt.Println("open failed:", err)
os.Exit(1)
}
offset, err := strconv.ParseInt(os.Args[2], 10, 64)
if err != nil {
fmt.Println("bad offset:", err)
os.Exit(1)
}
b := make([]byte, 1)
if n, err := f.ReadAt(b, offset); err != nil || n != 1 {
fmt.Println("read failed:", n, err)
os.Exit(1)
}
b[0] += 1
if n, err := f.WriteAt(b, offset); err != nil || n != 1 {
fmt.Println("read failed:", n, err)
os.Exit(1)
}
}

10
utils/pullblocks.sh Executable file

@ -0,0 +1,10 @@
#!/bin/bash
# Usage: ./pullblocks.sh 500000 500100 > blocks.txt
test $# -ne 2 && { echo usage: $0 start end;exit 1;}
let i=$1
while test $i -le $2
do
zcash-cli getblock $i 0
let i++
done

16
utils/submitblocks.sh Executable file

@ -0,0 +1,16 @@
#!/bin/bash
# Submits a list of blocks, one per line in the file, to darksidewalletd.
# Usage: ./submitblocks.sh <sapling activation> <file>
# e.g. ./submitblocks.sh 1000 blocks.txt
#
set -e
test $# -ne 2 && { echo usage: $0 sapling-height blocks-file;exit 1;}
# must do a Reset first
grpcurl -plaintext -d '{"saplingActivation":'$1',"branchID":"2bb40e60","chainName":"main"}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/Reset
# send the blocks and make them active
sed 's/^/{"block":"/;s/$/"}/' $2 |
grpcurl -plaintext -d @ localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/StageBlocksStream
let latest=$1+$(cat $2|wc -l)-1
grpcurl -plaintext -d '{"height":'$latest'}' localhost:9067 cash.z.wallet.sdk.rpc.DarksideStreamer/ApplyStaged
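The same staging flow can be driven from Go; the hedged sketch below mirrors the three grpcurl calls above. The DarksideStreamer client constructor and the message/field names are assumptions inferred from the JSON payloads in the script and may differ from the generated walletrpc code.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	// Assumed import path; DarksideMetaState, DarksideBlock, DarksideHeight
	// and their field names are inferred from the grpcurl JSON above.
	pb "github.com/adityapk00/lightwalletd/walletrpc"
)

// stageAndApply mirrors the script: Reset, stream the staged blocks, then
// ApplyStaged at the last staged height.
func stageAndApply(ctx context.Context, conn *grpc.ClientConn, saplingHeight int, blocks []string) error {
	c := pb.NewDarksideStreamerClient(conn)

	if _, err := c.Reset(ctx, &pb.DarksideMetaState{
		SaplingActivation: int32(saplingHeight),
		BranchID:          "2bb40e60",
		ChainName:         "main",
	}); err != nil {
		return err
	}

	stream, err := c.StageBlocksStream(ctx)
	if err != nil {
		return err
	}
	for _, b := range blocks {
		if err := stream.Send(&pb.DarksideBlock{Block: b}); err != nil {
			return err
		}
	}
	if _, err := stream.CloseAndRecv(); err != nil {
		return err
	}

	latest := saplingHeight + len(blocks) - 1
	_, err = c.ApplyStaged(ctx, &pb.DarksideHeight{Height: int32(latest)})
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:9067", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Two placeholder hex-encoded blocks staged at sapling height 1000.
	if err := stageAndApply(context.Background(), conn, 1000, []string{"0400...", "0400..."}); err != nil {
		log.Fatal(err)
	}
}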

20
vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file

@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file

File diff suppressed because it is too large

316
vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file

@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
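A short usage sketch of this vendored package, based only on the exported API shown above (NewTargeted, Insert, Query, Count); the target quantiles and inserted values are arbitrary:

package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile; the map values are the
	// absolute error tolerated at each target quantile.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})

	for i := 1; i <= 10000; i++ {
		q.Insert(float64(i)) // pretend these are observed latencies
	}

	fmt.Println("p50 ~", q.Query(0.50))
	fmt.Println("p99 ~", q.Query(0.99))
	fmt.Println("count:", q.Count())
}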

View File

@ -36,10 +36,17 @@ var (
// interface from crypto/elliptic.
type KoblitzCurve struct {
*elliptic.CurveParams
// q is the value (P+1)/4 used to compute the square root of field
// elements.
q *big.Int
H int // cofactor of the curve.
halfOrder *big.Int // half the order N
// fieldB is the constant B of the curve as a fieldVal.
fieldB *fieldVal
// byteSize is simply the bit size / 8 and is provided for convenience
// since it is calculated repeatedly.
byteSize int
@ -879,12 +886,22 @@ func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
return curve.fieldJacobianToBigAffine(qx, qy, qz)
}
// QPlus1Div4 returns the (P+1)/4 constant for the curve for use in calculating
// square roots via exponentiation.
//
// DEPRECATED: The actual value returned is (P+1)/4, where as the original
// method name implies that this value is (((P+1)/4)+1)/4. This method is kept
// to maintain backwards compatibility of the API. Use Q() instead.
func (curve *KoblitzCurve) QPlus1Div4() *big.Int {
return curve.q
}
// Q returns the (P+1)/4 constant for the curve for use in calculating square
// roots via exponentiation.
func (curve *KoblitzCurve) Q() *big.Int {
return curve.q
}
var initonce sync.Once
var secp256k1 KoblitzCurve
@ -917,6 +934,7 @@ func initS256() {
big.NewInt(1)), big.NewInt(4))
secp256k1.H = 1
secp256k1.halfOrder = new(big.Int).Rsh(secp256k1.N, 1)
secp256k1.fieldB = new(fieldVal).SetByteSlice(secp256k1.B.Bytes())
// Provided for convenience since this gets computed repeatedly.
secp256k1.byteSize = secp256k1.BitSize / 8


@ -102,6 +102,20 @@ const (
fieldPrimeWordOne = 0x3ffffbf
)
var (
// fieldQBytes is the value Q = (P+1)/4 for the secp256k1 prime P. This
// value is used to efficiently compute the square root of values in the
// field via exponentiation. The value of Q in hex is:
//
// Q = 3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c
fieldQBytes = []byte{
0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x0c,
}
)
// fieldVal implements optimized fixed-precision arithmetic over the
// secp256k1 finite field. This means all arithmetic is performed modulo
// 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f. It
@ -1221,3 +1235,118 @@ func (f *fieldVal) Inverse() *fieldVal {
f.Square().Square().Square().Square().Square() // f = a^(2^256 - 4294968320)
return f.Mul(&a45) // f = a^(2^256 - 4294968275) = a^(p-2)
}
// SqrtVal computes the square root of x modulo the curve's prime, and stores
// the result in f. The square root is computed via exponentiation of x by the
// value Q = (P+1)/4 using the curve's precomputed big-endian representation of
// the Q. This method uses a modified version of square-and-multiply
// exponentiation over secp256k1 fieldVals to operate on bytes instead of bits,
// which offers better performance over both big.Int exponentiation and bit-wise
// square-and-multiply.
//
// NOTE: This method only works when P is intended to be the secp256k1 prime and
// is not constant time. The returned value is of magnitude 1, but is
// denormalized.
func (f *fieldVal) SqrtVal(x *fieldVal) *fieldVal {
// The following computation iteratively computes x^((P+1)/4) = x^Q
// using the recursive, piece-wise definition:
//
// x^n = (x^2)^(n/2) mod P if n is even
// x^n = x(x^2)^((n-1)/2) mod P if n is odd
//
// Given n in its big-endian representation b_k, ..., b_0, x^n can be
// computed by defining the sequence r_k+1, ..., r_0, where:
//
// r_k+1 = 1
// r_i = (r_i+1)^2 * x^b_i for i = k, ..., 0
//
// The final value r_0 = x^n.
//
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more
// details.
//
// This can be further optimized, by observing that the value of Q in
// secp256k1 has the value:
//
// Q = 3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c
//
// We can unroll the typical bit-wise interpretation of the
// exponentiation algorithm above to instead operate on bytes.
// This reduces the number of comparisons by an order of magnitude,
// reducing the overhead of failed branch predictions and additional
// comparisons in this method.
//
// Since there are only 4 unique bytes of Q, this keeps the jump
// table small without the need to handle all possible 8-bit values.
// Further, we observe that 29 of the 32 bytes are 0xff; making the
// first case handle 0xff therefore optimizes the hot path.
f.SetInt(1)
for _, b := range fieldQBytes {
switch b {
// Most common case, where all 8 bits are set.
case 0xff:
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
// First byte of Q (0x3f), where all but the top two bits are
// set. Note that this case only applies six operations, since
// the highest bit of Q resides in bit six of the first byte. We
// ignore the first two bits, since squaring for these bits will
// result in an invalid result. We forgo squaring f before the
// first multiply, since 1^2 = 1.
case 0x3f:
f.Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
// Byte 28 of Q (0xbf), where only bit 7 is unset.
case 0xbf:
f.Square().Mul(x)
f.Square()
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
f.Square().Mul(x)
// Byte 31 of Q (0x0c), where only bits 3 and 4 are set.
default:
f.Square()
f.Square()
f.Square()
f.Square()
f.Square().Mul(x)
f.Square().Mul(x)
f.Square()
f.Square()
}
}
return f
}
// Sqrt computes the square root of f modulo the curve's prime, and stores the
// result in f. The square root is computed via exponentiation of x by the value
// Q = (P+1)/4 using the curve's precomputed big-endian representation of the Q.
// This method uses a modified version of square-and-multiply exponentiation
// over secp256k1 fieldVals to operate on bytes instead of bits, which offers
// better performance over both big.Int exponentiation and bit-wise
// square-and-multiply.
//
// NOTE: This method only works when P is intended to be the secp256k1 prime and
// is not constant time. The returned value is of magnitude 1, but is
// denormalized.
func (f *fieldVal) Sqrt() *fieldVal {
return f.SqrtVal(f)
}
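The byte-wise exponentiation above computes x^((P+1)/4) mod P; a hedged math/big sketch of the same identity, using the P and Q values quoted in the comments, which can serve as a reference check against SqrtVal:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// P and Q below are the secp256k1 prime and (P+1)/4 quoted in the
	// comments above. Because P = 3 mod 4, x^Q mod P is a square root of x
	// whenever x is a quadratic residue.
	p, _ := new(big.Int).SetString(
		"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)
	q, _ := new(big.Int).SetString(
		"3fffffffffffffffffffffffffffffffffffffffffffffffffffffffbfffff0c", 16)

	x := big.NewInt(9) // 9 = 3^2 is always a quadratic residue
	y := new(big.Int).Exp(x, q, p)

	// y is a valid square root only if y*y = x (mod p).
	y2 := new(big.Int).Mul(y, y)
	y2.Mod(y2, p)
	fmt.Println("y^2 == x:", y2.Cmp(x) == 0)
	fmt.Println("y =", y.Text(16))
}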


@ -1,63 +0,0 @@
// Copyright 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// This file is ignored during the regular build due to the following build tag.
// It is called by go generate and used to automatically generate pre-computed
// tables used to accelerate operations.
// +build ignore
package main
import (
"bytes"
"compress/zlib"
"encoding/base64"
"fmt"
"log"
"os"
"github.com/btcsuite/btcd/btcec"
)
func main() {
fi, err := os.Create("secp256k1.go")
if err != nil {
log.Fatal(err)
}
defer fi.Close()
// Compress the serialized byte points.
serialized := btcec.S256().SerializedBytePoints()
var compressed bytes.Buffer
w := zlib.NewWriter(&compressed)
if _, err := w.Write(serialized); err != nil {
fmt.Println(err)
os.Exit(1)
}
w.Close()
// Encode the compressed byte points with base64.
encoded := make([]byte, base64.StdEncoding.EncodedLen(compressed.Len()))
base64.StdEncoding.Encode(encoded, compressed.Bytes())
fmt.Fprintln(fi, "// Copyright (c) 2015 The btcsuite developers")
fmt.Fprintln(fi, "// Use of this source code is governed by an ISC")
fmt.Fprintln(fi, "// license that can be found in the LICENSE file.")
fmt.Fprintln(fi)
fmt.Fprintln(fi, "package btcec")
fmt.Fprintln(fi)
fmt.Fprintln(fi, "// Auto-generated file (see genprecomps.go)")
fmt.Fprintln(fi, "// DO NOT EDIT")
fmt.Fprintln(fi)
fmt.Fprintf(fi, "var secp256k1BytePoints = %q\n", string(encoded))
a1, b1, a2, b2 := btcec.S256().EndomorphismVectors()
fmt.Println("The following values are the computed linearly " +
"independent vectors needed to make use of the secp256k1 " +
"endomorphism:")
fmt.Printf("a1: %x\n", a1)
fmt.Printf("b1: %x\n", b1)
fmt.Printf("a2: %x\n", a2)
fmt.Printf("b2: %x\n", b2)
}

View File

@ -22,41 +22,40 @@ func isOdd(a *big.Int) bool {
return a.Bit(0) == 1 return a.Bit(0) == 1
} }
// decompressPoint decompresses a point on the given curve given the X point and // decompressPoint decompresses a point on the secp256k1 curve given the X point and
// the solution to use. // the solution to use.
func decompressPoint(curve *KoblitzCurve, x *big.Int, ybit bool) (*big.Int, error) { func decompressPoint(curve *KoblitzCurve, bigX *big.Int, ybit bool) (*big.Int, error) {
// TODO: This will probably only work for secp256k1 due to var x fieldVal
// optimizations. x.SetByteSlice(bigX.Bytes())
// Y = +-sqrt(x^3 + B) // Compute x^3 + B mod p.
x3 := new(big.Int).Mul(x, x) var x3 fieldVal
x3.Mul(x3, x) x3.SquareVal(&x).Mul(&x)
x3.Add(x3, curve.Params().B) x3.Add(curve.fieldB).Normalize()
x3.Mod(x3, curve.Params().P)
// Now calculate sqrt mod p of x^3 + B // Now calculate sqrt mod p of x^3 + B
// This code used to do a full sqrt based on tonelli/shanks, // This code used to do a full sqrt based on tonelli/shanks,
// but this was replaced by the algorithms referenced in // but this was replaced by the algorithms referenced in
// https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294 // https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
y := new(big.Int).Exp(x3, curve.QPlus1Div4(), curve.Params().P) var y fieldVal
y.SqrtVal(&x3).Normalize()
if ybit != isOdd(y) { if ybit != y.IsOdd() {
y.Sub(curve.Params().P, y) y.Negate(1).Normalize()
} }
// Check that y is a square root of x^3 + B. // Check that y is a square root of x^3 + B.
y2 := new(big.Int).Mul(y, y) var y2 fieldVal
y2.Mod(y2, curve.Params().P) y2.SquareVal(&y).Normalize()
if y2.Cmp(x3) != 0 { if !y2.Equals(&x3) {
return nil, fmt.Errorf("invalid square root") return nil, fmt.Errorf("invalid square root")
} }
// Verify that y-coord has expected parity. // Verify that y-coord has expected parity.
if ybit != isOdd(y) { if ybit != y.IsOdd() {
return nil, fmt.Errorf("ybit doesn't match oddness") return nil, fmt.Errorf("ybit doesn't match oddness")
} }
return y, nil return new(big.Int).SetBytes(y.Bytes()[:]), nil
} }
const ( const (
@ -102,6 +101,17 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err
if format == pubkeyHybrid && ybit != isOdd(pubkey.Y) { if format == pubkeyHybrid && ybit != isOdd(pubkey.Y) {
return nil, fmt.Errorf("ybit doesn't match oddness") return nil, fmt.Errorf("ybit doesn't match oddness")
} }
if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, fmt.Errorf("pubkey X parameter is >= to P")
}
if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, fmt.Errorf("pubkey Y parameter is >= to P")
}
if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
return nil, fmt.Errorf("pubkey isn't on secp256k1 curve")
}
case PubKeyBytesLenCompressed: case PubKeyBytesLenCompressed:
// format is 0x2 | solution, <X coordinate> // format is 0x2 | solution, <X coordinate>
// solution determines which solution of the curve we use. // solution determines which solution of the curve we use.
@ -115,20 +125,12 @@ func ParsePubKey(pubKeyStr []byte, curve *KoblitzCurve) (key *PublicKey, err err
if err != nil { if err != nil {
return nil, err return nil, err
} }
default: // wrong! default: // wrong!
return nil, fmt.Errorf("invalid pub key length %d", return nil, fmt.Errorf("invalid pub key length %d",
len(pubKeyStr)) len(pubKeyStr))
} }
if pubkey.X.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, fmt.Errorf("pubkey X parameter is >= to P")
}
if pubkey.Y.Cmp(pubkey.Curve.Params().P) >= 0 {
return nil, fmt.Errorf("pubkey Y parameter is >= to P")
}
if !pubkey.Curve.IsOnCurve(pubkey.X, pubkey.Y) {
return nil, fmt.Errorf("pubkey isn't on secp256k1 curve")
}
return &pubkey, nil return &pubkey, nil
} }

View File

@ -85,10 +85,10 @@ func (sig *Signature) IsEqual(otherSig *Signature) bool {
sig.S.Cmp(otherSig.S) == 0
}
// MinSigLen is the minimum length of a DER encoded signature and is when both R
// and S are 1 byte each.
// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
const MinSigLen = 8
func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) {
// Originally this code used encoding/asn1 in order to parse the
@ -103,7 +103,7 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
signature := &Signature{}
if len(sigStr) < MinSigLen {
return nil, errors.New("malformed signature: too short")
}
// 0x30
@ -118,7 +118,7 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
// siglen should be less than the entire message and greater than
// the minimal message size.
if int(siglen+2) > len(sigStr) || int(siglen+2) < MinSigLen {
return nil, errors.New("malformed signature: bad length")
}
// trim the slice we're working on so we only look at what matters.
@ -276,7 +276,7 @@ func hashToInt(hash []byte, c elliptic.Curve) *big.Int {
}
// recoverKeyFromSignature recovers a public key from the signature "sig" on the
// given message hash "msg". Based on the algorithm found in section 4.1.6 of
// SEC 1 Ver 2.0, page 47-48 (53 and 54 in the pdf). This performs the details
// in the inner loop in Step 1. The counter provided is actually the j parameter
// of the loop * 2 - on the first iteration of j we do the R case, else the -R

View File

@ -638,6 +638,7 @@ func NewSearchRawTransactionsCmd(address string, verbose, skip, count *int, vinE
type SendRawTransactionCmd struct {
HexTx string
AllowHighFees *bool `jsonrpcdefault:"false"`
MaxFeeRate *int32
}
// NewSendRawTransactionCmd returns a new instance which can be used to issue a
@ -652,6 +653,17 @@ func NewSendRawTransactionCmd(hexTx string, allowHighFees *bool) *SendRawTransac
}
}
// NewBitcoindSendRawTransactionCmd returns a new instance which can be used to
// issue a sendrawtransaction JSON-RPC command to a bitcoind node.
//
// A 0 maxFeeRate indicates that a maximum fee rate won't be enforced.
func NewBitcoindSendRawTransactionCmd(hexTx string, maxFeeRate int32) *SendRawTransactionCmd {
return &SendRawTransactionCmd{
HexTx: hexTx,
MaxFeeRate: &maxFeeRate,
}
}
// SetGenerateCmd defines the setgenerate JSON-RPC command.
type SetGenerateCmd struct {
Generate bool


@ -90,28 +90,61 @@ type SoftForkDescription struct {
// Bip9SoftForkDescription describes the current state of a defined BIP0009
// version bits soft-fork.
type Bip9SoftForkDescription struct {
Status string `json:"status"`
Bit uint8 `json:"bit"`
StartTime1 int64 `json:"startTime"`
StartTime2 int64 `json:"start_time"`
Timeout int64 `json:"timeout"`
Since int32 `json:"since"`
}
// StartTime returns the starting time of the softfork as a Unix epoch.
func (d *Bip9SoftForkDescription) StartTime() int64 {
if d.StartTime1 != 0 {
return d.StartTime1
}
return d.StartTime2
}
// SoftForks describes the current softforks enabled by the backend. Softforks
// activated through BIP9 are grouped together separate from any other softforks
// with different activation types.
type SoftForks struct {
SoftForks []*SoftForkDescription `json:"softforks"`
Bip9SoftForks map[string]*Bip9SoftForkDescription `json:"bip9_softforks"`
}
// UnifiedSoftFork describes a softfork in a general manner, irrespective of
// its activation type. This was a format introduced by bitcoind v0.19.0
type UnifiedSoftFork struct {
Type string `json:"type"`
BIP9SoftForkDescription *Bip9SoftForkDescription `json:"bip9"`
Height int32 `json:"height"`
Active bool `json:"active"`
}
// UnifiedSoftForks describes the current softforks enabled by the backend
// in a unified manner, i.e., softforks with different activation types are
// grouped together. This was a format introduced by bitcoind v0.19.0
type UnifiedSoftForks struct {
SoftForks map[string]*UnifiedSoftFork `json:"softforks"`
} }
// GetBlockChainInfoResult models the data returned from the getblockchaininfo
// command.
type GetBlockChainInfoResult struct {
Chain string `json:"chain"`
Blocks int32 `json:"blocks"`
Headers int32 `json:"headers"`
BestBlockHash string `json:"bestblockhash"`
Difficulty float64 `json:"difficulty"`
MedianTime int64 `json:"mediantime"`
VerificationProgress float64 `json:"verificationprogress,omitempty"`
Pruned bool `json:"pruned"`
PruneHeight int32 `json:"pruneheight,omitempty"`
ChainWork string `json:"chainwork,omitempty"`
*SoftForks
*UnifiedSoftForks
}
// GetBlockTemplateResultTx models the transactions field of the
@ -265,6 +298,7 @@ type GetPeerInfoResult struct {
type GetRawMempoolVerboseResult struct {
Size int32 `json:"size"`
Vsize int32 `json:"vsize"`
Weight int32 `json:"weight"`
Fee float64 `json:"fee"`
Time int64 `json:"time"`
Height int64 `json:"height"`
@ -505,6 +539,7 @@ type TxRawResult struct {
Hash string `json:"hash,omitempty"`
Size int32 `json:"size,omitempty"`
Vsize int32 `json:"vsize,omitempty"`
Weight int32 `json:"weight,omitempty"`
Version int32 `json:"version"`
LockTime uint32 `json:"locktime"`
Vin []Vin `json:"vin"`
@ -523,6 +558,7 @@ type SearchRawTransactionsResult struct {
Hash string `json:"hash"`
Size string `json:"size"`
Vsize string `json:"vsize"`
Weight string `json:"weight"`
Version int32 `json:"version"`
LockTime uint32 `json:"locktime"`
Vin []VinPrevOut `json:"vin"`
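The StartTime1/StartTime2 pair above exists because backends have emitted both "startTime" and "start_time" for the same value; a small sketch (assuming the standard github.com/btcsuite/btcd/btcjson import path) showing that StartTime() tolerates either spelling:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/btcsuite/btcd/btcjson"
)

func main() {
	// The same softfork reported with the two key spellings different
	// backend versions emit; StartTime() returns whichever field was set.
	inputs := [][]byte{
		[]byte(`{"status":"active","bit":0,"startTime":1000,"timeout":2000,"since":3}`),
		[]byte(`{"status":"active","bit":0,"start_time":1000,"timeout":2000,"since":3}`),
	}
	for _, raw := range inputs {
		var d btcjson.Bip9SoftForkDescription
		if err := json.Unmarshal(raw, &d); err != nil {
			panic(err)
		}
		fmt.Println("start time:", d.StartTime())
	}
}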


@ -76,6 +76,9 @@ const (
ErrRPCInvalidTxVout RPCErrorCode = -5
ErrRPCRawTxString RPCErrorCode = -32602
ErrRPCDecodeHexString RPCErrorCode = -22
ErrRPCTxError RPCErrorCode = -25
ErrRPCTxRejected RPCErrorCode = -26
ErrRPCTxAlreadyInChain RPCErrorCode = -27
)
// Errors that are specific to btcd. // Errors that are specific to btcd.

View File

@ -272,6 +272,13 @@ var MainNetParams = Params{
{343185, newHashFromStr("0000000000000000072b8bf361d01a6ba7d445dd024203fafc78768ed4368554")}, {343185, newHashFromStr("0000000000000000072b8bf361d01a6ba7d445dd024203fafc78768ed4368554")},
{352940, newHashFromStr("000000000000000010755df42dba556bb72be6a32f3ce0b6941ce4430152c9ff")}, {352940, newHashFromStr("000000000000000010755df42dba556bb72be6a32f3ce0b6941ce4430152c9ff")},
{382320, newHashFromStr("00000000000000000a8dc6ed5b133d0eb2fd6af56203e4159789b092defd8ab2")}, {382320, newHashFromStr("00000000000000000a8dc6ed5b133d0eb2fd6af56203e4159789b092defd8ab2")},
{400000, newHashFromStr("000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f")},
{430000, newHashFromStr("000000000000000001868b2bb3a285f3cc6b33ea234eb70facf4dcdf22186b87")},
{460000, newHashFromStr("000000000000000000ef751bbce8e744ad303c47ece06c8d863e4d417efc258c")},
{490000, newHashFromStr("000000000000000000de069137b17b8d5a3dfbd5b145b2dcfb203f15d0c4de90")},
{520000, newHashFromStr("0000000000000000000d26984c0229c9f6962dc74db0a6d525f2f1640396f69c")},
{550000, newHashFromStr("000000000000000000223b7a2298fb1c6c75fb0efc28a4c56853ff4112ec6bc9")},
{560000, newHashFromStr("0000000000000000002c7b276daf6efb2b6aa68e2ce3be67ef925b3264ae7122")},
}, },
// Consensus rule change deployments. // Consensus rule change deployments.
@ -439,6 +446,9 @@ var TestNet3Params = Params{
{800010, newHashFromStr("000000000017ed35296433190b6829db01e657d80631d43f5983fa403bfdb4c1")}, {800010, newHashFromStr("000000000017ed35296433190b6829db01e657d80631d43f5983fa403bfdb4c1")},
{900000, newHashFromStr("0000000000356f8d8924556e765b7a94aaebc6b5c8685dcfa2b1ee8b41acd89b")}, {900000, newHashFromStr("0000000000356f8d8924556e765b7a94aaebc6b5c8685dcfa2b1ee8b41acd89b")},
{1000007, newHashFromStr("00000000001ccb893d8a1f25b70ad173ce955e5f50124261bbbc50379a612ddf")}, {1000007, newHashFromStr("00000000001ccb893d8a1f25b70ad173ce955e5f50124261bbbc50379a612ddf")},
{1100007, newHashFromStr("00000000000abc7b2cd18768ab3dee20857326a818d1946ed6796f42d66dd1e8")},
{1200007, newHashFromStr("00000000000004f2dc41845771909db57e04191714ed8c963f7e56713a7b6cea")},
{1300007, newHashFromStr("0000000072eab69d54df75107c052b26b0395b44f77578184293bf1bb1dbd9fa")},
}, },
// Consensus rule change deployments. // Consensus rule change deployments.

View File

@ -253,16 +253,15 @@ func (c *Client) GetDifficulty() (float64, error) {
// FutureGetBlockChainInfoResult is a promise to deliver the result of a // FutureGetBlockChainInfoResult is a promise to deliver the result of a
// GetBlockChainInfoAsync RPC invocation (or an applicable error). // GetBlockChainInfoAsync RPC invocation (or an applicable error).
type FutureGetBlockChainInfoResult chan *response type FutureGetBlockChainInfoResult struct {
client *Client
// Receive waits for the response promised by the future and returns chain info Response chan *response
// result provided by the server. }
func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResult, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// unmarshalPartialGetBlockChainInfoResult unmarshals the response into an
// instance of GetBlockChainInfoResult without populating the SoftForks and
// UnifiedSoftForks fields.
func unmarshalPartialGetBlockChainInfoResult(res []byte) (*btcjson.GetBlockChainInfoResult, error) {
var chainInfo btcjson.GetBlockChainInfoResult var chainInfo btcjson.GetBlockChainInfoResult
if err := json.Unmarshal(res, &chainInfo); err != nil { if err := json.Unmarshal(res, &chainInfo); err != nil {
return nil, err return nil, err
@ -270,6 +269,59 @@ func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResu
return &chainInfo, nil return &chainInfo, nil
} }
// unmarshalGetBlockChainInfoResultSoftForks properly unmarshals the softforks
// related fields into the GetBlockChainInfoResult instance.
func unmarshalGetBlockChainInfoResultSoftForks(chainInfo *btcjson.GetBlockChainInfoResult,
version BackendVersion, res []byte) error {
switch version {
// Versions of bitcoind on or after v0.19.0 use the unified format.
case BitcoindPost19:
var softForks btcjson.UnifiedSoftForks
if err := json.Unmarshal(res, &softForks); err != nil {
return err
}
chainInfo.UnifiedSoftForks = &softForks
// All other versions use the original format.
default:
var softForks btcjson.SoftForks
if err := json.Unmarshal(res, &softForks); err != nil {
return err
}
chainInfo.SoftForks = &softForks
}
return nil
}
// Receive waits for the response promised by the future and returns chain info
// result provided by the server.
func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResult, error) {
res, err := receiveFuture(r.Response)
if err != nil {
return nil, err
}
chainInfo, err := unmarshalPartialGetBlockChainInfoResult(res)
if err != nil {
return nil, err
}
// Inspect the version to determine how we'll need to parse the
// softforks from the response.
version, err := r.client.BackendVersion()
if err != nil {
return nil, err
}
err = unmarshalGetBlockChainInfoResultSoftForks(chainInfo, version, res)
if err != nil {
return nil, err
}
return chainInfo, nil
}
// GetBlockChainInfoAsync returns an instance of a type that can be used to get // GetBlockChainInfoAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function // the result of the RPC at some future time by invoking the Receive function
// on the returned instance. // on the returned instance.
@ -277,7 +329,10 @@ func (r FutureGetBlockChainInfoResult) Receive() (*btcjson.GetBlockChainInfoResu
// See GetBlockChainInfo for the blocking version and more details. // See GetBlockChainInfo for the blocking version and more details.
func (c *Client) GetBlockChainInfoAsync() FutureGetBlockChainInfoResult { func (c *Client) GetBlockChainInfoAsync() FutureGetBlockChainInfoResult {
cmd := btcjson.NewGetBlockChainInfoCmd() cmd := btcjson.NewGetBlockChainInfoCmd()
return c.sendCmd(cmd) return FutureGetBlockChainInfoResult{
client: c,
Response: c.sendCmd(cmd),
}
} }
// GetBlockChainInfo returns information related to the processing state of // GetBlockChainInfo returns information related to the processing state of

View File

@ -19,6 +19,7 @@ import (
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -103,6 +104,22 @@ type jsonRequest struct {
responseChan chan *response responseChan chan *response
} }
// BackendVersion represents the version of the backend the client is currently
// connected to.
type BackendVersion uint8
const (
// BitcoindPre19 represents a bitcoind version before 0.19.0.
BitcoindPre19 BackendVersion = iota
// BitcoindPost19 represents a bitcoind version equal to or greater than
// 0.19.0.
BitcoindPost19
// Btcd represents a catch-all btcd version.
Btcd
)
// Client represents a Bitcoin RPC client which allows easy access to the // Client represents a Bitcoin RPC client which allows easy access to the
// various RPC methods available on a Bitcoin RPC server. Each of the wrapper // various RPC methods available on a Bitcoin RPC server. Each of the wrapper
// functions handle the details of converting the passed and return types to and // functions handle the details of converting the passed and return types to and
@ -129,6 +146,11 @@ type Client struct {
// POST mode. // POST mode.
httpClient *http.Client httpClient *http.Client
// backendVersion is the version of the backend the client is currently
// connected to. This should be retrieved through GetVersion.
backendVersionMu sync.Mutex
backendVersion *BackendVersion
// mtx is a mutex to protect access to connection related fields. // mtx is a mutex to protect access to connection related fields.
mtx sync.Mutex mtx sync.Mutex
@ -659,6 +681,12 @@ out:
log.Infof("Reestablished connection to RPC server %s", log.Infof("Reestablished connection to RPC server %s",
c.config.Host) c.config.Host)
// Reset the version in case the backend was
// disconnected due to an upgrade.
c.backendVersionMu.Lock()
c.backendVersion = nil
c.backendVersionMu.Unlock()
// Reset the connection state and signal the reconnect // Reset the connection state and signal the reconnect
// has happened. // has happened.
c.wsConn = wsConn c.wsConn = wsConn
@ -1332,3 +1360,84 @@ func (c *Client) Connect(tries int) error {
// All connection attempts failed, so return the last error. // All connection attempts failed, so return the last error.
return err return err
} }
const (
// bitcoind19Str is the string representation of bitcoind v0.19.0.
bitcoind19Str = "0.19.0"
// bitcoindVersionPrefix specifies the prefix included in every bitcoind
// version exposed through GetNetworkInfo.
bitcoindVersionPrefix = "/Satoshi:"
// bitcoindVersionSuffix specifies the suffix included in every bitcoind
// version exposed through GetNetworkInfo.
bitcoindVersionSuffix = "/"
)
// parseBitcoindVersion parses the bitcoind version from its string
// representation.
func parseBitcoindVersion(version string) BackendVersion {
// Trim the version of its prefix and suffix to determine the
// appropriate version number.
version = strings.TrimPrefix(
strings.TrimSuffix(version, bitcoindVersionSuffix),
bitcoindVersionPrefix,
)
switch {
case version < bitcoind19Str:
return BitcoindPre19
default:
return BitcoindPost19
}
}
// BackendVersion retrieves the version of the backend the client is currently
// connected to.
func (c *Client) BackendVersion() (BackendVersion, error) {
c.backendVersionMu.Lock()
defer c.backendVersionMu.Unlock()
if c.backendVersion != nil {
return *c.backendVersion, nil
}
// We'll start by calling GetInfo. This method doesn't exist for
// bitcoind nodes as of v0.16.0, so we'll assume the client is connected
// to a btcd backend if it does exist.
info, err := c.GetInfo()
switch err := err.(type) {
// Parse the btcd version and cache it.
case nil:
log.Debugf("Detected btcd version: %v", info.Version)
version := Btcd
c.backendVersion = &version
return *c.backendVersion, nil
// Inspect the RPC error to ensure the method was not found, otherwise
// we actually ran into an error.
case *btcjson.RPCError:
if err.Code != btcjson.ErrRPCMethodNotFound.Code {
return 0, fmt.Errorf("unable to detect btcd version: "+
"%v", err)
}
default:
return 0, fmt.Errorf("unable to detect btcd version: %v", err)
}
// Since the GetInfo method was not found, we assume the client is
// connected to a bitcoind backend, which exposes its version through
// GetNetworkInfo.
networkInfo, err := c.GetNetworkInfo()
if err != nil {
return 0, fmt.Errorf("unable to detect bitcoind version: %v", err)
}
// Parse the bitcoind version and cache it.
log.Debugf("Detected bitcoind version: %v", networkInfo.SubVersion)
version := parseBitcoindVersion(networkInfo.SubVersion)
c.backendVersion = &version
return *c.backendVersion, nil
}

View File

@ -244,6 +244,43 @@ func (c *Client) Ping() error {
return c.PingAsync().Receive()
}
// FutureGetNetworkInfoResult is a future promise to deliver the result of a
// GetNetworkInfoAsync RPC invocation (or an applicable error).
type FutureGetNetworkInfoResult chan *response
// Receive waits for the response promised by the future and returns data about
// the current network.
func (r FutureGetNetworkInfoResult) Receive() (*btcjson.GetNetworkInfoResult, error) {
res, err := receiveFuture(r)
if err != nil {
return nil, err
}
// Unmarshal result as an array of getpeerinfo result objects.
var networkInfo btcjson.GetNetworkInfoResult
err = json.Unmarshal(res, &networkInfo)
if err != nil {
return nil, err
}
return &networkInfo, nil
}
// GetNetworkInfoAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See GetNetworkInfo for the blocking version and more details.
func (c *Client) GetNetworkInfoAsync() FutureGetNetworkInfoResult {
cmd := btcjson.NewGetNetworkInfoCmd()
return c.sendCmd(cmd)
}
// GetNetworkInfo returns data about the current network.
func (c *Client) GetNetworkInfo() (*btcjson.GetNetworkInfoResult, error) {
return c.GetNetworkInfoAsync().Receive()
}
// FutureGetPeerInfoResult is a future promise to deliver the result of a
// GetPeerInfoAsync RPC invocation (or an applicable error).
type FutureGetPeerInfoResult chan *response
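A hedged usage sketch of the new GetNetworkInfo call together with BackendVersion; the connection parameters are placeholders, and rpcclient.New/ConnConfig are the usual btcd rpcclient entry points:

package main

import (
	"log"

	"github.com/btcsuite/btcd/rpcclient"
)

func main() {
	// Placeholder credentials/host; HTTP POST mode without TLS matches a
	// typical local bitcoind setup.
	client, err := rpcclient.New(&rpcclient.ConnConfig{
		Host:         "127.0.0.1:8332",
		User:         "rpcuser",
		Pass:         "rpcpass",
		HTTPPostMode: true,
		DisableTLS:   true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Shutdown()

	info, err := client.GetNetworkInfo()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("subversion:", info.SubVersion)

	// BackendVersion caches its GetInfo/GetNetworkInfo probe after the
	// first call.
	version, err := client.BackendVersion()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("backend version enum:", version)
}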

View File

@ -15,6 +15,12 @@ import (
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
) )
const (
// defaultMaxFeeRate is the default maximum fee rate in sat/KB enforced
// by bitcoind v0.19.0 or after for transaction broadcast.
defaultMaxFeeRate = btcutil.SatoshiPerBitcoin / 10
)
// SigHashType enumerates the available signature hashing types that the
// SignRawTransaction function accepts.
type SigHashType string
@ -296,7 +302,31 @@ func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) Fut
txHex = hex.EncodeToString(buf.Bytes())
}
// Due to differences in the sendrawtransaction API for different
// backends, we'll need to inspect our version and construct the
// appropriate request.
version, err := c.BackendVersion()
if err != nil {
return newFutureError(err)
}
var cmd *btcjson.SendRawTransactionCmd
switch version {
// Starting from bitcoind v0.19.0, the MaxFeeRate field should be used.
case BitcoindPost19:
// Using a 0 MaxFeeRate is interpreted as a maximum fee rate not
// being enforced by bitcoind.
var maxFeeRate int32
if !allowHighFees {
maxFeeRate = defaultMaxFeeRate
}
cmd = btcjson.NewBitcoindSendRawTransactionCmd(txHex, maxFeeRate)
// Otherwise, use the AllowHighFees field.
default:
cmd = btcjson.NewSendRawTransactionCmd(txHex, &allowHighFees)
}
return c.sendCmd(cmd)
}
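To make the version split above concrete, a small sketch building both request variants with the btcjson constructors shown earlier in this diff; the transaction hex is a placeholder:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcjson"
)

func main() {
	txHex := "0100..." // placeholder raw transaction hex

	// bitcoind >= 0.19: second positional parameter is a maximum fee rate;
	// 0 means no cap is enforced.
	newStyle := btcjson.NewBitcoindSendRawTransactionCmd(txHex, 0)

	// btcd and older bitcoind: second positional parameter is allowhighfees.
	allowHighFees := false
	oldStyle := btcjson.NewSendRawTransactionCmd(txHex, &allowHighFees)

	fmt.Printf("%+v\n%+v\n", newStyle, oldStyle)
}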


@ -321,9 +321,13 @@ func WriteMessageWithEncodingN(w io.Writer, msg Message, pver uint32,
return totalBytes, err
}
// Only write the payload if there is one, e.g., verack messages don't
// have one.
if len(payload) > 0 {
n, err = w.Write(payload)
totalBytes += n
}
return totalBytes, err
}

View File

@ -188,8 +188,8 @@ func DecodeAddress(addr string, defaultNet *chaincfg.Params) (Address, error) {
}
switch len(decoded) {
case ripemd160.Size: // P2PKH or P2SH
isP2PKH := netID == defaultNet.PubKeyHashAddrID
isP2SH := netID == defaultNet.ScriptHashAddrID
switch hash160 := decoded; {
case isP2PKH && isP2SH:
return nil, ErrAddressCollision

View File

@ -1,79 +0,0 @@
// Copyright (c) 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
//+build ignore
package main
import (
"bytes"
"io"
"log"
"os"
"strconv"
)
var (
start = []byte(`// Copyright (c) 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
// AUTOGENERATED by genalphabet.go; do not edit.
package base58
const (
// alphabet is the modified base58 alphabet used by Bitcoin.
alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
alphabetIdx0 = '1'
)
var b58 = [256]byte{`)
end = []byte(`}`)
alphabet = []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
tab = []byte("\t")
invalid = []byte("255")
comma = []byte(",")
space = []byte(" ")
nl = []byte("\n")
)
func write(w io.Writer, b []byte) {
_, err := w.Write(b)
if err != nil {
log.Fatal(err)
}
}
func main() {
fi, err := os.Create("alphabet.go")
if err != nil {
log.Fatal(err)
}
defer fi.Close()
write(fi, start)
write(fi, nl)
for i := byte(0); i < 32; i++ {
write(fi, tab)
for j := byte(0); j < 8; j++ {
idx := bytes.IndexByte(alphabet, i*8+j)
if idx == -1 {
write(fi, invalid)
} else {
write(fi, strconv.AppendInt(nil, int64(idx), 10))
}
write(fi, comma)
if j != 7 {
write(fi, space)
}
}
write(fi, nl)
}
write(fi, end)
write(fi, nl)
}

8
vendor/github.com/cespare/xxhash/v2/.travis.yml generated vendored Normal file

@ -0,0 +1,8 @@
language: go
go:
- "1.x"
- master
env:
- TAGS=""
- TAGS="-tags purego"
script: go test $TAGS -v ./...

22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file

@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Some files were not shown because too many files have changed in this diff