Merge pull request #178 from StephenButtolph/lets-go

Lets go
Stephen Buttolph 2020-05-25 23:35:46 -04:00 committed by GitHub
commit 60668c3a91
55 changed files with 3820 additions and 3317 deletions

6
.ci/runscript_linux.sh Executable file

@ -0,0 +1,6 @@
#!/bin/bash
set -ev
docker run --rm -v "$PWD:$GECKO_HOME" $DOCKERHUB_REPO:$COMMIT bash "$GECKO_HOME/scripts/build_test.sh"
docker run --rm -v "$PWD:$GECKO_HOME" $DOCKERHUB_REPO:$COMMIT bash "$GECKO_HOME/scripts/build.sh"

9
.ci/runscript_osx.sh Executable file

@ -0,0 +1,9 @@
#!/bin/bash
set -ev
go get -d -t -v github.com/ava-labs/gecko/...
cd $GOPATH/src/github.com/ava-labs/gecko
./scripts/build_test.sh
./scripts/build.sh


@ -1,5 +1,13 @@
dist: bionic
language: go
go:
- 1.13.x
jobs:
include:
- os: linux
dist: bionic
- os: osx
osx_image: xcode11.4
services:
- docker
env:
@ -10,7 +18,16 @@ env:
- DOCKERHUB_REPO=avaplatform/gecko
- secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw="
- secure: "zfTm7tJBYiPYrli76d4Ep6Lc2TJQ8Xv//+7OoqTA/aIf6YJDHe05f2GFTWAHG2iOIix/yjwHYwnhyIW66eWPb+Ujejnmh4eXlYZFufX9J5jUpDpbFu/+ybOLgE1Tmr0je0ycneSMe/NAaS74nWU1wnP34/cEE4sYL7TJyhwbeEtgz3cbSWwkpdvHFbXCjSOA196jdIYYUwsnqU9yycAG+2WUSk3DHHzzdtMrh/UOH2r1VFyp5US0zmbW90WkWX+o3TIlzZJgTUGQRNnWKq95Mrh1EQotxgL6CJ8NkfY4bVAGAhusPjdjscJsHxfY93WRMH64TzPYYp0zdibatH0ztyhnZPXVKqv+AIIVTEW+xWv5V18kTQAd1uBW103NFacbgXhIGWtbFcN9g1+ws29HROMclYs7ci6+72Qnq0eL55huqSyFx6+InhYwn+LfJmaBcGW4wx1umdp505M0obZ4ghlyn6b0pDYmqsu1XyBC3mjUTFbwlQmWE2Fize4L5o+DdH4ZDc9japF9ntxIMvO+b3nOicr7tplY2AGp61bB89o3dUAFlN5mDaEJotiAuFk5mo244rY1FjSzyGiKkA3M9TkTIbgcbN098hOJoMCYybH7yqiPwNnZiFvUuYjHuC5D1kIYBWuqqO0iVcbIZn0rV2jyzbVFlhFVk2clTZGhkrY="
before_install: .ci/before_install.sh
install: DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT .
script: docker run --rm -v "$PWD:$GECKO_HOME" $DOCKERHUB_REPO:$COMMIT bash "$GECKO_HOME/scripts/build_test.sh"
after_success: .ci/after_success.sh
before_install:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi
install:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT . ; fi
script:
- if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; fi
#Need to push to docker hub only from one build
after_success:
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/after_success.sh; fi


@ -2,8 +2,6 @@
FROM golang:1.13.4-buster
RUN apt-get update && apt-get install -y libssl-dev libuv1-dev curl cmake
RUN mkdir -p /go/src/github.com/ava-labs
WORKDIR $GOPATH/src/github.com/ava-labs/


@ -1,4 +1,4 @@
# gecko
# Gecko
## Installation
@ -11,23 +11,6 @@ AVA is an incredibly lightweight protocol, so the minimum computer requirements
### Native Install
Ubuntu users need the following libraries:
* libssl-dev
* libuv1-dev
* cmake
* make
* curl
* g++
Install the libraries:
```sh
sudo apt-get install libssl-dev libuv1-dev cmake make curl g++
```
#### Downloading Gecko Source Code
Clone the Gecko repository:
```sh
@ -43,7 +26,7 @@ Build Gecko using the build script:
./scripts/build.sh
```
The Gecko binary, named `ava`, is in the `build` directory.
The Gecko binary, named `ava`, is in the `build` directory.
### Docker Install
@ -72,10 +55,11 @@ You may see a few warnings. These are OK.
You can use `Ctrl + C` to kill the node.
If you want to specify your log level, set `--log-level` to one of the following values, in decreasing order of verbosity.
* `--log-level=verbo`
* `--log-level=debug`
* `--log-level=info`
* `--log-level=warn`
* `--log-level=error`
* `--log-level=fatal`
* `--log-level=off`
- `--log-level=verbo`
- `--log-level=debug`
- `--log-level=info`
- `--log-level=warn`
- `--log-level=error`
- `--log-level=fatal`
- `--log-level=off`
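For example, a quick usage sketch (assuming the binary was built to `build/ava` as described above):

```sh
# Run the node with more verbose logging than the default
./build/ava --log-level=debug
```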


@ -226,3 +226,17 @@ func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, reply *A
reply.Success = true
return service.httpServer.AddAliasesWithReadLock("bc/"+chainID.String(), "bc/"+args.Alias)
}
// StacktraceArgs are the arguments for calling Stacktrace
type StacktraceArgs struct{}
// StacktraceReply are the results from calling Stacktrace
type StacktraceReply struct {
Stacktrace string `json:"stacktrace"`
}
// Stacktrace returns the current global stacktrace
func (service *Admin) Stacktrace(_ *http.Request, _ *StacktraceArgs, reply *StacktraceReply) error {
reply.Stacktrace = logging.Stacktrace{Global: true}.String()
return nil
}
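As an aside, here is a hedged sketch of calling the new endpoint over JSON-RPC; the `/ext/admin` path, port `9650`, and the `admin.stacktrace` method name are assumptions about how the Admin service is mounted, not something this diff confirms:

```sh
curl -X POST -H 'content-type: application/json' --data '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "admin.stacktrace",
    "params": {}
}' 127.0.0.1:9650/ext/admin
```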


@ -51,14 +51,14 @@ func (c gosundheitCheck) Name() string { return c.name }
// returning the results
func (c gosundheitCheck) Execute() (interface{}, error) { return c.checkFn() }
// heartbeater provides a getter to the most recently observed heartbeat
type heartbeater interface {
// Heartbeater provides a getter to the most recently observed heartbeat
type Heartbeater interface {
GetHeartbeat() int64
}
// HeartbeatCheckFn returns a CheckFn that checks the given heartbeater has
// pulsed within the given duration
func HeartbeatCheckFn(hb heartbeater, max time.Duration) CheckFn {
func HeartbeatCheckFn(hb Heartbeater, max time.Duration) CheckFn {
return func() (data interface{}, err error) {
// Get the heartbeat and create a data set to return to the caller
hb := hb.GetHeartbeat()


@ -41,7 +41,7 @@ func (h *Health) Handler() *common.HTTPHandler {
// RegisterHeartbeat adds a check with default options and a CheckFn that checks
// the given heartbeater for a recent heartbeat
func (h *Health) RegisterHeartbeat(name string, hb heartbeater, max time.Duration) error {
func (h *Health) RegisterHeartbeat(name string, hb Heartbeater, max time.Duration) error {
return h.RegisterCheckFunc(name, HeartbeatCheckFn(hb, max))
}
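To illustrate the newly exported `Heartbeater` interface, here is a minimal self-contained sketch; `ticker` is a hypothetical type (in this PR the real Heartbeater is the new `network.Network`, whose `GetHeartbeat` appears later in this diff):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// ticker is a hypothetical Heartbeater: anything exposing GetHeartbeat() int64.
type ticker struct{ last int64 }

func (t *ticker) GetHeartbeat() int64 { return atomic.LoadInt64(&t.last) }
func (t *ticker) beat()               { atomic.StoreInt64(&t.last, time.Now().Unix()) }

func main() {
	tk := &ticker{}
	tk.beat()

	// health.RegisterHeartbeat("network", tk, 5*time.Minute) would register a
	// check that fails once this value is more than five minutes old.
	fmt.Println("last heartbeat:", tk.GetHeartbeat())
}
```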
@ -73,8 +73,8 @@ type GetLivenessReply struct {
}
// GetLiveness returns a summation of the health of the node
func (service *Health) GetLiveness(_ *http.Request, _ *GetLivenessArgs, reply *GetLivenessReply) error {
service.log.Debug("Health: GetLiveness called")
reply.Checks, reply.Healthy = service.health.Results()
func (h *Health) GetLiveness(_ *http.Request, _ *GetLivenessArgs, reply *GetLivenessReply) error {
h.log.Debug("Health: GetLiveness called")
reply.Checks, reply.Healthy = h.health.Results()
return nil
}


@ -3,9 +3,44 @@
package chains
import "github.com/ava-labs/gecko/snow/networking"
import (
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils/math"
)
// Awaiter can await connections to be connected
type Awaiter interface {
AwaitConnections(awaiting *networking.AwaitingConnections)
type awaiter struct {
vdrs validators.Set
reqWeight uint64
weight uint64
ctx *snow.Context
eng common.Engine
}
func (a *awaiter) Connected(vdrID ids.ShortID) bool {
vdr, ok := a.vdrs.Get(vdrID)
if !ok {
return false
}
weight, err := math.Add64(vdr.Weight(), a.weight)
a.weight = weight
if err == nil && a.weight < a.reqWeight {
return false
}
go func() {
a.ctx.Lock.Lock()
defer a.ctx.Lock.Unlock()
a.eng.Startup()
}()
return true
}
func (a *awaiter) Disconnected(vdrID ids.ShortID) bool {
if vdr, ok := a.vdrs.Get(vdrID); ok {
a.weight, _ = math.Sub64(vdr.Weight(), a.weight)
}
return false
}


@ -13,13 +13,13 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/consensus/snowball"
"github.com/ava-labs/gecko/snow/engine/avalanche"
"github.com/ava-labs/gecko/snow/engine/avalanche/state"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/snow/engine/common/queue"
"github.com/ava-labs/gecko/snow/networking"
"github.com/ava-labs/gecko/snow/networking/handler"
"github.com/ava-labs/gecko/snow/networking/router"
"github.com/ava-labs/gecko/snow/networking/sender"
@ -102,16 +102,15 @@ type manager struct {
decisionEvents *triggers.EventDispatcher
consensusEvents *triggers.EventDispatcher
db database.Database
chainRouter router.Router // Routes incoming messages to the appropriate chain
sender sender.ExternalSender // Sends consensus messages to other validators
timeoutManager *timeout.Manager // Manages request timeouts when sending messages to other validators
consensusParams avacon.Parameters // The consensus parameters (alpha, beta, etc.) for new chains
validators validators.Manager // Validators validating on this chain
registrants []Registrant // Those notified when a chain is created
nodeID ids.ShortID // The ID of this node
networkID uint32 // ID of the network this node is connected to
awaiter Awaiter // Waits for required connections before running bootstrapping
server *api.Server // Handles HTTP API calls
chainRouter router.Router // Routes incoming messages to the appropriate chain
net network.Network // Sends consensus messages to other validators
timeoutManager *timeout.Manager // Manages request timeouts when sending messages to other validators
consensusParams avacon.Parameters // The consensus parameters (alpha, beta, etc.) for new chains
validators validators.Manager // Validators validating on this chain
registrants []Registrant // Those notified when a chain is created
nodeID ids.ShortID // The ID of this node
networkID uint32 // ID of the network this node is connected to
server *api.Server // Handles HTTP API calls
keystore *keystore.Keystore
sharedMemory *atomic.SharedMemory
@ -133,12 +132,11 @@ func New(
consensusEvents *triggers.EventDispatcher,
db database.Database,
router router.Router,
sender sender.ExternalSender,
net network.Network,
consensusParams avacon.Parameters,
validators validators.Manager,
nodeID ids.ShortID,
networkID uint32,
awaiter Awaiter,
server *api.Server,
keystore *keystore.Keystore,
sharedMemory *atomic.SharedMemory,
@ -158,13 +156,12 @@ func New(
consensusEvents: consensusEvents,
db: db,
chainRouter: router,
sender: sender,
net: net,
timeoutManager: &timeoutManager,
consensusParams: consensusParams,
validators: validators,
nodeID: nodeID,
networkID: networkID,
awaiter: awaiter,
server: server,
keystore: keystore,
sharedMemory: sharedMemory,
@ -390,7 +387,7 @@ func (m *manager) createAvalancheChain(
// Passes messages from the consensus engine to the network
sender := sender.Sender{}
sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager)
sender.Initialize(ctx, m.net, m.chainRouter, m.timeoutManager)
// The engine handles consensus
engine := avaeng.Transitive{
@ -438,17 +435,17 @@ func (m *manager) createAvalancheChain(
m.chainRouter.AddChain(handler)
go ctx.Log.RecoverAndPanic(handler.Dispatch)
awaiting := &networking.AwaitingConnections{
Requested: beacons,
WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to
Finish: func() {
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
engine.Startup()
},
reqWeight := (3*bootstrapWeight + 3) / 4
if reqWeight == 0 {
engine.Startup()
} else {
go m.net.RegisterHandler(&awaiter{
vdrs: beacons,
reqWeight: reqWeight, // 75% must be connected to
ctx: ctx,
eng: &engine,
})
}
m.awaiter.AwaitConnections(awaiting)
return nil
}
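A side note on the threshold arithmetic above: for non-negative integers, `(3*w + 3) / 4` is the ceiling of `3w/4`, so bootstrapping starts only once at least 75% of the bootstrap weight (rounded up) is connected, and the `reqWeight == 0` branch covers the case of no beacons. A quick sanity check:

```go
package main

import "fmt"

func main() {
	for _, w := range []uint64{0, 1, 4, 5, 100} {
		req := (3*w + 3) / 4 // ceiling of 3w/4
		fmt.Printf("bootstrapWeight=%d -> reqWeight=%d\n", w, req)
	}
	// Output:
	// bootstrapWeight=0 -> reqWeight=0
	// bootstrapWeight=1 -> reqWeight=1
	// bootstrapWeight=4 -> reqWeight=3
	// bootstrapWeight=5 -> reqWeight=4
	// bootstrapWeight=100 -> reqWeight=75
}
```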
@ -486,7 +483,7 @@ func (m *manager) createSnowmanChain(
// Passes messages from the consensus engine to the network
sender := sender.Sender{}
sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager)
sender.Initialize(ctx, m.net, m.chainRouter, m.timeoutManager)
bootstrapWeight := uint64(0)
for _, beacon := range beacons.List() {
@ -524,17 +521,17 @@ func (m *manager) createSnowmanChain(
m.chainRouter.AddChain(handler)
go ctx.Log.RecoverAndPanic(handler.Dispatch)
awaiting := &networking.AwaitingConnections{
Requested: beacons,
WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to
Finish: func() {
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
engine.Startup()
},
reqWeight := (3*bootstrapWeight + 3) / 4
if reqWeight == 0 {
engine.Startup()
} else {
go m.net.RegisterHandler(&awaiter{
vdrs: beacons,
reqWeight: reqWeight, // 75% must be connected to
ctx: ctx,
eng: &engine,
})
}
m.awaiter.AwaitConnections(awaiting)
return nil
}


@ -71,21 +71,16 @@ func main() {
mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port)
mapper.MapPort(Config.HTTPPort, Config.HTTPPort)
node := node.Node{}
log.Debug("initializing node state")
// MainNode is a global variable in the node.go file
if err := node.MainNode.Initialize(&Config, log, factory); err != nil {
if err := node.Initialize(&Config, log, factory); err != nil {
log.Fatal("error initializing node state: %s", err)
return
}
log.Debug("Starting servers")
if err := node.MainNode.StartConsensusServer(); err != nil {
log.Fatal("problem starting servers: %s", err)
return
}
defer node.MainNode.Shutdown()
defer node.Shutdown()
log.Debug("Dispatching node handlers")
node.MainNode.Dispatch()
node.Dispatch()
}


@ -29,7 +29,7 @@ import (
)
const (
dbVersion = "v0.2.0"
dbVersion = "v0.3.0"
)
// Results of parsing the CLI
@ -52,7 +52,7 @@ func GetIPs(networkID uint32) []string {
return []string{
"3.227.207.132:21001",
"34.207.133.167:21001",
"107.23.241.199:21001",
"54.162.71.9:21001",
"54.197.215.186:21001",
"18.234.153.22:21001",
}


@ -1,11 +1,10 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
package network
import (
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils"
)
@ -16,9 +15,10 @@ type Builder struct{ Codec }
func (m Builder) GetVersion() (Msg, error) { return m.Pack(GetVersion, nil) }
// Version message
func (m Builder) Version(networkID uint32, myTime uint64, ip utils.IPDesc, myVersion string) (Msg, error) {
func (m Builder) Version(networkID, nodeID uint32, myTime uint64, ip utils.IPDesc, myVersion string) (Msg, error) {
return m.Pack(Version, map[Field]interface{}{
NetworkID: networkID,
NodeID: nodeID,
MyTime: myTime,
IP: ip,
VersionStr: myVersion,
@ -130,28 +130,3 @@ func (m Builder) Chits(chainID ids.ID, requestID uint32, containerIDs ids.Set) (
ContainerIDs: containerIDBytes,
})
}
// Ping message
func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, nil) }
// Pong message
func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) }
// Data message
func (m Builder) Data(b []byte) (Msg, error) { return m.Pack(Data, map[Field]interface{}{Bytes: b}) }
// IssueTx message
func (m Builder) IssueTx(chainID ids.ID, tx []byte) (Msg, error) {
return m.Pack(IssueTx, map[Field]interface{}{
ChainID: chainID.Bytes(),
Tx: tx,
})
}
// DecidedTx message
func (m Builder) DecidedTx(txID ids.ID, status choices.Status) (Msg, error) {
return m.Pack(DecidedTx, map[Field]interface{}{
TxID: txID.Bytes(),
Status: uint32(status),
})
}

316
network/builder_test.go Normal file

@ -0,0 +1,316 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"net"
"testing"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils"
"github.com/stretchr/testify/assert"
)
var (
TestBuilder Builder
)
func TestBuildGetVersion(t *testing.T) {
msg, err := TestBuilder.GetVersion()
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, GetVersion, msg.Op())
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, GetVersion, parsedMsg.Op())
}
func TestBuildVersion(t *testing.T) {
networkID := uint32(1)
nodeID := uint32(3)
myTime := uint64(2)
ip := utils.IPDesc{
IP: net.IPv6loopback,
Port: 12345,
}
myVersion := "xD"
msg, err := TestBuilder.Version(
networkID,
nodeID,
myTime,
ip,
myVersion,
)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, Version, msg.Op())
assert.Equal(t, networkID, msg.Get(NetworkID))
assert.Equal(t, nodeID, msg.Get(NodeID))
assert.Equal(t, myTime, msg.Get(MyTime))
assert.Equal(t, ip, msg.Get(IP))
assert.Equal(t, myVersion, msg.Get(VersionStr))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, Version, parsedMsg.Op())
assert.Equal(t, networkID, parsedMsg.Get(NetworkID))
assert.Equal(t, nodeID, parsedMsg.Get(NodeID))
assert.Equal(t, myTime, parsedMsg.Get(MyTime))
assert.Equal(t, ip, parsedMsg.Get(IP))
assert.Equal(t, myVersion, parsedMsg.Get(VersionStr))
}
func TestBuildGetPeerList(t *testing.T) {
msg, err := TestBuilder.GetPeerList()
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, GetPeerList, msg.Op())
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, GetPeerList, parsedMsg.Op())
}
func TestBuildPeerList(t *testing.T) {
ips := []utils.IPDesc{
utils.IPDesc{
IP: net.IPv6loopback,
Port: 12345,
},
utils.IPDesc{
IP: net.IPv6loopback,
Port: 54321,
},
}
msg, err := TestBuilder.PeerList(ips)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, PeerList, msg.Op())
assert.Equal(t, ips, msg.Get(Peers))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, PeerList, parsedMsg.Op())
assert.Equal(t, ips, parsedMsg.Get(Peers))
}
func TestBuildGetAcceptedFrontier(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
msg, err := TestBuilder.GetAcceptedFrontier(chainID, requestID)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, GetAcceptedFrontier, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, GetAcceptedFrontier, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
}
func TestBuildAcceptedFrontier(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
containerIDSet := ids.Set{}
containerIDSet.Add(containerID)
containerIDs := [][]byte{containerID.Bytes()}
msg, err := TestBuilder.AcceptedFrontier(chainID, requestID, containerIDSet)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, AcceptedFrontier, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerIDs, msg.Get(ContainerIDs))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, AcceptedFrontier, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerIDs, parsedMsg.Get(ContainerIDs))
}
func TestBuildGetAccepted(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
containerIDSet := ids.Set{}
containerIDSet.Add(containerID)
containerIDs := [][]byte{containerID.Bytes()}
msg, err := TestBuilder.GetAccepted(chainID, requestID, containerIDSet)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, GetAccepted, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerIDs, msg.Get(ContainerIDs))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, GetAccepted, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerIDs, parsedMsg.Get(ContainerIDs))
}
func TestBuildAccepted(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
containerIDSet := ids.Set{}
containerIDSet.Add(containerID)
containerIDs := [][]byte{containerID.Bytes()}
msg, err := TestBuilder.Accepted(chainID, requestID, containerIDSet)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, Accepted, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerIDs, msg.Get(ContainerIDs))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, Accepted, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerIDs, parsedMsg.Get(ContainerIDs))
}
func TestBuildGet(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
msg, err := TestBuilder.Get(chainID, requestID, containerID)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, Get, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), msg.Get(ContainerID))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, Get, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), parsedMsg.Get(ContainerID))
}
func TestBuildPut(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
container := []byte{2}
msg, err := TestBuilder.Put(chainID, requestID, containerID, container)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, Put, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), msg.Get(ContainerID))
assert.Equal(t, container, msg.Get(ContainerBytes))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, Put, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), parsedMsg.Get(ContainerID))
assert.Equal(t, container, parsedMsg.Get(ContainerBytes))
}
func TestBuildPushQuery(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
container := []byte{2}
msg, err := TestBuilder.PushQuery(chainID, requestID, containerID, container)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, PushQuery, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), msg.Get(ContainerID))
assert.Equal(t, container, msg.Get(ContainerBytes))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, PushQuery, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), parsedMsg.Get(ContainerID))
assert.Equal(t, container, parsedMsg.Get(ContainerBytes))
}
func TestBuildPullQuery(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
msg, err := TestBuilder.PullQuery(chainID, requestID, containerID)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, PullQuery, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), msg.Get(ContainerID))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, PullQuery, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerID.Bytes(), parsedMsg.Get(ContainerID))
}
func TestBuildChits(t *testing.T) {
chainID := ids.Empty.Prefix(0)
requestID := uint32(5)
containerID := ids.Empty.Prefix(1)
containerIDSet := ids.Set{}
containerIDSet.Add(containerID)
containerIDs := [][]byte{containerID.Bytes()}
msg, err := TestBuilder.Chits(chainID, requestID, containerIDSet)
assert.NoError(t, err)
assert.NotNil(t, msg)
assert.Equal(t, Chits, msg.Op())
assert.Equal(t, chainID.Bytes(), msg.Get(ChainID))
assert.Equal(t, requestID, msg.Get(RequestID))
assert.Equal(t, containerIDs, msg.Get(ContainerIDs))
parsedMsg, err := TestBuilder.Parse(msg.Bytes())
assert.NoError(t, err)
assert.NotNil(t, parsedMsg)
assert.Equal(t, Chits, parsedMsg.Op())
assert.Equal(t, chainID.Bytes(), parsedMsg.Get(ChainID))
assert.Equal(t, requestID, parsedMsg.Get(RequestID))
assert.Equal(t, containerIDs, parsedMsg.Get(ContainerIDs))
}


@ -1,15 +1,13 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
package network
import (
"errors"
"fmt"
"math"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/wrappers"
)
@ -23,15 +21,15 @@ var (
type Codec struct{}
// Pack attempts to pack a map of fields into a message.
//
// If a nil error is returned, the message's datastream must be freed manually
func (Codec) Pack(op salticidae.Opcode, fields map[Field]interface{}) (Msg, error) {
// The first byte of the message is the opcode of the message.
func (Codec) Pack(op Op, fields map[Field]interface{}) (Msg, error) {
message, ok := Messages[op]
if !ok {
return nil, errBadOp
}
p := wrappers.Packer{MaxSize: math.MaxInt32}
p.PackByte(byte(op))
for _, field := range message {
data, ok := fields[field]
if !ok {
@ -40,43 +38,35 @@ func (Codec) Pack(op salticidae.Opcode, fields map[Field]interface{}) (Msg, erro
field.Packer()(&p, data)
}
if p.Errored() { // Prevent the datastream from leaking
return nil, p.Err
}
return &msg{
op: op,
ds: salticidae.NewDataStreamFromBytes(p.Bytes, false),
fields: fields,
}, nil
bytes: p.Bytes,
}, p.Err
}
// Parse attempts to convert a byte stream into a message.
//
// The datastream is not freed.
func (Codec) Parse(op salticidae.Opcode, ds salticidae.DataStream) (Msg, error) {
// Parse attempts to convert bytes into a message.
// The first byte of the message is the opcode of the message.
func (Codec) Parse(b []byte) (Msg, error) {
p := wrappers.Packer{Bytes: b}
op := Op(p.UnpackByte())
message, ok := Messages[op]
if !ok {
return nil, errBadOp
}
size := ds.Size()
byteHandle := ds.GetDataInPlace(size)
p := wrappers.Packer{Bytes: utils.CopyBytes(byteHandle.Get())}
byteHandle.Release()
fields := make(map[Field]interface{}, len(message))
for _, field := range message {
fields[field] = field.Unpacker()(&p)
}
if p.Offset != size {
return nil, errBadLength
if p.Offset != len(b) {
p.Add(fmt.Errorf("expected length %d got %d", len(b), p.Offset))
}
return &msg{
op: op,
ds: ds,
fields: fields,
bytes: b,
}, p.Err
}

35
network/codec_test.go Normal file

@ -0,0 +1,35 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
)
var (
TestCodec Codec
)
func TestCodecPackInvalidOp(t *testing.T) {
_, err := TestCodec.Pack(math.MaxUint8, make(map[Field]interface{}))
assert.Error(t, err)
}
func TestCodecPackMissingField(t *testing.T) {
_, err := TestCodec.Pack(Get, make(map[Field]interface{}))
assert.Error(t, err)
}
func TestCodecParseInvalidOp(t *testing.T) {
_, err := TestCodec.Parse([]byte{math.MaxUint8})
assert.Error(t, err)
}
func TestCodecParseExtraSpace(t *testing.T) {
_, err := TestCodec.Parse([]byte{byte(GetVersion), 0x00})
assert.Error(t, err)
}


@ -1,11 +1,9 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
package network
import (
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/utils/wrappers"
)
@ -16,6 +14,7 @@ type Field uint32
const (
VersionStr Field = iota // Used in handshake
NetworkID // Used in handshake
NodeID // Used in handshake
MyTime // Used in handshake
IP // Used in handshake
Peers // Used in handshake
@ -24,10 +23,6 @@ const (
ContainerID // Used for querying
ContainerBytes // Used for gossiping
ContainerIDs // Used for querying
Bytes // Used as arbitrary data
TxID // Used for throughput tests
Tx // Used for throughput tests
Status // Used for throughput tests
)
// Packer returns the packer function that can be used to pack this field.
@ -37,6 +32,8 @@ func (f Field) Packer() func(*wrappers.Packer, interface{}) {
return wrappers.TryPackStr
case NetworkID:
return wrappers.TryPackInt
case NodeID:
return wrappers.TryPackInt
case MyTime:
return wrappers.TryPackLong
case IP:
@ -53,14 +50,6 @@ func (f Field) Packer() func(*wrappers.Packer, interface{}) {
return wrappers.TryPackBytes
case ContainerIDs:
return wrappers.TryPackHashes
case Bytes:
return wrappers.TryPackBytes
case TxID:
return wrappers.TryPackHash
case Tx:
return wrappers.TryPackBytes
case Status:
return wrappers.TryPackInt
default:
return nil
}
@ -73,6 +62,8 @@ func (f Field) Unpacker() func(*wrappers.Packer) interface{} {
return wrappers.TryUnpackStr
case NetworkID:
return wrappers.TryUnpackInt
case NodeID:
return wrappers.TryUnpackInt
case MyTime:
return wrappers.TryUnpackLong
case IP:
@ -89,14 +80,6 @@ func (f Field) Unpacker() func(*wrappers.Packer) interface{} {
return wrappers.TryUnpackBytes
case ContainerIDs:
return wrappers.TryUnpackHashes
case Bytes:
return wrappers.TryUnpackBytes
case TxID:
return wrappers.TryUnpackHash
case Tx:
return wrappers.TryUnpackBytes
case Status:
return wrappers.TryUnpackInt
default:
return nil
}
@ -108,6 +91,8 @@ func (f Field) String() string {
return "VersionStr"
case NetworkID:
return "NetworkID"
case NodeID:
return "NodeID"
case MyTime:
return "MyTime"
case IP:
@ -122,23 +107,51 @@ func (f Field) String() string {
return "Container Bytes"
case ContainerIDs:
return "Container IDs"
case Bytes:
return "Bytes"
case TxID:
return "TxID"
case Tx:
return "Tx"
case Status:
return "Status"
default:
return "Unknown Field"
}
}
// Op is an opcode
type Op byte
func (op Op) String() string {
switch op {
case GetVersion:
return "get_version"
case Version:
return "version"
case GetPeerList:
return "get_peerlist"
case PeerList:
return "peerlist"
case GetAcceptedFrontier:
return "get_accepted_frontier"
case AcceptedFrontier:
return "accepted_frontier"
case GetAccepted:
return "get_accepted"
case Accepted:
return "accepted"
case Get:
return "get"
case Put:
return "put"
case PushQuery:
return "push_query"
case PullQuery:
return "pull_query"
case Chits:
return "chits"
default:
return "Unknown Op"
}
}
// Public commands that may be sent between stakers
const (
// Handshake:
GetVersion salticidae.Opcode = iota
GetVersion Op = iota
Version
GetPeerList
PeerList
@ -153,22 +166,14 @@ const (
PushQuery
PullQuery
Chits
// Pinging:
Ping
Pong
// Arbitrary data message:
Data
// Throughput test:
IssueTx
DecidedTx
)
// Defines the messages that can be sent/received with this network
var (
Messages = map[salticidae.Opcode][]Field{
Messages = map[Op][]Field{
// Handshake:
GetVersion: []Field{},
Version: []Field{NetworkID, MyTime, IP, VersionStr},
Version: []Field{NetworkID, NodeID, MyTime, IP, VersionStr},
GetPeerList: []Field{},
PeerList: []Field{Peers},
// Bootstrapping:
@ -182,13 +187,5 @@ var (
PushQuery: []Field{ChainID, RequestID, ContainerID, ContainerBytes},
PullQuery: []Field{ChainID, RequestID, ContainerID},
Chits: []Field{ChainID, RequestID, ContainerIDs},
// Pinging:
Ping: []Field{},
Pong: []Field{},
// Arbitrary data message:
Data: []Field{Bytes},
// Throughput test:
IssueTx: []Field{ChainID, Tx},
DecidedTx: []Field{TxID, Status},
}
)

25
network/dialer.go Normal file

@ -0,0 +1,25 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"net"
"github.com/ava-labs/gecko/utils"
)
// Dialer attempts to create a connection with the provided IP/port pair
type Dialer interface {
Dial(utils.IPDesc) (net.Conn, error)
}
type dialer struct {
network string
}
// NewDialer returns a new Dialer that calls `net.Dial` with the provided
// network.
func NewDialer(network string) Dialer { return &dialer{network: network} }
func (d *dialer) Dial(ip utils.IPDesc) (net.Conn, error) { return net.Dial(d.network, ip.String()) }
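A small usage sketch of the new Dialer; the address is purely illustrative:

```go
package main

import (
	"fmt"
	"net"

	"github.com/ava-labs/gecko/network"
	"github.com/ava-labs/gecko/utils"
)

func main() {
	d := network.NewDialer("tcp") // wraps net.Dial("tcp", ip.String())

	// 127.0.0.1:9651 is only an example address; nothing in this diff pins it.
	conn, err := d.Dial(utils.IPDesc{IP: net.ParseIP("127.0.0.1"), Port: 9651})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```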

14
network/handler.go Normal file

@ -0,0 +1,14 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import "github.com/ava-labs/gecko/ids"
// Handler represents a handler that is called when a connection is marked as
// connected or disconnected
type Handler interface {
// returns true if the handler should be removed
Connected(id ids.ShortID) bool
Disconnected(id ids.ShortID) bool
}
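The `awaiter` added under `chains/` earlier in this diff is one such Handler; as another hedged sketch, a handler that only logs peer events and never asks to be removed might look like:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/gecko/ids"
)

// peerLogger is a hypothetical Handler: it prints connection events and
// always returns false, so the network never removes it.
type peerLogger struct{}

func (peerLogger) Connected(id ids.ShortID) bool    { fmt.Println("connected:", id); return false }
func (peerLogger) Disconnected(id ids.ShortID) bool { fmt.Println("disconnected:", id); return false }

func main() {
	// A network.Network would pick this up via net.RegisterHandler(peerLogger{}).
	var _ interface {
		Connected(ids.ShortID) bool
		Disconnected(ids.ShortID) bool
	} = peerLogger{}
}
```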

125
network/metrics.go Normal file

@ -0,0 +1,125 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"fmt"
"github.com/ava-labs/gecko/utils/wrappers"
"github.com/prometheus/client_golang/prometheus"
)
type messageMetrics struct {
numSent, numFailed, numReceived prometheus.Counter
}
func (mm *messageMetrics) initialize(msgType Op, registerer prometheus.Registerer) error {
mm.numSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: fmt.Sprintf("%s_sent", msgType),
Help: fmt.Sprintf("Number of %s messages sent", msgType),
})
mm.numFailed = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: fmt.Sprintf("%s_failed", msgType),
Help: fmt.Sprintf("Number of %s messages that failed to be sent", msgType),
})
mm.numReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: fmt.Sprintf("%s_received", msgType),
Help: fmt.Sprintf("Number of %s messages received", msgType),
})
if err := registerer.Register(mm.numSent); err != nil {
return fmt.Errorf("failed to register sent statistics of %s due to %s",
msgType, err)
}
if err := registerer.Register(mm.numFailed); err != nil {
return fmt.Errorf("failed to register failed statistics of %s due to %s",
msgType, err)
}
if err := registerer.Register(mm.numReceived); err != nil {
return fmt.Errorf("failed to register received statistics of %s due to %s",
msgType, err)
}
return nil
}
type metrics struct {
numPeers prometheus.Gauge
getVersion, version,
getPeerlist, peerlist,
getAcceptedFrontier, acceptedFrontier,
getAccepted, accepted,
get, put,
pushQuery, pullQuery, chits messageMetrics
}
func (m *metrics) initialize(registerer prometheus.Registerer) error {
m.numPeers = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "gecko",
Name: "peers",
Help: "Number of network peers",
})
errs := wrappers.Errs{}
if err := registerer.Register(m.numPeers); err != nil {
errs.Add(fmt.Errorf("failed to register peers statistics due to %s",
err))
}
errs.Add(m.getVersion.initialize(GetVersion, registerer))
errs.Add(m.version.initialize(Version, registerer))
errs.Add(m.getPeerlist.initialize(GetPeerList, registerer))
errs.Add(m.peerlist.initialize(PeerList, registerer))
errs.Add(m.getAcceptedFrontier.initialize(GetAcceptedFrontier, registerer))
errs.Add(m.acceptedFrontier.initialize(AcceptedFrontier, registerer))
errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
errs.Add(m.accepted.initialize(Accepted, registerer))
errs.Add(m.get.initialize(Get, registerer))
errs.Add(m.put.initialize(Put, registerer))
errs.Add(m.pushQuery.initialize(PushQuery, registerer))
errs.Add(m.pullQuery.initialize(PullQuery, registerer))
errs.Add(m.chits.initialize(Chits, registerer))
return errs.Err
}
func (m *metrics) message(msgType Op) *messageMetrics {
switch msgType {
case GetVersion:
return &m.getVersion
case Version:
return &m.version
case GetPeerList:
return &m.getPeerlist
case PeerList:
return &m.peerlist
case GetAcceptedFrontier:
return &m.getAcceptedFrontier
case AcceptedFrontier:
return &m.acceptedFrontier
case GetAccepted:
return &m.getAccepted
case Accepted:
return &m.accepted
case Get:
return &m.get
case Put:
return &m.put
case PushQuery:
return &m.pushQuery
case PullQuery:
return &m.pullQuery
case Chits:
return &m.chits
default:
return nil
}
}


@ -1,30 +1,26 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
import (
"github.com/ava-labs/salticidae-go"
)
package network
// Msg represents a set of fields that can be serialized into a byte stream
type Msg interface {
Op() salticidae.Opcode
Op() Op
Get(Field) interface{}
DataStream() salticidae.DataStream
Bytes() []byte
}
type msg struct {
op salticidae.Opcode
ds salticidae.DataStream
op Op
fields map[Field]interface{}
bytes []byte
}
// Field returns the value of the specified field in this message
func (msg *msg) Op() salticidae.Opcode { return msg.op }
func (msg *msg) Op() Op { return msg.op }
// Field returns the value of the specified field in this message
func (msg *msg) Get(field Field) interface{} { return msg.fields[field] }
// Bytes returns this message in bytes
func (msg *msg) DataStream() salticidae.DataStream { return msg.ds }
func (msg *msg) Bytes() []byte { return msg.bytes }

843
network/network.go Normal file

@ -0,0 +1,843 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"fmt"
"math"
"math/rand"
"net"
"sync"
"sync/atomic"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/api/health"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/router"
"github.com/ava-labs/gecko/snow/networking/sender"
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
"github.com/ava-labs/gecko/version"
)
const (
defaultInitialReconnectDelay = time.Second
defaultMaxReconnectDelay = time.Hour
defaultMaxMessageSize uint32 = 1 << 21
defaultSendQueueSize = 1 << 10
defaultMaxClockDifference = time.Minute
defaultPeerListGossipSpacing = time.Minute
defaultPeerListGossipSize = 100
defaultPeerListStakerGossipFraction = 2
defaultGetVersionTimeout = 2 * time.Second
defaultAllowPrivateIPs = true
defaultGossipSize = 50
)
// Network defines the functionality of the networking library.
type Network interface {
// All consensus messages can be sent through this interface. Thread safety
// must be managed internally in the network.
sender.ExternalSender
// The network must be able to broadcast accepted decisions to random peers.
// Thread safety must be managed internally in the network.
triggers.Acceptor
// The network should be able to report the last time the network interacted
// with a peer
health.Heartbeater
// Should only be called once, will run until either a fatal error occurs,
// or the network is closed. Returns a non-nil error.
Dispatch() error
// Attempt to connect to this IP. Thread safety must be managed internally
// to the network. The network will never stop attempting to connect to this
// IP.
Track(ip utils.IPDesc)
// Register a new handler that is called whenever a peer is connected to or
// disconnected from. If the handler returns true, then it will never be
// called again. Thread safety must be managed internally in the network.
// The handler will initially be called with this local node's ID.
RegisterHandler(h Handler)
// Returns the IPs of nodes this network is currently connected to
// externally. Thread safety must be managed internally to the network.
IPs() []utils.IPDesc
// Close this network and all existing connections it has. Thread safety
// must be managed internally to the network. Calling close multiple times
// will return a nil error.
Close() error
}
type network struct {
// The metrics that this network tracks
metrics
log logging.Logger
id ids.ShortID
ip utils.IPDesc
networkID uint32
version version.Version
parser version.Parser
listener net.Listener
dialer Dialer
serverUpgrader Upgrader
clientUpgrader Upgrader
vdrs validators.Set // set of current validators in the AVAnet
router router.Router // router must be thread safe
nodeID uint32
clock timer.Clock
lastHeartbeat int64
initialReconnectDelay time.Duration
maxReconnectDelay time.Duration
maxMessageSize uint32
sendQueueSize int
maxClockDifference time.Duration
peerListGossipSpacing time.Duration
peerListGossipSize int
peerListStakerGossipFraction int
getVersionTimeout time.Duration
allowPrivateIPs bool
gossipSize int
executor timer.Executor
b Builder
stateLock sync.Mutex
closed bool
disconnectedIPs map[string]struct{}
connectedIPs map[string]struct{}
// TODO: bound the size of [myIPs] to avoid DoS. LRU caching would be ideal
myIPs map[string]struct{} // set of IPs that resulted in my ID.
peers map[[20]byte]*peer
handlers []Handler
}
// NewDefaultNetwork returns a new Network implementation with the provided
// parameters and some reasonable default values.
func NewDefaultNetwork(
registerer prometheus.Registerer,
log logging.Logger,
id ids.ShortID,
ip utils.IPDesc,
networkID uint32,
version version.Version,
parser version.Parser,
listener net.Listener,
dialer Dialer,
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
router router.Router,
) Network {
return NewNetwork(
registerer,
log,
id,
ip,
networkID,
version,
parser,
listener,
dialer,
serverUpgrader,
clientUpgrader,
vdrs,
router,
defaultInitialReconnectDelay,
defaultMaxReconnectDelay,
defaultMaxMessageSize,
defaultSendQueueSize,
defaultMaxClockDifference,
defaultPeerListGossipSpacing,
defaultPeerListGossipSize,
defaultPeerListStakerGossipFraction,
defaultGetVersionTimeout,
defaultAllowPrivateIPs,
defaultGossipSize,
)
}
// NewNetwork returns a new Network implementation with the provided parameters.
func NewNetwork(
registerer prometheus.Registerer,
log logging.Logger,
id ids.ShortID,
ip utils.IPDesc,
networkID uint32,
version version.Version,
parser version.Parser,
listener net.Listener,
dialer Dialer,
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
router router.Router,
initialReconnectDelay,
maxReconnectDelay time.Duration,
maxMessageSize uint32,
sendQueueSize int,
maxClockDifference time.Duration,
peerListGossipSpacing time.Duration,
peerListGossipSize int,
peerListStakerGossipFraction int,
getVersionTimeout time.Duration,
allowPrivateIPs bool,
gossipSize int,
) Network {
net := &network{
log: log,
id: id,
ip: ip,
networkID: networkID,
version: version,
parser: parser,
listener: listener,
dialer: dialer,
serverUpgrader: serverUpgrader,
clientUpgrader: clientUpgrader,
vdrs: vdrs,
router: router,
nodeID: rand.Uint32(),
initialReconnectDelay: initialReconnectDelay,
maxReconnectDelay: maxReconnectDelay,
maxMessageSize: maxMessageSize,
sendQueueSize: sendQueueSize,
maxClockDifference: maxClockDifference,
peerListGossipSpacing: peerListGossipSpacing,
peerListGossipSize: peerListGossipSize,
peerListStakerGossipFraction: peerListStakerGossipFraction,
getVersionTimeout: getVersionTimeout,
allowPrivateIPs: allowPrivateIPs,
gossipSize: gossipSize,
disconnectedIPs: make(map[string]struct{}),
connectedIPs: make(map[string]struct{}),
myIPs: map[string]struct{}{ip.String(): struct{}{}},
peers: make(map[[20]byte]*peer),
}
net.initialize(registerer)
net.executor.Initialize()
net.heartbeat()
return net
}
// GetAcceptedFrontier implements the Sender interface.
func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32) {
msg, err := n.b.GetAcceptedFrontier(chainID, requestID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
for _, validatorID := range validatorIDs.List() {
vID := validatorID
peer, sent := n.peers[vID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.executor.Add(func() { n.router.GetAcceptedFrontierFailed(vID, chainID, requestID) })
n.getAcceptedFrontier.numFailed.Inc()
} else {
n.getAcceptedFrontier.numSent.Inc()
}
}
}
// AcceptedFrontier implements the Sender interface.
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
containerIDs.Len())
return // Packing message failed
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
n.acceptedFrontier.numFailed.Inc()
} else {
n.acceptedFrontier.numSent.Inc()
}
}
// GetAccepted implements the Sender interface.
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
if err != nil {
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
}
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
for _, validatorID := range validatorIDs.List() {
vID := validatorID
peer, sent := n.peers[vID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
n.getAccepted.numFailed.Inc()
} else {
n.getAccepted.numSent.Inc()
}
}
}
// Accepted implements the Sender interface.
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.Accepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
containerIDs.Len())
return // Packing message failed
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an Accepted message to: %s", validatorID)
n.accepted.numFailed.Inc()
} else {
n.accepted.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Get message to: %s", validatorID)
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put message because of container of size %d", len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Put message to: %s", validatorID)
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// PushQuery implements the Sender interface.
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
if err != nil {
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
}
n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
for _, validatorID := range validatorIDs.List() {
vID := validatorID
peer, sent := n.peers[vID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PushQuery message to: %s", vID)
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pushQuery.numFailed.Inc()
} else {
n.pushQuery.numSent.Inc()
}
}
}
// PullQuery implements the Sender interface.
func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.PullQuery(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
for _, validatorID := range validatorIDs.List() {
vID := validatorID
peer, sent := n.peers[vID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PullQuery message to: %s", vID)
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pullQuery.numFailed.Inc()
} else {
n.pullQuery.numSent.Inc()
}
}
}
// Chits implements the Sender interface.
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
msg, err := n.b.Chits(chainID, requestID, votes)
if err != nil {
n.log.Error("failed to build Chits message because of %d votes", votes.Len())
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Chits message to: %s", validatorID)
n.chits.numFailed.Inc()
} else {
n.chits.numSent.Inc()
}
}
// Gossip attempts to gossip the container to the network
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
if err := n.gossipContainer(chainID, containerID, container); err != nil {
n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
}
}
// Accept is called after every consensus decision
func (n *network) Accept(chainID, containerID ids.ID, container []byte) error {
return n.gossipContainer(chainID, containerID, container)
}
// heartbeat registers a new heartbeat to signal liveness
func (n *network) heartbeat() { atomic.StoreInt64(&n.lastHeartbeat, n.clock.Time().Unix()) }
// GetHeartbeat returns the most recent heartbeat time
func (n *network) GetHeartbeat() int64 { return atomic.LoadInt64(&n.lastHeartbeat) }
// Dispatch starts accepting connections from other nodes attempting to connect
// to this node.
func (n *network) Dispatch() error {
go n.gossip()
for {
conn, err := n.listener.Accept()
if err != nil {
return err
}
go n.upgrade(&peer{
net: n,
conn: conn,
}, n.serverUpgrader)
}
}
// RegisterHandler implements the Network interface
func (n *network) RegisterHandler(h Handler) {
n.stateLock.Lock()
defer n.stateLock.Unlock()
if h.Connected(n.id) {
return
}
for _, peer := range n.peers {
if peer.connected {
if h.Connected(peer.id) {
return
}
}
}
n.handlers = append(n.handlers, h)
}
// IPs implements the Network interface
func (n *network) IPs() []utils.IPDesc {
n.stateLock.Lock()
defer n.stateLock.Unlock()
ips := []utils.IPDesc(nil)
for _, peer := range n.peers {
if peer.connected {
ips = append(ips, peer.ip)
}
}
return ips
}
// Close implements the Network interface
func (n *network) Close() error {
n.stateLock.Lock()
if n.closed {
n.stateLock.Unlock()
return nil
}
n.closed = true
err := n.listener.Close()
peersToClose := []*peer(nil)
for _, peer := range n.peers {
peersToClose = append(peersToClose, peer)
}
n.stateLock.Unlock()
for _, peer := range peersToClose {
peer.Close() // Grabs the stateLock
}
return err
}
// Track implements the Network interface
func (n *network) Track(ip utils.IPDesc) {
n.stateLock.Lock()
defer n.stateLock.Unlock()
n.track(ip)
}
// assumes the stateLock is not held.
func (n *network) gossipContainer(chainID, containerID ids.ID, container []byte) error {
msg, err := n.b.Put(chainID, math.MaxUint32, containerID, container)
if err != nil {
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
allPeers := make([]*peer, 0, len(n.peers))
for _, peer := range n.peers {
allPeers = append(allPeers, peer)
}
numToGossip := n.gossipSize
if numToGossip > len(allPeers) {
numToGossip = len(allPeers)
}
sampler := random.Uniform{N: len(allPeers)}
for i := 0; i < numToGossip; i++ {
if allPeers[sampler.Sample()].send(msg) {
n.put.numSent.Inc()
} else {
n.put.numFailed.Inc()
}
}
return nil
}
// assumes the stateLock is held.
func (n *network) track(ip utils.IPDesc) {
if n.closed {
return
}
str := ip.String()
if _, ok := n.disconnectedIPs[str]; ok {
return
}
if _, ok := n.connectedIPs[str]; ok {
return
}
if _, ok := n.myIPs[str]; ok {
return
}
n.disconnectedIPs[str] = struct{}{}
go n.connectTo(ip)
}
// assumes the stateLock is not held. Only returns after the network is closed.
func (n *network) gossip() {
t := time.NewTicker(n.peerListGossipSpacing)
defer t.Stop()
for range t.C {
ips := n.validatorIPs()
if len(ips) == 0 {
n.log.Debug("skipping validator gossiping as no public validators are connected")
continue
}
msg, err := n.b.PeerList(ips)
if err != nil {
n.log.Warn("failed to gossip PeerList message due to %s", err)
continue
}
n.stateLock.Lock()
if n.closed {
n.stateLock.Unlock()
return
}
stakers := []*peer(nil)
nonStakers := []*peer(nil)
for _, peer := range n.peers {
if n.vdrs.Contains(peer.id) {
stakers = append(stakers, peer)
} else {
nonStakers = append(nonStakers, peer)
}
}
numStakersToSend := (n.peerListGossipSize + n.peerListStakerGossipFraction - 1) / n.peerListStakerGossipFraction
if len(stakers) < numStakersToSend {
numStakersToSend = len(stakers)
}
numNonStakersToSend := n.peerListGossipSize - numStakersToSend
if len(nonStakers) < numNonStakersToSend {
numNonStakersToSend = len(nonStakers)
}
sampler := random.Uniform{N: len(stakers)}
for i := 0; i < numStakersToSend; i++ {
stakers[sampler.Sample()].send(msg)
}
sampler.N = len(nonStakers)
sampler.Replace()
for i := 0; i < numNonStakersToSend; i++ {
nonStakers[sampler.Sample()].send(msg)
}
n.stateLock.Unlock()
}
}
// assumes the stateLock is not held. Only returns if the ip is connected to or
// the network is closed
func (n *network) connectTo(ip utils.IPDesc) {
str := ip.String()
delay := n.initialReconnectDelay
for {
n.stateLock.Lock()
_, isDisconnected := n.disconnectedIPs[str]
_, isConnected := n.connectedIPs[str]
_, isMyself := n.myIPs[str]
closed := n.closed
n.stateLock.Unlock()
if !isDisconnected || isConnected || isMyself || closed {
// If the IP was discovered by the peer connecting to us, we don't
// need to attempt to connect anymore
// If the IP was discovered to be our IP address, we don't need to
// attempt to connect anymore
// If the network was closed, we should stop attempting to connect
// to the peer
return
}
err := n.attemptConnect(ip)
if err == nil {
return
}
n.log.Verbo("error attempting to connect to %s: %s. Reattempting in %s",
ip, err, delay)
time.Sleep(delay)
delay *= 2
if delay > n.maxReconnectDelay {
delay = n.maxReconnectDelay
}
}
}
// assumes the stateLock is not held. Returns nil if a connection was able to be
// established, or the network is closed.
func (n *network) attemptConnect(ip utils.IPDesc) error {
n.log.Verbo("attempting to connect to %s", ip)
conn, err := n.dialer.Dial(ip)
if err != nil {
return err
}
return n.upgrade(&peer{
net: n,
ip: ip,
conn: conn,
}, n.clientUpgrader)
}
// assumes the stateLock is not held. Returns an error if the peer's connection
// wasn't able to be upgraded.
func (n *network) upgrade(p *peer, upgrader Upgrader) error {
id, conn, err := upgrader.Upgrade(p.conn)
if err != nil {
n.log.Verbo("failed to upgrade connection with %s", err)
return err
}
p.sender = make(chan []byte, n.sendQueueSize)
p.id = id
p.conn = conn
key := id.Key()
n.stateLock.Lock()
defer n.stateLock.Unlock()
if n.closed {
return nil
}
// if this connection is myself, then I should delete the connection and
// mark the IP as one of mine.
if id.Equals(n.id) {
if !p.ip.IsZero() {
// if n.ip is less useful than p.ip set it to this IP
if n.ip.IsZero() {
n.log.Info("setting my ip to %s because I was able to connect to myself through this channel",
p.ip)
n.ip = p.ip
}
str := p.ip.String()
delete(n.disconnectedIPs, str)
n.myIPs[str] = struct{}{}
}
p.conn.Close()
return nil
}
if _, ok := n.peers[key]; ok {
if !p.ip.IsZero() {
delete(n.disconnectedIPs, p.ip.String())
}
p.conn.Close()
return nil
}
n.peers[key] = p
n.numPeers.Set(float64(len(n.peers)))
p.Start()
return nil
}
// assumes the stateLock is not held. Returns the ips of connections that have
// valid IPs that are marked as validators.
func (n *network) validatorIPs() []utils.IPDesc {
n.stateLock.Lock()
defer n.stateLock.Unlock()
ips := []utils.IPDesc(nil)
for _, peer := range n.peers {
if peer.connected &&
!peer.ip.IsZero() &&
n.vdrs.Contains(peer.id) {
ips = append(ips, peer.ip)
}
}
return ips
}
// assumes the stateLock is held when called
// should only be called after the peer is marked as connected. Should not be
// called after disconnected is called with this peer.
func (n *network) connected(p *peer) {
n.log.Debug("connected to %s at %s", p.id, p.ip)
if !p.ip.IsZero() {
str := p.ip.String()
delete(n.disconnectedIPs, str)
n.connectedIPs[str] = struct{}{}
}
for i := 0; i < len(n.handlers); {
if n.handlers[i].Connected(p.id) {
newLen := len(n.handlers) - 1
n.handlers[i] = n.handlers[newLen] // remove the current handler
n.handlers = n.handlers[:newLen]
} else {
i++
}
}
}
// assumes the stateLock is held when called
// should only be called after the peer is marked as connected.
func (n *network) disconnected(p *peer) {
n.log.Debug("disconnected from %s at %s", p.id, p.ip)
key := p.id.Key()
delete(n.peers, key)
n.numPeers.Set(float64(len(n.peers)))
if !p.ip.IsZero() {
str := p.ip.String()
delete(n.disconnectedIPs, str)
delete(n.connectedIPs, str)
n.track(p.ip)
}
if p.connected {
for i := 0; i < len(n.handlers); {
if n.handlers[i].Disconnected(p.id) {
newLen := len(n.handlers) - 1
n.handlers[i] = n.handlers[newLen] // remove the current handler
n.handlers = n.handlers[:newLen]
} else {
i++
}
}
}
}

1039
network/network_test.go Normal file

File diff suppressed because it is too large

581
network/peer.go Normal file
View File

@ -0,0 +1,581 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"bytes"
"math"
"net"
"sync"
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/wrappers"
)
type peer struct {
net *network // network this peer is part of
// if the version message has been received and is valid. is only modified
// on the connection's reader routine with the network state lock held.
connected bool
// only close the peer once
once sync.Once
// if the close function has been called, is only modified when the network
// state lock is held.
closed bool
// queue of messages this connection is attempting to send the peer. Is
// closed when the connection is closed.
sender chan []byte
// ip may or may not be set when the peer is first started. is only modified
// on the connection's reader routine with the network state lock held.
ip utils.IPDesc
// id should be set when the peer is first started.
id ids.ShortID
// the connection object that is used to read/write messages from
conn net.Conn
}
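// peer lifecycle (sketch): Start launches the reader/writer goroutines and
// kicks off the version handshake; the first valid Version message marks the
// peer as connected; Close tears the connection down exactly once via
// sync.Once.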
// assumes the stateLock is held
func (p *peer) Start() {
go p.ReadMessages()
go p.WriteMessages()
// Initially send the version to the peer
go p.Version()
go p.requestVersion()
}
// request the version from the peer until we get the version from them
func (p *peer) requestVersion() {
t := time.NewTicker(p.net.getVersionTimeout)
defer t.Stop()
for range t.C {
p.net.stateLock.Lock()
connected := p.connected
closed := p.closed
p.net.stateLock.Unlock()
if connected || closed {
return
}
p.GetVersion()
}
}
// attempt to read messages from the peer
func (p *peer) ReadMessages() {
defer p.Close()
pendingBuffer := wrappers.Packer{}
readBuffer := make([]byte, 1<<10)
for {
read, err := p.conn.Read(readBuffer)
if err != nil {
p.net.log.Verbo("error on connection read to %s %s", p.id, err)
return
}
pendingBuffer.Bytes = append(pendingBuffer.Bytes, readBuffer[:read]...)
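// messages are length-prefixed on the wire: UnpackBytes reads a
// wrappers.IntLen-byte length followed by that many payload bytes
// (WriteMessages produces this framing with PackBytes).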
msgBytes := pendingBuffer.UnpackBytes()
if pendingBuffer.Errored() {
// if reading the bytes errored, then we haven't read the full
// message yet
pendingBuffer.Offset = 0
pendingBuffer.Err = nil
if uint32(len(pendingBuffer.Bytes)) > p.net.maxMessageSize+wrappers.IntLen {
// we have read more bytes than the max message size allows for,
// so we should terminate this connection
p.net.log.Verbo("error reading too many bytes on %s %s", p.id, err)
return
}
// we should try to read more bytes to finish the message
continue
}
// we read the full message bytes
// set the pending bytes to any extra bytes that were read
pendingBuffer.Bytes = pendingBuffer.Bytes[pendingBuffer.Offset:]
// set the offset back to the start of the next message
pendingBuffer.Offset = 0
if uint32(len(msgBytes)) > p.net.maxMessageSize {
// if this message is longer than the max message length, then we
// should terminate this connection
p.net.log.Verbo("error reading too many bytes on %s %s", p.id, err)
return
}
p.net.log.Verbo("parsing new message from %s:\n%s",
p.id,
formatting.DumpBytes{Bytes: msgBytes})
msg, err := p.net.b.Parse(msgBytes)
if err != nil {
p.net.log.Debug("failed to parse new message from %s:\n%s\n%s",
p.id,
formatting.DumpBytes{Bytes: msgBytes},
err)
return
}
p.handle(msg)
}
}
// attempt to write messages to the peer
func (p *peer) WriteMessages() {
defer p.Close()
for msg := range p.sender {
p.net.log.Verbo("sending new message to %s:\n%s",
p.id,
formatting.DumpBytes{Bytes: msg})
packer := wrappers.Packer{Bytes: make([]byte, len(msg)+wrappers.IntLen)}
packer.PackBytes(msg)
msg = packer.Bytes
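// msg now holds the length-prefixed frame; loop so that partial writes
// still send the whole frame.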
for len(msg) > 0 {
written, err := p.conn.Write(msg)
if err != nil {
p.net.log.Verbo("error writing to %s at %s due to: %s", p.id, p.ip, err)
return
}
msg = msg[written:]
}
}
}
// Send assumes that the stateLock is not held.
func (p *peer) Send(msg Msg) bool {
p.net.stateLock.Lock()
defer p.net.stateLock.Unlock()
return p.send(msg)
}
// send assumes that the stateLock is held.
func (p *peer) send(msg Msg) bool {
if p.closed {
p.net.log.Debug("dropping message to %s due to a closed connection", p.id)
return false
}
select {
case p.sender <- msg.Bytes():
return true
default:
p.net.log.Debug("dropping message to %s due to a full send queue", p.id)
return false
}
}
// assumes the stateLock is not held
func (p *peer) handle(msg Msg) {
p.net.heartbeat()
op := msg.Op()
msgMetrics := p.net.message(op)
if msgMetrics == nil {
p.net.log.Debug("dropping an unknown message from %s with op %d", p.id, op)
return
}
msgMetrics.numReceived.Inc()
switch op {
case Version:
p.version(msg)
return
case GetVersion:
p.getVersion(msg)
return
}
if !p.connected {
p.net.log.Debug("dropping message from %s because the connection hasn't been established yet", p.id)
// send a get version message so that the peer's future messages are hopefully not dropped
p.GetVersion()
return
}
switch op {
case GetPeerList:
p.getPeerList(msg)
case PeerList:
p.peerList(msg)
case GetAcceptedFrontier:
p.getAcceptedFrontier(msg)
case AcceptedFrontier:
p.acceptedFrontier(msg)
case GetAccepted:
p.getAccepted(msg)
case Accepted:
p.accepted(msg)
case Get:
p.get(msg)
case Put:
p.put(msg)
case PushQuery:
p.pushQuery(msg)
case PullQuery:
p.pullQuery(msg)
case Chits:
p.chits(msg)
}
}
// assumes the stateLock is not held
func (p *peer) Close() { p.once.Do(p.close) }
// assumes only `peer.Close` calls this
func (p *peer) close() {
p.net.stateLock.Lock()
defer p.net.stateLock.Unlock()
p.closed = true
p.conn.Close()
close(p.sender)
p.net.disconnected(p)
}
// assumes the stateLock is not held
func (p *peer) GetVersion() {
msg, err := p.net.b.GetVersion()
p.net.log.AssertNoError(err)
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) Version() {
p.net.stateLock.Lock()
msg, err := p.net.b.Version(
p.net.networkID,
p.net.nodeID,
p.net.clock.Unix(),
p.net.ip,
p.net.version.String(),
)
p.net.stateLock.Unlock()
p.net.log.AssertNoError(err)
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) GetPeerList() {
msg, err := p.net.b.GetPeerList()
p.net.log.AssertNoError(err)
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) PeerList(peers []utils.IPDesc) {
msg, err := p.net.b.PeerList(peers)
if err != nil {
p.net.log.Warn("failed to send PeerList message due to %s", err)
return
}
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) getVersion(_ Msg) { p.Version() }
// assumes the stateLock is not held
func (p *peer) version(msg Msg) {
if p.connected {
p.net.log.Verbo("dropping duplicated version message from %s", p.id)
return
}
if networkID := msg.Get(NetworkID).(uint32); networkID != p.net.networkID {
p.net.log.Debug("peer's network ID doesn't match our networkID: Peer's = %d ; Ours = %d",
networkID,
p.net.networkID)
// By clearing the IP, we will not attempt to reconnect to this peer
if !p.ip.IsZero() {
p.net.stateLock.Lock()
delete(p.net.disconnectedIPs, p.ip.String())
p.ip = utils.IPDesc{}
p.net.stateLock.Unlock()
}
p.Close()
return
}
if nodeID := msg.Get(NodeID).(uint32); nodeID == p.net.nodeID {
p.net.log.Debug("peer's node ID matches our nodeID")
// By clearing the IP, we will not attempt to reconnect to this peer
if !p.ip.IsZero() {
p.net.stateLock.Lock()
str := p.ip.String()
p.net.myIPs[str] = struct{}{}
delete(p.net.disconnectedIPs, str)
p.ip = utils.IPDesc{}
p.net.stateLock.Unlock()
}
p.Close()
return
}
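// reject peers whose clocks are too far from ours; e.g. with a
// maxClockDifference of one minute, a reported time more than 60s away
// from our own causes the connection to be dropped.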
myTime := float64(p.net.clock.Unix())
if peerTime := float64(msg.Get(MyTime).(uint64)); math.Abs(peerTime-myTime) > p.net.maxClockDifference.Seconds() {
p.net.log.Debug("peer's clock is too far out of sync with mine. Peer's = %d, Ours = %d (seconds)",
uint64(peerTime),
uint64(myTime))
// By clearing the IP, we will not attempt to reconnect to this peer
if !p.ip.IsZero() {
p.net.stateLock.Lock()
delete(p.net.disconnectedIPs, p.ip.String())
p.ip = utils.IPDesc{}
p.net.stateLock.Unlock()
}
p.Close()
return
}
peerVersionStr := msg.Get(VersionStr).(string)
peerVersion, err := p.net.parser.Parse(peerVersionStr)
if err != nil {
p.net.log.Debug("peer version could not be parsed due to %s", err)
// By clearing the IP, we will not attempt to reconnect to this peer
if !p.ip.IsZero() {
p.net.stateLock.Lock()
delete(p.net.disconnectedIPs, p.ip.String())
p.ip = utils.IPDesc{}
p.net.stateLock.Unlock()
}
p.Close()
return
}
if p.net.version.Before(peerVersion) {
p.net.log.Info("peer attempting to connect with newer version %s. You may want to update your client",
peerVersion)
}
if err := p.net.version.Compatible(peerVersion); err != nil {
p.net.log.Debug("peer version not compatible due to %s", err)
// By clearing the IP, we will not attempt to reconnect to this peer
if !p.ip.IsZero() {
p.net.stateLock.Lock()
delete(p.net.disconnectedIPs, p.ip.String())
p.ip = utils.IPDesc{}
p.net.stateLock.Unlock()
}
p.Close()
return
}
if p.ip.IsZero() {
// we only care about the claimed IP if we don't know the IP yet
peerIP := msg.Get(IP).(utils.IPDesc)
addr := p.conn.RemoteAddr()
localPeerIP, err := utils.ToIPDesc(addr.String())
if err == nil {
// we can only verify the peer's claimed IP if we could parse the
// connection's remote address
if bytes.Equal(peerIP.IP, localPeerIP.IP) {
// if the IPs match, add this ip:port pair to be tracked
p.net.stateLock.Lock()
p.ip = peerIP
p.net.stateLock.Unlock()
}
}
}
p.SendPeerList()
p.net.stateLock.Lock()
defer p.net.stateLock.Unlock()
// the network connected function can only be called if disconnected wasn't
// already called
if p.closed {
return
}
p.connected = true
p.net.connected(p)
}
// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
reply, err := p.net.b.PeerList(ips)
if err != nil {
p.net.log.Warn("failed to send PeerList message due to %s", err)
return
}
p.Send(reply)
}
// assumes the stateLock is not held
func (p *peer) getPeerList(_ Msg) { p.SendPeerList() }
// assumes the stateLock is not held
func (p *peer) peerList(msg Msg) {
ips := msg.Get(Peers).([]utils.IPDesc)
p.net.stateLock.Lock()
for _, ip := range ips {
if !ip.Equal(p.net.ip) &&
!ip.IsZero() &&
(p.net.allowPrivateIPs || !ip.IsPrivate()) {
// TODO: only try to connect once
p.net.track(ip)
}
}
p.net.stateLock.Unlock()
}
// assumes the stateLock is not held
func (p *peer) getAcceptedFrontier(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
p.net.router.GetAcceptedFrontier(p.id, chainID, requestID)
}
// assumes the stateLock is not held
func (p *peer) acceptedFrontier(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
p.net.log.Debug("error parsing ContainerID 0x%x: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
p.net.router.AcceptedFrontier(p.id, chainID, requestID, containerIDs)
}
// assumes the stateLock is not held
func (p *peer) getAccepted(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
p.net.log.Debug("error parsing ContainerID 0x%x: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
p.net.router.GetAccepted(p.id, chainID, requestID, containerIDs)
}
// assumes the stateLock is not held
func (p *peer) accepted(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
p.net.log.Debug("error parsing ContainerID 0x%x: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
p.net.router.Accepted(p.id, chainID, requestID, containerIDs)
}
// assumes the stateLock is not held
func (p *peer) get(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
p.net.log.AssertNoError(err)
p.net.router.Get(p.id, chainID, requestID, containerID)
}
// assumes the stateLock is not held
func (p *peer) put(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
p.net.log.AssertNoError(err)
container := msg.Get(ContainerBytes).([]byte)
p.net.router.Put(p.id, chainID, requestID, containerID, container)
}
// assumes the stateLock is not held
func (p *peer) pushQuery(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
p.net.log.AssertNoError(err)
container := msg.Get(ContainerBytes).([]byte)
p.net.router.PushQuery(p.id, chainID, requestID, containerID, container)
}
// assumes the stateLock is not held
func (p *peer) pullQuery(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
p.net.log.AssertNoError(err)
p.net.router.PullQuery(p.id, chainID, requestID, containerID)
}
// assumes the stateLock is not held
func (p *peer) chits(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
p.net.log.Debug("error parsing ContainerID 0x%x: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
p.net.router.Chits(p.id, chainID, requestID, containerIDs)
}

91
network/upgrader.go Normal file
View File

@ -0,0 +1,91 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"crypto/tls"
"errors"
"net"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/hashing"
)
var (
errNoCert = errors.New("tls handshake finished with no peer certificate")
)
// Upgrader ...
type Upgrader interface {
// Must be thread safe
Upgrade(net.Conn) (ids.ShortID, net.Conn, error)
}
type ipUpgrader struct{}
// NewIPUpgrader ...
func NewIPUpgrader() Upgrader { return ipUpgrader{} }
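// ipUpgrader skips TLS entirely and derives the peer's ID from a hash of its
// ip:port string, presumably for configurations where staking/TLS is
// disabled.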
func (ipUpgrader) Upgrade(conn net.Conn) (ids.ShortID, net.Conn, error) {
addr := conn.RemoteAddr()
str := addr.String()
id := ids.NewShortID(hashing.ComputeHash160Array([]byte(str)))
return id, conn, nil
}
type tlsServerUpgrader struct {
config *tls.Config
}
// NewTLSServerUpgrader ...
func NewTLSServerUpgrader(config *tls.Config) Upgrader {
return tlsServerUpgrader{
config: config,
}
}
func (t tlsServerUpgrader) Upgrade(conn net.Conn) (ids.ShortID, net.Conn, error) {
encConn := tls.Server(conn, t.config)
if err := encConn.Handshake(); err != nil {
return ids.ShortID{}, nil, err
}
connState := encConn.ConnectionState()
if len(connState.PeerCertificates) == 0 {
return ids.ShortID{}, nil, errNoCert
}
peerCert := connState.PeerCertificates[0]
id := ids.NewShortID(
hashing.ComputeHash160Array(
hashing.ComputeHash256(peerCert.Raw)))
return id, encConn, nil
}
type tlsClientUpgrader struct {
config *tls.Config
}
// NewTLSClientUpgrader ...
func NewTLSClientUpgrader(config *tls.Config) Upgrader {
return tlsClientUpgrader{
config: config,
}
}
func (t tlsClientUpgrader) Upgrade(conn net.Conn) (ids.ShortID, net.Conn, error) {
encConn := tls.Client(conn, t.config)
if err := encConn.Handshake(); err != nil {
return ids.ShortID{}, nil, err
}
connState := encConn.ConnectionState()
if len(connState.PeerCertificates) == 0 {
return ids.ShortID{}, nil, errNoCert
}
peerCert := connState.PeerCertificates[0]
id := ids.NewShortID(
hashing.ComputeHash160Array(
hashing.ComputeHash256(peerCert.Raw)))
return id, encConn, nil
}
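// Example usage (sketch, assuming a *tls.Config named tlsConfig and a raw
// net.Conn named rawConn obtained from the dialer):
//   upgrader := NewTLSClientUpgrader(tlsConfig)
//   id, secureConn, err := upgrader.Upgrade(rawConn)
//   // id is the 20-byte ShortID derived from the peer's certificate hash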

View File

@ -1,281 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
import (
"fmt"
"sync"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils"
)
// Connections provides an interface for what a group of connections will
// support.
type Connections interface {
Add(salticidae.PeerID, ids.ShortID, utils.IPDesc)
GetPeerID(ids.ShortID) (salticidae.PeerID, bool)
GetID(salticidae.PeerID) (ids.ShortID, bool)
ContainsPeerID(salticidae.PeerID) bool
ContainsID(ids.ShortID) bool
ContainsIP(utils.IPDesc) bool
Remove(salticidae.PeerID, ids.ShortID)
RemovePeerID(salticidae.PeerID)
RemoveID(ids.ShortID)
PeerIDs() []salticidae.PeerID
IDs() ids.ShortSet
IPs() []utils.IPDesc
Conns() ([]salticidae.PeerID, []ids.ShortID, []utils.IPDesc)
Len() int
}
type connections struct {
mux sync.Mutex
// peerID -> id
peerIDToID map[[32]byte]ids.ShortID
// id -> peerID
idToPeerID map[[20]byte]salticidae.PeerID
// id -> ip
idToIP map[[20]byte]utils.IPDesc
}
// NewConnections returns a new and empty connections object
func NewConnections() Connections {
return &connections{
peerIDToID: make(map[[32]byte]ids.ShortID),
idToPeerID: make(map[[20]byte]salticidae.PeerID),
idToIP: make(map[[20]byte]utils.IPDesc),
}
}
// Add assumes that peer is garbage collected normally
func (c *connections) Add(peer salticidae.PeerID, id ids.ShortID, ip utils.IPDesc) {
c.mux.Lock()
defer c.mux.Unlock()
c.add(peer, id, ip)
}
// GetPeerID returns the peer mapped to the id that is provided if one exists.
func (c *connections) GetPeerID(id ids.ShortID) (salticidae.PeerID, bool) {
c.mux.Lock()
defer c.mux.Unlock()
return c.getPeerID(id)
}
// GetID returns the id mapped to the peer that is provided if one exists.
func (c *connections) GetID(peer salticidae.PeerID) (ids.ShortID, bool) {
c.mux.Lock()
defer c.mux.Unlock()
return c.getID(peer)
}
// ContainsPeerID returns true if the peer is contained in the connection pool
func (c *connections) ContainsPeerID(peer salticidae.PeerID) bool {
_, exists := c.GetID(peer)
return exists
}
// ContainsID returns true if the id is contained in the connection pool
func (c *connections) ContainsID(id ids.ShortID) bool {
_, exists := c.GetPeerID(id)
return exists
}
// ContainsIP returns true if the ip is contained in the connection pool
func (c *connections) ContainsIP(ip utils.IPDesc) bool {
for _, otherIP := range c.IPs() {
if ip.Equal(otherIP) {
return true
}
}
return false
}
// Remove ensures that no connection will have any mapping containing [peer] or
// [id].
func (c *connections) Remove(peer salticidae.PeerID, id ids.ShortID) {
c.mux.Lock()
defer c.mux.Unlock()
c.remove(peer, id)
}
// RemovePeerID ensures that no connection will have a mapping containing [peer]
func (c *connections) RemovePeerID(peer salticidae.PeerID) {
c.mux.Lock()
defer c.mux.Unlock()
c.removePeerID(peer)
}
// RemoveID ensures that no connection will have a mapping containing [id]
func (c *connections) RemoveID(id ids.ShortID) {
c.mux.Lock()
defer c.mux.Unlock()
c.removeID(id)
}
// PeerIDs returns the full list of peers contained in this connection pool.
func (c *connections) PeerIDs() []salticidae.PeerID {
c.mux.Lock()
defer c.mux.Unlock()
return c.peerIDs()
}
// IDs returns the set of IDs that are mapped in this connection pool.
func (c *connections) IDs() ids.ShortSet {
c.mux.Lock()
defer c.mux.Unlock()
return c.ids()
}
// IPs returns the set of IPs that are mapped in this connection pool.
func (c *connections) IPs() []utils.IPDesc {
c.mux.Lock()
defer c.mux.Unlock()
return c.ips()
}
// Conns returns the set of connections in this connection pool.
func (c *connections) Conns() ([]salticidae.PeerID, []ids.ShortID, []utils.IPDesc) {
c.mux.Lock()
defer c.mux.Unlock()
return c.conns()
}
// Len returns the number of elements in the map
func (c *connections) Len() int {
c.mux.Lock()
defer c.mux.Unlock()
return c.len()
}
func (c *connections) add(peer salticidae.PeerID, id ids.ShortID, ip utils.IPDesc) {
c.remove(peer, id)
key := id.Key()
c.peerIDToID[toID(peer)] = id
c.idToPeerID[key] = peer
c.idToIP[key] = ip
}
func (c *connections) getPeerID(id ids.ShortID) (salticidae.PeerID, bool) {
peer, exists := c.idToPeerID[id.Key()]
return peer, exists
}
func (c *connections) getID(peer salticidae.PeerID) (ids.ShortID, bool) {
id, exists := c.peerIDToID[toID(peer)]
return id, exists
}
func (c *connections) remove(peer salticidae.PeerID, id ids.ShortID) {
c.removePeerID(peer)
c.removeID(id)
}
func (c *connections) removePeerID(peer salticidae.PeerID) {
peerID := toID(peer)
if id, exists := c.peerIDToID[peerID]; exists {
idKey := id.Key()
delete(c.peerIDToID, peerID)
delete(c.idToPeerID, idKey)
delete(c.idToIP, idKey)
}
}
func (c *connections) removeID(id ids.ShortID) {
idKey := id.Key()
if peer, exists := c.idToPeerID[idKey]; exists {
delete(c.peerIDToID, toID(peer))
delete(c.idToPeerID, idKey)
delete(c.idToIP, idKey)
}
}
func (c *connections) peerIDs() []salticidae.PeerID {
peers := make([]salticidae.PeerID, 0, len(c.idToPeerID))
for _, peer := range c.idToPeerID {
peers = append(peers, peer)
}
return peers
}
func (c *connections) ids() ids.ShortSet {
ids := ids.ShortSet{}
for _, id := range c.peerIDToID {
ids.Add(id)
}
return ids
}
func (c *connections) ips() []utils.IPDesc {
ips := make([]utils.IPDesc, 0, len(c.idToIP))
for _, ip := range c.idToIP {
ips = append(ips, ip)
}
return ips
}
func (c *connections) conns() ([]salticidae.PeerID, []ids.ShortID, []utils.IPDesc) {
peers := make([]salticidae.PeerID, 0, len(c.idToPeerID))
idList := make([]ids.ShortID, 0, len(c.idToPeerID))
ips := make([]utils.IPDesc, 0, len(c.idToPeerID))
for id, peer := range c.idToPeerID {
idList = append(idList, ids.NewShortID(id))
peers = append(peers, peer)
ips = append(ips, c.idToIP[id])
}
return peers, idList, ips
}
func (c *connections) len() int { return len(c.idToPeerID) }
func toID(peer salticidae.PeerID) [32]byte {
ds := salticidae.NewDataStream(false)
peerInt := peer.AsUInt256()
peerInt.Serialize(ds)
size := ds.Size()
dsb := ds.GetDataInPlace(size)
idBytes := dsb.Get()
id := [32]byte{}
copy(id[:], idBytes)
dsb.Release()
ds.Free()
return id
}
func toIPDesc(addr salticidae.NetAddr) utils.IPDesc {
ip, err := ToIPDesc(addr)
HandshakeNet.log.AssertNoError(err)
return ip
}
// ToIPDesc converts an address to an IP
func ToIPDesc(addr salticidae.NetAddr) (utils.IPDesc, error) {
ip := salticidae.FromBigEndianU32(addr.GetIP())
port := salticidae.FromBigEndianU16(addr.GetPort())
return utils.ToIPDesc(fmt.Sprintf("%d.%d.%d.%d:%d", byte(ip>>24), byte(ip>>16), byte(ip>>8), byte(ip), port))
}

View File

@ -1,840 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
// #include "salticidae/network.h"
// bool connHandler(msgnetwork_conn_t *, bool, void *);
// void unknownPeerHandler(netaddr_t *, x509_t *, void *);
// void peerHandler(peernetwork_conn_t *, bool, void *);
// void ping(msg_t *, msgnetwork_conn_t *, void *);
// void pong(msg_t *, msgnetwork_conn_t *, void *);
// void getVersion(msg_t *, msgnetwork_conn_t *, void *);
// void version(msg_t *, msgnetwork_conn_t *, void *);
// void getPeerList(msg_t *, msgnetwork_conn_t *, void *);
// void peerList(msg_t *, msgnetwork_conn_t *, void *);
import "C"
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
)
/*
Receive a new connection.
- Send version message.
Receive version message.
- Validate data
- Send peer list
- Mark this node as being connected
*/
/*
Periodically gossip peerlists.
- Only connected stakers should be gossiped.
- Gossip to a capped number of peers.
- The peers to gossip to should be at least half full of stakers (or all the
stakers should be in the set).
*/
/*
Attempt reconnections
- If a non-staker disconnects, delete the connection
- If a staker disconnects, attempt to reconnect to the node for a while. If the
node isn't reconnected after a while, delete the connection.
*/
// Version this avalanche instance is executing.
var (
VersionPrefix = "avalanche/"
VersionSeparator = "."
MajorVersion = 0
MinorVersion = 2
PatchVersion = 1
ClientVersion = fmt.Sprintf("%s%d%s%d%s%d",
VersionPrefix,
MajorVersion,
VersionSeparator,
MinorVersion,
VersionSeparator,
PatchVersion)
)
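// with the values above, ClientVersion evaluates to "avalanche/0.2.1"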
const (
// MaxClockDifference allowed between connected nodes.
MaxClockDifference = time.Minute
// PeerListGossipSpacing is the amount of time to wait between pushing this
// node's peer list to other nodes.
PeerListGossipSpacing = time.Minute
// PeerListGossipSize is the number of peers to gossip each period.
PeerListGossipSize = 100
// PeerListStakerGossipFraction determines the fraction of gossiped peers that
// are stakers. If set to 1, then only stakers will be gossiped to.
PeerListStakerGossipFraction = 2
// ConnectTimeout is the amount of time to wait before attempting to connect to
// an unknown peer
ConnectTimeout = 6 * time.Second
// GetVersionTimeout is the amount of time to wait before sending a
// getVersion message to a partially connected peer
GetVersionTimeout = 2 * time.Second
// ReconnectTimeout is the amount of time to wait to reconnect to a staker
// before giving up
ReconnectTimeout = 10 * time.Minute
)
// HandshakeNet is the handshake manager that will be accessed on event calls
var (
HandshakeNet = Handshake{}
)
var (
errDSValidators = errors.New("couldn't get validator set of default subnet")
)
// Handshake handles the authentication of new peers. Only valid stakers
// will appear connected.
type Handshake struct {
handshakeMetrics
networkID uint32 // ID of the network I'm running, used to prevent connecting to the wrong network
log logging.Logger
vdrs validators.Set // set of current validators in the AVAnet
myAddr salticidae.NetAddr // IP I communicate to peers
myID ids.ShortID // ID that identifies myself as a staker or not
net salticidae.PeerNetwork // C messaging network
enableStaking bool // Should only be false for local tests
clock timer.Clock
// Connections that I have added by IP, but haven't gotten an ID from
requestedLock sync.Mutex
requested map[string]struct{}
requestedTimeout timer.TimeoutManager // keys are hashes of the ip:port string
// Connections that I have added as a peer, but haven't gotten a version
// message from
pending Connections
versionTimeout timer.TimeoutManager // keys are the peer IDs
// Connections that I have gotten a valid version message from
connections Connections
reconnectTimeout timer.TimeoutManager // keys are the peer IDs
// IPs of nodes I'm connected to will be repeatedly gossiped throughout the network
peerListGossiper *timer.Repeater
// If any chain is blocked on connecting to peers, track these blockers here
awaitingLock sync.Mutex
awaiting []*networking.AwaitingConnections
lastHeartbeat int64
}
// Initialize registers the handshake handlers with the C networking library.
// This should only be done once during node setup.
func (nm *Handshake) Initialize(
log logging.Logger,
vdrs validators.Set,
myAddr salticidae.NetAddr,
myID ids.ShortID,
peerNet salticidae.PeerNetwork,
registerer prometheus.Registerer,
enableStaking bool,
networkID uint32,
) {
log.AssertTrue(nm.net == nil, "Should only register network handlers once")
nm.handshakeMetrics.Initialize(log, registerer)
nm.networkID = networkID
nm.log = log
nm.vdrs = vdrs
nm.myAddr = myAddr
nm.myID = myID
nm.net = peerNet
nm.enableStaking = enableStaking
nm.requested = make(map[string]struct{})
nm.requestedTimeout.Initialize(ConnectTimeout)
go nm.log.RecoverAndPanic(nm.requestedTimeout.Dispatch)
nm.pending = NewConnections()
nm.versionTimeout.Initialize(GetVersionTimeout)
go nm.log.RecoverAndPanic(nm.versionTimeout.Dispatch)
nm.connections = NewConnections()
nm.reconnectTimeout.Initialize(ReconnectTimeout)
go nm.log.RecoverAndPanic(nm.reconnectTimeout.Dispatch)
nm.peerListGossiper = timer.NewRepeater(nm.gossipPeerList, PeerListGossipSpacing)
go nm.log.RecoverAndPanic(nm.peerListGossiper.Dispatch)
// register c message callbacks
net := peerNet.AsMsgNetwork()
net.RegConnHandler(salticidae.MsgNetworkConnCallback(C.connHandler), nil)
peerNet.RegPeerHandler(salticidae.PeerNetworkPeerCallback(C.peerHandler), nil)
peerNet.RegUnknownPeerHandler(salticidae.PeerNetworkUnknownPeerCallback(C.unknownPeerHandler), nil)
net.RegHandler(Ping, salticidae.MsgNetworkMsgCallback(C.ping), nil)
net.RegHandler(Pong, salticidae.MsgNetworkMsgCallback(C.pong), nil)
net.RegHandler(GetVersion, salticidae.MsgNetworkMsgCallback(C.getVersion), nil)
net.RegHandler(Version, salticidae.MsgNetworkMsgCallback(C.version), nil)
net.RegHandler(GetPeerList, salticidae.MsgNetworkMsgCallback(C.getPeerList), nil)
net.RegHandler(PeerList, salticidae.MsgNetworkMsgCallback(C.peerList), nil)
nm.heartbeat()
}
// ConnectTo adds the peer as a connection and connects to it.
//
// assumes the peerID and addr are autofreed
func (nm *Handshake) ConnectTo(peer salticidae.PeerID, stakerID ids.ShortID, addr salticidae.NetAddr) {
if nm.pending.ContainsPeerID(peer) || nm.connections.ContainsPeerID(peer) {
return
}
nm.log.Debug("attempting to connect to %s", stakerID)
nm.net.AddPeer(peer)
nm.net.SetPeerAddr(peer, addr)
nm.net.ConnPeer(peer, 600, 1)
ip := toIPDesc(addr)
nm.pending.Add(peer, stakerID, ip)
peerBytes := toID(peer)
peerID := ids.NewID(peerBytes)
nm.reconnectTimeout.Put(peerID, func() {
nm.pending.Remove(peer, stakerID)
nm.connections.Remove(peer, stakerID)
nm.net.DelPeer(peer)
nm.numPeers.Set(float64(nm.connections.Len()))
})
}
// Connect attempts to start a connection with the provided address
//
// assumes addr is autofreed.
func (nm *Handshake) Connect(addr salticidae.NetAddr) {
ip := toIPDesc(addr)
ipStr := ip.String()
if nm.pending.ContainsIP(ip) || nm.connections.ContainsIP(ip) {
return
}
if !nm.enableStaking {
nm.log.Debug("adding peer %s", ip)
peer := salticidae.NewPeerIDFromNetAddr(addr, true)
nm.ConnectTo(peer, toShortID(ip), addr)
return
}
nm.requestedLock.Lock()
_, exists := nm.requested[ipStr]
nm.requestedLock.Unlock()
if exists {
return
}
nm.log.Debug("adding peer %s", ip)
count := new(int)
*count = 100
handler := new(func())
*handler = func() {
nm.requestedLock.Lock()
defer nm.requestedLock.Unlock()
if *count == 100 {
nm.requested[ipStr] = struct{}{}
}
if _, exists := nm.requested[ipStr]; !exists {
return
}
if *count <= 0 {
delete(nm.requested, ipStr)
return
}
*count--
if nm.pending.ContainsIP(ip) || nm.connections.ContainsIP(ip) {
return
}
nm.log.Debug("attempting to discover peer at %s", ipStr)
msgNet := nm.net.AsMsgNetwork()
msgNet.Connect(addr)
ipID := ids.NewID(hashing.ComputeHash256Array([]byte(ipStr)))
nm.requestedTimeout.Put(ipID, *handler)
}
(*handler)()
}
// AwaitConnections ...
func (nm *Handshake) AwaitConnections(awaiting *networking.AwaitingConnections) {
nm.awaitingLock.Lock()
defer nm.awaitingLock.Unlock()
awaiting.Add(nm.myID)
for _, cert := range nm.connections.IDs().List() {
awaiting.Add(cert)
}
if awaiting.Ready() {
go awaiting.Finish()
} else {
nm.awaiting = append(nm.awaiting, awaiting)
}
}
func (nm *Handshake) gossipPeerList() {
stakers := []ids.ShortID{}
nonStakers := []ids.ShortID{}
for _, id := range nm.connections.IDs().List() {
if nm.vdrs.Contains(id) {
stakers = append(stakers, id)
} else {
nonStakers = append(nonStakers, id)
}
}
numStakersToSend := (PeerListGossipSize + PeerListStakerGossipFraction - 1) / PeerListStakerGossipFraction
if len(stakers) < numStakersToSend {
numStakersToSend = len(stakers)
}
numNonStakersToSend := PeerListGossipSize - numStakersToSend
if len(nonStakers) < numNonStakersToSend {
numNonStakersToSend = len(nonStakers)
}
idsToSend := []ids.ShortID{}
sampler := random.Uniform{N: len(stakers)}
for i := 0; i < numStakersToSend; i++ {
idsToSend = append(idsToSend, stakers[sampler.Sample()])
}
sampler.N = len(nonStakers)
sampler.Replace()
for i := 0; i < numNonStakersToSend; i++ {
idsToSend = append(idsToSend, nonStakers[sampler.Sample()])
}
peers := []salticidae.PeerID{}
for _, id := range idsToSend {
if peer, exists := nm.connections.GetPeerID(id); exists {
peers = append(peers, peer)
}
}
nm.SendPeerList(peers...)
}
// Connections returns the object that tracks the nodes that are currently
// connected to this node.
func (nm *Handshake) Connections() Connections { return nm.connections }
// Shutdown the network
func (nm *Handshake) Shutdown() {
nm.versionTimeout.Stop()
nm.peerListGossiper.Stop()
}
// SendGetVersion to the requested peer
func (nm *Handshake) SendGetVersion(peer salticidae.PeerID) {
build := Builder{}
gv, err := build.GetVersion()
nm.log.AssertNoError(err)
nm.send(gv, peer)
nm.numGetVersionSent.Inc()
}
// SendVersion to the requested peer
func (nm *Handshake) SendVersion(peer salticidae.PeerID) error {
build := Builder{}
v, err := build.Version(nm.networkID, nm.clock.Unix(), toIPDesc(nm.myAddr), ClientVersion)
if err != nil {
return fmt.Errorf("packing version failed due to: %w", err)
}
nm.send(v, peer)
nm.numVersionSent.Inc()
return nil
}
// SendPeerList to the requested peer
func (nm *Handshake) SendPeerList(peers ...salticidae.PeerID) error {
if len(peers) == 0 {
return nil
}
_, ids, ips := nm.connections.Conns()
ipsToSend := []utils.IPDesc(nil)
for i, id := range ids {
ip := ips[i]
if !ip.IsZero() && nm.vdrs.Contains(id) {
ipsToSend = append(ipsToSend, ip)
}
}
if len(ipsToSend) == 0 {
nm.log.Debug("no IPs to send to %d peer(s)", len(peers))
return nil
}
nm.log.Verbo("sending %d ips to %d peer(s)", len(ipsToSend), len(peers))
build := Builder{}
pl, err := build.PeerList(ipsToSend)
if err != nil {
return fmt.Errorf("packing peer list failed due to: %w", err)
}
nm.send(pl, peers...)
nm.numPeerlistSent.Add(float64(len(peers)))
return nil
}
func (nm *Handshake) send(msg Msg, peers ...salticidae.PeerID) {
ds := msg.DataStream()
defer ds.Free()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
defer ba.Free()
cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false)
defer cMsg.Free()
switch len(peers) {
case 0:
case 1:
nm.net.SendMsg(cMsg, peers[0])
default:
nm.net.MulticastMsgByMove(cMsg, peers)
}
}
// connHandler notifies of a new inbound connection
//export connHandler
func connHandler(_conn *C.struct_msgnetwork_conn_t, connected C.bool, _ unsafe.Pointer) C.bool {
if !HandshakeNet.enableStaking || !bool(connected) {
return connected
}
HandshakeNet.requestedLock.Lock()
defer HandshakeNet.requestedLock.Unlock()
conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn))
addr := conn.GetAddr().Copy(true)
ip := toIPDesc(addr)
ipStr := ip.String()
ipID := ids.NewID(hashing.ComputeHash256Array([]byte(ipStr)))
HandshakeNet.requestedTimeout.Remove(ipID)
if _, exists := HandshakeNet.requested[ipStr]; !exists {
HandshakeNet.log.Debug("connHandler called with ip %s", ip)
return true
}
delete(HandshakeNet.requested, ipStr)
cert := conn.GetPeerCert()
peer := salticidae.NewPeerIDFromX509(cert, true)
HandshakeNet.ConnectTo(peer, getCert(cert), addr)
return true
}
// assumes peer is autofreed
func (nm *Handshake) connectedToPeer(conn *C.struct_peernetwork_conn_t, peer salticidae.PeerID) {
peerBytes := toID(peer)
peerID := ids.NewID(peerBytes)
// If we're enforcing staking, use a peer's certificate to uniquely identify them
// Otherwise, use a hash of their ip to identify them
cert := ids.ShortID{}
if nm.enableStaking {
cert = getPeerCert(conn)
} else {
key := [20]byte{}
copy(key[:], peerID.Bytes())
cert = ids.NewShortID(key)
}
nm.log.Debug("connected to %s", cert)
nm.reconnectTimeout.Remove(peerID)
handler := new(func())
*handler = func() {
if nm.pending.ContainsPeerID(peer) {
nm.SendGetVersion(peer)
nm.versionTimeout.Put(peerID, *handler)
}
}
(*handler)()
}
// assumes peer is autofreed
func (nm *Handshake) disconnectedFromPeer(peer salticidae.PeerID) {
cert := ids.ShortID{}
if pendingCert, exists := nm.pending.GetID(peer); exists {
cert = pendingCert
nm.log.Debug("disconnected from pending peer %s", cert)
} else if connectedCert, exists := nm.connections.GetID(peer); exists {
cert = connectedCert
nm.log.Debug("disconnected from peer %s", cert)
} else {
return
}
peerBytes := toID(peer)
peerID := ids.NewID(peerBytes)
nm.versionTimeout.Remove(peerID)
nm.connections.Remove(peer, cert)
nm.numPeers.Set(float64(nm.connections.Len()))
if !nm.enableStaking || nm.vdrs.Contains(cert) {
nm.reconnectTimeout.Put(peerID, func() {
nm.pending.Remove(peer, cert)
nm.connections.Remove(peer, cert)
nm.net.DelPeer(peer)
nm.numPeers.Set(float64(nm.connections.Len()))
})
nm.pending.Add(peer, cert, utils.IPDesc{})
} else {
nm.pending.Remove(peer, cert)
nm.net.DelPeer(peer)
}
if !nm.enableStaking {
nm.vdrs.Remove(cert)
}
nm.awaitingLock.Lock()
defer nm.awaitingLock.Unlock()
for _, awaiting := range HandshakeNet.awaiting {
awaiting.Remove(cert)
}
}
// checkCompatibility checks to make sure that the peer and I speak the same language.
func (nm *Handshake) checkCompatibility(peerVersion string) bool {
if !strings.HasPrefix(peerVersion, VersionPrefix) {
nm.log.Debug("peer attempted to connect with an invalid version prefix")
return false
}
peerVersion = peerVersion[len(VersionPrefix):]
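// e.g. "avalanche/0.2.1" is trimmed to "0.2.1" and split into
// major=0, minor=2, patch=1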
splitPeerVersion := strings.SplitN(peerVersion, VersionSeparator, 3)
if len(splitPeerVersion) != 3 {
nm.log.Debug("peer attempted to connect with an invalid number of subversions")
return false
}
major, err := strconv.Atoi(splitPeerVersion[0])
if err != nil {
nm.log.Debug("peer attempted to connect with an invalid major version")
return false
}
minor, err := strconv.Atoi(splitPeerVersion[1])
if err != nil {
nm.log.Debug("peer attempted to connect with an invalid minor version")
return false
}
patch, err := strconv.Atoi(splitPeerVersion[2])
if err != nil {
nm.log.Debug("peer attempted to connect with an invalid patch version")
return false
}
switch {
case major < MajorVersion:
// peer's major version is too low
return false
case major > MajorVersion:
nm.log.Warn("peer attempted to connect with a higher major version, this client may need to be updated")
return false
}
switch {
case minor < MinorVersion:
// peer's minor version is too low
return false
case minor > MinorVersion:
nm.log.Warn("peer attempted to connect with a higher minor version, this client may need to be updated")
return false
}
if patch > PatchVersion {
nm.log.Warn("peer is connecting with a higher patch version, this client may need to be updated")
}
return true
}
// heartbeat registers a new heartbeat to signal liveness
func (nm *Handshake) heartbeat() {
atomic.StoreInt64(&nm.lastHeartbeat, nm.clock.Time().Unix())
}
// GetHeartbeat returns the most recent heartbeat time
func (nm *Handshake) GetHeartbeat() int64 {
return atomic.LoadInt64(&nm.lastHeartbeat)
}
// peerHandler notifies a change to the set of connected peers
// connected is true if a new peer is connected
// connected is false if a formerly connected peer has disconnected
//export peerHandler
func peerHandler(_conn *C.struct_peernetwork_conn_t, connected C.bool, _ unsafe.Pointer) {
HandshakeNet.log.Debug("peerHandler called")
pConn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
peer := pConn.GetPeerID(true)
if connected {
HandshakeNet.connectedToPeer(_conn, peer)
} else {
HandshakeNet.disconnectedFromPeer(peer)
}
}
// unknownPeerHandler notifies of an unknown peer connection attempt
//export unknownPeerHandler
func unknownPeerHandler(_addr *C.netaddr_t, _cert *C.x509_t, _ unsafe.Pointer) {
HandshakeNet.log.Debug("unknownPeerHandler called")
addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)).Copy(true)
ip := toIPDesc(addr)
HandshakeNet.log.Debug("adding peer at %s", ip)
var peer salticidae.PeerID
var id ids.ShortID
if HandshakeNet.enableStaking {
cert := salticidae.X509FromC(salticidae.CX509(_cert))
peer = salticidae.NewPeerIDFromX509(cert, true)
id = getCert(cert)
} else {
peer = salticidae.NewPeerIDFromNetAddr(addr, true)
id = toShortID(ip)
}
peerBytes := toID(peer)
peerID := ids.NewID(peerBytes)
HandshakeNet.reconnectTimeout.Put(peerID, func() {
HandshakeNet.pending.Remove(peer, id)
HandshakeNet.connections.Remove(peer, id)
HandshakeNet.net.DelPeer(peer)
HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len()))
})
HandshakeNet.pending.Add(peer, id, utils.IPDesc{})
HandshakeNet.net.AddPeer(peer)
}
// ping handles the receipt of a ping message
//export ping
func ping(_ *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
peer := conn.GetPeerID(false)
defer peer.Free()
build := Builder{}
pong, err := build.Pong()
HandshakeNet.log.AssertNoError(err)
HandshakeNet.send(pong, peer)
}
// pong handles the receipt of a pong message
//export pong
func pong(*C.struct_msg_t, *C.struct_msgnetwork_conn_t, unsafe.Pointer) {}
// getVersion handles the receipt of a getVersion message
//export getVersion
func getVersion(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
HandshakeNet.numGetVersionReceived.Inc()
HandshakeNet.heartbeat()
conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
peer := conn.GetPeerID(false)
defer peer.Free()
HandshakeNet.SendVersion(peer)
}
// version handles the receipt of a version message
//export version
func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
HandshakeNet.numVersionReceived.Inc()
HandshakeNet.heartbeat()
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
peer := conn.GetPeerID(true)
peerBytes := toID(peer)
peerID := ids.NewID(peerBytes)
HandshakeNet.versionTimeout.Remove(peerID)
id, exists := HandshakeNet.pending.GetID(peer)
if !exists {
HandshakeNet.log.Debug("dropping Version message because the peer isn't pending")
return
}
HandshakeNet.pending.Remove(peer, id)
build := Builder{}
pMsg, err := build.Parse(Version, msg.GetPayloadByMove())
if err != nil {
HandshakeNet.log.Debug("failed to parse Version message")
HandshakeNet.net.DelPeer(peer)
return
}
if networkID := pMsg.Get(NetworkID).(uint32); networkID != HandshakeNet.networkID {
HandshakeNet.log.Debug("peer's network ID doesn't match our networkID: Peer's = %d ; Ours = %d", networkID, HandshakeNet.networkID)
HandshakeNet.net.DelPeer(peer)
return
}
myTime := float64(HandshakeNet.clock.Unix())
if peerTime := float64(pMsg.Get(MyTime).(uint64)); math.Abs(peerTime-myTime) > MaxClockDifference.Seconds() {
HandshakeNet.log.Debug("peer's clock is too far out of sync with mine. Peer's = %d, Ours = %d (seconds)", uint64(peerTime), uint64(myTime))
HandshakeNet.net.DelPeer(peer)
return
}
if peerVersion := pMsg.Get(VersionStr).(string); !HandshakeNet.checkCompatibility(peerVersion) {
HandshakeNet.log.Debug("peer version, %s, is not compatible. dropping connection.", peerVersion)
HandshakeNet.net.DelPeer(peer)
return
}
ip := pMsg.Get(IP).(utils.IPDesc)
HandshakeNet.log.Debug("Finishing handshake with %s", ip)
HandshakeNet.SendPeerList(peer)
HandshakeNet.connections.Add(peer, id, ip)
HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len()))
if !HandshakeNet.enableStaking {
HandshakeNet.vdrs.Add(validators.NewValidator(id, 1))
}
HandshakeNet.awaitingLock.Lock()
defer HandshakeNet.awaitingLock.Unlock()
for i := 0; i < len(HandshakeNet.awaiting); i++ {
awaiting := HandshakeNet.awaiting[i]
awaiting.Add(id)
if !awaiting.Ready() {
continue
}
newLen := len(HandshakeNet.awaiting) - 1
HandshakeNet.awaiting[i] = HandshakeNet.awaiting[newLen]
HandshakeNet.awaiting = HandshakeNet.awaiting[:newLen]
i--
go awaiting.Finish()
}
}
// getPeerList handles the receipt of a getPeerList message
//export getPeerList
func getPeerList(_ *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
HandshakeNet.numGetPeerlistReceived.Inc()
HandshakeNet.heartbeat()
conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn))
peer := conn.GetPeerID(false)
defer peer.Free()
HandshakeNet.SendPeerList(peer)
}
// peerList handles the receipt of a peerList message
//export peerList
func peerList(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
HandshakeNet.numPeerlistReceived.Inc()
HandshakeNet.heartbeat()
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
build := Builder{}
pMsg, err := build.Parse(PeerList, msg.GetPayloadByMove())
if err != nil {
HandshakeNet.log.Debug("failed to parse PeerList message due to %s", err)
// TODO: What should we do here?
return
}
ips := pMsg.Get(Peers).([]utils.IPDesc)
cErr := salticidae.NewError()
for _, ip := range ips {
addr := salticidae.NewNetAddrFromIPPortString(ip.String(), true, &cErr)
if cErr.GetCode() != 0 || HandshakeNet.myAddr.IsEq(addr) {
// Make sure not to connect to myself
continue
}
HandshakeNet.Connect(addr)
}
}
func getPeerCert(_conn *C.struct_peernetwork_conn_t) ids.ShortID {
conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn))
return getCert(conn.GetPeerCert())
}
func getCert(cert salticidae.X509) ids.ShortID {
der := cert.GetDer(false)
certDS := salticidae.NewDataStreamMovedFromByteArray(der, false)
certBytes := certDS.GetDataInPlace(certDS.Size()).Get()
certID, err := ids.ToShortID(hashing.PubkeyBytesToAddress(certBytes))
certDS.Free()
der.Free()
HandshakeNet.log.AssertNoError(err)
return certID
}
func toShortID(ip utils.IPDesc) ids.ShortID {
return ids.NewShortID(hashing.ComputeHash160Array([]byte(ip.String())))
}

View File

@ -1,104 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/utils/logging"
)
type handshakeMetrics struct {
numPeers prometheus.Gauge
numGetVersionSent, numGetVersionReceived,
numVersionSent, numVersionReceived,
numGetPeerlistSent, numGetPeerlistReceived,
numPeerlistSent, numPeerlistReceived prometheus.Counter
}
func (hm *handshakeMetrics) Initialize(log logging.Logger, registerer prometheus.Registerer) {
hm.numPeers = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "gecko",
Name: "peers",
Help: "Number of network peers",
})
hm.numGetVersionSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_version_sent",
Help: "Number of get_version messages sent",
})
hm.numGetVersionReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_version_received",
Help: "Number of get_version messages received",
})
hm.numVersionSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "version_sent",
Help: "Number of version messages sent",
})
hm.numVersionReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "version_received",
Help: "Number of version messages received",
})
hm.numGetPeerlistSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_peerlist_sent",
Help: "Number of get_peerlist messages sent",
})
hm.numGetPeerlistReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_peerlist_received",
Help: "Number of get_peerlist messages received",
})
hm.numPeerlistSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "peerlist_sent",
Help: "Number of peerlist messages sent",
})
hm.numPeerlistReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "peerlist_received",
Help: "Number of peerlist messages received",
})
if err := registerer.Register(hm.numPeers); err != nil {
log.Error("Failed to register peers statistics due to %s", err)
}
if err := registerer.Register(hm.numGetVersionSent); err != nil {
log.Error("Failed to register get_version_sent statistics due to %s", err)
}
if err := registerer.Register(hm.numGetVersionReceived); err != nil {
log.Error("Failed to register get_version_received statistics due to %s", err)
}
if err := registerer.Register(hm.numVersionSent); err != nil {
log.Error("Failed to register version_sent statistics due to %s", err)
}
if err := registerer.Register(hm.numVersionReceived); err != nil {
log.Error("Failed to register version_received statistics due to %s", err)
}
if err := registerer.Register(hm.numGetPeerlistSent); err != nil {
log.Error("Failed to register get_peerlist_sent statistics due to %s", err)
}
if err := registerer.Register(hm.numGetPeerlistReceived); err != nil {
log.Error("Failed to register get_peerlist_received statistics due to %s", err)
}
if err := registerer.Register(hm.numPeerlistSent); err != nil {
log.Error("Failed to register peerlist_sent statistics due to %s", err)
}
if err := registerer.Register(hm.numPeerlistReceived); err != nil {
log.Error("Failed to register peerlist_received statistics due to %s", err)
}
}

View File

@ -1,658 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
// #include "salticidae/network.h"
// void getAcceptedFrontier(msg_t *, msgnetwork_conn_t *, void *);
// void acceptedFrontier(msg_t *, msgnetwork_conn_t *, void *);
// void getAccepted(msg_t *, msgnetwork_conn_t *, void *);
// void accepted(msg_t *, msgnetwork_conn_t *, void *);
// void get(msg_t *, msgnetwork_conn_t *, void *);
// void put(msg_t *, msgnetwork_conn_t *, void *);
// void pushQuery(msg_t *, msgnetwork_conn_t *, void *);
// void pullQuery(msg_t *, msgnetwork_conn_t *, void *);
// void chits(msg_t *, msgnetwork_conn_t *, void *);
import "C"
import (
"errors"
"fmt"
"math"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/router"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
)
// GossipSize is the maximum number of peers to gossip a container to
const (
GossipSize = 50
)
var (
// VotingNet implements the SenderExternal interface.
VotingNet = Voting{}
)
var (
errConnectionDropped = errors.New("connection dropped before receiving message")
)
// Voting implements the SenderExternal interface with a C++ library.
type Voting struct {
votingMetrics
log logging.Logger
vdrs validators.Set
net salticidae.PeerNetwork
conns Connections
router router.Router
executor timer.Executor
}
// Initialize registers the voting handlers with the C networking library. Should only be called once.
func (s *Voting) Initialize(log logging.Logger, vdrs validators.Set, peerNet salticidae.PeerNetwork, conns Connections, router router.Router, registerer prometheus.Registerer) {
log.AssertTrue(s.net == nil, "Should only register network handlers once")
log.AssertTrue(s.conns == nil, "Should only set connections once")
log.AssertTrue(s.router == nil, "Should only set the router once")
s.log = log
s.vdrs = vdrs
s.net = peerNet
s.conns = conns
s.router = router
s.votingMetrics.Initialize(log, registerer)
net := peerNet.AsMsgNetwork()
net.RegHandler(GetAcceptedFrontier, salticidae.MsgNetworkMsgCallback(C.getAcceptedFrontier), nil)
net.RegHandler(AcceptedFrontier, salticidae.MsgNetworkMsgCallback(C.acceptedFrontier), nil)
net.RegHandler(GetAccepted, salticidae.MsgNetworkMsgCallback(C.getAccepted), nil)
net.RegHandler(Accepted, salticidae.MsgNetworkMsgCallback(C.accepted), nil)
net.RegHandler(Get, salticidae.MsgNetworkMsgCallback(C.get), nil)
net.RegHandler(Put, salticidae.MsgNetworkMsgCallback(C.put), nil)
net.RegHandler(PushQuery, salticidae.MsgNetworkMsgCallback(C.pushQuery), nil)
net.RegHandler(PullQuery, salticidae.MsgNetworkMsgCallback(C.pullQuery), nil)
net.RegHandler(Chits, salticidae.MsgNetworkMsgCallback(C.chits), nil)
s.executor.Initialize()
go log.RecoverAndPanic(s.executor.Dispatch)
}
// Shutdown threads
func (s *Voting) Shutdown() { s.executor.Stop() }
// Accept is called after every consensus decision
func (s *Voting) Accept(chainID, containerID ids.ID, container []byte) error {
return s.gossip(chainID, containerID, container)
}
// GetAcceptedFrontier implements the Sender interface.
func (s *Voting) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32) {
peers := []salticidae.PeerID(nil)
validatorIDList := validatorIDs.List()
for _, validatorID := range validatorIDList {
vID := validatorID
if peer, exists := s.conns.GetPeerID(vID); exists {
peers = append(peers, peer)
s.log.Verbo("Sending a GetAcceptedFrontier to %s", vID)
} else {
s.log.Debug("attempted to send a GetAcceptedFrontier message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.GetAcceptedFrontierFailed(vID, chainID, requestID) })
}
}
build := Builder{}
msg, err := build.GetAcceptedFrontier(chainID, requestID)
s.log.AssertNoError(err)
s.log.Verbo("Sending a GetAcceptedFrontier message."+
"\nNumber of Validators: %d"+
"\nChain: %s"+
"\nRequest ID: %d",
len(peers),
chainID,
requestID,
)
s.send(msg, peers...)
s.numGetAcceptedFrontierSent.Add(float64(len(peers)))
}
// AcceptedFrontier implements the Sender interface.
func (s *Voting) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("attempted to send an AcceptedFrontier message to disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
s.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
s.log.Verbo("Sending an AcceptedFrontier message."+
"\nValidator: %s"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer IDs: %s",
validatorID,
chainID,
requestID,
containerIDs,
)
s.send(msg, peer)
s.numAcceptedFrontierSent.Inc()
}
// GetAccepted implements the Sender interface.
func (s *Voting) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
peers := []salticidae.PeerID(nil)
validatorIDList := validatorIDs.List()
for _, validatorID := range validatorIDList {
vID := validatorID
if peer, exists := s.conns.GetPeerID(validatorID); exists {
peers = append(peers, peer)
s.log.Verbo("sending a GetAccepted to %s", vID)
} else {
s.log.Debug("attempted to send a GetAccepted message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.GetAcceptedFailed(vID, chainID, requestID) })
}
}
build := Builder{}
msg, err := build.GetAccepted(chainID, requestID, containerIDs)
if err != nil {
for _, peer := range peers {
if validatorID, exists := s.conns.GetID(peer); exists {
s.executor.Add(func() { s.router.GetAcceptedFailed(validatorID, chainID, requestID) })
}
}
s.log.Debug("attempted to pack too large of a GetAccepted message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
s.log.Verbo("Sending a GetAccepted message."+
"\nNumber of Validators: %d"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer IDs:%s",
len(peers),
chainID,
requestID,
containerIDs,
)
s.send(msg, peers...)
s.numGetAcceptedSent.Add(float64(len(peers)))
}
// Accepted implements the Sender interface.
func (s *Voting) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("attempted to send an Accepted message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Accepted(chainID, requestID, containerIDs)
if err != nil {
s.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
s.log.Verbo("Sending an Accepted message."+
"\nValidator: %s"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer IDs: %s",
validatorID,
chainID,
requestID,
containerIDs,
)
s.send(msg, peer)
s.numAcceptedSent.Inc()
}
// Get implements the Sender interface.
func (s *Voting) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("attempted to send a Get message to a disconnected validator: %s", validatorID)
s.executor.Add(func() { s.router.GetFailed(validatorID, chainID, requestID) })
return // Validator is not connected
}
build := Builder{}
msg, err := build.Get(chainID, requestID, containerID)
s.log.AssertNoError(err)
s.log.Verbo("Sending a Get message."+
"\nValidator: %s"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer ID: %s",
validatorID,
chainID,
requestID,
containerID,
)
s.send(msg, peer)
s.numGetSent.Inc()
}
// Put implements the Sender interface.
func (s *Voting) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("attempted to send a Container message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Put(chainID, requestID, containerID, container)
if err != nil {
s.log.Error("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
return // Packing message failed
}
s.log.Verbo("Sending a Container message."+
"\nValidator: %s"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer ID: %s"+
"\nContainer:\n%s",
validatorID,
chainID,
requestID,
containerID,
formatting.DumpBytes{Bytes: container},
)
s.send(msg, peer)
s.numPutSent.Inc()
}
// PushQuery implements the Sender interface.
func (s *Voting) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
peers := []salticidae.PeerID(nil)
validatorIDList := validatorIDs.List()
for _, validatorID := range validatorIDList {
vID := validatorID
if peer, exists := s.conns.GetPeerID(vID); exists {
peers = append(peers, peer)
s.log.Verbo("Sending a PushQuery to %s", vID)
} else {
s.log.Debug("attempted to send a PushQuery message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) })
}
}
build := Builder{}
msg, err := build.PushQuery(chainID, requestID, containerID, container)
if err != nil {
for _, peer := range peers {
if validatorID, exists := s.conns.GetID(peer); exists {
s.executor.Add(func() { s.router.QueryFailed(validatorID, chainID, requestID) })
}
}
s.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
s.log.Verbo("Sending a PushQuery message."+
"\nNumber of Validators: %d"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer ID: %s"+
"\nContainer:\n%s",
len(peers),
chainID,
requestID,
containerID,
formatting.DumpBytes{Bytes: container},
)
s.send(msg, peers...)
s.numPushQuerySent.Add(float64(len(peers)))
}
// PullQuery implements the Sender interface.
func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID) {
peers := []salticidae.PeerID(nil)
validatorIDList := validatorIDs.List()
for _, validatorID := range validatorIDList {
vID := validatorID
if peer, exists := s.conns.GetPeerID(vID); exists {
peers = append(peers, peer)
s.log.Verbo("Sending a PullQuery to %s", vID)
} else {
s.log.Debug("attempted to send a PullQuery message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) })
}
}
build := Builder{}
msg, err := build.PullQuery(chainID, requestID, containerID)
s.log.AssertNoError(err)
s.log.Verbo("Sending a PullQuery message."+
"\nNumber of Validators: %d"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nContainer ID: %s",
len(peers),
chainID,
requestID,
containerID,
)
s.send(msg, peers...)
s.numPullQuerySent.Add(float64(len(peers)))
}
// Chits implements the Sender interface.
func (s *Voting) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("attempted to send a Chits message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Chits(chainID, requestID, votes)
if err != nil {
s.log.Error("attempted to pack too large of a Chits message.\nChits length: %d", votes.Len())
return // Packing message failed
}
s.log.Verbo("Sending a Chits message."+
"\nValidator: %s"+
"\nChain: %s"+
"\nRequest ID: %d"+
"\nNumber of Chits: %d",
validatorID,
chainID,
requestID,
votes.Len(),
)
s.send(msg, peer)
s.numChitsSent.Inc()
}
// Gossip attempts to gossip the container to the network
func (s *Voting) Gossip(chainID, containerID ids.ID, container []byte) {
if err := s.gossip(chainID, containerID, container); err != nil {
s.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
}
}
func (s *Voting) send(msg Msg, peers ...salticidae.PeerID) {
ds := msg.DataStream()
defer ds.Free()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
defer ba.Free()
cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false)
defer cMsg.Free()
switch len(peers) {
case 0:
case 1:
s.net.SendMsg(cMsg, peers[0])
default:
s.net.MulticastMsgByMove(cMsg, peers)
}
}
func (s *Voting) gossip(chainID, containerID ids.ID, container []byte) error {
allPeers := s.conns.PeerIDs()
numToGossip := GossipSize
if numToGossip > len(allPeers) {
numToGossip = len(allPeers)
}
peers := make([]salticidae.PeerID, numToGossip)
sampler := random.Uniform{N: len(allPeers)}
for i := range peers {
peers[i] = allPeers[sampler.Sample()]
}
build := Builder{}
msg, err := build.Put(chainID, math.MaxUint32, containerID, container)
if err != nil {
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
s.log.Verbo("Sending a Put message to peers."+
"\nNumber of Peers: %d"+
"\nChain: %s"+
"\nContainer ID: %s"+
"\nContainer:\n%s",
len(peers),
chainID,
containerID,
formatting.DumpBytes{Bytes: container},
)
s.send(msg, peers...)
s.numPutSent.Add(float64(len(peers)))
return nil
}
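For illustration, here is a minimal standalone sketch of the gossip fan-out used by `gossip` above: pick at most a fixed number of peers uniformly at random and send the same message to each. It uses only the standard library rather than the repo's `random.Uniform` sampler, and the string peer IDs and print call are hypothetical stand-ins for real peers and `send`.

```go
package main

import (
	"fmt"
	"math/rand"
)

// gossipSize caps how many peers receive each gossiped container
// (mirrors the role of the GossipSize constant referenced above).
const gossipSize = 2

// gossipTo picks up to gossipSize peers uniformly at random, without
// replacement, and returns the chosen subset.
func gossipTo(allPeers []string) []string {
	n := gossipSize
	if n > len(allPeers) {
		n = len(allPeers)
	}
	// Shuffle a copy of the peer list and take the first n entries.
	shuffled := append([]string(nil), allPeers...)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	return shuffled[:n]
}

func main() {
	peers := []string{"peerA", "peerB", "peerC", "peerD"}
	for _, p := range gossipTo(peers) {
		fmt.Println("would send Put message to", p)
	}
}
```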
// getAcceptedFrontier handles the receipt of a getAcceptedFrontier container
// message for a chain
//export getAcceptedFrontier
func getAcceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numGetAcceptedFrontierReceived.Inc()
validatorID, chainID, requestID, _, err := VotingNet.sanitize(_msg, _conn, GetAcceptedFrontier)
if err != nil {
VotingNet.log.Debug("failed to sanitize getAcceptedFrontier message due to: %s", err)
return
}
VotingNet.router.GetAcceptedFrontier(validatorID, chainID, requestID)
}
// acceptedFrontier handles the receipt of an acceptedFrontier message
//export acceptedFrontier
func acceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numAcceptedFrontierReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, AcceptedFrontier)
if err != nil {
VotingNet.log.Debug("failed to sanitize acceptedFrontier message due to: %s", err)
return
}
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
VotingNet.router.AcceptedFrontier(validatorID, chainID, requestID, containerIDs)
}
// getAccepted handles the receipt of a getAccepted message
//export getAccepted
func getAccepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numGetAcceptedReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, GetAccepted)
if err != nil {
VotingNet.log.Debug("failed to sanitize getAccepted message due to: %s", err)
return
}
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
VotingNet.router.GetAccepted(validatorID, chainID, requestID, containerIDs)
}
// accepted handles the receipt of an accepted message
//export accepted
func accepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numAcceptedReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Accepted)
if err != nil {
VotingNet.log.Debug("failed to sanitize accepted message due to: %s", err)
return
}
containerIDs := ids.Set{}
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
}
VotingNet.router.Accepted(validatorID, chainID, requestID, containerIDs)
}
// get handles the receipt of a get container message for a chain
//export get
func get(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numGetReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Get)
if err != nil {
VotingNet.log.Debug("failed to sanitize get message due to: %s", err)
return
}
containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte))
VotingNet.router.Get(validatorID, chainID, requestID, containerID)
}
// put handles the receipt of a container message
//export put
func put(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numPutReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Put)
if err != nil {
VotingNet.log.Debug("failed to sanitize put message due to: %s", err)
return
}
containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte))
containerBytes := msg.Get(ContainerBytes).([]byte)
VotingNet.router.Put(validatorID, chainID, requestID, containerID, containerBytes)
}
// pushQuery handles the receipt of a push query message
//export pushQuery
func pushQuery(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numPushQueryReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PushQuery)
if err != nil {
VotingNet.log.Debug("failed to sanitize pushQuery message due to: %s", err)
return
}
containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte))
containerBytes := msg.Get(ContainerBytes).([]byte)
VotingNet.router.PushQuery(validatorID, chainID, requestID, containerID, containerBytes)
}
// pullQuery handles the receipt of a pull query message
//export pullQuery
func pullQuery(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numPullQueryReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PullQuery)
if err != nil {
VotingNet.log.Debug("failed to sanitize pullQuery message due to: %s", err)
return
}
containerID, _ := ids.ToID(msg.Get(ContainerID).([]byte))
VotingNet.router.PullQuery(validatorID, chainID, requestID, containerID)
}
// chits handles the receipt of a chits message
//export chits
func chits(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
VotingNet.numChitsReceived.Inc()
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Chits)
if err != nil {
VotingNet.log.Debug("failed to sanitize chits message due to: %s", err)
return
}
votes := ids.Set{}
for _, voteBytes := range msg.Get(ContainerIDs).([][]byte) {
vote, err := ids.ToID(voteBytes)
if err != nil {
VotingNet.log.Debug("error parsing chit %v: %s", voteBytes, err)
return
}
votes.Add(vote)
}
VotingNet.router.Chits(validatorID, chainID, requestID, votes)
}
func (s *Voting) sanitize(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, op salticidae.Opcode) (ids.ShortID, ids.ID, uint32, Msg, error) {
conn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn((*C.peernetwork_conn_t)(_conn)))
peer := conn.GetPeerID(false)
defer peer.Free()
validatorID, exists := s.conns.GetID(peer)
if !exists {
return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("received message from un-registered peer %s", validatorID)
}
s.log.Verbo("received message from %s", validatorID)
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
codec := Codec{}
pMsg, err := codec.Parse(op, msg.GetPayloadByMove())
if err != nil {
return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("couldn't parse payload: %w", err) // The message couldn't be parsed
}
chainID, err := ids.ToID(pMsg.Get(ChainID).([]byte))
s.log.AssertNoError(err)
requestID := pMsg.Get(RequestID).(uint32)
return validatorID, chainID, requestID, pMsg, nil
}

View File

@ -1,188 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package networking
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/utils/logging"
)
type votingMetrics struct {
numGetAcceptedFrontierSent, numGetAcceptedFrontierReceived,
numAcceptedFrontierSent, numAcceptedFrontierReceived,
numGetAcceptedSent, numGetAcceptedReceived,
numAcceptedSent, numAcceptedReceived,
numGetSent, numGetReceived,
numPutSent, numPutReceived,
numPushQuerySent, numPushQueryReceived,
numPullQuerySent, numPullQueryReceived,
numChitsSent, numChitsReceived prometheus.Counter
}
func (vm *votingMetrics) Initialize(log logging.Logger, registerer prometheus.Registerer) {
vm.numGetAcceptedFrontierSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_accepted_frontier_sent",
Help: "Number of get accepted frontier messages sent",
})
vm.numGetAcceptedFrontierReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_accepted_frontier_received",
Help: "Number of get accepted frontier messages received",
})
vm.numAcceptedFrontierSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "accepted_frontier_sent",
Help: "Number of accepted frontier messages sent",
})
vm.numAcceptedFrontierReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "accepted_frontier_received",
Help: "Number of accepted frontier messages received",
})
vm.numGetAcceptedSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_accepted_sent",
Help: "Number of get accepted messages sent",
})
vm.numGetAcceptedReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_accepted_received",
Help: "Number of get accepted messages received",
})
vm.numAcceptedSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "accepted_sent",
Help: "Number of accepted messages sent",
})
vm.numAcceptedReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "accepted_received",
Help: "Number of accepted messages received",
})
vm.numGetSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_sent",
Help: "Number of get messages sent",
})
vm.numGetReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "get_received",
Help: "Number of get messages received",
})
vm.numPutSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "put_sent",
Help: "Number of put messages sent",
})
vm.numPutReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "put_received",
Help: "Number of put messages received",
})
vm.numPushQuerySent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "push_query_sent",
Help: "Number of push query messages sent",
})
vm.numPushQueryReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "push_query_received",
Help: "Number of push query messages received",
})
vm.numPullQuerySent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "pull_query_sent",
Help: "Number of pull query messages sent",
})
vm.numPullQueryReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "pull_query_received",
Help: "Number of pull query messages received",
})
vm.numChitsSent = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "chits_sent",
Help: "Number of chits messages sent",
})
vm.numChitsReceived = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "gecko",
Name: "chits_received",
Help: "Number of chits messages received",
})
if err := registerer.Register(vm.numGetAcceptedFrontierSent); err != nil {
log.Error("Failed to register get_accepted_frontier_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numGetAcceptedFrontierReceived); err != nil {
log.Error("Failed to register get_accepted_frontier_received statistics due to %s", err)
}
if err := registerer.Register(vm.numAcceptedFrontierSent); err != nil {
log.Error("Failed to register accepted_frontier_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numAcceptedFrontierReceived); err != nil {
log.Error("Failed to register accepted_frontier_received statistics due to %s", err)
}
if err := registerer.Register(vm.numGetAcceptedSent); err != nil {
log.Error("Failed to register get_accepted_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numGetAcceptedReceived); err != nil {
log.Error("Failed to register get_accepted_received statistics due to %s", err)
}
if err := registerer.Register(vm.numAcceptedSent); err != nil {
log.Error("Failed to register accepted_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numAcceptedReceived); err != nil {
log.Error("Failed to register accepted_received statistics due to %s", err)
}
if err := registerer.Register(vm.numGetSent); err != nil {
log.Error("Failed to register get_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numGetReceived); err != nil {
log.Error("Failed to register get_received statistics due to %s", err)
}
if err := registerer.Register(vm.numPutSent); err != nil {
log.Error("Failed to register put_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numPutReceived); err != nil {
log.Error("Failed to register put_received statistics due to %s", err)
}
if err := registerer.Register(vm.numPushQuerySent); err != nil {
log.Error("Failed to register push_query_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numPushQueryReceived); err != nil {
log.Error("Failed to register push_query_received statistics due to %s", err)
}
if err := registerer.Register(vm.numPullQuerySent); err != nil {
log.Error("Failed to register pull_query_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numPullQueryReceived); err != nil {
log.Error("Failed to register pull_query_received statistics due to %s", err)
}
if err := registerer.Register(vm.numChitsSent); err != nil {
log.Error("Failed to register chits_sent statistics due to %s", err)
}
if err := registerer.Register(vm.numChitsReceived); err != nil {
log.Error("Failed to register chits_received statistics due to %s", err)
}
}

View File

@ -1,75 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package xputtest
// #include "salticidae/network.h"
// void issueTx(msg_t *, msgnetwork_conn_t *, void *);
import "C"
import (
"unsafe"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils/hashing"
)
// CClientHandler is the struct that will be accessed on event calls
var CClientHandler CClient
// CClient manages a client network using the c networking library
type CClient struct {
issuer *Issuer
net salticidae.MsgNetwork
}
// Initialize registers this client with the c networking library. This should
// only be called once, during setup of the node.
func (h *CClient) Initialize(net salticidae.MsgNetwork, issuer *Issuer) {
h.issuer = issuer
h.net = net
net.RegHandler(networking.IssueTx, salticidae.MsgNetworkMsgCallback(C.issueTx), nil)
}
func (h *CClient) send(msg networking.Msg, conn salticidae.MsgNetworkConn) {
ds := msg.DataStream()
defer ds.Free()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
defer ba.Free()
cMsg := salticidae.NewMsgMovedFromByteArray(msg.Op(), ba, false)
defer cMsg.Free()
h.net.SendMsg(cMsg, conn)
}
// issueTx handles the receipt of an IssueTx message
//export issueTx
func issueTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
build := networking.Builder{}
pMsg, err := build.Parse(networking.IssueTx, msg.GetPayloadByMove())
if err != nil {
return
}
chainID, _ := ids.ToID(pMsg.Get(networking.ChainID).([]byte))
txBytes := pMsg.Get(networking.Tx).([]byte)
txID := ids.NewID(hashing.ComputeHash256Array(txBytes))
conn := salticidae.MsgNetworkConnFromC(salticidae.CMsgNetworkConn(_conn)).Copy(false)
CClientHandler.issuer.IssueTx(chainID, txBytes, func(status choices.Status) {
build := networking.Builder{}
msg, _ := build.DecidedTx(txID, status)
CClientHandler.send(msg, conn)
conn.Free()
})
}

View File

@ -1,77 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package xputtest
import (
"sync"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils/logging"
)
type issuableVM interface {
IssueTx([]byte, func(choices.Status)) (ids.ID, error)
}
// Issuer manages all the chain transaction flushing.
type Issuer struct {
lock sync.Mutex
log logging.Logger
vms map[[32]byte]issuableVM
locks map[[32]byte]sync.Locker
callbacks chan func()
}
// Initialize this flusher
func (i *Issuer) Initialize(log logging.Logger) {
i.lock.Lock()
defer i.lock.Unlock()
i.log = log
i.vms = make(map[[32]byte]issuableVM)
i.locks = make(map[[32]byte]sync.Locker)
i.callbacks = make(chan func(), 1000)
go func() {
for callback := range i.callbacks {
callback()
}
}()
}
// RegisterChain implements the registrant
func (i *Issuer) RegisterChain(ctx *snow.Context, vm interface{}) {
i.lock.Lock()
defer i.lock.Unlock()
key := ctx.ChainID.Key()
switch vm := vm.(type) {
case issuableVM:
i.vms[key] = vm
i.locks[key] = &ctx.Lock
}
}
// IssueTx issues the transaction to the chain and registers the timeout.
func (i *Issuer) IssueTx(chainID ids.ID, tx []byte, finalized func(choices.Status)) {
i.lock.Lock()
defer i.lock.Unlock()
key := chainID.Key()
if lock, exists := i.locks[key]; exists {
i.callbacks <- func() {
lock.Lock()
defer lock.Unlock()
if vm, exists := i.vms[key]; exists {
if _, err := vm.IssueTx(tx, finalized); err != nil {
i.log.Error("Issuing the tx returned with %s unexpectedly", err)
}
}
}
} else {
i.log.Warn("Attempted to issue a Tx to an unsupported chain %s", chainID)
}
}
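For illustration, a standalone sketch of the single-worker callback queue that `Issuer` builds around its buffered `callbacks` channel: every `IssueTx` call enqueues a closure, and one goroutine drains the channel so issuance stays serialized per process. The close/wait step exists only so the example terminates; the `Issuer` above never closes its channel.

```go
package main

import (
	"fmt"
	"sync"
)

// queue serializes submitted callbacks onto a single worker goroutine,
// the same pattern Issuer uses with its buffered callbacks channel.
type queue struct {
	callbacks chan func()
	wg        sync.WaitGroup
}

func newQueue(size int) *queue {
	q := &queue{callbacks: make(chan func(), size)}
	q.wg.Add(1)
	go func() {
		defer q.wg.Done()
		for callback := range q.callbacks {
			callback()
		}
	}()
	return q
}

func (q *queue) submit(f func()) { q.callbacks <- f }

// close stops the worker after draining; only needed to end the example.
func (q *queue) close() {
	close(q.callbacks)
	q.wg.Wait()
}

func main() {
	q := newQueue(1000)
	for i := 0; i < 3; i++ {
		i := i
		q.submit(func() { fmt.Println("issued tx", i) })
	}
	q.close()
}
```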

View File

@ -3,24 +3,17 @@
package node
// #include "salticidae/network.h"
// void onTerm(threadcall_handle_t *, void *);
// void errorHandler(SalticidaeCError *, bool, int32_t, void *);
import "C"
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"sync"
"time"
"unsafe"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/api/admin"
@ -34,14 +27,14 @@ import (
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/networking/xputtest"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
"github.com/ava-labs/gecko/version"
"github.com/ava-labs/gecko/vms"
"github.com/ava-labs/gecko/vms/avm"
"github.com/ava-labs/gecko/vms/nftfx"
@ -54,16 +47,17 @@ import (
"github.com/ava-labs/gecko/vms/timestampvm"
)
// Networking constants
const (
maxMessageSize = 1 << 25 // maximum size of a message sent with salticidae
TCP = "tcp"
)
var (
genesisHashKey = []byte("genesisID")
)
// MainNode is the reference for node callbacks
var MainNode = Node{}
nodeVersion = version.NewDefaultVersion("avalanche", 0, 3, 0)
versionParser = version.NewDefaultParser()
)
// Node is an instance of an Ava node.
type Node struct {
@ -94,30 +88,12 @@ type Node struct {
DecisionDispatcher *triggers.EventDispatcher
ConsensusDispatcher *triggers.EventDispatcher
// Event loop manager
EC salticidae.EventContext
// Caller to the event context
TCall salticidae.ThreadCall
// Network that manages validator peers
PeerNet salticidae.PeerNetwork
// Network that manages clients
ClientNet salticidae.MsgNetwork // TODO: Remove
// API that handles new connections
ValidatorAPI *networking.Handshake
// API that handles voting messages
ConsensusAPI *networking.Voting
// Net runs the networking stack
Net network.Network
// current validators of the network
vdrs validators.Manager
// APIs that handle client messages
// TODO: Remove
Issuer *xputtest.Issuer
CClientAPI *xputtest.CClient
// Handles HTTP API calls
APIServer api.Server
@ -134,71 +110,38 @@ type Node struct {
******************************************************************************
*/
//export onTerm
func onTerm(*C.threadcall_handle_t, unsafe.Pointer) {
MainNode.Log.Debug("Terminate signal received")
MainNode.EC.Stop()
}
//export errorHandler
func errorHandler(_err *C.struct_SalticidaeCError, fatal C.bool, asyncID C.int32_t, _ unsafe.Pointer) {
err := (*salticidae.Error)(unsafe.Pointer(_err))
if fatal {
MainNode.Log.Fatal("Error during async call: %s", salticidae.StrError(err.GetCode()))
MainNode.EC.Stop()
return
func (n *Node) initNetworking() error {
listener, err := net.Listen(TCP, n.Config.StakingIP.PortString())
if err != nil {
return err
}
MainNode.Log.Debug("Error during async with ID %d call: %s", asyncID, salticidae.StrError(err.GetCode()))
}
func (n *Node) initNetlib() error {
// Create main event context
n.EC = salticidae.NewEventContext()
n.TCall = salticidae.NewThreadCall(n.EC)
n.nodeCloser = utils.HandleSignals(func(os.Signal) {
n.TCall.AsyncCall(salticidae.ThreadCallCallback(C.onTerm), nil)
}, os.Interrupt, os.Kill)
// Create peer network config, may have tls enabled
peerConfig := salticidae.NewPeerNetworkConfig()
peerConfig.ConnTimeout(60)
msgConfig := peerConfig.AsMsgNetworkConfig()
msgConfig.MaxMsgSize(maxMessageSize)
dialer := network.NewDialer(TCP)
var serverUpgrader, clientUpgrader network.Upgrader
if n.Config.EnableStaking {
msgConfig.EnableTLS(true)
msgConfig.TLSKeyFile(n.Config.StakingKeyFile)
msgConfig.TLSCertFile(n.Config.StakingCertFile)
}
// Create the peer network
err := salticidae.NewError()
n.PeerNet = salticidae.NewPeerNetwork(n.EC, peerConfig, &err)
if code := err.GetCode(); code != 0 {
return errors.New(salticidae.StrError(code))
}
// Add peer network error handling
net := n.PeerNet.AsMsgNetwork()
net.RegErrorHandler(salticidae.MsgNetworkErrorCallback(C.errorHandler), nil)
if n.Config.ThroughputServerEnabled {
// Create the client network
msgConfig := salticidae.NewMsgNetworkConfig()
msgConfig.MaxMsgSize(maxMessageSize)
n.ClientNet = salticidae.NewMsgNetwork(n.EC, msgConfig, &err)
if code := err.GetCode(); code != 0 {
return errors.New(salticidae.StrError(code))
cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
if err != nil {
return err
}
// Add client network error handling
n.ClientNet.RegErrorHandler(salticidae.MsgNetworkErrorCallback(C.errorHandler), nil)
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{cert},
ClientAuth: tls.RequireAnyClientCert,
// We do not use TLS's CA functionality, we just require an
// authenticated channel. Therefore, we can safely skip verification
// here.
//
// TODO: Security audit required
InsecureSkipVerify: true,
}
serverUpgrader = network.NewTLSServerUpgrader(tlsConfig)
clientUpgrader = network.NewTLSClientUpgrader(tlsConfig)
} else {
serverUpgrader = network.NewIPUpgrader()
clientUpgrader = network.NewIPUpgrader()
}
return nil
}
func (n *Node) initValidatorNet() error {
// Initialize validator manager and default subnet's validator set
defaultSubnetValidators := validators.NewSet()
if !n.Config.EnableStaking {
@ -207,104 +150,64 @@ func (n *Node) initValidatorNet() error {
n.vdrs = validators.NewManager()
n.vdrs.PutValidatorSet(platformvm.DefaultSubnetID, defaultSubnetValidators)
cErr := salticidae.NewError()
serverIP := salticidae.NewNetAddrFromIPPortString(n.Config.StakingIP.String(), true, &cErr)
if code := cErr.GetCode(); code != 0 {
return errors.New(salticidae.StrError(code))
n.Net = network.NewDefaultNetwork(
n.Config.ConsensusParams.Metrics,
n.Log,
n.ID,
n.Config.StakingIP,
n.Config.NetworkID,
nodeVersion,
versionParser,
listener,
dialer,
serverUpgrader,
clientUpgrader,
defaultSubnetValidators,
n.Config.ConsensusRouter,
)
if !n.Config.EnableStaking {
n.Net.RegisterHandler(&insecureValidatorManager{
vdrs: defaultSubnetValidators,
})
}
n.ValidatorAPI = &networking.HandshakeNet
n.ValidatorAPI.Initialize(
/*log=*/ n.Log,
/*validators=*/ defaultSubnetValidators,
/*myIP=*/ serverIP,
/*myID=*/ n.ID,
/*network=*/ n.PeerNet,
/*metrics=*/ n.Config.ConsensusParams.Metrics,
/*enableStaking=*/ n.Config.EnableStaking,
/*networkID=*/ n.Config.NetworkID,
)
n.nodeCloser = utils.HandleSignals(func(os.Signal) {
n.Net.Close()
}, os.Interrupt, os.Kill)
return nil
}
func (n *Node) initConsensusNet() {
vdrs, ok := n.vdrs.GetValidatorSet(platformvm.DefaultSubnetID)
n.Log.AssertTrue(ok, "should have initialized the validator set already")
n.ConsensusAPI = &networking.VotingNet
n.ConsensusAPI.Initialize(n.Log, vdrs, n.PeerNet, n.ValidatorAPI.Connections(), n.chainManager.Router(), n.Config.ConsensusParams.Metrics)
n.Log.AssertNoError(n.ConsensusDispatcher.Register("gossip", n.ConsensusAPI))
type insecureValidatorManager struct {
vdrs validators.Set
}
func (n *Node) initClients() {
n.Issuer = &xputtest.Issuer{}
n.Issuer.Initialize(n.Log)
n.CClientAPI = &xputtest.CClientHandler
n.CClientAPI.Initialize(n.ClientNet, n.Issuer)
n.chainManager.AddRegistrant(n.Issuer)
func (i *insecureValidatorManager) Connected(vdrID ids.ShortID) bool {
i.vdrs.Add(validators.NewValidator(vdrID, 1))
return false
}
// StartConsensusServer starts the P2P server this node uses to communicate
// with other nodes
func (n *Node) StartConsensusServer() error {
n.Log.Verbo("starting the consensus server")
n.PeerNet.AsMsgNetwork().Start()
err := salticidae.NewError()
// The IP this node listens on for P2P messaging
serverIP := salticidae.NewNetAddrFromIPPortString(n.Config.StakingIP.String(), true, &err)
if code := err.GetCode(); code != 0 {
return fmt.Errorf("failed to create ip addr: %s", salticidae.StrError(code))
}
// Listen for P2P messages
n.PeerNet.Listen(serverIP, &err)
if code := err.GetCode(); code != 0 {
return fmt.Errorf("failed to listen on consensus server at %s: %s", n.Config.StakingIP, salticidae.StrError(code))
}
// Start a server to handle throughput tests if configuration says to. Disabled by default.
if n.Config.ThroughputServerEnabled {
n.ClientNet.Start()
clientIP := salticidae.NewNetAddrFromIPPortString(fmt.Sprintf("127.0.0.1:%d", n.Config.ThroughputPort), true, &err)
if code := err.GetCode(); code != 0 {
return fmt.Errorf("failed to start xput server: %s", salticidae.StrError(code))
}
n.ClientNet.Listen(clientIP, &err)
if code := err.GetCode(); code != 0 {
return fmt.Errorf("failed to listen on xput server at 127.0.0.1:%d: %s", n.Config.ThroughputPort, salticidae.StrError(code))
}
}
func (i *insecureValidatorManager) Disconnected(vdrID ids.ShortID) bool {
i.vdrs.Remove(vdrID)
return false
}
// Dispatch starts the node's servers.
// Returns when the node exits.
func (n *Node) Dispatch() {
// Add bootstrap nodes to the peer network
for _, peer := range n.Config.BootstrapPeers {
if !peer.IP.Equal(n.Config.StakingIP) {
bootstrapAddr := salticidae.NewNetAddrFromIPPortString(peer.IP.String(), true, &err)
if code := err.GetCode(); code != 0 {
return fmt.Errorf("failed to create bootstrap ip addr: %s", salticidae.StrError(code))
}
n.ValidatorAPI.Connect(bootstrapAddr)
n.Net.Track(peer.IP)
} else {
n.Log.Error("can't add self as a bootstrapper")
}
}
return nil
n.Net.Dispatch()
}
// Dispatch starts the node's servers.
// Returns when the node exits.
func (n *Node) Dispatch() { n.EC.Dispatch() }
/*
******************************************************************************
*********************** End P2P Networking Section ***************************
@ -410,6 +313,8 @@ func (n *Node) initEventDispatcher() {
n.ConsensusDispatcher = &triggers.EventDispatcher{}
n.ConsensusDispatcher.Initialize(n.Log)
n.Log.AssertNoError(n.ConsensusDispatcher.Register("gossip", n.Net))
}
// Initializes the Platform chain.
@ -492,7 +397,7 @@ func (n *Node) initAPIServer() {
err := n.APIServer.Dispatch()
n.Log.Fatal("API server initialization failed with %s", err)
n.TCall.AsyncCall(salticidae.ThreadCallCallback(C.onTerm), nil)
n.Net.Close()
})
}
@ -507,12 +412,11 @@ func (n *Node) initChainManager() {
n.ConsensusDispatcher,
n.DB,
n.Config.ConsensusRouter,
&networking.VotingNet,
n.Net,
n.Config.ConsensusParams,
n.vdrs,
n.ID,
n.Config.NetworkID,
n.ValidatorAPI,
&n.APIServer,
&n.keystoreServer,
&n.sharedMemory,
@ -556,7 +460,7 @@ func (n *Node) initMetricsAPI() {
func (n *Node) initAdminAPI() {
if n.Config.AdminAPIEnabled {
n.Log.Info("initializing Admin API")
service := admin.NewService(n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.ValidatorAPI.Connections(), &n.APIServer)
service := admin.NewService(n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
}
}
@ -570,7 +474,7 @@ func (n *Node) initHealthAPI() {
n.Log.Info("initializing Health API")
service := health.NewService(n.Log)
service.RegisterHeartbeat("network.validators.heartbeat", n.ValidatorAPI, 5*time.Minute)
service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute)
n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
}
@ -636,34 +540,24 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
return fmt.Errorf("problem initializing staker ID: %w", err)
}
// initialize shared memory
n.initSharedMemory()
if err = n.initNetlib(); err != nil { // Set up all networking
return fmt.Errorf("problem initializing networking: %w", err)
}
// Start HTTP APIs
n.initAPIServer() // Start the API Server
n.initKeystoreAPI() // Start the Keystore API
n.initMetricsAPI() // Start the Metrics API
// Start node-to-node consensus server
if err := n.initValidatorNet(); err != nil { // Set up the validator handshake + authentication
return fmt.Errorf("problem initializing validator network: %w", err)
// initialize shared memory
n.initSharedMemory()
if err = n.initNetworking(); err != nil { // Set up all networking
return fmt.Errorf("problem initializing networking: %w", err)
}
if err := n.initVMManager(); err != nil { // Set up the vm manager
return fmt.Errorf("problem initializing the VM manager: %w", err)
}
n.initEventDispatcher() // Set up the event dispatcher
n.initChainManager() // Set up the chain manager
n.initConsensusNet() // Set up the main consensus network
// TODO: Remove once API is fully featured for throughput tests
if n.Config.ThroughputServerEnabled {
n.initClients() // Set up the client servers
}
n.initAdminAPI() // Start the Admin API
n.initHealthAPI() // Start the Health API
@ -678,8 +572,7 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
// Shutdown this node
func (n *Node) Shutdown() {
n.Log.Info("shutting down the node")
n.ValidatorAPI.Shutdown()
n.ConsensusAPI.Shutdown()
n.Net.Close()
n.chainManager.Shutdown()
utils.ClearSignals(n.nodeCloser)
}

View File

@ -46,7 +46,7 @@ borealis_node:
node1:
ansible_host: 34.207.133.167
node2:
ansible_host: 107.23.241.199
ansible_host: 54.162.71.9
node3:
ansible_host: 54.197.215.186
node4:

View File

@ -10,7 +10,7 @@ PREFIX="${PREFIX:-$(pwd)/build}"
PLUGIN_PREFIX="$PREFIX/plugins"
SRC_DIR="$(dirname "${BASH_SOURCE[0]}")"
source "$SRC_DIR/env.sh"
GOPATH="$(go env GOPATH)"
CORETH_PKG=github.com/ava-labs/coreth
CORETH_PATH="$GOPATH/src/$CORETH_PKG"
@ -36,10 +36,9 @@ else
fi
go build -o "$PREFIX/ava" "$GECKO_PATH/main/"*.go
go build -o "$PREFIX/xputtest" "$GECKO_PATH/xputtest/"*.go
go build -o "$PLUGIN_PREFIX/evm" "$CORETH_PATH/plugin/"*.go
if [[ -f "$PREFIX/ava" && -f "$PREFIX/xputtest" && -f "$PLUGIN_PREFIX/evm" ]]; then
if [[ -f "$PREFIX/ava" && -f "$PLUGIN_PREFIX/evm" ]]; then
echo "Build Successful"
else
echo "Build failure"
fi
fi

View File

@ -6,7 +6,4 @@ set -o pipefail
# Ted: contact me when you make any changes
SRC_DIR="$(dirname "${BASH_SOURCE[0]}")"
source "$SRC_DIR/env.sh"
go test -race -timeout="30s" -coverprofile="coverage.out" -covermode="atomic" ./...
go test -race -timeout="60s" -coverprofile="coverage.out" -covermode="atomic" ./...

View File

@ -1,13 +0,0 @@
#!/bin/bash
# Ted: contact me when you make any changes
# resolve the required env for salticidae-go
GOPATH="$(go env GOPATH)"
SALTICIDAE_GO_HOME="$GOPATH/src/github.com/ava-labs/salticidae-go/"
if [[ -f "$SALTICIDAE_GO_HOME/salticidae/libsalticidae.a" ]]; then
source "$SALTICIDAE_GO_HOME/scripts/env.sh"
else
source /dev/stdin <<<"$(curl -sS https://raw.githubusercontent.com/ava-labs/salticidae-go/v0.1.0/setup.sh)"
fi

View File

@ -30,9 +30,16 @@ type bootstrapper struct {
metrics
common.Bootstrapper
// IDs of vertices that we're already in the process of getting
// TODO: Find a better way to track; this keeps every single vertex's ID in memory when bootstrapping from nothing
seen ids.Set
numFetched uint64 // number of vertices that have been fetched from validators
// vtxReqs prevents asking validators for the same vertex
vtxReqs common.Requests
// IDs of vertices that we have requested from other validators but haven't received
pending ids.Set
finished bool
onFinished func()
@ -91,8 +98,6 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) {
// Put ...
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
b.BootstrapConfig.Context.Log.Verbo("Put called for vertexID %s", vtxID)
vtx, err := b.State.ParseVertex(vtxBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
@ -168,6 +173,10 @@ func (b *bootstrapper) addVertex(vtx avalanche.Vertex) {
func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) {
vts := []avalanche.Vertex{vtx}
b.numFetched++
if b.numFetched%2500 == 0 { // periodically inform user of progress
b.BootstrapConfig.Context.Log.Info("bootstrapping has fetched %d vertices", b.numFetched)
}
for len(vts) > 0 {
newLen := len(vts) - 1
@ -187,6 +196,8 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) {
vtx: vtx,
}); err == nil {
b.numBlockedVtx.Inc()
} else {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked")
}
for _, tx := range vtx.Txs() {
if err := b.TxBlocked.Push(&txJob{
@ -195,10 +206,16 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) {
tx: tx,
}); err == nil {
b.numBlockedTx.Inc()
} else {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked")
}
}
for _, parent := range vtx.Parents() {
if parentID := parent.ID(); !b.seen.Contains(parentID) {
b.seen.Add(parentID)
vts = append(vts, parent)
}
}
vts = append(vts, vtx.Parents()...)
case choices.Accepted:
b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", vtxID)
case choices.Rejected:
@ -214,12 +231,14 @@ func (b *bootstrapper) finish() {
if b.finished {
return
}
b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching vertices. executing state transitions...")
b.executeAll(b.TxBlocked, b.numBlockedTx)
b.executeAll(b.VtxBlocked, b.numBlockedVtx)
// Start consensus
b.onFinished()
b.seen = ids.Set{}
b.finished = true
}
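The `seen` set introduced above turns the vertex walk into a standard visited-set traversal, so each ancestor is enqueued at most once even when many children share a parent. A minimal standalone sketch of that pattern, using a hypothetical `Vertex` type rather than the repo's `avalanche.Vertex`:

```go
package main

import "fmt"

// Vertex is a hypothetical stand-in for a DAG vertex with an ID and parents.
type Vertex struct {
	ID      string
	Parents []*Vertex
}

// traverse walks the ancestry of vtx, visiting each vertex at most once
// thanks to the seen set (the same idea as the bootstrapper's seen ids.Set).
func traverse(vtx *Vertex, visit func(*Vertex)) {
	seen := map[string]bool{vtx.ID: true}
	stack := []*Vertex{vtx}
	for len(stack) > 0 {
		v := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		visit(v)
		for _, parent := range v.Parents {
			if !seen[parent.ID] {
				seen[parent.ID] = true
				stack = append(stack, parent)
			}
		}
	}
}

func main() {
	root := &Vertex{ID: "root"}
	a := &Vertex{ID: "a", Parents: []*Vertex{root}}
	b := &Vertex{ID: "b", Parents: []*Vertex{root, a}}
	traverse(b, func(v *Vertex) { fmt.Println("fetched", v.ID) })
}
```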

View File

@ -8,7 +8,6 @@ import (
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"io"
"golang.org/x/crypto/ripemd160"
@ -50,7 +49,7 @@ func ByteArraysToHash256Array(byteArray ...[]byte) [32]byte {
for _, b := range byteArray {
err := binary.Write(buffer, binary.LittleEndian, b)
if err != nil {
fmt.Println(err)
panic(err)
}
}
return ComputeHash256Array(buffer.Bytes())

66
version/parser.go Normal file
View File

@ -0,0 +1,66 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package version
import (
"fmt"
"strconv"
"strings"
)
// Parser defines the interface of a version parser
type Parser interface {
Parse(string) (Version, error)
}
type parser struct {
appSeparator string
versionSeparator string
}
// NewDefaultParser returns a new parser with the default separators
func NewDefaultParser() Parser { return NewParser(defaultAppSeparator, defaultVersionSeparator) }
// NewParser returns a new parser
func NewParser(appSeparator string, versionSeparator string) Parser {
return &parser{
appSeparator: appSeparator,
versionSeparator: versionSeparator,
}
}
func (p *parser) Parse(s string) (Version, error) {
splitApp := strings.SplitN(s, p.appSeparator, 2)
if len(splitApp) != 2 {
return nil, fmt.Errorf("failed to parse %s as a version", s)
}
splitVersion := strings.SplitN(splitApp[1], p.versionSeparator, 3)
if len(splitVersion) != 3 {
return nil, fmt.Errorf("failed to parse %s as a version", s)
}
major, err := strconv.Atoi(splitVersion[0])
if err != nil {
return nil, fmt.Errorf("failed to parse %s as a version due to %w", s, err)
}
minor, err := strconv.Atoi(splitVersion[1])
if err != nil {
return nil, fmt.Errorf("failed to parse %s as a version due to %w", s, err)
}
patch, err := strconv.Atoi(splitVersion[2])
if err != nil {
return nil, fmt.Errorf("failed to parse %s as a version due to %w", s, err)
}
return NewVersion(
splitApp[0],
p.appSeparator,
p.versionSeparator,
major,
minor,
patch,
), nil
}
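For reference, a minimal usage sketch of this package as the node code above wires it up (a local `NewDefaultVersion("avalanche", 0, 3, 0)` plus `NewDefaultParser` for remote version strings); the remote string here is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/gecko/version"
)

func main() {
	local := version.NewDefaultVersion("avalanche", 0, 3, 0)
	parser := version.NewDefaultParser()

	// Parse a peer's advertised version string, e.g. "avalanche/0.3.1".
	remote, err := parser.Parse("avalanche/0.3.1")
	if err != nil {
		fmt.Println("could not parse remote version:", err)
		return
	}

	// Compatible returns nil when app, major, and minor versions all match;
	// patch-level differences are tolerated.
	if err := local.Compatible(remote); err != nil {
		fmt.Println("incompatible peer:", err)
		return
	}
	fmt.Println("compatible; local is older:", local.Before(remote))
}
```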

54
version/parser_test.go Normal file
View File

@ -0,0 +1,54 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package version
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewDefaultParser(t *testing.T) {
p := NewDefaultParser()
v, err := p.Parse("ava/1.2.3")
assert.NoError(t, err)
assert.NotNil(t, v)
assert.Equal(t, "ava/1.2.3", v.String())
assert.Equal(t, "ava", v.App())
assert.Equal(t, 1, v.Major())
assert.Equal(t, 2, v.Minor())
assert.Equal(t, 3, v.Patch())
assert.NoError(t, v.Compatible(v))
assert.False(t, v.Before(v))
badVersions := []string{
"",
"ava/",
"ava/z.0.0",
"ava/0.z.0",
"ava/0.0.z",
}
for _, badVersion := range badVersions {
_, err := p.Parse(badVersion)
assert.Error(t, err)
}
}
func TestNewParser(t *testing.T) {
p := NewParser(":", ",")
v, err := p.Parse("ava:1,2,3")
assert.NoError(t, err)
assert.NotNil(t, v)
assert.Equal(t, "ava:1,2,3", v.String())
assert.Equal(t, "ava", v.App())
assert.Equal(t, 1, v.Major())
assert.Equal(t, 2, v.Minor())
assert.Equal(t, 3, v.Patch())
assert.NoError(t, v.Compatible(v))
assert.False(t, v.Before(v))
}

142
version/version.go Normal file
View File

@ -0,0 +1,142 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package version
import (
"errors"
"fmt"
)
const (
defaultAppSeparator = "/"
defaultVersionSeparator = "."
)
var (
errDifferentApps = errors.New("different applications")
errDifferentMajor = errors.New("different major version")
errDifferentMinor = errors.New("different minor version")
)
// Version defines what is needed to describe a version
type Version interface {
fmt.Stringer
App() string
Major() int
Minor() int
Patch() int
Compatible(Version) error
Before(Version) bool
}
type version struct {
app string
major int
minor int
patch int
str string
}
// NewDefaultVersion returns a new version with default separators
func NewDefaultVersion(
app string,
major int,
minor int,
patch int,
) Version {
return NewVersion(
app,
defaultAppSeparator,
defaultVersionSeparator,
major,
minor,
patch,
)
}
// NewVersion returns a new version
func NewVersion(
app string,
appSeparator string,
versionSeparator string,
major int,
minor int,
patch int,
) Version {
return &version{
app: app,
major: major,
minor: minor,
patch: patch,
str: fmt.Sprintf("%s%s%d%s%d%s%d",
app,
appSeparator,
major,
versionSeparator,
minor,
versionSeparator,
patch,
),
}
}
func (v *version) App() string { return v.app }
func (v *version) Major() int { return v.major }
func (v *version) Minor() int { return v.minor }
func (v *version) Patch() int { return v.patch }
func (v *version) String() string { return v.str }
func (v *version) Compatible(o Version) error {
switch {
case v.App() != o.App():
return errDifferentApps
case v.Major() != o.Major():
return errDifferentMajor
case v.Minor() != o.Minor():
return errDifferentMinor
default:
return nil
}
}
func (v *version) Before(o Version) bool {
if v.App() != o.App() {
return false
}
{
v := v.Major()
o := o.Major()
switch {
case v < o:
return true
case v > o:
return false
}
}
{
v := v.Minor()
o := o.Minor()
switch {
case v < o:
return true
case v > o:
return false
}
}
{
v := v.Patch()
o := o.Patch()
switch {
case v < o:
return true
}
}
return false
}

88
version/version_test.go Normal file
View File

@ -0,0 +1,88 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package version
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewDefaultVersion(t *testing.T) {
v := NewDefaultVersion("ava", 1, 2, 3)
assert.NotNil(t, v)
assert.Equal(t, "ava/1.2.3", v.String())
assert.Equal(t, "ava", v.App())
assert.Equal(t, 1, v.Major())
assert.Equal(t, 2, v.Minor())
assert.Equal(t, 3, v.Patch())
assert.NoError(t, v.Compatible(v))
assert.False(t, v.Before(v))
}
func TestNewVersion(t *testing.T) {
v := NewVersion("ava", ":", ",", 1, 2, 3)
assert.NotNil(t, v)
assert.Equal(t, "ava:1,2,3", v.String())
assert.Equal(t, "ava", v.App())
assert.Equal(t, 1, v.Major())
assert.Equal(t, 2, v.Minor())
assert.Equal(t, 3, v.Patch())
assert.NoError(t, v.Compatible(v))
assert.False(t, v.Before(v))
}
func TestIncompatibleApps(t *testing.T) {
v0 := NewDefaultVersion("ava", 1, 2, 3)
v1 := NewDefaultVersion("notava", 1, 2, 3)
assert.NotNil(t, v0)
assert.NotNil(t, v1)
assert.Error(t, v0.Compatible(v1))
assert.Error(t, v1.Compatible(v0))
assert.False(t, v0.Before(v1))
assert.False(t, v1.Before(v0))
}
func TestIncompatibleMajor(t *testing.T) {
v0 := NewDefaultVersion("ava", 1, 2, 3)
v1 := NewDefaultVersion("ava", 2, 2, 3)
assert.NotNil(t, v0)
assert.NotNil(t, v1)
assert.Error(t, v0.Compatible(v1))
assert.Error(t, v1.Compatible(v0))
assert.True(t, v0.Before(v1))
assert.False(t, v1.Before(v0))
}
func TestIncompatibleMinor(t *testing.T) {
v0 := NewDefaultVersion("ava", 1, 2, 3)
v1 := NewDefaultVersion("ava", 1, 3, 3)
assert.NotNil(t, v0)
assert.NotNil(t, v1)
assert.Error(t, v0.Compatible(v1))
assert.Error(t, v1.Compatible(v0))
assert.True(t, v0.Before(v1))
assert.False(t, v1.Before(v0))
}
func TestCompatiblePatch(t *testing.T) {
v0 := NewDefaultVersion("ava", 1, 2, 3)
v1 := NewDefaultVersion("ava", 1, 2, 4)
assert.NotNil(t, v0)
assert.NotNil(t, v1)
assert.NoError(t, v0.Compatible(v1))
assert.NoError(t, v1.Compatible(v0))
assert.True(t, v0.Before(v1))
assert.False(t, v1.Before(v0))
}

View File

@ -16,10 +16,11 @@ import (
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/json"
safemath "github.com/ava-labs/gecko/utils/math"
"github.com/ava-labs/gecko/vms/components/ava"
"github.com/ava-labs/gecko/vms/components/verify"
"github.com/ava-labs/gecko/vms/secp256k1fx"
safemath "github.com/ava-labs/gecko/utils/math"
)
var (
@ -37,6 +38,7 @@ var (
errUnknownOutputType = errors.New("unknown output type")
errUnneededAddress = errors.New("address not required to sign")
errUnknownCredentialType = errors.New("unknown credential type")
errNilTxID = errors.New("nil transaction ID")
)
// Service defines the base service for the asset vm
@ -75,10 +77,6 @@ type GetTxStatusReply struct {
Status choices.Status `json:"status"`
}
var (
errNilTxID = errors.New("nil transaction ID")
)
// GetTxStatus returns the status of the specified transaction
func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error {
service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID)
@ -247,6 +245,7 @@ func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply
return err
}
reply.UTXOIDs = make([]ava.UTXOID, 0, len(utxos))
for _, utxo := range utxos {
if !utxo.AssetID().Equals(assetID) {
continue
@ -545,7 +544,7 @@ func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs,
}
addresses, _ := user.Addresses(db)
addresses = append(addresses, ids.NewID(hashing.ComputeHash256Array(sk.PublicKey().Address().Bytes())))
addresses = append(addresses, sk.PublicKey().Address())
if err := user.SetAddresses(db, addresses); err != nil {
return fmt.Errorf("problem saving address: %w", err)
@ -555,6 +554,40 @@ func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs,
return nil
}
// ListAddressesArgs ...
type ListAddressesArgs struct {
// User that we're listing the addresses of
Username string `json:"username"`
Password string `json:"password"`
}
// ListAddressesResponse ...
type ListAddressesResponse struct {
// Each element is an address controlled by specified account
Addresses []string `json:"addresses"`
}
// ListAddresses returns all of the addresses controlled by user [args.Username]
func (service *Service) ListAddresses(_ *http.Request, args *ListAddressesArgs, response *ListAddressesResponse) error {
db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil {
return fmt.Errorf("problem retrieving user: %w", err)
}
response.Addresses = []string{}
user := userState{vm: service.vm}
addresses, err := user.Addresses(db)
if err != nil {
return nil
}
for _, address := range addresses {
response.Addresses = append(response.Addresses, service.vm.Format(address.Bytes()))
}
return nil
}
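A hypothetical client-side call to the new endpoint, sketched in Go. The port, the `/ext/bc/X` path, and the `avm.listAddresses` method name are assumptions about the node's default API configuration and are not part of this diff; the credentials are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumed defaults: node API on 127.0.0.1:9650 and the AVM mounted at
	// /ext/bc/X, with this service method exposed as avm.listAddresses.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": 1,
		"method": "avm.listAddresses",
		"params": {"username": "myUser", "password": "myPassword"}
	}`)

	resp, err := http.Post("http://127.0.0.1:9650/ext/bc/X", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	reply, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(reply)) // e.g. {"jsonrpc":"2.0","result":{"addresses":[...]},"id":1}
}
```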
// ExportKeyArgs are arguments for ExportKey
type ExportKeyArgs struct {
Username string `json:"username"`
@ -576,6 +609,10 @@ func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E
if err != nil {
return fmt.Errorf("problem parsing address: %w", err)
}
addr, err := ids.ToShortID(address)
if err != nil {
return fmt.Errorf("problem parsing address: %w", err)
}
db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil {
@ -584,7 +621,7 @@ func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E
user := userState{vm: service.vm}
sk, err := user.Key(db, ids.NewID(hashing.ComputeHash256Array(address)))
sk, err := user.Key(db, addr)
if err != nil {
return fmt.Errorf("problem retrieving private key: %w", err)
}
@ -629,7 +666,7 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I
}
addresses, _ := user.Addresses(db)
addresses = append(addresses, ids.NewID(hashing.ComputeHash256Array(sk.PublicKey().Address().Bytes())))
addresses = append(addresses, sk.PublicKey().Address())
if err := user.SetAddresses(db, addresses); err != nil {
return fmt.Errorf("problem saving addresses: %w", err)
@ -688,7 +725,9 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply)
addresses, _ := user.Addresses(db)
addrs := ids.Set{}
addrs.Add(addresses...)
for _, addr := range addresses {
addrs.Add(ids.NewID(hashing.ComputeHash256Array(addr.Bytes())))
}
utxos, err := service.vm.GetUTXOs(addrs)
if err != nil {
return fmt.Errorf("problem retrieving user's UTXOs: %w", err)
@ -957,6 +996,10 @@ func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply
if err != nil {
return fmt.Errorf("problem parsing address '%s': %w", args.Minter, err)
}
addr, err := ids.ToShortID(minter)
if err != nil {
return fmt.Errorf("problem parsing address '%s': %w", args.Minter, err)
}
db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil {
@ -965,7 +1008,6 @@ func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply
user := userState{vm: service.vm}
addr := ids.NewID(hashing.ComputeHash256Array(minter))
sk, err := user.Key(db, addr)
if err != nil {
return fmt.Errorf("problem retriving private key: %w", err)
@ -1095,7 +1137,10 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *I
addresses, _ := user.Addresses(db)
addrs := ids.Set{}
addrs.Add(addresses...)
for _, addr := range addresses {
addrs.Add(ids.NewID(hashing.ComputeHash256Array(addr.Bytes())))
}
utxos, err := service.vm.GetAtomicUTXOs(addrs)
if err != nil {
return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err)
@ -1239,7 +1284,10 @@ func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *E
addresses, _ := user.Addresses(db)
addrs := ids.Set{}
addrs.Add(addresses...)
for _, addr := range addresses {
addrs.Add(ids.NewID(hashing.ComputeHash256Array(addr.Bytes())))
}
utxos, err := service.vm.GetUTXOs(addrs)
if err != nil {
return fmt.Errorf("problem retrieving user's UTXOs: %w", err)

View File

@ -7,14 +7,13 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/hashing"
)
var addresses = ids.Empty
type userState struct{ vm *VM }
func (s *userState) SetAddresses(db database.Database, addrs []ids.ID) error {
func (s *userState) SetAddresses(db database.Database, addrs []ids.ShortID) error {
bytes, err := s.vm.codec.Marshal(addrs)
if err != nil {
return err
@ -22,12 +21,12 @@ func (s *userState) SetAddresses(db database.Database, addrs []ids.ID) error {
return db.Put(addresses.Bytes(), bytes)
}
func (s *userState) Addresses(db database.Database) ([]ids.ID, error) {
func (s *userState) Addresses(db database.Database) ([]ids.ShortID, error) {
bytes, err := db.Get(addresses.Bytes())
if err != nil {
return nil, err
}
addresses := []ids.ID{}
addresses := []ids.ShortID{}
if err := s.vm.codec.Unmarshal(bytes, &addresses); err != nil {
return nil, err
}
@ -35,10 +34,10 @@ func (s *userState) Addresses(db database.Database) ([]ids.ID, error) {
}
func (s *userState) SetKey(db database.Database, sk *crypto.PrivateKeySECP256K1R) error {
return db.Put(hashing.ComputeHash256(sk.PublicKey().Address().Bytes()), sk.Bytes())
return db.Put(sk.PublicKey().Address().Bytes(), sk.Bytes())
}
func (s *userState) Key(db database.Database, address ids.ID) (*crypto.PrivateKeySECP256K1R, error) {
func (s *userState) Key(db database.Database, address ids.ShortID) (*crypto.PrivateKeySECP256K1R, error) {
factory := crypto.FactorySECP256K1R{}
bytes, err := db.Get(address.Bytes())

View File

@ -1,17 +0,0 @@
# Throughput testing
A throughput test is run in two parts. First, a network must be running with at least one node acting as a throughput server. To start a throughput server, pass the `--xput-server-enabled=true` flag when running a node.
An example single node network can be started with:
```sh
./build/ava --public-ip=127.0.0.1 --xput-server-port=9652 --xput-server-enabled=true --db-enabled=false --staking-tls-enabled=false --snow-sample-size=1 --snow-quorum-size=1
```
The throughput node can be started with:
```sh
./build/xputtest --ip=127.0.0.1 --port=9652 --sp-chain
```
The above example will run a throughput test on the simple payment chain. Pass `--sp-dag` to run throughput tests on the simple payment DAG, or `--avm` to run them against the AVA virtual machine.

View File

@ -1,108 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"time"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/timer"
"github.com/ava-labs/gecko/vms/avm"
"github.com/ava-labs/gecko/vms/platformvm"
"github.com/ava-labs/gecko/xputtest/avmwallet"
)
// benchmark an instance of the avm
func (n *network) benchmarkAVM(chain *platformvm.CreateChainTx) {
genesisBytes := chain.GenesisData
wallet, err := avmwallet.NewWallet(n.log, n.networkID, chain.ID(), config.AvaTxFee)
n.log.AssertNoError(err)
factory := crypto.FactorySECP256K1R{}
sk, err := factory.ToPrivateKey(config.Key)
n.log.AssertNoError(err)
wallet.ImportKey(sk.(*crypto.PrivateKeySECP256K1R))
codec := wallet.Codec()
genesis := avm.Genesis{}
n.log.AssertNoError(codec.Unmarshal(genesisBytes, &genesis))
genesisTx := genesis.Txs[0]
tx := avm.Tx{
UnsignedTx: &genesisTx.CreateAssetTx,
}
txBytes, err := codec.Marshal(&tx)
n.log.AssertNoError(err)
tx.Initialize(txBytes)
for _, utxo := range tx.UTXOs() {
wallet.AddUTXO(utxo)
}
assetID := genesisTx.ID()
n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs, assetID))
go n.log.RecoverAndPanic(func() { n.IssueAVM(chain.ID(), assetID, wallet) })
}
// issue transactions to the instance of the avm funded by the provided wallet
func (n *network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wallet) {
n.log.Debug("Issuing with %d", wallet.Balance(assetID))
numAccepted := 0
numPending := 0
n.decided <- ids.ID{}
// track the last second of transactions
meter := timer.TimedMeter{Duration: time.Second}
for d := range n.decided {
// display the TPS every 1000 txs
if numAccepted%1000 == 0 {
n.log.Info("TPS: %d", meter.Ticks())
}
// d is the ID of the tx that was accepted
if !d.IsZero() {
meter.Tick()
n.log.Debug("Finalized %s", d)
numAccepted++
numPending--
}
// Issue all the txs that we can right now
for numPending < config.MaxOutstandingTxs && wallet.Balance(assetID) > 0 && numAccepted+numPending < config.NumTxs {
tx := wallet.NextTx()
n.log.AssertTrue(tx != nil, "Tx creation failed")
// send the IssueTx message
it, err := n.build.IssueTx(chainID, tx.Bytes())
n.log.AssertNoError(err)
ds := it.DataStream()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false)
n.conn.GetNet().SendMsg(newMsg, n.conn)
ds.Free()
ba.Free()
newMsg.Free()
numPending++
n.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted)
}
// If we are done issuing txs, return from the function
if numAccepted+numPending >= config.NumTxs {
n.log.Info("done with test")
net.ec.Stop()
return
}
}
}
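
The TPS figure logged above is simply the number of `Tick` calls that landed in the trailing one-second window. A stand-alone sketch of that pattern with the same `timer.TimedMeter` type (the surrounding imports and `main` are assumed, not part of the original):

```go
// Every accepted tx ticks the meter; Ticks() reports how many ticks fall
// inside the configured Duration, i.e. the current transactions per second.
meter := timer.TimedMeter{Duration: time.Second}
for i := 0; i < 5; i++ {
	meter.Tick()
}
fmt.Printf("TPS over the last second: %d\n", meter.Ticks())
```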

View File

@ -1,15 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
// ChainType ...
type ChainType int
// Chain types
const (
unknown ChainType = iota
spChain
spDAG
avmDAG
)

View File

@ -1,32 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/logging"
)
// Config contains all of the configurations of an Ava client.
type Config struct {
// Networking configurations
RemoteIP utils.IPDesc // Which Ava node to connect to
// ID of the network that this client will be issuing transactions to
NetworkID uint32
// Transaction fee
AvaTxFee uint64
EnableCrypto bool
LoggingConfig logging.Config
// Key describes which key to use to issue transactions
Key []byte
// NumTxs describes the number of transactions to issue
// MaxOutstandingTxs describes how many txs to pipeline
NumTxs, MaxOutstandingTxs int
Chain ChainType
}

View File

@ -1,97 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"fmt"
"os"
"path"
"runtime"
"runtime/pprof"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/vms/avm"
"github.com/ava-labs/gecko/vms/spchainvm"
"github.com/ava-labs/gecko/vms/spdagvm"
)
func main() {
if err != nil {
fmt.Printf("Failed to parse arguments: %s\n", err)
}
// set up logging
config.LoggingConfig.Directory = path.Join(config.LoggingConfig.Directory, "client")
log, err := logging.New(config.LoggingConfig)
if err != nil {
fmt.Printf("Failed to start the logger: %s\n", err)
return
}
defer log.Stop()
// initialize state based on CLI args
net.log = log
crypto.EnableCrypto = config.EnableCrypto
net.decided = make(chan ids.ID, config.MaxOutstandingTxs)
// Init the network
log.AssertNoError(net.Initialize())
net.net.Start()
defer net.net.Stop()
// connect to the node
serr := salticidae.NewError()
remoteIP := salticidae.NewNetAddrFromIPPortString(config.RemoteIP.String(), true, &serr)
if code := serr.GetCode(); code != 0 {
log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode()))
return
}
net.conn = net.net.ConnectSync(remoteIP, true, &serr)
if serr.GetCode() != 0 {
log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode()))
return
}
// start a cpu profile
file, gErr := os.Create("cpu_client.profile")
log.AssertNoError(gErr)
gErr = pprof.StartCPUProfile(file)
log.AssertNoError(gErr)
runtime.SetMutexProfileFraction(1)
defer file.Close()
defer pprof.StopCPUProfile()
net.networkID = config.NetworkID
// start the benchmark we want to run
switch config.Chain {
case spChain:
tx, err := genesis.VMGenesis(config.NetworkID, spchainvm.ID)
log.AssertNoError(err)
net.benchmarkSPChain(tx)
case spDAG:
tx, err := genesis.VMGenesis(config.NetworkID, spdagvm.ID)
log.AssertNoError(err)
net.benchmarkSPDAG(tx)
case avmDAG:
tx, err := genesis.VMGenesis(config.NetworkID, avm.ID)
log.AssertNoError(err)
net.benchmarkAVM(tx)
default:
log.Fatal("did not specify whether to test dag or chain. Exiting")
return
}
// start processing network messages
net.ec.Dispatch()
}

View File

@ -1,78 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
// #include "salticidae/network.h"
// void onTerm(int sig, void *);
// void decidedTx(msg_t *, msgnetwork_conn_t *, void *);
import "C"
import (
"fmt"
"unsafe"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/utils/logging"
)
// network stores the persistent data needed when running the test.
type network struct {
ec salticidae.EventContext
build networking.Builder
net salticidae.MsgNetwork
conn salticidae.MsgNetworkConn
log logging.Logger
decided chan ids.ID
networkID uint32
}
var net = network{}
func (n *network) Initialize() error {
n.ec = salticidae.NewEventContext()
evInt := salticidae.NewSigEvent(n.ec, salticidae.SigEventCallback(C.onTerm), nil)
evInt.Add(salticidae.SIGINT)
evTerm := salticidae.NewSigEvent(n.ec, salticidae.SigEventCallback(C.onTerm), nil)
evTerm.Add(salticidae.SIGTERM)
serr := salticidae.NewError()
netconfig := salticidae.NewMsgNetworkConfig()
n.net = salticidae.NewMsgNetwork(n.ec, netconfig, &serr)
if serr.GetCode() != 0 {
return fmt.Errorf("sync error %s", salticidae.StrError(serr.GetCode()))
}
n.net.RegHandler(networking.DecidedTx, salticidae.MsgNetworkMsgCallback(C.decidedTx), nil)
return nil
}
//export onTerm
func onTerm(C.int, unsafe.Pointer) {
net.log.Info("Terminate signal received")
net.ec.Stop()
}
// decidedTx handles the receipt of a decidedTx message
//export decidedTx
func decidedTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
pMsg, err := net.build.Parse(networking.DecidedTx, msg.GetPayloadByMove())
if err != nil {
net.log.Warn("Failed to parse DecidedTx message")
return
}
txID, err := ids.ToID(pMsg.Get(networking.TxID).([]byte))
net.log.AssertNoError(err) // Length is checked in message parsing
net.log.Debug("Decided %s", txID)
net.decided <- txID
}

View File

@ -1,115 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"flag"
"fmt"
"os"
stdnet "net"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
)
var (
config Config
err error
)
// Parse the CLI arguments
func init() {
errs := &wrappers.Errs{}
defer func() { err = errs.Err }()
loggingConfig, err := logging.DefaultConfig()
errs.Add(err)
fs := flag.NewFlagSet("xputtest", flag.ContinueOnError)
// NetworkID:
networkName := fs.String("network-id", genesis.LocalName, "Network ID this node will connect to")
// Ava fees:
fs.Uint64Var(&config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")
// Assertions:
fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution")
// Crypto:
fs.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification")
// Remote Server:
ip := fs.String("ip", "127.0.0.1", "IP address of the remote server socket")
port := fs.Uint("port", 9652, "Port of the remote server socket")
// Logging:
logsDir := fs.String("log-dir", "", "Logging directory for Ava")
logLevel := fs.String("log-level", "info", "The log level. Should be one of {all, debug, info, warn, error, fatal, off}")
// Test Variables:
spchain := fs.Bool("sp-chain", false, "Execute simple payment chain transactions")
spdag := fs.Bool("sp-dag", false, "Execute simple payment dag transactions")
avm := fs.Bool("avm", false, "Execute avm transactions")
key := fs.String("key", "", "Funded key in the genesis key to use to issue transactions")
fs.IntVar(&config.NumTxs, "num-txs", 25000, "Total number of transaction to issue")
fs.IntVar(&config.MaxOutstandingTxs, "max-outstanding", 1000, "Maximum number of transactions to leave outstanding")
ferr := fs.Parse(os.Args[1:])
if ferr == flag.ErrHelp {
// display usage/help text and exit successfully
os.Exit(0)
}
if ferr != nil {
// other type of error occurred when parsing args
os.Exit(2)
}
networkID, err := genesis.NetworkID(*networkName)
errs.Add(err)
config.NetworkID = networkID
// Remote:
parsedIP := stdnet.ParseIP(*ip)
if parsedIP == nil {
errs.Add(fmt.Errorf("invalid IP Address %s", *ip))
}
config.RemoteIP = utils.IPDesc{
IP: parsedIP,
Port: uint16(*port),
}
cb58 := formatting.CB58{}
errs.Add(cb58.FromString(*key))
config.Key = cb58.Bytes
// Logging:
if *logsDir != "" {
loggingConfig.Directory = *logsDir
}
level, err := logging.ToLevel(*logLevel)
errs.Add(err)
loggingConfig.LogLevel = level
loggingConfig.DisplayLevel = level
config.LoggingConfig = loggingConfig
// Test Variables:
switch {
case *spchain:
config.Chain = spChain
case *spdag:
config.Chain = spDAG
case *avm:
config.Chain = avmDAG
default:
config.Chain = unknown
}
}

View File

@ -1,89 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"time"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/timer"
"github.com/ava-labs/gecko/vms/platformvm"
"github.com/ava-labs/gecko/vms/spchainvm"
"github.com/ava-labs/gecko/xputtest/chainwallet"
)
// benchmark an instance of the sp chain
func (n *network) benchmarkSPChain(chain *platformvm.CreateChainTx) {
genesisBytes := chain.GenesisData
wallet := chainwallet.NewWallet(n.log, n.networkID, chain.ID())
codec := spchainvm.Codec{}
accounts, err := codec.UnmarshalGenesis(genesisBytes)
n.log.AssertNoError(err)
factory := crypto.FactorySECP256K1R{}
skGen, err := factory.ToPrivateKey(config.Key)
n.log.AssertNoError(err)
sk := skGen.(*crypto.PrivateKeySECP256K1R)
wallet.ImportKey(sk)
for _, account := range accounts {
wallet.AddAccount(account)
break
}
n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs))
go n.log.RecoverAndPanic(func() { n.IssueSPChain(chain.ID(), wallet) })
}
func (n *network) IssueSPChain(chainID ids.ID, wallet *chainwallet.Wallet) {
n.log.Debug("Issuing with %d", wallet.Balance())
numAccepted := 0
numPending := 0
n.decided <- ids.ID{}
meter := timer.TimedMeter{Duration: time.Second}
for d := range n.decided {
if numAccepted%1000 == 0 {
n.log.Info("TPS: %d", meter.Ticks())
}
if !d.IsZero() {
meter.Tick()
n.log.Debug("Finalized %s", d)
numAccepted++
numPending--
}
for numPending < config.MaxOutstandingTxs && wallet.Balance() > 0 && numAccepted+numPending < config.NumTxs {
tx := wallet.NextTx()
n.log.AssertTrue(tx != nil, "Tx creation failed")
it, err := n.build.IssueTx(chainID, tx.Bytes())
n.log.AssertNoError(err)
ds := it.DataStream()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false)
n.conn.GetNet().SendMsg(newMsg, n.conn)
ds.Free()
ba.Free()
newMsg.Free()
numPending++
n.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted)
}
if numAccepted+numPending >= config.NumTxs {
n.log.Info("done with test")
net.ec.Stop()
return
}
}
}

View File

@ -1,96 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package main
import (
"time"
"github.com/ava-labs/salticidae-go"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/networking"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/timer"
"github.com/ava-labs/gecko/vms/platformvm"
"github.com/ava-labs/gecko/vms/spdagvm"
"github.com/ava-labs/gecko/xputtest/dagwallet"
)
// benchmark an instance of the sp dag
func (n *network) benchmarkSPDAG(chain *platformvm.CreateChainTx) {
genesisBytes := chain.GenesisData
wallet := dagwallet.NewWallet(n.networkID, chain.ID(), config.AvaTxFee)
codec := spdagvm.Codec{}
tx, err := codec.UnmarshalTx(genesisBytes)
n.log.AssertNoError(err)
factory := crypto.FactorySECP256K1R{}
skGen, err := factory.ToPrivateKey(config.Key)
n.log.AssertNoError(err)
sk := skGen.(*crypto.PrivateKeySECP256K1R)
wallet.ImportKey(sk)
for _, utxo := range tx.UTXOs() {
wallet.AddUTXO(utxo)
}
go n.log.RecoverAndPanic(func() { n.IssueSPDAG(chain.ID(), wallet) })
}
// issue transactions to the instance of the spdag funded by the provided wallet
func (n *network) IssueSPDAG(chainID ids.ID, wallet *dagwallet.Wallet) {
n.log.Info("starting avalanche benchmark")
pending := make(map[[32]byte]*spdagvm.Tx)
canAdd := []*spdagvm.Tx{}
numAccepted := 0
n.decided <- ids.ID{}
meter := timer.TimedMeter{Duration: time.Second}
for d := range n.decided {
if numAccepted%1000 == 0 {
n.log.Info("TPS: %d", meter.Ticks())
}
if !d.IsZero() {
meter.Tick()
key := d.Key()
if tx := pending[key]; tx != nil {
canAdd = append(canAdd, tx)
n.log.Debug("Finalized %s", d)
delete(pending, key)
numAccepted++
}
}
for len(pending) < config.MaxOutstandingTxs && (wallet.Balance() > 0 || len(canAdd) > 0) {
if wallet.Balance() == 0 {
tx := canAdd[0]
canAdd = canAdd[1:]
for _, utxo := range tx.UTXOs() {
wallet.AddUTXO(utxo)
}
}
tx := wallet.Send(1, 0, wallet.GetAddress())
n.log.AssertTrue(tx != nil, "Tx creation failed")
it, err := n.build.IssueTx(chainID, tx.Bytes())
n.log.AssertNoError(err)
ds := it.DataStream()
ba := salticidae.NewByteArrayMovedFromDataStream(ds, false)
newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false)
n.conn.GetNet().SendMsg(newMsg, n.conn)
ds.Free()
ba.Free()
newMsg.Free()
pending[tx.ID().Key()] = tx
n.log.Debug("Sent tx, pending = %d, accepted = %d", len(pending), numAccepted)
}
}
}