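// This program runs a single Wormhole guardian node for development: it joins a
// libp2p gossip network, watches the Ethereum bridge contract for lock events,
// and periodically publishes heartbeat messages to its peers.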
package main
import (
	"context"
	"encoding/base64"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
	"time"

	eth_common "github.com/ethereum/go-ethereum/common"
	"github.com/golang/protobuf/proto"
	"github.com/libp2p/go-libp2p-core/protocol"
	swarm "github.com/libp2p/go-libp2p-swarm"
	"github.com/multiformats/go-multiaddr"
	"go.uber.org/zap"

	"github.com/certusone/wormhole/bridge/pkg/common"
	"github.com/certusone/wormhole/bridge/pkg/ethereum"
	gossipv1 "github.com/certusone/wormhole/bridge/pkg/proto/gossip/v1"
	"github.com/certusone/wormhole/bridge/pkg/supervisor"

	ipfslog "github.com/ipfs/go-log/v2"
	"github.com/libp2p/go-libp2p"
	connmgr "github.com/libp2p/go-libp2p-connmgr"
	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/libp2p/go-libp2p-core/routing"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	libp2pquic "github.com/libp2p/go-libp2p-quic-transport"
	libp2ptls "github.com/libp2p/go-libp2p-tls"
)
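// Command-line flags. An example invocation for local development might look
// like the following (binary name and addresses are illustrative assumptions,
// not values prescribed by this file):
//
//	<binary> -nodeKey /run/node.key \
//		-ethRPC ws://localhost:8545 \
//		-ethContract <bridge contract address> \
//		-bootstrap /dns4/guardian-0/udp/8999/quic/p2p/12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF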
var (
	p2pNetworkID = flag.String("network", "/wormhole/dev", "P2P network identifier")
	p2pPort      = flag.Uint("port", 8999, "P2P UDP listener port")
	p2pBootstrap = flag.String("bootstrap", "", "P2P bootstrap peers (comma-separated)")

	nodeKeyPath = flag.String("nodeKey", "", "Path to node key (will be generated if it doesn't exist)")

	ethRPC           = flag.String("ethRPC", "", "Ethereum RPC URL")
	ethContract      = flag.String("ethContract", "", "Ethereum bridge contract address")
	ethConfirmations = flag.Uint64("ethConfirmations", 5, "Ethereum confirmation count requirement")

	logLevel = flag.String("loglevel", "info", "Logging level (debug, info, warn, error, dpanic, panic, fatal)")
)
func main() {
	flag.Parse()

	// Set up logging. The go-log zap wrapper that libp2p uses is compatible with our
	// usage of zap in supervisor, which is nice.
	lvl, err := ipfslog.LevelFromString(*logLevel)
	if err != nil {
		fmt.Println("Invalid log level")
		os.Exit(1)
	}

	// FIXME: add hostname to root logger for cleaner console output in multi-node development.
	// The proper way is to change the output format to include the hostname.
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	// Our root logger.
	logger := ipfslog.Logger(fmt.Sprintf("%s-%s", "wormhole", hostname))

	// Override the default go-log config, which uses a magic environment variable.
	ipfslog.SetAllLoggers(lvl)

	// Mute chatty subsystems.
	ipfslog.SetLogLevel("swarm2", "error") // connection errors

	// Verify flags
	if *nodeKeyPath == "" {
		logger.Fatal("Please specify -nodeKey")
	}
	if *ethRPC == "" {
		logger.Fatal("Please specify -ethRPC")
	}

	ethContractAddr := eth_common.HexToAddress(*ethContract)

	// Node's main lifecycle context.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Ethereum lock event channel
	ec := make(chan *common.ChainLock)

	// Run supervisor. The p2p service and the Ethereum watcher run as supervised
	// tasks; if one of them fails, the supervisor restarts it.
	supervisor.New(ctx, logger.Desugar(), func(ctx context.Context) error {
		if err := supervisor.Run(ctx, "p2p", p2p); err != nil {
			return err
		}

		watcher := ethereum.NewEthBridgeWatcher(
			*ethRPC, ethContractAddr, *ethConfirmations, ec)

		if err := supervisor.Run(ctx, "eth", watcher.Run); err != nil {
			return err
		}

		supervisor.Signal(ctx, supervisor.SignalHealthy)
		logger.Info("Created services")

		select {}
	}, supervisor.WithPropagatePanic)
	// TODO(leo): only propagate panics in debug mode. We currently need this to properly reset p2p
	// (it leaks its socket and we need to restart the process to fix it)

	select {}
}
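// getOrCreateNodeKey loads the node's libp2p private key from path. If the file
// does not exist, a new Ed25519 key is generated and written to path instead.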
func getOrCreateNodeKey(logger *zap.Logger, path string) (crypto.PrivKey, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			logger.Info("No node key found, generating a new one...", zap.String("path", path))

			// The second argument is the key length in bits; it is ignored for
			// Ed25519, so -1 simply means "use the curve's fixed key size".
			priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
			if err != nil {
				panic(err)
			}

			s, err := crypto.MarshalPrivateKey(priv)
			if err != nil {
				panic(err)
			}

			err = ioutil.WriteFile(path, s, 0600)
			if err != nil {
				return nil, fmt.Errorf("failed to write node key: %w", err)
			}

			return priv, nil
		} else {
			return nil, fmt.Errorf("failed to read node key: %w", err)
		}
	}

	priv, err := crypto.UnmarshalPrivateKey(b)
	if err != nil {
		return nil, fmt.Errorf("failed to unmarshal node key: %w", err)
	}

	logger.Info("Found existing node key", zap.String("path", path))

	return priv, nil
}
// FIXME: this hardcodes the private key if we're guardian-0.
// Proper fix is to add a debug mode and fetch the remote peer ID,
// or add a special bootstrap pod.
func bootstrapNodePrivateKeyHack() crypto.PrivKey {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	if hostname == "guardian-0" {
		// node ID: 12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF
		b, err := base64.StdEncoding.DecodeString("CAESQGlv6OJOMXrZZVTCC0cgCv7goXr6QaSVMZIndOIXKNh80vYnG+EutVlZK20Nx9cLkUG5ymKB\n88LXi/vPBwP8zfY=")
		if err != nil {
			panic(err)
		}

		priv, err := crypto.UnmarshalPrivateKey(b)
		if err != nil {
			panic(err)
		}

		return priv
	}

	return nil
}
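// p2p is the p2p service run by the supervisor. It constructs the libp2p host,
// connects to the configured bootstrap peers, joins the broadcast gossip topic,
// publishes periodic heartbeats and logs every message received on the topic.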
func p2p(ctx context.Context) error {
	logger := supervisor.Logger(ctx)

	priv := bootstrapNodePrivateKeyHack()

	var err error
	if priv == nil {
		priv, err = getOrCreateNodeKey(logger, *nodeKeyPath)
		if err != nil {
			panic(err)
		}
	} else {
		logger.Info("HACK: loaded hardcoded guardian-0 node key")
	}

	var idht *dht.IpfsDHT

	h, err := libp2p.New(ctx,
		// Use the keypair we generated
		libp2p.Identity(priv),
		// Listen addresses
		libp2p.ListenAddrStrings(
			// Listen on QUIC only.
			// TODO(leo): listen on ipv6
			// TODO(leo): is this more or less stable than using both TCP and QUIC transports?
			// https://github.com/libp2p/go-libp2p/issues/688
			fmt.Sprintf("/ip4/0.0.0.0/udp/%d/quic", *p2pPort),
		),

		// Enable TLS security only.
		libp2p.Security(libp2ptls.ID, libp2ptls.New),

		// Enable QUIC transports.
		libp2p.Transport(libp2pquic.NewTransport),

		// Enable TCP so we can connect to bootstrap nodes.
		// (can be disabled if we bootstrap our own network)
		libp2p.DefaultTransports,

		// Prevent our peer from having too many connections
		// by attaching a connection manager.
		libp2p.ConnectionManager(connmgr.NewConnManager(
			100,         // LowWater
			400,         // HighWater
			time.Minute, // GracePeriod
		)),

		// Let this host use the DHT to find other hosts
		libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
			// TODO(leo): Persistent data store (i.e. address book)
			idht, err = dht.New(ctx, h, dht.Mode(dht.ModeServer),
				// TODO(leo): This intentionally makes us incompatible with the global IPFS DHT
				dht.ProtocolPrefix(protocol.ID("/"+*p2pNetworkID)),
			)
			return idht, err
		}),
	)
	defer func() {
		fmt.Printf("h is %+v\n", h)
		// FIXME: why can this be nil? We need to close the host to free the socket because apparently,
		// closing the context is not enough, but sometimes h is nil when the function runs.
		if h != nil {
			h.Close()
		}
	}()

	if err != nil {
		panic(err)
	}

	logger.Info("Connecting to bootstrap peers")
	// TODO(leo): use our own bootstrap peers rather than the IPFS ones so we have a dedicated network
	//for _, addr := range dht.DefaultBootstrapPeers {
	//	pi, _ := peer.AddrInfoFromP2pAddr(addr)
	//	// We ignore errors as some bootstrap peers may be down and that is fine.
	//	_ = h.Connect(ctx, *pi)
	//}

	// Add our own bootstrap nodes.

	// Count the number of successful connection attempts. If we fail to connect to every
	// bootstrap peer, kill the service and have the supervisor retry it.
	successes := 0

	for _, addr := range strings.Split(*p2pBootstrap, ",") {
		if addr == "" {
			continue
		}
		ma, err := multiaddr.NewMultiaddr(addr)
		if err != nil {
			logger.Error("Invalid bootstrap address", zap.String("peer", addr), zap.Error(err))
			continue
		}
		pi, err := peer.AddrInfoFromP2pAddr(ma)
		if err != nil {
			logger.Error("Invalid bootstrap address", zap.String("peer", addr), zap.Error(err))
			continue
		}

		if err = h.Connect(ctx, *pi); err != nil {
			if err != swarm.ErrDialToSelf {
				logger.Error("Failed to connect to bootstrap peer", zap.String("peer", addr), zap.Error(err))
			} else {
				// Dialing self, carrying on... (we're a bootstrap peer)
				logger.Info("Tried to connect to ourselves - we're a bootstrap peer")
				successes += 1
			}
		} else {
			successes += 1
		}
	}

	if successes == 0 {
		h.Close()
		return fmt.Errorf("failed to connect to any bootstrap peer")
	} else {
		logger.Info("Connected to bootstrap peers", zap.Int("num", successes))
	}
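	// All gossip in this program flows over a single pubsub topic derived from
	// the network ID (e.g. "/wormhole/dev/broadcast" on the default dev network).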
	topic := fmt.Sprintf("%s/%s", *p2pNetworkID, "broadcast")

	logger.Info("Subscribing pubsub topic", zap.String("topic", topic))
	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}

	th, err := ps.Join(topic)
	if err != nil {
		return fmt.Errorf("failed to join topic: %w", err)
	}

	sub, err := th.Subscribe()
	if err != nil {
		return fmt.Errorf("failed to subscribe topic: %w", err)
	}

	logger.Info("Node has been started", zap.String("peer_id", h.ID().String()),
		zap.String("addrs", fmt.Sprintf("%v", h.Addrs())))

	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}
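	// Periodically publish a heartbeat message on the broadcast topic so other
	// nodes can see that this node is alive.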
	go func() {
		ctr := int64(0)

		for {
			msg := gossipv1.Heartbeat{
				Hostname: hostname,
				Index:    ctr,
			}

			b, err := proto.Marshal(&msg)
			if err != nil {
				panic(err)
			}

			err = th.Publish(ctx, b)
			if err != nil {
				logger.Warn("failed to publish message", zap.Error(err))
			}

			ctr += 1
			time.Sleep(15 * time.Second)
		}
	}()
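	// Receive loop: block on the subscription and log every message received on the
	// topic. Returning an error ends the p2p service so the supervisor can restart it.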
	for {
		msg, err := sub.Next(ctx)
		if err != nil {
			return fmt.Errorf("failed to receive pubsub message: %w", err)
		}

		logger.Info("received message", zap.String("data", string(msg.Data)), zap.String("from", msg.GetFrom().String()))
	}
}