Add persistent keys and bootstrap peer

This connects all nodes to a full mesh.
Leo authored 2020-08-03 22:33:35 +02:00, committed by Leopold Schabel
parent b8c2efdf86
commit a7d17cf5aa
2 changed files with 104 additions and 6 deletions
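
A libp2p node's peer ID is derived from its public key, so persisting the private key is what gives each guardian a stable identity that other nodes can dial, and what makes it possible to bake guardian-0's address into a bootstrap flag. A minimal sketch of that relationship, using the same go-libp2p-core packages as the code below (a standalone illustration, not part of the commit):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	// Generate an Ed25519 key pair, as guardiand does on first start.
	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		panic(err)
	}

	// The peer ID is derived from the public half of the key. As long as the
	// same key is loaded from disk on every start, the ID never changes, which
	// is what allows it to appear verbatim in the -bootstrap multiaddr.
	id, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Pretty()) // e.g. 12D3KooW...
}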

View File

@@ -2,8 +2,10 @@ package main
import (
"context"
"encoding/base64"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@@ -31,6 +33,7 @@ var (
p2pNetworkID = flag.String("network", "/wormhole/dev", "P2P network identifier")
p2pPort = flag.Uint("port", 8999, "P2P UDP listener port")
p2pBootstrap = flag.String("bootstrap", "", "P2P bootstrap peers (comma-separated)")
nodeKeyPath = flag.String("nodeKey", "", "Path to node key (will be generated if it doesn't exist)")
logLevel = flag.String("loglevel", "info", "Logging level (debug, info, warn, error, dpanic, panic, fatal)")
)
@@ -45,8 +48,15 @@ func main() {
os.Exit(1)
}
// FIXME: add hostname to root logger for cleaner console output in multi-node development.
// The proper way is to change the output format to include the hostname.
hostname, err := os.Hostname()
if err != nil {
panic(err)
}
// Our root logger.
- logger := ipfslog.Logger("wormhole")
+ logger := ipfslog.Logger(fmt.Sprintf("%s-%s", "wormhole", hostname))
// Override the default go-log config, which uses a magic environment variable.
ipfslog.SetAllLoggers(lvl)
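
On the FIXME above: the cleaner fix it mentions is to put the hostname into the log output itself rather than into the logger name. A sketch of that idea with zap directly (guardiand actually goes through go-log, so the real wiring would differ; this is illustrative only):

package main

import (
	"os"

	"go.uber.org/zap"
)

func main() {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	logger, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	// Attach the hostname as a structured field instead of mangling the logger
	// name; every subsequent log line then carries it automatically.
	logger = logger.With(zap.String("hostname", hostname))

	logger.Info("node starting")
}
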
@@ -54,6 +64,11 @@ func main() {
// Mute chatty subsystems.
ipfslog.SetLogLevel("swarm2", "error") // connection errors
// Verify flags
if *nodeKeyPath == "" {
logger.Fatal("Please specify -nodeKey")
}
// Node's main lifecycle context.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -73,14 +88,84 @@ func main() {
select {}
}
func getOrCreateNodeKey(logger *zap.Logger, path string) (crypto.PrivKey, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
logger.Info("No node key found, generating a new one...", zap.String("path", path))
// TODO(leo): what does -1 mean?
priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
if err != nil {
panic(err)
}
s, err := crypto.MarshalPrivateKey(priv)
if err != nil {
panic(err)
}
err = ioutil.WriteFile(path, s, 0600)
if err != nil {
return nil, fmt.Errorf("failed to write node key: %w", err)
}
return priv, nil
} else {
return nil, fmt.Errorf("failed to read node key: %w", err)
}
}
priv, err := crypto.UnmarshalPrivateKey(b)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal node key: %w", err)
}
logger.Info("Found existing node key", zap.String("path", path))
return priv, nil
}
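
On the TODO above: in go-libp2p's crypto.GenerateKeyPair the bits argument is only consulted for RSA; for fixed-size key types such as Ed25519 it is ignored, so -1 is simply a conventional "not applicable" value. A quick standalone check (illustrative, not part of the commit):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/crypto"
)

func main() {
	// Both calls succeed and produce equivalent keys: the bits argument is
	// ignored for Ed25519, so the -1 used above is harmless.
	a, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		panic(err)
	}
	b, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 2048)
	if err != nil {
		panic(err)
	}

	ra, _ := a.Raw()
	rb, _ := b.Raw()
	fmt.Println(len(ra), len(rb)) // 64 64 - raw Ed25519 private keys are always 64 bytes
}
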
// FIXME: this hardcodes the private key if we're guardian-0.
// Proper fix is to add a debug mode and fetch the remote peer ID,
// or add a special bootstrap pod.
func bootstrapNodePrivateKeyHack() crypto.PrivKey {
hostname, err := os.Hostname()
if err != nil {
panic(err)
}
if hostname == "guardian-0" {
// node ID: 12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF
b, err := base64.StdEncoding.DecodeString("CAESQGlv6OJOMXrZZVTCC0cgCv7goXr6QaSVMZIndOIXKNh80vYnG+EutVlZK20Nx9cLkUG5ymKB\n88LXi/vPBwP8zfY=")
if err != nil {
panic(err)
}
priv, err := crypto.UnmarshalPrivateKey(b)
if err != nil {
panic(err)
}
return priv
}
return nil
}
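
The node ID quoted in the comment above can be recomputed from the hardcoded key, which is also how the "proper fix" could obtain the bootstrap peer ID at runtime instead of hardcoding it in the manifest. A sketch (the expected ID is copied from the comment; everything else is illustrative):

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	const expected = "12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF"

	// Same protobuf-serialized key as in bootstrapNodePrivateKeyHack; the
	// embedded \n is harmless because the base64 decoder ignores newlines.
	b, err := base64.StdEncoding.DecodeString("CAESQGlv6OJOMXrZZVTCC0cgCv7goXr6QaSVMZIndOIXKNh80vYnG+EutVlZK20Nx9cLkUG5ymKB\n88LXi/vPBwP8zfY=")
	if err != nil {
		panic(err)
	}
	priv, err := crypto.UnmarshalPrivateKey(b)
	if err != nil {
		panic(err)
	}

	id, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Pretty() == expected) // should print true if key and comment still match
}
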
func p2p(ctx context.Context) error {
logger := supervisor.Logger(ctx)
- // TODO(leo): persist the key
- priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
- if err != nil {
- panic(err)
+ priv := bootstrapNodePrivateKeyHack()
+ var err error
+ if priv == nil {
+ priv, err = getOrCreateNodeKey(logger, *nodeKeyPath)
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ logger.Info("HACK: loaded hardcoded guardian-0 node key")
+ }
var idht *dht.IpfsDHT
@@ -159,6 +244,9 @@ func p2p(ctx context.Context) error {
}
}
// TODO(leo): crash if we couldn't connect to any bootstrap peers?
// (i.e. can we get stuck here if the other nodes have yet to come up?)
topic := fmt.Sprintf("%s/%s", *p2pNetworkID, "broadcast")
logger.Info("Subscribing pubsub topic", zap.String("topic", topic))

View File

@@ -23,7 +23,11 @@ spec:
matchLabels:
app: guardian
serviceName: guardian
- replicas: 20
+ replicas: 10
updateStrategy:
# The StatefulSet rolling update strategy is rather dumb, and updates one pod after another.
# If we want blue-green deployments, we should use a Deployment instead.
type: RollingUpdate
template:
metadata:
labels:
@@ -33,6 +37,12 @@ spec:
containers:
- name: guardiand
image: guardiand-image
command:
- /guardiand
- -nodeKey
- /data/node.key
- -bootstrap
- /dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF
ports:
- containerPort: 8999
name: p2p
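
For reference, the -bootstrap value passed above is a single multiaddr that combines the DNS name of the guardian-0 pod (reachable through the StatefulSet's governing guardian service), the UDP/QUIC listener port set by -port, and the peer ID corresponding to the hardcoded guardian-0 key. A small standalone sketch of how it decomposes (illustrative only):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	const addr = "/dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF"

	m, err := ma.NewMultiaddr(addr)
	if err != nil {
		panic(err)
	}

	host, _ := m.ValueForProtocol(ma.P_DNS4) // "guardian-0.guardian"
	port, _ := m.ValueForProtocol(ma.P_UDP)  // "8999"

	// AddrInfoFromP2pAddr separates the dialable transport address from the peer ID.
	pi, err := peer.AddrInfoFromP2pAddr(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port, pi.ID.Pretty(), pi.Addrs)
}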