Add persistent keys and bootstrap peer
This connects all nodes into a full mesh: every node dials the well-known guardian-0 bootstrap peer and discovers the remaining peers through the DHT.
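For context, a minimal sketch (not part of this commit) of how the comma-separated -bootstrap multiaddrs are typically parsed and dialed with go-libp2p. connectBootstrapPeers and the log messages are illustrative; multiaddr.NewMultiaddr, peer.AddrInfoFromP2pAddr, and Host.Connect are standard go-libp2p APIs:

package p2p

import (
	"context"
	"strings"

	"github.com/libp2p/go-libp2p-core/host"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/multiformats/go-multiaddr"
	"go.uber.org/zap"
)

// connectBootstrapPeers dials each bootstrap peer given as a
// comma-separated multiaddr list (hypothetical helper; h is an
// already constructed libp2p host).
func connectBootstrapPeers(ctx context.Context, h host.Host, logger *zap.Logger, bootstrap string) {
	for _, addr := range strings.Split(bootstrap, ",") {
		if addr == "" {
			continue
		}
		ma, err := multiaddr.NewMultiaddr(addr)
		if err != nil {
			logger.Error("invalid bootstrap address", zap.String("peer", addr), zap.Error(err))
			continue
		}
		pi, err := peer.AddrInfoFromP2pAddr(ma)
		if err != nil {
			logger.Error("invalid bootstrap address", zap.String("peer", addr), zap.Error(err))
			continue
		}
		// One successful dial is enough for a full mesh: the DHT then
		// discovers every other node through the bootstrap peer.
		if err := h.Connect(ctx, *pi); err != nil {
			logger.Error("failed to connect to bootstrap peer", zap.String("peer", addr), zap.Error(err))
		}
	}
}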
commit a7d17cf5aa (parent b8c2efdf86)
@@ -2,8 +2,10 @@ package main
 
 import (
 	"context"
+	"encoding/base64"
 	"flag"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"strings"
 	"time"
@@ -31,6 +33,7 @@ var (
 	p2pNetworkID = flag.String("network", "/wormhole/dev", "P2P network identifier")
 	p2pPort      = flag.Uint("port", 8999, "P2P UDP listener port")
 	p2pBootstrap = flag.String("bootstrap", "", "P2P bootstrap peers (comma-separated)")
+	nodeKeyPath  = flag.String("nodeKey", "", "Path to node key (will be generated if it doesn't exist)")
 	logLevel     = flag.String("loglevel", "info", "Logging level (debug, info, warn, error, dpanic, panic, fatal)")
 )
 
@@ -45,8 +48,15 @@ func main() {
 		os.Exit(1)
 	}
 
+	// FIXME: add hostname to root logger for cleaner console output in multi-node development.
+	// The proper way is to change the output format to include the hostname.
+	hostname, err := os.Hostname()
+	if err != nil {
+		panic(err)
+	}
+
 	// Our root logger.
-	logger := ipfslog.Logger("wormhole")
+	logger := ipfslog.Logger(fmt.Sprintf("%s-%s", "wormhole", hostname))
 
 	// Override the default go-log config, which uses a magic environment variable.
 	ipfslog.SetAllLoggers(lvl)
@@ -54,6 +64,11 @@ func main() {
 	// Mute chatty subsystems.
 	ipfslog.SetLogLevel("swarm2", "error") // connection errors
 
+	// Verify flags
+	if *nodeKeyPath == "" {
+		logger.Fatal("Please specify -nodeKey")
+	}
+
 	// Node's main lifecycle context.
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -73,15 +88,85 @@ func main() {
 	select {}
 }
 
+func getOrCreateNodeKey(logger *zap.Logger, path string) (crypto.PrivKey, error) {
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			logger.Info("No node key found, generating a new one...", zap.String("path", path))
+
+			// TODO(leo): what does -1 mean?
+			priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
+			if err != nil {
+				panic(err)
+			}
+
+			s, err := crypto.MarshalPrivateKey(priv)
+			if err != nil {
+				panic(err)
+			}
+
+			err = ioutil.WriteFile(path, s, 0600)
+			if err != nil {
+				return nil, fmt.Errorf("failed to write node key: %w", err)
+			}
+
+			return priv, nil
+		} else {
+			return nil, fmt.Errorf("failed to read node key: %w", err)
+		}
+	}
+
+	priv, err := crypto.UnmarshalPrivateKey(b)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal node key: %w", err)
+	}
+
+	logger.Info("Found existing node key", zap.String("path", path))
+
+	return priv, nil
+}
+
+// FIXME: this hardcodes the private key if we're guardian-0.
+// Proper fix is to add a debug mode and fetch the remote peer ID,
+// or add a special bootstrap pod.
+func bootstrapNodePrivateKeyHack() crypto.PrivKey {
+	hostname, err := os.Hostname()
+	if err != nil {
+		panic(err)
+	}
+
+	if hostname == "guardian-0" {
+		// node ID: 12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF
+		b, err := base64.StdEncoding.DecodeString("CAESQGlv6OJOMXrZZVTCC0cgCv7goXr6QaSVMZIndOIXKNh80vYnG+EutVlZK20Nx9cLkUG5ymKB\n88LXi/vPBwP8zfY=")
+		if err != nil {
+			panic(err)
+		}
+
+		priv, err := crypto.UnmarshalPrivateKey(b)
+		if err != nil {
+			panic(err)
+		}
+
+		return priv
+	}
+
+	return nil
+}
+
 func p2p(ctx context.Context) error {
 	logger := supervisor.Logger(ctx)
 
-	// TODO(leo): persist the key
-	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
+	priv := bootstrapNodePrivateKeyHack()
+
+	var err error
+	if priv == nil {
+		priv, err = getOrCreateNodeKey(logger, *nodeKeyPath)
 		if err != nil {
 			panic(err)
 		}
+	} else {
+		logger.Info("HACK: loaded hardcoded guardian-0 node key")
+	}
 
 	var idht *dht.IpfsDHT
 
@@ -159,6 +244,9 @@ func p2p(ctx context.Context) error {
 		}
 	}
 
+	// TODO(leo): crash if we couldn't connect to any bootstrap peers?
+	// (i.e. can we get stuck here if the other nodes have yet to come up?)
+
 	topic := fmt.Sprintf("%s/%s", *p2pNetworkID, "broadcast")
 
 	logger.Info("Subscribing pubsub topic", zap.String("topic", topic))
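Two clarifications on the Go changes above, offered as assumptions rather than part of the commit. First, the TODO asks what -1 means: in go-libp2p's crypto package the bits argument to GenerateKeyPair is only used for RSA; Ed25519 keys are fixed-size, so the value is ignored. Second, persisting the key matters for bootstrapping because the /p2p/... component of a bootstrap multiaddr is the peer ID derived from the node's public key. A minimal sketch (import paths as of go-libp2p-core; newer go-libp2p releases move them under github.com/libp2p/go-libp2p/core):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/crypto"
	"github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	// bits = -1: ignored for Ed25519 (only RSA uses the key size).
	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		panic(err)
	}

	// The peer ID is derived from the public key, so persisting the
	// private key keeps the node's dialable address stable across restarts.
	id, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Pretty()) // e.g. 12D3KooW...
}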
@@ -23,7 +23,11 @@ spec:
     matchLabels:
       app: guardian
   serviceName: guardian
-  replicas: 20
+  replicas: 10
+  updateStrategy:
+    # The StatefulSet rolling update strategy is rather dumb, and updates one pod after another.
+    # If we want blue-green deployments, we should use a Deployment instead.
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -33,6 +37,12 @@ spec:
       containers:
         - name: guardiand
          image: guardiand-image
+          command:
+            - /guardiand
+            - -nodeKey
+            - /data/node.key
+            - -bootstrap
+            - /dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWQ1sV2kowPY1iJX1hJcVTysZjKv3sfULTGwhdpUGGZ1VF
          ports:
            - containerPort: 8999
              name: p2p
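A reading of the new bootstrap address, for context (not stated in the commit): /dns4/guardian-0.guardian resolves the guardian-0 pod through the guardian headless service's per-pod DNS, udp/8999/quic selects the QUIC listener on the port configured above, and the trailing /p2p/12D3KooW... is the peer ID of the hardcoded guardian-0 node key, which is why bootstrapNodePrivateKeyHack() has to pin that key. Note that -nodeKey points at /data/node.key, which assumes /data is backed by a per-replica volume (not shown in this hunk) if keys are to survive pod restarts.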