2019-10-09 15:32:15 -07:00
|
|
|
package zcash
|
|
|
|
|
|
|
|
import (
|
|
|
|
"log"
|
|
|
|
"net"
|
|
|
|
"os"
|
2020-05-20 16:10:55 -07:00
|
|
|
"runtime"
|
|
|
|
"strconv"
|
2019-10-09 15:32:15 -07:00
|
|
|
"sync"
|
2020-05-21 22:02:32 -07:00
|
|
|
"sync/atomic"
|
2019-10-09 15:32:15 -07:00
|
|
|
"time"
|
|
|
|
|
2020-05-20 16:10:55 -07:00
|
|
|
"github.com/btcsuite/btcd/addrmgr"
|
2019-10-09 15:32:15 -07:00
|
|
|
"github.com/btcsuite/btcd/peer"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
|
2020-05-19 18:45:45 -07:00
|
|
|
"github.com/zcashfoundation/dnsseeder/zcash/network"
|
2019-10-09 15:32:15 -07:00
|
|
|
|
|
|
|
"github.com/pkg/errors"
|
|
|
|
)
|
|
|
|
|
2019-10-12 16:18:25 -07:00
|
|
|
var (
	// ErrRepeatConnection is returned by connect when the target peer is
	// already tracked as pending, handshaking, or live.
	ErrRepeatConnection = errors.New("attempted repeat connection to existing peer")

	// ErrNoSuchPeer is returned when a lookup for a live peer by PeerKey fails.
	ErrNoSuchPeer = errors.New("no record of requested peer")

	// ErrAddressTimeout is returned by WaitForAddresses when the requested
	// number of addresses did not become available within the timeout.
	ErrAddressTimeout = errors.New("wait for addresses timed out")

	// ErrBlacklistedPeer is returned by Connect when the address book has
	// blacklisted the target peer.
	ErrBlacklistedPeer = errors.New("peer is blacklisted")
)
|
|
|
|
|
2019-10-09 15:32:15 -07:00
|
|
|
// defaultPeerConfig is the template btcd peer configuration used for every
// outbound connection. newSeederPeerConfig copies it and fills in ChainParams
// for the selected network.
var defaultPeerConfig = &peer.Config{
	UserAgentName: "zfnd-seeder",
	UserAgentVersion: "0.1.3-alpha.4",
	ChainParams: nil, // populated per-network by newSeederPeerConfig
	Services: 0,
	TrickleInterval: time.Second * 10,
	// The protocol version advertised to peers by this DNS seeder.
	//
	// If this version is too low, newer peers will disconnect from the DNS seeder,
	// and it will only be able to talk to outdated peers.
	//
	// TODO: fork https://github.com/gtank/btcd/blob/master/peer/peer.go
	// and set MinAcceptableProtocolVersion based on the most recently activated network upgrade
	// see ticket #10 for details
	ProtocolVersion: 170100, // Zcash NU5 mainnet
}
|
|
|
|
|
2020-05-21 22:02:32 -07:00
|
|
|
// Tuning knobs for the crawler and peer-connection machinery.
var (
	// The minimum number of addresses we need to know about to begin serving introductions
	minimumReadyAddresses = 10

	// The maximum amount of time we will wait for a peer to complete the initial handshake
	maximumHandshakeWait = 5 * time.Second

	// The timeout for the underlying dial to a peer
	connectionDialTimeout = 5 * time.Second

	// The amount of time crawler goroutines will wait after the last new incoming address
	// before giving up (see the select in RequestAddresses).
	crawlerThreadTimeout = 30 * time.Second

	// The number of goroutines to spawn for a crawl request
	crawlerGoroutineCount = runtime.NumCPU() * 32

	// The amount of space we allocate to keep things moving smoothly.
	// This is the buffer size of the addrQueue channel.
	incomingAddressBufferSize = 4096

	// The amount of time a peer can spend on the blacklist before we forget about it entirely.
	blacklistDropTime = 3 * 24 * time.Hour
)
|
2020-05-20 15:08:34 -07:00
|
|
|
|
|
|
|
// Seeder contains all of the state and configuration needed to request addresses from Zcash peers and present them to a DNS provider.
type Seeder struct {
	// NOTE(review): this field is never referenced within this file — confirm
	// it is used elsewhere or consider removing it.
	peer *peer.Peer
	// Template configuration applied to every outbound peer connection.
	config *peer.Config
	logger *log.Logger

	// Peer list handling
	peerState sync.RWMutex
	// Maps PeerKey -> chan struct{}; connect stores a channel here and waits
	// on it for handshake completion (signaled on verack — see NewSeeder).
	handshakeSignals *sync.Map
	// Peers we are currently attempting to connect to.
	pendingPeers *PeerMap
	// Peers that completed the handshake and are still connected.
	livePeers *PeerMap

	// The set of known addresses
	addrBook *AddressBook

	// The queue of incoming potential addresses, fed by the addr/addrv2
	// listeners and drained by RequestAddresses crawler goroutines.
	addrQueue chan *wire.NetAddress
}
|
|
|
|
|
2019-10-12 09:42:16 -07:00
|
|
|
func NewSeeder(network network.Network) (*Seeder, error) {
|
|
|
|
config, err := newSeederPeerConfig(network, defaultPeerConfig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "could not construct seeder")
|
|
|
|
}
|
|
|
|
|
2020-05-23 13:55:47 -07:00
|
|
|
sink, _ := os.OpenFile(os.DevNull, os.O_WRONLY, 0666)
|
|
|
|
logger := log.New(sink, "zcash_seeder: ", log.Ldate|log.Ltime|log.Lshortfile|log.LUTC)
|
|
|
|
// logger := log.New(os.Stdout, "zcash_seeder: ", log.Ldate|log.Ltime|log.Lshortfile|log.LUTC)
|
2019-10-12 09:42:16 -07:00
|
|
|
|
|
|
|
newSeeder := Seeder{
|
|
|
|
config: config,
|
|
|
|
logger: logger,
|
2019-10-12 16:18:25 -07:00
|
|
|
handshakeSignals: new(sync.Map),
|
2019-10-17 19:47:40 -07:00
|
|
|
pendingPeers: NewPeerMap(),
|
|
|
|
livePeers: NewPeerMap(),
|
2020-05-20 15:08:34 -07:00
|
|
|
addrBook: NewAddressBook(),
|
2020-05-22 14:33:54 -07:00
|
|
|
addrQueue: make(chan *wire.NetAddress, incomingAddressBufferSize),
|
2019-10-12 09:42:16 -07:00
|
|
|
}
|
|
|
|
|
2021-11-22 05:44:01 -08:00
|
|
|
// The seeder only acts on verack, addr and addrv2 messages.
|
|
|
|
// verack is used to keep track of peers, while addr and addrv2 receives
|
|
|
|
// new addresses which are requested by the seeder periodically
|
|
|
|
// sending getaddr requests to peers (see `RequestAddresses`).
|
2019-10-12 09:42:16 -07:00
|
|
|
newSeeder.config.Listeners.OnVerAck = newSeeder.onVerAck
|
2019-10-12 19:00:20 -07:00
|
|
|
newSeeder.config.Listeners.OnAddr = newSeeder.onAddr
|
2021-11-22 05:44:01 -08:00
|
|
|
// Note that per ZIP-155 we should not receive addrv2 messages from pre-170017
|
|
|
|
// peers, but we don't explicitly check for that.
|
|
|
|
newSeeder.config.Listeners.OnAddrV2 = newSeeder.onAddrV2
|
2019-10-12 09:42:16 -07:00
|
|
|
|
|
|
|
return &newSeeder, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func newTestSeeder(network network.Network) (*Seeder, error) {
|
|
|
|
config, err := newSeederPeerConfig(network, defaultPeerConfig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "could not construct seeder")
|
|
|
|
}
|
|
|
|
|
2019-12-30 17:54:38 -08:00
|
|
|
// sink, _ := os.OpenFile(os.DevNull, os.O_WRONLY, 0666)
|
|
|
|
// logger := log.New(sink, "zcash_seeder: ", log.Ldate|log.Ltime|log.Lshortfile|log.LUTC)
|
|
|
|
logger := log.New(os.Stdout, "zcash_seeder: ", log.Ldate|log.Ltime|log.Lshortfile|log.LUTC)
|
2019-10-12 09:42:16 -07:00
|
|
|
|
|
|
|
// Allows connections to self for easy mocking
|
|
|
|
config.AllowSelfConns = true
|
|
|
|
|
|
|
|
newSeeder := Seeder{
|
|
|
|
config: config,
|
|
|
|
logger: logger,
|
2019-10-12 16:18:25 -07:00
|
|
|
handshakeSignals: new(sync.Map),
|
2019-10-17 19:47:40 -07:00
|
|
|
pendingPeers: NewPeerMap(),
|
|
|
|
livePeers: NewPeerMap(),
|
2020-05-20 15:08:34 -07:00
|
|
|
addrBook: NewAddressBook(),
|
2020-05-22 14:33:54 -07:00
|
|
|
addrQueue: make(chan *wire.NetAddress, incomingAddressBufferSize),
|
2019-10-12 09:42:16 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
newSeeder.config.Listeners.OnVerAck = newSeeder.onVerAck
|
2019-10-12 19:00:20 -07:00
|
|
|
newSeeder.config.Listeners.OnAddr = newSeeder.onAddr
|
2021-11-22 05:44:01 -08:00
|
|
|
newSeeder.config.Listeners.OnAddrV2 = newSeeder.onAddrV2
|
2019-10-12 09:42:16 -07:00
|
|
|
|
|
|
|
return &newSeeder, nil
|
|
|
|
}
|
|
|
|
|
2019-10-09 15:32:15 -07:00
|
|
|
func newSeederPeerConfig(magic network.Network, template *peer.Config) (*peer.Config, error) {
|
|
|
|
var newPeerConfig peer.Config
|
|
|
|
|
|
|
|
// Load the default values
|
|
|
|
if template != nil {
|
|
|
|
newPeerConfig = *template
|
|
|
|
}
|
|
|
|
|
|
|
|
params, err := network.GetNetworkParams(magic)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "couldn't construct peer config")
|
|
|
|
}
|
|
|
|
newPeerConfig.ChainParams = params
|
|
|
|
|
|
|
|
return &newPeerConfig, nil
|
|
|
|
}
|
|
|
|
|
2019-10-16 18:05:42 -07:00
|
|
|
// GetNetworkDefaultPort returns the default port of the network this seeder is configured for.
|
|
|
|
func (s *Seeder) GetNetworkDefaultPort() string {
|
|
|
|
return s.config.ChainParams.DefaultPort
|
|
|
|
}
|
|
|
|
|
2019-12-30 17:54:38 -08:00
|
|
|
// ConnectOnDefaultPort attempts to connect to a peer on the default port at the
|
2019-10-16 18:05:42 -07:00
|
|
|
// specified address. It returns an error if it can't complete handshake with
|
|
|
|
// the peer. Otherwise it returns nil and adds the peer to the list of live
|
|
|
|
// connections and known-good addresses.
|
|
|
|
func (s *Seeder) ConnectOnDefaultPort(addr string) error {
|
2020-05-29 13:50:25 -07:00
|
|
|
_, err := s.Connect(addr, s.config.ChainParams.DefaultPort)
|
|
|
|
return err
|
2019-10-16 18:05:42 -07:00
|
|
|
}
|
2019-10-09 15:32:15 -07:00
|
|
|
|
2020-06-08 13:48:53 -07:00
|
|
|
// Connect attempts to connect to a peer at the given address and port. It will
|
|
|
|
// not connect to addresses known to be unusable. It returns a handle to the peer
|
|
|
|
// connection if the connection is successful or nil and an error if it fails.
|
2020-05-29 13:50:25 -07:00
|
|
|
func (s *Seeder) Connect(addr, port string) (*peer.Peer, error) {
|
2020-06-08 13:48:53 -07:00
|
|
|
host := net.JoinHostPort(addr, port)
|
|
|
|
p, err := peer.NewOutboundPeer(s.config, host)
|
2019-10-09 15:32:15 -07:00
|
|
|
if err != nil {
|
2020-05-29 13:50:25 -07:00
|
|
|
return nil, errors.Wrap(err, "constructing outbound peer")
|
2019-10-09 15:32:15 -07:00
|
|
|
}
|
|
|
|
|
2020-05-20 15:08:34 -07:00
|
|
|
pk := peerKeyFromPeer(p)
|
|
|
|
|
|
|
|
if s.addrBook.IsBlacklisted(pk) {
|
2020-05-29 13:50:25 -07:00
|
|
|
return nil, ErrBlacklistedPeer
|
2019-10-16 21:41:08 -07:00
|
|
|
}
|
|
|
|
|
2020-06-08 13:48:53 -07:00
|
|
|
return s.connect(p)
|
|
|
|
}
|
|
|
|
|
|
|
|
// connect attempts to connect to a peer at the given address and port. It
// returns a handle to the peer connection if the connection is successful
// or nil and an error if it fails.
func (s *Seeder) connect(p *peer.Peer) (*peer.Peer, error) {
	// PeerKeys are used in our internal maps to keep signals and responses from specific peers straight.
	pk := peerKeyFromPeer(p)

	// Snapshot membership in all three tracking maps up front.
	// NOTE(review): these reads and the Stores below are not one atomic
	// operation, so two concurrent connect calls for the same peer could both
	// pass the checks — confirm callers tolerate that race.
	_, alreadyPending := s.pendingPeers.Load(pk)
	_, alreadyHandshaking := s.handshakeSignals.Load(pk)
	_, alreadyLive := s.livePeers.Load(pk)

	if alreadyPending {
		s.logger.Printf("Peer is already pending: %s", p.Addr())
		return nil, ErrRepeatConnection
	}
	// Mark the peer pending for the duration of this attempt only.
	s.pendingPeers.Store(pk, p)
	defer s.pendingPeers.Delete(pk)

	if alreadyHandshaking {
		s.logger.Printf("Peer is already handshaking: %s", p.Addr())
		return nil, ErrRepeatConnection
	}
	// Buffered (size 1) so the verack listener can signal completion without
	// blocking even if we haven't reached the select below yet.
	s.handshakeSignals.Store(pk, make(chan struct{}, 1))
	defer s.handshakeSignals.Delete(pk)

	if alreadyLive {
		s.logger.Printf("Peer is already live: %s", p.Addr())
		return nil, ErrRepeatConnection
	}

	conn, err := net.DialTimeout("tcp", p.Addr(), connectionDialTimeout)
	if err != nil {
		return nil, errors.Wrap(err, "dialing peer address")
	}

	// Begin connection negotiation.
	s.logger.Printf("Handshake initated with peer %s", p.Addr())
	p.AssociateConnection(conn)

	// Wait for it
	if handshakeChan, ok := s.handshakeSignals.Load(pk); ok {
		select {
		case <-handshakeChan.(chan struct{}):
			s.logger.Printf("Handshake completed with peer %s", p.Addr())
			return p, nil
		case <-time.After(maximumHandshakeWait):
			// Tear the connection down so the peer doesn't linger half-open.
			p.Disconnect()
			p.WaitForDisconnect()
			return nil, errors.New("peer handshake started but timed out")
		}
	}

	// Unreachable in practice: we stored the channel above and only this
	// function's defer deletes it.
	return nil, errors.New("peer was not in handshake channel")
}
|
|
|
|
|
2019-10-16 18:05:42 -07:00
|
|
|
// GetPeer returns a live peer identified by "host:port" string, or an error if
|
|
|
|
// we aren't connected to that peer.
|
2019-10-17 19:47:40 -07:00
|
|
|
func (s *Seeder) GetPeer(addr PeerKey) (*peer.Peer, error) {
|
2019-10-16 18:05:42 -07:00
|
|
|
p, ok := s.livePeers.Load(addr)
|
2019-10-09 15:32:15 -07:00
|
|
|
|
2019-10-12 16:18:25 -07:00
|
|
|
if ok {
|
2019-12-30 17:54:38 -08:00
|
|
|
return p, nil
|
2019-10-09 15:32:15 -07:00
|
|
|
}
|
|
|
|
|
2019-10-12 19:00:20 -07:00
|
|
|
return nil, ErrNoSuchPeer
|
|
|
|
}
|
|
|
|
|
2019-10-16 18:05:42 -07:00
|
|
|
// DisconnectPeer disconnects from a live peer identified by "host:port"
|
|
|
|
// string. It returns an error if we aren't connected to that peer.
|
2019-10-17 19:47:40 -07:00
|
|
|
func (s *Seeder) DisconnectPeer(addr PeerKey) error {
|
2019-10-16 18:05:42 -07:00
|
|
|
p, ok := s.livePeers.Load(addr)
|
2019-10-12 19:00:20 -07:00
|
|
|
|
|
|
|
if !ok {
|
|
|
|
return ErrNoSuchPeer
|
|
|
|
}
|
|
|
|
|
2019-10-17 19:47:40 -07:00
|
|
|
s.logger.Printf("Disconnecting from peer %s", p.Addr())
|
|
|
|
p.Disconnect()
|
|
|
|
p.WaitForDisconnect()
|
2019-10-16 18:05:42 -07:00
|
|
|
s.livePeers.Delete(addr)
|
2019-10-12 19:00:20 -07:00
|
|
|
return nil
|
2019-10-12 09:42:16 -07:00
|
|
|
}
|
|
|
|
|
2020-05-20 15:08:34 -07:00
|
|
|
// DisconnectAndBlacklist disconnects from a live peer identified by
|
2019-10-16 19:18:30 -07:00
|
|
|
// "host:port" string. It returns an error if we aren't connected to that peer.
|
2020-05-20 15:08:34 -07:00
|
|
|
// It furthermore removes this peer from the list of known good
|
|
|
|
// addresses and adds them to a blacklist. to prevent future connections.
|
|
|
|
func (s *Seeder) DisconnectAndBlacklist(addr PeerKey) error {
|
2019-10-16 19:18:30 -07:00
|
|
|
p, ok := s.livePeers.Load(addr)
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
return ErrNoSuchPeer
|
|
|
|
}
|
|
|
|
|
2019-10-17 19:47:40 -07:00
|
|
|
s.logger.Printf("Disconnecting from peer %s", addr)
|
|
|
|
p.Disconnect()
|
|
|
|
p.WaitForDisconnect()
|
2019-10-16 19:18:30 -07:00
|
|
|
|
2019-10-17 19:47:40 -07:00
|
|
|
// Remove from live peer set
|
2019-10-16 19:18:30 -07:00
|
|
|
s.livePeers.Delete(addr)
|
|
|
|
|
2019-10-17 19:47:40 -07:00
|
|
|
// Never connect to them again
|
|
|
|
s.logger.Printf("Blacklisting peer %s", addr)
|
|
|
|
s.addrBook.Blacklist(addr)
|
2019-10-16 19:18:30 -07:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-10-16 18:05:42 -07:00
|
|
|
// DisconnectAllPeers terminates the connections to all live and pending peers.
|
2019-10-12 09:42:16 -07:00
|
|
|
func (s *Seeder) DisconnectAllPeers() {
|
2019-10-17 19:47:40 -07:00
|
|
|
s.pendingPeers.Range(func(key PeerKey, p *peer.Peer) bool {
|
2019-10-12 16:18:25 -07:00
|
|
|
p.Disconnect()
|
|
|
|
p.WaitForDisconnect()
|
|
|
|
s.pendingPeers.Delete(key)
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
2019-10-17 19:47:40 -07:00
|
|
|
s.livePeers.Range(func(key PeerKey, p *peer.Peer) bool {
|
2019-12-30 17:54:38 -08:00
|
|
|
s.DisconnectPeer(key)
|
2019-10-12 19:00:20 -07:00
|
|
|
return true
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-05-20 16:10:55 -07:00
|
|
|
// RequestAddresses sends a request for more addresses to every peer we're connected to,
|
|
|
|
// then checks to make sure the addresses that come back are usable before adding them to
|
2020-05-21 22:02:32 -07:00
|
|
|
// the address book. The call attempts to block until all addresses have been processed,
|
|
|
|
// but since we can't know how many that will be it eventually times out. Therefore,
|
|
|
|
// while calling RequestAddresses synchronously is possible, it risks a major delay; most
|
|
|
|
// users will be better served by giving this its own goroutine and using WaitForAddresses
|
|
|
|
// with a timeout to pause only until a sufficient number of addresses are ready.
|
|
|
|
func (s *Seeder) RequestAddresses() int {
|
2019-10-17 19:47:40 -07:00
|
|
|
s.livePeers.Range(func(key PeerKey, p *peer.Peer) bool {
|
2019-10-12 19:00:20 -07:00
|
|
|
s.logger.Printf("Requesting addresses from peer %s", p.Addr())
|
|
|
|
p.QueueMessage(wire.NewMsgGetAddr(), nil)
|
2019-10-12 16:18:25 -07:00
|
|
|
return true
|
|
|
|
})
|
2020-05-20 16:10:55 -07:00
|
|
|
|
|
|
|
// There's a sync concern: if this is called repeatedly you could end up broadcasting
|
|
|
|
// GetAddr messages to briefly live trial connections without meaning to. It's
|
|
|
|
// meant to be run on a timer that takes longer to fire than it takes to check addresses.
|
|
|
|
|
2020-05-21 22:02:32 -07:00
|
|
|
var peerCount int32
|
|
|
|
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(crawlerGoroutineCount)
|
|
|
|
|
|
|
|
for i := 0; i < crawlerGoroutineCount; i++ {
|
2020-05-20 16:10:55 -07:00
|
|
|
go func() {
|
2020-05-22 14:33:54 -07:00
|
|
|
defer wg.Done()
|
|
|
|
|
2020-05-20 16:10:55 -07:00
|
|
|
var na *wire.NetAddress
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case next := <-s.addrQueue:
|
|
|
|
// Pull the next address off the queue
|
|
|
|
na = next
|
2020-05-21 14:25:01 -07:00
|
|
|
case <-time.After(crawlerThreadTimeout):
|
2020-05-20 16:10:55 -07:00
|
|
|
// Or die if there wasn't one
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if !addrmgr.IsRoutable(na) && !s.config.AllowSelfConns {
|
|
|
|
s.logger.Printf("Got bad addr %s:%d from peer %s", na.IP, na.Port, "<placeholder>")
|
|
|
|
// TODO blacklist peers who give us crap addresses
|
|
|
|
//s.DisconnectAndBlacklist(peerKeyFromPeer(p))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
potentialPeer := peerKeyFromNA(na)
|
|
|
|
|
|
|
|
if s.addrBook.IsKnown(potentialPeer) {
|
|
|
|
s.logger.Printf("Already knew about %s:%d", na.IP, na.Port)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
portString := strconv.Itoa(int(na.Port))
|
2020-05-29 13:50:25 -07:00
|
|
|
newPeer, err := s.Connect(na.IP.String(), portString)
|
2020-05-20 16:10:55 -07:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
if err == ErrRepeatConnection {
|
|
|
|
//s.logger.Printf("Got duplicate peer %s:%d.", na.IP, na.Port)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Blacklist the potential peer. We might try to connect again later,
|
|
|
|
// since we assume IsRoutable filtered out the truly wrong ones.
|
|
|
|
s.logger.Printf("Got unusable peer %s:%d. Error: %s", na.IP, na.Port, err)
|
|
|
|
s.addrBook.Blacklist(potentialPeer)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2020-05-29 13:50:25 -07:00
|
|
|
// Ask the newly discovered peer if they know anyone we haven't met yet.
|
|
|
|
newPeer.QueueMessage(wire.NewMsgGetAddr(), nil)
|
|
|
|
|
2020-05-20 16:10:55 -07:00
|
|
|
s.logger.Printf("Successfully learned about %s:%d.", na.IP, na.Port)
|
2020-05-21 22:02:32 -07:00
|
|
|
atomic.AddInt32(&peerCount, 1)
|
2020-05-20 16:10:55 -07:00
|
|
|
s.addrBook.Add(potentialPeer)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
2020-05-21 22:02:32 -07:00
|
|
|
|
|
|
|
wg.Wait()
|
2020-05-22 14:33:54 -07:00
|
|
|
s.logger.Printf("RequestAddresses() finished.")
|
2020-05-21 22:02:32 -07:00
|
|
|
return int(peerCount)
|
|
|
|
}
|
|
|
|
|
|
|
|
// RefreshAddresses checks to make sure the addresses we think we know are
|
|
|
|
// still usable and removes them from the address book if they aren't.
|
|
|
|
// The call blocks until all addresses have been processed. If disconnect is
|
|
|
|
// true, we immediately disconnect from the peers after verifying them.
|
|
|
|
func (s *Seeder) RefreshAddresses(disconnect bool) {
|
2020-05-22 14:33:54 -07:00
|
|
|
s.logger.Printf("Refreshing address book")
|
2020-06-08 13:48:53 -07:00
|
|
|
defer s.logger.Printf("RefreshAddresses() finished.")
|
2020-05-22 14:33:54 -07:00
|
|
|
|
|
|
|
var refreshQueue chan *Address
|
|
|
|
var wg sync.WaitGroup
|
2020-05-21 22:02:32 -07:00
|
|
|
|
2020-05-22 14:33:54 -07:00
|
|
|
// XXX lil awkward to allocate a channel whose size we can't determine without a lock here
|
|
|
|
s.addrBook.enqueueAddrs(&refreshQueue)
|
2020-06-08 13:48:53 -07:00
|
|
|
s.logger.Printf("Address book contains %d addresses", len(refreshQueue))
|
|
|
|
|
|
|
|
if len(refreshQueue) == 0 {
|
|
|
|
return
|
|
|
|
}
|
2020-05-21 22:02:32 -07:00
|
|
|
|
|
|
|
for i := 0; i < crawlerGoroutineCount; i++ {
|
2020-05-22 14:33:54 -07:00
|
|
|
wg.Add(1)
|
2020-05-21 22:02:32 -07:00
|
|
|
go func() {
|
2020-05-22 14:33:54 -07:00
|
|
|
for len(refreshQueue) > 0 {
|
|
|
|
// Pull the next address off the queue
|
|
|
|
next := <-refreshQueue
|
|
|
|
na := next.netaddr
|
2020-05-21 22:02:32 -07:00
|
|
|
|
2020-05-22 14:33:54 -07:00
|
|
|
ipString := na.IP.String()
|
2020-05-21 22:02:32 -07:00
|
|
|
portString := strconv.Itoa(int(na.Port))
|
|
|
|
|
2020-05-29 13:50:25 -07:00
|
|
|
// Don't care about the peer individually, just that we can connect.
|
|
|
|
_, err := s.Connect(ipString, portString)
|
2020-05-21 22:02:32 -07:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
if err != ErrRepeatConnection {
|
|
|
|
s.logger.Printf("Peer %s:%d unusable on refresh. Error: %s", na.IP, na.Port, err)
|
2020-05-22 14:33:54 -07:00
|
|
|
// Blacklist the peer. We might try to connect again later.
|
|
|
|
// This would deadlock if enqueueAddrs still holds the RLock,
|
|
|
|
// hence the awkward channel allocation above.
|
|
|
|
s.addrBook.Blacklist(next.asPeerKey())
|
2020-05-21 22:02:32 -07:00
|
|
|
}
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if disconnect {
|
2020-05-22 14:33:54 -07:00
|
|
|
s.DisconnectPeer(next.asPeerKey())
|
2020-05-21 22:02:32 -07:00
|
|
|
}
|
2020-05-22 14:33:54 -07:00
|
|
|
|
|
|
|
s.logger.Printf("Validated %s", na.IP)
|
2020-05-21 22:02:32 -07:00
|
|
|
}
|
2020-05-22 14:33:54 -07:00
|
|
|
wg.Done()
|
2020-05-21 22:02:32 -07:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
2020-05-22 14:33:54 -07:00
|
|
|
wg.Wait()
|
2019-10-09 15:32:15 -07:00
|
|
|
}
|
2019-10-12 16:18:25 -07:00
|
|
|
|
2020-05-24 18:20:28 -07:00
|
|
|
// RetryBlacklist checks if the addresses in our blacklist are usable again.
|
|
|
|
// If the trial connection succeeds, they're removed from the blacklist.
|
|
|
|
func (s *Seeder) RetryBlacklist() {
|
|
|
|
s.logger.Printf("Giving the blacklist another chance")
|
2020-06-08 13:48:53 -07:00
|
|
|
defer s.logger.Printf("RetryBlacklist() finished.")
|
2020-05-24 18:20:28 -07:00
|
|
|
|
|
|
|
var blacklistQueue chan *Address
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
|
|
|
|
// XXX lil awkward to allocate a channel whose size we can't determine without a lock here
|
2020-06-01 10:33:18 -07:00
|
|
|
s.addrBook.enqueueBlacklist(&blacklistQueue)
|
2020-06-08 13:48:53 -07:00
|
|
|
s.logger.Printf("Blacklist contains %d addresses", len(blacklistQueue))
|
|
|
|
|
|
|
|
if len(blacklistQueue) == 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var peerCount int32
|
2020-05-24 18:20:28 -07:00
|
|
|
|
|
|
|
for i := 0; i < crawlerGoroutineCount; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
for len(blacklistQueue) > 0 {
|
|
|
|
// Pull the next address off the queue
|
|
|
|
next := <-blacklistQueue
|
|
|
|
na := next.netaddr
|
|
|
|
|
2020-06-08 13:48:53 -07:00
|
|
|
ip := na.IP.String()
|
|
|
|
port := strconv.Itoa(int(na.Port))
|
2020-05-24 18:20:28 -07:00
|
|
|
|
2020-06-08 13:48:53 -07:00
|
|
|
// Call internal connect directly to avoid being blocked
|
|
|
|
host := net.JoinHostPort(ip, port)
|
|
|
|
p, err := peer.NewOutboundPeer(s.config, host)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
_, err = s.connect(p)
|
2020-05-24 18:20:28 -07:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
// Connection failed. Peer remains blacklisted.
|
2020-06-01 10:33:18 -07:00
|
|
|
if time.Since(next.lastUpdate) > blacklistDropTime {
|
|
|
|
// If we've been retrying for a while, forget about this peer entirely.
|
|
|
|
// This would deadlock if enqueueBlacklist still held the RLock.
|
|
|
|
s.addrBook.DropFromBlacklist(next.asPeerKey())
|
2020-06-08 13:48:53 -07:00
|
|
|
s.logger.Printf("Dropping %s from blacklist", next.asPeerKey())
|
2020-06-01 10:33:18 -07:00
|
|
|
}
|
2020-05-24 18:20:28 -07:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
s.DisconnectPeer(next.asPeerKey())
|
|
|
|
|
2020-06-01 10:33:18 -07:00
|
|
|
// Remove the peer from the blacklist and add it back to the address book.
|
|
|
|
// This would deadlock if enqueueBlacklist still held the RLock.
|
2020-06-08 13:48:53 -07:00
|
|
|
atomic.AddInt32(&peerCount, 1)
|
2020-05-24 18:20:28 -07:00
|
|
|
s.addrBook.Redeem(next.asPeerKey())
|
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
2020-06-08 13:48:53 -07:00
|
|
|
s.logger.Printf("Added %d on retry", peerCount)
|
2020-05-24 18:20:28 -07:00
|
|
|
}
|
|
|
|
|
2020-05-20 15:08:34 -07:00
|
|
|
// WaitForAddresses waits for n addresses to be confirmed and available in the address book.
// It returns nil once enough addresses are ready, or ErrAddressTimeout if the
// timeout elapses first.
func (s *Seeder) WaitForAddresses(n int, timeout time.Duration) error {
	done := make(chan struct{})
	// NOTE(review): if we time out below, this goroutine keeps running until
	// the address count is eventually reached — confirm waitForAddresses
	// terminates on its own, otherwise each timed-out call leaks a goroutine.
	go s.addrBook.waitForAddresses(n, done)
	select {
	case <-done:
		return nil
	case <-time.After(timeout):
		return ErrAddressTimeout
	}
}
|
2020-05-19 19:32:23 -07:00
|
|
|
|
|
|
|
// Ready reports if the seeder is ready to provide addresses.
|
|
|
|
func (s *Seeder) Ready() bool {
|
2020-05-20 15:08:34 -07:00
|
|
|
return s.WaitForAddresses(minimumReadyAddresses, 1*time.Millisecond) == nil
|
2020-05-19 19:32:23 -07:00
|
|
|
}
|
2020-05-20 19:16:41 -07:00
|
|
|
|
2020-05-21 16:07:37 -07:00
|
|
|
// Addresses returns a slice of n IPv4 addresses or as many as we have if it's less than that.
|
2020-05-20 19:16:41 -07:00
|
|
|
func (s *Seeder) Addresses(n int) []net.IP {
|
2022-06-06 17:07:21 -07:00
|
|
|
return s.addrBook.shuffleAddressList(n, false, s.GetNetworkDefaultPort())
|
2020-05-21 16:07:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// AddressesV6 returns a slice of n IPv6 addresses or as many as we have if it's less than that.
|
|
|
|
func (s *Seeder) AddressesV6(n int) []net.IP {
|
2022-06-06 17:07:21 -07:00
|
|
|
return s.addrBook.shuffleAddressList(n, true, s.GetNetworkDefaultPort())
|
2020-05-20 19:16:41 -07:00
|
|
|
}
|
2020-05-21 15:39:10 -07:00
|
|
|
|
2020-05-21 22:02:32 -07:00
|
|
|
// GetPeerCount returns how many valid peers we know about.
|
|
|
|
func (s *Seeder) GetPeerCount() int {
|
|
|
|
return s.addrBook.Count()
|
|
|
|
}
|
|
|
|
|
2020-05-21 15:39:10 -07:00
|
|
|
// testBlacklist adds a peer to the blacklist directly, for testing.
|
|
|
|
func (s *Seeder) testBlacklist(pk PeerKey) {
|
|
|
|
s.addrBook.Blacklist(pk)
|
|
|
|
}
|
2020-05-24 18:20:28 -07:00
|
|
|
|
|
|
|
// testRedeem removes a peer from the blacklist directly, for testing.
// (The previous comment, "testRedeen adds a peer to the blacklist", had both
// a typo and an inverted description of what the code does.)
func (s *Seeder) testRedeem(pk PeerKey) {
	s.addrBook.DropFromBlacklist(pk)
}
|