discovery+routing: split 'routing' package into 'routing' and 'discovery'

In this commit the routing package was divided into two separate ones. This
was done because the 'routing' package had started to take on too much
responsibility, so after this commit the packages are as follows:

Routing package:
Entities:
* channeldb.ChannelEdge
* channeldb.ChannelPolicy
* channeldb.LightningNode

Responsibilities:
* send topology notifications
* find payment paths
* send payment
* apply topology changes to the graph
* prune graph
* validate that the funding point exists and corresponds to the given one
* be the source of topology data

Discovery package:
Entities:
* lnwire.AnnounceSignature
* lnwire.ChannelAnnouncement
* lnwire.NodeAnnouncement
* lnwire.ChannelUpdateAnnouncement

Responsibilities:
* validate announcement signatures
* sync topology with newly connected peers
* handle premature announcements
* redirect topology changes to the router subsystem
* broadcast announcements to the rest of the network
* exchange channel announcement proofs

Before this change all of the above lived in the 'routing' package, which is
quite a lot for one subsystem.

Andrey Samokhvalov 2017-03-19 21:40:25 +03:00 committed by Olaoluwa Osuntokun
parent 0e96d273d9
commit b4ac7071ff
11 changed files with 1543 additions and 683 deletions
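To make the new split concrete, below is a minimal wiring sketch, not part of this commit, showing how a caller might hand the router to the discovery service through the discovery.Config introduced in this change; the helper function and package name are hypothetical.

package lndsketch

import (
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/roasbeef/btcd/btcec"
)

// startDiscovery is a hypothetical helper: the router implements
// routing.ChannelGraphSource and owns the graph, while the p2p layer supplies
// the broadcast/send callbacks the discovery service uses to gossip.
func startDiscovery(router routing.ChannelGraphSource,
	notifier chainntnfs.ChainNotifier,
	broadcast func(exclude *btcec.PublicKey, msg ...lnwire.Message) error,
	send func(target *btcec.PublicKey, msg ...lnwire.Message) error) (*discovery.Discovery, error) {

	disc, err := discovery.New(discovery.Config{
		Router:       router,
		Notifier:     notifier,
		Broadcast:    broadcast,
		SendMessages: send,
	})
	if err != nil {
		return nil, err
	}

	// Start registers for block epoch notifications and spawns the
	// network handler which processes incoming announcements.
	if err := disc.Start(); err != nil {
		return nil, err
	}
	return disc, nil
}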

discovery/log.go (new file)

@ -0,0 +1,72 @@
package discovery
import (
"errors"
"io"
"github.com/btcsuite/btclog"
btcwallet "github.com/roasbeef/btcwallet/wallet"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
DisableLog()
}
// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
log = btclog.Disabled
}
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
log = logger
btcwallet.UseLogger(logger)
}
// SetLogWriter uses a specified io.Writer to output package logging info.
// This allows a caller to direct package logging output without needing a
// dependency on seelog. If the caller is also using btclog, UseLogger should
// be used instead.
func SetLogWriter(w io.Writer, level string) error {
if w == nil {
return errors.New("nil writer")
}
lvl, ok := btclog.LogLevelFromString(level)
if !ok {
return errors.New("invalid log level")
}
l, err := btclog.NewLoggerFromWriter(w, lvl)
if err != nil {
return err
}
UseLogger(l)
return nil
}
// logClosure is used to provide a closure over expensive logging operations
// so they don't have to be performed when the logging level doesn't warrant it.
type logClosure func() string
// String invokes the underlying function and returns the result.
func (c logClosure) String() string {
return c()
}
// newLogClosure returns a new closure over a function that returns a string
// which itself provides a Stringer interface so that it can be used with the
// logging system.
func newLogClosure(c func() string) logClosure {
return logClosure(c)
}
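For completeness, a small hedged example of how an application could opt in to this package's log output; by default the init above disables all logging. The writer and level chosen here are arbitrary.

package lndsketch

import (
	"log"
	"os"

	"github.com/lightningnetwork/lnd/discovery"
)

// enableDiscoveryLogging is a hypothetical snippet: it routes the discovery
// package's log output to stdout at debug level via SetLogWriter.
func enableDiscoveryLogging() {
	if err := discovery.SetLogWriter(os.Stdout, "debug"); err != nil {
		log.Fatalf("unable to enable discovery logging: %v", err)
	}
}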

discovery/service.go (new file)

@ -0,0 +1,643 @@
package discovery
import (
"sync"
"sync/atomic"
"time"
"encoding/hex"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcutil"
)
// networkMsg couples a routing related wire message with the peer that
// originally sent it.
type networkMsg struct {
msg lnwire.Message
isRemote bool
peer *btcec.PublicKey
}
// syncRequest represents a request from an outside subsystem to the discovery service to
// sync a new node to the latest graph state.
type syncRequest struct {
node *btcec.PublicKey
}
// Config defines the configuration for the service. ALL elements within the
// configuration MUST be non-nil for the service to carry out its duties.
type Config struct {
// Router is the subsystem which is responsible for managing the
// topology of the lightning network. After incoming channel, node, and
// channel update announcements are validated, they are sent to the
// router in order to be included in the LN graph.
Router routing.ChannelGraphSource
// Notifier is used for receiving notifications of incoming blocks.
// With each new incoming block found we process previously premature
// announcements.
// TODO(roasbeef): could possibly just replace this with an epoch
// channel.
Notifier chainntnfs.ChainNotifier
// Broadcast broadcasts a particular set of announcements to all peers
// that the daemon is connected to. If supplied, the exclude parameter
// indicates that the target peer should be excluded from the broadcast.
Broadcast func(exclude *btcec.PublicKey, msg ...lnwire.Message) error
// SendMessages is a function which allows the service to send a set of
// messages to a particular peer identified by the target public
// key.
SendMessages func(target *btcec.PublicKey, msg ...lnwire.Message) error
}
// New creates a new discovery service structure.
func New(cfg Config) (*Discovery, error) {
// TODO(roasbeef): remove this place holder after sigs are properly
// stored in the graph.
s := "30450221008ce2bc69281ce27da07e6683571319d18e949ddfa2965fb6caa" +
"1bf0314f882d70220299105481d63e0f4bc2a88121167221b6700d72a0e" +
"ad154c03be696a292d24ae"
fakeSigHex, err := hex.DecodeString(s)
if err != nil {
return nil, err
}
fakeSig, err := btcec.ParseSignature(fakeSigHex, btcec.S256())
if err != nil {
return nil, err
}
return &Discovery{
cfg: &cfg,
networkMsgs: make(chan *networkMsg),
quit: make(chan bool),
syncRequests: make(chan *syncRequest),
prematureAnnouncements: make(map[uint32][]*networkMsg),
fakeSig: fakeSig,
}, nil
}
// Discovery is a subsystem which is responsible for receiving announcements,
// validating them and applying the changes to the router, syncing the
// lightning network with newly connected nodes, broadcasting announcements
// after validation, negotiating the channel announcement proof exchange, and
// handling premature announcements.
type Discovery struct {
// Parameters which are needed to properly handle the start and stop
// of the service.
started uint32
stopped uint32
quit chan bool
wg sync.WaitGroup
// cfg is a copy of the configuration struct that the discovery service
// was initialized with.
cfg *Config
// newBlocks is a channel in which new blocks connected to the end of
// the main chain are sent over.
newBlocks <-chan *chainntnfs.BlockEpoch
// prematureAnnouncements maps a blockheight to a set of announcements
// which are "premature" from our PoV. An message is premature if
// it claims to be anchored in a block which is beyond the current main
// chain tip as we know it. Premature network messages will be processed
// once the chain tip as we know it extends to/past the premature
// height.
//
// TODO(roasbeef): limit premature networkMsgs to N
prematureAnnouncements map[uint32][]*networkMsg
// networkMsgs is a channel that carries new network broadcasted
// messages from outside the discovery service to be processed by the
// networkHandler.
networkMsgs chan *networkMsg
// syncRequests is a channel that carries requests to synchronize newly
// connected peers to the state of the lightning network topology from
// our PoV.
syncRequests chan *syncRequest
// bestHeight is the height of the block at the tip of the main chain
// as we know it.
bestHeight uint32
fakeSig *btcec.Signature
}
// ProcessRemoteAnnouncement sends a new remote announcement message along with
// the peer that sent the routing message. The announcement will be processed then
// added to a queue for batched trickled announcement to all connected peers.
// Remote channel announcements should contain the announcement proof and be
// fully validated.
func (d *Discovery) ProcessRemoteAnnouncement(msg lnwire.Message,
src *btcec.PublicKey) error {
aMsg := &networkMsg{
msg: msg,
isRemote: true,
peer: src,
}
select {
case d.networkMsgs <- aMsg:
return nil
case <-d.quit:
return errors.New("discovery has been shutted down")
}
}
// ProcessLocalAnnouncement sends a new local announcement message along with
// the peer that sent the routing message. The announcement will be processed then
// added to a queue for batched trickled announcement to all connected peers.
// Local channel announcements do not contain the announcement proof and should be
// fully validated. The channel proofs will be included later if the nodes agree
// to announce this channel to the rest of the network.
func (d *Discovery) ProcessLocalAnnouncement(msg lnwire.Message,
src *btcec.PublicKey) error {
aMsg := &networkMsg{
msg: msg,
isRemote: false,
peer: src,
}
select {
case d.networkMsgs <- aMsg:
return nil
case <-d.quit:
return errors.New("discovery has been shutted down")
}
}
// SynchronizeNode sends a message to the service indicating it should
// synchronize lightning topology state with the target node. This method
// is to be utilized when a node connects for the first time to provide it
// with the latest topology update state.
func (d *Discovery) SynchronizeNode(pub *btcec.PublicKey) {
select {
case d.syncRequests <- &syncRequest{
node: pub,
}:
case <-d.quit:
return
}
}
// Start spawns the network message handler goroutine and registers for new
// block notifications in order to properly handle premature announcements.
func (d *Discovery) Start() error {
if !atomic.CompareAndSwapUint32(&d.started, 0, 1) {
return nil
}
// First we register for new notifications of newly discovered blocks.
// We do this immediately so we'll later be able to consume any/all
// blocks which were discovered.
blockEpochs, err := d.cfg.Notifier.RegisterBlockEpochNtfn()
if err != nil {
return err
}
d.newBlocks = blockEpochs.Epochs
height, err := d.cfg.Router.CurrentBlockHeight()
if err != nil {
return err
}
d.bestHeight = height
d.wg.Add(1)
go d.networkHandler()
log.Info("Discovery service is started")
return nil
}
// Stop signals any active goroutines for a graceful closure.
func (d *Discovery) Stop() {
if !atomic.CompareAndSwapUint32(&d.stopped, 0, 1) {
return
}
close(d.quit)
d.wg.Wait()
log.Info("Discovery service is stoped.")
}
// networkHandler is the primary goroutine. The roles of this goroutine include
// answering queries related to the state of the network, syncing up newly
// connected peers, and also periodically broadcasting our latest topology state
// to all connected peers.
//
// NOTE: This MUST be run as a goroutine.
func (d *Discovery) networkHandler() {
defer d.wg.Done()
var announcementBatch []lnwire.Message
// TODO(roasbeef): parametrize the above
retransmitTimer := time.NewTicker(time.Minute * 30)
defer retransmitTimer.Stop()
// TODO(roasbeef): parametrize the above
trickleTimer := time.NewTicker(time.Millisecond * 300)
defer trickleTimer.Stop()
for {
select {
case announcement := <-d.networkMsgs:
// Process the network announcement to determine if
// this is either a new announcement from our PoV or an
// update to a prior vertex/edge we previously
// accepted.
accepted := d.processNetworkAnnouncement(announcement)
// If the update was accepted, then add it to our next
// announcement batch to be broadcast once the trickle
// timer ticks again.
if accepted {
// TODO(roasbeef): exclude peer that sent
announcementBatch = append(
announcementBatch,
announcement.msg,
)
}
// A new block has arrived, so we can re-process the
// previously premature announcements.
case newBlock, ok := <-d.newBlocks:
// If the channel has been closed, then this indicates
// the daemon is shutting down, so we exit ourselves.
if !ok {
return
}
// Once a new block arrives, we update our running
// track of the height of the chain tip.
blockHeight := uint32(newBlock.Height)
d.bestHeight = blockHeight
// Next we check if we have any premature announcements
// for this height, if so, then we process them once
// more as normal announcements.
prematureAnns := d.prematureAnnouncements[uint32(newBlock.Height)]
if len(prematureAnns) != 0 {
log.Infof("Re-processing %v premature "+
"announcements for height %v",
len(prematureAnns), blockHeight)
}
for _, ann := range prematureAnns {
accepted := d.processNetworkAnnouncement(ann)
if accepted {
announcementBatch = append(
announcementBatch,
ann.msg,
)
}
}
delete(d.prematureAnnouncements, blockHeight)
// The trickle timer has ticked, which indicates we should
// flush to the network the pending batch of new announcements
// we've received since the last trickle tick.
case <-trickleTimer.C:
// If the current announcement batch is nil, then we
// have no further work here.
if len(announcementBatch) == 0 {
continue
}
log.Infof("Broadcasting batch of %v new announcements",
len(announcementBatch))
// If we have new things to announce then broadcast
// them to all our immediately connected peers.
err := d.cfg.Broadcast(nil, announcementBatch...)
if err != nil {
log.Errorf("unable to send batch announcement: %v", err)
continue
}
// If we're able to broadcast the current batch
// successfully, then we reset the batch for a new
// round of announcements.
announcementBatch = nil
// The retransmission timer has ticked which indicates that we
// should broadcast our personal channels to the network. This
// addresses the case of channel advertisements either being
// dropped, or not properly propagated through the network.
case <-retransmitTimer.C:
var selfChans []lnwire.Message
// Iterate over our channels and construct the
// announcements array.
err := d.cfg.Router.ForAllOutgoingChannels(
func(p *channeldb.ChannelEdgePolicy) error {
c := &lnwire.ChannelUpdateAnnouncement{
Signature: d.fakeSig,
ChannelID: lnwire.NewChanIDFromInt(p.ChannelID),
Timestamp: uint32(p.LastUpdate.Unix()),
Flags: p.Flags,
TimeLockDelta: p.TimeLockDelta,
HtlcMinimumMsat: uint32(p.MinHTLC),
FeeBaseMsat: uint32(p.FeeBaseMSat),
FeeProportionalMillionths: uint32(p.FeeProportionalMillionths),
}
selfChans = append(selfChans, c)
return nil
})
if err != nil {
log.Errorf("unable to iterate over chann"+
"els: %v", err)
continue
} else if len(selfChans) == 0 {
continue
}
log.Debugf("Retransmitting %v outgoing channels",
len(selfChans))
// With all the wire announcements properly crafted,
// we'll broadcast our known outgoing channel to all our
// immediate peers.
if err := d.cfg.Broadcast(nil, selfChans...); err != nil {
log.Errorf("unable to re-broadcast "+
"channels: %v", err)
}
// We've just received a new request to synchronize a peer with
// our latest lightning network topology state. This indicates
// that a peer has just connected for the first time, so for now
// we dump our entire network graph and allow them to sift
// through the (subjectively) new information on their own.
case syncReq := <-d.syncRequests:
nodePub := syncReq.node.SerializeCompressed()
log.Infof("Synchronizing channel graph with %x", nodePub)
if err := d.synchronize(syncReq); err != nil {
log.Errorf("unable to sync graph state with %x: %v",
nodePub, err)
}
// The discovery service has been signalled to exit, so we exit our main
// loop so the wait group can be decremented.
case <-d.quit:
return
}
}
}
// processNetworkAnnouncement processes a new network related authenticated
// channel or node announcement. If the update didn't affect the internal state
// of the graph due to either being out of date, invalid, or redundant, then
// false is returned. Otherwise, true is returned indicating that the caller
// may want to batch this request to be broadcast to immediate peers during the
// next announcement epoch.
func (d *Discovery) processNetworkAnnouncement(aMsg *networkMsg) bool {
isPremature := func(chanID *lnwire.ChannelID) bool {
return chanID.BlockHeight > d.bestHeight
}
switch msg := aMsg.msg.(type) {
// A new node announcement has arrived which either presents a new
// node, or a node updating previously advertised information.
case *lnwire.NodeAnnouncement:
if aMsg.isRemote {
// TODO(andrew.shvv) Add node validation
}
node := &channeldb.LightningNode{
LastUpdate: time.Unix(int64(msg.Timestamp), 0),
Addresses: msg.Addresses,
PubKey: msg.NodeID,
Alias: msg.Alias.String(),
AuthSig: msg.Signature,
Features: msg.Features,
}
if err := d.cfg.Router.AddNode(node); err != nil {
log.Errorf("unable to add node: %v", err)
return false
}
// A new channel announcement has arrived, this indicates the
// *creation* of a new channel within the network. This only advertises
// the existence of a channel and not yet the routing policies in
// either direction of the channel.
case *lnwire.ChannelAnnouncement:
// If the advertised inclusionary block is beyond our knowledge
// of the chain tip, then we'll put the announcement in limbo
// to be fully verified once we advance forward in the chain.
if isPremature(&msg.ChannelID) {
blockHeight := msg.ChannelID.BlockHeight
log.Infof("Announcement for chan_id=(%v), is "+
"premature: advertises height %v, only height "+
"%v is known", msg.ChannelID, msg.ChannelID.BlockHeight,
d.bestHeight)
d.prematureAnnouncements[blockHeight] = append(
d.prematureAnnouncements[blockHeight],
aMsg,
)
return false
}
var proof *channeldb.ChannelAuthProof
if aMsg.isRemote {
// TODO(andrew.shvv) Add channel validation
}
proof = &channeldb.ChannelAuthProof{
NodeSig1: msg.FirstNodeSig,
NodeSig2: msg.SecondNodeSig,
BitcoinSig1: msg.FirstBitcoinSig,
BitcoinSig2: msg.SecondBitcoinSig,
}
edge := &channeldb.ChannelEdgeInfo{
ChannelID: msg.ChannelID.ToUint64(),
NodeKey1: msg.FirstNodeID,
NodeKey2: msg.SecondNodeID,
BitcoinKey1: msg.FirstBitcoinKey,
BitcoinKey2: msg.SecondBitcoinKey,
AuthProof: proof,
}
if err := d.cfg.Router.AddEdge(edge); err != nil {
if !routing.IsError(err, routing.ErrOutdated) {
log.Errorf("unable to add edge: %v", err)
} else {
log.Info("Unable to add edge: %v", err)
}
return false
}
// A new authenticated channel update has arrived; this indicates
// that the directional information for an already known channel has
// been updated.
case *lnwire.ChannelUpdateAnnouncement:
chanID := msg.ChannelID.ToUint64()
// If the advertised inclusionary block is beyond our knowledge
// of the chain tip, then we'll put the announcement in limbo
// to be fully verified once we advance forward in the chain.
if isPremature(&msg.ChannelID) {
blockHeight := msg.ChannelID.BlockHeight
log.Infof("Update announcement for chan_id=(%v), is "+
"premature: advertises height %v, only height "+
"%v is known", chanID, blockHeight,
d.bestHeight)
d.prematureAnnouncements[blockHeight] = append(
d.prematureAnnouncements[blockHeight],
aMsg,
)
return false
}
if aMsg.isRemote {
// TODO(andrew.shvv) Add update channel validation
}
// TODO(roasbeef): should be msat here
update := &channeldb.ChannelEdgePolicy{
ChannelID: chanID,
LastUpdate: time.Unix(int64(msg.Timestamp), 0),
Flags: msg.Flags,
TimeLockDelta: msg.TimeLockDelta,
MinHTLC: btcutil.Amount(msg.HtlcMinimumMsat),
FeeBaseMSat: btcutil.Amount(msg.FeeBaseMsat),
FeeProportionalMillionths: btcutil.Amount(msg.FeeProportionalMillionths),
}
if err := d.cfg.Router.UpdateEdge(update); err != nil {
log.Errorf("unable to update edge: %v", err)
return false
}
}
return true
}
// synchronize attempts to synchronize the target node in the syncReq to
// the latest channel graph state. In order to accomplish this, (currently) the
// entire network graph is read from disk, then serialized to the format
// defined within the current wire protocol. This cache of graph data is then
// sent directly to the target node.
func (d *Discovery) synchronize(syncReq *syncRequest) error {
targetNode := syncReq.node
// TODO(roasbeef): need to also store sig data in db
// * will be nice when we switch to pairing sigs would only need one ^_^
// We'll collate all the gathered routing messages into a single slice
// containing all the messages to be sent to the target peer.
var announceMessages []lnwire.Message
// First run through all the vertexes in the graph, retrieving the data
// for the announcement we originally retrieved.
var numNodes uint32
if err := d.cfg.Router.ForEachNode(func(node *channeldb.LightningNode) error {
alias, err := lnwire.NewAlias(node.Alias)
if err != nil {
return err
}
ann := &lnwire.NodeAnnouncement{
Signature: d.fakeSig,
Timestamp: uint32(node.LastUpdate.Unix()),
Addresses: node.Addresses,
NodeID: node.PubKey,
Alias: alias,
Features: node.Features,
}
announceMessages = append(announceMessages, ann)
numNodes++
return nil
}); err != nil {
return err
}
// With the vertexes gathered, we'll now retrieve the initial
// announcement, as well as the latest channel update announcement for
// both of the directed edges that make up the channel.
var numEdges uint32
if err := d.cfg.Router.ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
e1, e2 *channeldb.ChannelEdgePolicy) error {
chanID := lnwire.NewChanIDFromInt(chanInfo.ChannelID)
// First, using the parameters of the channel, along with the
// channel authentication proof, we'll re-create the
// original authenticated channel announcement.
// TODO(andrew.shvv) skip if proof is nil
authProof := chanInfo.AuthProof
chanAnn := &lnwire.ChannelAnnouncement{
FirstNodeSig: authProof.NodeSig1,
SecondNodeSig: authProof.NodeSig2,
ChannelID: chanID,
FirstBitcoinSig: authProof.BitcoinSig1,
SecondBitcoinSig: authProof.BitcoinSig2,
FirstNodeID: chanInfo.NodeKey1,
SecondNodeID: chanInfo.NodeKey2,
FirstBitcoinKey: chanInfo.BitcoinKey1,
SecondBitcoinKey: chanInfo.BitcoinKey2,
}
announceMessages = append(announceMessages, chanAnn)
// Since it's up to a node's policy as to whether they
// advertise the edge in their direction, we don't create an
// advertisement if the edge is nil.
if e1 != nil {
announceMessages = append(announceMessages, &lnwire.ChannelUpdateAnnouncement{
Signature: d.fakeSig,
ChannelID: chanID,
Timestamp: uint32(e1.LastUpdate.Unix()),
Flags: 0,
TimeLockDelta: e1.TimeLockDelta,
HtlcMinimumMsat: uint32(e1.MinHTLC),
FeeBaseMsat: uint32(e1.FeeBaseMSat),
FeeProportionalMillionths: uint32(e1.FeeProportionalMillionths),
})
}
if e2 != nil {
announceMessages = append(announceMessages, &lnwire.ChannelUpdateAnnouncement{
Signature: d.fakeSig,
ChannelID: chanID,
Timestamp: uint32(e2.LastUpdate.Unix()),
Flags: 1,
TimeLockDelta: e2.TimeLockDelta,
HtlcMinimumMsat: uint32(e2.MinHTLC),
FeeBaseMsat: uint32(e2.FeeBaseMSat),
FeeProportionalMillionths: uint32(e2.FeeProportionalMillionths),
})
}
numEdges++
return nil
}); err != nil && err != channeldb.ErrGraphNoEdgesFound {
log.Errorf("unable to sync edges w/ peer: %v", err)
return err
}
log.Infof("Syncing channel graph state with %x, sending %v "+
"nodes and %v edges", targetNode.SerializeCompressed(),
numNodes, numEdges)
// With all the announcement messages gathered, send them all in a
// single batch to the target peer.
return d.cfg.SendMessages(targetNode, announceMessages...)
}
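To illustrate the peer-facing surface defined above, a hedged sketch of how a peer's read loop might forward gossip to the service and trigger a graph sync on first connect; the handler names and surrounding peer code are hypothetical.

package lndsketch

import (
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/roasbeef/btcd/btcec"
)

// handlePeerGossip is a hypothetical read-loop fragment: routing related wire
// messages are handed to the discovery service, which validates them, applies
// accepted updates to the router and batches them for the next broadcast.
func handlePeerGossip(disc *discovery.Discovery, peerKey *btcec.PublicKey,
	msg lnwire.Message) error {

	switch msg.(type) {
	case *lnwire.NodeAnnouncement,
		*lnwire.ChannelAnnouncement,
		*lnwire.ChannelUpdateAnnouncement:
		return disc.ProcessRemoteAnnouncement(msg, peerKey)
	}

	// Non-gossip messages are handled elsewhere by the peer.
	return nil
}

// onPeerConnected is a hypothetical hook: when a peer connects for the first
// time, dump our current topology view to it.
func onPeerConnected(disc *discovery.Discovery, peerKey *btcec.PublicKey) {
	disc.SynchronizeNode(peerKey)
}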

discovery/service_test.go (new file)

@ -0,0 +1,373 @@
package discovery
import (
"fmt"
"net"
"sync"
prand "math/rand"
"testing"
"math/big"
"time"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcd/wire"
)
var (
testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
Port: 9000}
testAddrs = []net.Addr{testAddr}
testFeatures = lnwire.NewFeatureVector([]lnwire.Feature{})
testSig = &btcec.Signature{
R: new(big.Int),
S: new(big.Int),
}
_, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
_, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
)
type mockGraphSource struct {
nodes []*channeldb.LightningNode
edges []*channeldb.ChannelEdgeInfo
updates []*channeldb.ChannelEdgePolicy
bestHeight uint32
}
func newMockRouter(height uint32) *mockGraphSource {
return &mockGraphSource{
bestHeight: height,
}
}
var _ routing.ChannelGraphSource = (*mockGraphSource)(nil)
func (r *mockGraphSource) AddNode(node *channeldb.LightningNode) error {
r.nodes = append(r.nodes, node)
return nil
}
func (r *mockGraphSource) AddEdge(edge *channeldb.ChannelEdgeInfo) error {
r.edges = append(r.edges, edge)
return nil
}
func (r *mockGraphSource) UpdateEdge(policy *channeldb.ChannelEdgePolicy) error {
r.updates = append(r.updates, policy)
return nil
}
func (r *mockGraphSource) AddProof(chanID uint8,
proof *channeldb.ChannelAuthProof) error {
return nil
}
func (r *mockGraphSource) SelfEdges() ([]*channeldb.ChannelEdgePolicy, error) {
return nil, nil
}
func (r *mockGraphSource) CurrentBlockHeight() (uint32, error) {
return r.bestHeight, nil
}
func (r *mockGraphSource) ForEachNode(func(node *channeldb.LightningNode) error) error {
return nil
}
func (r *mockGraphSource) ForAllOutgoingChannels(cb func(c *channeldb.ChannelEdgePolicy) error) error {
return nil
}
func (r *mockGraphSource) ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
e1, e2 *channeldb.ChannelEdgePolicy) error) error {
return nil
}
type mockNotifier struct {
clientCounter uint32
epochClients map[uint32]chan *chainntnfs.BlockEpoch
sync.RWMutex
}
func newMockNotifier() *mockNotifier {
return &mockNotifier{
epochClients: make(map[uint32]chan *chainntnfs.BlockEpoch),
}
}
func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
numConfs uint32) (*chainntnfs.ConfirmationEvent, error) {
return nil, nil
}
func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint) (*chainntnfs.SpendEvent, error) {
return nil, nil
}
func (m *mockNotifier) notifyBlock(hash chainhash.Hash, height uint32) {
m.RLock()
defer m.RUnlock()
for _, client := range m.epochClients {
client <- &chainntnfs.BlockEpoch{
Height: int32(height),
Hash: &hash,
}
}
}
func (m *mockNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) {
m.RLock()
defer m.RUnlock()
epochChan := make(chan *chainntnfs.BlockEpoch)
clientID := m.clientCounter
m.clientCounter++
m.epochClients[clientID] = epochChan
return &chainntnfs.BlockEpochEvent{
Epochs: epochChan,
Cancel: func() {},
}, nil
}
func (m *mockNotifier) Start() error {
return nil
}
func (m *mockNotifier) Stop() error {
return nil
}
func createNodeAnnouncement() (*lnwire.NodeAnnouncement,
error) {
priv, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
return nil, err
}
pub := priv.PubKey().SerializeCompressed()
alias, err := lnwire.NewAlias("kek" + string(pub[:]))
if err != nil {
return nil, err
}
return &lnwire.NodeAnnouncement{
Timestamp: uint32(prand.Int31()),
Addresses: testAddrs,
NodeID: priv.PubKey(),
Alias: alias,
Features: testFeatures,
}, nil
}
func createUpdateAnnouncement(blockHeight uint32) *lnwire.ChannelUpdateAnnouncement {
return &lnwire.ChannelUpdateAnnouncement{
Signature: testSig,
ChannelID: lnwire.ChannelID{
BlockHeight: blockHeight,
},
Timestamp: uint32(prand.Int31()),
TimeLockDelta: uint16(prand.Int63()),
HtlcMinimumMsat: uint32(prand.Int31()),
FeeBaseMsat: uint32(prand.Int31()),
FeeProportionalMillionths: uint32(prand.Int31()),
}
}
func createChannelAnnouncement(blockHeight uint32) *lnwire.ChannelAnnouncement {
// Our fake channel will be "confirmed" at height 101.
chanID := lnwire.ChannelID{
BlockHeight: blockHeight,
TxIndex: 0,
TxPosition: 0,
}
return &lnwire.ChannelAnnouncement{
ChannelID: chanID,
}
}
type testCtx struct {
discovery *Discovery
router *mockGraphSource
notifier *mockNotifier
broadcastedMessage chan lnwire.Message
}
func createTestCtx(startHeight uint32) (*testCtx, func(), error) {
// Next we'll initialize an instance of the discovery service with mock
// versions of the chain notifier and the channel graph source. As we don't
// need to test any real p2p functionality, the broadcast function simply
// forwards messages to a test channel and SendMessages is left unpopulated.
notifier := newMockNotifier()
router := newMockRouter(startHeight)
broadcastedMessage := make(chan lnwire.Message)
discovery, err := New(Config{
Notifier: notifier,
Broadcast: func(_ *btcec.PublicKey, msgs ...lnwire.Message) error {
for _, msg := range msgs {
broadcastedMessage <- msg
}
return nil
},
SendMessages: nil,
Router: router,
})
if err != nil {
return nil, nil, fmt.Errorf("unable to create router %v", err)
}
if err := discovery.Start(); err != nil {
return nil, nil, fmt.Errorf("unable to start router: %v", err)
}
cleanUp := func() {
discovery.Stop()
}
return &testCtx{
router: router,
notifier: notifier,
discovery: discovery,
broadcastedMessage: broadcastedMessage,
}, cleanUp, nil
}
// TestProcessAnnouncement checks that mature announcements are propagated to
// the router subsystem.
func TestProcessAnnouncement(t *testing.T) {
ctx, cleanup, err := createTestCtx(0)
if err != nil {
t.Fatalf("can't create context: %v", err)
}
defer cleanup()
priv, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
t.Fatalf("can't create node pub key: %v", err)
}
nodePub := priv.PubKey()
na, err := createNodeAnnouncement()
if err != nil {
t.Fatalf("can't create node announcement: %v", err)
}
ctx.discovery.ProcessRemoteAnnouncement(na, nodePub)
select {
case <-ctx.broadcastedMessage:
case <-time.After(time.Second):
t.Fatal("announcememt wasn't proceeded")
}
if len(ctx.router.nodes) != 1 {
t.Fatalf("node wasn't added to router: %v", err)
}
ca := createChannelAnnouncement(0)
ctx.discovery.ProcessRemoteAnnouncement(ca, nodePub)
select {
case <-ctx.broadcastedMessage:
case <-time.After(time.Second):
t.Fatal("announcememt wasn't proceeded")
}
if len(ctx.router.edges) != 1 {
t.Fatalf("edge wasn't added to router: %v", err)
}
ua := createUpdateAnnouncement(0)
ctx.discovery.ProcessRemoteAnnouncement(ua, nodePub)
select {
case <-ctx.broadcastedMessage:
case <-time.After(time.Second):
t.Fatal("announcememt wasn't proceeded")
}
if len(ctx.router.updates) != 1 {
t.Fatalf("edge update wasn't added to router: %v", err)
}
}
// TestPrematureAnnouncement checks that premature networkMsgs are
// not propagated to the router subsystem until a block with the
// corresponding height is received.
func TestPrematureAnnouncement(t *testing.T) {
ctx, cleanup, err := createTestCtx(0)
if err != nil {
t.Fatalf("can't create context: %v", err)
}
defer cleanup()
priv, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
t.Fatalf("can't create node pub key: %v", err)
}
nodePub := priv.PubKey()
ca := createChannelAnnouncement(1)
ctx.discovery.ProcessRemoteAnnouncement(ca, nodePub)
select {
case <-ctx.broadcastedMessage:
t.Fatal("announcement was proceeded")
case <-time.After(100 * time.Millisecond):
}
if len(ctx.router.edges) != 0 {
t.Fatal("edge was added to router")
}
ua := createUpdateAnnouncement(1)
ctx.discovery.ProcessRemoteAnnouncement(ua, nodePub)
select {
case <-ctx.broadcastedMessage:
t.Fatal("announcement was proceeded")
case <-time.After(100 * time.Millisecond):
}
if len(ctx.router.updates) != 0 {
t.Fatal("edge update was added to router")
}
newBlock := &wire.MsgBlock{}
ctx.notifier.notifyBlock(newBlock.Header.BlockHash(), 1)
select {
case <-ctx.broadcastedMessage:
if err != nil {
t.Fatalf("announcememt was proceeded with err: %v", err)
}
case <-time.After(time.Second):
t.Fatal("announcememt wasn't proceeded")
}
if len(ctx.router.edges) != 1 {
t.Fatalf("edge was't added to router: %v", err)
}
select {
case <-ctx.broadcastedMessage:
if err != nil {
t.Fatalf("announcememt was proceeded with err: %v", err)
}
case <-time.After(time.Second):
t.Fatal("announcememt wasn't proceeded")
}
if len(ctx.router.updates) != 1 {
t.Fatalf("edge update wasn't added to router: %v", err)
}
}

log.go

@ -8,6 +8,7 @@ import (
"github.com/btcsuite/seelog"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/discovery"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/routing"
"github.com/roasbeef/btcd/connmgr"
@ -22,6 +23,7 @@ var (
ltndLog = btclog.Disabled
lnwlLog = btclog.Disabled
peerLog = btclog.Disabled
discLog = btclog.Disabled
fndgLog = btclog.Disabled
rpcsLog = btclog.Disabled
srvrLog = btclog.Disabled
@ -39,6 +41,7 @@ var subsystemLoggers = map[string]btclog.Logger{
"LTND": ltndLog,
"LNWL": lnwlLog,
"PEER": peerLog,
"DISC": discLog,
"RPCS": rpcsLog,
"SRVR": srvrLog,
"NTFN": ntfnLog,
@ -70,6 +73,10 @@ func useLogger(subsystemID string, logger btclog.Logger) {
case "PEER":
peerLog = logger
case "DISC":
discLog = logger
discovery.UseLogger(logger)
case "RPCS":
rpcsLog = logger

routing/errors.go

@ -1,31 +1,91 @@
package routing
import "errors"
import "github.com/go-errors/errors"
var (
// errorCode represents the error code and is used for compile time checks.
type errorCode uint8
const (
// ErrNoPathFound is returned when a path to the target destination
// does not exist in the graph.
ErrNoPathFound = errors.New("unable to find a path to " +
"destination")
ErrNoPathFound errorCode = iota
// ErrNoRouteFound is returned when the router is unable to find a
// valid route to the target destination after fees and time-lock
// limitations are factored in.
ErrNoRouteFound = errors.New("unable to find a eligible route to " +
"the destination")
ErrNoRouteFound
// ErrInsufficientCapacity is returned when a path if found, yet the
// capacity of one of the channels in the path is insufficient to carry
// the payment.
ErrInsufficientCapacity = errors.New("channel graph has " +
"insufficient capacity for the payment")
ErrInsufficientCapacity
// ErrMaxHopsExceeded is returned when a candidate path is found, but
// the length of that path exceeds HopLimit.
ErrMaxHopsExceeded = errors.New("potential path has too many hops")
ErrMaxHopsExceeded
// ErrTargetNotInNetwork is returned when the target of a path-finding
// or payment attempt isn't known to be within the current version of
// the channel graph.
ErrTargetNotInNetwork = errors.New("target not found")
ErrTargetNotInNetwork
// ErrOutdated is returned when the routing update has already
// been applied.
ErrOutdated
// ErrIgnored is returned when the update has been ignored because
// it doesn't bring us anything new.
ErrIgnored
)
// routerError is a structure that represents an error inside the routing
// package. It carries additional information about the error code in order to
// be able to distinguish errors outside of the current package.
type routerError struct {
err *errors.Error
code errorCode
}
// Error returns the string representation of the error.
// NOTE: Part of the error interface.
func (e *routerError) Error() string {
return e.err.Error()
}
// A compile time check to ensure routerError implements the error interface.
var _ error = (*routerError)(nil)
// newErr creates a routerError from the given error description and its
// corresponding error code.
func newErr(code errorCode, a interface{}) *routerError {
return &routerError{
code: code,
err: errors.New(a),
}
}
// newErrf creates a routerError from the given formatted error description and
// its corresponding error code.
func newErrf(code errorCode, format string, a ...interface{}) *routerError {
return &routerError{
code: code,
err: errors.Errorf(format, a...),
}
}
// IsError is a helper function which is needed to be able to check whether a
// returned error has a specific error code.
func IsError(e interface{}, codes ...errorCode) bool {
err, ok := e.(*routerError)
if !ok {
return false
}
for _, code := range codes {
if err.code == code {
return true
}
}
return false
}
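A short hedged example of how callers can react to the new coded errors, mirroring the IsError checks in the pathfinding and discovery changes elsewhere in this commit; the helper function below is hypothetical.

package lndsketch

import (
	"log"

	"github.com/lightningnetwork/lnd/routing"
)

// classifyRoutingError is a hypothetical helper: with errorCode based errors,
// callers branch on routing.IsError rather than comparing against exported
// sentinel error values.
func classifyRoutingError(err error) {
	switch {
	case err == nil:
		// Nothing to do.
	case routing.IsError(err, routing.ErrNoPathFound, routing.ErrNoRouteFound):
		log.Println("no usable route to the destination:", err)
	case routing.IsError(err, routing.ErrOutdated, routing.ErrIgnored):
		log.Println("stale or redundant topology update, ignoring:", err)
	default:
		log.Println("unexpected routing error:", err)
	}
}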

routing/notifications.go

@ -1,14 +1,13 @@
package routing
import (
"errors"
"fmt"
"net"
"sync/atomic"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/wire"
"github.com/roasbeef/btcutil"
@ -277,17 +276,17 @@ type ChannelEdgeUpdate struct {
// information required to create the topology change update from the graph
// database.
func addToTopologyChange(graph *channeldb.ChannelGraph, update *TopologyChange,
msg lnwire.Message) error {
msg interface{}) error {
switch m := msg.(type) {
// Any node announcement maps directly to a NetworkNodeUpdate struct.
// No further data munging or db queries are required.
case *lnwire.NodeAnnouncement:
case *channeldb.LightningNode:
nodeUpdate := &NetworkNodeUpdate{
Addresses: m.Addresses,
IdentityKey: m.NodeID,
Alias: m.Alias.String(),
IdentityKey: m.PubKey,
Alias: m.Alias,
}
nodeUpdate.IdentityKey.Curve = nil
@ -296,19 +295,19 @@ func addToTopologyChange(graph *channeldb.ChannelGraph, update *TopologyChange,
// We ignore initial channel announcements as we'll only send out
// updates once the individual edges themselves have been updated.
case *lnwire.ChannelAnnouncement:
case *channeldb.ChannelEdgeInfo:
return nil
// Any new ChannelUpdateAnnouncements will generate a corresponding
// ChannelEdgeUpdate notification.
case *lnwire.ChannelUpdateAnnouncement:
case *channeldb.ChannelEdgePolicy:
// We'll need to fetch the edge's information from the database
// in order to get the information concerning which nodes are
// being connected.
chanID := m.ChannelID.ToUint64()
edgeInfo, _, _, err := graph.FetchChannelEdgesByID(chanID)
edgeInfo, _, _, err := graph.FetchChannelEdgesByID(m.ChannelID)
if err != nil {
return err
return errors.Errorf("unable fetch channel edge: %v",
err)
}
// If the flag is one, then the advertising node is actually
@ -321,13 +320,13 @@ func addToTopologyChange(graph *channeldb.ChannelGraph, update *TopologyChange,
}
edgeUpdate := &ChannelEdgeUpdate{
ChanID: chanID,
ChanID: m.ChannelID,
ChanPoint: edgeInfo.ChannelPoint,
TimeLockDelta: m.TimeLockDelta,
Capacity: edgeInfo.Capacity,
MinHTLC: btcutil.Amount(m.HtlcMinimumMsat),
BaseFee: btcutil.Amount(m.FeeBaseMsat),
FeeRate: btcutil.Amount(m.FeeProportionalMillionths),
MinHTLC: m.MinHTLC,
BaseFee: m.FeeBaseMSat,
FeeRate: m.FeeProportionalMillionths,
AdvertisingNode: sourceNode,
ConnectingNode: connectingNode,
}

routing/notifications_test.go

@ -53,7 +53,7 @@ func createGraphNode() (*channeldb.LightningNode, error) {
}, nil
}
func createTestWireNode() (*lnwire.NodeAnnouncement, error) {
func createTestNode() (*channeldb.LightningNode, error) {
priv, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
return nil, err
@ -66,25 +66,26 @@ func createTestWireNode() (*lnwire.NodeAnnouncement, error) {
return nil, err
}
return &lnwire.NodeAnnouncement{
Timestamp: uint32(prand.Int31()),
Addresses: testAddrs,
NodeID: priv.PubKey(),
Alias: alias,
return &channeldb.LightningNode{
LastUpdate: time.Now(),
Address: testAddr,
PubKey: priv.PubKey(),
Alias: alias.String(),
Features: testFeatures,
}, nil
}
func randEdgePolicyAnn(chanID lnwire.ChannelID) *lnwire.ChannelUpdateAnnouncement {
func randEdgePolicy(chanID lnwire.ChannelID,
node *channeldb.LightningNode) *channeldb.ChannelEdgePolicy {
return &lnwire.ChannelUpdateAnnouncement{
Signature: testSig,
ChannelID: chanID,
Timestamp: uint32(prand.Int31()),
return &channeldb.ChannelEdgePolicy{
ChannelID: chanID.ToUint64(),
LastUpdate: time.Unix(int64(prand.Int31()), 0),
TimeLockDelta: uint16(prand.Int63()),
HtlcMinimumMsat: uint32(prand.Int31()),
FeeBaseMsat: uint32(prand.Int31()),
FeeProportionalMillionths: uint32(prand.Int31()),
MinHTLC: btcutil.Amount(prand.Int31()),
FeeBaseMSat: btcutil.Amount(prand.Int31()),
FeeProportionalMillionths: btcutil.Amount(prand.Int31()),
Node: node,
}
}
@ -268,7 +269,7 @@ func (m *mockNotifier) Stop() error {
return nil
}
// TestEdgeUpdateNotification tests that when edges are updated or discovered,
// TestEdgeUpdateNotification tests that when edges are updated or added,
// a proper notification is sent of to all registered clients.
func TestEdgeUpdateNotification(t *testing.T) {
const startingBlockHeight = 101
@ -292,34 +293,43 @@ func TestEdgeUpdateNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between, and add them as members of the channel graph.
node1, err := createTestWireNode()
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
node2, err := createTestWireNode()
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
// Send the two node announcements to the channel router so they can be
// validated and stored within the graph database.
ctx.router.ProcessRoutingMessage(node1, node1.NodeID)
ctx.router.ProcessRoutingMessage(node2, node2.NodeID)
// Send the two node topology updates to the channel router so they
// can be validated and stored within the graph database.
if err := ctx.router.AddNode(node1); err != nil {
t.Fatal(err)
}
if err := ctx.router.AddNode(node2); err != nil {
t.Fatal(err)
}
// Finally, to conclude our test set up, we'll create a channel
// announcement to announce the created channel between the two nodes.
channelAnn := &lnwire.ChannelAnnouncement{
FirstNodeSig: testSig,
SecondNodeSig: testSig,
ChannelID: chanID,
FirstBitcoinSig: testSig,
SecondBitcoinSig: testSig,
FirstNodeID: node1.NodeID,
SecondNodeID: node2.NodeID,
FirstBitcoinKey: node1.NodeID,
SecondBitcoinKey: node2.NodeID,
// update to announce the created channel between the two nodes.
edge := &channeldb.ChannelEdgeInfo{
ChannelID: chanID.ToUint64(),
NodeKey1: node1.PubKey,
NodeKey2: node2.PubKey,
BitcoinKey1: node1.PubKey,
BitcoinKey2: node2.PubKey,
AuthProof: &channeldb.ChannelAuthProof{
NodeSig1: testSig,
NodeSig2: testSig,
BitcoinSig1: testSig,
BitcoinSig2: testSig,
},
}
if err := ctx.router.AddEdge(edge); err != nil {
t.Fatal(err)
}
ctx.router.ProcessRoutingMessage(channelAnn, node1.NodeID)
// With the channel edge now in place, we'll subscribe for topology
// notifications.
@ -330,17 +340,21 @@ func TestEdgeUpdateNotification(t *testing.T) {
// Create random policy edges that stem from the channel id
// created above.
edge1 := randEdgePolicyAnn(chanID)
edge1 := randEdgePolicy(chanID, node1)
edge1.Flags = 0
edge2 := randEdgePolicyAnn(chanID)
edge2 := randEdgePolicy(chanID, node2)
edge2.Flags = 1
ctx.router.ProcessRoutingMessage(edge1, node1.NodeID)
ctx.router.ProcessRoutingMessage(edge2, node2.NodeID)
if err := ctx.router.UpdateEdge(edge1); err != nil {
t.Fatalf("unable to add edge update: %v", err)
}
if err := ctx.router.UpdateEdge(edge2); err != nil {
t.Fatalf("unable to add edge update: %v", err)
}
assertEdgeAnnCorrect := func(t *testing.T, edgeUpdate *ChannelEdgeUpdate,
edgeAnn *lnwire.ChannelUpdateAnnouncement) {
if edgeUpdate.ChanID != edgeAnn.ChannelID.ToUint64() {
assertEdgeCorrect := func(t *testing.T, edgeUpdate *ChannelEdgeUpdate,
edgeAnn *channeldb.ChannelEdgePolicy) {
if edgeUpdate.ChanID != edgeAnn.ChannelID {
t.Fatalf("channel ID of edge doesn't match: "+
"expected %v, got %v", chanID.ToUint64(), edgeUpdate.ChanID)
}
@ -354,14 +368,14 @@ func TestEdgeUpdateNotification(t *testing.T) {
t.Fatalf("capacity of edge doesn't match: "+
"expected %v, got %v", chanValue, edgeUpdate.Capacity)
}
if edgeUpdate.MinHTLC != btcutil.Amount(edgeAnn.HtlcMinimumMsat) {
if edgeUpdate.MinHTLC != btcutil.Amount(edgeAnn.MinHTLC) {
t.Fatalf("min HTLC of edge doesn't match: "+
"expected %v, got %v", btcutil.Amount(edgeAnn.HtlcMinimumMsat),
"expected %v, got %v", btcutil.Amount(edgeAnn.MinHTLC),
edgeUpdate.MinHTLC)
}
if edgeUpdate.BaseFee != btcutil.Amount(edgeAnn.FeeBaseMsat) {
if edgeUpdate.BaseFee != btcutil.Amount(edgeAnn.FeeBaseMSat) {
t.Fatalf("base fee of edge doesn't match: "+
"expected %v, got %v", edgeAnn.FeeBaseMsat,
"expected %v, got %v", edgeAnn.FeeBaseMSat,
edgeUpdate.BaseFee)
}
if edgeUpdate.FeeRate != btcutil.Amount(edgeAnn.FeeProportionalMillionths) {
@ -382,26 +396,26 @@ func TestEdgeUpdateNotification(t *testing.T) {
case ntfn := <-ntfnClient.TopologyChanges:
edgeUpdate := ntfn.ChannelEdgeUpdates[0]
if i == 0 {
assertEdgeAnnCorrect(t, edgeUpdate, edge1)
if !edgeUpdate.AdvertisingNode.IsEqual(node1.NodeID) {
t.Fatalf("advertising node mismatch")
assertEdgeCorrect(t, edgeUpdate, edge1)
if !edgeUpdate.AdvertisingNode.IsEqual(node1.PubKey) {
t.Fatal("advertising node mismatch")
}
if !edgeUpdate.ConnectingNode.IsEqual(node2.NodeID) {
t.Fatalf("connecting node mismatch")
if !edgeUpdate.ConnectingNode.IsEqual(node2.PubKey) {
t.Fatal("connecting node mismatch")
}
continue
}
assertEdgeAnnCorrect(t, edgeUpdate, edge2)
if !edgeUpdate.ConnectingNode.IsEqual(node1.NodeID) {
t.Fatalf("connecting node mismatch")
assertEdgeCorrect(t, edgeUpdate, edge2)
if !edgeUpdate.ConnectingNode.IsEqual(node1.PubKey) {
t.Fatal("connecting node mismatch")
}
if !edgeUpdate.AdvertisingNode.IsEqual(node2.NodeID) {
t.Fatalf("advertising node mismatch")
if !edgeUpdate.AdvertisingNode.IsEqual(node2.PubKey) {
t.Fatal("advertising node mismatch")
}
case <-time.After(time.Second * 5):
t.Fatalf("update not received")
t.Fatal("update not received")
}
}
}
@ -424,20 +438,24 @@ func TestNodeUpdateNotification(t *testing.T) {
// Create two random nodes to add to send as node announcement messages
// to trigger notifications.
node1Ann, err := createTestWireNode()
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
node2Ann, err := createTestWireNode()
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
// Send both announcement message to the channel router.
ctx.router.ProcessRoutingMessage(node1Ann, node1Ann.NodeID)
ctx.router.ProcessRoutingMessage(node2Ann, node2Ann.NodeID)
// Change network topology by adding nodes to the channel router.
if err := ctx.router.AddNode(node1); err != nil {
t.Fatalf("unable to add node: %v", err)
}
if err := ctx.router.AddNode(node2); err != nil {
t.Fatalf("unable to add node: %v", err)
}
assertNodeNtfnCorrect := func(t *testing.T, ann *lnwire.NodeAnnouncement,
assertNodeNtfnCorrect := func(t *testing.T, ann *channeldb.LightningNode,
ntfns []*NetworkNodeUpdate) {
// For each processed announcement we should only receive a
@ -454,14 +472,14 @@ func TestNodeUpdateNotification(t *testing.T) {
t.Fatalf("node address doesn't match: expected %v, got %v",
nodeNtfn.Addresses[0], ann.Addresses[0])
}
if !nodeNtfn.IdentityKey.IsEqual(ann.NodeID) {
if !nodeNtfn.IdentityKey.IsEqual(ann.PubKey) {
t.Fatalf("node identity keys don't match: expected %x, "+
"got %x", ann.NodeID.SerializeCompressed(),
"got %x", ann.PubKey.SerializeCompressed(),
nodeNtfn.IdentityKey.SerializeCompressed())
}
if nodeNtfn.Alias != ann.Alias.String() {
if nodeNtfn.Alias != ann.Alias {
t.Fatalf("node alias doesn't match: expected %v, got %v",
ann.Alias.String(), nodeNtfn.Alias)
ann.Alias, nodeNtfn.Alias)
}
}
@ -472,11 +490,11 @@ func TestNodeUpdateNotification(t *testing.T) {
select {
case ntfn := <-ntfnClient.TopologyChanges:
if i == 0 {
assertNodeNtfnCorrect(t, node1Ann, ntfn.NodeUpdates)
assertNodeNtfnCorrect(t, node1, ntfn.NodeUpdates)
continue
}
assertNodeNtfnCorrect(t, node2Ann, ntfn.NodeUpdates)
assertNodeNtfnCorrect(t, node2, ntfn.NodeUpdates)
case <-time.After(time.Second * 5):
}
}
@ -484,11 +502,13 @@ func TestNodeUpdateNotification(t *testing.T) {
// If we receive a new update from a node (with a higher timestamp),
// then it should trigger a new notification.
// TODO(roasbeef): assume monotonic time.
nodeUpdateAnn := *node1Ann
nodeUpdateAnn.Timestamp = node1Ann.Timestamp + 300
nodeUpdateAnn := *node1
nodeUpdateAnn.LastUpdate = node1.LastUpdate.Add(300 * time.Millisecond)
// Send off the new node announcement to the channel router.
ctx.router.ProcessRoutingMessage(&nodeUpdateAnn, node1Ann.NodeID)
// Add new node topology update to the channel router.
if err := ctx.router.AddNode(&nodeUpdateAnn); err != nil {
t.Fatalf("unable to add node: %v", err)
}
// Once again a notification should be received reflecting the up to
// date node announcement.
@ -515,9 +535,9 @@ func TestNotificationCancellation(t *testing.T) {
t.Fatalf("unable to subscribe for channel notifications: %v", err)
}
// We'll create a fresh new node announcement to feed to the channel
// We'll create a fresh new node topology update to feed to the channel
// router.
node1Ann, err := createTestWireNode()
node, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
@ -528,7 +548,9 @@ func TestNotificationCancellation(t *testing.T) {
// client.
ntfnClient.Cancel()
ctx.router.ProcessRoutingMessage(node1Ann, node1Ann.NodeID)
if err := ctx.router.AddNode(node); err != nil {
t.Fatalf("unable to add node: %v", err)
}
select {
// The notification shouldn't be sent, however, the channel should be
@ -538,10 +560,10 @@ func TestNotificationCancellation(t *testing.T) {
return
}
t.Fatalf("notification sent but shouldn't have been")
t.Fatal("notification sent but shouldn't have been")
case <-time.After(time.Second * 5):
t.Fatalf("notification client never cancelled")
t.Fatal("notification client never cancelled")
}
}
@ -569,29 +591,33 @@ func TestChannelCloseNotification(t *testing.T) {
// Next we'll create two test nodes that the fake channel will be open
// between, and add them as members of the channel graph.
node1, err := createTestWireNode()
node1, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
node2, err := createTestWireNode()
node2, err := createTestNode()
if err != nil {
t.Fatalf("unable to create test node: %v", err)
}
// Finally, to conclude our test set up, we'll create a channel
// announcement to announce the created channel between the two nodes.
channelAnn := lnwire.ChannelAnnouncement{
FirstNodeSig: testSig,
SecondNodeSig: testSig,
ChannelID: chanID,
FirstBitcoinSig: testSig,
SecondBitcoinSig: testSig,
FirstNodeID: node1.NodeID,
SecondNodeID: node2.NodeID,
FirstBitcoinKey: node1.NodeID,
SecondBitcoinKey: node2.NodeID,
edge := &channeldb.ChannelEdgeInfo{
ChannelID: chanID.ToUint64(),
NodeKey1: node1.PubKey,
NodeKey2: node2.PubKey,
BitcoinKey1: node1.PubKey,
BitcoinKey2: node2.PubKey,
AuthProof: &channeldb.ChannelAuthProof{
NodeSig1: testSig,
NodeSig2: testSig,
BitcoinSig1: testSig,
BitcoinSig2: testSig,
},
}
if err := ctx.router.AddEdge(edge); err != nil {
t.Fatalf("unable to add edge: %v", err)
}
ctx.router.ProcessRoutingMessage(&channelAnn, node1.NodeID)
// With the channel edge now in place, we'll subscribe for topology
// notifications.
@ -626,7 +652,7 @@ func TestChannelCloseNotification(t *testing.T) {
// "closed" above.
closedChans := ntfn.ClosedChannels
if len(closedChans) == 0 {
t.Fatalf("close channel ntfn not populated")
t.Fatal("close channel ntfn not populated")
} else if len(closedChans) != 1 {
t.Fatalf("only one should've been detected as closed, "+
"instead %v were", len(closedChans))
@ -656,6 +682,6 @@ func TestChannelCloseNotification(t *testing.T) {
}
case <-time.After(time.Second * 5):
t.Fatalf("notification not sent")
t.Fatal("notification not sent")
}
}

routing/pathfind.go

@ -187,7 +187,8 @@ func newRoute(amtToSend btcutil.Amount, pathEdges []*ChannelHop) (*Route, error)
// enough capacity to forward the required amount which
// includes the fee dictated at each hop.
if nextHop.AmtToForward > nextHop.Channel.Capacity {
return nil, ErrInsufficientCapacity
return nil, newErrf(ErrInsufficientCapacity, "channel graph has "+
"insufficient capacity for the payment")
}
// We don't pay any fees to ourselves on the first-hop channel,
@ -373,7 +374,8 @@ func findPath(graph *channeldb.ChannelGraph, sourceNode *channeldb.LightningNode
// If the target node isn't found in the prev hop map, then a path
// doesn't exist, so we terminate in an error.
if _, ok := prev[newVertex(target)]; !ok {
return nil, ErrNoPathFound
return nil, newErrf(ErrNoPathFound, "unable to find a path to "+
"destination")
}
// If the potential route if below the max hop limit, then we'll use
@ -398,7 +400,8 @@ func findPath(graph *channeldb.ChannelGraph, sourceNode *channeldb.LightningNode
// hops, then it's invalid.
numEdges := len(pathEdges)
if numEdges > HopLimit {
return nil, ErrMaxHopsExceeded
return nil, newErr(ErrMaxHopsExceeded, "potential path has "+
"too many hops")
}
// As our traversal of the prev map above walked backwards from the
@ -515,7 +518,7 @@ func findPaths(graph *channeldb.ChannelGraph, source *channeldb.LightningNode,
// If we weren't able to find a path, we'll continue to
// the next round.
if err == ErrNoPathFound {
if IsError(err, ErrNoPathFound) {
continue
} else if err != nil {
return nil, err

routing/pathfind_test.go

@ -508,7 +508,7 @@ func TestPathNotAvailable(t *testing.T) {
_, err = findPath(graph, sourceNode, unknownNode, ignoredVertexes,
ignoredEdges, 100)
if err != ErrNoPathFound {
if !IsError(err, ErrNoPathFound) {
t.Fatalf("path shouldn't have been found: %v", err)
}
}
@ -540,7 +540,7 @@ func TestPathInsufficientCapacity(t *testing.T) {
const payAmt = btcutil.SatoshiPerBitcoin
_, err = findPath(graph, sourceNode, target, ignoredVertexes,
ignoredEdges, payAmt)
if err != ErrNoPathFound {
if !IsError(err, ErrNoPathFound) {
t.Fatalf("graph shouldn't be able to support payment: %v", err)
}
}

routing/router.go (file diff suppressed because it is too large)

routing/router_test.go

@ -77,12 +77,6 @@ func createTestCtx(startingHeight uint32, testGraph ...string) (*testCtx, func()
Graph: graph,
Chain: chain,
Notifier: notifier,
Broadcast: func(_ *btcec.PublicKey, msg ...lnwire.Message) error {
return nil
},
SendMessages: func(_ *btcec.PublicKey, msg ...lnwire.Message) error {
return nil
},
SendToSwitch: func(_ *btcec.PublicKey,
_ *lnwire.UpdateAddHTLC) ([32]byte, error) {
return [32]byte{}, nil