htlcswitch: remove redundant variables

Because processing of the onion blob has been moved to another place, we
can get rid of the variables which are no longer needed.

NOTE: pendingBatch has been replaced with the batchCounter variable, but
it should be removed altogether, because the number of pending batch
updates might be counted by the state machine itself.
This commit is contained in:
Andrey Samokhvalov 2017-05-02 00:06:10 +03:00 committed by Olaoluwa Osuntokun
parent 0e2209cb12
commit a14f25830e
1 changed files with 34 additions and 112 deletions

View File

@ -9,8 +9,6 @@ import (
"io" "io"
"github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/roasbeef/btcd/btcec" "github.com/roasbeef/btcd/btcec"
@ -56,54 +54,29 @@ type ChannelLinkConfig struct {
// switch. Additionally, the link encapsulate logic of commitment protocol // switch. Additionally, the link encapsulate logic of commitment protocol
// message ordering and updates. // message ordering and updates.
type channelLink struct { type channelLink struct {
// htlcsToSettle is a list of preimages which allow us to settle one or
// many of the pending HTLCs we've received from the upstream peer.
htlcsToSettle map[uint64]*channeldb.Invoice
// htlcsToCancel is a set of HTLCs identified by their log index which
// are to be cancelled upon the next state transition.
htlcsToCancel map[uint64]lnwire.FailCode
// cancelReasons stores the reason why a particular HTLC was cancelled. // cancelReasons stores the reason why a particular HTLC was cancelled.
// The index of the HTLC within the log is mapped to the cancellation // The index of the HTLC within the log is mapped to the cancellation
// reason. This value is used to thread the proper error through to the // reason. This value is used to thread the proper error through to the
// htlcSwitch, or subsystem that initiated the HTLC. // htlcSwitch, or subsystem that initiated the HTLC.
// TODO(andrew.shvv) remove after payment descriptor start store
// htlc cancel reasons.
cancelReasons map[uint64]lnwire.OpaqueReason cancelReasons map[uint64]lnwire.OpaqueReason
// blobs tracks the remote log index of the incoming htlc's, // blobs tracks the remote log index of the incoming htlc's,
// mapped to the htlc onion blob which encapsulates the next hop. // mapped to the htlc onion blob which encapsulates the next hop.
// TODO(andrew.shvv) state machine might be used instead to determine // TODO(andrew.shvv) remove after payment descriptor start store
// the pending number of updates. // htlc onion blobs.
blobs map[uint64][lnwire.OnionPacketSize]byte blobs map[uint64][lnwire.OnionPacketSize]byte
// pendingBatch is slice of payments which have been added to the // batchCounter is the number of updates which we received from
// channel update log, but not yet committed to latest commitment. // remote side, but not include in commitment transaciton yet.
pendingBatch []*pendingPayment // TODO(andrew.shvv) remove after we add additional
// BatchNumber() method in state machine.
batchCounter uint64
// clearedHTCLs is a map of outgoing HTLCs we've committed to in our // channel is a lightning network channel to which we apply htlc
// chain which have not yet been settled by the upstream peer. // updates.
clearedHTCLs map[uint64]*pendingPayment channel *lnwallet.LightningChannel
// switchChan is a channel used to send packets to the htlc switch for
// forwarding.
switchChan chan<- *htlcPacket
// sphinx is an instance of the Sphinx onion Router for this node. The
// router will be used to process all incoming Sphinx packets embedded
// within HTLC add messages.
sphinx *sphinx.Router
// pendingCircuits tracks the remote log index of the incoming HTLCs,
// mapped to the processed Sphinx packet contained within the HTLC.
// This map is used as a staging area between when an HTLC is added to
// the log, and when it's locked into the commitment state of both
// chains. Once locked in, the processed packet is sent to the switch
// along with the HTLC to forward the packet to the next hop.
pendingCircuits map[uint64]*sphinx.ProcessedPacket
channel *lnwallet.LightningChannel
chanPoint *wire.OutPoint
chanID lnwire.ChannelID
// cfg is a structure which carries all dependable fields/handlers // cfg is a structure which carries all dependable fields/handlers
// which may affect behaviour of the service. // which may affect behaviour of the service.
@ -130,6 +103,22 @@ type channelLink struct {
quit chan struct{} quit chan struct{}
} }
// NewChannelLink create new instance of channel link.
func NewChannelLink(cfg *ChannelLinkConfig,
channel *lnwallet.LightningChannel) ChannelLink {
return &channelLink{
cfg: cfg,
channel: channel,
blobs: make(map[uint64][lnwire.OnionPacketSize]byte),
upstream: make(chan lnwire.Message),
downstream: make(chan *htlcPacket),
control: make(chan interface{}),
cancelReasons: make(map[uint64]lnwire.OpaqueReason),
quit: make(chan struct{}),
}
}
// A compile time check to ensure channelLink implements the ChannelLink // A compile time check to ensure channelLink implements the ChannelLink
// interface. // interface.
var _ ChannelLink = (*channelLink)(nil) var _ ChannelLink = (*channelLink)(nil)
@ -229,8 +218,7 @@ out:
// update in some time, check to see if we have any // update in some time, check to see if we have any
// pending updates we need to commit due to our // pending updates we need to commit due to our
// commitment chains being desynchronized. // commitment chains being desynchronized.
if l.channel.FullySynced() && if l.channel.FullySynced() {
len(l.htlcsToSettle) == 0 {
continue continue
} }
@ -244,7 +232,7 @@ out:
case <-batchTimer.C: case <-batchTimer.C:
// If the current batch is empty, then we have no work // If the current batch is empty, then we have no work
// here. // here.
if len(l.pendingBatch) == 0 { if l.batchCounter == 0 {
continue continue
} }
@ -313,14 +301,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket) {
htlc.ID = index htlc.ID = index
l.cfg.Peer.SendMessage(htlc) l.cfg.Peer.SendMessage(htlc)
l.batchCounter++
l.pendingBatch = append(l.pendingBatch, &pendingPayment{
htlc: htlc,
index: index,
preImage: pkt.preImage,
err: pkt.err,
done: pkt.done,
})
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFufillHTLC:
// An HTLC we forward to the switch has just settled somewhere // An HTLC we forward to the switch has just settled somewhere
@ -372,7 +353,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket) {
// If this newly added update exceeds the min batch size for adds, or // If this newly added update exceeds the min batch size for adds, or
// this is a settle request, then initiate an update. // this is a settle request, then initiate an update.
// TODO(roasbeef): enforce max HTLCs in flight limit // TODO(roasbeef): enforce max HTLCs in flight limit
if len(l.pendingBatch) >= 10 || isSettle { if l.batchCounter >= 10 || isSettle {
if err := l.updateCommitTx(); err != nil { if err := l.updateCommitTx(); err != nil {
log.Errorf("unable to update "+ log.Errorf("unable to update "+
"commitment: %v", err) "commitment: %v", err)
@ -506,7 +487,7 @@ func (l *channelLink) updateCommitTx() error {
sigTheirs, err := l.channel.SignNextCommitment() sigTheirs, err := l.channel.SignNextCommitment()
if err == lnwallet.ErrNoWindow { if err == lnwallet.ErrNoWindow {
log.Tracef("revocation window exhausted, unable to send %v", log.Tracef("revocation window exhausted, unable to send %v",
len(l.pendingBatch)) l.batchCounter)
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@ -523,69 +504,10 @@ func (l *channelLink) updateCommitTx() error {
} }
l.cfg.Peer.SendMessage(commitSig) l.cfg.Peer.SendMessage(commitSig)
// As we've just cleared out a batch, move all pending updates to the l.batchCounter = 0
// map of cleared HTLCs, clearing out the set of pending updates.
for _, update := range l.pendingBatch {
l.clearedHTCLs[update.index] = update
}
// Finally, clear our the current batch, and flip the pendingUpdate
// bool to indicate were waiting for a commitment signature.
// TODO(roasbeef): re-slice instead to avoid GC?
l.pendingBatch = nil
return nil return nil
} }
// logEntryToHtlcPkt converts a particular Lightning Commitment Protocol (LCP)
// log entry the corresponding htlcPacket with src/dest set along with the
// proper wire message. This helper method is provided in order to aid an
// htlcManager in forwarding packets to the htlcSwitch.
func logEntryToHtlcPkt(chanID lnwire.ChannelID, pd *lnwallet.PaymentDescriptor,
onionPkt *sphinx.ProcessedPacket,
reason lnwire.FailCode) (*htlcPacket, error) {
pkt := &htlcPacket{}
// TODO(roasbeef): alter after switch to log entry interface
var msg lnwire.Message
switch pd.EntryType {
case lnwallet.Add:
// TODO(roasbeef): timeout, onion blob, etc
var b bytes.Buffer
if err := onionPkt.Packet.Encode(&b); err != nil {
return nil, err
}
htlc := &lnwire.UpdateAddHTLC{
Amount: pd.Amount,
PaymentHash: pd.RHash,
}
copy(htlc.OnionBlob[:], b.Bytes())
msg = htlc
case lnwallet.Settle:
msg = &lnwire.UpdateFufillHTLC{
PaymentPreimage: pd.RPreimage,
}
case lnwallet.Fail:
// For cancellation messages, we'll also need to set the rHash
// within the htlcPacket so the switch knows on which outbound
// link to forward the cancellation message
msg = &lnwire.UpdateFailHTLC{
Reason: []byte{byte(reason)},
}
pkt.payHash = pd.RHash
}
pkt.htlc = msg
pkt.src = chanID
return pkt, nil
}
// Peer returns the representation of remote peer with which we // Peer returns the representation of remote peer with which we
// have the channel link opened. // have the channel link opened.
// NOTE: Part of the ChannelLink interface. // NOTE: Part of the ChannelLink interface.