funding+server: ensure we cancel all reservations when a peer disconnects

In this commit, we fix an existing issue that could at times cause an
inconsistent view between the set of total coins and the set of segwit
coins in the node's wallet. This could be triggered by initiating a
funding flow that then broke down somewhere along the line, at which
point either we or the remote peer would disconnect. When we initiate
funding flows, we lock coins exclusively to ensure that concurrent
funding flows don't end up double spending the same coin. Before this
commit, we would never unlock those coins, so our view of the available
coins would be skewed.
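As a rough illustration of the locking half of this (a sketch only: the
reservation and wallet types below are simplified stand-ins, though the
LockOutpoint/UnlockOutpoint methods mirror the wallet interface's
outpoint-locking calls):

package sketch

import "github.com/btcsuite/btcd/wire"

// outpointLocker is a simplified stand-in for the wallet's
// outpoint-locking surface.
type outpointLocker interface {
	LockOutpoint(o wire.OutPoint)
	UnlockOutpoint(o wire.OutPoint)
}

// reservation is a pared-down funding reservation: just the inputs it
// has exclusively claimed for a pending funding transaction.
type reservation struct {
	wallet outpointLocker
	inputs []wire.OutPoint
}

// lock removes the selected inputs from coin selection so a concurrent
// funding flow can't double spend them.
func (r *reservation) lock() {
	for _, op := range r.inputs {
		r.wallet.LockOutpoint(op)
	}
}

// Cancel releases the inputs back to the spendable set. The bug was
// that nothing called this when a peer disconnected mid-negotiation.
func (r *reservation) Cancel() {
	for _, op := range r.inputs {
		r.wallet.UnlockOutpoint(op)
	}
}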

The walletbalance call would show all the coins, but when adding the
--witness_only flag, some coins would be missing, or gone altogether.
This is because the former call scans the txstore and manually tallies
the amount of available coins, while the latter looks at the set of
available outputs, which is filtered based on which coins are locked.
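The divergence can be reproduced in miniature. In this sketch all types
and helpers are hypothetical, and the segwit script filtering the real
call also performs is ignored; the point is only that the two tallies
disagree by exactly the value of whatever a dead reservation left
locked:

package sketch

// utxo is a hypothetical wallet output: its value, plus whether a
// funding reservation has locked it away from coin selection.
type utxo struct {
	value  int64
	locked bool
}

// totalBalance mirrors the walletbalance path: it tallies every output
// in the tx store, paying no attention to locks.
func totalBalance(utxos []utxo) int64 {
	var sum int64
	for _, u := range utxos {
		sum += u.value
	}
	return sum
}

// spendableBalance mirrors the --witness_only path: it only counts
// outputs still eligible for coin selection, so leaked locks make
// coins silently disappear from the result.
func spendableBalance(utxos []utxo) int64 {
	var sum int64
	for _, u := range utxos {
		if !u.locked {
			sum += u.value
		}
	}
	return sum
}

If a stale reservation leaves a 1 BTC output locked, totalBalance still
reports it while spendableBalance comes up 1 BTC short, which is exactly
the walletbalance vs. --witness_only mismatch described above.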

To remedy this, we now ensure that when a peer disconnects, we wipe all
existing reservations for that peer, which returns any locked outputs
to the set of outputs available for funding flows.
Olaoluwa Osuntokun 2017-11-26 13:25:26 -06:00
parent 7016f5ba1e
commit c986e52da7
No known key found for this signature in database
GPG Key ID: 964EA263DD637C21
2 changed files with 43 additions and 2 deletions


@@ -575,6 +575,41 @@ func (f *fundingManager) PendingChannels() ([]*pendingChannel, error) {
 	return <-respChan, <-errChan
 }
 
+// CancelPeerReservations cancels all active reservations associated with the
+// passed node. This will ensure any outputs which have been pre-committed
+// (and thus locked from coin selection) are properly freed.
+func (f *fundingManager) CancelPeerReservations(nodePub [33]byte) {
+
+	fndgLog.Debugf("Cancelling all reservations for peer %x", nodePub[:])
+
+	f.resMtx.Lock()
+	defer f.resMtx.Unlock()
+
+	// We'll attempt to look up this node in the set of active
+	// reservations. If they don't have any, then there's no further work
+	// to be done.
+	nodeReservations, ok := f.activeReservations[nodePub]
+	if !ok {
+		fndgLog.Debugf("No active reservations for node: %x", nodePub[:])
+		return
+	}
+
+	// If they do have any active reservations, then we'll cancel all of
+	// them (which releases any locked UTXOs), and also delete each one
+	// from the reservation map.
+	for pendingID, resCtx := range nodeReservations {
+		if err := resCtx.reservation.Cancel(); err != nil {
+			fndgLog.Errorf("unable to cancel reservation for "+
+				"node=%x: %v", nodePub[:], err)
+		}
+
+		delete(nodeReservations, pendingID)
+	}
+
+	// Finally, we'll delete the node itself from the set of reservations.
+	delete(f.activeReservations, nodePub)
+}
+
 // failFundingFlow will fail the active funding flow with the target peer,
 // identified by its unique temporary channel ID. This method will send an error
 // to the remote peer, and also remove the reservation from our set of pending
@@ -1089,8 +1124,8 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) {
 	f.newChanBarriers[channelID] = make(chan struct{})
 	f.barrierMtx.Unlock()
 
-	fndgLog.Infof("sending signComplete for pendingID(%x) over ChannelPoint(%v)",
-		pendingChanID[:], fundingOut)
+	fndgLog.Infof("sending FundingSigned for pendingID(%x) over "+
+		"ChannelPoint(%v)", pendingChanID[:], fundingOut)
 
 	// With their signature for our version of the commitment transaction
 	// verified, we can now send over our signature to the remote peer.


@@ -1036,6 +1036,12 @@ func (s *server) peerTerminationWatcher(p *peer) {
 		return
 	}
 
+	// Next, we'll cancel all pending funding reservations with this node.
+	// If we tried to initiate any funding flows that haven't yet finished,
+	// then we need to unlock those committed outputs so they're still
+	// available for use.
+	s.fundingMgr.CancelPeerReservations(p.PubKey())
+
 	// Tell the switch to remove all links associated with this peer.
 	// Passing nil as the target link indicates that all links associated
 	// with this interface should be closed.