Merge pull request #384 from cfromknecht/utxn-incubate-2nd-layer-htlcs

Utxo Nursery, adds outgoing 2nd-layer HTLC persistence
Olaoluwa Osuntokun 2017-11-16 19:00:40 -08:00 committed by GitHub
commit caec23a236
14 changed files with 3446 additions and 1367 deletions


@ -753,6 +753,13 @@ func testDisconnectingTargetPeer(net *networkHarness, t *harnessTest) {
// Check existing connection.
assertNumConnections(ctxb, t, net.Alice, net.Bob, 1)
// Mine enough blocks to clear the force closed outputs from the UTXO
// nursery.
if _, err := net.Miner.Node.Generate(4); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
time.Sleep(300 * time.Millisecond)
}
// testFundingPersistence is intended to ensure that the Funding Manager
@ -962,57 +969,248 @@ func testChannelBalance(net *networkHarness, t *harnessTest) {
closeChannelAndAssert(ctx, t, net, net.Alice, chanPoint, false)
}
// findForceClosedChannel searches a pending channel response for a particular
// channel, returning the force closed channel upon success.
func findForceClosedChannel(t *harnessTest,
pendingChanResp *lnrpc.PendingChannelResponse,
op *wire.OutPoint) *lnrpc.PendingChannelResponse_ForceClosedChannel {
var found bool
var forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel
for _, forceClose = range pendingChanResp.PendingForceClosingChannels {
if forceClose.Channel.ChannelPoint == op.String() {
found = true
break
}
}
if !found {
t.Fatalf("channel not marked as force closed")
}
return forceClose
}
func assertCommitmentMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
maturityHeight uint32, blocksTilMaturity int32) {
if forceClose.MaturityHeight != maturityHeight {
t.Fatalf("expected commitment maturity height to be %d, "+
"found %d instead", maturityHeight,
forceClose.MaturityHeight)
}
if forceClose.BlocksTilMaturity != blocksTilMaturity {
t.Fatalf("expected commitment blocks til maturity to be %d, "+
"found %d instead", blocksTilMaturity,
forceClose.BlocksTilMaturity)
}
}
// assertPendingChannelNumHtlcs verifies that a force closed channel has the
// proper number of htlcs.
func assertPendingChannelNumHtlcs(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
expectedNumHtlcs int) {
if len(forceClose.PendingHtlcs) != expectedNumHtlcs {
t.Fatalf("expected force closed channel to have %d pending "+
"htlcs, found %d instead", expectedNumHtlcs,
len(forceClose.PendingHtlcs))
}
}
// assertNumForceClosedChannels checks that a pending channel response has the
// expected number of force closed channels.
func assertNumForceClosedChannels(t *harnessTest,
pendingChanResp *lnrpc.PendingChannelResponse, expectedNumChans int) {
if len(pendingChanResp.PendingForceClosingChannels) != expectedNumChans {
t.Fatalf("expected to find %d force closed channels, got %d",
expectedNumChans,
len(pendingChanResp.PendingForceClosingChannels))
}
}
// assertPendingHtlcStageAndMaturity uniformly tests all pending htlcs
// belonging to a force closed channel, testing for the expected stage number,
// blocks til maturity, and the maturity height.
func assertPendingHtlcStageAndMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelResponse_ForceClosedChannel,
stage, maturityHeight uint32, blocksTillMaturity int32) {
for _, pendingHtlc := range forceClose.PendingHtlcs {
if pendingHtlc.Stage != stage {
t.Fatalf("expected pending htlc to be stage %d, "+
"found %d", stage, pendingHtlc.Stage)
}
if pendingHtlc.MaturityHeight != maturityHeight {
t.Fatalf("expected pending htlc maturity height to be "+
"%d, instead has %d", maturityHeight,
pendingHtlc.MaturityHeight)
}
if pendingHtlc.BlocksTilMaturity != blocksTillMaturity {
t.Fatalf("expected pending htlc blocks til maturity "+
"to be %d, instead has %d", blocksTillMaturity,
pendingHtlc.BlocksTilMaturity)
}
}
}
// testChannelForceClosure performs a test to exercise the behavior of "force"
// closing a channel or unilaterally broadcasting the latest local commitment
// state on-chain. The test creates a new channel between Alice and Carol, then
// force closes the channel after some cursory assertions. Within the test, a
// total of 3 + n transactions will be broadcast, representing the commitment
// transaction, a transaction sweeping the local CSV delayed output, a
// transaction sweeping the CSV delayed 2nd-layer htlc outputs, and n
// htlc timeout transactions, where n is the number of payments Alice attempted
// to send to Carol. This test includes several restarts to ensure that the
// transaction output states are persisted throughout the forced closure
// process.
//
// TODO(roasbeef): also add an unsettled HTLC before force closing.
func testChannelForceClosure(net *networkHarness, t *harnessTest) {
ctxb := context.Background()
const (
timeout = time.Duration(time.Second * 10)
chanAmt = btcutil.Amount(10e6)
pushAmt = btcutil.Amount(5e6)
paymentAmt = 100000
numInvoices = 6
)
// TODO(roasbeef): should check default value in config here
// instead, or make delay a param
defaultCSV := uint32(4)
defaultCLTV := defaultBitcoinForwardingPolicy.TimeLockDelta
// Since we'd like to test failure scenarios with outstanding htlcs,
// we'll introduce another node into our test network: Carol.
carol, err := net.NewNode([]string{"--debughtlc", "--hodlhtlc"})
if err != nil {
t.Fatalf("unable to create new nodes: %v", err)
}
// We must let Alice have an open channel before she can send a node
// announcement, so we open a channel with Carol.
if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil {
t.Fatalf("unable to connect alice to carol: %v", err)
}
// Before we start, obtain Carol's current wallet balance, we'll check
// to ensure that at the end of the force closure by Alice, Carol
// recognizes her new on-chain output.
carolBalReq := &lnrpc.WalletBalanceRequest{}
carolBalResp, err := carol.WalletBalance(ctxb, carolBalReq)
if err != nil {
t.Fatalf("unable to get carol's balance: %v", err)
}
carolStartingBalance := btcutil.Amount(carolBalResp.Balance * 1e8)
ctxt, _ := context.WithTimeout(ctxb, timeout)
chanPoint := openChannelAndAssert(ctxt, t, net, net.Alice, carol,
chanAmt, pushAmt)
// Wait for Alice to receive the channel edge from the funding manager.
ctxt, _ = context.WithTimeout(ctxb, timeout)
err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
if err != nil {
t.Fatalf("alice didn't see the alice->carol channel before "+
"timeout: %v", err)
}
// With the channel open, we'll create a few invoices for Carol that
// Alice will pay to in order to advance the state of the channel.
carolPaymentReqs := make([]string, numInvoices)
for i := 0; i < numInvoices; i++ {
preimage := bytes.Repeat([]byte{byte(128 - i)}, 32)
invoice := &lnrpc.Invoice{
Memo: "testing",
RPreimage: preimage,
Value: paymentAmt,
}
resp, err := carol.AddInvoice(ctxb, invoice)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
carolPaymentReqs[i] = resp.PaymentRequest
}
// As we'll be querying the state of Alice's channels frequently we'll
// create a closure helper function for the purpose.
getAliceChanInfo := func() (*lnrpc.ActiveChannel, error) {
req := &lnrpc.ListChannelsRequest{}
aliceChannelInfo, err := net.Alice.ListChannels(ctxb, req)
if err != nil {
return nil, err
}
if len(aliceChannelInfo.Channels) != 1 {
t.Fatalf("alice should only have a single channel, "+
"instead he has %v",
len(aliceChannelInfo.Channels))
}
return aliceChannelInfo.Channels[0], nil
}
// Open up a payment stream to Alice that we'll use to send payment to
// Carol. We also create a small helper function to send payments to
// Carol, consuming the payment hashes we generated above.
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
sendPayments := func(start, stop int) error {
for i := start; i < stop; i++ {
sendReq := &lnrpc.SendRequest{
PaymentRequest: carolPaymentReqs[i],
}
if err := alicePayStream.Send(sendReq); err != nil {
return err
}
}
return nil
}
// Fetch starting height of this test so we can compute the block
// heights at which we expect certain events to take place.
_, curHeight, err := net.Miner.Node.GetBestBlock()
if err != nil {
t.Fatalf("unable to get best block height")
}
// Using the current height of the chain, derive the relevant heights
// for incubating two-stage htlcs.
var (
startHeight = uint32(curHeight)
commCsvMaturityHeight = startHeight + 1 + defaultCSV
htlcExpiryHeight = startHeight + defaultCLTV
htlcCsvMaturityHeight = startHeight + defaultCLTV + 1 + defaultCSV
)
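// A worked example of the height derivation above, assuming the default
// 144-block CLTV delta and an illustrative starting height of 800 (the
// numbers are only for illustration, not part of the original test):
//   commCsvMaturityHeight = 800 + 1 + 4       = 805
//   htlcExpiryHeight      = 800 + 144         = 944
//   htlcCsvMaturityHeight = 800 + 144 + 1 + 4 = 949
// The two "+ 1" terms account for the blocks that confirm the commitment
// txn and the htlc timeout txns, respectively.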
// Send payments from Alice to Carol. Since Carol is in hodl mode,
// the htlc outputs should be left unsettled, and should be swept by the
// utxo nursery.
if err := sendPayments(0, numInvoices); err != nil {
t.Fatalf("unable to send payment: %v", err)
}
time.Sleep(200 * time.Millisecond)
aliceChan, err := getAliceChanInfo()
if err != nil {
t.Fatalf("unable to get alice's channel info: %v", err)
}
if aliceChan.NumUpdates == 0 {
t.Fatalf("alice should see at least one update to her channel")
}
// Now that the channel is open and we have unsettled htlcs, immediately
// execute a force closure of the channel. This will also assert that
// the commitment transaction was immediately broadcast in order to
// fulfill the force closure request.
_, closingTxID, err := net.CloseChannel(ctxb, net.Alice, chanPoint, true)
if err != nil {
t.Fatalf("unable to execute force channel closure: %v", err)
@ -1025,25 +1223,41 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
// Compute the outpoint of the channel, which we will use repeatedly to
// locate the pending channel information in the rpc responses.
txid, _ := chainhash.NewHash(chanPoint.FundingTxid[:])
op := wire.OutPoint{
Hash: *txid,
Index: chanPoint.OutputIndex,
}
forceClose := findForceClosedChannel(t, pendingChanResp, &op)
// Immediately after force closing, all of the funds should be in limbo,
// and the pending channels response should not indicate that any funds
// have been recovered.
if forceClose.LimboBalance == 0 {
t.Fatalf("all funds should still be in limbo")
}
if forceClose.RecoveredBalance != 0 {
t.Fatalf("no funds should yet be shown as recovered")
}
// The commitment transaction has not been confirmed, so we expect to
// see a maturity height and blocks til maturity of 0.
assertCommitmentMaturity(t, forceClose, 0, 0)
// Since all of our payments were sent with Carol in hodl mode, all of
// them should be unsettled and attached to the commitment transaction.
// They also should have been configured such that they are not filtered
// as dust. At this point, all pending htlcs should be in stage 1, with
// a timeout set to the default CLTV expiry (144) blocks above the
// starting height.
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight,
int32(defaultCLTV))
// The several restarts in this test are intended to ensure that when a
// channel is force-closed, the UTXO nursery has persisted the state of
@ -1071,23 +1285,31 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
duration := time.Millisecond * 300
time.Sleep(duration)
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
// Now that the channel has been force closed, it should now have the
// height and number of blocks to confirm populated.
assertCommitmentMaturity(t, forceClose, commCsvMaturityHeight,
int32(defaultCSV))
// Check that our pending htlcs have deducted the block confirming the
// commitment transaction from their blocks til maturity value.
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight,
int32(defaultCLTV)-1)
// None of our outputs have been swept, so they should all be in limbo.
if forceClose.LimboBalance == 0 {
t.Fatalf("all funds should still be in limbo")
}
if forceClose.RecoveredBalance != 0 {
t.Fatalf("no funds should yet be shown as recovered")
}
// The following restart is intended to ensure that outputs from the
@ -1106,13 +1328,40 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
t.Fatalf("unable to mine blocks: %v", err)
}
// The following restart checks to ensure that outputs in the
// kindergarten bucket are persisted while waiting for the required
// number of confirmations to be reported.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
// At this point, the nursery should show that the commitment output has
// 1 block left before its CSV delay expires. In total, we have mined
// exactly defaultCSV blocks, so the htlc outputs should also reflect
// that this many blocks have passed.
assertCommitmentMaturity(t, forceClose, commCsvMaturityHeight, 1)
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight,
int32(defaultCLTV)-int32(defaultCSV))
// All funds should still be shown in limbo.
if forceClose.LimboBalance == 0 {
t.Fatalf("all funds should still be in limbo")
}
if forceClose.RecoveredBalance != 0 {
t.Fatalf("no funds should yet be shown as recovered")
}
// Generate an additional block, which should cause the CSV delayed
// output from the commitment txn to expire.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
@ -1120,36 +1369,10 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
// At this point, the sweeping transaction should now be broadcast. So
// we fetch the node's mempool to ensure it has been properly
// broadcast.
sweepingTXID, err := waitForTxInMempool(net.Miner.Node, 3*time.Second)
if err != nil {
t.Fatalf("failed to get sweep tx from mempool: %v", err)
}
// Fetch the sweep transaction; all inputs it's spending should be from
// the commitment transaction which was broadcast on-chain.
@ -1165,7 +1388,13 @@ mempoolPoll:
}
}
// Restart Alice to ensure that she resumes watching the finalized
// commitment sweep txid.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
// Next, we mine an additional block which should include the sweep
// transaction as the input scripts and the sequence locks on the
// inputs should be properly met.
blockHash, err := net.Miner.Node.Generate(1)
@ -1179,28 +1408,309 @@ mempoolPoll:
assertTxInBlock(t, block, sweepTx.Hash())
// We sleep here to ensure that Alice has enough time to receive a
// confirmation for the commitment transaction, which we already
// asserted was in the last block.
time.Sleep(300 * time.Millisecond)
// Now that the commit output has been fully swept, check to see that
// the channel remains open for the pending htlc outputs.
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
// Check that the commitment transaction shows that we are still past
// the maturity of the commitment output.
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
assertCommitmentMaturity(t, forceClose, commCsvMaturityHeight, -1)
// Our pending htlcs should still be shown in the first stage, having
// deducted an additional two blocks from the relative maturity time.
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight,
int32(defaultCLTV)-int32(defaultCSV)-2)
// The htlc funds will still be shown as limbo, since they are still in
// their first stage. The commitment funds will have been recovered
// after the commit txn was included in the last block.
if forceClose.LimboBalance == 0 {
t.Fatalf("htlc funds should still be in limbo")
}
if forceClose.RecoveredBalance == 0 {
t.Fatalf("commitment funds should be shown as recovered")
}
// Compute the height preceding that which will cause the htlc CLTV
// timeouts will expire. The outputs entered at the same height as the
// output spending from the commitment txn, so we must deduct the number
// of blocks we have generated since adding it to the nursery, and take
// an additional block off so that we end up one block shy of the expiry
// height.
cltvHeightDelta := defaultCLTV - defaultCSV - 2 - 1
// Check that our htlcs are still expected to expire at the computed
// expiry height, and that the remaining number of blocks is equal to the delta
// we just computed, including an additional block to actually trigger
// the broadcast.
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight,
int32(cltvHeightDelta+1))
// Advance the blockchain until just before the CLTV expires, nothing
// exciting should have happened during this time.
blockHash, err = net.Miner.Node.Generate(cltvHeightDelta)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
time.Sleep(duration)
// We now restart Alice, to ensure that she will broadcast the presigned
// htlc timeout txns after the delay expires, even after experiencing a
// restart while waiting for the htlc outputs to incubate.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
time.Sleep(duration)
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
// Verify that commitment output was confirmed many moons ago.
assertCommitmentMaturity(t, forceClose, commCsvMaturityHeight,
-int32(cltvHeightDelta)-1)
// We should now be at the block just before the utxo nursery will
// attempt to broadcast the htlc timeout transactions.
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 1, htlcExpiryHeight, 1)
// Now that our commitment confirmation depth has been surpassed, we
// should now see a non-zero recovered balance. All htlc outputs are
// still left in limbo, so it should be non-zero as well.
if forceClose.LimboBalance == 0 {
t.Fatalf("htlc funds should still be in limbo")
}
if forceClose.RecoveredBalance == 0 {
t.Fatalf("commitment funds should not be in limbo")
}
// Now, generate the block which will cause Alice to broadcast the
// presigned htlc timeout txns.
blockHash, err = net.Miner.Node.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Since Alice had numInvoices (6) htlcs extended to Carol before force
// closing, we expect Alice to broadcast an htlc timeout txn for each
// one. Wait for them all to show up in the mempool.
htlcTxIDs, err := waitForNTxsInMempool(net.Miner.Node, numInvoices,
3*time.Second)
if err != nil {
t.Fatalf("unable to find htlc timeout txns in mempool: %v", err)
}
// Retrieve each htlc timeout txn from the mempool, and ensure it is
// well-formed. This entails verifying that each only spends from one
// output, and that that output is from the commitment txn.
for _, htlcTxID := range htlcTxIDs {
// Fetch the sweep transaction, all input it's spending should
// be from the commitment transaction which was broadcast
// on-chain.
htlcTx, err := net.Miner.Node.GetRawTransaction(htlcTxID)
if err != nil {
t.Fatalf("unable to fetch sweep tx: %v", err)
}
// Ensure the htlc transaction only has one input.
if len(htlcTx.MsgTx().TxIn) != 1 {
t.Fatalf("htlc transaction should only have one txin, "+
"has %d", len(htlcTx.MsgTx().TxIn))
}
// Ensure the htlc transaction is spending from the commitment
// transaction.
txIn := htlcTx.MsgTx().TxIn[0]
if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) {
t.Fatalf("htlc transaction not spending from commit "+
"tx %v, instead spending %v",
closingTxID, txIn.PreviousOutPoint)
}
}
// With the htlc timeout txns still in the mempool, we restart Alice to
// verify that she can resume watching the htlc txns she broadcasted
// before crashing.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
time.Sleep(duration)
// Generate a block that mines the htlc timeout txns. Doing so now
// activates the 2nd-stage CSV delayed outputs.
blockHash, err = net.Miner.Node.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// This sleep gives Alice enough time to move the crib outputs into the
// kindergarten bucket.
time.Sleep(duration)
// Alice is restarted here to ensure that she promptly moved the crib
// outputs to the kindergarten bucket after the htlc timeout txns were
// confirmed.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
// Advance the chain until just before the 2nd-layer CSV delays expire.
blockHash, err = net.Miner.Node.Generate(defaultCSV - 1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Restart Alice to ensure that she can recover from a failure before
// having graduated the htlc outputs in the kindergarten bucket.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
// as pending force closed.
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
assertCommitmentMaturity(t, forceClose, commCsvMaturityHeight,
-int32(cltvHeightDelta)-int32(defaultCSV)-2)
if forceClose.LimboBalance == 0 {
t.Fatalf("htlc funds should still be in limbo")
}
if forceClose.RecoveredBalance == 0 {
t.Fatalf("commitment funds should not be in limbo")
}
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
// Generate a block that causes Alice to sweep the htlc outputs in the
// kindergarten bucket.
blockHash, err = net.Miner.Node.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Wait for the single sweep txn to appear in the mempool.
htlcSweepTxID, err := waitForTxInMempool(net.Miner.Node, 3*time.Second)
if err != nil {
t.Fatalf("failed to get sweep tx from mempool: %v", err)
}
// Construct a map of the already confirmed htlc timeout txids, that
// will count the number of times each is spent by the sweep txn. We
// prepopulate it in this way so that we can later detect if we are
// spending from an output that was not a confirmed htlc timeout txn.
var htlcTxIDSet = make(map[chainhash.Hash]int)
for _, htlcTxID := range htlcTxIDs {
htlcTxIDSet[*htlcTxID] = 0
}
// Fetch the htlc sweep transaction from the mempool.
htlcSweepTx, err := net.Miner.Node.GetRawTransaction(htlcSweepTxID)
if err != nil {
t.Fatalf("unable to fetch sweep tx: %v", err)
}
// Ensure the htlc sweep transaction only has one input for each htlc
// Alice extended before force closing.
if len(htlcSweepTx.MsgTx().TxIn) != numInvoices {
t.Fatalf("htlc transaction should have %d txin, "+
"has %d", numInvoices, len(htlcSweepTx.MsgTx().TxIn))
}
// Ensure that each input spends from exactly one htlc timeout txn.
for _, txIn := range htlcSweepTx.MsgTx().TxIn {
outpoint := txIn.PreviousOutPoint.Hash
// Check that the input is a confirmed htlc timeout txn.
if _, ok := htlcTxIDSet[outpoint]; !ok {
t.Fatalf("htlc sweep output not spending from htlc "+
"tx, instead spending output %v", outpoint)
}
// Increment our count for how many times this output was spent.
htlcTxIDSet[outpoint]++
// Check that each is only spent once.
if htlcTxIDSet[outpoint] > 1 {
t.Fatalf("htlc sweep tx has multiple spends from "+
"outpoint %v", outpoint)
}
}
// The following restart checks to ensure that the nursery store is
// storing the txid of the previously broadcast htlc sweep txn, and that
// it begins watching that txid after restarting.
if err := net.RestartNode(net.Alice, nil); err != nil {
t.Fatalf("Node restart failed: %v", err)
}
time.Sleep(duration)
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
// as pending force closed.
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 1)
// All htlcs should show zero blocks until maturity, as evidenced by
// having checked the sweep transaction in the mempool.
forceClose = findForceClosedChannel(t, pendingChanResp, &op)
assertPendingChannelNumHtlcs(t, forceClose, numInvoices)
assertPendingHtlcStageAndMaturity(t, forceClose, 2,
htlcCsvMaturityHeight, 0)
// Generate the final block that sweeps all htlc funds into the user's
// wallet.
blockHash, err = net.Miner.Node.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
time.Sleep(3 * duration)
// Now that the channel has been fully swept, it should no longer show
// up within the pending channels RPC.
pendingChanResp, err = net.Alice.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
assertNumForceClosedChannels(t, pendingChanResp, 0)
// In addition to there being no pending channels, we verify that
// pending channels does not report any money still in limbo.
if pendingChanResp.TotalLimboBalance != 0 {
t.Fatalf("no user funds should be left in limbo after incubation")
}
// At this point, Carol should now be aware of her new immediately
// spendable on-chain balance, as it was Alice who broadcast the
// commitment transaction.
carolBalResp, err = carol.WalletBalance(ctxb, carolBalReq)
if err != nil {
t.Fatalf("unable to get carol's balance: %v", err)
}
carolExpectedBalance := carolStartingBalance + pushAmt
if btcutil.Amount(carolBalResp.Balance*1e8) < carolExpectedBalance {
t.Fatalf("carol's balance is incorrect: expected %v got %v",
carolExpectedBalance,
btcutil.Amount(carolBalResp.Balance*1e8))
}
}
@ -1984,6 +2494,36 @@ poll:
return txid, nil
}
// waitForNTxsInMempool polls until finding the desired number of transactions
// in the provided miner's mempool. An error is returned if this number is
// not met after the given timeout.
func waitForNTxsInMempool(miner *rpcclient.Client, n int,
timeout time.Duration) ([]*chainhash.Hash, error) {
breakTimeout := time.After(timeout)
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
var err error
var mempool []*chainhash.Hash
for {
select {
case <-breakTimeout:
return nil, fmt.Errorf("wanted %v, only found %v txs "+
"in mempool", n, len(mempool))
case <-ticker.C:
mempool, err = miner.GetRawMempool()
if err != nil {
return nil, err
}
if len(mempool) == n {
return mempool, nil
}
}
}
}
// testRevokedCloseRetributinPostBreachConf tests that Alice is able to carry
// out retribution in the event that she fails immediately after detecting
// Bob's breach txn in the mempool.
@ -3613,7 +4153,7 @@ func testBidirectionalAsyncPayments(net *networkHarness, t *harnessTest) {
const (
timeout = time.Duration(time.Second * 5)
paymentAmt = 1000
)
// First establish a channel with a capacity equal to the overall

File diff suppressed because it is too large


@ -564,7 +564,15 @@ func RegisterWalletUnlockerHandlerFromEndpoint(ctx context.Context, mux *runtime
// RegisterWalletUnlockerHandler registers the http handlers for service WalletUnlocker to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterWalletUnlockerHandlerClient(ctx, mux, NewWalletUnlockerClient(conn))
}
// RegisterWalletUnlockerHandler registers the http handlers for service WalletUnlocker to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "WalletUnlockerClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WalletUnlockerClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "WalletUnlockerClient" to call the correct interceptors.
func RegisterWalletUnlockerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WalletUnlockerClient) error {
mux.Handle("POST", pattern_WalletUnlocker_CreateWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { mux.Handle("POST", pattern_WalletUnlocker_CreateWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
@ -667,7 +675,15 @@ func RegisterLightningHandlerFromEndpoint(ctx context.Context, mux *runtime.Serv
// RegisterLightningHandler registers the http handlers for service Lightning to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterLightningHandlerClient(ctx, mux, NewLightningClient(conn))
}
// RegisterLightningHandler registers the http handlers for service Lightning to "mux".
// The handlers forward requests to the grpc endpoint over the given implementation of "LightningClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LightningClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LightningClient" to call the correct interceptors.
func RegisterLightningHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LightningClient) error {
mux.Handle("GET", pattern_Lightning_WalletBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { mux.Handle("GET", pattern_Lightning_WalletBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)


@ -840,6 +840,31 @@ message OpenStatusUpdate {
}
}
message PendingHTLC {
/// The direction within the channel that the htlc was sent
bool incoming = 1 [ json_name = "incoming" ];
/// The total value of the htlc
int64 amount = 2 [ json_name = "amount" ];
/// The final output to be swept back to the user's wallet
string outpoint = 3 [ json_name = "outpoint" ];
/// The next block height at which we can spend the current stage
uint32 maturity_height = 4 [ json_name = "maturity_height" ];
/**
The number of blocks remaining until the current stage can be swept.
Negative values indicate how many blocks have passed since becoming
mature.
*/
int32 blocks_til_maturity = 5 [ json_name = "blocks_til_maturity" ];
/// Indicates whether the htlc is in its first or second stage of recovery
uint32 stage = 6 [ json_name = "stage" ];
}
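For context, the following is a minimal sketch of how a client might consume these new fields through the generated Go bindings; the lnrpc and standard-library imports, the context, and the already-dialed LightningClient are assumed, and printForceCloses is a hypothetical helper rather than part of this change.

// printForceCloses is a hypothetical helper showing how the pending_htlcs
// fields surface through the PendingChannels RPC.
func printForceCloses(ctx context.Context, client lnrpc.LightningClient) error {
	resp, err := client.PendingChannels(ctx, &lnrpc.PendingChannelRequest{})
	if err != nil {
		return err
	}
	for _, fc := range resp.PendingForceClosingChannels {
		// Unswept funds remain in limbo; swept funds are reported as recovered.
		fmt.Printf("channel %s: limbo=%d recovered=%d\n",
			fc.Channel.ChannelPoint, fc.LimboBalance, fc.RecoveredBalance)
		for _, htlc := range fc.PendingHtlcs {
			// Stage 1 waits on the CLTV expiry, stage 2 on the CSV delay of
			// the 2nd-layer output.
			fmt.Printf("  htlc %s: stage=%d maturity=%d (%d blocks til maturity)\n",
				htlc.Outpoint, htlc.Stage, htlc.MaturityHeight,
				htlc.BlocksTilMaturity)
		}
	}
	return nil
}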
message PendingChannelRequest {}
message PendingChannelResponse {
message PendingChannel {
@ -894,8 +919,6 @@ message PendingChannelResponse {
/// The pending channel to be force closed
PendingChannel channel = 1 [ json_name = "channel" ];
/// The transaction id of the closing transaction
string closing_txid = 2 [ json_name = "closing_txid" ];
@ -905,8 +928,17 @@ message PendingChannelResponse {
/// The height at which funds can be sweeped into the wallet
uint32 maturity_height = 4 [ json_name = "maturity_height" ];
/*
Remaining # of blocks until the commitment output can be swept.
Negative values indicate how many blocks have passed since becoming
mature.
*/
int32 blocks_til_maturity = 5 [ json_name = "blocks_til_maturity" ];
/// The total value of funds successfully recovered from this channel
int64 recovered_balance = 6 [ json_name = "recovered_balance" ];
repeated PendingHTLC pending_htlcs = 8 [ json_name = "pending_htlcs" ];
}
/// The balance in satoshis encumbered in pending channels


@ -732,8 +732,19 @@
},
"blocks_til_maturity": {
"type": "integer",
"format": "int32",
"description": "Remaining # of blocks until the commitment output can be swept.\nNegative values indicate how many blocks have passed since becoming\nmature."
},
"recovered_balance": {
"type": "string",
"format": "int64",
"title": "/ The total value of funds successfully recovered from this channel"
},
"pending_htlcs": {
"type": "array",
"items": {
"$ref": "#/definitions/lnrpcPendingHTLC"
}
}
}
},
@ -1743,6 +1754,40 @@
}
}
},
"lnrpcPendingHTLC": {
"type": "object",
"properties": {
"incoming": {
"type": "boolean",
"format": "boolean",
"title": "/ The direction within the channel that the htlc was sent"
},
"amount": {
"type": "string",
"format": "int64",
"title": "/ The total value of the htlc"
},
"outpoint": {
"type": "string",
"title": "/ The final output to be swept back to the user's wallet"
},
"maturity_height": {
"type": "integer",
"format": "int64",
"title": "/ The next block height at which we can spend the current stage"
},
"blocks_til_maturity": {
"type": "integer",
"format": "int32",
"description": "*\nThe number of blocks remaining until the current stage can be swept.\nNegative values indicate how many blocks have passed since becoming\nmature."
},
"stage": {
"type": "integer",
"format": "int64",
"title": "/ Indicates whether the htlc is in its first or second stage of recovery"
}
}
},
"lnrpcPendingUpdate": { "lnrpcPendingUpdate": {
"type": "object", "type": "object",
"properties": { "properties": {


@ -809,6 +809,34 @@ func htlcSpendSuccess(signer Signer, signDesc *SignDescriptor,
return witnessStack, nil
}
// HtlcSpendSuccess exposes the public witness generation function for spending
// an HTLC success transaction, either due to an expiring time lock or having
// had the payment preimage.
// NOTE: The caller MUST set the txn version, sequence number, and sign
// descriptor's sig hash cache before invocation.
func HtlcSpendSuccess(signer Signer, signDesc *SignDescriptor,
sweepTx *wire.MsgTx) (wire.TxWitness, error) {
// With the proper sequence and version set, we'll now sign the timeout
// transaction using the passed sign descriptor. In order to generate
// a valid signature, the signDesc should be using the base delay
// public key, and the proper single tweak bytes.
sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc)
if err != nil {
return nil, err
}
// We set a zero as the first element of the witness stack (ignoring the
// witness script), in order to force execution to the second portion
// of the if clause.
witnessStack := wire.TxWitness(make([][]byte, 3))
witnessStack[0] = append(sweepSig, byte(txscript.SigHashAll))
witnessStack[1] = nil
witnessStack[2] = signDesc.WitnessScript
return witnessStack, nil
}
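Because the NOTE above leaves the transaction version, input sequence, and sighash cache to the caller, a hedged sketch of that caller-side wiring may help; signer, signDesc, htlcOutPoint, sweepPkScript, sweepAmt, and csvDelay (a uint32) are assumed to have been prepared elsewhere, and package qualifiers are omitted as they would be inside lnwallet.

// Sketch of the setup HtlcSpendSuccess expects from its caller.
sweepTx := wire.NewMsgTx(2) // version 2 is required for CSV (BIP 68/112)
sweepTx.AddTxIn(&wire.TxIn{
	PreviousOutPoint: htlcOutPoint,
	// For a block-based relative lock, the sequence is simply the CSV delay.
	Sequence: csvDelay,
})
sweepTx.AddTxOut(&wire.TxOut{
	PkScript: sweepPkScript,
	Value:    sweepAmt,
})
// The sign descriptor's sighash cache must reference the final sweep txn.
signDesc.SigHashes = txscript.NewTxSigHashes(sweepTx)
signDesc.InputIndex = 0
witness, err := HtlcSpendSuccess(signer, signDesc, sweepTx)
if err != nil {
	return err
}
sweepTx.TxIn[0].Witness = witness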
// htlcTimeoutRevoke spends a second-level HTLC output. This function is to be
// used by the sender or receiver of an HTLC to claim the HTLC after a revoked
// commitment transaction was broadcast.


@ -186,20 +186,29 @@ const (
// weight limits.
MaxHTLCNumber = 966
// ToLocalScriptSize 83 bytes
// - OP_IF: 1 byte
// - OP_DATA: 1 byte (revocationkey length)
// - revocationkey: 33 bytes
// - OP_CHECKSIG: 1 byte
// - OP_ELSE: 1 byte
// - OP_DATA: 1 byte (localkey length)
// - local_delay_key: 33 bytes
// - OP_CHECKSIG_VERIFY: 1 byte
// - OP_DATA: 1 byte (delay length)
// - delay: 8 bytes
// - OP_CHECKSEQUENCEVERIFY: 1 byte
// - OP_ENDIF: 1 byte
ToLocalScriptSize = 1 + 1 + 33 + 1 + 1 + 1 + 33 + 1 + 1 + 8 + 1 + 1
// ToLocalTimeoutWitnessSize x bytes
// - number_of_witness_elements: 1 byte
// - local_delay_sig_length: 1 byte
// - local_delay_sig: 73 bytes
// - zero_length: 1 byte
// - witness_script_length: 1 byte
// - witness_script (to_local_script)
ToLocalTimeoutWitnessSize = 1 + 1 + 73 + 1 + 1 + ToLocalScriptSize
// ToLocalPenaltyWitnessSize 160 bytes
// - number_of_witness_elements: 1 byte
@ -208,9 +217,9 @@ const (
// - one_length: 1 byte
// - witness_script_length: 1 byte
// - witness_script (to_local_script)
ToLocalPenaltyWitnessSize = 1 + 1 + 73 + 1 + 1 + ToLocalScriptSize
// AcceptedHtlcScriptSize 139 bytes
// - OP_DUP: 1 byte
// - OP_HASH160: 1 byte
// - OP_DATA: 1 byte (RIPEMD160(SHA256(revocationkey)) length)
@ -245,9 +254,22 @@ const (
// - OP_CHECKSIG: 1 byte
// - OP_ENDIF: 1 byte
// - OP_ENDIF: 1 byte
AcceptedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 7*1 + 20 + 4*1 +
33 + 5*1 + 4 + 5*1
// AcceptedHtlcSuccessWitnessSize 325 bytes
// - number_of_witness_elements: 1 byte
// - nil_length: 1 byte
// - sig_alice_length: 1 byte
// - sig_alice: 73 bytes
// - sig_bob_length: 1 byte
// - sig_bob: 73 bytes
// - preimage_length: 1 byte
// - preimage: 32 bytes
// - witness_script_length: 1 byte
// - witness_script (accepted_htlc_script)
AcceptedHtlcSuccessWitnessSize = 1 + 1 + 73 + 1 + 73 + 1 + 32 + 1 + AcceptedHtlcScriptSize
// AcceptedHtlcPenaltyWitnessSize 249 bytes
// - number_of_witness_elements: 1 byte
// - revocation_sig_length: 1 byte
@ -256,8 +278,7 @@ const (
// - revocation_key: 33 bytes
// - witness_script_length: 1 byte
// - witness_script (accepted_htlc_script)
AcceptedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + AcceptedHtlcScriptSize
// OfferedHtlcScriptSize 133 bytes
// - OP_DUP: 1 byte
@ -303,6 +324,18 @@ const (
// - witness_script (offered_htlc_script)
OfferedHtlcWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + OfferedHtlcScriptSize
// OfferedHtlcTimeoutWitnessSize 285 bytes
// - number_of_witness_elements: 1 byte
// - nil_length: 1 byte
// - sig_alice_length: 1 byte
// - sig_alice: 73 bytes
// - sig_bob_length: 1 byte
// - sig_bob: 73 bytes
// - nil_length: 1 byte
// - witness_script_length: 1 byte
// - witness_script (offered_htlc_script)
OfferedHtlcTimeoutWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 1 + OfferedHtlcScriptSize
// OfferedHtlcPenaltyWitnessSize 243 bytes
// - number_of_witness_elements: 1 byte
// - revocation_sig_length: 1 byte


@ -13,17 +13,18 @@ import (
type WitnessType uint16
const (
// CommitmentTimeLock is a witness that allows us to spend the output of
// a commitment transaction after a relative lock-time lockout.
CommitmentTimeLock WitnessType = 0
// CommitmentNoDelay is a witness that allows us to spend a settled
// no-delay output immediately on a counterparty's commitment
// transaction.
CommitmentNoDelay WitnessType = 1
// CommitmentRevoke is a witness that allows us to sweep the settled
// output of a malicious counterparty who broadcasts a revoked
// commitment transaction.
CommitmentRevoke WitnessType = 2
// HtlcOfferedRevoke is a witness that allows us to sweep an HTLC
@ -33,6 +34,15 @@ const (
// HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC
// output that we accepted from the counterparty.
HtlcAcceptedRevoke WitnessType = 4
// HtlcOfferedTimeout is a witness that allows us to sweep an HTLC
// output that we extended to a party, but was never fulfilled.
HtlcOfferedTimeout WitnessType = 5
// HtlcAcceptedSuccess is a witness that allows us to sweep an HTLC
// output that was offered to us, and for which we have a payment
// preimage.
HtlcAcceptedSuccess WitnessType = 6
)
// WitnessGenerator represents a function which is able to generate the final
@ -64,6 +74,8 @@ func (wt WitnessType) GenWitnessFunc(signer Signer,
return ReceiverHtlcSpendRevoke(signer, desc, tx)
case HtlcAcceptedRevoke:
return SenderHtlcSpendRevoke(signer, desc, tx)
case HtlcOfferedTimeout:
return HtlcSpendSuccess(signer, desc, tx)
default:
return nil, fmt.Errorf("unknown witness type: %v", wt)
}


@ -25,19 +25,18 @@ import (
// |
// | LAST GRADUATED + FINALIZED HEIGHTS
// |
// | Each nursery store tracks a "last graduated height", which records the
// | most recent block height for which the nursery store has successfully
// | graduated all outputs. It also tracks a "last finalized height", which
// | records the last block height that the nursery attempted to graduate.
// | If a finalized height has kindergarten outputs, the sweep txn for these
// | outputs will be stored in the height bucket. This ensures that the same
// | txid will be used after restarts. Otherwise, the nursery will be unable
// | to recover the txid of the kindergarten sweep transaction it has already
// | broadcast.
// |
// ├── last-finalized-height-key: <last-finalized-height>
// ├── last-graduated-height-key: <last-graduated-height>
// |
// | CHANNEL INDEX
// |
@ -142,13 +141,17 @@ type NurseryStore interface {
// nursery store finalized a kindergarten class.
LastFinalizedHeight() (uint32, error)
// GraduateHeight records the provided height as the last height for
// which the nursery store successfully graduated all outputs.
GraduateHeight(height uint32) error
// LastGraduatedHeight returns the last block height for which the
// nursery store successfully graduated all outputs.
LastGraduatedHeight() (uint32, error)
// HeightsBelowOrEqual returns the lowest non-empty heights in the
// height index, that exist at or below the provided upper bound.
HeightsBelowOrEqual(height uint32) ([]uint32, error)
// ForChanOutputs iterates over all outputs being incubated for a
// particular channel point. This method accepts a callback that allows
@ -179,9 +182,9 @@ var (
// last finalized height.
lastFinalizedHeightKey = []byte("last-finalized-height")
// lastGraduatedHeightKey is a static key used to retrieve the height of
// the last bucket that successfully graduated all outputs.
lastGraduatedHeightKey = []byte("last-graduated-height")
// channelIndexKey is a static key used to lookup the bucket containing
// all of the nursery's active channels.
@ -557,10 +560,10 @@ func (ns *nurseryStore) GraduateKinder(height uint32) error {
})
}
// FinalizeKinder accepts a block height and a finalized kindergarten sweep
// transaction, persisting the transaction at the appropriate height bucket. The
// nursery store's last finalized height is also updated with the provided
// height.
func (ns *nurseryStore) FinalizeKinder(height uint32,
finalTx *wire.MsgTx) error {
@ -569,17 +572,12 @@ func (ns *nurseryStore) FinalizeKinder(height uint32,
})
}
// GraduateHeight persists the provided height as the nursery store's last
// graduated height.
func (ns *nurseryStore) GraduateHeight(height uint32) error {
return ns.db.Update(func(tx *bolt.Tx) error {
return ns.putLastGraduatedHeight(tx, height)
})
}
@ -725,6 +723,45 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
return kids, nil return kids, nil
} }
// HeightsBelowOrEqual returns a slice of all non-empty heights in the height
// index at or below the provided upper bound.
func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
var activeHeights []uint32
err := ns.db.View(func(tx *bolt.Tx) error {
// Ensure that the chain bucket for this nursery store exists.
chainBucket := tx.Bucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Ensure that the height index has been properly initialized for this
// chain.
hghtIndex := chainBucket.Bucket(heightIndexKey)
if hghtIndex == nil {
return nil
}
// Serialize the provided height, which will serve as the upper bound
// of the cursor scan below; the lower bound is left as the zero height.
var lower, upper [4]byte
byteOrder.PutUint32(upper[:], height)
c := hghtIndex.Cursor()
for k, _ := c.Seek(lower[:]); bytes.Compare(k, upper[:]) <= 0 &&
len(k) == 4; k, _ = c.Next() {
activeHeights = append(activeHeights, byteOrder.Uint32(k))
}
return nil
})
if err != nil {
return nil, err
}
return activeHeights, nil
}
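To make the intended use of these new methods concrete, here is a minimal, hedged sketch of how a caller such as the utxo nursery might catch up after a restart. The regraduateHeights helper and its bestHeight parameter are hypothetical names used only for illustration; LastGraduatedHeight, HeightsBelowOrEqual, FetchClass, and GraduateHeight are the NurseryStore methods shown in this diff, and the actual sweeping logic is elided.

// regraduateHeights is a hypothetical recovery helper. It revisits every
// non-empty height bucket at or below the current best height, re-fetches
// the class maturing at that height, and records the height as graduated
// once its outputs have been handed off for sweeping.
func regraduateHeights(ns NurseryStore, bestHeight uint32) error {
    lastGrad, err := ns.LastGraduatedHeight()
    if err != nil {
        return err
    }

    // Only heights that still have entries in the height index are
    // returned, so fully swept heights are skipped automatically.
    heights, err := ns.HeightsBelowOrEqual(bestHeight)
    if err != nil {
        return err
    }

    for _, height := range heights {
        if height <= lastGrad {
            continue
        }

        // Fetch the finalized sweep txn along with the kindergarten and
        // crib outputs for this height. Broadcasting the sweep and
        // registering the crib outputs is elided here.
        finalTx, kndrOutputs, cribOutputs, err := ns.FetchClass(height)
        if err != nil {
            return err
        }
        _, _, _ = finalTx, kndrOutputs, cribOutputs

        // Record the height as graduated so it is not revisited on the
        // next restart.
        if err := ns.GraduateHeight(height); err != nil {
            return err
        }
    }

    return nil
}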
// ForChanOutputs iterates over all outputs being incubated for a particular // ForChanOutputs iterates over all outputs being incubated for a particular
// channel point. This method accepts a callback that allows the caller to // channel point. This method accepts a callback that allows the caller to
// process each key-value pair. The key will be a prefixed outpoint, and the // process each key-value pair. The key will be a prefixed outpoint, and the
@ -863,8 +900,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
} }
// LastFinalizedHeight returns the last block height for which the nursery // LastFinalizedHeight returns the last block height for which the nursery
// store has purged all persistent state. This occurs after a fixed interval // store has finalized a kindergarten class.
// for reorg safety.
func (ns *nurseryStore) LastFinalizedHeight() (uint32, error) { func (ns *nurseryStore) LastFinalizedHeight() (uint32, error) {
var lastFinalizedHeight uint32 var lastFinalizedHeight uint32
err := ns.db.View(func(tx *bolt.Tx) error { err := ns.db.View(func(tx *bolt.Tx) error {
@ -876,18 +912,17 @@ func (ns *nurseryStore) LastFinalizedHeight() (uint32, error) {
return lastFinalizedHeight, err return lastFinalizedHeight, err
} }
// LastPurgedHeight returns the last block height for which the nursery store // LastGraduatedHeight returns the last block height for which the nursery
// has purged all persistent state. This occurs after a fixed interval for reorg // store has successfully graduated all outputs.
// safety. func (ns *nurseryStore) LastGraduatedHeight() (uint32, error) {
func (ns *nurseryStore) LastPurgedHeight() (uint32, error) { var lastGraduatedHeight uint32
var lastPurgedHeight uint32
err := ns.db.View(func(tx *bolt.Tx) error { err := ns.db.View(func(tx *bolt.Tx) error {
var err error var err error
lastPurgedHeight, err = ns.getLastPurgedHeight(tx) lastGraduatedHeight, err = ns.getLastGraduatedHeight(tx)
return err return err
}) })
return lastPurgedHeight, err return lastGraduatedHeight, err
} }
// Helper Methods // Helper Methods
@ -1091,24 +1126,6 @@ func (ns *nurseryStore) getHeightBucket(tx *bolt.Tx,
return hghtBucket return hghtBucket
} }
// purgeHeightBucket ensures that the height bucket at the provided index is
// purged from the nursery store.
func (ns *nurseryStore) purgeHeightBucket(tx *bolt.Tx, height uint32) error {
// Ensure that the height bucket already exists.
_, hghtIndex, hghtBucket := ns.getHeightBucketPath(tx, height)
if hghtBucket == nil {
return nil
}
// Serialize the provided height, as this will form the name of the
// bucket.
var heightBytes [4]byte
byteOrder.PutUint32(heightBytes[:], height)
// Finally, delete the bucket in question.
return removeBucketIfExists(hghtIndex, heightBytes[:])
}
// createHeightChanBucket creates or retrieves an existing height-channel bucket // createHeightChanBucket creates or retrieves an existing height-channel bucket
// for the provided block height and channel point. This method will attempt to // for the provided block height and channel point. This method will attempt to
// instantiate all buckets along the path if required. // instantiate all buckets along the path if required.
@ -1365,29 +1382,29 @@ func (ns *nurseryStore) getFinalizedTxn(tx *bolt.Tx,
return txn, nil return txn, nil
} }
// getLastPurgedHeight is a helper method that retrieves the last height for // getLastGraduatedHeight is a helper method that retrieves the last height for
// which the database purged its persistent state. // which the database graduated all outputs successfully.
func (ns *nurseryStore) getLastPurgedHeight(tx *bolt.Tx) (uint32, error) { func (ns *nurseryStore) getLastGraduatedHeight(tx *bolt.Tx) (uint32, error) {
// Retrieve the chain bucket associated with the given nursery store. // Retrieve the chain bucket associated with the given nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey) chainBucket := tx.Bucket(ns.pfxChainKey)
if chainBucket == nil { if chainBucket == nil {
return 0, nil return 0, nil
} }
// Lookup the last purged height in the top-level chain bucket. // Lookup the last graduated height in the top-level chain bucket.
heightBytes := chainBucket.Get(lastPurgedHeightKey) heightBytes := chainBucket.Get(lastGraduatedHeightKey)
if heightBytes == nil { if heightBytes == nil {
// We have never purged before, return height 0. // We have never graduated before, return height 0.
return 0, nil return 0, nil
} }
// Otherwise, parse the bytes and return the last purged height. // Otherwise, parse the bytes and return the last graduated height.
return byteOrder.Uint32(heightBytes), nil return byteOrder.Uint32(heightBytes), nil
} }
// pubLastPurgedHeight is a helper method that writes the provided height under // putLastGraduatedHeight is a helper method that writes the provided height under
// the last purged height key. // the last graduated height key.
func (ns *nurseryStore) putLastPurgedHeight(tx *bolt.Tx, height uint32) error { func (ns *nurseryStore) putLastGraduatedHeight(tx *bolt.Tx, height uint32) error {
// Ensure that the chain bucket for this nursery store exists. // Ensure that the chain bucket for this nursery store exists.
chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey) chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey)
@ -1395,12 +1412,12 @@ func (ns *nurseryStore) putLastPurgedHeight(tx *bolt.Tx, height uint32) error {
return err return err
} }
// Serialize the provided last-purged height, and store it in the // Serialize the provided last-graduated height, and store it in the
// top-level chain bucket for this nursery store. // top-level chain bucket for this nursery store.
var lastHeightBytes [4]byte var lastHeightBytes [4]byte
byteOrder.PutUint32(lastHeightBytes[:], height) byteOrder.PutUint32(lastHeightBytes[:], height)
return chainBucket.Put(lastPurgedHeightKey, lastHeightBytes[:]) return chainBucket.Put(lastGraduatedHeightKey, lastHeightBytes[:])
} }
// errBucketNotEmpty signals that an attempt to prune a particular // errBucketNotEmpty signals that an attempt to prune a particular
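As a brief aside, these helpers depend on heights being stored as fixed-width big-endian keys, so that bolt's lexicographic key ordering matches numeric ordering; this is also what makes the bounded cursor scan in HeightsBelowOrEqual correct. A minimal sketch of that round trip, assuming byteOrder is binary.BigEndian as used elsewhere in this store:

package main

import "encoding/binary"

func main() {
    // Serialize a height the same way the nursery store keys its height
    // buckets and the last-graduated-height value.
    var heightBytes [4]byte
    binary.BigEndian.PutUint32(heightBytes[:], 1770001)

    // Deserializing recovers the original height; fixed-width big-endian
    // keys also sort in numeric order under bytes.Compare.
    height := binary.BigEndian.Uint32(heightBytes[:]) // 1770001
    _ = height
}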

754
nursery_store_test.go Normal file
View File

@ -0,0 +1,754 @@
// +build !rpctest
package main
import (
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/btcsuite/btclog"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/roasbeef/btcd/wire"
)
func init() {
// Disable logging to prevent panics caused by the global logger state.
channeldb.UseLogger(btclog.Disabled)
utxnLog = btclog.Disabled
}
// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
func makeTestDB() (*channeldb.DB, func(), error) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "channeldb")
if err != nil {
return nil, nil, err
}
// Next, create channeldb for the first time.
cdb, err := channeldb.Open(tempDirName)
if err != nil {
return nil, nil, err
}
cleanUp := func() {
cdb.Close()
os.RemoveAll(tempDirName)
}
return cdb, cleanUp, nil
}
type incubateTest struct {
nOutputs int
chanPoint *wire.OutPoint
commOutput *kidOutput
htlcOutputs []babyOutput
err error
}
// incubateTests holds the test vectors used to test the state transitions of
// outputs stored in the nursery store.
var incubateTests []incubateTest
// initIncubateTests instantiates the test vectors during package init, which
// properly captures the sign descriptors and public keys.
func initIncubateTests() {
incubateTests = []incubateTest{
{
nOutputs: 0,
chanPoint: &outPoints[3],
},
{
nOutputs: 1,
chanPoint: &outPoints[0],
commOutput: &kidOutputs[0],
},
{
nOutputs: 4,
chanPoint: &outPoints[0],
commOutput: &kidOutputs[0],
htlcOutputs: babyOutputs,
},
}
}
// TestNurseryStoreInit verifies basic properties of the nursery store before
// any modifying calls are made.
func TestNurseryStoreInit(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
defer cleanUp()
ns, err := newNurseryStore(&bitcoinGenesis, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
assertNumChannels(t, ns, 0)
assertNumPreschools(t, ns, 0)
assertLastFinalizedHeight(t, ns, 0)
assertLastGraduatedHeight(t, ns, 0)
}
// TestNurseryStoreIncubate tests the primary state transitions taken by outputs
// in the nursery store. The test is designed to walk both commitment and htlc
// outputs through the nursery store, verifying the properties of the
// intermediate states.
func TestNurseryStoreIncubate(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
defer cleanUp()
ns, err := newNurseryStore(&bitcoinGenesis, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
for i, test := range incubateTests {
// At the beginning of each test, we do not expect the
// nursery store to be tracking any outputs for this channel
// point.
assertNumChanOutputs(t, ns, test.chanPoint, 0)
// Nursery store should be completely empty.
assertNumChannels(t, ns, 0)
assertNumPreschools(t, ns, 0)
// Begin incubating all of the outputs provided in this test
// vector.
err = ns.Incubate(test.commOutput, test.htlcOutputs)
if err != nil {
t.Fatalf("unable to incubate outputs"+
"on test #%d: %v", i, err)
}
// Now that the outputs have been inserted, the nursery store
// should see exactly that many outputs under this channel
// point.
// NOTE: This property should remain intact after every state
// change until the channel has been completely removed.
assertNumChanOutputs(t, ns, test.chanPoint, test.nOutputs)
// If there were no outputs to be incubated, just check that
// no trace of the channel was left.
if test.nOutputs == 0 {
assertNumChannels(t, ns, 0)
continue
}
// Since the test vector has a non-zero number of outputs, we
// expect to see only the one channel from this test case.
assertNumChannels(t, ns, 1)
// The channel should be shown as immature, since none of the
// outputs should be graduated directly after being inserted.
// It should also be impossible to remove the channel while it
// remains immature.
// NOTE: These two properties should hold between every
// state change until all outputs have been fully graduated.
assertChannelMaturity(t, ns, test.chanPoint, false)
assertCanRemoveChannel(t, ns, test.chanPoint, false)
// Verify that the htlc outputs, if any, reside in the height
// index at their first-stage CLTV expiry height.
for _, htlcOutput := range test.htlcOutputs {
assertCribAtExpiryHeight(t, ns, &htlcOutput)
}
// If the commitment output was not dust, we will move it from
// the preschool bucket to the kindergarten bucket.
if test.commOutput != nil {
// If the commitment output was not considered dust, we
// should see exactly one preschool output in the
// nursery store.
assertNumPreschools(t, ns, 1)
// Now, move the commitment output to the kindergarten
// bucket.
err = ns.PreschoolToKinder(test.commOutput)
if err != test.err {
t.Fatalf("unable to move commitment output from "+
"pscl to kndr: %v", err)
}
// The total number of outputs for this channel should
// not have changed, and the kindergarten output should
// reside at its maturity height.
assertNumChanOutputs(t, ns, test.chanPoint, test.nOutputs)
assertKndrAtMaturityHeight(t, ns, test.commOutput)
// The total number of channels should not have changed.
assertNumChannels(t, ns, 1)
// Channel maturity and removal should reflect that the
// channel still has non-graduated outputs.
assertChannelMaturity(t, ns, test.chanPoint, false)
assertCanRemoveChannel(t, ns, test.chanPoint, false)
// Moving the preschool output should have no effect on
// the placement of crib outputs in the height index.
for _, htlcOutput := range test.htlcOutputs {
assertCribAtExpiryHeight(t, ns, &htlcOutput)
}
}
// At this point, we should see no more preschool outputs in the
// nursery store; the commitment output was either moved to the
// kindergarten bucket or never inserted at all.
assertNumPreschools(t, ns, 0)
// If the commitment output is non-dust, we will graduate the
// class at its maturity height.
if test.commOutput != nil {
// Compute the commitment output's maturity height, and
// proceed to graduate that class.
maturityHeight := test.commOutput.ConfHeight() +
test.commOutput.BlocksToMaturity()
err = ns.GraduateKinder(maturityHeight)
if err != nil {
t.Fatalf("unable to graduate kindergarten class at "+
"height %d: %v", maturityHeight, err)
}
// The total number of outputs for this channel should
// not have changed, but the kindergarten output should
// have been removed from its maturity height.
assertNumChanOutputs(t, ns, test.chanPoint, test.nOutputs)
assertKndrNotAtMaturityHeight(t, ns, test.commOutput)
// The total number of channels should not have changed.
assertNumChannels(t, ns, 1)
// Graduating the kindergarten class should have no effect on
// the placement of crib outputs in the height index.
for _, htlcOutput := range test.htlcOutputs {
assertCribAtExpiryHeight(t, ns, &htlcOutput)
}
}
// If there are any htlc outputs to incubate, we will walk them
// through their two-stage incubation process.
if len(test.htlcOutputs) > 0 {
for i, htlcOutput := range test.htlcOutputs {
// Begin by moving each htlc output from the
// crib to kindergarten state.
err = ns.CribToKinder(&htlcOutput)
if err != nil {
t.Fatalf("unable to move htlc output from "+
"crib to kndr: %v", err)
}
// Number of outputs for this channel should
// remain unchanged.
assertNumChanOutputs(t, ns, test.chanPoint,
test.nOutputs)
// If the output hasn't moved to kndr, it should
// be at its crib expiry height; otherwise it
// should have been removed.
for j := range test.htlcOutputs {
if j > i {
assertCribAtExpiryHeight(t, ns,
&test.htlcOutputs[j])
assertKndrNotAtMaturityHeight(t,
ns, &test.htlcOutputs[j].kidOutput)
} else {
assertCribNotAtExpiryHeight(t, ns,
&test.htlcOutputs[j])
assertKndrAtMaturityHeight(t,
ns, &test.htlcOutputs[j].kidOutput)
}
}
}
// Total number of channels in the nursery store should
// be the same, no outputs should be marked as
// preschool.
assertNumChannels(t, ns, 1)
assertNumPreschools(t, ns, 0)
// The channel should also not be mature, as we should
// still have outputs in kindergarten.
assertChannelMaturity(t, ns, test.chanPoint, false)
assertCanRemoveChannel(t, ns, test.chanPoint, false)
// Now, graduate each htlc kindergarten output,
// asserting that the number of outputs tracked for
// this channel remains invariant.
for _, htlcOutput := range test.htlcOutputs {
maturityHeight := htlcOutput.ConfHeight() +
htlcOutput.BlocksToMaturity()
err = ns.GraduateKinder(maturityHeight)
if err != nil {
t.Fatalf("unable to graduate htlc output "+
"from kndr to grad: %v", err)
}
assertNumChanOutputs(t, ns, test.chanPoint,
test.nOutputs)
}
}
// All outputs have been advanced through the nursery store, but
// no attempt has been made to clean up this channel. We expect
// to see the same channel remaining, and no kindergarten
// outputs.
assertNumChannels(t, ns, 1)
assertNumPreschools(t, ns, 0)
// Since all outputs have now been graduated, the nursery store
// should recognize that the channel is mature, and attempting
// to remove it should succeed.
assertChannelMaturity(t, ns, test.chanPoint, true)
assertCanRemoveChannel(t, ns, test.chanPoint, true)
// Now that the channel has been removed, there should be no
// channels remaining in the nursery store, and no outputs
// being tracked for this channel point.
assertNumChannels(t, ns, 0)
assertNumChanOutputs(t, ns, test.chanPoint, 0)
// If we had a commitment output, ensure it was removed from the
// height index.
if test.commOutput != nil {
assertKndrNotAtMaturityHeight(t, ns, test.commOutput)
}
// Check that all htlc outputs are no longer stored in their
// crib or kindergarten height buckets.
for _, htlcOutput := range test.htlcOutputs {
assertCribNotAtExpiryHeight(t, ns, &htlcOutput)
assertKndrNotAtMaturityHeight(t, ns, &htlcOutput.kidOutput)
}
// Lastly, there should be no lingering preschool outputs.
assertNumPreschools(t, ns, 0)
}
}
// TestNurseryStoreFinalize tests that kindergarten sweep transactions are
// properly persisted, and that the last finalized height is set
// accordingly.
func TestNurseryStoreFinalize(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
defer cleanUp()
ns, err := newNurseryStore(&bitcoinGenesis, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
kid := &kidOutputs[3]
// Compute the maturity height at which to enter the commitment output.
maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
// Since we haven't finalized before, we should see a last finalized
// height of 0.
assertLastFinalizedHeight(t, ns, 0)
// Begin incubating the commitment output, which will be placed in the
// preschool bucket.
err = ns.Incubate(kid, nil)
if err != nil {
t.Fatalf("unable to incubate commitment output: %v", err)
}
// Then move the commitment output to the kindergarten bucket, so that
// the output is registered in the height index.
err = ns.PreschoolToKinder(kid)
if err != nil {
t.Fatalf("unable to move pscl output to kndr: %v", err)
}
// We should still see a last finalized height of 0, since no classes
// have been finalized.
assertLastFinalizedHeight(t, ns, 0)
// Now, iteratively finalize all heights below the maturity height,
// ensuring that the last finalized height is properly persisted, and
// that the finalized transactions are all nil.
for i := 0; i < int(maturityHeight); i++ {
err = ns.FinalizeKinder(uint32(i), nil)
if err != nil {
t.Fatalf("unable to finalize kndr at height=%d: %v",
i, err)
}
assertLastFinalizedHeight(t, ns, uint32(i))
assertFinalizedTxn(t, ns, uint32(i), nil)
}
// As we have now finalized all heights below the maturity height, we
// should still see the commitment output in the kindergarten bucket at
// its maturity height.
assertKndrAtMaturityHeight(t, ns, kid)
// Now, finalize the kindergarten sweep transaction at the maturity
// height.
err = ns.FinalizeKinder(maturityHeight, timeoutTx)
if err != nil {
t.Fatalf("unable to finalize kndr at height=%d: %v",
maturityHeight, err)
}
// The nursery store should now see the maturity height finalized, and
// the finalized kindergarten sweep txn should be returned at this
// height.
assertLastFinalizedHeight(t, ns, maturityHeight)
assertFinalizedTxn(t, ns, maturityHeight, timeoutTx)
// Lastly, continue to finalize heights above the maturity height. Each
// should report having a nil finalized kindergarten sweep txn.
for i := maturityHeight + 1; i < maturityHeight+10; i++ {
err = ns.FinalizeKinder(uint32(i), nil)
if err != nil {
t.Fatalf("unable to finalize kndr at height=%d: %v",
i, err)
}
assertLastFinalizedHeight(t, ns, uint32(i))
assertFinalizedTxn(t, ns, uint32(i), nil)
}
}
// TestNurseryStoreGraduate verifies that the nursery store properly removes
// populated entries from the height index as heights are graduated, and that
// the last graduated height is set appropriately.
func TestNurseryStoreGraduate(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("unable to open channel db: %v", err)
}
defer cleanUp()
ns, err := newNurseryStore(&bitcoinGenesis, cdb)
if err != nil {
t.Fatalf("unable to open nursery store: %v", err)
}
kid := &kidOutputs[3]
// Compute the height at which this output will be inserted in the
// height index.
maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
// Since we have never graduated, the last graduated height should be 0.
assertLastGraduatedHeight(t, ns, 0)
// First, add a commitment output to the nursery store, which is
// initially inserted in the preschool bucket.
err = ns.Incubate(kid, nil)
if err != nil {
t.Fatalf("unable to incubate commitment output: %v", err)
}
// Then, move the commitment output to the kindergarten bucket, such
// that it resides in the height index at its maturity height.
err = ns.PreschoolToKinder(kid)
if err != nil {
t.Fatalf("unable to move pscl output to kndr: %v", err)
}
// Now, iteratively graduate all heights below the target maturity
// height, checking that each class is now empty, and that the last
// graduated height is set correctly.
for i := 0; i < int(maturityHeight); i++ {
err = ns.GraduateHeight(uint32(i))
if err != nil {
t.Fatalf("unable to purge height=%d: %v", i, err)
}
assertLastGraduatedHeight(t, ns, uint32(i))
assertHeightIsPurged(t, ns, uint32(i))
}
// Check that the commitment output currently exists at its maturity
// height.
assertKndrAtMaturityHeight(t, ns, kid)
// Finalize the kindergarten transaction, ensuring that it is a non-nil
// value.
err = ns.FinalizeKinder(maturityHeight, timeoutTx)
if err != nil {
t.Fatalf("unable to finalize kndr at height=%d: %v",
maturityHeight, err)
}
// Verify that the maturity height has now been finalized.
assertLastFinalizedHeight(t, ns, maturityHeight)
assertFinalizedTxn(t, ns, maturityHeight, timeoutTx)
// Finally, graduate the non-empty maturity height, and check that the
// returned class is empty.
err = ns.GraduateHeight(maturityHeight)
if err != nil {
t.Fatalf("unable to set graduated height=%d: %v", maturityHeight,
err)
}
err = ns.GraduateKinder(maturityHeight)
if err != nil {
t.Fatalf("unable to graduate kindergarten outputs at height=%d: "+
"%v", maturityHeight, err)
}
assertHeightIsPurged(t, ns, maturityHeight)
}
// assertNumChanOutputs checks that the channel bucket has the expected number
// of outputs.
func assertNumChanOutputs(t *testing.T, ns NurseryStore,
chanPoint *wire.OutPoint, expectedNum int) {
var count int
err := ns.ForChanOutputs(chanPoint, func([]byte, []byte) error {
count++
return nil
})
if count == 0 && err == ErrContractNotFound {
return
} else if err != nil {
t.Fatalf("unable to count num outputs for channel %v: %v",
chanPoint, err)
}
if count != expectedNum {
t.Fatalf("nursery store should have %d outputs, found %d",
expectedNum, count)
}
}
// assertLastFinalizedHeight checks that the nursery store's last finalized
// height matches the expected height.
func assertLastFinalizedHeight(t *testing.T, ns NurseryStore,
expected uint32) {
lfh, err := ns.LastFinalizedHeight()
if err != nil {
t.Fatalf("unable to get last finalized height: %v", err)
}
if lfh != expected {
t.Fatalf("expected last finalized height to be %d, got %d",
expected, lfh)
}
}
// assertLastGraduatedHeight checks that the nursery store's last graduated
// height matches the expected height.
func assertLastGraduatedHeight(t *testing.T, ns NurseryStore, expected uint32) {
lgh, err := ns.LastGraduatedHeight()
if err != nil {
t.Fatalf("unable to get last graduated height: %v", err)
}
if lgh != expected {
t.Fatalf("expected last graduated height to be %d, got %d",
expected, lgh)
}
}
// assertNumPreschools loads all preschool outputs and verifies their count
// matches the expected number.
func assertNumPreschools(t *testing.T, ns NurseryStore, expected int) {
psclOutputs, err := ns.FetchPreschools()
if err != nil {
t.Fatalf("unable to retrieve preschool outputs: %v", err)
}
if len(psclOutputs) != expected {
t.Fatalf("expected number of pscl outputs to be %d, got %v",
expected, len(psclOutputs))
}
}
// assertNumChannels checks that the nursery has a given number of active
// channels.
func assertNumChannels(t *testing.T, ns NurseryStore, expected int) {
channels, err := ns.ListChannels()
if err != nil {
t.Fatalf("unable to fetch channels from nursery store: %v",
err)
}
if len(channels) != expected {
t.Fatalf("expected number of active channels to be %d, got %d",
expected, len(channels))
}
}
// assertHeightIsPurged checks that the finalized transaction, kindergarten, and
// htlc outputs at a particular height are all nil.
func assertHeightIsPurged(t *testing.T, ns NurseryStore,
height uint32) {
finalTx, kndrOutputs, cribOutputs, err := ns.FetchClass(height)
if err != nil {
t.Fatalf("unable to retrieve class at height=%d: %v",
height, err)
}
if finalTx != nil {
t.Fatalf("height=%d not purged, final txn should be nil", height)
}
if kndrOutputs != nil {
t.Fatalf("height=%d not purged, kndr outputs should be nil", height)
}
if cribOutputs != nil {
t.Fatalf("height=%d not purged, crib outputs should be nil", height)
}
}
// assertCribAtExpiryHeight loads the class at the given height, and verifies
// that the given htlc output is one of the crib outputs.
func assertCribAtExpiryHeight(t *testing.T, ns NurseryStore,
htlcOutput *babyOutput) {
expiryHeight := htlcOutput.expiry
_, _, cribOutputs, err := ns.FetchClass(expiryHeight)
if err != nil {
t.Fatalf("unable to retrieve class at height=%d: %v",
expiryHeight, err)
}
for _, crib := range cribOutputs {
if reflect.DeepEqual(&crib, htlcOutput) {
return
}
}
t.Fatalf("could not find crib output %v at height %d",
htlcOutput.OutPoint(), expiryHeight)
}
// assertCribNotAtExpiryHeight loads the class at the given height, and verifies
// that the given htlc output is not one of the crib outputs.
func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStore,
htlcOutput *babyOutput) {
expiryHeight := htlcOutput.expiry
_, _, cribOutputs, err := ns.FetchClass(expiryHeight)
if err != nil {
t.Fatalf("unable to retrieve class at height %d: %v",
expiryHeight, err)
}
for _, crib := range cribOutputs {
if reflect.DeepEqual(&crib, htlcOutput) {
t.Fatalf("found find crib output %v at height %d",
htlcOutput.OutPoint(), expiryHeight)
}
}
}
// assertFinalizedTxn loads the class at the given height and compares the
// returned finalized txn to the expected transaction. It is safe to pass a
// nil expected transaction.
func assertFinalizedTxn(t *testing.T, ns NurseryStore, height uint32,
exFinalTx *wire.MsgTx) {
finalTx, _, _, err := ns.FetchClass(height)
if err != nil {
t.Fatalf("unable to fetch class at height=%d: %v", height,
err)
}
if !reflect.DeepEqual(finalTx, exFinalTx) {
t.Fatalf("expected finalized txn at height=%d "+
"to be %v, got %v", height, finalTx.TxHash(),
exFinalTx.TxHash())
}
}
// assertKndrAtMaturityHeight loads the class at the provided height and
// verifies that the provided kid output is one of the kindergarten outputs
// returned.
func assertKndrAtMaturityHeight(t *testing.T, ns NurseryStore,
kndrOutput *kidOutput) {
maturityHeight := kndrOutput.ConfHeight() +
kndrOutput.BlocksToMaturity()
_, kndrOutputs, _, err := ns.FetchClass(maturityHeight)
if err != nil {
t.Fatalf("unable to retrieve class at height %d: %v",
maturityHeight, err)
}
for _, kndr := range kndrOutputs {
if reflect.DeepEqual(&kndr, kndrOutput) {
return
}
}
t.Fatalf("could not find kndr output %v at height %d",
kndrOutput.OutPoint(), maturityHeight)
}
// assertKndrNotAtMaturityHeight loads the class at the provided height and
// verifies that the provided kid output is not one of the kindergarten outputs
// returned.
func assertKndrNotAtMaturityHeight(t *testing.T, ns NurseryStore,
kndrOutput *kidOutput) {
maturityHeight := kndrOutput.ConfHeight() +
kndrOutput.BlocksToMaturity()
_, kndrOutputs, _, err := ns.FetchClass(maturityHeight)
if err != nil {
t.Fatalf("unable to retrieve class at height %d: %v",
maturityHeight, err)
}
for _, kndr := range kndrOutputs {
if reflect.DeepEqual(&kndr, kndrOutput) {
t.Fatalf("found find kndr output %v at height %d",
kndrOutput.OutPoint(), maturityHeight)
}
}
}
// assertChannelMaturity queries the nursery store for the maturity of the given
// channel, failing if the result does not match the expectedMaturity.
func assertChannelMaturity(t *testing.T, ns NurseryStore,
chanPoint *wire.OutPoint, expectedMaturity bool) {
isMature, err := ns.IsMatureChannel(chanPoint)
if err != nil {
t.Fatalf("unable to fetch channel maturity: %v", err)
}
if isMature != expectedMaturity {
t.Fatalf("expected channel maturity: %v, actual: %v",
expectedMaturity, isMature)
}
}
// assertCanRemoveChannel tries to remove a channel from the nursery store,
// failing if the result does not match the expected canRemove.
func assertCanRemoveChannel(t *testing.T, ns NurseryStore,
chanPoint *wire.OutPoint, canRemove bool) {
err := ns.RemoveChannel(chanPoint)
if canRemove && err != nil {
t.Fatalf("expected nil when removing active channel, got: %v",
err)
} else if !canRemove && err != ErrImmatureChannel {
t.Fatalf("expected ErrImmatureChannel when removing "+
"active channel: %v", err)
}
}

View File

@ -942,7 +942,9 @@ func (r *rpcServer) forceCloseChan(channel *lnwallet.LightningChannel) (*chainha
// Send the closed channel summary over to the utxoNursery in order to // Send the closed channel summary over to the utxoNursery in order to
// have its outputs swept back into the wallet once they're mature. // have its outputs swept back into the wallet once they're mature.
r.server.utxoNursery.IncubateOutputs(closeSummary) if err := r.server.utxoNursery.IncubateOutputs(closeSummary); err != nil {
return nil, nil, err
}
return &txid, closeSummary, nil return &txid, closeSummary, nil
} }
@ -1241,13 +1243,37 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
// we can ultimately sweep the funds into the wallet. // we can ultimately sweep the funds into the wallet.
if nurseryInfo != nil { if nurseryInfo != nil {
forceClose.LimboBalance = int64(nurseryInfo.limboBalance) forceClose.LimboBalance = int64(nurseryInfo.limboBalance)
forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance)
forceClose.MaturityHeight = nurseryInfo.maturityHeight forceClose.MaturityHeight = nurseryInfo.maturityHeight
// If the transaction has been confirmed, then // If the transaction has been confirmed, then
// we can compute how many blocks it has left. // we can compute how many blocks it has left.
if forceClose.MaturityHeight != 0 { if forceClose.MaturityHeight != 0 {
forceClose.BlocksTilMaturity = (forceClose.MaturityHeight - forceClose.BlocksTilMaturity =
uint32(currentHeight)) int32(forceClose.MaturityHeight) -
currentHeight
}
for _, htlcReport := range nurseryInfo.htlcs {
// TODO(conner) set incoming flag
// appropriately after handling incoming
// incubation
htlc := &lnrpc.PendingHTLC{
Incoming: false,
Amount: int64(htlcReport.amount),
Outpoint: htlcReport.outpoint.String(),
MaturityHeight: htlcReport.maturityHeight,
Stage: htlcReport.stage,
}
if htlc.MaturityHeight != 0 {
htlc.BlocksTilMaturity =
int32(htlc.MaturityHeight) -
currentHeight
}
forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
htlc)
} }
resp.TotalLimboBalance += int64(nurseryInfo.limboBalance) resp.TotalLimboBalance += int64(nurseryInfo.limboBalance)
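To make the new signed arithmetic explicit, here is a small sketch of the computation the handler performs for both the commitment output and each pending HTLC. The blocksTilMaturity helper name is hypothetical; the formula mirrors the code above, and a result of zero or less simply means the output has already matured.

// blocksTilMaturity mirrors the RPC handler's computation: the result is
// signed, so once the current height reaches the maturity height the value
// drops to zero and then goes negative rather than wrapping around.
func blocksTilMaturity(maturityHeight uint32, currentHeight int32) int32 {
    if maturityHeight == 0 {
        // The closing transaction has not confirmed yet, so no maturity
        // height is known.
        return 0
    }
    return int32(maturityHeight) - currentHeight
}

// Example: maturityHeight=1000, currentHeight=958 yields 42 blocks remaining;
// at currentHeight=1005 the same output reports -5, i.e. already mature.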

View File

@ -136,8 +136,6 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
invoices: newInvoiceRegistry(chanDB), invoices: newInvoiceRegistry(chanDB),
utxoNursery: newUtxoNursery(chanDB, cc.chainNotifier, cc.wallet),
identityPriv: privKey, identityPriv: privKey,
nodeSigner: newNodeSigner(privKey), nodeSigner: newNodeSigner(privKey),
@ -306,6 +304,26 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
return nil, err return nil, err
} }
utxnStore, err := newNurseryStore(&bitcoinGenesis, chanDB)
if err != nil {
srvrLog.Errorf("unable to create nursery store: %v", err)
return nil, err
}
s.utxoNursery = newUtxoNursery(&NurseryConfig{
ChainIO: cc.chainIO,
ConfDepth: 1,
DB: chanDB,
Estimator: cc.feeEstimator,
GenSweepScript: func() ([]byte, error) {
return newSweepPkScript(cc.wallet)
},
Notifier: cc.chainNotifier,
PublishTransaction: cc.wallet.PublishTransaction,
Signer: cc.wallet.Cfg.Signer,
Store: utxnStore,
})
// Construct a closure that wraps the htlcswitch's CloseLink method. // Construct a closure that wraps the htlcswitch's CloseLink method.
closeLink := func(chanPoint *wire.OutPoint, closeLink := func(chanPoint *wire.OutPoint,
closureType htlcswitch.ChannelCloseType) { closureType htlcswitch.ChannelCloseType) {
@ -315,17 +333,17 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
} }
s.breachArbiter = newBreachArbiter(&BreachConfig{ s.breachArbiter = newBreachArbiter(&BreachConfig{
Signer: cc.wallet.Cfg.Signer,
DB: chanDB,
PublishTransaction: cc.wallet.PublishTransaction,
Notifier: cc.chainNotifier,
ChainIO: s.cc.chainIO, ChainIO: s.cc.chainIO,
Estimator: s.cc.feeEstimator,
CloseLink: closeLink, CloseLink: closeLink,
Store: newRetributionStore(chanDB), DB: chanDB,
Estimator: s.cc.feeEstimator,
GenSweepScript: func() ([]byte, error) { GenSweepScript: func() ([]byte, error) {
return newSweepPkScript(cc.wallet) return newSweepPkScript(cc.wallet)
}, },
Notifier: cc.chainNotifier,
PublishTransaction: cc.wallet.PublishTransaction,
Signer: cc.wallet.Cfg.Signer,
Store: newRetributionStore(chanDB),
}) })
// Create the connection manager which will be responsible for // Create the connection manager which will be responsible for

File diff suppressed because it is too large

View File

@ -40,11 +40,38 @@ var (
Hash: [chainhash.HashSize]byte{ Hash: [chainhash.HashSize]byte{
0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, 0x0d, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
}, },
Index: 23, Index: 23,
}, },
{
Hash: [chainhash.HashSize]byte{
0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
0x0d, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
},
Index: 30,
},
{
Hash: [chainhash.HashSize]byte{
0x0d, 0xe7, 0x95, 0xe4, 0xfc, 0xd2, 0xc6, 0xda,
0xb7, 0x25, 0xb8, 0x4d, 0x63, 0x59, 0xe6, 0x96,
0x31, 0x13, 0xa1, 0x17, 0x81, 0xb6, 0x37, 0xd8,
0x1e, 0x0b, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
},
Index: 2,
},
{
Hash: [chainhash.HashSize]byte{
0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
0x51, 0xb6, 0x37, 0xd8, 0x1f, 0x0b, 0x4c, 0xf9,
0x9e, 0xc5, 0x8c, 0xe9, 0xfc, 0xd2, 0xc6, 0xda,
0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
},
Index: 9,
},
} }
keys = [][]byte{ keys = [][]byte{
@ -170,50 +197,61 @@ var (
{ {
breachedOutput: breachedOutput{ breachedOutput: breachedOutput{
amt: btcutil.Amount(13e7), amt: btcutil.Amount(13e7),
outpoint: outPoints[0], outpoint: outPoints[1],
witnessType: lnwallet.CommitmentTimeLock, witnessType: lnwallet.CommitmentTimeLock,
}, },
originChanPoint: outPoints[1], originChanPoint: outPoints[0],
blocksToMaturity: uint32(100), blocksToMaturity: uint32(42),
confHeight: uint32(1770001), confHeight: uint32(1000),
}, },
{ {
breachedOutput: breachedOutput{ breachedOutput: breachedOutput{
amt: btcutil.Amount(24e7), amt: btcutil.Amount(24e7),
outpoint: outPoints[1], outpoint: outPoints[2],
witnessType: lnwallet.CommitmentTimeLock, witnessType: lnwallet.CommitmentTimeLock,
}, },
originChanPoint: outPoints[0], originChanPoint: outPoints[0],
blocksToMaturity: uint32(50), blocksToMaturity: uint32(42),
confHeight: uint32(22342321), confHeight: uint32(1000),
}, },
{ {
breachedOutput: breachedOutput{ breachedOutput: breachedOutput{
amt: btcutil.Amount(2e5), amt: btcutil.Amount(2e5),
outpoint: outPoints[2], outpoint: outPoints[3],
witnessType: lnwallet.CommitmentTimeLock, witnessType: lnwallet.CommitmentTimeLock,
}, },
originChanPoint: outPoints[2], originChanPoint: outPoints[0],
blocksToMaturity: uint32(12), blocksToMaturity: uint32(28),
confHeight: uint32(34241), confHeight: uint32(500),
},
{
breachedOutput: breachedOutput{
amt: btcutil.Amount(10e6),
outpoint: outPoints[4],
witnessType: lnwallet.CommitmentTimeLock,
},
originChanPoint: outPoints[0],
blocksToMaturity: uint32(28),
confHeight: uint32(500),
}, },
} }
babyOutputs = []babyOutput{ babyOutputs = []babyOutput{
{ {
kidOutput: kidOutputs[0], kidOutput: kidOutputs[1],
expiry: 3829, expiry: 3829,
timeoutTx: timeoutTx, timeoutTx: timeoutTx,
}, },
{ {
kidOutput: kidOutputs[1], kidOutput: kidOutputs[2],
expiry: 85903, expiry: 4,
timeoutTx: timeoutTx, timeoutTx: timeoutTx,
}, },
{ {
kidOutput: kidOutputs[2], kidOutput: kidOutputs[3],
expiry: 4, expiry: 4,
timeoutTx: timeoutTx, timeoutTx: timeoutTx,
}, },
@ -283,32 +321,18 @@ func init() {
} }
signDescriptors[i].PubKey = pk signDescriptors[i].PubKey = pk
kidOutputs[i].signDesc = signDescriptors[i]
babyOutputs[i].kidOutput.signDesc = signDescriptors[i]
} }
}
func TestDeserializeKidsList(t *testing.T) {
var b bytes.Buffer
for _, kid := range kidOutputs {
if err := kid.Encode(&b); err != nil {
t.Fatalf("unable to serialize and add kid output to "+
"list: %v", err)
}
}
kidList, err := deserializeKidList(&b)
if err != nil {
t.Fatalf("unable to deserialize kid output list: %v", err)
}
for i := range kidOutputs { for i := range kidOutputs {
if !reflect.DeepEqual(&kidOutputs[i], kidList[i]) { isd := i % len(signDescriptors)
t.Fatalf("kidOutputs don't match \n%+v\n%+v", kidOutputs[i].signDesc = signDescriptors[isd]
&kidOutputs[i], kidList[i])
} }
for i := range babyOutputs {
isd := i % len(signDescriptors)
babyOutputs[i].kidOutput.signDesc = signDescriptors[isd]
} }
initIncubateTests()
} }
func TestKidOutputSerialization(t *testing.T) { func TestKidOutputSerialization(t *testing.T) {