@@ -2270,6 +2270,138 @@ fn channel_reserve_in_flight_removes() {
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
 }
 
+enum PostFailBackAction {
+	TimeoutOnChain,
+	ClaimOnChain,
+	FailOffChain,
+	ClaimOffChain,
+}
+
+#[test]
+fn test_fail_back_before_backwards_timeout() {
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain);
+	do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain);
+}
+
+fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) {
+	// Test that we fail an HTLC upstream if we are still waiting for confirmation downstream
+	// just before the upstream timeout expires
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	for node in nodes.iter() {
+		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
+	}
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+	// Start every node on the same block height to make reasoning about timeouts easier
+	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+
+	// Force close the B<->C channel by timing out the HTLC
+	let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1;
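+	// (the downstream HTLC expires TEST_FINAL_CLTV blocks out; the extra
+	// LATENCY_GRACE_PERIOD_BLOCKS + 1 takes nodes[1] past its grace period, so its
+	// ChannelMonitor goes on chain)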
+	connect_blocks(&nodes[1], timeout_blocks);
+	let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
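+	// node_1_txn[0] is B's commitment transaction and node_1_txn[1] the HTLC-timeout
+	// transaction spending it; both get mined in the TimeoutOnChain case below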
+	check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000);
+	check_closed_broadcast(&nodes[1], 1, true);
+	check_added_monitors(&nodes[1], 1);
+
+	// After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid
+	// the channel force-closing. Note that we already connected `TEST_FINAL_CLTV +
+	// LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which
+	// is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`).
+	let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2;
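+	// In total we will have connected (TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1)
+	// + (MIN_CLTV_EXPIRY_DELTA - 2 * LATENCY_GRACE_PERIOD_BLOCKS) blocks, leaving nodes[1]
+	// roughly LATENCY_GRACE_PERIOD_BLOCKS short of the upstream expiry: inside the
+	// fail-back window, but before the A<->B channel would be force-closed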
+	connect_blocks(&nodes[1], upstream_timeout_blocks);
+
+	// Connect blocks for nodes[0] to make sure they don't go on-chain
+	connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks);
+
+	// Check that nodes[1] fails the HTLC upstream
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+		vec![HTLCDestination::NextHopChannel {
+			node_id: Some(nodes[2].node.get_our_node_id()),
+			channel_id: chan_2.2
+		}]);
+	check_added_monitors!(nodes[1], 1);
+	let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
+
+	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
+	expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+		PaymentFailedConditions::new().blamed_chan_closed(true));
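+	// (the failure blames a closed channel, so the payer knows not to route through
+	// B<->C again)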
+
+	// Make sure we handle possible duplicate fails or extra messages after failing back
+	match post_fail_back_action {
+		PostFailBackAction::TimeoutOnChain => {
+			// Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again
+			mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
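+			// (on-chain claims are only considered final once they are ANTI_REORG_DELAY
+			// confirmations deep)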
+			// Expect handling another fail back event, but the HTLC is already gone
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+				vec![HTLCDestination::NextHopChannel {
+					node_id: Some(nodes[2].node.get_our_node_id()),
+					channel_id: chan_2.2
+				}]);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOnChain => {
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+
+			connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
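+			// (nodes[2] goes on chain to claim with the preimage once the HTLC is within
+			// CLTV_CLAIM_BUFFER blocks of expiring)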
+			let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
+			check_closed_broadcast!(nodes[2], true);
+			check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000);
+			check_added_monitors!(nodes[2], 1);
+
+			mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment
+			mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success
+			connect_blocks(&nodes[1], ANTI_REORG_DELAY);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::FailOffChain => {
+			nodes[2].node.fail_htlc_backwards(&payment_hash);
+			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2],
+				vec![HTLCDestination::FailedPayment { payment_hash }]);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let update_fail = commitment_update.update_fail_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail);
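+			// nodes[1] already failed the HTLC back and force-closed chan_2, so C's late
+			// update_fail can only draw an error for the now-closed channel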
+			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+		PostFailBackAction::ClaimOffChain => {
+			nodes[2].node.claim_funds(payment_preimage);
+			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
+			check_added_monitors!(nodes[2], 1);
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone();
+
+			nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill);
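+			// As above, chan_2 is already closed from nodes[1]'s point of view, so a late
+			// update_fulfill likewise only draws an error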
+			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			assert_eq!(err_msg.channel_id, chan_2.2);
+			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+		},
+	};
+}
+
 #[test]
 fn channel_monitor_network_test() {
 	// Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -2374,7 +2506,7 @@ fn channel_monitor_network_test() {
 	let node2_commitment_txid;
 	{
 		let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::NONE);
-		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1);
+		connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS);
 		test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
 		node2_commitment_txid = node_txn[0].compute_txid();
@@ -3312,8 +3444,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
 	// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
 	mine_transaction(&nodes[1], &commitment_tx[0]);
-	check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false
-		, [nodes[2].node.get_our_node_id()], 100000);
+	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
+		&[nodes[2].node.get_our_node_id()], 100000);
 	let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
 		if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal {
 			Some(*claimable_height)
@@ -9774,6 +9906,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
 
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
@@ -9789,7 +9923,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 
 	let conf_height = nodes[1].best_block_info().1;
 	if !test_height_before_timelock {
-		connect_blocks(&nodes[1], 24 * 6);
+		connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS);
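+		// (enough blocks that the HTLC's timelock has expired by now, so confirming the
+		// commitment below, backdated to conf_height, immediately produces a broadcast)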
 	}
 	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
 		&nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
@@ -9808,10 +9942,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 			&spending_txn[0]
 		};
 		check_spends!(htlc_tx, node_txn[0]);
-		// We should also generate a SpendableOutputs event with the to_self output (as its
-		// timelock is up).
-		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
-		assert_eq!(descriptor_spend_txn.len(), 1);
 
 		// If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
 		// should immediately fail-backwards the HTLC to the previous hop, without waiting for an
@@ -9830,6 +9960,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 		expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
+
+		// We should also generate a SpendableOutputs event with the to_self output (once the
+		// timelock is up).
+		connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
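+		// (the to_self output is CSV-locked for BREAKDOWN_TIMEOUT blocks after the commitment
+		// confirmed; TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS of those were connected above)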
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);
+
+		// When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to
+		// avoid the A<->B channel closing (even though it already has). This will generate a
+		// spurious HTLCHandlingFailed event.
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+			vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
 	}
 }