@@ -19,7 +19,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
 use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
-use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
+use crate::ln::channelmanager::{PaymentId, PaymentSendFailure, RAACommitmentOrder, RecipientOnionFields};
 use crate::ln::channel::AnnouncementSigsState;
 use crate::ln::msgs;
 use crate::ln::types::ChannelId;
@@ -3312,22 +3312,25 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 
 		reconnect_nodes(reconnect_args);
 
-		// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
-		// `PaymentForwarded` event will finally be released.
-		let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-		nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-
-		// If the A<->B channel was closed before we reload, we'll replay the claim against it on
-		// reload, causing the `PaymentForwarded` event to get replayed.
-		let evs = nodes[1].node.get_and_clear_pending_events();
-		assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
-		for ev in evs {
-			if let Event::PaymentForwarded { .. } = ev { }
-			else {
-				panic!();
-			}
+	}
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
+	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
 		}
+	}
 
+	if !close_chans_before_reload || close_only_a {
 		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
 		// will fly, removing the payment preimage from it.
 		check_added_monitors(&nodes[1], 1);
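Note on the hunk above: the `PaymentForwarded` check is hoisted out of the reload-only branch so it now runs unconditionally, while the B<->C monitor flush is newly gated on `if !close_chans_before_reload || close_only_a`. As a minimal standalone sketch of the assertion idiom the test uses, with a hypothetical simplified `Event` enum standing in for the real `lightning::events::Event`:

```rust
// A minimal sketch, not the real LDK `Event`: just enough variants to show the shape.
#[allow(dead_code)]
#[derive(Debug)]
enum Event {
    PaymentForwarded { total_fee_earned_msat: Option<u64> },
    PaymentSent,
}

// Mirrors the `if let ... { } else { panic!() }` loop in the diff: every pending
// event must be a `PaymentForwarded`, and the expected count depends on whether
// the claim was replayed on reload.
fn assert_all_payment_forwarded(evs: Vec<Event>, expected_len: usize) {
    assert_eq!(evs.len(), expected_len);
    for ev in evs {
        if let Event::PaymentForwarded { .. } = ev { }
        else {
            panic!("unexpected event: {:?}", ev);
        }
    }
}

fn main() {
    // Two events model the `close_chans_before_reload` case, where the claim is
    // replayed on reload and the event is therefore duplicated.
    let evs = vec![
        Event::PaymentForwarded { total_fee_earned_msat: Some(1_000) },
        Event::PaymentForwarded { total_fee_earned_msat: Some(1_000) },
    ];
    assert_all_payment_forwarded(evs, 2);
}
```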
@@ -3548,8 +3551,11 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
-	create_announced_chan_between_nodes(&nodes, 0, 1);
-	create_announced_chan_between_nodes(&nodes, 1, 2);
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
+	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let _chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
 	// Route a payment from A, through B, to C, then claim it on C. Replay the
 	// `update_fulfill_htlc` twice on B to check that B doesn't hang.
@@ -3561,7 +3567,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 
 	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
 	if hold_chan_a {
-		// The first update will be on the A <-> B channel, which we allow to complete.
+		// The first update will be on the A <-> B channel, which we optionally allow to complete.
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	}
 	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
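Note on the hunk above: with `hold_chan_a` set, the test primes node B's persister to report `ChannelMonitorUpdateStatus::InProgress`, so the A<->B preimage persistence stays pending until the test completes it explicitly. A minimal sketch of that mocking technique, assuming a hypothetical `MockPersister` and `UpdateStatus` rather than LDK's actual test harness types:

```rust
use std::cell::Cell;

// A hypothetical mock, not LDK's actual test persister: it returns whatever
// status the test primed for the next persistence call.
#[derive(Clone, Copy, Debug, PartialEq)]
enum UpdateStatus {
    Completed,
    InProgress,
}

struct MockPersister {
    next_ret: Cell<UpdateStatus>,
}

impl MockPersister {
    // Analogue of the harness's `set_update_ret` used in the diff.
    fn set_update_ret(&self, ret: UpdateStatus) {
        self.next_ret.set(ret);
    }
    fn persist_update(&self) -> UpdateStatus {
        self.next_ret.get()
    }
}

fn main() {
    let persister = MockPersister { next_ret: Cell::new(UpdateStatus::Completed) };
    // Priming `InProgress` parks the next monitor update: the channel treats it
    // as pending until the test signals completion out-of-band.
    persister.set_update_ret(UpdateStatus::InProgress);
    assert_eq!(persister.persist_update(), UpdateStatus::InProgress);
}
```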
@@ -3588,14 +3594,51 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-		let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[1], nodes[2], 1_000_000);
+		let (route, payment_hash_2, payment_preimage_2, payment_secret_2) =
+			get_route_and_payment_hash!(&nodes[1], nodes[2], 1_000_000);
 
+		// With the A<->B preimage persistence not yet complete, the B<->C channel is stuck
+		// waiting.
 		nodes[1].node.send_payment_with_route(route, payment_hash_2,
 			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors(&nodes[1], 0);
 
 		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+		// ...but once we complete the A<->B channel preimage persistence, the B<->C channel
+		// unlocks and we send both peers commitment updates.
+		let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		assert!(nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).is_ok());
+
+		let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(msg_events.len(), 2);
+		check_added_monitors(&nodes[1], 2);
+
+		let mut c_update = msg_events.iter()
+			.filter(|ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. } if *node_id == node_c_id))
+			.cloned().collect::<Vec<_>>();
+		let a_filtermap = |ev| if let MessageSendEvent::UpdateHTLCs { node_id, updates } = ev {
+			if node_id == node_a_id {
+				Some(updates)
+			} else {
+				None
+			}
+		} else {
+			None
+		};
+		let a_update = msg_events.drain(..).filter_map(|ev| a_filtermap(ev)).collect::<Vec<_>>();
+
+		assert_eq!(a_update.len(), 1);
+		assert_eq!(c_update.len(), 1);
+
+		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &a_update[0].update_fulfill_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false);
+		expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+
+		pass_along_path(&nodes[1], &[&nodes[2]], 1_000_000, payment_hash_2, Some(payment_secret_2), c_update.pop().unwrap(), true, None);
+		claim_payment(&nodes[1], &[&nodes[2]], payment_preimage_2);
 	}
 }
 
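Note on the final hunk: once the A<->B monitor update completes, node B emits two `UpdateHTLCs` events, and the test splits them by destination, cloning out the C-bound update with `filter` and draining the A-bound payload with `filter_map`. A standalone sketch of that split, with hypothetical simplified `MessageSendEvent` and `NodeId` types in place of the real LDK ones:

```rust
// Hypothetical simplified types, not the real LDK `MessageSendEvent`/node id.
#[derive(Clone, Debug, PartialEq, Eq)]
struct NodeId(u8);

#[allow(dead_code)]
#[derive(Clone, Debug)]
enum MessageSendEvent {
    UpdateHTLCs { node_id: NodeId, updates: Vec<u32> },
    Other,
}

fn main() {
    let node_a_id = NodeId(0);
    let node_c_id = NodeId(2);
    let mut msg_events = vec![
        MessageSendEvent::UpdateHTLCs { node_id: node_a_id.clone(), updates: vec![1] },
        MessageSendEvent::UpdateHTLCs { node_id: node_c_id.clone(), updates: vec![2] },
    ];

    // Clone the C-bound event out with `filter`, leaving `msg_events` untouched...
    let c_update = msg_events.iter()
        .filter(|ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. } if *node_id == node_c_id))
        .cloned().collect::<Vec<_>>();

    // ...then drain the vector, keeping only the payload of the A-bound event.
    let a_update = msg_events.drain(..)
        .filter_map(|ev| match ev {
            MessageSendEvent::UpdateHTLCs { node_id, updates } if node_id == node_a_id => Some(updates),
            _ => None,
        })
        .collect::<Vec<_>>();

    // Exactly one update went to each peer, as the test asserts.
    assert_eq!(a_update.len(), 1);
    assert_eq!(c_update.len(), 1);
    assert_eq!(a_update[0], vec![1]);
}
```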