Skip to content

Commit 5ed3f25

Browse files
committed
Add ChannelManager methods to force close without broadcasting
If a user restores from a backup that they know is stale, they'd like to force-close all of their channels (or at least the ones they know are stale) *without* broadcasting the latest state, asking their peers to do so instead. This simply adds methods to do so, renaming the existing `force_close_channel` and `force_close_all_channels` methods to disambiguate further.
1 parent 3676a05 commit 5ed3f25

File tree

9 files changed

+64
-39
lines changed

9 files changed

+64
-39
lines changed

fuzz/src/full_stack.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -252,7 +252,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
252252
}
253253

254254
// Force all channels onto the chain (and time out claim txn)
255-
self.manager.force_close_all_channels();
255+
self.manager.force_close_all_channels_broadcasting_latest_txn();
256256
}
257257
}
258258
}
@@ -624,7 +624,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
624624
let channel_id = get_slice!(1)[0] as usize;
625625
if channel_id >= channels.len() { return; }
626626
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
627-
channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
627+
channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
628628
},
629629
// 15 is above
630630
_ => return,

lightning-background-processor/src/lib.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -744,7 +744,7 @@ mod tests {
744744
}
745745

746746
// Force-close the channel.
747-
nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
747+
nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
748748

749749
// Check that the force-close updates are persisted.
750750
check_persisted_data!(nodes[0].node, filepath.clone());
@@ -880,7 +880,7 @@ mod tests {
880880
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
881881

882882
// Force close the channel and check that the SpendableOutputs event was handled.
883-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
883+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
884884
let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
885885
confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
886886
let event = receiver

lightning-persister/src/lib.rs

+3-3
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ mod tests {
213213

214214
// Force close because cooperative close doesn't result in any persisted
215215
// updates.
216-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
216+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
217217
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
218218
check_closed_broadcast!(nodes[0], true);
219219
check_added_monitors!(nodes[0], 1);
@@ -247,7 +247,7 @@ mod tests {
247247
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
248248
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
249249
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
250-
nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
250+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
251251
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
252252
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
253253
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -286,7 +286,7 @@ mod tests {
286286
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
287287
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
288288
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
289-
nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
289+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
290290
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
291291
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
292292
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();

lightning/src/ln/chanmon_update_fail_tests.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
226226
}
227227

228228
// ...and make sure we can force-close a frozen channel
229-
nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
229+
nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
230230
check_added_monitors!(nodes[0], 1);
231231
check_closed_broadcast!(nodes[0], true);
232232

lightning/src/ln/channelmanager.rs

+37-12
Original file line numberDiff line numberDiff line change
@@ -1945,7 +1945,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19451945

19461946
/// `peer_msg` should be set when we receive a message from a peer, but not set when the
19471947
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
1948-
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
1948+
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
1949+
-> Result<PublicKey, APIError> {
19491950
let mut chan = {
19501951
let mut channel_state_lock = self.channel_state.lock().unwrap();
19511952
let channel_state = &mut *channel_state_lock;
@@ -1964,7 +1965,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19641965
}
19651966
};
19661967
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
1967-
self.finish_force_close_channel(chan.force_shutdown(true));
1968+
self.finish_force_close_channel(chan.force_shutdown(broadcast));
19681969
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
19691970
let mut channel_state = self.channel_state.lock().unwrap();
19701971
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1975,13 +1976,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19751976
Ok(chan.get_counterparty_node_id())
19761977
}
19771978

1978-
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
1979-
/// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
1980-
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
1981-
/// channel.
1982-
pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
1979+
fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
19831980
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1984-
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
1981+
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
19851982
Ok(counterparty_node_id) => {
19861983
self.channel_state.lock().unwrap().pending_msg_events.push(
19871984
events::MessageSendEvent::HandleError {
@@ -1997,11 +1994,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19971994
}
19981995
}
19991996

1997+
/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
1998+
/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
1999+
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
2000+
/// channel.
2001+
pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
2002+
-> Result<(), APIError> {
2003+
self.force_close_sending_error(channel_id, counterparty_node_id, true)
2004+
}
2005+
2006+
/// Force closes a channel, rejecting new HTLCs on the given channel but skipping broadcasting
2007+
/// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
2008+
/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
2009+
///
2010+
/// You can always get the latest local transaction(s) to broadcast from
2011+
/// [`ChannelMonitor::get_latest_holder_commitment_txn`].
2012+
pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
2013+
-> Result<(), APIError> {
2014+
self.force_close_sending_error(channel_id, counterparty_node_id, false)
2015+
}
2016+
20002017
/// Force close all channels, immediately broadcasting the latest local commitment transaction
20012018
/// for each to the chain and rejecting new HTLCs on each.
2002-
pub fn force_close_all_channels(&self) {
2019+
pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
2020+
for chan in self.list_channels() {
2021+
let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
2022+
}
2023+
}
2024+
2025+
/// Force close all channels, rejecting new HTLCs on each, but without broadcasting the latest
2026+
/// local transaction(s).
2027+
pub fn force_close_all_channels_without_broadcasting_txn(&self) {
20032028
for chan in self.list_channels() {
2004-
let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
2029+
let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
20052030
}
20062031
}
20072032

@@ -6058,7 +6083,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
60586083
for chan in self.list_channels() {
60596084
if chan.counterparty.node_id == *counterparty_node_id {
60606085
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
6061-
let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
6086+
let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
60626087
}
60636088
}
60646089
} else {
@@ -6080,7 +6105,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
60806105
}
60816106

60826107
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
6083-
let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
6108+
let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
60846109
}
60856110
}
60866111
}

lightning/src/ln/functional_tests.rs

+11-11
Original file line numberDiff line numberDiff line change
@@ -2203,7 +2203,7 @@ fn channel_monitor_network_test() {
22032203
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
22042204

22052205
// Simple case with no pending HTLCs:
2206-
nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
2206+
nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
22072207
check_added_monitors!(nodes[1], 1);
22082208
check_closed_broadcast!(nodes[1], true);
22092209
{
@@ -2224,7 +2224,7 @@ fn channel_monitor_network_test() {
22242224

22252225
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
22262226
// broadcasted until we reach the timelock time).
2227-
nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
2227+
nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap();
22282228
check_closed_broadcast!(nodes[1], true);
22292229
check_added_monitors!(nodes[1], 1);
22302230
{
@@ -2264,7 +2264,7 @@ fn channel_monitor_network_test() {
22642264

22652265
// nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
22662266
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
2267-
nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
2267+
nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap();
22682268
check_added_monitors!(nodes[2], 1);
22692269
check_closed_broadcast!(nodes[2], true);
22702270
let node2_commitment_txid;
@@ -3403,7 +3403,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
34033403
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
34043404

34053405
route_payment(&nodes[0], &[&nodes[1]], 10000000);
3406-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3406+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
34073407
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
34083408
check_closed_broadcast!(nodes[0], true);
34093409
check_added_monitors!(nodes[0], 1);
@@ -3466,7 +3466,7 @@ fn test_force_close_fail_back() {
34663466
// state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
34673467
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
34683468

3469-
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
3469+
nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap();
34703470
check_closed_broadcast!(nodes[2], true);
34713471
check_added_monitors!(nodes[2], 1);
34723472
check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed);
@@ -4793,7 +4793,7 @@ fn test_claim_sizeable_push_msat() {
47934793
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
47944794

47954795
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
4796-
nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
4796+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
47974797
check_closed_broadcast!(nodes[1], true);
47984798
check_added_monitors!(nodes[1], 1);
47994799
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
@@ -4822,7 +4822,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
48224822
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
48234823

48244824
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known());
4825-
nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
4825+
nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
48264826
check_closed_broadcast!(nodes[0], true);
48274827
check_added_monitors!(nodes[0], 1);
48284828
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
@@ -8365,7 +8365,7 @@ fn test_manually_accept_inbound_channel_request() {
83658365
_ => panic!("Unexpected event"),
83668366
}
83678367

8368-
nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8368+
nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
83698369

83708370
let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events();
83718371
assert_eq!(close_msg_ev.len(), 1);
@@ -8400,7 +8400,7 @@ fn test_manually_reject_inbound_channel_request() {
84008400
let events = nodes[1].node.get_and_clear_pending_events();
84018401
match events[0] {
84028402
Event::OpenChannelRequest { temporary_channel_id, .. } => {
8403-
nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8403+
nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
84048404
}
84058405
_ => panic!("Unexpected event"),
84068406
}
@@ -9053,7 +9053,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
90539053
force_closing_node = 1;
90549054
counterparty_node = 0;
90559055
}
9056-
nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
9056+
nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap();
90579057
check_closed_broadcast!(nodes[force_closing_node], true);
90589058
check_added_monitors!(nodes[force_closing_node], 1);
90599059
check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed);
@@ -9489,7 +9489,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
94899489
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
94909490
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
94919491

9492-
nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
9492+
nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
94939493
check_closed_broadcast!(nodes[1], true);
94949494
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
94959495
check_added_monitors!(nodes[1], 1);

lightning/src/ln/payment_tests.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
584584
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
585585
// nodes[0].
586586
let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
587-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
587+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
588588
check_closed_broadcast!(nodes[0], true);
589589
check_added_monitors!(nodes[0], 1);
590590
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);

lightning/src/ln/priv_short_conf_tests.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -817,7 +817,7 @@ fn test_0conf_close_no_early_chan_update() {
817817
// We can use the channel immediately, but won't generate a channel_update until we get confs
818818
send_payment(&nodes[0], &[&nodes[1]], 100_000);
819819

820-
nodes[0].node.force_close_all_channels();
820+
nodes[0].node.force_close_all_channels_broadcasting_latest_txn();
821821
check_added_monitors!(nodes[0], 1);
822822
check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed);
823823
let _ = get_err_msg!(nodes[0], nodes[1].node.get_our_node_id());

lightning/src/util/events.rs

+6-6
Original file line numberDiff line numberDiff line change
@@ -459,33 +459,33 @@ pub enum Event {
459459
/// Indicates a request to open a new channel by a peer.
460460
///
461461
/// To accept the request, call [`ChannelManager::accept_inbound_channel`]. To reject the
462-
/// request, call [`ChannelManager::force_close_channel`].
462+
/// request, call [`ChannelManager::force_close_without_broadcasting_txn`].
463463
///
464464
/// The event is only triggered when a new open channel request is received and the
465465
/// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true.
466466
///
467467
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
468-
/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
468+
/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
469469
/// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
470470
OpenChannelRequest {
471471
/// The temporary channel ID of the channel requested to be opened.
472472
///
473473
/// When responding to the request, the `temporary_channel_id` should be passed
474474
/// back to the ChannelManager through [`ChannelManager::accept_inbound_channel`] to accept,
475-
/// or through [`ChannelManager::force_close_channel`] to reject.
475+
/// or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject.
476476
///
477477
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
478-
/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
478+
/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
479479
temporary_channel_id: [u8; 32],
480480
/// The node_id of the counterparty requesting to open the channel.
481481
///
482482
/// When responding to the request, the `counterparty_node_id` should be passed
483483
/// back to the `ChannelManager` through [`ChannelManager::accept_inbound_channel`] to
484-
/// accept the request, or through [`ChannelManager::force_close_channel`] to reject the
484+
/// accept the request, or through [`ChannelManager::force_close_without_broadcasting_txn`] to reject the
485485
/// request.
486486
///
487487
/// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
488-
/// [`ChannelManager::force_close_channel`]: crate::ln::channelmanager::ChannelManager::force_close_channel
488+
/// [`ChannelManager::force_close_without_broadcasting_txn`]: crate::ln::channelmanager::ChannelManager::force_close_without_broadcasting_txn
489489
counterparty_node_id: PublicKey,
490490
/// The channel value of the requested channel.
491491
funding_satoshis: u64,

0 commit comments

Comments
 (0)