Skip to content

Commit a600eee — Merge pull request #1564 from TheBlueMatt/2022-06-panic-on-behind

"Panic if we're running with outdated state instead of force-closing"

Merge commit a600eee; 2 parents: 92ca7ff and caa2a9a.

File tree: 10 files changed (+129 additions, −99 deletions).

fuzz/src/full_stack.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -252,7 +252,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
252252
}
253253

254254
// Force all channels onto the chain (and time out claim txn)
255-
self.manager.force_close_all_channels();
255+
self.manager.force_close_all_channels_broadcasting_latest_txn();
256256
}
257257
}
258258
}
@@ -624,7 +624,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
624624
let channel_id = get_slice!(1)[0] as usize;
625625
if channel_id >= channels.len() { return; }
626626
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
627-
channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
627+
channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
628628
},
629629
// 15 is above
630630
_ => return,

lightning-background-processor/src/lib.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -744,7 +744,7 @@ mod tests {
744744
}
745745

746746
// Force-close the channel.
747-
nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
747+
nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
748748

749749
// Check that the force-close updates are persisted.
750750
check_persisted_data!(nodes[0].node, filepath.clone());
@@ -880,7 +880,7 @@ mod tests {
880880
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
881881

882882
// Force close the channel and check that the SpendableOutputs event was handled.
883-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
883+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
884884
let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
885885
confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
886886
let event = receiver

lightning-persister/src/lib.rs

+3-3
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ mod tests {
213213

214214
// Force close because cooperative close doesn't result in any persisted
215215
// updates.
216-
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
216+
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
217217
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
218218
check_closed_broadcast!(nodes[0], true);
219219
check_added_monitors!(nodes[0], 1);
@@ -247,7 +247,7 @@ mod tests {
247247
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
248248
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
249249
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
250-
nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
250+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
251251
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
252252
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
253253
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -286,7 +286,7 @@ mod tests {
286286
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
287287
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
288288
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
289-
nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
289+
nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
290290
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
291291
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
292292
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();

lightning/src/ln/chanmon_update_fail_tests.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
226226
}
227227

228228
// ...and make sure we can force-close a frozen channel
229-
nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
229+
nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
230230
check_added_monitors!(nodes[0], 1);
231231
check_closed_broadcast!(nodes[0], true);
232232

lightning/src/ln/channel.rs

+20-6
Original file line numberDiff line numberDiff line change
@@ -802,7 +802,6 @@ pub(super) enum ChannelError {
802802
Ignore(String),
803803
Warn(String),
804804
Close(String),
805-
CloseDelayBroadcast(String),
806805
}
807806

808807
impl fmt::Debug for ChannelError {
@@ -811,7 +810,6 @@ impl fmt::Debug for ChannelError {
811810
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
812811
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
813812
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
814-
&ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
815813
}
816814
}
817815
}
@@ -3799,6 +3797,11 @@ impl<Signer: Sign> Channel<Signer> {
37993797

38003798
/// May panic if some calls other than message-handling calls (which will all Err immediately)
38013799
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
3800+
///
3801+
/// Some links printed in log lines are included here to check them during build (when run with
3802+
/// `cargo doc --document-private-items`):
3803+
/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
3804+
/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
38023805
pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
38033806
node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
38043807
-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
@@ -3824,9 +3827,20 @@ impl<Signer: Sign> Channel<Signer> {
38243827
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
38253828
}
38263829
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
3827-
return Err(ChannelError::CloseDelayBroadcast(
3828-
"We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
3829-
));
3830+
macro_rules! log_and_panic {
3831+
($err_msg: expr) => {
3832+
log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
3833+
panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
3834+
}
3835+
}
3836+
log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
3837+
This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
3838+
More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
3839+
If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
3840+
ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
3841+
ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
3842+
Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
3843+
See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
38303844
}
38313845
},
38323846
OptionalField::Absent => {}
@@ -3933,7 +3947,7 @@ impl<Signer: Sign> Channel<Signer> {
39333947
// now!
39343948
match self.free_holding_cell_htlcs(logger) {
39353949
Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
3936-
Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
3950+
Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
39373951
panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
39383952
Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
39393953
Ok(ReestablishResponses {

lightning/src/ln/channelmanager.rs

+37-29
Original file line numberDiff line numberDiff line change
@@ -369,15 +369,6 @@ impl MsgHandleErrInternal {
369369
},
370370
},
371371
},
372-
ChannelError::CloseDelayBroadcast(msg) => LightningError {
373-
err: msg.clone(),
374-
action: msgs::ErrorAction::SendErrorMessage {
375-
msg: msgs::ErrorMessage {
376-
channel_id,
377-
data: msg
378-
},
379-
},
380-
},
381372
},
382373
chan_id: None,
383374
shutdown_finish: None,
@@ -1273,13 +1264,6 @@ macro_rules! convert_chan_err {
12731264
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
12741265
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
12751266
},
1276-
ChannelError::CloseDelayBroadcast(msg) => {
1277-
log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
1278-
update_maps_on_chan_removal!($self, $short_to_id, $channel);
1279-
let shutdown_res = $channel.force_shutdown(false);
1280-
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
1281-
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
1282-
}
12831267
}
12841268
}
12851269
}
@@ -1945,7 +1929,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19451929

19461930
/// `peer_msg` should be set when we receive a message from a peer, but not set when the
19471931
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
1948-
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
1932+
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
1933+
-> Result<PublicKey, APIError> {
19491934
let mut chan = {
19501935
let mut channel_state_lock = self.channel_state.lock().unwrap();
19511936
let channel_state = &mut *channel_state_lock;
@@ -1964,7 +1949,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19641949
}
19651950
};
19661951
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
1967-
self.finish_force_close_channel(chan.force_shutdown(true));
1952+
self.finish_force_close_channel(chan.force_shutdown(broadcast));
19681953
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
19691954
let mut channel_state = self.channel_state.lock().unwrap();
19701955
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1975,13 +1960,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19751960
Ok(chan.get_counterparty_node_id())
19761961
}
19771962

1978-
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
1979-
/// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
1980-
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
1981-
/// channel.
1982-
pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
1963+
fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
19831964
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1984-
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
1965+
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
19851966
Ok(counterparty_node_id) => {
19861967
self.channel_state.lock().unwrap().pending_msg_events.push(
19871968
events::MessageSendEvent::HandleError {
@@ -1997,11 +1978,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19971978
}
19981979
}
19991980

1981+
/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
1982+
/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
1983+
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
1984+
/// channel.
1985+
pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
1986+
-> Result<(), APIError> {
1987+
self.force_close_sending_error(channel_id, counterparty_node_id, true)
1988+
}
1989+
1990+
/// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
1991+
/// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
1992+
/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
1993+
///
1994+
/// You can always get the latest local transaction(s) to broadcast from
1995+
/// [`ChannelMonitor::get_latest_holder_commitment_txn`].
1996+
pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
1997+
-> Result<(), APIError> {
1998+
self.force_close_sending_error(channel_id, counterparty_node_id, false)
1999+
}
2000+
20002001
/// Force close all channels, immediately broadcasting the latest local commitment transaction
20012002
/// for each to the chain and rejecting new HTLCs on each.
2002-
pub fn force_close_all_channels(&self) {
2003+
pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
2004+
for chan in self.list_channels() {
2005+
let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
2006+
}
2007+
}
2008+
2009+
/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
2010+
/// local transaction(s).
2011+
pub fn force_close_all_channels_without_broadcasting_txn(&self) {
20032012
for chan in self.list_channels() {
2004-
let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
2013+
let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
20052014
}
20062015
}
20072016

@@ -3188,7 +3197,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
31883197
// ChannelClosed event is generated by handle_error for us.
31893198
Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
31903199
},
3191-
ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
31923200
};
31933201
handle_errors.push((counterparty_node_id, err));
31943202
continue;
@@ -6058,7 +6066,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
60586066
for chan in self.list_channels() {
60596067
if chan.counterparty.node_id == *counterparty_node_id {
60606068
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
6061-
let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
6069+
let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
60626070
}
60636071
}
60646072
} else {
@@ -6080,7 +6088,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
60806088
}
60816089

60826090
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
6083-
let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
6091+
let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
60846092
}
60856093
}
60866094
}

Comments: 0 commit comments.