From fabcb6802347bc0e145840bac1f5ff27f6fcb793 Mon Sep 17 00:00:00 2001
From: Lucas Soriano del Pino
Date: Fri, 8 Dec 2023 12:24:30 +1100
Subject: [PATCH 01/16] Revert "Drop dep `tokio`'s `io-util` feat as it broke
 MSRV and isn't useful"

This reverts commit eb882a69b6c5b841b19e05494cfe0f09289b6af1.

The reverted commit introduced a regression causing the subchannel open
protocol to hang because a particularly large message was not being
sent over in full (one of the chunks was seemingly stuck).
---
 lightning-net-tokio/Cargo.toml |  4 +-
 lightning-net-tokio/src/lib.rs | 72 ++++++++++++++++++----------------
 2 files changed, 41 insertions(+), 35 deletions(-)

diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml
index 8a4ebd5d950..b7fa1e2a094 100644
--- a/lightning-net-tokio/Cargo.toml
+++ b/lightning-net-tokio/Cargo.toml
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.117", path = "../lightning" }
-tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
+tokio = { version = "1.0", features = [ "io-util", "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.14", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
 lightning = { version = "0.0.117", path = "../lightning", features = ["_test_utils"] }
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 5527d85adf6..3278f5ba33c 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -32,8 +32,9 @@ use bitcoin::secp256k1::PublicKey;
 
 use tokio::net::TcpStream;
-use tokio::time;
+use tokio::{io, time};
 use tokio::sync::mpsc;
+use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
 
 use lightning::ln::peer_handler;
 use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
@@ -58,7 +59,7 @@ static ID_COUNTER: AtomicU64 = AtomicU64::new(0);
 // define a trivial two- and three- select macro with the specific types we need and just use that.
 pub(crate) enum SelectorOutput {
-	A(Option<()>), B(Option<()>), C(tokio::io::Result<()>),
+	A(Option<()>), B(Option<()>), C(tokio::io::Result<usize>),
 }
 
 pub(crate) struct TwoSelector<
@@ -86,7 +87,7 @@ impl<
 }
 
 pub(crate) struct ThreeSelector<
-	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
 > {
 	pub a: A,
 	pub b: B,
@@ -94,7 +95,7 @@ pub(crate) struct ThreeSelector<
 }
 
 impl<
-	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<()>> + Unpin
+	A: Future<Output=Option<()>> + Unpin, B: Future<Output=Option<()>> + Unpin, C: Future<Output=tokio::io::Result<usize>> + Unpin
 > Future for ThreeSelector<A, B, C> {
 	type Output = SelectorOutput;
 	fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context<'_>) -> Poll<SelectorOutput> {
@@ -118,7 +119,7 @@ impl<
 /// Connection object (in an Arc<Mutex<>>) in each SocketDescriptor we create as well as in the
 /// read future (which is returned by schedule_read).
 struct Connection {
-	writer: Option<Arc<TcpStream>>,
+	writer: Option<io::WriteHalf<TcpStream>>,
 	// Because our PeerManager is templated by user-provided types, and we can't (as far as I can
 	// tell) have a const RawWakerVTable built out of templated functions, we need some indirection
 	// between being woken up with write-ready and calling PeerManager::write_buffer_space_avail.
@@ -155,7 +156,7 @@ impl Connection {
 	async fn schedule_read<PM: Deref + 'static + Send + Sync + Clone>(
 		peer_manager: PM,
 		us: Arc<Mutex<Self>>,
-		reader: Arc<TcpStream>,
+		mut reader: io::ReadHalf<TcpStream>,
 		mut read_wake_receiver: mpsc::Receiver<()>,
 		mut write_avail_receiver: mpsc::Receiver<()>,
 	) where PM::Target: APeerManager<Descriptor = SocketDescriptor> {
@@ -199,7 +200,7 @@
 				ThreeSelector {
 					a: Box::pin(write_avail_receiver.recv()),
 					b: Box::pin(read_wake_receiver.recv()),
-					c: Box::pin(reader.readable()),
+					c: Box::pin(reader.read(&mut buf)),
 				}.await
 			};
 			match select_result {
@@ -210,9 +211,8 @@
 					}
 				},
 				SelectorOutput::B(_) => {},
-				SelectorOutput::C(res) => {
-					if res.is_err() { break Disconnect::PeerDisconnected; }
-					match reader.try_read(&mut buf) {
+				SelectorOutput::C(read) => {
+					match read {
 						Ok(0) => break Disconnect::PeerDisconnected,
 						Ok(len) => {
 							let read_res = peer_manager.as_ref().read_event(&mut our_descriptor, &buf[0..len]);
@@ -226,10 +226,6 @@
 							Err(_) => break Disconnect::CloseConnection,
 						}
 					},
-					Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
-						// readable() is allowed to spuriously wake, so we have to handle
-						// WouldBlock here.
-					},
 					Err(_) => break Disconnect::PeerDisconnected,
 				}
 			},
@@ -243,14 +239,18 @@
 			// here.
 			let _ = tokio::task::yield_now().await;
 		};
-		us.lock().unwrap().writer.take();
+		let writer_option = us.lock().unwrap().writer.take();
+		if let Some(mut writer) = writer_option {
+			// If the socket is already closed, shutdown() will fail, so just ignore it.
+			let _ = writer.shutdown().await;
+		}
 		if let Disconnect::PeerDisconnected = disconnect_type {
 			peer_manager.as_ref().socket_disconnected(&our_descriptor);
 			peer_manager.as_ref().process_events();
 		}
 	}
 
-	fn new(stream: StdTcpStream) -> (Arc<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
+	fn new(stream: StdTcpStream) -> (io::ReadHalf<TcpStream>, mpsc::Receiver<()>, mpsc::Receiver<()>, Arc<Mutex<Self>>) {
 		// We only ever need a channel of depth 1 here: if we returned a non-full write to the
 		// PeerManager, we will eventually get notified that there is room in the socket to write
 		// new bytes, which will generate an event. That event will be popped off the queue before
@@ -262,11 +262,11 @@ impl Connection {
 		// false.
let (read_waker, read_receiver) = mpsc::channel(1); stream.set_nonblocking(true).unwrap(); - let tokio_stream = Arc::new(TcpStream::from_std(stream).unwrap()); + let (reader, writer) = io::split(TcpStream::from_std(stream).unwrap()); - (Arc::clone(&tokio_stream), write_receiver, read_receiver, + (reader, write_receiver, read_receiver, Arc::new(Mutex::new(Self { - writer: Some(tokio_stream), write_avail, read_waker, read_paused: false, + writer: Some(writer), write_avail, read_waker, read_paused: false, rl_requested_disconnect: false, id: ID_COUNTER.fetch_add(1, Ordering::AcqRel) }))) @@ -462,9 +462,9 @@ impl SocketDescriptor { } impl peer_handler::SocketDescriptor for SocketDescriptor { fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize { - // To send data, we take a lock on our Connection to access the TcpStream, writing to it if - // there's room in the kernel buffer, or otherwise create a new Waker with a - // SocketDescriptor in it which can wake up the write_avail Sender, waking up the + // To send data, we take a lock on our Connection to access the WriteHalf of the TcpStream, + // writing to it if there's room in the kernel buffer, or otherwise create a new Waker with + // a SocketDescriptor in it which can wake up the write_avail Sender, waking up the // processing future which will call write_buffer_space_avail and we'll end up back here. let mut us = self.conn.lock().unwrap(); if us.writer.is_none() { @@ -484,18 +484,24 @@ impl peer_handler::SocketDescriptor for SocketDescriptor { let mut ctx = task::Context::from_waker(&waker); let mut written_len = 0; loop { - match us.writer.as_ref().unwrap().poll_write_ready(&mut ctx) { - task::Poll::Ready(Ok(())) => { - match us.writer.as_ref().unwrap().try_write(&data[written_len..]) { - Ok(res) => { - debug_assert_ne!(res, 0); - written_len += res; - if written_len == data.len() { return written_len; } - }, - Err(_) => return written_len, - } + match std::pin::Pin::new(us.writer.as_mut().unwrap()).poll_write(&mut ctx, &data[written_len..]) { + task::Poll::Ready(Ok(res)) => { + // The tokio docs *seem* to indicate this can't happen, and I certainly don't + // know how to handle it if it does (cause it should be a Poll::Pending + // instead): + assert_ne!(res, 0); + written_len += res; + if written_len == data.len() { return written_len; } + }, + task::Poll::Ready(Err(e)) => { + // The tokio docs *seem* to indicate this can't happen, and I certainly don't + // know how to handle it if it does (cause it should be a Poll::Pending + // instead): + assert_ne!(e.kind(), io::ErrorKind::WouldBlock); + // Probably we've already been closed, just return what we have and let the + // read thread handle closing logic. + return written_len; }, - task::Poll::Ready(Err(_)) => return written_len, task::Poll::Pending => { // We're queued up for a write event now, but we need to make sure we also // pause read given we're now waiting on the remote end to ACK (and in From 88cabaa8370a27fb784f8b485a96d6e9c1ba33df Mon Sep 17 00:00:00 2001 From: Fedeparma74 Date: Wed, 11 Oct 2023 10:50:28 +0200 Subject: [PATCH 02/16] backport: Fix deadlock when closing an unavailable channel I've backported from the 0.0.118 release since it seems like we could easily run into this by attempting to close a channel that is already closed or that never existed in the first place. 
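For readers unfamiliar with the failure mode: `close_channel_internal` holds the
`per_peer_state` read lock and the per-peer `Mutex` when it falls through to
`force_close_channel_with_peer`, which takes those same locks again; `std::sync::Mutex`
is not reentrant, so the thread deadlocks against itself. Below is a minimal sketch of
the lock-dropping pattern the fix applies — the `Manager` type and fields are
illustrative stand-ins, not LDK's actual `per_peer_state` types:

    use std::sync::{Mutex, RwLock};

    struct Manager {
        per_peer_state: RwLock<Vec<Mutex<u64>>>,
    }

    impl Manager {
        // Takes the same locks as its caller, like force_close_channel_with_peer.
        fn force_close(&self) {
            let per_peer_state = self.per_peer_state.read().unwrap();
            let _peer_state = per_peer_state[0].lock().unwrap();
            // ... force-close the channel ...
        }

        fn close(&self) {
            let per_peer_state = self.per_peer_state.read().unwrap();
            let peer_state = per_peer_state[0].lock().unwrap();
            // ... the channel_id is unknown for this peer ...
            // Release both guards before re-entering the locking path: locking
            // the same non-reentrant Mutex again on this thread would deadlock.
            std::mem::drop(peer_state);
            std::mem::drop(per_peer_state);
            self.force_close();
        }
    }

With the guards explicitly dropped, the force-close path can retake the locks in its
own scope — which is exactly what the two `mem::drop` calls added in this patch do.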
---
 lightning/src/ln/channelmanager.rs | 46 ++++++++++++++++++++++++++++--
 lightning/src/ln/payment_tests.rs  |  2 +-
 2 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 62c6741fbdf..81a9e07d356 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -2616,6 +2616,8 @@ where
 				// it does not exist for this peer. Either way, we can attempt to force-close it.
 				//
 				// An appropriate error will be returned for non-existence of the channel if that's the case.
+				mem::drop(peer_state_lock);
+				mem::drop(per_peer_state);
 				return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 			},
 		}
@@ -4001,7 +4003,7 @@ where
 		for channel_id in channel_ids {
 			if !peer_state.has_channel(channel_id) {
 				return Err(APIError::ChannelUnavailable {
-					err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id),
+					err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
 				});
 			};
 		}
@@ -4112,7 +4114,7 @@ where
 						next_hop_channel_id, next_node_id)
 				}),
 				None => return Err(APIError::ChannelUnavailable {
-					err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+					err: format!("Channel with id {} not found for the passed counterparty node_id {}",
 						next_hop_channel_id, next_node_id)
 				})
 			}
@@ -10748,6 +10750,16 @@ mod tests {
 		check_api_error_message(expected_message, res_err)
 	}
 
+	fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
+		let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
+		check_api_error_message(expected_message, res_err)
+	}
+
+	fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
+		let expected_message = "No such channel awaiting to be accepted.".to_string();
+		check_api_error_message(expected_message, res_err)
+	}
+
 	fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
 		match res_err {
 			Err(APIError::APIMisuseError { err }) => {
@@ -10792,6 +10804,36 @@ mod tests {
 		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
 	}
 
+	#[test]
+	fn test_api_calls_with_unavailable_channel() {
+		// Tests that our API functions which expect a `counterparty_node_id` and a `channel_id`
+		// as input behave as expected if the `counterparty_node_id` is a known peer in the
+		// `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
+		// the given `channel_id`.
+		let chanmon_cfg = create_chanmon_cfgs(2);
+		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+		let counterparty_node_id = nodes[1].node.get_our_node_id();
+
+		// Dummy values
+		let channel_id = ChannelId::from_bytes([4; 32]);
+
+		// Test the API functions.
+		check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
+
+		check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+		check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+
+		check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
+
+		check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
+	}
+
 	#[test]
 	fn test_connection_limiting() {
 		// Test that we limit un-channel'd peers and un-funded channels properly.
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 26ecbb0bd24..616a1d4ce00 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -1906,7 +1906,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	// Check for unknown channel id error.
 	let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
 	assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable {
-		err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+		err: format!("Channel with id {} not found for the passed counterparty node_id {}",
 			log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
 
 	if test == InterceptTest::Fail {

From 9861fcdb64dc62bf5a8711eabd29c7db0852efe8 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 29 Nov 2023 05:58:52 +0000
Subject: [PATCH 03/16] backport: Drop unreachable shutdown code in
 `Channel::get_shutdown`

`Channel` is only a thing for funded channels. Thus, checking if a
channel has not yet been funded is dead code and can simply be elided.
---
 lightning/src/ln/channel.rs | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a61a8de82de..932ac3f6706 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -5567,9 +5567,6 @@ impl<SP: Deref> Channel<SP> where
 
 	/// Begins the shutdown process, getting a message for the remote peer and returning all
 	/// holding cell HTLCs for payment failure.
-	///
-	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
-	/// [`ChannelMonitorUpdate`] will be returned).
 	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
 		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
 	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
@@ -5595,16 +5592,9 @@ impl<SP: Deref> Channel<SP> where
 			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
 		}
 
-		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
-		// script is set, we just force-close and call it a day.
- let mut chan_closed = false; - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - chan_closed = true; - } - let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, - None if !chan_closed => { + None => { // use override shutdown script if provided let shutdown_scriptpubkey = match override_shutdown_script { Some(script) => script, @@ -5622,16 +5612,11 @@ impl Channel where self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, - None => false, }; // From here on out, we may not fail! self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - } else { - self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - } + self.context.channel_state |= ChannelState::LocalShutdownSent as u32; self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { From 77988523d323aea41d388c48cce34615a2503ec2 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 29 Nov 2023 05:59:42 +0000 Subject: [PATCH 04/16] backport: Move pre-funded-channel immediate shutdown logic to the right place Because a `Funded` `Channel` cannot possibly be pre-funding, the logic in `ChannelManager::close_channel_internal` to handle pre-funding channels is in the wrong place. Rather than being handled inside the `Funded` branch, it should be in an `else` following it, handling either of the two `ChannelPhases` outside of `Funded`. Sadly, because of a previous control flow management `loop {}`, the existing code will infinite loop, which is fixed here. --- lightning/src/ln/channelmanager.rs | 25 +++++++++---------------- lightning/src/ln/shutdown_tests.rs | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 81a9e07d356..09bbe353c7b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2560,7 +2560,8 @@ where let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; let mut shutdown_result = None; - loop { + + { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -2571,10 +2572,11 @@ where match peer_state.channel_by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { + let unbroadcasted_batch_funding_txid = + chan_phase_entry.get().context().unbroadcasted_batch_funding_txid(); if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; - let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; failed_htlcs = htlcs; @@ -2594,21 +2596,12 @@ where if let Some(monitor_update) = monitor_update_opt.take() { handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); - break; - } - - if chan.is_shutdown() { - if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) { - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) { - 
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); - } - self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); - shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); - } } - break; + } else { + self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed); + failed_htlcs = Vec::new(); + remove_channel_phase!(self, chan_phase_entry); + shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); } }, hash_map::Entry::Vacant(_) => { diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 47361693693..a15cb009c72 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -275,6 +275,21 @@ fn shutdown_on_unfunded_channel() { check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000); } +#[test] +fn close_on_unfunded_channel() { + // Test the user asking us to close prior to funding generation + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None).unwrap(); + let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000); +} + #[test] fn expect_channel_shutdown_state_with_force_closure() { // Test sending a shutdown prior to channel_ready after funding generation From 0b465c0ca41521df005c65f3b7e39789a43f26eb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 29 Nov 2023 06:02:46 +0000 Subject: [PATCH 05/16] backport: Immediately error in `close_channel_internal` if there is no chan Previously, unfunded channels would be stored outside of `PeerState::channel_by_id`, and thus if there is no channel when we look in `PeerState::channel_by_id`, `close_channel_internal` called `force_close_channel_with_peer` to hunt for unfunded channels. However, that is no longer the case, so the call is redundant, and we can simply return an error instead. --- lightning/src/ln/channelmanager.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 09bbe353c7b..340f82fce1a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2605,13 +2605,12 @@ where } }, hash_map::Entry::Vacant(_) => { - // If we reach this point, it means that the channel_id either refers to an unfunded channel or - // it does not exist for this peer. Either way, we can attempt to force-close it. - // - // An appropriate error will be returned for non-existence of the channel if that's the case. 
- mem::drop(peer_state_lock); - mem::drop(per_peer_state); - return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} not found for the passed counterparty node_id {}", + channel_id, counterparty_node_id, + ) + }); }, } } From f49cffe74b722ac36c84c611a51a42677cb0a44d Mon Sep 17 00:00:00 2001 From: Tibo-lg Date: Mon, 6 Nov 2023 12:10:33 +0900 Subject: [PATCH 06/16] Use p2pderivatives/main as rebase target in CI --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 45bd4d30491..45f03eef97d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -143,7 +143,7 @@ jobs: rustup override set ${{ env.TOOLCHAIN }} - name: Fetch full tree and rebase on upstream run: | - git remote add upstream https://github.com/lightningdevkit/rust-lightning + git remote add upstream https://github.com/p2pderivatives/rust-lightning git fetch upstream export GIT_COMMITTER_EMAIL="rl-ci@example.com" export GIT_COMMITTER_NAME="RL CI" From 3f32a4006b27c92cf16fb19c6d5dab592e9c143f Mon Sep 17 00:00:00 2001 From: Tibo-lg Date: Mon, 6 Nov 2023 15:11:38 +0900 Subject: [PATCH 07/16] Fix pinned dependency in CI --- .github/workflows/build.yml | 2 +- ci/ci-tests.sh | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 45f03eef97d..81014bc1b58 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,7 +51,7 @@ jobs: shellcheck ci/ci-tests.sh - name: Run CI script shell: bash # Default on Winblows is powershell - run: ./ci/ci-tests.sh + run: CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: strategy: diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 6b89a98fdda..9b83c3dba4c 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -130,11 +130,14 @@ else [ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose cargo check --verbose --color always fi +[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd # Test that we can build downstream code with only the "release pins". pushd msrv-no-dev-deps-check PIN_RELEASE_DEPS +# The memchr crate switched to an MSRV of 1.60 starting with v2.6.0 +[ "$RUSTC_MINOR_VERSION" -lt 60 ] && cargo update -p memchr --precise "2.5.0" --verbose cargo check popd From 98af90f38158cf107470e316b6fc7488f9b47ffd Mon Sep 17 00:00:00 2001 From: Lucas Soriano del Pino Date: Wed, 20 Dec 2023 15:00:33 +1100 Subject: [PATCH 08/16] Run CI checks only on PR push If you want to check what CI says you can always just open a (draft) PR. --- .github/workflows/build.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 81014bc1b58..0950df23b7e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,9 +1,6 @@ name: Continuous Integration Checks on: - push: - branches-ignore: - - master pull_request: branches-ignore: - master From 1ab006b083b08f5c320cb93fc1fefa744914f4a6 Mon Sep 17 00:00:00 2001 From: Lucas Soriano del Pino Date: Wed, 20 Dec 2023 14:05:44 +1100 Subject: [PATCH 09/16] Run check-each-commit.sh based on PR base branch Most PRs target `main`, but for those PRs that target a different branch it is more efficient to target the PR base branch. Otherwise we can end up running the script for too many commits and run into a timeout on CI. 
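For context, the effect of the workflow change is easier to see as a script. The
following is a hypothetical sketch of the per-commit check once the rebase target is
parameterized — it is not the actual contents of `ci/check-each-commit.sh`, and the
loop body is an assumption; `PR_BASE_BRANCH` mirrors the variable exported in the
workflow below:

    #!/bin/sh
    set -e
    # Rebase onto the PR's base branch rather than hard-coded upstream/main...
    BASE="upstream/${PR_BASE_BRANCH:-main}"
    git rebase "$BASE"
    # ...then run cargo check once per commit in the (now much shorter) range.
    for commit in $(git rev-list --reverse "$BASE"..HEAD); do
        git checkout -q "$commit"
        cargo check
    done

Targeting the base branch keeps the commit range small, which is what avoids the CI
timeout described above.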
---
 .github/workflows/build.yml | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 0950df23b7e..dbc3699c087 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -138,15 +138,24 @@ jobs:
         run: |
           curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }}
           rustup override set ${{ env.TOOLCHAIN }}
-      - name: Fetch full tree and rebase on upstream
+      - name: Get PR base branch
+        id: get-pr-base-branch
+        run: |
+          BRANCH=${{ github.event.pull_request.base.ref }}
+          echo "PR_BASE_BRANCH=$BRANCH" >> "$GITHUB_OUTPUT"
+      - name: Fetch full tree and rebase on PR base branch
+        env:
+          PR_BASE_BRANCH: ${{ steps.get-pr-base-branch.outputs.PR_BASE_BRANCH }}
         run: |
           git remote add upstream https://github.com/p2pderivatives/rust-lightning
           git fetch upstream
           export GIT_COMMITTER_EMAIL="rl-ci@example.com"
           export GIT_COMMITTER_NAME="RL CI"
-          git rebase upstream/main
+          git rebase upstream/$PR_BASE_BRANCH
       - name: For each commit, run cargo check (including in fuzz)
-        run: ci/check-each-commit.sh upstream/main
+        env:
+          PR_BASE_BRANCH: ${{ steps.get-pr-base-branch.outputs.PR_BASE_BRANCH }}
+        run: ci/check-each-commit.sh upstream/$PR_BASE_BRANCH
   check_release:
     runs-on: ubuntu-latest

From d55310686b26daf451ab662214b6d90c5792eccc Mon Sep 17 00:00:00 2001
From: Tibo-lg
Date: Mon, 18 Dec 2023 15:20:44 +1100
Subject: [PATCH 10/16] Enable splitting lightning channel

---
 fuzz/src/chanmon_consistency.rs           |   5 +
 fuzz/src/router.rs                        |   5 +
 lightning-block-sync/src/poll.rs          |  37 +--
 lightning/rustfmt.toml                    |   1 +
 lightning/src/chain/chainmonitor.rs       |  40 ++-
 lightning/src/chain/channelmonitor.rs     |  77 +++++-
 lightning/src/chain/mod.rs                |   4 +
 lightning/src/ln/chan_utils.rs            |  25 +-
 lightning/src/ln/channel.rs               | 109 ++++++++-
 lightning/src/ln/channelmanager.rs        | 281 +++++++++++++++++++++-
 lightning/src/ln/monitor_tests.rs         |   2 +-
 lightning/src/ln/msgs.rs                  |   5 +
 lightning/src/ln/peer_handler.rs          |   4 +-
 lightning/src/routing/router.rs           |  10 +
 lightning/src/sign/mod.rs                 |   7 +
 lightning/src/util/errors.rs              |   9 +
 lightning/src/util/macro_logger.rs        |   2 +-
 lightning/src/util/persist.rs             |   4 +-
 lightning/src/util/test_channel_signer.rs |   4 +
 lightning/src/util/test_utils.rs          |   4 +
 rustfmt.toml                              |   2 +-
 21 files changed, 582 insertions(+), 55 deletions(-)
 create mode 100644 lightning/rustfmt.toml

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 2a1b9e9a70a..cfeeaabf1a4 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -162,6 +162,10 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 		self.chain_monitor.update_channel(funding_txo, update)
 	}
 
+	fn update_channel_funding_txo(&self, _: OutPoint, _: OutPoint, _: u64) -> chain::ChannelMonitorUpdateStatus {
+		unimplemented!()
+	}
+
 	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
@@ -317,6 +321,7 @@ fn check_api_err(api_err: APIError, sendable_bounds_violated: bool) {
 			// We can (obviously) temp-fail a monitor update
 		},
 		APIError::IncompatibleShutdownScript { ..
} => panic!("Cannot send an incompatible shutdown script"), + APIError::ExternalError { err } => panic!("{}", err), } } #[inline] diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index 7f4e7ad4019..f065c802441 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -242,6 +242,11 @@ pub fn do_test(data: &[u8], out: Out) { config: None, feerate_sat_per_1000_weight: None, channel_shutdown_state: Some(channelmanager::ChannelShutdownState::NotShuttingDown), + funding_redeemscript: None, + holder_funding_pubkey: PublicKey::from_slice(&hex::decode("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()[..]).unwrap(), + counter_funding_pubkey: None, + original_funding_outpoint: None, + channel_keys_id: [0; 32], }); } Some(&$first_hops_vec[..]) diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index e7171cf3656..4418a0d30f0 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -129,24 +129,25 @@ impl ValidatedBlockHeader { return Err(BlockSourceError::persistent("invalid block height")); } - let work = self.header.work(); - if self.chainwork != previous_header.chainwork + work { - return Err(BlockSourceError::persistent("invalid chainwork")); - } - - if let Network::Bitcoin = network { - if self.height % 2016 == 0 { - let target = self.header.target(); - let previous_target = previous_header.header.target(); - let min_target = previous_target >> 2; - let max_target = previous_target << 2; - if target > max_target || target < min_target { - return Err(BlockSourceError::persistent("invalid difficulty transition")) - } - } else if self.header.bits != previous_header.header.bits { - return Err(BlockSourceError::persistent("invalid difficulty")) - } - } + // let work = self.header.work(); + // if self.chainwork != previous_header.chainwork + work { + // return Err(BlockSourceError::persistent("invalid chainwork")); + // } + + // TODO(Tibo): This causes issues with Esplora, temporary fix. 
+	// if let Network::Bitcoin = network {
+	// 	if self.height % 2016 == 0 {
+	// 		let target = self.header.target();
+	// 		let previous_target = previous_header.header.target();
+	// 		let min_target = previous_target >> 2;
+	// 		let max_target = previous_target << 2;
+	// 		if target > max_target || target < min_target {
+	// 			return Err(BlockSourceError::persistent("invalid difficulty transition"))
+	// 		}
+	// 	} else if self.header.bits != previous_header.header.bits {
+	// 		return Err(BlockSourceError::persistent("invalid difficulty"))
+	// 	}
+	// }
 
 		Ok(())
 	}
diff --git a/lightning/rustfmt.toml b/lightning/rustfmt.toml
new file mode 100644
index 00000000000..c7ad93bafe3
--- /dev/null
+++ b/lightning/rustfmt.toml
@@ -0,0 +1 @@
+disable_all_formatting = true
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index e87d082d9a7..f424baeb919 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -444,7 +444,7 @@ where C::Target: chain::Filter,
 		let monitor_states = self.monitors.read().unwrap();
 		for (_, monitor_state) in monitor_states.iter().filter(|(funding_outpoint, _)| {
 			for chan in ignored_channels {
-				if chan.funding_txo.as_ref() == Some(funding_outpoint) {
+				if chan.funding_txo.as_ref() == Some(funding_outpoint) || chan.original_funding_outpoint.as_ref() == Some(funding_outpoint) {
 					return false;
 				}
 			}
@@ -624,6 +624,15 @@ where C::Target: chain::Filter,
 			)
 		}
 	}
+
+	/// Retrieves the latest holder commitment transaction (and possibly HTLC transactions) for
+	/// the channel identified with the given `funding_txo`. Errors if no monitor is registered
+	/// for the given `funding_txo`.
+	pub fn get_latest_holder_commitment_txn(&self, funding_txo: &OutPoint) -> Result<Vec<Transaction>, ()> {
+		let monitors = self.monitors.read().unwrap();
+		let monitor = monitors.get(funding_txo).ok_or(())?;
+		Ok(monitor.monitor.get_latest_holder_commitment_txn_internal(&self.logger))
+	}
 }
 
 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
@@ -748,6 +757,33 @@ where C::Target: chain::Filter,
 		Ok(persist_res)
 	}
 
+	fn update_channel_funding_txo(&self, old_funding_txo: OutPoint, new_funding_txo: OutPoint, channel_value_satoshis: u64) -> ChannelMonitorUpdateStatus {
+		let mut monitors = self.monitors.write().unwrap();
+		let monitor_opt = monitors.get_mut(&old_funding_txo);
+		match monitor_opt {
+			None => {
+				log_error!(self.logger, "Failed to update channel monitor funding txo: no such monitor registered");
+
+				// We should never ever trigger this from within ChannelManager. Technically a
+				// user could use this object with some proxying in between which makes this
+				// possible, but in tests and fuzzing, this should be a panic.
+				#[cfg(any(test, fuzzing))]
+				panic!("ChannelManager generated a channel update for a channel that was not yet registered!");
+				#[cfg(not(any(test, fuzzing)))]
+				return ChannelMonitorUpdateStatus::UnrecoverableError;
+			},
+			Some(monitor_state) => {
+				let spk = monitor_state.monitor.update_funding_info(new_funding_txo, channel_value_satoshis);
+				if let Some(filter) = &self.chain_source {
+					filter.register_output(WatchedOutput { block_hash: None, outpoint: new_funding_txo, script_pubkey: spk });
+				}
+				return ChannelMonitorUpdateStatus::Completed;
+			}
+		}
+	}
+
+	/// Note that we persist the given `ChannelMonitor` update while holding the
+	/// `ChainMonitor` monitors lock.
 	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
 		// Update the monitor that watches the channel referred to by the given outpoint.
 		let monitors = self.monitors.read().unwrap();
@@ -827,7 +863,7 @@ where C::Target: chain::Filter,
 			}
 			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
 			if monitor_events.len() > 0 {
-				let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+				let monitor_outpoint = monitor_state.monitor.get_original_funding_txo().0;
 				let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
 				pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
 			}
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index bd0c1548428..5aba7ea1ef9 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -757,6 +757,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
 	channel_keys_id: [u8; 32],
 	holder_revocation_basepoint: PublicKey,
 	funding_info: (OutPoint, Script),
+	original_funding_info: Option<(OutPoint, Script)>,
 	current_counterparty_commitment_txid: Option<Txid>,
 	prev_counterparty_commitment_txid: Option<Txid>,
 
@@ -947,6 +948,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 ChannelMonitor {
 			channel_keys_id,
 			holder_revocation_basepoint,
 			funding_info,
+			original_funding_info: None,
 
 			current_counterparty_commitment_txid: None,
 			prev_counterparty_commitment_txid: None,
 
@@ -1277,6 +1286,31 @@
 			txid, htlc_outputs, commitment_number, their_per_commitment_point, logger)
 	}
 
+	pub(crate) fn update_funding_info(&self, fund_outpoint: OutPoint, channel_value_satoshis: u64) -> Script {
+		let mut inner = self.inner.lock().unwrap();
+		let script = inner.funding_info.1.clone();
+		if let Some(original) = inner.original_funding_info.as_ref() {
+			if fund_outpoint == original.0 {
+				inner.original_funding_info = None;
+				inner.onchain_tx_handler.channel_transaction_parameters.original_funding_outpoint = None;
+			}
+		} else {
+			let original_funding_txo = inner.funding_info.0;
+			let original_funding_script_pubkey = &inner.funding_info.1;
+
+			inner.original_funding_info = Some((original_funding_txo, original_funding_script_pubkey.clone()));
+			inner.onchain_tx_handler.channel_transaction_parameters.original_funding_outpoint = Some(original_funding_txo);
+		}
+		inner.outputs_to_watch.insert(fund_outpoint.txid, vec![(fund_outpoint.index as u32, script.clone())]);
+
+		inner.funding_info = (fund_outpoint, script.clone());
+		inner.onchain_tx_handler.channel_transaction_parameters.funding_outpoint = Some(fund_outpoint);
+
+		inner.channel_value_satoshis = channel_value_satoshis;
+		inner.onchain_tx_handler.signer.set_channel_value_satoshis(channel_value_satoshis);
+		script
+	}
+
 	#[cfg(test)]
 	fn provide_latest_holder_commitment_tx(
 		&self, holder_commitment_tx: HolderCommitmentTransaction,
@@ -1333,6 +1367,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_funding_txo().clone()
 	}
 
+	/// Gets the funding txo of the original funding transaction. This matches
+	/// [`Self::get_funding_txo`] unless the channel has been split.
+	pub fn get_original_funding_txo(&self) -> (OutPoint, Script) {
+		self.inner.lock().unwrap().get_original_funding_txo().clone()
+	}
+
 	/// Gets a list of txids, with their output scripts (in the order they appear in the
 	/// transaction), which we must learn about spends of via block_connected().
 	pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, Script)>)> {
@@ -1501,6 +1540,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_latest_holder_commitment_txn(logger)
 	}
 
+	pub(crate) fn get_latest_holder_commitment_txn_internal<L: Deref>(&self, logger: &L) -> Vec<Transaction>
+	where L::Target: Logger {
+		self.inner.lock().unwrap().get_latest_holder_commitment_txn_internal(logger)
+	}
+
 	/// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
 	/// to bypass HolderCommitmentTransaction state update lockdown after signature and generate
 	/// revoked commitment transaction.
@@ -2768,6 +2812,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		&self.funding_info
 	}
 
+	pub fn get_original_funding_txo(&self) -> &(OutPoint, Script) {
+		&self.original_funding_info.as_ref().unwrap_or(&self.funding_info)
+	}
+
 	pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, Script)>> {
 		// If we've detected a counterparty commitment tx on chain, we must include it in the set
 		// of outputs to watch for spends of, otherwise we're likely to lose user funds. Because
@@ -3298,8 +3346,12 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	}
 
 	pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
-		log_debug!(logger, "Getting signed latest holder commitment transaction!");
 		self.holder_tx_signed = true;
+		self.get_latest_holder_commitment_txn_internal(logger)
+	}
+
+	pub(crate) fn get_latest_holder_commitment_txn_internal<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+		log_debug!(logger, "Getting signed latest holder commitment transaction!");
 		let commitment_tx = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript);
 		let txid = commitment_tx.txid();
 		let mut holder_transactions = vec![commitment_tx];
@@ -3466,7 +3518,14 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		// (except for HTLC transactions for channels with anchor outputs), which is an easy
 		// way to filter out any potential non-matching txn for lazy filters.
 		let prevout = &tx.input[0].previous_output;
-		if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
+		let match_prevout = |outpoint: &OutPoint| {
+			prevout.txid == outpoint.txid && prevout.vout == outpoint.index as u32
+		};
+		let is_split = tx.output.len() == 2 && tx.output[0].script_pubkey == tx.output[1].script_pubkey;
+		let is_match = match_prevout(&self.funding_info.0) ||
+			(self.original_funding_info.is_some() && match_prevout(&self.original_funding_info.as_ref().unwrap().0) && !is_split);
+
+		if is_match {
 			let mut balance_spendable_csv = None;
 			log_info!(logger, "Channel {} closed by funding output spend in txid {}.",
 				&self.funding_info.0.to_channel_id(), txid);
@@ -4219,6 +4278,16 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> for (BlockHash, ChannelMonitor<SP::Signer>) {
 			index: Readable::read(reader)?,
 		};
 		let funding_info = (outpoint, Readable::read(reader)?);
+		let original_funding_info = match <u8 as Readable>::read(reader)? {
+			0 => {
+				let outpoint = Readable::read(reader)?;
+				let script = Readable::read(reader)?;
+				Some((outpoint, script))
+			},
+			1 => { None },
+			_ => return Err(DecodeError::InvalidValue),
+		};
+
 		let current_counterparty_commitment_txid = Readable::read(reader)?;
 		let prev_counterparty_commitment_txid = Readable::read(reader)?;
 
@@ -4428,6 +4497,7 @@
 		channel_keys_id,
 		holder_revocation_basepoint,
 		funding_info,
+		original_funding_info,
 
 		current_counterparty_commitment_txid,
 		prev_counterparty_commitment_txid,
@@ -4683,7 +4753,8 @@ mod tests {
 				selected_contest_delay: 67,
 			}),
 			funding_outpoint: Some(funding_outpoint),
-			channel_type_features: ChannelTypeFeatures::only_static_remote_key()
+			channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
+			original_funding_outpoint: None,
 		};
 		// Prune with one old state and a holder commitment tx holding a few overlaps with the
 		// old state.
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index 89e0b155cf6..d1a38d44adb 100644
--- a/lightning/src/chain/mod.rs
+++ b/lightning/src/chain/mod.rs
@@ -288,6 +288,10 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
 	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 
+	/// Update the outpoint funding the channel. To be used when the channel is split into two to
+	/// open a DLC channel with the same funding transaction.
+	fn update_channel_funding_txo(&self, old_funding_txo: OutPoint, new_funding_txo: OutPoint, channel_value_satoshis: u64) -> ChannelMonitorUpdateStatus;
+
 	/// Returns any monitor events since the last call. Subsequent calls must only return new
 	/// events.
 	///
diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs
index d1489e27168..6886a6f0c96 100644
--- a/lightning/src/ln/chan_utils.rs
+++ b/lightning/src/ln/chan_utils.rs
@@ -875,11 +875,22 @@ pub struct ChannelTransactionParameters {
 	/// The late-bound counterparty channel transaction parameters.
 	/// These parameters are populated at the point in the protocol where the counterparty provides them.
 	pub counterparty_parameters: Option<CounterpartyChannelTransactionParameters>,
-	/// The late-bound funding outpoint
+	/// The late-bound funding outpoint.
+	///
+	/// If it's a vanilla LN channel, this value corresponds to the actual funding outpoint that
+	/// goes on-chain when the channel is created.
+	///
+	/// If instead we're dealing with a split channel, this value corresponds to the output of a
+	/// glue transaction which sits in between the funding transaction and the commitment
+	/// transaction.
 	pub funding_outpoint: Option<chain::transaction::OutPoint>,
 	/// This channel's type, as negotiated during channel open. For old objects where this field
 	/// wasn't serialized, it will default to static_remote_key at deserialization.
-	pub channel_type_features: ChannelTypeFeatures
+	pub channel_type_features: ChannelTypeFeatures,
+	/// This value always corresponds to the actual funding outpoint. This is different to
+	/// [`ChannelTransactionParameters::funding_outpoint`], which varies depending on the type
+	/// of Lightning channel we have.
+	pub original_funding_outpoint: Option<chain::transaction::OutPoint>,
 }
 
 /// Late-bound per-channel counterparty data used to build transactions.
@@ -938,6 +949,7 @@ impl Writeable for ChannelTransactionParameters {
 			(8, self.funding_outpoint, option),
 			(10, legacy_deserialization_prevention_marker, option),
 			(11, self.channel_type_features, required),
+			(12, self.original_funding_outpoint, option),
 		});
 		Ok(())
 	}
@@ -952,6 +964,7 @@ impl Readable for ChannelTransactionParameters {
 		let mut funding_outpoint = None;
 		let mut _legacy_deserialization_prevention_marker: Option<()> = None;
 		let mut channel_type_features = None;
+		let mut original_funding_outpoint = None;
 
 		read_tlv_fields!(reader, {
 			(0, holder_pubkeys, required),
@@ -961,6 +974,7 @@ impl Readable for ChannelTransactionParameters {
 			(8, funding_outpoint, option),
 			(10, _legacy_deserialization_prevention_marker, option),
 			(11, channel_type_features, option),
+			(12, original_funding_outpoint, option),
 		});
 
 		let mut additional_features = ChannelTypeFeatures::empty();
@@ -973,7 +987,8 @@ impl Readable for ChannelTransactionParameters {
 			is_outbound_from_holder: is_outbound_from_holder.0.unwrap(),
 			counterparty_parameters,
 			funding_outpoint,
-			channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key())
+			channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()),
+			original_funding_outpoint,
 		})
 	}
 }
@@ -1099,6 +1114,7 @@ impl HolderCommitmentTransaction {
 			counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: channel_pubkeys.clone(), selected_contest_delay: 0 }),
 			funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
 			channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
+			original_funding_outpoint: None,
 		};
 		let mut counterparty_htlc_sigs = Vec::new();
 		for _ in 0..htlcs.len() {
@@ -1879,13 +1895,14 @@ mod tests {
 		let holder_pubkeys = signer.pubkeys();
 		let counterparty_pubkeys = counterparty_signer.pubkeys().clone();
 		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
-		let channel_parameters = ChannelTransactionParameters {
+		let channel_parameters = ChannelTransactionParameters {
 			holder_pubkeys: holder_pubkeys.clone(),
 			holder_selected_contest_delay: 0,
 			is_outbound_from_holder: false,
 			counterparty_parameters: Some(CounterpartyChannelTransactionParameters { pubkeys: counterparty_pubkeys.clone(), selected_contest_delay: 0 }),
 			funding_outpoint: Some(chain::transaction::OutPoint { txid: Txid::all_zeros(), index: 0 }),
 			channel_type_features: ChannelTypeFeatures::only_static_remote_key(),
+			original_funding_outpoint: None,
 		};
 		let htlcs_with_aux = Vec::new();
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 932ac3f6706..b4472c0ae88 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -713,7 +713,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
 	latest_monitor_update_id: u64,
 
-	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
+	pub(crate) holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
 	shutdown_scriptpubkey: Option<ShutdownScript>,
 	destination_script: Script,
 
@@ -933,7 +933,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 
 	/// The unique identifier used to re-derive the private key material for the channel through
 	/// [`SignerProvider::derive_channel_signer`].
-	channel_keys_id: [u8; 32],
+	pub(crate) channel_keys_id: [u8; 32],
 
 	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
 	/// store it here and only release it to the `ChannelManager` once it asks for it.
@@ -1070,6 +1070,33 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		self.channel_transaction_parameters.funding_outpoint
 	}
 
+	/// Returns the funding txo which is always the one that was confirmed on chain, even if the
+	/// channel is split.
+	pub fn get_original_funding_txo(&self) -> Option<OutPoint> {
+		if self.channel_transaction_parameters.original_funding_outpoint.is_none() {
+			self.get_funding_txo()
+		} else {
+			self.channel_transaction_parameters.original_funding_outpoint
+		}
+	}
+
+	/// Set the funding output and value of the channel.
+	fn set_funding_outpoint(&mut self, funding_outpoint: &OutPoint, channel_value_satoshis: u64, own_balance: u64)
+	{
+		self.channel_value_satoshis = channel_value_satoshis;
+		self.holder_signer.as_mut().set_channel_value_satoshis(channel_value_satoshis);
+		self.value_to_self_msat = own_balance + self.pending_outbound_htlcs.iter().map(|x| x.amount_msat).sum::<u64>();
+
+		let original_funding_outpoint = self.channel_transaction_parameters.original_funding_outpoint.unwrap_or_else(|| self.channel_transaction_parameters.funding_outpoint.unwrap());
+		self.channel_transaction_parameters.funding_outpoint = Some(funding_outpoint.clone());
+		self.channel_transaction_parameters.original_funding_outpoint = if &original_funding_outpoint != funding_outpoint {
+			Some(original_funding_outpoint.clone())
+		} else {
+			None
+		};
+	}
+
 	/// Returns the block hash in which our funding transaction was confirmed.
 	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
 		self.funding_tx_confirmed_in
@@ -2043,7 +2070,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			_ => {}
 		}
 	}
-	let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+	let monitor_update = if let Some(funding_txo) = self.get_original_funding_txo() {
 		// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
 		// returning a channel monitor update here would imply a channel monitor update before
 		// we even registered the channel monitor to begin with, which is invalid.
@@ -3880,7 +3907,7 @@ impl<SP: Deref> Channel<SP> where
 		Ok(())
 	}
 
-	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+	pub(super) fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
 		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
 		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
 		msgs::RevokeAndACK {
@@ -4878,8 +4905,54 @@
 				msgs = (Some(channel_ready), announcement_sigs);
 			}
 		}
+
+		// If we have a vanilla LN channel, this checks if the transaction
+		// spends from the actual funding output. That could be either a
+		// commitment transaction or a mutual close transaction.
+		//
+		// If we have a split channel, this checks if the transaction spends
+		// from the glue output. That could only be a commitment
+		// transaction.
+		let is_funding_or_glue_txo = |prev_outpoint: &bitcoin::OutPoint| -> bool {
+			prev_outpoint == &funding_txo.into_bitcoin_outpoint()
+		};
+
+		// This check only runs if the check above returns `false`. We know
+		// that a vanilla LN channel can only be closed by spending from the
+		// original funding output, so in this check we are only considering
+		// split channels.
+		//
+		// The other ways in which a split channel could be closed are:
+		//
+		// - Through a mutual close of the _LN_ channel, which would spend
+		// directly from the original funding output.
+		//
+		// - Through the publication of a revoked commitment transaction
+		// spending from the original funding output!
+		//
+		// And that's exactly what we check here: whether the transaction
+		// spends from the original funding output and, if it does, whether
+		// the transaction is NOT the split transaction (the only other
+		// possible option).
+		//
+		// We do not announce the closing of the LN channel with the split
+		// transaction, because that is reserved to either mutual close or
+		// commitment transactions. LDK will only react to this announcement
+		// once, so we should not waste it on the split transaction, as this
+		// can lead to loss of funds.
+		let is_final_tx_spending_from_original_funding_txo = |prev_outpoint: &bitcoin::OutPoint, outputs: &[bitcoin::TxOut]| -> bool {
+			match self.context.get_original_funding_txo().map(|x| x.into_bitcoin_outpoint()) {
+				Some(original_funding_outpoint) => {
+					// Transaction spends from actual funding output.
+					prev_outpoint == &original_funding_outpoint &&
+						// Transaction is _not_ a split transaction.
+						!(outputs.len() == 2 && outputs[0].script_pubkey == outputs[1].script_pubkey)
+				}
+				None => false,
+			}
+		};
 		for inp in tx.input.iter() {
-			if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+			if is_funding_or_glue_txo(&inp.previous_output) || is_final_tx_spending_from_original_funding_txo(&inp.previous_output, &tx.output) {
 				log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
 				return Err(ClosureReason::CommitmentTxConfirmed);
 			}
@@ -5251,6 +5324,7 @@ impl<SP: Deref> Channel<SP> where
 			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
 			// txid of that interactive transaction, else we MUST NOT set it.
 			next_funding_txid: None,
+			sub_channel_state: None,
 		}
 	}
 
@@ -5499,9 +5573,9 @@ impl<SP: Deref> Channel<SP> where
 			signature = res.0;
 			htlc_signatures = res.1;
 
-			log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
+			log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} with value {} -> {} in channel {}",
 				encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
-				&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
+				&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), self.context.channel_value_satoshis,
 				log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
 
 			for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
@@ -5666,6 +5740,21 @@ impl<SP: Deref> Channel<SP> where
 		})
 		.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
 	}
+
+	/// Set the funding output and value of the channel, returning a `ChannelMonitorUpdate`
+	/// containing a commitment for the new funding output if requested.
+	pub fn set_funding_outpoint<L: Deref>(&mut self, funding_outpoint: &OutPoint, channel_value_satoshis: u64, own_balance: u64, need_commitment: bool, logger: &L) -> Option<ChannelMonitorUpdate>
+	where
+		L::Target: Logger
+	{
+		self.context.set_funding_outpoint(funding_outpoint, channel_value_satoshis, own_balance);
+
+		if need_commitment {
+			let monitor_update = self.build_commitment_no_status_check(logger);
+			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+			self.push_ret_blockable_mon_update(monitor_update)
+		} else { None }
+	}
 }
 
 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
@@ -5832,7 +5921,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				is_outbound_from_holder: true,
 				counterparty_parameters: None,
 				funding_outpoint: None,
-				channel_type_features: channel_type.clone()
+				channel_type_features: channel_type.clone(),
+				original_funding_outpoint: None,
 			},
 			funding_transaction: None,
 			is_batch_funding: None,
@@ -6485,7 +6575,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 					pubkeys: counterparty_pubkeys,
 				}),
 				funding_outpoint: None,
-				channel_type_features: channel_type.clone()
+				channel_type_features: channel_type.clone(),
+				original_funding_outpoint: None,
 			},
 			funding_transaction: None,
 			is_batch_funding: None,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 340f82fce1a..ca6331e093d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -21,6 +21,7 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 use bitcoin::network::constants::Network;
+use bitcoin::Script;
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -79,6 +80,7 @@ use core::ops::Deref;
 // Re-export this for use in the public API.
 pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
 use crate::ln::script::ShutdownScript;
+use super::msgs::{CommitmentSigned, RevokeAndACK};
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -783,6 +785,18 @@ struct PendingInboundPayment {
 	min_value_msat: Option<u64>,
 }
 
+/// A structure holding a reference to a channel while under lock.
+pub struct ChannelLock<'a, SP: Deref> where SP::Target: SignerProvider {
+	channel: &'a mut Channel<SP>,
+	mon_update_blocked: bool,
+}
+
+impl<'a, SP: Deref> ChannelLock<'a, SP> where SP::Target: SignerProvider {
+	fn get_channel(&mut self) -> &mut Channel<SP> {
+		self.channel
+	}
+}
+
 /// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
 /// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -1630,6 +1644,17 @@ pub struct ChannelDetails {
 	///
 	/// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
 	pub config: Option<ChannelConfig>,
+	/// The late-bound redeemscript used for the funding output.
+	pub funding_redeemscript: Option<Script>,