Skip to content

Commit cec8ce0

Browse files
authored
Merge pull request #1212 from TheBlueMatt/2021-12-timeout-graph
Add a method to prune stale channels from NetworkGraph
2 parents 6e27ca0 + 73e8dc4 commit cec8ce0

File tree

5 files changed

+370
-500
lines changed

5 files changed

+370
-500
lines changed

lightning-background-processor/Cargo.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ edition = "2018"
1111

1212
[dependencies]
1313
bitcoin = "0.27"
14-
lightning = { version = "0.0.103", path = "../lightning", features = ["allow_wallclock_use"] }
14+
lightning = { version = "0.0.103", path = "../lightning", features = ["std"] }
1515
lightning-persister = { version = "0.0.103", path = "../lightning-persister" }
1616

1717
[dev-dependencies]

lightning-background-processor/src/lib.rs

+22-1
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,8 @@ use std::ops::Deref;
3434
/// [`ChannelManager`] persistence should be done in the background.
3535
/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
3636
/// at the appropriate intervals.
37+
/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to
38+
/// [`BackgroundProcessor::start`]).
3739
///
3840
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
3941
/// upon as doing so may result in high latency.
@@ -68,6 +70,9 @@ const PING_TIMER: u64 = 30;
6870
#[cfg(test)]
6971
const PING_TIMER: u64 = 1;
7072

73+
/// Prune the network graph of stale entries hourly.
74+
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
75+
7176
/// Trait which handles persisting a [`ChannelManager`] to disk.
7277
///
7378
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
@@ -203,13 +208,16 @@ impl BackgroundProcessor {
203208
let stop_thread = Arc::new(AtomicBool::new(false));
204209
let stop_thread_clone = stop_thread.clone();
205210
let handle = thread::spawn(move || -> Result<(), std::io::Error> {
206-
let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler };
211+
let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) };
207212

208213
log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
209214
channel_manager.timer_tick_occurred();
210215

211216
let mut last_freshness_call = Instant::now();
212217
let mut last_ping_call = Instant::now();
218+
let mut last_prune_call = Instant::now();
219+
let mut have_pruned = false;
220+
213221
loop {
214222
peer_manager.process_events();
215223
channel_manager.process_pending_events(&event_handler);
@@ -247,6 +255,19 @@ impl BackgroundProcessor {
247255
peer_manager.timer_tick_occurred();
248256
last_ping_call = Instant::now();
249257
}
258+
259+
// Note that we want to run a graph prune once not long after startup before
260+
// falling back to our usual hourly prunes. This avoids short-lived clients never
261+
// pruning their network graph. We run once 60 seconds after startup before
262+
// continuing our normal cadence.
263+
if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { 60 } {
264+
if let Some(ref handler) = net_graph_msg_handler {
265+
log_trace!(logger, "Pruning network graph of stale entries");
266+
handler.network_graph().remove_stale_channels();
267+
last_prune_call = Instant::now();
268+
have_pruned = true;
269+
}
270+
}
250271
}
251272
});
252273
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }

lightning/Cargo.toml

-4
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@ Still missing tons of error-handling. See GitHub issues for suggested projects i
1111
"""
1212

1313
[features]
14-
allow_wallclock_use = []
1514
fuzztarget = ["bitcoin/fuzztarget", "regex"]
1615
# Internal test utilities exposed to other repo crates
1716
_test_utils = ["hex", "regex", "bitcoin/bitcoinconsensus"]
@@ -53,6 +52,3 @@ secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"]
5352
version = "0.27"
5453
default-features = false
5554
features = ["bitcoinconsensus", "secp-recovery"]
56-
57-
[package.metadata.docs.rs]
58-
features = ["allow_wallclock_use"] # When https://github.com/rust-lang/rust/issues/43781 complies with our MSRV, we can add nice banners in the docs for the methods behind this feature-gate.

lightning/src/ln/channelmanager.rs

+7-5
Original file line numberDiff line numberDiff line change
@@ -67,10 +67,11 @@ use io::{Cursor, Read};
6767
use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
6868
use core::sync::atomic::{AtomicUsize, Ordering};
6969
use core::time::Duration;
70-
#[cfg(any(test, feature = "allow_wallclock_use"))]
71-
use std::time::Instant;
7270
use core::ops::Deref;
7371

72+
#[cfg(any(test, feature = "std"))]
73+
use std::time::Instant;
74+
7475
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
7576
//
7677
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
@@ -5110,8 +5111,9 @@ where
51105111
/// indicating whether persistence is necessary. Only one listener on
51115112
/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
51125113
/// up.
5113-
/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
5114-
#[cfg(any(test, feature = "allow_wallclock_use"))]
5114+
///
5115+
/// Note that this method is not available with the `no-std` feature.
5116+
#[cfg(any(test, feature = "std"))]
51155117
pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
51165118
self.persistence_notifier.wait_timeout(max_wait)
51175119
}
@@ -5406,7 +5408,7 @@ impl PersistenceNotifier {
54065408
}
54075409
}
54085410

5409-
#[cfg(any(test, feature = "allow_wallclock_use"))]
5411+
#[cfg(any(test, feature = "std"))]
54105412
fn wait_timeout(&self, max_wait: Duration) -> bool {
54115413
let current_time = Instant::now();
54125414
loop {

0 commit comments

Comments
 (0)