Skip to content
This repository was archived by the owner on Feb 3, 2025. It is now read-only.

Commit 10e197e

Browse files
authored
Merge pull request #1010 from MutinyWallet/ldk-sync-lock
Make node manager locks RwLocks
2 parents e2e306b + 1ac3e23 commit 10e197e

File tree

2 files changed

+35
-34
lines changed

2 files changed

+35
-34
lines changed

mutiny-core/src/lib.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1067,7 +1067,7 @@ impl<S: MutinyStorage> MutinyWallet<S> {
10671067
if self
10681068
.node_manager
10691069
.nodes
1070-
.lock()
1070+
.read()
10711071
.await
10721072
.iter()
10731073
.flat_map(|(_, n)| n.channel_manager.list_channels())

mutiny-core/src/nodemanager.rs

Lines changed: 34 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ use crate::{
2424
storage::{MutinyStorage, DEVICE_ID_KEY, KEYCHAIN_STORE_KEY, NEED_FULL_SYNC_KEY},
2525
};
2626
use anyhow::anyhow;
27+
use async_lock::RwLock;
2728
use bdk::chain::{BlockId, ConfirmationTime};
2829
use bdk::{wallet::AddressIndex, FeeRate, LocalUtxo};
2930
use bitcoin::blockdata::script;
@@ -390,7 +391,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {
390391
let nodes = if c.safe_mode {
391392
// If safe mode is enabled, we don't start any nodes
392393
log_warn!(logger, "Safe mode enabled, not starting any nodes");
393-
Arc::new(Mutex::new(HashMap::new()))
394+
Arc::new(RwLock::new(HashMap::new()))
394395
} else {
395396
// Remove the archived nodes, we don't need to start them up.
396397
let unarchived_nodes = node_storage
@@ -453,7 +454,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {
453454

454455
log_info!(logger, "inserted updated nodes");
455456

456-
Arc::new(Mutex::new(nodes_map))
457+
Arc::new(RwLock::new(nodes_map))
457458
};
458459

459460
let price_cache = self
@@ -473,7 +474,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {
473474
chain,
474475
fee_estimator,
475476
storage: self.storage,
476-
node_storage: Mutex::new(node_storage),
477+
node_storage: RwLock::new(node_storage),
477478
nodes,
478479
#[cfg(target_arch = "wasm32")]
479480
websocket_proxy_addr,
@@ -511,8 +512,8 @@ pub struct NodeManager<S: MutinyStorage> {
511512
chain: Arc<MutinyChain<S>>,
512513
fee_estimator: Arc<MutinyFeeEstimator<S>>,
513514
pub(crate) storage: S,
514-
pub(crate) node_storage: Mutex<NodeStorage>,
515-
pub(crate) nodes: Arc<Mutex<HashMap<PublicKey, Arc<Node<S>>>>>,
515+
pub(crate) node_storage: RwLock<NodeStorage>,
516+
pub(crate) nodes: Arc<RwLock<HashMap<PublicKey, Arc<Node<S>>>>>,
516517
pub(crate) lsp_config: Option<LspConfig>,
517518
pub(crate) logger: Arc<MutinyLogger>,
518519
bitcoin_price_cache: Arc<Mutex<HashMap<String, (f32, Duration)>>>,
@@ -532,7 +533,7 @@ impl<S: MutinyStorage> NodeManager<S> {
532533
&self,
533534
pk: Option<&PublicKey>,
534535
) -> Result<Arc<Node<S>>, MutinyError> {
535-
let nodes = self.nodes.lock().await;
536+
let nodes = self.nodes.read().await;
536537
let node = match pk {
537538
Some(pubkey) => nodes.get(pubkey),
538539
None => nodes.iter().next().map(|(_, node)| node),
@@ -544,7 +545,7 @@ impl<S: MutinyStorage> NodeManager<S> {
544545
/// Returns after node has been stopped.
545546
pub async fn stop(&self) -> Result<(), MutinyError> {
546547
self.stop.swap(true, Ordering::Relaxed);
547-
let mut nodes = self.nodes.lock().await;
548+
let mut nodes = self.nodes.write().await;
548549
let node_futures = nodes.iter().map(|(_, n)| async {
549550
match n.stop().await {
550551
Ok(_) => {
@@ -1050,7 +1051,7 @@ impl<S: MutinyStorage> NodeManager<S> {
10501051
return Err(MutinyError::WalletOperationFailed);
10511052
};
10521053

1053-
let nodes = self.nodes.lock().await;
1054+
let nodes = self.nodes.read().await;
10541055
let lightning_msats: u64 = nodes
10551056
.iter()
10561057
.flat_map(|(_, n)| n.channel_manager.list_channels())
@@ -1097,13 +1098,17 @@ impl<S: MutinyStorage> NodeManager<S> {
10971098
/// This should be called before syncing the on-chain wallet
10981099
/// to ensure that new on-chain transactions are picked up.
10991100
async fn sync_ldk(&self) -> Result<(), MutinyError> {
1100-
let nodes = self.nodes.lock().await;
1101+
// get nodes hashmap, immediately drop lock because sync can take a while
1102+
let nodes = {
1103+
let nodes = self.nodes.read().await;
1104+
nodes.deref().clone()
1105+
};
11011106

11021107
// Lock all the nodes so we can sync them, make sure we keep the locks
11031108
// in scope so they don't get dropped and unlocked.
11041109
let futs = nodes
1105-
.iter()
1106-
.map(|(_, node)| node.sync_lock.lock())
1110+
.values()
1111+
.map(|node| node.sync_lock.lock())
11071112
.collect::<Vec<_>>();
11081113
let _locks = join_all(futs).await;
11091114

@@ -1224,7 +1229,7 @@ impl<S: MutinyStorage> NodeManager<S> {
12241229
/// If the node has any active channels it will fail to archive
12251230
#[allow(dead_code)]
12261231
pub(crate) async fn archive_node(&self, pubkey: PublicKey) -> Result<(), MutinyError> {
1227-
if let Some(node) = self.nodes.lock().await.get(&pubkey) {
1232+
if let Some(node) = self.nodes.read().await.get(&pubkey) {
12281233
// disallow archiving nodes with active channels or
12291234
// claimable on-chain funds, so we don't lose funds
12301235
if node.channel_manager.list_channels().is_empty()
@@ -1244,7 +1249,7 @@ impl<S: MutinyStorage> NodeManager<S> {
12441249
/// If the node has any active channels it will fail to archive
12451250
#[allow(dead_code)]
12461251
pub(crate) async fn archive_node_by_uuid(&self, node_uuid: String) -> Result<(), MutinyError> {
1247-
let mut node_storage = self.node_storage.lock().await;
1252+
let mut node_storage = self.node_storage.write().await;
12481253

12491254
match node_storage.nodes.get(&node_uuid).map(|n| n.to_owned()) {
12501255
None => Err(anyhow!("Could not find node to archive").into()),
@@ -1262,7 +1267,7 @@ impl<S: MutinyStorage> NodeManager<S> {
12621267

12631268
/// Lists the pubkeys of the lightning node in the manager.
12641269
pub async fn list_nodes(&self) -> Result<Vec<PublicKey>, MutinyError> {
1265-
let nodes = self.nodes.lock().await;
1270+
let nodes = self.nodes.read().await;
12661271
let peers = nodes.iter().map(|(_, n)| n.pubkey).collect();
12671272
Ok(peers)
12681273
}
@@ -1279,7 +1284,7 @@ impl<S: MutinyStorage> NodeManager<S> {
12791284

12801285
// check if any nodes have active channels with the current LSP
12811286
// if they do, we can't change the LSP
1282-
let nodes = self.nodes.lock().await;
1287+
let nodes = self.nodes.read().await;
12831288
if nodes.iter().any(|(_, n)| {
12841289
if let Some(lsp_pk) = n.lsp_client.as_ref().map(|x| x.get_lsp_pubkey()) {
12851290
!n.channel_manager
@@ -1294,7 +1299,7 @@ impl<S: MutinyStorage> NodeManager<S> {
12941299
drop(nodes);
12951300

12961301
// edit node storage
1297-
let mut node_storage = self.node_storage.lock().await;
1302+
let mut node_storage = self.node_storage.write().await;
12981303
node_storage.nodes.iter_mut().for_each(|(_, n)| {
12991304
n.lsp = lsp_config.clone();
13001305
});
@@ -1374,7 +1379,7 @@ impl<S: MutinyStorage> NodeManager<S> {
13741379
amount: u64,
13751380
labels: Vec<String>,
13761381
) -> Result<(MutinyInvoice, u64), MutinyError> {
1377-
let nodes = self.nodes.lock().await;
1382+
let nodes = self.nodes.read().await;
13781383
let use_phantom = nodes.len() > 1 && self.lsp_config.is_none();
13791384
if nodes.len() == 0 {
13801385
return Err(MutinyError::InvoiceCreationFailed);
@@ -1448,7 +1453,7 @@ impl<S: MutinyStorage> NodeManager<S> {
14481453
&self,
14491454
hash: &sha256::Hash,
14501455
) -> Result<MutinyInvoice, MutinyError> {
1451-
let nodes = self.nodes.lock().await;
1456+
let nodes = self.nodes.read().await;
14521457
for (_, node) in nodes.iter() {
14531458
if let Ok(inv) = node.get_invoice_by_hash(hash) {
14541459
return Ok(inv);
@@ -1462,7 +1467,7 @@ impl<S: MutinyStorage> NodeManager<S> {
14621467
&self,
14631468
user_channel_id: u128,
14641469
) -> Result<ChannelClosure, MutinyError> {
1465-
let nodes = self.nodes.lock().await;
1470+
let nodes = self.nodes.read().await;
14661471
for (_, node) in nodes.iter() {
14671472
if let Ok(Some(closure)) = node.get_channel_closure(user_channel_id) {
14681473
return Ok(closure);
@@ -1474,7 +1479,7 @@ impl<S: MutinyStorage> NodeManager<S> {
14741479

14751480
pub async fn list_channel_closures(&self) -> Result<Vec<ChannelClosure>, MutinyError> {
14761481
let mut channels: Vec<ChannelClosure> = vec![];
1477-
let nodes = self.nodes.lock().await;
1482+
let nodes = self.nodes.read().await;
14781483
for (_, node) in nodes.iter() {
14791484
if let Ok(mut invs) = node.get_channel_closures() {
14801485
channels.append(&mut invs)
@@ -1593,7 +1598,7 @@ impl<S: MutinyStorage> NodeManager<S> {
15931598
return Err(MutinyError::ChannelClosingFailed);
15941599
}
15951600

1596-
let nodes = self.nodes.lock().await;
1601+
let nodes = self.nodes.read().await;
15971602
let channel_opt: Option<(Arc<Node<S>>, ChannelDetails)> =
15981603
nodes.iter().find_map(|(_, n)| {
15991604
n.channel_manager
@@ -1679,7 +1684,7 @@ impl<S: MutinyStorage> NodeManager<S> {
16791684

16801685
/// Lists all the channels for all the nodes in the node manager.
16811686
pub async fn list_channels(&self) -> Result<Vec<MutinyChannel>, MutinyError> {
1682-
let nodes = self.nodes.lock().await;
1687+
let nodes = self.nodes.read().await;
16831688
let channels: Vec<ChannelDetails> = nodes
16841689
.iter()
16851690
.flat_map(|(_, n)| n.channel_manager.list_channels())
@@ -1709,7 +1714,7 @@ impl<S: MutinyStorage> NodeManager<S> {
17091714
})
17101715
.collect();
17111716

1712-
let nodes = self.nodes.lock().await;
1717+
let nodes = self.nodes.read().await;
17131718

17141719
// get peers we are connected to
17151720
let connected_peers: Vec<PublicKey> = nodes
@@ -1950,7 +1955,7 @@ pub(crate) async fn create_new_node_from_node_manager<S: MutinyStorage>(
19501955
// Begin with a mutex lock so that nothing else can
19511956
// save or alter the node list while it is about to
19521957
// be saved.
1953-
let mut node_mutex = node_manager.node_storage.lock().await;
1958+
let mut node_mutex = node_manager.node_storage.write().await;
19541959

19551960
// Get the current nodes and their bip32 indices
19561961
// so that we can create another node with the next.
@@ -2009,15 +2014,11 @@ pub(crate) async fn create_new_node_from_node_manager<S: MutinyStorage>(
20092014

20102015
let new_node = node_builder.build().await?;
20112016
let node_pubkey = new_node.pubkey;
2012-
node_manager
2013-
.nodes
2014-
.clone()
2015-
.lock()
2016-
.await
2017-
.insert(node_pubkey, Arc::new(new_node));
2017+
let mut nodes = node_manager.nodes.write().await;
2018+
nodes.insert(node_pubkey, Arc::new(new_node));
20182019

20192020
Ok(NodeIdentity {
2020-
uuid: next_node_uuid.clone(),
2021+
uuid: next_node_uuid,
20212022
pubkey: node_pubkey,
20222023
})
20232024
}
@@ -2127,7 +2128,7 @@ mod tests {
21272128

21282129
{
21292130
let node_identity = nm.new_node().await.expect("should create new node");
2130-
let node_storage = nm.node_storage.lock().await;
2131+
let node_storage = nm.node_storage.read().await;
21312132
assert_ne!("", node_identity.uuid);
21322133
assert_ne!("", node_identity.pubkey.to_string());
21332134
assert_eq!(1, node_storage.nodes.len());
@@ -2138,7 +2139,7 @@ mod tests {
21382139

21392140
{
21402141
let node_identity = nm.new_node().await.expect("node manager should initialize");
2141-
let node_storage = nm.node_storage.lock().await;
2142+
let node_storage = nm.node_storage.read().await;
21422143

21432144
assert_ne!("", node_identity.uuid);
21442145
assert_ne!("", node_identity.pubkey.to_string());

0 commit comments

Comments (0)