@@ -24,6 +24,7 @@ use crate::{
     storage::{MutinyStorage, DEVICE_ID_KEY, KEYCHAIN_STORE_KEY, NEED_FULL_SYNC_KEY},
 };
 use anyhow::anyhow;
+use async_lock::RwLock;
 use bdk::chain::{BlockId, ConfirmationTime};
 use bdk::{wallet::AddressIndex, FeeRate, LocalUtxo};
 use bitcoin::blockdata::script;
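Note: the new use async_lock::RwLock; line assumes the async-lock crate is available to this crate, i.e. declared in Cargo.toml (something like async-lock = "2" under [dependencies]; that exact version is a guess, not part of this diff). Without that dependency the import will not resolve.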
@@ -390,7 +391,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {
         let nodes = if c.safe_mode {
             // If safe mode is enabled, we don't start any nodes
             log_warn!(logger, "Safe mode enabled, not starting any nodes");
-            Arc::new(Mutex::new(HashMap::new()))
+            Arc::new(RwLock::new(HashMap::new()))
         } else {
             // Remove the archived nodes, we don't need to start them up.
             let unarchived_nodes = node_storage
@@ -453,7 +454,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {

             log_info!(logger, "inserted updated nodes");

-            Arc::new(Mutex::new(nodes_map))
+            Arc::new(RwLock::new(nodes_map))
         };

         let price_cache = self
@@ -473,7 +474,7 @@ impl<S: MutinyStorage> NodeManagerBuilder<S> {
             chain,
             fee_estimator,
             storage: self.storage,
-            node_storage: Mutex::new(node_storage),
+            node_storage: RwLock::new(node_storage),
             nodes,
             #[cfg(target_arch = "wasm32")]
             websocket_proxy_addr,
@@ -511,8 +512,8 @@ pub struct NodeManager<S: MutinyStorage> {
     chain: Arc<MutinyChain<S>>,
     fee_estimator: Arc<MutinyFeeEstimator<S>>,
     pub(crate) storage: S,
-    pub(crate) node_storage: Mutex<NodeStorage>,
-    pub(crate) nodes: Arc<Mutex<HashMap<PublicKey, Arc<Node<S>>>>>,
+    pub(crate) node_storage: RwLock<NodeStorage>,
+    pub(crate) nodes: Arc<RwLock<HashMap<PublicKey, Arc<Node<S>>>>>,
     pub(crate) lsp_config: Option<LspConfig>,
     pub(crate) logger: Arc<MutinyLogger>,
     bitcoin_price_cache: Arc<Mutex<HashMap<String, (f32, Duration)>>>,
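The struct changes above swap Mutex for async_lock::RwLock on node_storage and nodes, so read-only callers no longer serialize each other while mutating callers still get exclusive access. A minimal sketch of the behavior this relies on, using the async-lock crate directly with illustrative types (not the real Mutiny ones):

use async_lock::RwLock;
use futures::executor::block_on;
use std::collections::HashMap;
use std::sync::Arc;

fn main() {
    block_on(async {
        // Stand-in for the `nodes` field: a shared map behind an async RwLock.
        let nodes: Arc<RwLock<HashMap<String, u32>>> = Arc::new(RwLock::new(HashMap::new()));

        {
            // write() gives exclusive access, like the old Mutex did.
            let mut guard = nodes.write().await;
            guard.insert("node-a".to_string(), 1);
        } // write guard dropped here

        // read() guards can coexist, so list-style calls can run concurrently.
        let r1 = nodes.read().await;
        let r2 = nodes.read().await;
        assert_eq!(r1.get("node-a"), r2.get("node-a"));
    });
}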
@@ -532,7 +533,7 @@ impl<S: MutinyStorage> NodeManager<S> {
         &self,
         pk: Option<&PublicKey>,
     ) -> Result<Arc<Node<S>>, MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let node = match pk {
             Some(pubkey) => nodes.get(pubkey),
             None => nodes.iter().next().map(|(_, node)| node),
@@ -544,7 +545,7 @@ impl<S: MutinyStorage> NodeManager<S> {
     /// Returns after node has been stopped.
     pub async fn stop(&self) -> Result<(), MutinyError> {
         self.stop.swap(true, Ordering::Relaxed);
-        let mut nodes = self.nodes.lock().await;
+        let mut nodes = self.nodes.write().await;
         let node_futures = nodes.iter().map(|(_, n)| async {
             match n.stop().await {
                 Ok(_) => {
@@ -1050,7 +1051,7 @@ impl<S: MutinyStorage> NodeManager<S> {
             return Err(MutinyError::WalletOperationFailed);
         };

-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let lightning_msats: u64 = nodes
             .iter()
             .flat_map(|(_, n)| n.channel_manager.list_channels())
@@ -1097,13 +1098,17 @@ impl<S: MutinyStorage> NodeManager<S> {
     /// This should be called before syncing the on-chain wallet
     /// to ensure that new on-chain transactions are picked up.
     async fn sync_ldk(&self) -> Result<(), MutinyError> {
-        let nodes = self.nodes.lock().await;
+        // get nodes hashmap, immediately drop lock because sync can take a while
+        let nodes = {
+            let nodes = self.nodes.read().await;
+            nodes.deref().clone()
+        };

         // Lock all the nodes so we can sync them, make sure we keep the locks
         // in scope so they don't get dropped and unlocked.
         let futs = nodes
-            .iter()
-            .map(|(_, node)| node.sync_lock.lock())
+            .values()
+            .map(|node| node.sync_lock.lock())
             .collect::<Vec<_>>();
         let _locks = join_all(futs).await;

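The sync_ldk hunk above trades a long-held lock for a short one: the read guard is taken only to clone the HashMap of Arc<Node> pointers, then dropped before the slow per-node sync work begins. A rough sketch of that pattern with placeholder types (Node and sync_all here are illustrative stand-ins, not the real Mutiny items):

use async_lock::RwLock;
use futures::executor::block_on;
use std::collections::HashMap;
use std::ops::Deref;
use std::sync::Arc;

struct Node {
    id: u32,
}

async fn sync_all(nodes_lock: &RwLock<HashMap<u32, Arc<Node>>>) {
    // Hold the read lock only long enough to clone the map of Arcs
    // (cheap: it copies pointers, not the nodes themselves).
    let nodes = {
        let guard = nodes_lock.read().await;
        guard.deref().clone()
    }; // read guard dropped here, so writers are not blocked during the sync

    for node in nodes.values() {
        // the long-running per-node work happens without holding the map lock
        let _ = node.id;
    }
}

fn main() {
    block_on(async {
        let nodes = RwLock::new(HashMap::from([(1, Arc::new(Node { id: 1 }))]));
        sync_all(&nodes).await;
    });
}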
@@ -1224,7 +1229,7 @@ impl<S: MutinyStorage> NodeManager<S> {
     /// If the node has any active channels it will fail to archive
     #[allow(dead_code)]
     pub(crate) async fn archive_node(&self, pubkey: PublicKey) -> Result<(), MutinyError> {
-        if let Some(node) = self.nodes.lock().await.get(&pubkey) {
+        if let Some(node) = self.nodes.read().await.get(&pubkey) {
             // disallow archiving nodes with active channels or
             // claimable on-chain funds, so we don't lose funds
             if node.channel_manager.list_channels().is_empty()
@@ -1244,7 +1249,7 @@ impl<S: MutinyStorage> NodeManager<S> {
     /// If the node has any active channels it will fail to archive
     #[allow(dead_code)]
     pub(crate) async fn archive_node_by_uuid(&self, node_uuid: String) -> Result<(), MutinyError> {
-        let mut node_storage = self.node_storage.lock().await;
+        let mut node_storage = self.node_storage.write().await;

         match node_storage.nodes.get(&node_uuid).map(|n| n.to_owned()) {
             None => Err(anyhow!("Could not find node to archive").into()),
@@ -1262,7 +1267,7 @@ impl<S: MutinyStorage> NodeManager<S> {

     /// Lists the pubkeys of the lightning node in the manager.
     pub async fn list_nodes(&self) -> Result<Vec<PublicKey>, MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let peers = nodes.iter().map(|(_, n)| n.pubkey).collect();
         Ok(peers)
     }
@@ -1279,7 +1284,7 @@ impl<S: MutinyStorage> NodeManager<S> {

         // check if any nodes have active channels with the current LSP
         // if they do, we can't change the LSP
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         if nodes.iter().any(|(_, n)| {
             if let Some(lsp_pk) = n.lsp_client.as_ref().map(|x| x.get_lsp_pubkey()) {
                 !n.channel_manager
@@ -1294,7 +1299,7 @@ impl<S: MutinyStorage> NodeManager<S> {
         drop(nodes);

         // edit node storage
-        let mut node_storage = self.node_storage.lock().await;
+        let mut node_storage = self.node_storage.write().await;
         node_storage.nodes.iter_mut().for_each(|(_, n)| {
             n.lsp = lsp_config.clone();
         });
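The LSP-change hunks above keep the existing ordering: inspect the node map under a read lock, drop that guard explicitly, and only then take the write lock on node_storage, so the two guards are never held at the same time. A small sketch of that ordering with made-up types (Settings and update_setting are illustrative only, and the check is simplified):

use async_lock::RwLock;
use futures::executor::block_on;
use std::collections::HashMap;

struct Settings {
    lsp: Option<String>,
}

async fn update_setting(
    nodes: &RwLock<HashMap<u32, String>>,
    settings: &RwLock<Settings>,
    new_lsp: String,
) -> Result<(), &'static str> {
    // Shared read access is enough for the pre-flight check.
    let nodes_guard = nodes.read().await;
    if nodes_guard.values().any(|current| *current == new_lsp) {
        return Err("a node still depends on this setting");
    }
    drop(nodes_guard); // release the read lock before acquiring the write lock

    // Exclusive access only for the actual mutation.
    let mut settings = settings.write().await;
    settings.lsp = Some(new_lsp);
    Ok(())
}

fn main() {
    block_on(async {
        let nodes = RwLock::new(HashMap::new());
        let settings = RwLock::new(Settings { lsp: None });
        update_setting(&nodes, &settings, "new-lsp".to_string())
            .await
            .expect("update should succeed");
    });
}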
@@ -1374,7 +1379,7 @@ impl<S: MutinyStorage> NodeManager<S> {
         amount: u64,
         labels: Vec<String>,
     ) -> Result<(MutinyInvoice, u64), MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let use_phantom = nodes.len() > 1 && self.lsp_config.is_none();
         if nodes.len() == 0 {
             return Err(MutinyError::InvoiceCreationFailed);
@@ -1448,7 +1453,7 @@ impl<S: MutinyStorage> NodeManager<S> {
         &self,
         hash: &sha256::Hash,
     ) -> Result<MutinyInvoice, MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         for (_, node) in nodes.iter() {
             if let Ok(inv) = node.get_invoice_by_hash(hash) {
                 return Ok(inv);
@@ -1462,7 +1467,7 @@ impl<S: MutinyStorage> NodeManager<S> {
         &self,
         user_channel_id: u128,
     ) -> Result<ChannelClosure, MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         for (_, node) in nodes.iter() {
             if let Ok(Some(closure)) = node.get_channel_closure(user_channel_id) {
                 return Ok(closure);
@@ -1474,7 +1479,7 @@ impl<S: MutinyStorage> NodeManager<S> {

     pub async fn list_channel_closures(&self) -> Result<Vec<ChannelClosure>, MutinyError> {
         let mut channels: Vec<ChannelClosure> = vec![];
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         for (_, node) in nodes.iter() {
             if let Ok(mut invs) = node.get_channel_closures() {
                 channels.append(&mut invs)
@@ -1593,7 +1598,7 @@ impl<S: MutinyStorage> NodeManager<S> {
             return Err(MutinyError::ChannelClosingFailed);
         }

-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let channel_opt: Option<(Arc<Node<S>>, ChannelDetails)> =
             nodes.iter().find_map(|(_, n)| {
                 n.channel_manager
@@ -1679,7 +1684,7 @@ impl<S: MutinyStorage> NodeManager<S> {

     /// Lists all the channels for all the nodes in the node manager.
     pub async fn list_channels(&self) -> Result<Vec<MutinyChannel>, MutinyError> {
-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;
         let channels: Vec<ChannelDetails> = nodes
             .iter()
             .flat_map(|(_, n)| n.channel_manager.list_channels())
@@ -1709,7 +1714,7 @@ impl<S: MutinyStorage> NodeManager<S> {
             })
             .collect();

-        let nodes = self.nodes.lock().await;
+        let nodes = self.nodes.read().await;

         // get peers we are connected to
         let connected_peers: Vec<PublicKey> = nodes
@@ -1950,7 +1955,7 @@ pub(crate) async fn create_new_node_from_node_manager<S: MutinyStorage>(
     // Begin with a mutex lock so that nothing else can
     // save or alter the node list while it is about to
     // be saved.
-    let mut node_mutex = node_manager.node_storage.lock().await;
+    let mut node_mutex = node_manager.node_storage.write().await;

     // Get the current nodes and their bip32 indices
     // so that we can create another node with the next.
@@ -2009,15 +2014,11 @@ pub(crate) async fn create_new_node_from_node_manager<S: MutinyStorage>(

    let new_node = node_builder.build().await?;
    let node_pubkey = new_node.pubkey;
-    node_manager
-        .nodes
-        .clone()
-        .lock()
-        .await
-        .insert(node_pubkey, Arc::new(new_node));
+    let mut nodes = node_manager.nodes.write().await;
+    nodes.insert(node_pubkey, Arc::new(new_node));

    Ok(NodeIdentity {
-        uuid: next_node_uuid.clone(),
+        uuid: next_node_uuid,
        pubkey: node_pubkey,
    })
 }
@@ -2127,7 +2128,7 @@ mod tests {

         {
             let node_identity = nm.new_node().await.expect("should create new node");
-            let node_storage = nm.node_storage.lock().await;
+            let node_storage = nm.node_storage.read().await;
             assert_ne!("", node_identity.uuid);
             assert_ne!("", node_identity.pubkey.to_string());
             assert_eq!(1, node_storage.nodes.len());
@@ -2138,7 +2139,7 @@ mod tests {

         {
             let node_identity = nm.new_node().await.expect("node manager should initialize");
-            let node_storage = nm.node_storage.lock().await;
+            let node_storage = nm.node_storage.read().await;

             assert_ne!("", node_identity.uuid);
             assert_ne!("", node_identity.pubkey.to_string());