@@ -2156,21 +2156,21 @@ where L::Target: Logger {
 // The general routing idea is the following:
 // 1. Fill first/last hops communicated by the caller.
 // 2. Attempt to construct a path from payer to payee for transferring
-// any ~sufficient (described later) value.
-// If succeed, remember which channels were used and how much liquidity they have available,
-// so that future paths don't rely on the same liquidity.
+// any ~sufficient (described later) value.
+// If succeed, remember which channels were used and how much liquidity they have available,
+// so that future paths don't rely on the same liquidity.
 // 3. Proceed to the next step if:
-// - we hit the recommended target value;
-// - OR if we could not construct a new path. Any next attempt will fail too.
-// Otherwise, repeat step 2.
+// - we hit the recommended target value;
+// - OR if we could not construct a new path. Any next attempt will fail too.
+// Otherwise, repeat step 2.
 // 4. See if we managed to collect paths which aggregately are able to transfer target value
-// (not recommended value).
+// (not recommended value).
 // 5. If yes, proceed. If not, fail routing.
 // 6. Select the paths which have the lowest cost (fee plus scorer penalty) per amount
-// transferred up to the transfer target value.
+// transferred up to the transfer target value.
 // 7. Reduce the value of the last path until we are sending only the target value.
 // 8. If our maximum channel saturation limit caused us to pick two identical paths, combine
-// them so that we're not sending two HTLCs along the same path.
+// them so that we're not sending two HTLCs along the same path.

 // As for the actual search algorithm, we do a payee-to-payer Dijkstra's sorting by each node's
 // distance from the payee
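Taken together, steps 2-8 above describe a collect-then-trim loop. The following is a minimal, self-contained sketch of that control flow, not LDK's actual implementation: `find_cheapest_path`, `CandidatePath`, and the plain `HashMap` liquidity bookkeeping are illustrative placeholders (the real router runs a payee-to-payer Dijkstra over the network graph and tracks liquidity per candidate hop).

```rust
use std::collections::HashMap;

#[derive(Clone, Debug)]
struct CandidatePath {
	channel_ids: Vec<u64>,
	value_msat: u64,
	cost_msat: u64, // fee plus scorer penalty
}

// Hypothetical single-path search: returns the cheapest path that can still carry value
// given the liquidity already reserved by previously collected paths.
fn find_cheapest_path(used_liquidity: &HashMap<u64, u64>) -> Option<CandidatePath> {
	let _ = used_liquidity; // the real router runs Dijkstra here
	None
}

fn collect_paths(target_msat: u64, recommended_msat: u64) -> Option<Vec<CandidatePath>> {
	let mut used_liquidity: HashMap<u64, u64> = HashMap::new();
	let mut paths: Vec<CandidatePath> = Vec::new();
	let mut collected_msat = 0u64;

	// Steps 2-3: keep adding paths until we hit the recommended value or no path is left.
	while collected_msat < recommended_msat {
		let Some(path) = find_cheapest_path(&used_liquidity) else { break };
		for chan in &path.channel_ids {
			// Remember the liquidity each channel has already committed, so the next
			// iteration does not rely on it again.
			*used_liquidity.entry(*chan).or_default() += path.value_msat;
		}
		collected_msat += path.value_msat;
		paths.push(path);
	}

	// Steps 4-5: fail if the collected paths cannot even cover the (non-recommended) target.
	if collected_msat < target_msat {
		return None;
	}

	// Step 6: prefer the lowest cost per amount transferred (compare a/b vs c/d as a*d vs c*b).
	paths.sort_by(|a, b| {
		(a.cost_msat as u128 * b.value_msat as u128)
			.cmp(&(b.cost_msat as u128 * a.value_msat as u128))
	});
	let mut kept: Vec<CandidatePath> = Vec::new();
	let mut kept_msat = 0u64;
	for path in paths {
		if kept_msat >= target_msat {
			break;
		}
		kept_msat += path.value_msat;
		kept.push(path);
	}

	// Step 7: shave the last path down so the total equals exactly the target value.
	let overshoot = kept_msat - target_msat;
	if let Some(last) = kept.last_mut() {
		last.value_msat = last.value_msat.saturating_sub(overshoot);
	}
	Some(kept)
}
```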
@@ -2917,7 +2917,7 @@ where L::Target: Logger {
 for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() {
 	let (target, private_target_node_counter) =
 		node_counters.private_node_counter_from_pubkey(&prev_hop_id)
-			.expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here");
+			.expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here");
 	let (_src_id, private_source_node_counter) =
 		node_counters.private_node_counter_from_pubkey(&hop.src_node_id)
 			.expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here");
@@ -3167,6 +3167,27 @@ where L::Target: Logger {
 	}
 	debug_assert!(*used_liquidity_msat <= hop_max_msat);
 }
+
+// NOTE: The original motivation for the block was to ensure MPPs vary their paths a
+// bit by disabling the 'central' hop in any found paths. However, we are disabling
+// this block, as it:
+//
+// 1) causes MPP routing failures if there is only one path from payer -> payee
+// 2) forces MPPs to be routed across multiple paths; the additional paths are
+//    usually more expensive and less reliable (less liquidity).
+//
+// Issue: https://github.com/lightningdevkit/rust-lightning/issues/3685
+//
+// The one possible privacy concern is that any hops which forward multiple shards
+// of the same MPP can link those shards together, as each shard uses the same
+// payment hash (since LDK implements Simplified Multipath Payments, not Atomic
+// Multipath Payments). But this concern seems moot, as sending the payment over a
+// single shard leaks almost the same information anyway.
+//
+// As far as I (and SOTA AI models) can tell, disabling this block doesn't violate
+// any correctness constraints or security concerns in surrounding code.
+let _ = prevented_redundant_path_selection;
+/*
 if !prevented_redundant_path_selection {
 	// If we weren't capped by hitting a liquidity limit on a channel in the path,
 	// we'll probably end up picking the same path again on the next iteration.
@@ -3181,6 +3202,7 @@ where L::Target: Logger {
 		*used_liquidities.entry(CandidateHopId::Clear((scid, true))).or_default() = exhausted;
 	}
 }
+*/

 // Track the total amount all our collected paths allow to send so that we know
 // when to stop looking for more paths
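For reference, the block commented out above worked roughly like the sketch below: when a newly found path was not already capped by a liquidity limit, its most constrained ("central") hop was marked as fully exhausted in the used-liquidity map so the next iteration would be forced onto a different path. The types here (bare u64 channel ids, a plain HashMap) are simplified stand-ins for LDK's `CandidateHopId`/`used_liquidities` bookkeeping, not the real code.

```rust
use std::collections::HashMap;

struct PathHop {
	scid: u64,                    // short channel id of the hop
	effective_capacity_msat: u64, // remaining liquidity estimate for the hop
}

fn prevent_redundant_path_selection(
	path: &[PathHop],
	used_liquidities: &mut HashMap<u64, u64>,
	prevented_by_liquidity_cap: bool,
) {
	if prevented_by_liquidity_cap {
		// Hitting a liquidity limit already changes the constraints for the next search,
		// so no extra exhaustion is needed.
		return;
	}
	// Otherwise, pick the hop with the smallest remaining capacity and pretend it is
	// fully used, pushing the next search iteration onto a different path.
	if let Some(victim) = path.iter().min_by_key(|hop| hop.effective_capacity_msat) {
		used_liquidities.insert(victim.scid, victim.effective_capacity_msat);
	}
}
```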
@@ -3332,8 +3354,8 @@ where L::Target: Logger {
 });
 for idx in 0..(selected_route.len() - 1) {
 	if idx + 1 >= selected_route.len() { break; }
-	if iter_equal(selected_route[idx].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
-		selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
+	if iter_equal(selected_route[idx].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target())),
+		selected_route[idx + 1].hops.iter().map(|h| (h.0.candidate.id(), h.0.candidate.target()))) {
 		let new_value = selected_route[idx].get_value_msat() + selected_route[idx + 1].get_value_msat();
 		selected_route[idx].update_value_and_recompute_fees(new_value);
 		selected_route.remove(idx + 1);
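Step 8 from the overview shows up here: if the saturation limit made us pick the same path twice, the two adjacent entries are folded into one so we don't send two HTLCs along the same route. Below is a standalone sketch of that merge, with `SimplePath` as an illustrative stand-in for LDK's path type and without the fee recomputation the real code performs.

```rust
#[derive(Clone, Debug, PartialEq, Eq)]
struct SimplePath {
	hop_ids: Vec<(u64, u64)>, // (candidate id, target node id) per hop
	value_msat: u64,
}

fn merge_identical_adjacent_paths(selected_route: &mut Vec<SimplePath>) {
	let mut idx = 0;
	while idx + 1 < selected_route.len() {
		if selected_route[idx].hop_ids == selected_route[idx + 1].hop_ids {
			let extra = selected_route[idx + 1].value_msat;
			// The real router also recomputes fees for the combined value here.
			selected_route[idx].value_msat += extra;
			selected_route.remove(idx + 1);
		} else {
			idx += 1;
		}
	}
}
```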
@@ -6602,12 +6624,12 @@ mod tests {
 // We construct a network that looks like this:
 //
-// node2 -1(3)2- node3
-// 2 2
-// (2) (4)
-// 1 1
-// node1 -1(5)2- node4 -1(1)2- node6
-// 2
+// node2 -1(3)2- node3
+// 2 2
+// (2) (4)
+// 1 1
+// node1 -1(5)2- node4 -1(1)2- node6
+// 2
 // (6)
 // 1
 // our_node
@@ -8113,14 +8135,14 @@ mod tests {
 // 1. We add a route candidate for intro_node contributing a high amount
 // 2. We add a first_hop<>intro_node route candidate for the same high amount
 // 3. We see a cheaper blinded route hint for the same intro node but a much lower contribution
-// amount, and update our route candidate for intro_node for the lower amount
+// amount, and update our route candidate for intro_node for the lower amount
 // 4. We then attempt to update the aforementioned first_hop<>intro_node route candidate for the
-// lower contribution amount, but fail (this was previously caused by failure to account for
-// blinded path fees when adding first_hop<>intro_node candidates)
+// lower contribution amount, but fail (this was previously caused by failure to account for
+// blinded path fees when adding first_hop<>intro_node candidates)
 // 5. We go to construct the path from these route candidates and our first_hop<>intro_node
-// candidate still thinks its path is contributing the original higher amount. This caused us
-// to hit an `unreachable` overflow when calculating the cheaper intro_node fees over the
-// larger amount
+// candidate still thinks its path is contributing the original higher amount. This caused us
+// to hit an `unreachable` overflow when calculating the cheaper intro_node fees over the
+// larger amount
 let secp_ctx = Secp256k1::new();
 let logger = Arc::new(ln_test_utils::TestLogger::new());
 let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger)));