#!/usr/bin/env python3

from time import sleep

from scenarios.utils import ensure_miner
from warnet.test_framework_bridge import WarnetTestFramework
from warnet.utils import channel_match
def cli_help ():
@@ -15,77 +17,127 @@ def set_test_params(self):
15
17
16
18
def run_test (self ):
17
19
self .log .info ("Get LN nodes and wallet addresses" )
20
+ ln_nodes = []
18
21
recv_addrs = []
19
22
for tank in self .warnet .tanks :
20
23
if tank .lnnode is not None :
21
24
recv_addrs .append (tank .lnnode .getnewaddress ())
25
+ ln_nodes .append (tank .index )
22
26
23
27
self .log .info ("Fund LN wallets" )
24
- miner = ensure_miner (self .nodes [3 ])
25
- addr = miner .getnewaddress ()
26
- self .generatetoaddress (self .nodes [3 ], 110 , addr )
28
+ miner = ensure_miner (self .nodes [0 ])
29
+ miner_addr = miner .getnewaddress ()
30
+ # 298 block base
31
+ self .generatetoaddress (self .nodes [0 ], 298 , miner_addr )
32
+ # divvy up the goods
33
+ split = miner .getbalance () // len (recv_addrs )
27
34
for addr in recv_addrs :
28
- miner .sendtoaddress (addr , 50 )
29
- self .generatetoaddress (self .nodes [3 ], 1 , addr )
30
-
31
- self .log .info ("Waiting for funds to be spendable" )
32
- ready = False
33
- while not ready :
34
- sleep (1 )
35
- ready = True
35
+ miner .sendtoaddress (addr , split )
36
+ # confirm funds in block 299
37
+ self .generatetoaddress (self .nodes [0 ], 1 , miner_addr )
38
+
39
+ self .log .info (f"Waiting for funds to be spendable: { split } BTC each for { len (recv_addrs )} LN nodes" )
40
+
41
+ def funded_lnnodes ():
36
42
for tank in self .warnet .tanks :
37
43
if tank .lnnode is None :
38
44
continue
39
- bal = tank .lnnode .get_wallet_balance ()
40
- if int (bal ["confirmed_balance" ]) >= 50 :
41
- continue
42
- ready = False
43
- break
45
+ if int (tank .lnnode .get_wallet_balance ()["confirmed_balance" ]) < (split * 100000000 ):
46
+ return False
47
+ return True
48
+ self .wait_until (funded_lnnodes , timeout = 5 * 60 )
49
+
50
+ ln_nodes_uri = ln_nodes .copy ()
51
+ while len (ln_nodes_uri ) > 0 :
52
+ self .log .info (f"Waiting for all LN nodes to have URI, LN nodes remaining: { ln_nodes_uri } " )
53
+ for index in ln_nodes_uri :
54
+ lnnode = self .warnet .tanks [index ].lnnode
55
+ if lnnode .getURI ():
56
+ ln_nodes_uri .remove (index )
57
+ sleep (5 )
44
58
45
- self .log .info ("Open channels" )
46
- # TODO: This might belong in Warnet class as connect_ln_edges()
47
- # but that would need to ensure spendable funds first.
48
- # For now we consider this scenario "special".
49
- opening_txs = []
50
- ln_edges = []
59
+ self .log .info ("Adding p2p connections to LN nodes" )
51
60
for edge in self .warnet .graph .edges (data = True ):
52
61
(src , dst , data ) = edge
53
- if "channel" in data :
62
+ # Copy the L1 p2p topology (where applicable) to L2
63
+ # so we get a more robust p2p graph for lightning
64
+ if "channel_open" not in data and self .warnet .tanks [src ].lnnode and self .warnet .tanks [dst ].lnnode :
65
+ self .warnet .tanks [src ].lnnode .connect_to_tank (dst )
66
+
67
+ # Start confirming channel opens in block 300
68
+ self .log .info ("Opening channels, one per block" )
69
+ chan_opens = []
70
+ edges = self .warnet .graph .edges (data = True , keys = True )
71
+ edges = sorted (edges , key = lambda edge : edge [2 ])
72
+ for edge in edges :
73
+ (src , dst , key , data ) = edge
74
+ if "channel_open" in data :
54
75
src_node = self .warnet .get_ln_node_from_tank (src )
55
76
assert src_node is not None
56
77
assert self .warnet .get_ln_node_from_tank (dst ) is not None
57
- ln_edges .append (edge )
58
- tx = src_node .open_channel_to_tank (dst , data ["channel" ])["funding_txid" ]
59
- opening_txs .append (tx )
78
+ self .log .info (f"opening channel { src } ->{ dst } " )
79
+ chan_pt = src_node .open_channel_to_tank (dst , data ["channel_open" ])
80
+ # We can guarantee deterministic short channel IDs as long as
81
+ # the change output is greater than the channel funding output,
82
+ # which will then be output 0
83
+ assert chan_pt [64 :] == ":0"
84
+ chan_opens .append ((edge , chan_pt ))
85
+ self .log .info (f" pending channel point: { chan_pt } " )
86
+ self .wait_until (lambda chan_pt = chan_pt : chan_pt [:64 ] in self .nodes [0 ].getrawmempool ())
87
+ self .generatetoaddress (self .nodes [0 ], 1 , miner_addr )
88
+ assert chan_pt [:64 ] not in self .nodes [0 ].getrawmempool ()
89
+ height = self .nodes [0 ].getblockcount ()
90
+ self .log .info (f" confirmed in block { height } " )
91
+ self .log .info (f" channel_id should be: { int .from_bytes (height .to_bytes (3 , 'big' ) + (1 ).to_bytes (3 , 'big' ) + (0 ).to_bytes (2 , 'big' ), 'big' )} " )
60
92
61
- self .log .info ("Waiting for all channel open txs in mempool" )
62
- while True :
63
- all_set = True
64
- mp = self .nodes [3 ].getrawmempool ()
65
- for tx in opening_txs :
66
- if tx not in mp :
67
- all_set = False
68
- if all_set :
69
- break
70
- sleep (2 )
93
+ # Ensure all channel opens are sufficiently confirmed
94
+ self .generatetoaddress (self .nodes [0 ], 10 , miner_addr )
95
+ ln_nodes_gossip = ln_nodes .copy ()
96
+ while len (ln_nodes_gossip ) > 0 :
97
+ self .log .info (f"Waiting for graph gossip sync, LN nodes remaining: { ln_nodes_gossip } " )
98
+ for index in ln_nodes_gossip :
99
+ lnnode = self .warnet .tanks [index ].lnnode
100
+ my_edges = len (lnnode .lncli ("describegraph" )["edges" ])
101
+ my_nodes = len (lnnode .lncli ("describegraph" )["nodes" ])
102
+ if my_edges == len (chan_opens ) and my_nodes == len (ln_nodes ):
103
+ ln_nodes_gossip .remove (index )
104
+ else :
105
+ self .log .info (f" node { index } not synced (channels: { my_edges } /{ len (chan_opens )} , nodes: { my_nodes } /{ len (ln_nodes )} )" )
106
+ sleep (5 )
71
107
72
- self .log .info ("Confirming channel opens" )
73
- self .generatetoaddress (self .nodes [3 ], 6 , addr )
108
+ self .log .info ("Updating channel policies" )
109
+ for edge , chan_pt in chan_opens :
110
+ (src , dst , key , data ) = edge
111
+ if "target_policy" in data :
112
+ target_node = self .warnet .get_ln_node_from_tank (dst )
113
+ target_node .update_channel_policy (chan_pt , data ["target_policy" ])
114
+ if "source_policy" in data :
115
+ source_node = self .warnet .get_ln_node_from_tank (src )
116
+ source_node .update_channel_policy (chan_pt , data ["source_policy" ])
74
117
75
- self .log .info ("Waiting for graph gossip sync" )
76
118
while True :
77
- all_set = True
78
- for tank in self .warnet .tanks :
79
- if tank .lnnode is not None :
80
- edges = tank .lnnode .lncli ("describegraph" )["edges" ]
81
- if len (edges ) != len (ln_edges ):
82
- all_set = False
83
- if all_set :
119
+ self .log .info ("Waiting for all channel policies to match" )
120
+ score = 0
121
+ for tank_index , me in enumerate (ln_nodes ):
122
+ you = (tank_index + 1 ) % len (ln_nodes )
123
+ my_channels = self .warnet .tanks [me ].lnnode .lncli ("describegraph" )["edges" ]
124
+ your_channels = self .warnet .tanks [you ].lnnode .lncli ("describegraph" )["edges" ]
125
+ match = True
126
+ for chan_index , my_chan in enumerate (my_channels ):
127
+ your_chan = your_channels [chan_index ]
128
+ if not channel_match (my_chan , your_chan , allow_flip = False ):
129
+ print (f"Channel policy doesn't match between tanks { me } & { you } : { my_chan ['channel_id' ]} " )
130
+ match = False
131
+ break
132
+ if match :
133
+ print (f"All channel policies match between tanks { me } & { you } " )
134
+ score += 1
135
+ print (f"Score: { score } / { len (ln_nodes )} " )
136
+ if score == len (ln_nodes ):
84
137
break
85
- sleep (2 )
86
-
87
- self .log .info (f"Warnet LN ready with { len (recv_addrs )} nodes and { len (ln_edges )} channels." )
138
+ sleep (5 )
88
139
140
+ self .log .info (f"Warnet LN ready with { len (recv_addrs )} nodes and { len (chan_opens )} channels." )
# Script entry point: run the scenario via the Warnet test framework.
if __name__ == "__main__":
    LNInit().main()