import argparse
import configparser
import ipaddress
import json
import logging
import os
import pathlib
import random
import signal
import sys
import tempfile
from pathlib import Path

from test_framework.authproxy import AuthServiceProxy
from test_framework.p2p import NetworkThread
from test_framework.test_framework import (
    TMPDIR_PREFIX,
    BitcoinTestFramework,
    TestStatus,
)
from test_framework.test_node import TestNode
from test_framework.util import PortSeed, get_rpc_proxy
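
# warnet.json lives next to this file and describes the tanks in the network.
# Judging from the fields read in setup() below, each entry is expected to
# include at least "tank", "chain", "rpc_host", "rpc_port", "rpc_user",
# "rpc_password" and "init_peers", e.g. (values illustrative only):
#   {"tank": "tank-0000", "chain": "regtest", "rpc_host": "10.1.0.5",
#    "rpc_port": 18443, "rpc_user": "user", "rpc_password": "password",
#    "init_peers": 2}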
WARNET_FILE = Path(os.path.dirname(__file__)) / "warnet.json"
with open(WARNET_FILE) as file:
    WARNET = json.load(file)


# Ensure that all RPC calls are made with brand new http connections
def auth_proxy_request(self, method, path, postdata):
    self._set_conn()  # creates new http client connection
    return self.oldrequest(method, path, postdata)


AuthServiceProxy.oldrequest = AuthServiceProxy._request
AuthServiceProxy._request = auth_proxy_request


class Commander(BitcoinTestFramework):
    # required by subclasses of BitcoinTestFramework
    def set_test_params(self):
        pass

    def run_test(self):
        pass

    # Utility functions for Warnet scenarios
    @staticmethod
    def ensure_miner(node):
        wallets = node.listwallets()
        if "miner" not in wallets:
            node.createwallet("miner", descriptors=True)
        return node.get_wallet_rpc("miner")

    def handle_sigterm(self, signum, frame):
        print("SIGTERM received, stopping...")
        self.shutdown()
        sys.exit(0)

    # The following functions are chopped-up hacks of
    # the original methods from BitcoinTestFramework

    def setup(self):
        signal.signal(signal.SIGTERM, self.handle_sigterm)

        # hacked from _start_logging()
        # Scenarios log plain messages to stdout only, which can be redirected by warnet
        self.log = logging.getLogger(self.__class__.__name__)
        self.log.setLevel(logging.INFO)  # set this to DEBUG to see ALL RPC CALLS

        # Because scenarios run in their own subprocess, the logger here
        # is not the same as the warnet server or other global loggers.
        # Scenarios log directly to stdout which gets picked up by the
        # subprocess manager in the server, and reprinted to the global log.
        ch = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(fmt="%(name)-8s %(message)s")
        ch.setFormatter(formatter)
        self.log.addHandler(ch)

        for i, tank in enumerate(WARNET):
            self.log.info(
                f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}"
            )
            node = TestNode(
                i,
                pathlib.Path(),  # datadir path
                chain=tank["chain"],
                rpchost=tank["rpc_host"],
                timewait=60,
                timeout_factor=self.options.timeout_factor,
                bitcoind=None,
                bitcoin_cli=None,
                cwd=self.options.tmpdir,
                coverage_dir=self.options.coveragedir,
            )
            node.rpc = get_rpc_proxy(
                f"http://{tank['rpc_user']}:{tank['rpc_password']}@{tank['rpc_host']}:{tank['rpc_port']}",
                i,
                timeout=60,
                coveragedir=self.options.coveragedir,
            )
            node.rpc_connected = True
            node.init_peers = tank["init_peers"]
            self.nodes.append(node)

        self.num_nodes = len(self.nodes)

        # Set up temp directory
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)

        seed = self.options.randomseed
        if seed is None:
            seed = random.randrange(sys.maxsize)
        else:
            self.log.info(f"User supplied random seed {seed}")
        random.seed(seed)
        self.log.info(f"PRNG seed is: {seed}")

        self.log.debug("Setting up network thread")
        self.network_thread = NetworkThread()
        self.network_thread.start()

        self.success = TestStatus.PASSED

    def parse_args(self):
        previous_releases_path = ""
        parser = argparse.ArgumentParser(usage="%(prog)s [options]")
        parser.add_argument(
            "--nocleanup",
            dest="nocleanup",
            default=False,
            action="store_true",
            help="Leave bitcoinds and test.* datadir on exit or error",
        )
        parser.add_argument(
            "--nosandbox",
            dest="nosandbox",
            default=False,
            action="store_true",
            help="Don't use the syscall sandbox",
        )
        parser.add_argument(
            "--noshutdown",
            dest="noshutdown",
            default=False,
            action="store_true",
            help="Don't stop bitcoinds after the test execution",
        )
        parser.add_argument(
            "--cachedir",
            dest="cachedir",
            default=None,
            help="Directory for caching pregenerated datadirs (default: %(default)s)",
        )
        parser.add_argument(
            "--tmpdir", dest="tmpdir", default=None, help="Root directory for datadirs"
        )
        parser.add_argument(
            "-l",
            "--loglevel",
            dest="loglevel",
            default="DEBUG",
            help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.",
        )
        parser.add_argument(
            "--tracerpc",
            dest="trace_rpc",
            default=False,
            action="store_true",
            help="Print out all RPC calls as they are made",
        )
        parser.add_argument(
            "--portseed",
            dest="port_seed",
            default=0,
            help="The seed to use for assigning port numbers (default: current process id)",
        )
        parser.add_argument(
            "--previous-releases",
            dest="prev_releases",
            default=None,
            action="store_true",
            help="Force test of previous releases (default: %(default)s)",
        )
        parser.add_argument(
            "--coveragedir",
            dest="coveragedir",
            default=None,
            help="Write tested RPC commands into this directory",
        )
        parser.add_argument(
            "--configfile",
            dest="configfile",
            default=None,
            help="Location of the test framework config file (default: %(default)s)",
        )
        parser.add_argument(
            "--pdbonfailure",
            dest="pdbonfailure",
            default=False,
            action="store_true",
            help="Attach a python debugger if test fails",
        )
        parser.add_argument(
            "--usecli",
            dest="usecli",
            default=False,
            action="store_true",
            help="use bitcoin-cli instead of RPC for all commands",
        )
        parser.add_argument(
            "--perf",
            dest="perf",
            default=False,
            action="store_true",
            help="profile running nodes with perf for the duration of the test",
        )
        parser.add_argument(
            "--valgrind",
            dest="valgrind",
            default=False,
            action="store_true",
            help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required.",
        )
        parser.add_argument(
            "--randomseed",
            default=0x7761726E6574,  # "warnet" ascii
            help="set a random seed for deterministically reproducing a previous test run",
        )
        parser.add_argument(
            "--timeout-factor",
            dest="timeout_factor",
            default=1,
            help="adjust test timeouts by a factor. Setting it to 0 disables all timeouts",
        )
        parser.add_argument(
            "--network",
            dest="network",
            default="warnet",
            help="Designate which warnet this should run on (default: warnet)",
        )
        parser.add_argument(
            "--v2transport",
            dest="v2transport",
            default=False,
            action="store_true",
            help="use BIP324 v2 connections between all nodes by default",
        )

        self.add_options(parser)
        # Running TestShell in a Jupyter notebook causes an additional -f argument
        # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
        # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
        parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
        self.options = parser.parse_args()
        if self.options.timeout_factor == 0:
            self.options.timeout_factor = 99999
        self.options.timeout_factor = self.options.timeout_factor or (
            4 if self.options.valgrind else 1
        )
        self.options.previous_releases_path = previous_releases_path
        config = configparser.ConfigParser()
        if self.options.configfile is not None:
            with open(self.options.configfile) as f:
                config.read_file(f)

        config["environment"] = {"PACKAGE_BUGREPORT": ""}

        self.config = config

        if "descriptors" not in self.options:
            # Wallet is not required by the test at all and the value of self.options.descriptors won't matter.
            # It still needs to exist and be None in order for tests to work however.
            # So set it to None to force -disablewallet, because the wallet is not needed.
            self.options.descriptors = None
        elif self.options.descriptors is None:
            # Some wallet is either required or optionally used by the test.
            # Prefer SQLite unless it isn't available
            if self.is_sqlite_compiled():
                self.options.descriptors = True
            elif self.is_bdb_compiled():
                self.options.descriptors = False
            else:
                # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter
                # It still needs to exist and be None in order for tests to work however.
                # So set it to None, which will also set -disablewallet.
                self.options.descriptors = None

        PortSeed.n = self.options.port_seed

    def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True):
        """
        Kwargs:
            wait_for_connect: if True, block until the nodes are verified as connected. You might
                want to disable this when using -stopatheight with one of the connected nodes,
                since there will be a race between the actual connection and performing
                the assertions before one node shuts down.
        """
        from_connection = self.nodes[a]
        to_connection = self.nodes[b]

        to_ip_port = self.warnet.tanks[b].get_dns_addr()
        from_ip_port = self.warnet.tanks[a].get_ip_addr()

        if peer_advertises_v2 is None:
            peer_advertises_v2 = self.options.v2transport

        if peer_advertises_v2:
            from_connection.addnode(node=to_ip_port, command="onetry", v2transport=True)
        else:
            # skip the optional third argument (default false) for
            # compatibility with older clients
            from_connection.addnode(to_ip_port, "onetry")

        if not wait_for_connect:
            return

        def get_peer_ip(peer):
            try:  # we encounter a regular ip address
                ip_addr = str(ipaddress.ip_address(peer["addr"].split(":")[0]))
                return ip_addr
            except ValueError as err:  # or we encounter a service name
                try:
                    # NETWORK-tank-TANK_INDEX-service
                    # NETWORK-test-TEST-tank-TANK_INDEX-service
                    tank_index = int(peer["addr"].split("-")[-2])
                except (ValueError, IndexError) as inner_err:
                    raise ValueError(
                        "could not derive tank index from service name: {} {}".format(
                            peer["addr"], inner_err
                        )
                    ) from err

                ip_addr = self.warnet.tanks[tank_index].get_ip_addr()
                return ip_addr

        # poll until version handshake complete to avoid race conditions
        # with transaction relaying
        # See comments in net_processing:
        # * Must have a version message before anything else
        # * Must have a verack message before anything else
        self.wait_until(
            lambda: any(
                peer["addr"] == to_ip_port and peer["version"] != 0
                for peer in from_connection.getpeerinfo()
            )
        )
        self.wait_until(
            lambda: any(
                get_peer_ip(peer) == from_ip_port and peer["version"] != 0
                for peer in to_connection.getpeerinfo()
            )
        )
        self.wait_until(
            lambda: any(
                peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21
                for peer in from_connection.getpeerinfo()
            )
        )
        self.wait_until(
            lambda: any(
                get_peer_ip(peer) == from_ip_port
                and peer["bytesrecv_per_msg"].pop("verack", 0) >= 21
                for peer in to_connection.getpeerinfo()
            )
        )
        # The message bytes are counted before processing the message, so make
        # sure it was fully processed by waiting for a ping.
        self.wait_until(
            lambda: any(
                peer["addr"] == to_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29
                for peer in from_connection.getpeerinfo()
            )
        )
        self.wait_until(
            lambda: any(
                get_peer_ip(peer) == from_ip_port and peer["bytesrecv_per_msg"].pop("pong", 0) >= 29
                for peer in to_connection.getpeerinfo()
            )
        )
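
For context, a scenario built on this class subclasses Commander, overrides set_test_params() and run_test(), and calls main() (inherited from BitcoinTestFramework) so that the argument parsing and setup() above run before run_test(). The sketch below is illustrative only: the import path, class name, RPC calls, and block count are assumptions for the example, not part of this file.

from commander import Commander


class MinerSketch(Commander):
    def set_test_params(self):
        self.num_nodes = 1

    def run_test(self):
        # Use the first tank's node and its "miner" wallet (created on demand)
        node = self.nodes[0]
        miner = self.ensure_miner(node)
        addr = miner.getnewaddress()
        # Mine a handful of blocks via the node's regtest RPC
        node.generatetoaddress(10, addr)
        self.log.info(f"Mined 10 blocks to {addr}")


if __name__ == "__main__":
    MinerSketch().main()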