From a6281b2cfbd7993696f74cb9314b132d9be4e654 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Sat, 12 Oct 2024 22:05:41 +0100
Subject: [PATCH 1/7] caddy: --create-namespace if not exists

---
 src/warnet/deploy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index d9b5a45b5..53b2af2c9 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -144,7 +144,7 @@ def deploy_caddy(directory: Path, debug: bool):
     if not network_file.get(name, {}).get("enabled", False):
         return
 
-    cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace}"
+    cmd = f"{HELM_COMMAND} {name} {CADDY_CHART} --namespace {namespace} --create-namespace"
 
     if debug:
         cmd += " --debug"

From 38adc3e5cafc1e1efcc4bb6cc261e031d03be897 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Fri, 11 Oct 2024 15:05:53 +0100
Subject: [PATCH 2/7] speedy startup

---
 src/warnet/deploy.py | 42 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index 53b2af2c9..12ed73578 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -1,6 +1,7 @@
 import subprocess
 import sys
 import tempfile
+from multiprocessing import Process
 from pathlib import Path
 from typing import Optional
 
@@ -75,17 +76,44 @@ def _deploy(directory, debug, namespace, to_all_users):
 
     if to_all_users:
         namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX)
+        processes = []
         for namespace in namespaces:
-            deploy(directory, debug, namespace.metadata.name, False)
+            p = Process(target=deploy, args=(directory, debug, namespace.metadata.name, False))
+            p.start()
+            processes.append(p)
+        for p in processes:
+            p.join()
         return
 
     if (directory / NETWORK_FILE).exists():
-        dl = deploy_logging_stack(directory, debug)
-        deploy_network(directory, debug, namespace=namespace)
-        df = deploy_fork_observer(directory, debug)
-        if dl | df:
-            deploy_ingress(debug)
-        deploy_caddy(directory, debug)
+        processes = []
+        logging_process = Process(target=deploy_logging_stack, args=(directory, debug))
+        logging_process.start()
+        processes.append(logging_process)
+
+        network_process = Process(target=deploy_network, args=(directory, debug, namespace))
+        network_process.start()
+
+        ingress_process = Process(target=deploy_ingress, args=(debug,))
+        ingress_process.start()
+        processes.append(ingress_process)
+
+        caddy_process = Process(target=deploy_caddy, args=(directory, debug))
+        caddy_process.start()
+        processes.append(caddy_process)
+
+        # Wait for the network process to complete
+        network_process.join()
+
+        # Start the fork observer process immediately after network process completes
+        fork_observer_process = Process(target=deploy_fork_observer, args=(directory, debug))
+        fork_observer_process.start()
+        processes.append(fork_observer_process)
+
+        # Wait for all other processes to complete
+        for p in processes:
+            p.join()
+
     elif (directory / NAMESPACES_FILE).exists():
         deploy_namespaces(directory)
     else:

From 32cdfd3d88733c16a80ea1f6a2c4d1f392f05df8 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Sat, 12 Oct 2024 23:20:11 +0100
Subject: [PATCH 3/7] speedy startup tanks

---
 src/warnet/deploy.py | 58 +++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 33 insertions(+), 25 deletions(-)

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index 12ed73578..20d9174bb 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -259,41 +259,49 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool:
 
 def deploy_network(directory: Path, debug: bool = False, namespace: Optional[str] = None):
     network_file_path = directory / NETWORK_FILE
-    defaults_file_path = directory / DEFAULTS_FILE
-
     namespace = get_default_namespace_or(namespace)
 
     with network_file_path.open() as f:
         network_file = yaml.safe_load(f)
 
+    processes = []
     for node in network_file["nodes"]:
-        click.echo(f"Deploying node: {node.get('name')}")
-        try:
-            temp_override_file_path = ""
-            node_name = node.get("name")
-            node_config_override = {k: v for k, v in node.items() if k != "name"}
+        p = Process(target=deploy_single_node, args=(node, directory, debug, namespace))
+        p.start()
+        processes.append(p)
 
-            cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}"
-            if debug:
-                cmd += " --debug"
+    for p in processes:
+        p.join()
 
-            if node_config_override:
-                with tempfile.NamedTemporaryFile(
-                    mode="w", suffix=".yaml", delete=False
-                ) as temp_file:
-                    yaml.dump(node_config_override, temp_file)
-                    temp_override_file_path = Path(temp_file.name)
-                    cmd = f"{cmd} -f {temp_override_file_path}"
 
-            if not stream_command(cmd):
-                click.echo(f"Failed to run Helm command: {cmd}")
-                return
-        except Exception as e:
-            click.echo(f"Error: {e}")
+def deploy_single_node(node, directory: Path, debug: bool, namespace: str):
+    defaults_file_path = directory / DEFAULTS_FILE
+    click.echo(f"Deploying node: {node.get('name')}")
+    temp_override_file_path = ""
+    try:
+        node_name = node.get("name")
+        node_config_override = {k: v for k, v in node.items() if k != "name"}
+
+        defaults_file_path = directory / DEFAULTS_FILE
+        cmd = f"{HELM_COMMAND} {node_name} {BITCOIN_CHART_LOCATION} --namespace {namespace} -f {defaults_file_path}"
+        if debug:
+            cmd += " --debug"
+
+        if node_config_override:
+            with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file:
+                yaml.dump(node_config_override, temp_file)
+                temp_override_file_path = Path(temp_file.name)
+                cmd = f"{cmd} -f {temp_override_file_path}"
+
+        if not stream_command(cmd):
+            click.echo(f"Failed to run Helm command: {cmd}")
             return
+    except Exception as e:
+        click.echo(f"Error: {e}")
+        return
+    finally:
+        if temp_override_file_path:
+            Path(temp_override_file_path).unlink()
-        finally:
-            if temp_override_file_path:
-                Path(temp_override_file_path).unlink()
 
 
 def deploy_namespaces(directory: Path):

From 2e237bc6e22d54ed5b5a5539c3fdaff358bc96da Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Sat, 12 Oct 2024 23:23:48 +0100
Subject: [PATCH 4/7] speedy startup namespaces

---
 src/warnet/deploy.py | 56 ++++++++++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 24 deletions(-)

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index 20d9174bb..3cf3e6270 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -320,32 +320,40 @@ def deploy_namespaces(directory: Path):
         )
         return
 
+    processes = []
     for namespace in namespaces_file["namespaces"]:
-        click.echo(f"Deploying namespace: {namespace.get('name')}")
-        try:
-            temp_override_file_path = ""
-            namespace_name = namespace.get("name")
-            namespace_config_override = {k: v for k, v in namespace.items() if k != "name"}
-
-            cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}"
-
-            if namespace_config_override:
-                with tempfile.NamedTemporaryFile(
-                    mode="w", suffix=".yaml", delete=False
-                ) as temp_file:
-                    yaml.dump(namespace_config_override, temp_file)
-                    temp_override_file_path = Path(temp_file.name)
-                    cmd = f"{cmd} -f {temp_override_file_path}"
-
-            if not stream_command(cmd):
-                click.echo(f"Failed to run Helm command: {cmd}")
-                return
-        except Exception as e:
-            click.echo(f"Error: {e}")
+        p = Process(target=deploy_single_namespace, args=(namespace, defaults_file_path))
+        p.start()
+        processes.append(p)
+
+    for p in processes:
+        p.join()
+
+
+def deploy_single_namespace(namespace, defaults_file_path: Path):
+    click.echo(f"Deploying namespace: {namespace.get('name')}")
+    temp_override_file_path = ""
+    try:
+        namespace_name = namespace.get("name")
+        namespace_config_override = {k: v for k, v in namespace.items() if k != "name"}
+
+        cmd = f"{HELM_COMMAND} {namespace_name} {NAMESPACES_CHART_LOCATION} -f {defaults_file_path}"
+
+        if namespace_config_override:
+            with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as temp_file:
+                yaml.dump(namespace_config_override, temp_file)
+                temp_override_file_path = Path(temp_file.name)
+                cmd = f"{cmd} -f {temp_override_file_path}"
+
+        if not stream_command(cmd):
+            click.echo(f"Failed to run Helm command: {cmd}")
             return
-        finally:
-            if temp_override_file_path:
-                temp_override_file_path.unlink()
+    except Exception as e:
+        click.echo(f"Error: {e}")
+        return
+    finally:
+        if temp_override_file_path:
+            Path(temp_override_file_path).unlink()
 
 
 def is_windows():

From 2526957e4186b57e2636c1098b196b933181db6a Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Wed, 16 Oct 2024 10:40:15 +0100
Subject: [PATCH 5/7] fix ingress deployment

---
 src/warnet/deploy.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index 3cf3e6270..bc8b7a345 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -94,7 +94,7 @@ def _deploy(directory, debug, namespace, to_all_users):
         network_process = Process(target=deploy_network, args=(directory, debug, namespace))
         network_process.start()
 
-        ingress_process = Process(target=deploy_ingress, args=(debug,))
+        ingress_process = Process(target=deploy_ingress, args=(directory, debug))
         ingress_process.start()
         processes.append(ingress_process)
 
@@ -184,7 +184,15 @@ def deploy_caddy(directory: Path, debug: bool):
     click.echo("\nTo access the warnet dashboard run:\n warnet dashboard")
 
 
-def deploy_ingress(debug: bool):
+def deploy_ingress(directory: Path, debug: bool):
+    # Deploy ingress if either logging or fork observer is enabled
+    network_file_path = directory / NETWORK_FILE
+    with network_file_path.open() as f:
+        network_file = yaml.safe_load(f)
+    fo_enabled = network_file.get("fork_observer", {}).get("enabled", False)
+    logging_enabled = check_logging_required(directory)
+    if not (fo_enabled or logging_enabled):
+        return
     click.echo("Deploying ingress controller")
 
     for command in INGRESS_HELM_COMMANDS:

From 5c18af9e78ab31842b3a92f09d24bcf45ec183a6 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Thu, 5 Dec 2024 16:57:11 +0000
Subject: [PATCH 6/7] stop scenarios in parallel

---
 src/warnet/control.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/src/warnet/control.py b/src/warnet/control.py
index 83d358a4e..c2f45d62c 100644
--- a/src/warnet/control.py
+++ b/src/warnet/control.py
@@ -6,6 +6,7 @@
 import time
 import zipapp
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from multiprocessing import Pool
 from pathlib import Path
 from typing import Optional
 
@@ -112,10 +113,18 @@ def stop_scenario(scenario_name):
 
 
 def stop_all_scenarios(scenarios):
-    """Stop all active scenarios using Helm"""
-    with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"):
-        for scenario in scenarios:
-            stop_scenario(scenario)
+    """Stop all active scenarios in parallel using multiprocessing"""
+
+    def stop_single(scenario):
+        stop_scenario(scenario)
+        return f"Stopped scenario: {scenario}"
+
+    with console.status("[bold yellow]Stopping all scenarios...[/bold yellow]"), Pool() as pool:
+        results = pool.map(stop_single, scenarios)
+
+    for result in results:
+        console.print(f"[bold green]{result}[/bold green]")
+
     console.print("[bold green]All scenarios have been stopped.[/bold green]")
 
 

From 09b72300078ec25c4a42cdf3d2f2ad64fc9ddb42 Mon Sep 17 00:00:00 2001
From: willcl-ark
Date: Thu, 5 Dec 2024 20:44:52 +0000
Subject: [PATCH 7/7] deploy logging CRDs first

---
 src/warnet/constants.py |  6 ++++++
 src/warnet/deploy.py    | 25 ++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/src/warnet/constants.py b/src/warnet/constants.py
index 46f33a3fe..f8b45443f 100644
--- a/src/warnet/constants.py
+++ b/src/warnet/constants.py
@@ -96,6 +96,12 @@
     },
 }
 
+LOGGING_CRD_COMMANDS = [
+    "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts",
+    "helm repo update",
+    "helm upgrade --install prometheus-operator-crds prometheus-community/prometheus-operator-crds",
+]
+
 # Helm commands for logging setup
 # TODO: also lots of hardcode stuff in these helm commands, will need to fix this when moving to helm charts
 LOGGING_HELM_COMMANDS = [

diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py
index bc8b7a345..2a429b89f 100644
--- a/src/warnet/deploy.py
+++ b/src/warnet/deploy.py
@@ -16,6 +16,7 @@
     FORK_OBSERVER_CHART,
     HELM_COMMAND,
     INGRESS_HELM_COMMANDS,
+    LOGGING_CRD_COMMANDS,
     LOGGING_HELM_COMMANDS,
     LOGGING_NAMESPACE,
     NAMESPACES_CHART_LOCATION,
@@ -87,6 +88,9 @@ def _deploy(directory, debug, namespace, to_all_users):
 
     if (directory / NETWORK_FILE).exists():
         processes = []
+        # Deploy logging CRD first to avoid synchronisation issues
+        deploy_logging_crd(directory, debug)
+
         logging_process = Process(target=deploy_logging_stack, args=(directory, debug))
         logging_process.start()
         processes.append(logging_process)
@@ -146,11 +150,30 @@ def check_logging_required(directory: Path):
     return False
 
 
+def deploy_logging_crd(directory: Path, debug: bool) -> bool:
+    """
+    This function exists so we can parallelise the rest of the logging stack
+    installation
+    """
+    if not check_logging_required(directory):
+        return False
+
+    click.echo(
+        "Found collectLogs or metricsExport in network definition, Deploying logging stack CRD"
+    )
+
+    for command in LOGGING_CRD_COMMANDS:
+        if not stream_command(command):
+            print(f"Failed to run Helm command: {command}")
+            return False
+    return True
+
+
 def deploy_logging_stack(directory: Path, debug: bool) -> bool:
     if not check_logging_required(directory):
         return False
 
-    click.echo("Found collectLogs or metricsExport in network definition, Deploying logging stack")
+    click.echo("Deploying logging stack")
 
     for command in LOGGING_HELM_COMMANDS:
         if not stream_command(command):