
Commit 81724ef

Merge pull request #324 from pinheadmz/simln-service
add sim-ln service
2 parents c474fe5 + 253e1d5 commit 81724ef

File tree: 16 files changed, +277 -87 lines

justfile

Lines changed: 6 additions & 0 deletions
@@ -4,6 +4,12 @@ set shell := ["bash", "-uc"]
 default:
     just --list
 
+cluster:
+    kubectl apply -f src/templates/rpc/namespace.yaml
+    kubectl apply -f src/templates/rpc/rbac-config.yaml
+    kubectl apply -f src/templates/rpc/warnet-rpc-service.yaml
+    kubectl apply -f src/templates/rpc/warnet-rpc-statefulset.yaml
+
 # Setup and start the RPC in dev mode with minikube
 start:
     #!/usr/bin/env bash

src/backends/backend_interface.py

Lines changed: 4 additions & 5 deletions
@@ -110,16 +110,15 @@ def get_tank_ipv4(self, index: int) -> str:
         raise NotImplementedError("This method should be overridden by child class")
 
     @abstractmethod
-    def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:
+    def get_lnnode_hostname(self, index: int) -> str:
         """
-        Wait for healthy status on all bitcoind nodes
+        Get the hostname assigned to a lnnode attached to a tank from the backend
         """
         raise NotImplementedError("This method should be overridden by child class")
 
-
     @abstractmethod
-    def service_from_json(self, json: dict) -> dict:
+    def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:
         """
-        Create a single container from a JSON object of settings
+        Wait for healthy status on all bitcoind nodes
         """
         raise NotImplementedError("This method should be overridden by child class")

src/backends/compose/compose_backend.py

Lines changed: 24 additions & 5 deletions
@@ -266,7 +266,7 @@ def _write_docker_compose(self, warnet):
         # Initialize services and add them to the compose
         for service_name in warnet.services:
             if "compose" in services[service_name]["backends"]:
-                compose["services"][service_name] = self.service_from_json(services[service_name])
+                compose["services"][service_name] = self.service_from_json(service_name)
 
         docker_compose_path = warnet.config_dir / "docker-compose.yml"
         try:
@@ -356,9 +356,12 @@ def add_services(self, tank: Tank, compose):
             "networks": [tank.network_name],
         }
 
+    def get_lnnode_hostname(self, index: int) -> str:
+        return self.get_container_name(index, ServiceType.LIGHTNING)
+
     def add_lnd_service(self, tank, compose):
         services = compose["services"]
-        ln_container_name = self.get_container_name(tank.index, ServiceType.LIGHTNING)
+        ln_container_name = self.get_lnnode_hostname(tank.index)
         ln_cb_container_name = self.get_container_name(tank.index, ServiceType.CIRCUITBREAKER)
         bitcoin_container_name = self.get_container_name(tank.index, ServiceType.BITCOIN)
         # These args are appended to the Dockerfile `ENTRYPOINT ["lnd"]`
@@ -448,21 +451,37 @@ def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:
 
         return healthy
 
-    def service_from_json(self, obj: dict) -> dict:
+    def get_service_container_name(self, service_name: str):
+        return f"{self.network_name}_{services[service_name]['container_name_suffix']}"
+
+    def service_from_json(self, service_name: str) -> object:
+        obj = services[service_name]
         volumes = obj.get("volumes", [])
-        volumes += [f"{self.config_dir}" + filepath for filepath in obj.get("config_files", [])]
+        for bind_mount in obj.get("config_files", []):
+            volume_name, mount_path = bind_mount.split(":")
+            hostpath = self.config_dir / volume_name
+            # If it's starting off as an empty directory, create it now so
+            # it has python-user permissions instead of docker-root
+            if volume_name[-1] == "/":
+                hostpath.mkdir(parents=True, exist_ok=True)
+            volumes += [f"{hostpath}:{mount_path}"]
 
         ports = []
         if "container_port" and "warnet_port" in obj:
             ports = [f"{obj['warnet_port']}:{obj['container_port']}"]
         return {
             "image": obj["image"],
-            "container_name": f"{self.network_name}_{obj['container_name_suffix']}",
+            "container_name": self.get_service_container_name(service_name),
             "ports": ports,
             "volumes": volumes,
             "privileged": obj.get("privileged", False),
             "devices": obj.get("devices", []),
             "command": obj.get("args", []),
             "environment": obj.get("environment", []),
+            "restart": "on-failure",
             "networks": [self.network_name]
         }
+
+    def restart_service_container(self, service_name: str):
+        container = self.client.containers.get(self.get_service_container_name(service_name))
+        container.restart()
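
Both backends now look services up by name in the shared `services` dictionary from `warnet.services`. The sim-ln entry itself lives in `src/warnet/services.py`, which is not shown in this commit view; the sketch below is only an illustration of the shape `service_from_json` expects, limited to the keys it reads above (all values are hypothetical):

# Hypothetical entry in warnet.services.services -- values are illustrative,
# only the keys that service_from_json() reads are shown.
services = {
    "simln": {
        "backends": ["compose", "k8s"],
        "image": "<sim-ln image>",            # assumption: real image name not shown in this diff
        "container_name_suffix": "simln",
        "config_files": ["simln/:/simln"],    # "volume:mount" pairs; trailing "/" creates an empty host dir
        "args": [],
        "environment": [],
    }
}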

src/backends/kubernetes/kubernetes_backend.py

Lines changed: 111 additions & 3 deletions
@@ -1,6 +1,7 @@
 import base64
 import logging
 import re
+import subprocess
 import time
 from pathlib import Path
 from typing import cast
@@ -15,6 +16,7 @@
 from kubernetes.dynamic import DynamicClient
 from kubernetes.dynamic.exceptions import ResourceNotFoundError
 from kubernetes.stream import stream
+from warnet.services import services
 from warnet.status import RunningStatus
 from warnet.tank import Tank
 from warnet.utils import parse_raw_messages
@@ -73,6 +75,10 @@ def down(self, warnet) -> bool:
 
         self.remove_prometheus_service_monitors(warnet.tanks)
 
+        for service_name in warnet.services:
+            if "k8s" in services[service_name]["backends"]:
+                self.client.delete_namespaced_pod(f'{services[service_name]["container_name_suffix"]}-service', self.namespace)
+
         return True
 
     def get_file(self, tank_index: int, service: ServiceType, file_path: str):
@@ -436,12 +442,15 @@ def remove_prometheus_service_monitors(self, tanks):
             except ResourceNotFoundError:
                 continue
 
+    def get_lnnode_hostname(self, index: int) -> str:
+        return f"lightning-{index}.{self.namespace}"
+
     def create_lnd_container(
         self, tank, bitcoind_service_name, volume_mounts
     ) -> client.V1Container:
         # These args are appended to the Dockerfile `ENTRYPOINT ["lnd"]`
         bitcoind_rpc_host = f"{bitcoind_service_name}.{self.namespace}"
-        lightning_dns = f"lightning-{tank.index}.{self.namespace}"
+        lightning_dns = self.get_lnnode_hostname(tank.index)
         args = tank.lnnode.get_conf(lightning_dns, bitcoind_rpc_host)
         self.log.debug(f"Creating lightning container for tank {tank.index} using {args=:}")
         lightning_container = client.V1Container(
@@ -668,6 +677,10 @@ def deploy_pods(self, warnet):
         if self.check_logging_crds_installed():
             self.apply_prometheus_service_monitors(warnet.tanks)
 
+        for service_name in warnet.services:
+            if "k8s" in services[service_name]["backends"]:
+                self.service_from_json(services[service_name])
+
         self.log.debug("Containers and services created. Configuring IP addresses")
         # now that the pods have had a second to create,
         # get the ips and set them on the tanks
@@ -700,5 +713,100 @@ def wait_for_healthy_tanks(self, warnet, timeout=30):
         """
         pass
 
-    def service_from_json(self, obj: dict) -> dict:
-        pass
+    def service_from_json(self, obj):
+        env = []
+        for pair in obj.get("environment", []):
+            name, value = pair.split("=")
+            env.append(client.V1EnvVar(name=name, value=value))
+        volume_mounts = []
+        volumes = []
+        for vol in obj.get("config_files", []):
+            volume_name, mount_path = vol.split(":")
+            volume_name = volume_name.replace("/", "")
+            volume_mounts.append(client.V1VolumeMount(name=volume_name, mount_path=mount_path))
+            volumes.append(client.V1Volume(name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()))
+
+        service_container = client.V1Container(
+            name=obj["container_name_suffix"],
+            image=obj["image"],
+            env=env,
+            security_context=client.V1SecurityContext(
+                privileged=True,
+                capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]),
+            ),
+            volume_mounts=volume_mounts
+        )
+        sidecar_container = client.V1Container(
+            name="sidecar",
+            image="pinheadmz/sidecar:latest",
+            volume_mounts=volume_mounts,
+            ports=[client.V1ContainerPort(container_port=22)],
+        )
+        service_pod = client.V1Pod(
+            api_version="v1",
+            kind="Pod",
+            metadata=client.V1ObjectMeta(
+                name=obj["container_name_suffix"],
+                namespace=self.namespace,
+                labels={
+                    "app": obj["container_name_suffix"],
+                    "network": self.network_name,
+                },
+            ),
+            spec=client.V1PodSpec(
+                restart_policy="OnFailure",
+                containers=[service_container, sidecar_container],
+                volumes=volumes,
+            ),
+        )
+
+        # Do not ever change this variable name. xoxo, --Zip
+        service_service = client.V1Service(
+            api_version="v1",
+            kind="Service",
+            metadata=client.V1ObjectMeta(
+                name=f'{obj["container_name_suffix"]}-service',
+                labels={
+                    "app": obj["container_name_suffix"],
+                    "network": self.network_name,
+                },
+            ),
+            spec=client.V1ServiceSpec(
+                selector={"app": obj["container_name_suffix"]},
+                publish_not_ready_addresses=True,
+                ports=[
+                    client.V1ServicePort(name="ssh", port=22, target_port=22),
+                ]
+            )
+        )
+
+        self.client.create_namespaced_pod(namespace=self.namespace, body=service_pod)
+        self.client.create_namespaced_service(namespace=self.namespace, body=service_service)
+
+    def write_service_config(self, source_path: str, service_name: str, destination_path: str):
+        obj = services[service_name]
+        name = obj["container_name_suffix"]
+        container_name = "sidecar"
+        # Copy the archive from our local drive (Warnet RPC container/pod)
+        # to the destination service's sidecar container via ssh
+        self.log.info(f"Copying local {source_path} to remote {destination_path} for {service_name}")
+        subprocess.run([
+            "scp",
+            "-o", "StrictHostKeyChecking=accept-new",
+            source_path,
+            f"root@{name}-service.{self.namespace}:/arbitrary_filename.tar"])
+        self.log.info(f"Finished copying tarball for {service_name}, unpacking...")
+        # Unpack the archive
+        stream(
+            self.client.connect_get_namespaced_pod_exec,
+            name,
+            self.namespace,
+            container=container_name,
+            command=["/bin/sh", "-c", f"tar -xf /arbitrary_filename.tar -C {destination_path}"],
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
            _preload_content=False
+        )
+        self.log.info(f"Finished unpacking config data for {service_name} to {destination_path}")
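
The caller that drives `write_service_config` (the RPC server's `network_export` handler) is not part of this commit view. A minimal sketch of how the pieces above might fit together, assuming the RPC process writes the tar archive produced by `LNNode.export` to disk first; the function name and archive path below are hypothetical:

import io
import tarfile

def export_simln_data(backend, warnet, archive_path):
    # Hypothetical driver, not part of this commit.
    config = {"nodes": []}
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w") as tar:
        for tank in warnet.tanks:
            if tank.lnnode is not None:
                # Adds each node's admin.macaroon and tls.cert to the archive
                # and appends its entry to config["nodes"]
                tank.lnnode.export(config, tar)
        # a sim-ln config built from `config` would also be added here
    with open(archive_path, "wb") as f:
        f.write(buf.getvalue())
    # scp the archive to the "simln" service sidecar and unpack it at /simln
    backend.write_service_config(archive_path, "simln", "/simln")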

src/cli/network.py

Lines changed: 6 additions & 3 deletions
@@ -143,8 +143,11 @@ def connected(network: str):
 
 @network.command()
 @click.option("--network", default="warnet", show_default=True)
-def export(network):
+@click.option("--activity", type=str)
+def export(network: str, activity: str):
     """
-    Export all [network] data for sim-ln to subdirectory
+    Export all [network] data for a "simln" service running in a container
+    on the network. Optionally add JSON string [activity] to simln config.
+    Returns True on success.
     """
-    print(rpc_call("network_export", {"network": network}))
+    print(rpc_call("network_export", {"network": network, "activity": activity}))
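
For reference, a sketch of the new option in use. The activity field names follow upstream sim-ln's documented schema and are not defined anywhere in this diff, so treat them as assumptions; node ids use the `ln-<index>` names that `LNNode.export` writes into the sim-ln config (see `src/warnet/lnnode.py` below):

import json

# Illustrative only: one 2000 msat payment per second from ln-0 to ln-1.
activity = json.dumps([
    {"source": "ln-0", "destination": "ln-1", "interval_secs": 1, "amount_msat": 2000}
])
print(rpc_call("network_export", {"network": "warnet", "activity": activity}))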

src/templates/Dockerfile_sidecar

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
FROM alpine:latest
2+
3+
RUN apk add openssh
4+
5+
RUN echo "root:" | chpasswd
6+
7+
RUN ssh-keygen -A
8+
9+
CMD ["/usr/sbin/sshd", "-D", \
10+
"-o", "PasswordAuthentication=yes", \
11+
"-o", "PermitEmptyPasswords=yes", \
12+
"-o", "PermitRootLogin=yes"]

src/templates/rpc/Dockerfile_rpc

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ FROM python:3.11-slim
 
 # Install procps, which includes pgrep
 RUN apt-get update && \
-    apt-get install -y procps && \
+    apt-get install -y procps openssh-client && \
     rm -rf /var/lib/apt/lists/*
 
 # Set the working directory in the container

src/templates/rpc/Dockerfile_rpc_dev

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ FROM python:3.11-slim
 
 # Install procps, which includes pgrep
 RUN apt-get update && \
-    apt-get install -y procps && \
+    apt-get install -y procps openssh-client && \
     rm -rf /var/lib/apt/lists/*
 
 # Set the working directory in the container

src/warnet/lnnode.py

Lines changed: 29 additions & 19 deletions
@@ -1,4 +1,5 @@
-import os
+import io
+import tarfile
 
 from backends import BackendInterface, ServiceType
 from warnet.utils import exponential_backoff, generate_ipv4_addr, handle_json
@@ -13,7 +14,8 @@
     "--bitcoin.active",
     "--bitcoin.regtest",
     "--bitcoin.node=bitcoind",
-    "--maxpendingchannels=64"
+    "--maxpendingchannels=64",
+    "--trickledelay=1"
 ])
 
 class LNNode:
@@ -114,30 +116,38 @@ def generate_cli_command(self, command: list[str]):
             raise Exception(f"Unsupported LN implementation: {self.impl}")
         return cmd
 
-    def export(self, config, subdir):
-        container_name = self.backend.get_container_name(self.tank.index, ServiceType.LIGHTNING)
-        macaroon_filename = f"{container_name}_admin.macaroon"
-        cert_filename = f"{container_name}_tls.cert"
-        macaroon_path = os.path.join(subdir, macaroon_filename)
-        cert_path = os.path.join(subdir, cert_filename)
+    def export(self, config: object, tar_file):
+        # Retrieve the credentials
         macaroon = self.backend.get_file(
             self.tank.index,
             ServiceType.LIGHTNING,
             "/root/.lnd/data/chain/bitcoin/regtest/admin.macaroon",
         )
-        cert = self.backend.get_file(self.tank.index, ServiceType.LIGHTNING, "/root/.lnd/tls.cert")
-
-        with open(macaroon_path, "wb") as f:
-            f.write(macaroon)
-
-        with open(cert_path, "wb") as f:
-            f.write(cert)
+        cert = self.backend.get_file(
+            self.tank.index,
+            ServiceType.LIGHTNING,
+            "/root/.lnd/tls.cert"
+        )
+        name = f"ln-{self.tank.index}"
+        macaroon_filename = f"{name}_admin.macaroon"
+        cert_filename = f"{name}_tls.cert"
+        host = self.backend.get_lnnode_hostname(self.tank.index)
+
+        # Add the files to the in-memory tar archive
+        tarinfo1 = tarfile.TarInfo(name=macaroon_filename)
+        tarinfo1.size = len(macaroon)
+        fileobj1 = io.BytesIO(macaroon)
+        tar_file.addfile(tarinfo=tarinfo1, fileobj=fileobj1)
+        tarinfo2 = tarfile.TarInfo(name=cert_filename)
+        tarinfo2.size = len(cert)
+        fileobj2 = io.BytesIO(cert)
+        tar_file.addfile(tarinfo=tarinfo2, fileobj=fileobj2)
 
         config["nodes"].append(
             {
-                "id": container_name,
-                "address": f"https://{self.ipv4}:{self.rpc_port}",
-                "macaroon": macaroon_path,
-                "cert": cert_path,
+                "id": name,
+                "address": f"https://{host}:{self.rpc_port}",
+                "macaroon": f"/simln/{macaroon_filename}",
+                "cert": f"/simln/{cert_filename}",
             }
         )
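
With the kubernetes backend, the entry appended for the tank at index 0 would look roughly like the following; the `warnet` namespace and the port value are illustrative (10009 is only LND's default gRPC port, the actual value comes from `self.rpc_port`):

{
    "id": "ln-0",
    "address": "https://lightning-0.warnet:10009",
    "macaroon": "/simln/ln-0_admin.macaroon",
    "cert": "/simln/ln-0_tls.cert",
}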
