Skip to content

add sim-ln service #324

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 10 commits into from
Apr 5, 2024
6 changes: 6 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,12 @@ set shell := ["bash", "-uc"]
default:
just --list

# Apply the namespace, RBAC and RPC-server manifests to the current kubernetes cluster
cluster:
kubectl apply -f src/templates/rpc/namespace.yaml
kubectl apply -f src/templates/rpc/rbac-config.yaml
kubectl apply -f src/templates/rpc/warnet-rpc-service.yaml
kubectl apply -f src/templates/rpc/warnet-rpc-statefulset.yaml

# Setup and start the RPC in dev mode with minikube
start:
#!/usr/bin/env bash
Expand Down
9 changes: 4 additions & 5 deletions src/backends/backend_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,16 +110,15 @@ def get_tank_ipv4(self, index: int) -> str:
raise NotImplementedError("This method should be overridden by child class")

@abstractmethod
def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:
def get_lnnode_hostname(self, index: int) -> str:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A nice future improvement could be to combine this with the `get_tank_ipv4` function above, and have it return the hostname for either tanks or lnnodes.

"""
Wait for healthy status on all bitcoind nodes
Get the hostname assigned to a lnnode attached to a tank from the backend
"""
raise NotImplementedError("This method should be overridden by child class")


@abstractmethod
def service_from_json(self, json: dict) -> dict:
def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:
"""
Create a single container from a JSON object of settings
Wait for healthy status on all bitcoind nodes
"""
raise NotImplementedError("This method should be overridden by child class")
29 changes: 24 additions & 5 deletions src/backends/compose/compose_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ def _write_docker_compose(self, warnet):
# Initialize services and add them to the compose
for service_name in warnet.services:
if "compose" in services[service_name]["backends"]:
compose["services"][service_name] = self.service_from_json(services[service_name])
compose["services"][service_name] = self.service_from_json(service_name)

docker_compose_path = warnet.config_dir / "docker-compose.yml"
try:
Expand Down Expand Up @@ -356,9 +356,12 @@ def add_services(self, tank: Tank, compose):
"networks": [tank.network_name],
}

def get_lnnode_hostname(self, index: int) -> str:
    """Return the docker hostname of the lightning node attached to tank *index*.

    On the compose backend the container name doubles as the DNS hostname
    on the docker network, so this simply delegates to get_container_name.
    """
    hostname = self.get_container_name(index, ServiceType.LIGHTNING)
    return hostname

def add_lnd_service(self, tank, compose):
services = compose["services"]
ln_container_name = self.get_container_name(tank.index, ServiceType.LIGHTNING)
ln_container_name = self.get_lnnode_hostname(tank.index)
ln_cb_container_name = self.get_container_name(tank.index, ServiceType.CIRCUITBREAKER)
bitcoin_container_name = self.get_container_name(tank.index, ServiceType.BITCOIN)
# These args are appended to the Dockerfile `ENTRYPOINT ["lnd"]`
Expand Down Expand Up @@ -448,21 +451,37 @@ def wait_for_healthy_tanks(self, warnet, timeout=60) -> bool:

return healthy

def service_from_json(self, obj: dict) -> dict:
def get_service_container_name(self, service_name: str):
    """Derive the docker container name for an extra network service.

    The name is the network name joined with the service's configured
    container_name_suffix, e.g. "warnet_simln".
    """
    suffix = services[service_name]["container_name_suffix"]
    return f"{self.network_name}_{suffix}"

def service_from_json(self, service_name: str) -> object:
    """
    Build a docker-compose service definition for the named extra service.

    service_name: key into the global `services` registry (entries have
        "image", "container_name_suffix", and optional "volumes",
        "config_files", "warnet_port"/"container_port", "privileged",
        "devices", "args", "environment").
    Returns a dict suitable for insertion under compose["services"].
    """
    obj = services[service_name]
    volumes = obj.get("volumes", [])
    for bind_mount in obj.get("config_files", []):
        volume_name, mount_path = bind_mount.split(":")
        hostpath = self.config_dir / volume_name
        # If it's starting off as an empty directory, create it now so
        # it has python-user permissions instead of docker-root.
        # endswith() is also safe for an empty volume_name, unlike [-1].
        if volume_name.endswith("/"):
            hostpath.mkdir(parents=True, exist_ok=True)
        volumes += [f"{hostpath}:{mount_path}"]

    ports = []
    # BUG FIX: the original condition was
    #   if "container_port" and "warnet_port" in obj:
    # where the string literal "container_port" is always truthy, so only
    # "warnet_port" was actually checked and a service defining just
    # "warnet_port" would raise KeyError below. Test both keys explicitly.
    if "container_port" in obj and "warnet_port" in obj:
        ports = [f"{obj['warnet_port']}:{obj['container_port']}"]
    return {
        "image": obj["image"],
        "container_name": self.get_service_container_name(service_name),
        "ports": ports,
        "volumes": volumes,
        "privileged": obj.get("privileged", False),
        "devices": obj.get("devices", []),
        "command": obj.get("args", []),
        "environment": obj.get("environment", []),
        "restart": "on-failure",
        "networks": [self.network_name]
    }

def restart_service_container(self, service_name: str):
    """Restart the docker container backing the named extra service."""
    name = self.get_service_container_name(service_name)
    self.client.containers.get(name).restart()
114 changes: 111 additions & 3 deletions src/backends/kubernetes/kubernetes_backend.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import base64
import logging
import re
import subprocess
import time
from pathlib import Path
from typing import cast
Expand All @@ -15,6 +16,7 @@
from kubernetes.dynamic import DynamicClient
from kubernetes.dynamic.exceptions import ResourceNotFoundError
from kubernetes.stream import stream
from warnet.services import services
from warnet.status import RunningStatus
from warnet.tank import Tank
from warnet.utils import parse_raw_messages
Expand Down Expand Up @@ -73,6 +75,10 @@ def down(self, warnet) -> bool:

self.remove_prometheus_service_monitors(warnet.tanks)

for service_name in warnet.services:
if "k8s" in services[service_name]["backends"]:
self.client.delete_namespaced_pod(f'{services[service_name]["container_name_suffix"]}-service', self.namespace)

return True

def get_file(self, tank_index: int, service: ServiceType, file_path: str):
Expand Down Expand Up @@ -436,12 +442,15 @@ def remove_prometheus_service_monitors(self, tanks):
except ResourceNotFoundError:
continue

def get_lnnode_hostname(self, index: int) -> str:
    """Return the in-cluster DNS name of the lightning pod for tank *index*.

    Pods are named "lightning-<index>" and resolved within the backend's
    kubernetes namespace.
    """
    return "lightning-{}.{}".format(index, self.namespace)

def create_lnd_container(
self, tank, bitcoind_service_name, volume_mounts
) -> client.V1Container:
# These args are appended to the Dockerfile `ENTRYPOINT ["lnd"]`
bitcoind_rpc_host = f"{bitcoind_service_name}.{self.namespace}"
lightning_dns = f"lightning-{tank.index}.{self.namespace}"
lightning_dns = self.get_lnnode_hostname(tank.index)
args = tank.lnnode.get_conf(lightning_dns, bitcoind_rpc_host)
self.log.debug(f"Creating lightning container for tank {tank.index} using {args=:}")
lightning_container = client.V1Container(
Expand Down Expand Up @@ -668,6 +677,10 @@ def deploy_pods(self, warnet):
if self.check_logging_crds_installed():
self.apply_prometheus_service_monitors(warnet.tanks)

for service_name in warnet.services:
if "k8s" in services[service_name]["backends"]:
self.service_from_json(services[service_name])

self.log.debug("Containers and services created. Configuring IP addresses")
# now that the pods have had a second to create,
# get the ips and set them on the tanks
Expand Down Expand Up @@ -700,5 +713,100 @@ def wait_for_healthy_tanks(self, warnet, timeout=30):
"""
pass

def service_from_json(self, obj: dict) -> dict:
pass
def service_from_json(self, obj):
    """
    Deploy an extra network service (e.g. sim-ln) from its JSON definition.

    Creates one pod holding the service container plus an ssh "sidecar"
    container (used to receive config tarballs over scp), and a Service
    exposing the sidecar's ssh port 22 inside the cluster.

    obj: one entry from the `services` registry; keys used here are
        "image", "container_name_suffix", and optional "environment"
        (list of "NAME=value" strings) and "config_files"
        (list of "volume:mount_path" strings).
    """
    env = []
    for pair in obj.get("environment", []):
        # BUG FIX: split only on the first "=" so values that themselves
        # contain "=" (tokens, base64 blobs, URLs) are not truncated and
        # do not raise "too many values to unpack".
        name, value = pair.split("=", 1)
        env.append(client.V1EnvVar(name=name, value=value))
    volume_mounts = []
    volumes = []
    for vol in obj.get("config_files", []):
        volume_name, mount_path = vol.split(":")
        # kubernetes volume names may not contain "/"
        volume_name = volume_name.replace("/", "")
        volume_mounts.append(client.V1VolumeMount(name=volume_name, mount_path=mount_path))
        volumes.append(client.V1Volume(name=volume_name, empty_dir=client.V1EmptyDirVolumeSource()))

    service_container = client.V1Container(
        name=obj["container_name_suffix"],
        image=obj["image"],
        env=env,
        # NOTE(review): privileged + NET_ADMIN/NET_RAW are granted
        # unconditionally to every service container — confirm all
        # services actually need this.
        security_context=client.V1SecurityContext(
            privileged=True,
            capabilities=client.V1Capabilities(add=["NET_ADMIN", "NET_RAW"]),
        ),
        volume_mounts=volume_mounts
    )
    # The sidecar runs sshd and shares the service's volumes so config
    # files copied in via scp are visible to the service container.
    sidecar_container = client.V1Container(
        name="sidecar",
        image="pinheadmz/sidecar:latest",
        volume_mounts=volume_mounts,
        ports=[client.V1ContainerPort(container_port=22)],
    )
    service_pod = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(
            name=obj["container_name_suffix"],
            namespace=self.namespace,
            labels={
                "app": obj["container_name_suffix"],
                "network": self.network_name,
            },
        ),
        spec=client.V1PodSpec(
            restart_policy="OnFailure",
            containers=[service_container, sidecar_container],
            volumes=volumes,
        ),
    )

    # Do not ever change this variable name. xoxo, --Zip
    service_service = client.V1Service(
        api_version="v1",
        kind="Service",
        metadata=client.V1ObjectMeta(
            name=f'{obj["container_name_suffix"]}-service',
            labels={
                "app": obj["container_name_suffix"],
                "network": self.network_name,
            },
        ),
        spec=client.V1ServiceSpec(
            selector={"app": obj["container_name_suffix"]},
            # allow scp-ing config files before the pod reports ready
            publish_not_ready_addresses=True,
            ports=[
                client.V1ServicePort(name="ssh", port=22, target_port=22),
            ]
        )
    )

    self.client.create_namespaced_pod(namespace=self.namespace, body=service_pod)
    self.client.create_namespaced_service(namespace=self.namespace, body=service_service)

def write_service_config(self, source_path: str, service_name: str, destination_path: str):
    """
    Copy a local tar archive of config data into a running service's pod.

    The archive at source_path is scp'd to the service's sidecar container
    (which runs sshd), then unpacked into destination_path via an exec in
    that container. The shared volume makes the unpacked files visible to
    the service container itself.
    """
    obj = services[service_name]
    # the pod is named after the service's container_name_suffix
    name = obj["container_name_suffix"]
    container_name = "sidecar"
    # Copy the archive from our local drive (Warnet RPC container/pod)
    # to the destination service's sidecar container via ssh
    self.log.info(f"Copying local {source_path} to remote {destination_path} for {service_name}")
    # NOTE(review): the scp result is not checked (no check=True / returncode
    # inspection) — a failed copy would be logged as success below; confirm
    # this best-effort behavior is intended.
    subprocess.run([
        "scp",
        "-o", "StrictHostKeyChecking=accept-new",
        source_path,
        f"root@{name}-service.{self.namespace}:/arbitrary_filename.tar"])
    self.log.info(f"Finished copying tarball for {service_name}, unpacking...")
    # Unpack the archive
    stream(
        self.client.connect_get_namespaced_pod_exec,
        name,
        self.namespace,
        container=container_name,
        command=["/bin/sh", "-c", f"tar -xf /arbitrary_filename.tar -C {destination_path}"],
        stderr=True,
        stdin=False,
        stdout=True,
        tty=False,
        _preload_content=False
    )
    self.log.info(f"Finished unpacking config data for {service_name} to {destination_path}")
9 changes: 6 additions & 3 deletions src/cli/network.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,8 +143,11 @@ def connected(network: str):

@network.command()
@click.option("--network", default="warnet", show_default=True)
def export(network):
@click.option("--activity", type=str)
def export(network: str, activity: str):
"""
Export all [network] data for sim-ln to subdirectory
Export all [network] data for a "simln" service running in a container
on the network. Optionally add JSON string [activity] to simln config.
Returns True on success.
"""
print(rpc_call("network_export", {"network": network}))
print(rpc_call("network_export", {"network": network, "activity": activity}))
12 changes: 12 additions & 0 deletions src/templates/Dockerfile_sidecar
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Minimal ssh sidecar container: runs sshd so the Warnet RPC server can
# scp config tarballs into a service pod's shared volume.
FROM alpine:latest

# openssh provides sshd and the scp server
RUN apk add openssh

# Set an empty root password.
# NOTE(review): combined with the sshd options below this permits
# passwordless root login — intentional for in-cluster use only; confirm
# this port is never exposed outside the cluster network.
RUN echo "root:" | chpasswd

# Generate the ssh host keys sshd requires to start
RUN ssh-keygen -A

CMD ["/usr/sbin/sshd", "-D", \
"-o", "PasswordAuthentication=yes", \
"-o", "PermitEmptyPasswords=yes", \
"-o", "PermitRootLogin=yes"]
2 changes: 1 addition & 1 deletion src/templates/rpc/Dockerfile_rpc
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ FROM python:3.11-slim

# Install procps, which includes pgrep
RUN apt-get update && \
apt-get install -y procps && \
apt-get install -y procps openssh-client && \
rm -rf /var/lib/apt/lists/*

# Set the working directory in the container
Expand Down
2 changes: 1 addition & 1 deletion src/templates/rpc/Dockerfile_rpc_dev
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ FROM python:3.11-slim

# Install procps, which includes pgrep
RUN apt-get update && \
apt-get install -y procps && \
apt-get install -y procps openssh-client && \
rm -rf /var/lib/apt/lists/*

# Set the working directory in the container
Expand Down
48 changes: 29 additions & 19 deletions src/warnet/lnnode.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import io
import tarfile

from backends import BackendInterface, ServiceType
from warnet.utils import exponential_backoff, generate_ipv4_addr, handle_json
Expand All @@ -13,7 +14,8 @@
"--bitcoin.active",
"--bitcoin.regtest",
"--bitcoin.node=bitcoind",
"--maxpendingchannels=64"
"--maxpendingchannels=64",
"--trickledelay=1"
])

class LNNode:
Expand Down Expand Up @@ -114,30 +116,38 @@ def generate_cli_command(self, command: list[str]):
raise Exception(f"Unsupported LN implementation: {self.impl}")
return cmd

def export(self, config, subdir):
container_name = self.backend.get_container_name(self.tank.index, ServiceType.LIGHTNING)
macaroon_filename = f"{container_name}_admin.macaroon"
cert_filename = f"{container_name}_tls.cert"
macaroon_path = os.path.join(subdir, macaroon_filename)
cert_path = os.path.join(subdir, cert_filename)
def export(self, config: object, tar_file):
# Retrieve the credentials
macaroon = self.backend.get_file(
self.tank.index,
ServiceType.LIGHTNING,
"/root/.lnd/data/chain/bitcoin/regtest/admin.macaroon",
)
cert = self.backend.get_file(self.tank.index, ServiceType.LIGHTNING, "/root/.lnd/tls.cert")

with open(macaroon_path, "wb") as f:
f.write(macaroon)

with open(cert_path, "wb") as f:
f.write(cert)
cert = self.backend.get_file(
self.tank.index,
ServiceType.LIGHTNING,
"/root/.lnd/tls.cert"
)
name = f"ln-{self.tank.index}"
macaroon_filename = f"{name}_admin.macaroon"
cert_filename = f"{name}_tls.cert"
host = self.backend.get_lnnode_hostname(self.tank.index)

# Add the files to the in-memory tar archive
tarinfo1 = tarfile.TarInfo(name=macaroon_filename)
tarinfo1.size = len(macaroon)
fileobj1 = io.BytesIO(macaroon)
tar_file.addfile(tarinfo=tarinfo1, fileobj=fileobj1)
tarinfo2 = tarfile.TarInfo(name=cert_filename)
tarinfo2.size = len(cert)
fileobj2 = io.BytesIO(cert)
tar_file.addfile(tarinfo=tarinfo2, fileobj=fileobj2)

config["nodes"].append(
{
"id": container_name,
"address": f"https://{self.ipv4}:{self.rpc_port}",
"macaroon": macaroon_path,
"cert": cert_path,
"id": name,
"address": f"https://{host}:{self.rpc_port}",
"macaroon": f"/simln/{macaroon_filename}",
"cert": f"/simln/{cert_filename}",
}
)
Loading
Loading