From 64a55cc7415e5c401c26a9d920f2ce6692740680 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Thu, 23 Jan 2025 10:00:57 +0100 Subject: [PATCH 1/4] Split Cookbook document into three, for scenarios --- docs/docs/Cookbook.mdx | 363 ----------------------------- docs/docs/cookbook/1-edge.mdx | 292 +++++++++++++++++++++++ docs/docs/cookbook/2-incluster.mdx | 97 ++++++++ docs/docs/cookbook/3-tunneled.mdx | 73 ++++++ docs/docs/cookbook/_category_.json | 8 + 5 files changed, 470 insertions(+), 363 deletions(-) delete mode 100644 docs/docs/Cookbook.mdx create mode 100644 docs/docs/cookbook/1-edge.mdx create mode 100644 docs/docs/cookbook/2-incluster.mdx create mode 100644 docs/docs/cookbook/3-tunneled.mdx create mode 100644 docs/docs/cookbook/_category_.json diff --git a/docs/docs/Cookbook.mdx b/docs/docs/Cookbook.mdx deleted file mode 100644 index 1bdece22..00000000 --- a/docs/docs/Cookbook.mdx +++ /dev/null @@ -1,363 +0,0 @@ ---- -sidebar_position: 3 ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import ThemedImage from '@theme/ThemedImage'; -import useBaseUrl from '@docusaurus/useBaseUrl'; - - -# Cookbook - -These are practical recipes for different deployment scenarios. - -Select here the tab with the scenario you want deploy: - - - - - - - - - - - - - -Select here the featured plugin you want to try: - - - - Offload your pods to a remote machine with Docker engine available - - - Offload your pods to an HPC SLURM based batch system - - - Offload your pods to a remote Kubernetes cluster: COMING SOON - For test instructions contact us! - - - -There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. You can find some ref in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) - -## Install interLink - -### Deploy Remote components - -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. - -#### Interlink API server - - - - __For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port (30443 if you use the automatic script for installation)__ - - You first need to initialize an OIDC client with you Identity Provider (IdP). - - Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), [INFN IAM](./guides/oidc-IAM). - - First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer: - - ```bash - export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name) - wget -O interlink-installer https://github.com/interTwin-eu/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64 - chmod +x interlink-installer - ``` - - Create a template configuration with the init option: - - ```bash - mkdir -p interlink - ./interlink-installer --init --config ./interlink/.installer.yaml - ``` - - The configuration file should be filled as followed. 
This is the case where the `my-node` will contact an edge service that will be listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an OIDC provider `https://my_oidc_idp.com`: - - ```bash title="./interlink/.installer.yaml" - interlink_ip: PUBLIC_IP - interlink_port: API_PORT - interlink_version: 0.3.3 - kubelet_node_name: my-node - kubernetes_namespace: interlink - node_limits: - cpu: "1000" - # MEMORY in GB - memory: 25600 - pods: "100" - oauth: - provider: oidc - issuer: https://my_oidc_idp.com/ - scopes: - - "openid" - - "email" - - "offline_access" - - "profile" - audience: interlink - grant_type: authorization_code - group_claim: groups - group: "my_vk_allowed_group" - token_url: "https://my_oidc_idp.com/token" - device_code_url: "https://my_oidc_idp/auth/device" - client_id: "oidc-client-xx" - client_secret: "xxxxxx" - insecure_http: true - ``` - - Now you are ready to start the OIDC authentication flow to generate all your manifests and configuration files for the interLink components. To do so, just execute the installer: - - ```bash - ./interlink-installer --config ./interlink/.installer.yaml --output-dir ./interlink/manifests/ - ``` - - Install Oauth2-Proxy and interLink API server services and configurations with: - - ```bash - chmod +x ./interlink/manifests/interlink-remote.sh - ./interlink/manifests/interlink-remote.sh install - ``` - - Then start the services with: - - ```bash - ./interlink/manifests/interlink-remote.sh start - ``` - - With `stop` command you can stop the service. By default logs are store in `~/.interlink/logs`, checkout there for any error before moving to the next step. - - __N.B.__ you can look the oauth2_proxy configuration parameters looking into the `interlink-remote.sh` script. - - __N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider to implement your favorite rotation routine for all the logs in `~/.interlink/logs/` - - - Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). The selected scenario does not expect you to do anything here. - - - COMING SOON... - - - - -#### Plugin service - - - - - - - Create utility folders: - - ```bash - mkdir -p $HOME/.interlink/logs - mkdir -p $HOME/.interlink/bin - mkdir -p $HOME/.interlink/config - ``` - - Create a configuration file: - - ```bash title="$HOME/.interlink/config/plugin-config.yaml" - ## Multi user host - Socket: "unix:///home/myusername/.plugin.sock" - InterlinkPort: "0" - SidecarPort: "0" - - CommandPrefix: "" - DataRootFolder: "/home/myusername/.interlink/jobs/" - BashPath: /bin/bash - VerboseLogging: false - ErrorsOnlyLogging: false - ``` - - __N.B.__ Depending on wheter you edge is single user or not, you should know by previous steps which section to uncomment here. - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) - - - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) binary in `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) - - Start the plugins passing the configuration that you have just created: - - ```bash - export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - ``` - - - Check the logs in `$HOME/.interlink/logs/plugin.log`. 
- - To kill and restart the process is enough: - - ```bash - # kill - kill $(cat $HOME/.interlink/plugin.pid) - - # restart - export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - - Almost there! Now it's time to add this virtual node into the Kubernetes cluster! - - - - Create utility folders - - ```bash - mkdir -p $HOME/.interlink/logs - mkdir -p $HOME/.interlink/bin - mkdir -p $HOME/.interlink/config - ``` - - - Create a configuration file (__remember to substitute `/home/username/` with your actual home path__): - - ```bash title="./interlink/manifests/plugin-config.yaml" - Socket: "unix:///home/myusername/.plugin.sock" - InterlinkPort: "0" - SidecarPort: "0" - - CommandPrefix: "" - DataRootFolder: "/home/myusername/.interlink/jobs/" - BashPath: /bin/bash - VerboseLogging: false - ErrorsOnlyLogging: false - SbatchPath: "/usr/bin/sbatch" - ScancelPath: "/usr/bin/scancel" - SqueuePath: "/usr/bin/squeue" - SingularityPrefix: "" - ``` - - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) - - - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) binary in `$HOME/.interlink/bin/plugin` - - ```bash - export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name) - wget -O $HOME/.interlink/bin/plugin https://github.com/interTwin-eu/interlink-slurm-plugin/releases/download/${PLUGIN_VERSION}/interlink-sidecar-slurm_Linux_x86_64 - ``` - - - Start the plugins passing the configuration that you have just created: - - ```bash - export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - ``` - - - Check the logs in `$HOME/.interlink/logs/plugin.log`. - - To kill and restart the process is enough: - - ```bash - # kill - kill $(cat $HOME/.interlink/plugin.pid) - - # restart - export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - - Almost there! Now it's time to add this virtual node into the Kubernetes cluster! - - - __KUBERNTES PLUGIN COMING SOOON... CONTACT US FOR TEST INSTRUCTIONS__ - - - - - - Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). The selected scenario does not expect you to do anything here. - - - - COMING SOON... - - - - - - -#### Test interLink stack health - -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. - -``` -curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink -``` - -This call will return the status of the system and its readiness to submit jobs. - - -### Deploy Kubernetes components - -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. 
- - - - You can now install the helm chart with the preconfigured (by the installer script) helm values in `./interlink/manifests/values.yaml` - - ```bash - helm upgrade --install \ - --create-namespace \ - -n interlink \ - my-node \ - oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \ - --values ./interlink/manifests/values.yaml - ``` - - You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) by using the `--version` option. - - - - - Create an helm values file: - - ```yaml title="values.yaml" - nodeName: interlink-with-socket - - plugin: - enabled: true - image: "plugin docker image here" - command: ["/bin/bash", "-c"] - args: ["/app/plugin"] - config: | - your plugin - configuration - goes here!!! - socket: unix:///var/run/plugin.sock - - interlink: - enabled: true - socket: unix:///var/run/interlink.sock - ``` - - Eventually deploy the latest release of the official [helm chart](https://github.com/interTwin-eu/interlink-helm-chart): - - ```bash - helm upgrade --install --create-namespace -n interlink my-virtual-node oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink --values ./values.yaml - ``` - - You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) by using the `--version` option. - - - COMING SOON... - - - -Whenever you see the node ready, you are good to go! - -To start debugging in case of problems we suggest starting from the pod containers logs! - -## Test the setup - -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). - - diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx new file mode 100644 index 00000000..9f80a977 --- /dev/null +++ b/docs/docs/cookbook/1-edge.mdx @@ -0,0 +1,292 @@ +--- +sidebar_position: 3 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ThemedImage from '@theme/ThemedImage'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + + +# Edge node deployment + + + + + +## Install interLink + +### Deploy Remote components + +In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +status until all the stack is successfully deployed. + +#### Interlink API server + +__For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port +(30443 if you use the automatic script for installation)__ + +You first need to initialize an OIDC client with you Identity Provider (IdP). + +Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going +to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated +pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), +[INFN IAM](./guides/oidc-IAM). 
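+
+The steps below rely on `curl`, `wget`, and `jq` being available on the edge host.
+If they are missing, install them first; for example, on a Debian-like host
+(an example only, adapt the command to your distribution):
+
+```bash
+# Example for Debian/Ubuntu hosts; use your distribution's package manager.
+sudo apt-get update && sudo apt-get install -y curl wget jq
+```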
+
+First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer:
+
+```bash
+export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name)
+wget -O interlink-installer https://github.com/interTwin-eu/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64
+chmod +x interlink-installer
+```
+
+Create a template configuration with the init option:
+
+```bash
+mkdir -p interlink
+./interlink-installer --init --config ./interlink/.installer.yaml
+```
+
+The configuration file should be filled in as follows. This is the case where `my-node` will contact an edge service
+listening on `PUBLIC_IP` and `API_PORT`, authenticating requests from an OIDC provider `https://my_oidc_idp.com`:
+
+```bash title="./interlink/.installer.yaml"
+interlink_ip: PUBLIC_IP
+interlink_port: API_PORT
+interlink_version: 0.3.3
+kubelet_node_name: my-node
+kubernetes_namespace: interlink
+node_limits:
+  cpu: "1000"
+  # MEMORY in GB
+  memory: 25600
+  pods: "100"
+oauth:
+  provider: oidc
+  issuer: https://my_oidc_idp.com/
+  scopes:
+    - "openid"
+    - "email"
+    - "offline_access"
+    - "profile"
+  audience: interlink
+  grant_type: authorization_code
+  group_claim: groups
+  group: "my_vk_allowed_group"
+  token_url: "https://my_oidc_idp.com/token"
+  device_code_url: "https://my_oidc_idp/auth/device"
+  client_id: "oidc-client-xx"
+  client_secret: "xxxxxx"
+insecure_http: true
+```
+
+Now you are ready to start the OIDC authentication flow to generate all your manifests and configuration files for the interLink components. To do so, just execute the installer:
+
+```bash
+./interlink-installer --config ./interlink/.installer.yaml --output-dir ./interlink/manifests/
+```
+
+Install the OAuth2-Proxy and interLink API server services and configurations with:
+
+```bash
+chmod +x ./interlink/manifests/interlink-remote.sh
+./interlink/manifests/interlink-remote.sh install
+```
+
+Then start the services with:
+
+```bash
+./interlink/manifests/interlink-remote.sh start
+```
+
+With the `stop` command you can stop the services. By default, logs are stored in
+`~/.interlink/logs`; check there for any errors before moving to the next step.
+
+__N.B.__ you can review the oauth2_proxy configuration parameters by looking into
+the `interlink-remote.sh` script.
+
+__N.B.__ logs (especially in verbose mode) can become pretty large; consider
+implementing your favorite rotation routine for all the logs in `~/.interlink/logs/`.
+
+
+#### Plugin service
+
+Select here the featured plugin you want to try:
+
+
+  _Offload your pods to a remote machine with Docker engine available._
+
+  - Create utility folders:
+
+    ```bash
+    mkdir -p $HOME/.interlink/logs
+    mkdir -p $HOME/.interlink/bin
+    mkdir -p $HOME/.interlink/config
+    ```
+  - Create a configuration file:
+
+    ```bash title="$HOME/.interlink/config/plugin-config.yaml"
+    ## Multi user host
+    Socket: "unix:///home/myusername/.plugin.sock"
+    InterlinkPort: "0"
+    SidecarPort: "0"
+
+    CommandPrefix: ""
+    DataRootFolder: "/home/myusername/.interlink/jobs/"
+    BashPath: /bin/bash
+    VerboseLogging: false
+    ErrorsOnlyLogging: false
+    ```
+  - __N.B.__ Depending on whether your edge is single user or not,
+    you should know from the previous steps which section to uncomment here.
+  - More on configuration options at the
+    [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md)
+
+  - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases)
+    binary in `$HOME/.interlink/bin/plugin` for either a GPU host or a CPU host (tags ending with `no-GPU`)
+  - Start the plugin passing the configuration that you have just created:
+
+    ```bash
+    export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml
+    $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+    echo $! > $HOME/.interlink/plugin.pid
+    ```
+
+  - Check the logs in `$HOME/.interlink/logs/plugin.log`.
+  - To kill and restart the process, it is enough to run:
+
+    ```bash
+    # kill
+    kill $(cat $HOME/.interlink/plugin.pid)
+
+    # restart
+    export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml
+    $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+    echo $! > $HOME/.interlink/plugin.pid
+    ```
+
+  Almost there! Now it's time to add this virtual node into the Kubernetes cluster!
+
+
+  _Offload your pods to an HPC SLURM based batch system._
+
+  - Create utility folders:
+
+    ```bash
+    mkdir -p $HOME/.interlink/logs
+    mkdir -p $HOME/.interlink/bin
+    mkdir -p $HOME/.interlink/config
+    ```
+
+  - Create a configuration file (__remember to substitute `/home/myusername/` with your actual home path__):
+
+    ```bash title="./interlink/manifests/plugin-config.yaml"
+    Socket: "unix:///home/myusername/.plugin.sock"
+    InterlinkPort: "0"
+    SidecarPort: "0"
+
+    CommandPrefix: ""
+    DataRootFolder: "/home/myusername/.interlink/jobs/"
+    BashPath: /bin/bash
+    VerboseLogging: false
+    ErrorsOnlyLogging: false
+    SbatchPath: "/usr/bin/sbatch"
+    ScancelPath: "/usr/bin/scancel"
+    SqueuePath: "/usr/bin/squeue"
+    SingularityPrefix: ""
+    ```
+
+  - More on configuration options at the
+    [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md)
+
+  - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases)
+    binary in `$HOME/.interlink/bin/plugin`:
+
+    ```bash
+    export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name)
+    wget -O $HOME/.interlink/bin/plugin https://github.com/interTwin-eu/interlink-slurm-plugin/releases/download/${PLUGIN_VERSION}/interlink-sidecar-slurm_Linux_x86_64
+    ```
+
+  - Start the plugin passing the configuration that you have just created:
+
+    ```bash
+    export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml
+    $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+    echo $! > $HOME/.interlink/plugin.pid
+    ```
+
+  - Check the logs in `$HOME/.interlink/logs/plugin.log`.
+  - To kill and restart the process, it is enough to run:
+
+    ```bash
+    # kill
+    kill $(cat $HOME/.interlink/plugin.pid)
+
+    # restart
+    export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml
+    $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log &
+    echo $! > $HOME/.interlink/plugin.pid
+    ```
+
+  Almost there! Now it's time to add this virtual node into the Kubernetes cluster!
+
+
+  _Offload your pods to a remote Kubernetes cluster._
+
+:::note
+  KUBERNETES PLUGIN IS COMING SOON!
+  For test instructions contact us!
+:::
+
+
+##### 3rd-party plugins
+
+There are more 3rd-party plugins that you can take inspiration from, or even use out of the box.
+You can find some references in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one).
+
+
+#### Test interLink stack health
+
+interLink comes with a call that can be used to monitor the overall status of both the interLink server and plugins at once:
+
+```
+curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink
+```
+
+This call will return the status of the system and its readiness to submit jobs.
+
+
+### Deploy Kubernetes components
+
+The deployment of the Kubernetes components is managed by the official
+[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart).
+Depending on the scenario you selected, there might be additional operations to be done.
+
+You can now install the helm chart with the helm values preconfigured by the installer script in `./interlink/manifests/values.yaml`:
+
+```bash
+helm upgrade --install \
+  --create-namespace \
+  -n interlink \
+  my-node \
+  oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \
+  --values ./interlink/manifests/values.yaml
+```
+
+You can pin the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18)
+by using the `--version` option.
+
+Whenever you see the node ready, you are good to go!
+
+To start debugging in case of problems, we suggest starting from the pod container logs!
+
+## Test the setup
+
+Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out).
+
+
diff --git a/docs/docs/cookbook/2-incluster.mdx b/docs/docs/cookbook/2-incluster.mdx
new file mode 100644
index 00000000..d204725e
--- /dev/null
+++ b/docs/docs/cookbook/2-incluster.mdx
@@ -0,0 +1,97 @@
+---
+sidebar_position: 3
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import ThemedImage from '@theme/ThemedImage';
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+
+# In-cluster deployment
+
+
+## Install interLink
+
+### Deploy Remote components
+
+In general, starting from the deployment of the remote components is advised, since the kubernetes virtual node won't reach the `Ready` status until the whole stack is successfully deployed.
+
+#### Interlink API server
+
+:::note
+Go directly to ["Test and debugging tips"](Cookbook#test-and-debug).
+The selected scenario does not expect you to do anything here.
+:::
+
+
+#### Plugin service
+
+:::note
+Go directly to ["Test and debugging tips"](Cookbook#test-and-debug).
+The selected scenario does not expect you to do anything here.
+:::
+
+
+#### Test interLink stack health
+
+interLink comes with a call that can be used to monitor the overall status of both the interLink server and plugins at once:
+
+```
+curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink
+```
+
+This call will return the status of the system and its readiness to submit jobs.
+
+
+### Deploy Kubernetes components
+
+The deployment of the Kubernetes components is managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done.
+
+- Create a helm values file:
+
+```yaml title="values.yaml"
+nodeName: interlink-with-socket
+
+plugin:
+  enabled: true
+  image: "plugin docker image here"
+  command: ["/bin/bash", "-c"]
+  args: ["/app/plugin"]
+  config: |
+    your plugin
+    configuration
+    goes here!!!
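+  # NOTE (editor's sketch): the inline `config` above is handed to the plugin
+  # container by the chart, and is expected to use your plugin's own options
+  # (e.g. the `Socket`/`DataRootFolder` style keys shown in the edge-node
+  # recipe). The `socket` value below is an assumption of where that plugin
+  # will serve its API; keep the two consistent.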
+  socket: unix:///var/run/plugin.sock
+
+interlink:
+  enabled: true
+  socket: unix:///var/run/interlink.sock
+```
+
+Finally, deploy the latest release of the official
+[helm chart](https://github.com/interTwin-eu/interlink-helm-chart):
+
+```bash
+helm upgrade --install --create-namespace \
+    -n interlink my-virtual-node oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \
+    --values ./values.yaml
+```
+
+You can pin the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18)
+by using the `--version` option.
+
+Whenever you see the node ready, you are good to go!
+
+To start debugging in case of problems, we suggest starting from the pod container logs!
+
+## Test the setup
+
+Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out).
diff --git a/docs/docs/cookbook/3-tunneled.mdx b/docs/docs/cookbook/3-tunneled.mdx
new file mode 100644
index 00000000..461442f6
--- /dev/null
+++ b/docs/docs/cookbook/3-tunneled.mdx
@@ -0,0 +1,73 @@
+---
+sidebar_position: 3
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import ThemedImage from '@theme/ThemedImage';
+import useBaseUrl from '@docusaurus/useBaseUrl';
+
+
+# Tunneled deployment
+
+Select here the tab with the scenario you want to deploy:
+
+
+## Install interLink
+
+### Deploy Remote components
+
+In general, starting from the deployment of the remote components is advised, since the kubernetes virtual node won't reach the `Ready`
+status until the whole stack is successfully deployed.
+
+#### Interlink API server
+
+:::note
+COMING SOON...
+:::
+
+
+#### Plugin service
+
+:::note
+COMING SOON...
+:::
+
+
+#### Test interLink stack health
+
+interLink comes with a call that can be used to monitor the overall status of both the interLink server and plugins at once:
+
+```
+curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink
+```
+
+This call will return the status of the system and its readiness to submit jobs.
+
+
+### Deploy Kubernetes components
+
+The deployment of the Kubernetes components is managed by the official
+[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart).
+Depending on the scenario you selected, there might be additional operations to be done.
+
+:::note
+COMING SOON...
+:::
+
+
+Whenever you see the node ready, you are good to go!
+
+To start debugging in case of problems, we suggest starting from the pod container logs!
+
+## Test the setup
+
+Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out).
+
+
diff --git a/docs/docs/cookbook/_category_.json b/docs/docs/cookbook/_category_.json
new file mode 100644
index 00000000..d8253b65
--- /dev/null
+++ b/docs/docs/cookbook/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Cookbook",
+  "position": 3,
+  "link": {
+    "type": "generated-index",
+    "description": "Practical recipes for different deployment scenarios."
+ } +} From e6cb71120ecbb9363209e5f8061a3d1c6867d3b7 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 24 Jan 2025 18:45:26 +0100 Subject: [PATCH 2/4] ignore docs/yarn.lock --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 541c0ea0..67111146 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ interlink-install vendor dist/* +docs/yarn.lock report/* __pycache__/* vendor/* From 12f121be66efabae640a74562b7327d60f7cc78d Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 31 Jan 2025 10:33:23 +0100 Subject: [PATCH 3/4] Fix broken links --- docs/docs/cookbook/1-edge.mdx | 66 +++++++++++++++--------- docs/docs/cookbook/2-incluster.mdx | 18 ++++--- docs/docs/cookbook/3-tunneled.mdx | 16 +++--- docs/docs/guides/01-deploy-interlink.mdx | 2 +- 4 files changed, 64 insertions(+), 38 deletions(-) diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx index 9f80a977..936e2755 100644 --- a/docs/docs/cookbook/1-edge.mdx +++ b/docs/docs/cookbook/1-edge.mdx @@ -9,6 +9,9 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; # Edge node deployment +Deploy interLink API server on an edge node, dispatching jobs on a remote system +according to an interLink plugin. + - ## Install interLink ### Deploy Remote components -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +In general, starting from the deployment of the remote components is adviced. +Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. #### Interlink API server -__For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port +__For this deployment mode the remote host has to allow the kubernetes cluster +to connect to the Oauth2 proxy service port (30443 if you use the automatic script for installation)__ You first need to initialize an OIDC client with you Identity Provider (IdP). -Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going -to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated -pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), -[INFN IAM](./guides/oidc-IAM). +Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) +tool will do the work, we are going +to put the configuration for a generic OIDC identity provider in this cookbook. +Nevertheless you can find more detailed on dedicated +pages with instructions ready for [GitHub](../guides/deploy-interlink#create-an-oauth-github-app), +[EGI checkin](../guides/oidc-IAM), [INFN IAM](../guides/oidc-IAM). -First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer: +First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) +of the interLink installer: ```bash export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name) @@ -53,8 +60,10 @@ mkdir -p interlink ./interlink-installer --init --config ./interlink/.installer.yaml ``` -The configuration file should be filled as followed. 
This is the case where the `my-node` will contact an edge service that will be -listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an OIDC provider `https://my_oidc_idp.com`: +The configuration file should be filled as followed. This is the case where the +`my-node` will contact an edge service that will be +listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an +OIDC provider `https://my_oidc_idp.com`: ```bash title="./interlink/.installer.yaml" interlink_ip: PUBLIC_IP @@ -105,11 +114,14 @@ Then start the services with: ./interlink/manifests/interlink-remote.sh start ``` -With `stop` command you can stop the service. By default logs are store in `~/.interlink/logs`, checkout there for any error before moving to the next step. +With `stop` command you can stop the service. By default logs are store in +`~/.interlink/logs`, checkout there for any error before moving to the next step. -__N.B.__ you can look the oauth2_proxy configuration parameters looking into the `interlink-remote.sh` script. +__N.B.__ you can look the oauth2_proxy configuration parameters looking into +the `interlink-remote.sh` script. -__N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider to implement your favorite rotation routine for all the logs in `~/.interlink/logs/` +__N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider +to implement your favorite rotation routine for all the logs in `~/.interlink/logs/`. #### Plugin service @@ -141,11 +153,13 @@ Select here the featured plugin you want to try: VerboseLogging: false ErrorsOnlyLogging: false ``` - - __N.B.__ Depending on wheter you edge is single user or not, you should know by previous steps which section to uncomment here. - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) + - __N.B.__ Depending on wheter you edge is single user or not, + you should know by previous steps which section to uncomment here. 
+ - More on configuration options at + [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) - - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) binary in - `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) + - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) + binary in `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) - Start the plugins passing the configuration that you have just created: ```bash @@ -199,10 +213,11 @@ Select here the featured plugin you want to try: SingularityPrefix: "" ``` - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) + - More on configuration options at + [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) - - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) binary in - `$HOME/.interlink/bin/plugin` + - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) + binary in `$HOME/.interlink/bin/plugin` ```bash export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name) @@ -248,7 +263,8 @@ Select here the featured plugin you want to try: ##### 3rd-party plugins There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. -You can find some ref in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) +You can find some ref in the +[quick start section](../guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) #### Test interLink stack health @@ -264,10 +280,12 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. -You can now install the helm chart with the preconfigured (by the installer script) helm values in `./interlink/manifests/values.yaml` +You can now install the helm chart with the preconfigured (by the installer script) +helm values in `./interlink/manifests/values.yaml` ```bash helm upgrade --install \ @@ -287,6 +305,6 @@ To start debugging in case of problems we suggest starting from the pod containe ## Test the setup -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/cookbook/2-incluster.mdx b/docs/docs/cookbook/2-incluster.mdx index d204725e..7d4bffb0 100644 --- a/docs/docs/cookbook/2-incluster.mdx +++ b/docs/docs/cookbook/2-incluster.mdx @@ -27,7 +27,7 @@ In general, starting from the deployment of the remote components is adviced. Si #### Interlink API server :::note -Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +Go directly to ["Test and debugging tips"](#test-and-debug). The selected scenario does not expect you to do anything here. 
::: @@ -35,7 +35,7 @@ The selected scenario does not expect you to do anything here. #### Plugin service :::note -Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +Go directly to ["Test and debugging tips"](#test-and-debug). The selected scenario does not expect you to do anything here. ::: @@ -43,7 +43,8 @@ The selected scenario does not expect you to do anything here. #### Test interLink stack health -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. +InterLink comes with a call that can be used to monitor the overall status of +both interlink server and plugins, at once. ``` curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink @@ -54,7 +55,9 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. +The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations to be done. - Create an helm values file: @@ -77,7 +80,8 @@ interlink: socket: unix:///var/run/interlink.sock ``` -Eventually deploy the latest release of the official [helm chart](https://github.com/interTwin-eu/interlink-helm-chart): +Eventually deploy the latest release of the official +[helm chart](https://github.com/interTwin-eu/interlink-helm-chart): ```bash helm upgrade --install --create-namespace \ @@ -92,6 +96,6 @@ Whenever you see the node ready, you are good to go! To start debugging in case of problems we suggest starting from the pod containers logs! -## Test the setup +## Test and debug -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/cookbook/3-tunneled.mdx b/docs/docs/cookbook/3-tunneled.mdx index 461442f6..541e982f 100644 --- a/docs/docs/cookbook/3-tunneled.mdx +++ b/docs/docs/cookbook/3-tunneled.mdx @@ -24,7 +24,8 @@ Select here the tab with the scenario you want deploy: ### Deploy Remote components -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +In general, starting from the deployment of the remote components is adviced. +Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. #### Interlink API server @@ -43,7 +44,8 @@ COMING SOON... #### Test interLink stack health -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. +interLink comes with a call that can be used to monitor the overall status of +both interlink server and plugins, at once. ``` curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink @@ -54,8 +56,10 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). -Depending on the scenario you selected, there might be additional operations to be done. 
+The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations +to be done. :::note COMING SOON... @@ -66,8 +70,8 @@ Whenever you see the node ready, you are good to go! To start debugging in case of problems we suggest starting from the pod containers logs! -## Test the setup +## Test and debug -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/guides/01-deploy-interlink.mdx b/docs/docs/guides/01-deploy-interlink.mdx index 6881de4f..ebfe18b1 100644 --- a/docs/docs/guides/01-deploy-interlink.mdx +++ b/docs/docs/guides/01-deploy-interlink.mdx @@ -10,7 +10,7 @@ Learn how to deploy interLink virtual nodes on your cluster. In this tutorial yo The installation script that we are going to configure will take care of providing you with a complete Kubernetes manifest to instantiate the virtual node interface. Also you will get an installation bash script to be executed on the remote host where you want to delegate your container execution. That script is already configured to **automatically** authenticate the incoming request from the virtual node component, and forward the correct instructions to the openAPI interface of the [interLink plugin](./api-reference) (a.k.a. sidecar) of your choice. Thus you can use this setup also for directly [developing a plugin](./develop-a-plugin), without caring for anything else. -For a complete guide on all the possible scenarios, please refer to the [Cookbook](../cookbook). +For a complete guide on all the possible scenarios, please refer to the [Cookbook](/docs/category/cookbook). ## Requirements From 30b3f9fc7e9e5bbfc8523f031123a7a542b0fc42 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 31 Jan 2025 11:55:29 +0100 Subject: [PATCH 4/4] [docs] include introduction text --- docs/docs/intro.mdx | 54 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/docs/docs/intro.mdx b/docs/docs/intro.mdx index a5e5cf60..4d2522e8 100644 --- a/docs/docs/intro.mdx +++ b/docs/docs/intro.mdx @@ -22,7 +22,51 @@ interLink is in early development phase, thus subject to breaking changes with n ::: +## Overview + +We are running a Kubernetes cluster that we are going to consider "local". +And we want to offload some of the containers to other (remote) systems -- +another K8S cluster, or an HPC cluster. + +The containers being offloaded are batch (or "job") containers -- with a pre-defined +lifecycle, non-interactive containers (see [Targets](#targets)). +The dispatching to the other (remote) system is done through a combination of +[Virtual Kubelets](https://virtual-kubelet.io/) and interLink' API and +plugins. +Plugins will define the how the containers will run on the remote system +(see [Target providers](#target-providers)). 
+ +InterLink API and the plugin can be arranged in three different ways across the local cluster +and the remote system: + +- both deployed remote (**[Edge-node](#edge-node)**) +- both deployed local (**[In-cluster](#in-cluster)**) +- API local, plugin remote (**[Tunneled](#tunneled)**) + +``` ++---------------------------+ +----------------------------+ +| Virtual Node | | Pod Containers Runtime | +| | | | +| | | | +| | | | +| +-----------------------------------------+ | +| | (API + plugin) interLink | | +| | (API) interLink (plugin) | | +| | interLink (API + plugin) | | +| +-----------------------------------------+ | +| | | | +| | | | +| | | | +| | | | +| | | | +| | | | ++---------------------------+ +----------------------------+ +``` + + + ## Targets +> rename to Applications - __K8s applications with tasks to be executed on HPC systems__: This target focuses on Kubernetes applications that require high-performance computing (HPC) resources for executing tasks. These tasks might involve complex computations, simulations, or data processing that benefit from the specialized hardware and optimized performance of HPC systems. @@ -31,15 +75,17 @@ interLink is in early development phase, thus subject to breaking changes with n - __Lambda-like functions calling on external resources__: This target involves running containers on demand with specific computing needs. Now these resources might also be outside of the Kubernetes cluster thanks to interLink functionality. ## Target providers +> rename to Runtime providers Our solution is designed to target a wide range of providers with container execution capabilities, including but not limited to: - __SLURM or HTCondor batch systems with Apptainer, Enroot, or Singularity__: These batch systems are widely used in high-performance computing environments to manage and schedule jobs. By integrating with container runtimes like Apptainer, Enroot, or Singularity, our solution can efficiently execute containerized tasks on these systems. -- __Remote/on-demand virtual machines with any container runtime__: This includes virtual machines that can be provisioned on-demand and support container runtimes such as Docker, Podman, or others. This flexibility allows for scalable and dynamic resource allocation based on workload requirements. +- __On-demand virtual machines with any container runtime__: This includes virtual machines that can be provisioned on-demand and support container runtimes such as Docker, Podman, or others. This flexibility allows for scalable and dynamic resource allocation based on workload requirements. - __Remote Kubernetes clusters__: Our solution can extend the capabilities of existing Kubernetes clusters, enabling them to offload workloads to another remote cluster. This is particularly useful for distributing workloads across multiple clusters for better resource utilization and fault tolerance. - __Lambda-like services__: These are serverless computing services that execute code in response to events and automatically manage the underlying compute resources. By targeting these services, our solution can leverage the scalability and efficiency of serverless architectures for containerized workloads. All of this, while exposing a bare Kubernetes API kind of orchestration. ## NOT a target +> rename to non an application - __Long-running services__: Our solution is not designed for services that need to run continuously for extended periods. 
It is optimized for tasks that have a defined start and end, rather than persistent services exposing intra-cluster communication endpoints. - __Kubernetes Federation__: We do not aim to support Kubernetes Federation, which involves managing multiple Kubernetes clusters as a single entity. Our focus is on enabling Kubernetes pods to execute on remote resources, not on federating all kind of resources on multiple clusters. @@ -47,7 +93,7 @@ Our solution is designed to target a wide range of providers with container exec ## Deployment scenarios -### Service remote edge node +### Edge-node In this scenario, the Virtual Kubelet communicates with remote services deployed on a dedicate edge node exposing authenticated interLink APIs and its associated plugin. This setup is ideal for scenarios where edge computing resources are utilized for controlled communication b/w the Kubernetes cluster and the remote resources. @@ -59,7 +105,7 @@ In this scenario, the Virtual Kubelet communicates with remote services deployed }} /> -### In-cluster mode +### In-cluster This scenario involves deploying a Virtual Kubelet along with the interLink API server and the plugin to interact with a remote API. This setup allows Kubernetes pods to be executed on remote resources while all other components sits inside the Kubernetes cluster. @@ -71,7 +117,7 @@ This scenario involves deploying a Virtual Kubelet along with the interLink API }} /> -### Tunneled mode +### Tunneled This deployment involves the Virtual Kubelet connecting to a remote interLink API server and its plugin through a secure tunnel. This setup ensures secure communication between the Kubernetes cluster and the remote resources, making it suitable for environments with strict security requirements or to host services on a multi user host like a login node.
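+
+One possible way to materialize such a tunnel (a sketch only, assuming plain
+SSH access to the remote host, and not an official interLink mechanism) is
+OpenSSH unix-socket forwarding:
+
+```bash
+# Forward a local unix socket to the interLink API socket on the remote host.
+# Host and paths are placeholders; adapt them to your setup (OpenSSH >= 6.7).
+ssh -N -f \
+  -L /var/run/interlink.sock:/home/myusername/.interlink.sock \
+  myusername@login-node.example.org
+```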