From e62dc18bb6b4478efbee08e86fc0db69bb76e743 Mon Sep 17 00:00:00 2001
From: "Mengqi (David) Yu"
Date: Mon, 23 Jan 2023 18:30:41 +0000
Subject: [PATCH] draft: kit tasks

---
 tests/pipelines/kit/kit-cl2-load-run.yaml     |  40 +++
 tests/pipelines/kit/kit-cl2-load.yaml         | 237 ++++++++++++++++++
 .../tasks/generators/clusterloader/load.yaml  |  20 +-
 tests/tasks/setup/kit/controlplane.yaml       | 123 +++++++++
 tests/tasks/setup/kit/dataplane.yaml          |  99 ++++++++
 tests/tasks/setup/kit/kubeconfig.yaml         |  25 ++
 .../tasks/setup/kit/label-and-taint-node.yaml |  51 ++++
 tests/tasks/setup/kit/validate-dataplane.yaml |  36 +++
 8 files changed, 620 insertions(+), 11 deletions(-)
 create mode 100644 tests/pipelines/kit/kit-cl2-load-run.yaml
 create mode 100644 tests/pipelines/kit/kit-cl2-load.yaml
 create mode 100644 tests/tasks/setup/kit/controlplane.yaml
 create mode 100644 tests/tasks/setup/kit/dataplane.yaml
 create mode 100644 tests/tasks/setup/kit/kubeconfig.yaml
 create mode 100644 tests/tasks/setup/kit/label-and-taint-node.yaml
 create mode 100644 tests/tasks/setup/kit/validate-dataplane.yaml

diff --git a/tests/pipelines/kit/kit-cl2-load-run.yaml b/tests/pipelines/kit/kit-cl2-load-run.yaml
new file mode 100644
index 00000000..6ba3e366
--- /dev/null
+++ b/tests/pipelines/kit/kit-cl2-load-run.yaml
@@ -0,0 +1,40 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: kit-cl2-load-example-run
  namespace: tekton-pipelines
spec:
  timeout: "12h"
  workspaces:
  - name: source
    emptyDir: {}
  - name: results
    emptyDir: {}
  - name: kubeconfig
    volumeClaimTemplate:
      spec:
        accessModes:
        - ReadWriteOnce
        storageClassName: kit-gp2
        resources:
          requests:
            storage: 1Gi
  params:
  - name: name
    value: "my-cluster-name"
  - name: "results-bucket"
    value: "my-bucket"
  - name: "node_count"
    value: "100"
  - name: "amp-workspace-id"
    value: "my-AMP-workspace"
  podTemplate:
    nodeSelector:
      kubernetes.io/arch: amd64
    # TODO: The mounted EBS PV is only writable by root; ideally we should avoid running containers as root.
    securityContext:
      runAsNonRoot: false
      runAsUser: 0
  serviceAccountName: tekton-pipelines-executor
  pipelineRef:
    name: kit-cl2-load
diff --git a/tests/pipelines/kit/kit-cl2-load.yaml b/tests/pipelines/kit/kit-cl2-load.yaml
new file mode 100644
index 00000000..72b32ed2
--- /dev/null
+++ b/tests/pipelines/kit/kit-cl2-load.yaml
@@ -0,0 +1,237 @@
---
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
  name: kit-cl2-load
  namespace: tekton-pipelines
spec:
  workspaces:
  - name: source
  - name: results
  - name: kubeconfig
  params:
  - name: name
    description: The name of the test cluster.
    default: "guest"
  - name: amp-workspace-id
    description: The AMP workspace ID where remote write needs to happen.
  - name: node_count
    description: Desired node count for the dataplane; min is 1000 to create DP nodes.
  - name: kubernetes-version
    default: "1.24"
    description: Kubernetes version for the guest cluster.
  - name: apiserver-replicas
    description: "Desired replicas of the APIServer"
    default: "2"
  - name: apiserver-image
    default: ""
    description: Image of apiserver
  - name: apiserver-parameters
    default: ""
    description: |
      Parameters of the apiserver, e.g.
'["--goaway-chance=0.01"]' + - name: apiserver-instance-type + default: "m5.2xlarge" + description: Instance type for the apiserver + - name: etcd-replicas + default: "3" + description: Number of ETCD replicas + - name: etcd-image + default: "" + description: Image of ETCD + - name: etcd-parameters + default: "" + description: Parameters of the ETCD container + - name: etcd-instance-type + default: "m5.xlarge" + description: Instance type for the ETCD + - name: aws-vpc-cni-version + default: release-1.10 + description: The release version for aws vpc cni. + - name: aws-ebs-csi-driver-version + default: release-1.13 + description: The release version for aws ebs csi driver. + - name: monitoring-node-instance-types + default: | + ["m5.4xlarge"] + description: The desired instance type of the monitoring node. + - name: monitoring-node-selectors + default: 'node.kubernetes.io/instance-type=m5.4xlarge' + description: The selectors to select the monitoring node for tainting. + - name: giturl + description: "git url to clone the package" + default: https://github.com/kubernetes/perf-tests.git + - name: cl2-branch + description: "The branch of clusterloader2 you want to use" + default: "master" + - name: pods-per-node + description: "pod density" + default: "10" + - name: nodes-per-namespace + description: "nodes per namespace to get created for load test " + default: "100" + - name: cl2-load-test-throughput + description: "throughput used for mutate operations" + default: "15" + - name: results-bucket + description: "Results bucket with path of s3 to upload results" + tasks: + - name: setup-control-plane + taskRef: + kind: Task + name: control-plane-setup + params: + - name: name + value: '$(params.name)' + - name: kubernetes-version + value: '$(params.kubernetes-version)' + - name: apiserver-replicas + value: '$(params.apiserver-replicas)' + - name: apiserver-image + value: '$(params.apiserver-image)' + - name: apiserver-parameters + value: '$(params.apiserver-parameters)' + - name: apiserver-instance-type + value: '$(params.apiserver-instance-type)' + - name: etcd-replicas + value: '$(params.etcd-replicas)' + - name: etcd-image + value: '$(params.etcd-image)' + - name: etcd-parameters + value: '$(params.etcd-parameters)' + - name: etcd-instance-type + value: '$(params.etcd-instance-type)' + + - name: retieve-kubeconfig + runAfter: [setup-control-plane] + taskRef: + kind: Task + name: kit-retrieve-kubeconfig + params: + - name: cluster-name + value: '$(params.name)' + workspaces: + - name: kubeconfig + workspace: kubeconfig + + - name: install-cni + runAfter: [retieve-kubeconfig] + taskRef: + kind: Task + name: install-cni + params: + - name: aws-vpc-cni-version + value: 'release-1.10' + workspaces: + - name: kubeconfig + workspace: kubeconfig + + - name: install-csi + runAfter: [install-cni] + taskRef: + kind: Task + name: install-csi + params: + - name: aws-ebs-csi-driver-version + value: 'release-1.13' + workspaces: + - name: kubeconfig + workspace: kubeconfig + + - name: setup-monitoring-node + runAfter: [install-csi] + taskRef: + kind: Task + name: data-plane-setup + params: + - name: cluster-name + value: '$(params.name)' + - name: dataplane-name + value: 'monitoring-$(params.name)' + - name: desired-nodes + value: '1' + - name: instance-types + value: '$(params.monitoring-node-instance-types)' + + - name: setup-data-plane + runAfter: [label-and-taint-minitoring-node] + taskRef: + kind: Task + name: data-plane-setup + params: + - name: cluster-name + value: '$(params.name)' + - name: 
desired-nodes + value: '$(params.node_count)' + + - name: validate-data-plane + runAfter: [setup-data-plane] + taskRef: + kind: Task + name: validate-data-plane + params: + - name: desired-nodes + value: '$(($(params.node_count)+1))' + workspaces: + - name: kubeconfig + workspace: kubeconfig + + - name: label-and-taint-minitoring-node + runAfter: [setup-monitoring-node] + taskRef: + kind: Task + name: label-and-taint-node + params: + - name: cluster-name + value: '$(params.name)' + - name: selectors + value: '$(params.monitoring-node-selectors)' + - name: taint + value: 'monitoring=true:NoSchedule' + # We add another label to ensure Prometheus stack only run on the dedicated node in + # https://github.com/awslabs/kubernetes-iteration-toolkit/blob/e95b99e09e89f23a4aa8162632ba78b6c351dec5/tests/tasks/generators/clusterloader/load.yaml#L84 + - name: labels + value: 'eks.amazonaws.com/nodegroup=monitoring-$(params.name)-nodes-1' + workspaces: + - name: kubeconfig + workspace: kubeconfig + + - name: load + runAfter: [validate-data-plane] + taskRef: + kind: Task + name: load + params: + - name: cluster-name + value: '$(params.name)' + - name: cl2-branch + value: '$(params.cl2-branch)' + - name: giturl + value: '$(params.giturl)' + - name: pods-per-node + value: '$(params.pods-per-node)' + - name: nodes-per-namespace + value: '$(params.nodes-per-namespace)' + - name: cl2-load-test-throughput + value: '$(params.cl2-load-test-throughput)' + - name: results-bucket + value: '$(params.results-bucket)' + - name: nodes + value: '$(params.node_count)' + - name: amp-workspace-id + value: '$(params.amp-workspace-id)' + workspaces: + - name: source + workspace: source + - name: results + workspace: results + - name: kubeconfig + workspace: kubeconfig + + # finally: + # - name: teardown + # taskRef: + # name: teardown + # params: + # - name: name + # value: '$(params.name)' diff --git a/tests/tasks/generators/clusterloader/load.yaml b/tests/tasks/generators/clusterloader/load.yaml index a320af4a..ed4de70d 100644 --- a/tests/tasks/generators/clusterloader/load.yaml +++ b/tests/tasks/generators/clusterloader/load.yaml @@ -30,9 +30,6 @@ spec: - name: region default: "us-west-2" description: The region where the cluster is in. - - name: endpoint - default: "" - description: "aws eks enpoint to create clusters against" - name: cluster-name description: The name of the EKS cluster you want to spin. 
   - name: amp-workspace-id
@@ -47,6 +44,8 @@ spec:
   - name: source
     mountPath: /src/k8s.io/
   - name: results
+  - name: kubeconfig
+    description: kubeconfig of the guest cluster
   steps:
   - name: git-clone
     image: alpine/git
@@ -125,23 +124,22 @@ spec:
       fi
       # Building clusterloader2 binary
       cd $(workspaces.source.path)/perf-tests/clusterloader2/
-      GOPROXY=direct GOOS=linux CGO_ENABLED=0 go build -v -o ./clusterloader ./cmd
+      GOOS=linux CGO_ENABLED=0 go build -v -o ./clusterloader ./cmd
   - name: run-loadtest
     image: alpine/k8s:1.22.6
     onError: continue
     script: |
       #!/bin/bash
-      ENDPOINT_FLAG=""
-      if [ -n "$(params.endpoint)" ]; then
-        ENDPOINT_FLAG="--endpoint $(params.endpoint)"
-      fi
       if [ -n "$(params.amp-workspace-id)" ]; then
-        CL2_PROMETHEUS_FLAGS="--enable-prometheus-server=true --prometheus-pvc-storage-class gp2 --prometheus-manifest-path=$(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/"
+        # CL2_PROMETHEUS_FLAGS="--enable-prometheus-server=true --prometheus-pvc-storage-class gp2 --prometheus-manifest-path=$(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/"
+        CL2_PROMETHEUS_FLAGS="--enable-prometheus-server=true --prometheus-pvc-storage-class gp2 --prometheus-storage-class-provisioner ebs.csi.aws.com --prometheus-manifest-path=$(workspaces.source.path)/perf-tests/clusterloader2/pkg/prometheus/manifests/"
       fi
 
-      aws eks $ENDPOINT_FLAG update-kubeconfig --name $(params.cluster-name) --region $(params.region)
+      export KUBECONFIG=$(workspaces.kubeconfig.path)/kubeconfig
+
+      cat $(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml
       cd $(workspaces.source.path)/perf-tests/clusterloader2/
-      ENABLE_EXEC_SERVICE=false ./clusterloader --kubeconfig=/root/.kube/config --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2 $CL2_PROMETHEUS_FLAGS
+      ENABLE_EXEC_SERVICE=false ./clusterloader --testconfig=$(workspaces.source.path)/perf-tests/clusterloader2/testing/load/config.yaml --testoverrides=$(workspaces.source.path)/overrides.yaml --nodes=$(params.nodes) --provider=eks --report-dir=$(workspaces.results.path) --alsologtostderr --v=2 $CL2_PROMETHEUS_FLAGS
       exit_code=$?
       if [ $exit_code -eq 0 ]; then
         echo "1" | tee $(results.datapoint.path)
diff --git a/tests/tasks/setup/kit/controlplane.yaml b/tests/tasks/setup/kit/controlplane.yaml
new file mode 100644
index 00000000..81ebdaa4
--- /dev/null
+++ b/tests/tasks/setup/kit/controlplane.yaml
@@ -0,0 +1,123 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: control-plane-setup
  namespace: tekton-pipelines
  annotations:
    tekton.dev/pipelines.minVersion: "0.17.0"
    tekton.dev/categories: Kubernetes
    tekton.dev/tags: CLI, kubectl
    tekton.dev/displayName: "kubernetes actions"
    tekton.dev/platforms: "linux/amd64"
spec:
  description: |
    Setup a kubernetes control plane in the guest cluster.
  params:
  - name: name
    default: "guest"
    description: Name of the guest cluster
  - name: kubernetes-version
    default: "1.24"
    description: Kubernetes version for the guest cluster
  - name: apiserver-replicas
    default: "1"
    description: Number of APIserver replicas
  - name: apiserver-image
    default: ""
    description: Image of apiserver
  - name: apiserver-parameters
    default: ""
    description: |
      Parameters of the apiserver, e.g.
'["--goaway-chance=0.01"]' + - name: apiserver-instance-type + default: "m5.2xlarge" + description: Instance type for the apiserver + - name: etcd-replicas + default: "3" + description: Number of ETCD replicas + - name: etcd-image + default: "" + description: Image of ETCD + - name: etcd-parameters + default: "" + description: Parameters of the ETCD container + - name: etcd-instance-type + default: "m5.xlarge" + description: Instance type for the ETCD + steps: + - name: setup-control-plane + image: bitnami/kubectl:1.24.5 # curl was removed in more recent versions + script: | + #!/bin/bash + echo "Approving CSRs" + kubectl certificate approve $(kubectl get csr | grep "Pending" | awk '{print $1}') 2>/dev/null || true + namespace=$(kubectl get ns $(params.name) -o yaml 2>/dev/null | grep phase | awk '{print $2}') + if [[ $namespace != "Active" ]] + then + echo "Create namespace" + kubectl create namespace $(params.name) + fi + echo "Setting up control plane" + cat < /tmp/controlplane.yaml + apiVersion: kit.k8s.sh/v1alpha1 + kind: ControlPlane + metadata: + name: $(params.name) # Desired Cluster name + namespace: $(params.name) + spec: + etcd: + replicas: $(params.etcd-replicas) + spec: + nodeSelector: + node.kubernetes.io/instance-type: $(params.etcd-instance-type) + containers: + - name: etcd + EOF + if [ -n "$(params.etcd-image)" ]; then + cat <> /tmp/controlplane.yaml + image: $(params.etcd-image) + EOF + fi + if [ -n "$(params.etcd-parameters)" ]; then + cat <> /tmp/controlplane.yaml + args: $(params.etcd-parameters) + EOF + fi + cat <> /tmp/controlplane.yaml + master: + apiServer: + replicas: $(params.apiserver-replicas) + spec: + nodeSelector: + node.kubernetes.io/instance-type: $(params.apiserver-instance-type) + containers: + - name: apiserver + EOF + if [ -n "$(params.apiserver-image)" ]; then + cat <> /tmp/controlplane.yaml + image: $(params.apiserver-image) + EOF + fi + if [ -n "$(params.apiserver-parameters)" ]; then + cat <> /tmp/controlplane.yaml + args: $(params.apiserver-parameters) + EOF + fi + if [ -n "$(params.kubernetes-version)" ]; then + cat <> /tmp/controlplane.yaml + kubernetesVersion: "$(params.kubernetes-version)" + EOF + fi + kubectl apply -f /tmp/controlplane.yaml + + echo "waiting for controlplane to be ready" + while true; do + status0=$(kubectl get -f /tmp/controlplane.yaml -o jsonpath='{.status.conditions[0].status}') + status1=$(kubectl get -f /tmp/controlplane.yaml -o jsonpath='{.status.conditions[1].status}') + if [[ "$status0" == "True" ]] && [[ "$status1" == "True" ]]; then + echo "controlplane is ready" + break + fi + done + kubectl get -f /tmp/controlplane.yaml -o yaml diff --git a/tests/tasks/setup/kit/dataplane.yaml b/tests/tasks/setup/kit/dataplane.yaml new file mode 100644 index 00000000..f3051366 --- /dev/null +++ b/tests/tasks/setup/kit/dataplane.yaml @@ -0,0 +1,99 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: data-plane-setup + namespace: tekton-pipelines + annotations: + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/categories: Kubernetes + tekton.dev/tags: CLI, kubectl + tekton.dev/displayName: "kubernetes actions" + tekton.dev/platforms: "linux/amd64" +spec: + description: | + Setup a kubernetes data plane in the guest cluster. + params: + - name: cluster-name + default: "guest" + description: Name of the guest cluster + - name: dataplane-name + default: "" + description: Optional name of the dataplane object. + - name: desired-nodes + default: "10" + description: The desired number of nodes in the cluster. 
  - name: min-nodes
    default: "1"
    description: The minimum number of nodes in the cluster nodegroup.
  - name: max-nodes
    default: "1000"
    description: The maximum number of nodes in the cluster nodegroup.
  - name: instance-types
    default: '["c5.large","m5.large","r5.large","t3.large","t3a.large","c5a.large","m5a.large","r5a.large"]'
    description: The desired instance types of the node group.
  steps:
  - name: setup-data-plane
    image: bitnami/kubectl:1.24.5
    script: |
      #!/bin/bash
      set -e

      echo "Getting subnet and security group tags"
      TAG=$(kubectl get provisioner -oyaml | grep karpenter.sh/discovery | awk 'NR==1{ print $2}')
      echo "Setting up data plane"
      max_nodes=$(params.max-nodes)
      nodes=$(params.desired-nodes)
      asgs=$((nodes/max_nodes))
      echo "node groups: $asgs"
      if [ -z "$(params.dataplane-name)" ]
      then
        dp_name=$(params.cluster-name)-nodes
      else
        dp_name=$(params.dataplane-name)
      fi

      create_dp_nodes()
      {
        dataplane_name=$dp_name-$1
        EC2_INSTANCES=$3
        cat <<EOF > /tmp/dp.yaml
      apiVersion: kit.k8s.sh/v1alpha1
      kind: DataPlane
      metadata:
        name: $dataplane_name
        namespace: $(params.cluster-name)
      spec:
        clusterName: $(params.cluster-name) # Associated Cluster name
        nodeCount: $2
        instanceTypes: $EC2_INSTANCES
        subnetSelector:
          karpenter.sh/discovery: ${TAG}
      EOF
        kubectl apply -f /tmp/dp.yaml
        echo "Created dataplane object $dataplane_name"

        echo "waiting for dataplane"
        while true; do
          status0=$(kubectl get -f /tmp/dp.yaml -o jsonpath='{.status.conditions[0].status}')
          status1=$(kubectl get -f /tmp/dp.yaml -o jsonpath='{.status.conditions[1].status}')
          if [[ "$status0" == "True" ]] && [[ "$status1" == "True" ]]; then
            echo "dataplane is ready"
            break
          fi
          sleep 5
        done
      }

      for i in $(seq 1 $asgs)
      do
        # dataplane object is backed by ASG which has a cap for the max # of nodes per ASG
        create_dp_nodes $i $max_nodes $(params.instance-types)
      done

      remaining_nodes=$(((nodes)%max_nodes))
      echo "remaining nodes: $remaining_nodes"
      if [[ $remaining_nodes -gt 0 ]]
      then
        echo "The remaining_nodes var is greater than 0."
        create_dp_nodes 0 $remaining_nodes $(params.instance-types)
      fi
diff --git a/tests/tasks/setup/kit/kubeconfig.yaml b/tests/tasks/setup/kit/kubeconfig.yaml
new file mode 100644
index 00000000..0f55a31f
--- /dev/null
+++ b/tests/tasks/setup/kit/kubeconfig.yaml
@@ -0,0 +1,25 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: kit-retrieve-kubeconfig
  namespace: tekton-pipelines
spec:
  description: "retrieve the kubeconfig for the kit guest cluster"
  params:
  - name: cluster-name
    default: "guest"
    description: "The kit guest cluster name"
  steps:
  - name: retrieve-kubeconfig
    image: bitnami/kubectl
    script: |
      echo $(workspaces.kubeconfig.path)
      ls -ld $(workspaces.kubeconfig.path)
      kubectl get secret -n $(params.cluster-name) $(params.cluster-name)-kube-admin-config -ojsonpath='{.data.config}' | base64 -d > $(workspaces.kubeconfig.path)/kubeconfig
      # TODO: remove debug
      cat $(workspaces.kubeconfig.path)/kubeconfig
      # sanity check to ensure it's live.
      kubectl --kubeconfig $(workspaces.kubeconfig.path)/kubeconfig version
  workspaces:
  - name: kubeconfig
    description: kubeconfig of the guest cluster
diff --git a/tests/tasks/setup/kit/label-and-taint-node.yaml b/tests/tasks/setup/kit/label-and-taint-node.yaml
new file mode 100644
index 00000000..3f0789bb
--- /dev/null
+++ b/tests/tasks/setup/kit/label-and-taint-node.yaml
@@ -0,0 +1,51 @@
# taint by label node.kubernetes.io/instance-type: t3a.xlarge
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: label-and-taint-node
  namespace: tekton-pipelines
  annotations:
    tekton.dev/pipelines.minVersion: "0.17.0"
    tekton.dev/categories: Kubernetes
    tekton.dev/tags: CLI, kubectl
    tekton.dev/displayName: "kubernetes actions"
    tekton.dev/platforms: "linux/amd64"
spec:
  description: |
    Label and taint node objects.
  params:
  - name: cluster-name
    default: "guest"
    description: name of the guest cluster.
  - name: selectors
    default: ""
    description: selectors to fetch the k8s nodes. e.g. key1=value1,key2=value2
  - name: taint
    default: "monitoring=true:NoSchedule"
    description: Taint to apply to the k8s nodes.
  - name: labels
    default: ""
    description: labels to add to the k8s nodes. e.g. key1=value1,key2=value2
  steps:
  - name: kubectl-taint
    image: bitnami/kubectl:1.24.5
    script: |
      #!/bin/bash
      export KUBECONFIG=$(workspaces.kubeconfig.path)/kubeconfig
      echo "finding node(s) to add taint"
      while true; do
        nodes=$(kubectl get nodes -l $(params.selectors) -o jsonpath='{.items[*].metadata.name}')
        if [[ ! -z "$nodes" ]]; then
          echo "found node(s)"
          break
        fi
        sleep 5
      done
      if [ ! -z "$(params.taint)" ]; then
        kubectl taint nodes --overwrite -l $(params.selectors) $(params.taint)
      fi
      if [ ! -z "$(params.labels)" ]; then
        kubectl label nodes --overwrite -l $(params.selectors) $(params.labels)
      fi
  workspaces:
  - name: kubeconfig
    description: kubeconfig of the guest cluster
diff --git a/tests/tasks/setup/kit/validate-dataplane.yaml b/tests/tasks/setup/kit/validate-dataplane.yaml
new file mode 100644
index 00000000..b1c23543
--- /dev/null
+++ b/tests/tasks/setup/kit/validate-dataplane.yaml
@@ -0,0 +1,36 @@
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: validate-data-plane
  namespace: tekton-pipelines
  annotations:
    tekton.dev/pipelines.minVersion: "0.17.0"
    tekton.dev/categories: Kubernetes
    tekton.dev/tags: CLI, kubectl
    tekton.dev/displayName: "kubernetes actions"
    tekton.dev/platforms: "linux/amd64"
spec:
  description: |
    Wait and validate the data plane in a guest cluster.
  params:
  - name: desired-nodes
    description: The desired number of nodes in the cluster.
  steps:
  - name: validate-data-plane
    image: bitnami/kubectl:1.24.5
    script: |
      #!/bin/bash
      set -e

      echo "validating data plane nodes"
      export KUBECONFIG=$(workspaces.kubeconfig.path)/kubeconfig
      while true; do
        ready_node=$(kubectl get nodes --no-headers | grep -w Ready | wc -l)
        echo "ready-nodes=$ready_node out of $(params.desired-nodes)"
        if [[ "$ready_node" -eq $(params.desired-nodes) ]]; then break; fi
        sleep 5
      done
  workspaces:
  - name: kubeconfig
    description: kubeconfig of the guest cluster
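
Reviewer note (not part of the patch): a minimal usage sketch, assuming the host cluster already runs Tekton Pipelines and the KIT operator, and that the install-cni, install-csi, and load Tasks referenced by the pipeline are installed in the tekton-pipelines namespace. The PipelineRun name and namespace come from kit-cl2-load-run.yaml above; the tkn CLI step is optional.

    # apply the new KIT setup tasks, the updated clusterloader task, and the pipeline
    kubectl apply -f tests/tasks/setup/kit/ \
      -f tests/tasks/generators/clusterloader/load.yaml \
      -f tests/pipelines/kit/kit-cl2-load.yaml
    # kick off a load test run from the example PipelineRun and follow its logs
    kubectl create -f tests/pipelines/kit/kit-cl2-load-run.yaml
    tkn pipelinerun logs -f kit-cl2-load-example-run -n tekton-pipelines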