Split Helm chart into operator and providers charts with optional dependency #22
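The smoke test below exercises the chart split end to end: it builds the operator image, packages the separate operator and providers charts, installs both on a kind management cluster (with cert-manager for webhook certificates), deploys the core, kubeadm bootstrap, kubeadm control plane, and Docker providers, then creates a CAPD workload cluster, installs Calico, verifies the cluster, and cleans up.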
Workflow file for this run:
name: Smoke Test

on:
  pull_request:
    branches:
      - main
      - 'release-*'
  push:
    branches:
      - main
  workflow_dispatch:

permissions:
  contents: read

jobs:
  smoke-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/

      - name: Install yq
        run: |
          wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O yq
          chmod +x yq
          sudo mv yq /usr/local/bin/

      - name: Install Helm
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

      - name: Build Docker image
        run: |
          # Build the operator image with a specific tag for smoke test
          CONTROLLER_IMG=cluster-api-operator TAG=smoke-test make docker-build
          echo "Built image: cluster-api-operator-amd64:smoke-test"
          # Tag the image for easier reference
          docker tag cluster-api-operator-amd64:smoke-test cluster-api-operator:smoke-test

      - name: Build charts
        run: |
          make release-chart
          # Extract HELM_CHART_TAG from Makefile
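          # 'make -p' prints make's internal database, so the resolved value of
          # HELM_CHART_TAG can be grepped out without a dedicated print target.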
          HELM_CHART_TAG=$(make -s -f Makefile -p | grep '^HELM_CHART_TAG :=' | cut -d' ' -f3)
          echo "HELM_CHART_TAG=$HELM_CHART_TAG" >> $GITHUB_ENV
          echo "Detected HELM_CHART_TAG: $HELM_CHART_TAG"

      - name: Create kind cluster
        run: |
          chmod +x ./hack/ensure-kind.sh
          ./hack/ensure-kind.sh
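          # CAPD provisions workload cluster "nodes" as plain containers on the
          # host Docker daemon, so the daemon's socket (and storage directory)
          # must be visible from inside the kind node.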
          # Create kind cluster with Docker socket mount for CAPD
          cat <<EOF > /tmp/kind-config.yaml
          kind: Cluster
          apiVersion: kind.x-k8s.io/v1alpha4
          nodes:
          - role: control-plane
            extraMounts:
            - hostPath: /var/run/docker.sock
              containerPath: /var/run/docker.sock
            - hostPath: /var/lib/docker
              containerPath: /var/lib/docker
          EOF
          kind create cluster --name capi-operator-smoke-test --config /tmp/kind-config.yaml --wait 5m
          kubectl cluster-info --context kind-capi-operator-smoke-test

      - name: Load Docker image to kind
        run: |
          # Load the built image into kind cluster
          kind load docker-image cluster-api-operator:smoke-test --name capi-operator-smoke-test
          echo "Loaded image cluster-api-operator:smoke-test into kind cluster"

      - name: Add Helm repositories
        run: |
          helm repo add jetstack https://charts.jetstack.io
          helm repo update

      - name: Install cert-manager
        run: |
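          # The operator's webhooks expect serving certificates issued by
          # cert-manager, so it has to be in place before the operator chart installs.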
          helm install cert-manager jetstack/cert-manager \
            --namespace cert-manager \
            --create-namespace \
            --set installCRDs=true \
            --wait \
            --timeout 5m

      - name: Install Cluster API Operator
        run: |
          # Use exact chart filename based on HELM_CHART_TAG
          CHART_PACKAGE="out/package/cluster-api-operator-${HELM_CHART_TAG}.tgz"
          echo "Using chart package: $CHART_PACKAGE"
          # Verify the file exists
          if [ ! -f "$CHART_PACKAGE" ]; then
            echo "Error: Chart package not found: $CHART_PACKAGE"
            ls -la out/package/
            exit 1
          fi
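          # Point the release at the locally built image loaded into kind above;
          # pullPolicy=IfNotPresent keeps the kubelet from attempting a registry pull.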
          helm install capi-operator "$CHART_PACKAGE" \
            --create-namespace \
            -n capi-operator-system \
            --set image.manager.repository=cluster-api-operator \
            --set image.manager.tag=smoke-test \
            --set image.manager.pullPolicy=IfNotPresent \
            --wait \
            --timeout 90s

      - name: Wait for CAPI Operator to be ready
        run: |
          kubectl wait --for=condition=Available --timeout=300s -n capi-operator-system deployment/capi-operator-cluster-api-operator

      - name: Deploy providers using cluster-api-operator-providers chart
        run: |
          # Create values file for providers
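          # The structure mirrors the providers chart's values: provider type ->
          # provider name -> namespace, with per-provider feature gates under
          # 'manager.featureGates'.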
          cat <<EOF > /tmp/providers-values.yaml
          core:
            cluster-api:
              namespace: capi-system
          bootstrap:
            kubeadm:
              namespace: capi-kubeadm-bootstrap-system
          controlPlane:
            kubeadm:
              namespace: capi-kubeadm-control-plane-system
          infrastructure:
            docker:
              namespace: capd-system
          manager:
            featureGates:
              core:
                ClusterTopology: true
                ClusterResourceSet: true
                MachinePool: true
              kubeadm:
                ClusterTopology: true
                MachinePool: true
              docker:
                ClusterTopology: true
          EOF
          # Use exact providers chart filename based on HELM_CHART_TAG
          PROVIDERS_CHART_PACKAGE="out/package/cluster-api-operator-providers-${HELM_CHART_TAG}.tgz"
          echo "Using providers chart package: $PROVIDERS_CHART_PACKAGE"
          # Verify the file exists
          if [ ! -f "$PROVIDERS_CHART_PACKAGE" ]; then
            echo "Error: Providers chart package not found: $PROVIDERS_CHART_PACKAGE"
            ls -la out/package/
            exit 1
          fi
          helm install capi-providers "$PROVIDERS_CHART_PACKAGE" \
            -f /tmp/providers-values.yaml \
            --wait

      - name: Wait for providers to be ready
        run: |
          echo "=== Waiting for Core Provider to be ready ==="
          kubectl wait --for=condition=Ready --timeout=300s -n capi-system coreprovider/cluster-api || true
          echo -e "\n=== Waiting for Bootstrap Provider to be ready ==="
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-bootstrap-system bootstrapprovider/kubeadm || true
          echo -e "\n=== Waiting for Control Plane Provider to be ready ==="
          kubectl wait --for=condition=Ready --timeout=300s -n capi-kubeadm-control-plane-system controlplaneprovider/kubeadm || true
          echo -e "\n=== Waiting for Infrastructure Provider to be ready ==="
          kubectl wait --for=condition=Ready --timeout=300s -n capd-system infrastructureprovider/docker || true
          # Additional wait for deployments
          echo -e "\n=== Waiting for provider deployments ==="
          kubectl wait --for=condition=Available --timeout=300s -n capi-system deployment/capi-controller-manager || true
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager || true
          kubectl wait --for=condition=Available --timeout=300s -n capi-kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager || true
          kubectl wait --for=condition=Available --timeout=300s -n capd-system deployment/capd-controller-manager || true
          # Wait for webhooks to be ready
          echo -e "\n=== Waiting for webhook services ==="
          kubectl wait --for=jsonpath='{.status.loadBalancer}' --timeout=300s -n capi-kubeadm-bootstrap-system service/capi-kubeadm-bootstrap-webhook-service || true
          kubectl wait --for=jsonpath='{.status.loadBalancer}' --timeout=300s -n capi-kubeadm-control-plane-system service/capi-kubeadm-control-plane-webhook-service || true

      - name: Verify installation
        run: |
          echo "=== Cluster API Operator Status ==="
          kubectl get pods -n capi-operator-system
          echo -e "\n=== Core Provider Status ==="
          kubectl get coreprovider -A -o wide
          kubectl describe coreprovider -n capi-system cluster-api || true
          echo -e "\n=== Bootstrap Provider Status ==="
          kubectl get bootstrapprovider -A -o wide
          kubectl describe bootstrapprovider -n capi-kubeadm-bootstrap-system kubeadm || true
          echo -e "\n=== Control Plane Provider Status ==="
          kubectl get controlplaneprovider -A -o wide
          kubectl describe controlplaneprovider -n capi-kubeadm-control-plane-system kubeadm || true
          echo -e "\n=== Infrastructure Provider Status ==="
          kubectl get infrastructureprovider -A -o wide
          kubectl describe infrastructureprovider -n capd-system docker || true
          echo -e "\n=== All Pods ==="
          kubectl get pods -A | grep -E "(capi-|capd-)"
          echo -e "\n=== Webhook Services ==="
          kubectl get svc -A | grep webhook
          echo -e "\n=== Webhook Certificates ==="
          kubectl get certificate,certificaterequest -A | grep -E "(capi-|capd-)"
          echo -e "\n=== CRDs ==="
          kubectl get crds | grep -E "(cluster.x-k8s.io|operator.cluster.x-k8s.io)"

      - name: Check provider health
        run: |
          # Check if core provider is ready
          CORE_READY=$(kubectl get coreprovider -n capi-system cluster-api -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
          if [ "$CORE_READY" != "True" ]; then
            echo "Core provider is not ready"
            kubectl get coreprovider -n capi-system cluster-api -o yaml
            exit 1
          fi
          # Check if bootstrap provider is ready
          BOOTSTRAP_READY=$(kubectl get bootstrapprovider -n capi-kubeadm-bootstrap-system kubeadm -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
          if [ "$BOOTSTRAP_READY" != "True" ]; then
            echo "Bootstrap provider is not ready"
            kubectl get bootstrapprovider -n capi-kubeadm-bootstrap-system kubeadm -o yaml
            exit 1
          fi
          # Check if control plane provider is ready
          CONTROLPLANE_READY=$(kubectl get controlplaneprovider -n capi-kubeadm-control-plane-system kubeadm -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
          if [ "$CONTROLPLANE_READY" != "True" ]; then
            echo "Control plane provider is not ready"
            kubectl get controlplaneprovider -n capi-kubeadm-control-plane-system kubeadm -o yaml
            exit 1
          fi
          # Check if infrastructure provider is ready
          INFRA_READY=$(kubectl get infrastructureprovider -n capd-system docker -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
          if [ "$INFRA_READY" != "True" ]; then
            echo "Infrastructure provider is not ready"
            kubectl get infrastructureprovider -n capd-system docker -o yaml
            exit 1
          fi
          echo "All providers are ready!"
          # Additional webhook readiness check
          echo -e "\n=== Checking webhook endpoints ==="
          kubectl get endpoints -A | grep webhook

      - name: Download cluster manifest
        run: |
          echo "=== Downloading cluster manifest ==="
          curl -L https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/refs/heads/main/test/infrastructure/docker/examples/simple-cluster.yaml -o simple-cluster.yaml
          # Show the manifest for debugging
          echo "=== Cluster manifest ==="
          cat simple-cluster.yaml
          # Extract cluster name from the manifest using yq
          CLUSTER_NAME=$(yq eval 'select(.kind == "Cluster") | .metadata.name' simple-cluster.yaml)
          # Ensure cluster name was extracted successfully
          if [ -z "$CLUSTER_NAME" ]; then
            echo "ERROR: Failed to extract cluster name from simple-cluster.yaml"
            echo "Please check the manifest structure"
            exit 1
          fi
          echo "Detected cluster name: $CLUSTER_NAME"
| echo "CLUSTER_NAME=$CLUSTER_NAME" >> $GITHUB_ENV | |
| - name: Create workload cluster | |
| run: | | |
| echo "=== Pre-creation diagnostics ===" | |
| echo "Checking webhook services..." | |
| kubectl get svc -A | grep webhook | |
| echo -e "\nChecking webhook endpoints..." | |
| kubectl get endpoints -A | grep webhook | |
| echo -e "\nChecking webhook certificates..." | |
| kubectl get secret -A | grep webhook-service-cert | |
| echo -e "\n=== Analyzing cluster manifest for CNI configuration ===" | |
| echo "Checking for CNI-related settings in simple-cluster.yaml:" | |
| grep -i "cni\|calico\|flannel\|weave\|cilium" simple-cluster.yaml || echo "No CNI configuration found in manifest" | |
| echo -e "\n=== Checking KubeadmControlPlane configuration ===" | |
| yq eval 'select(.kind == "KubeadmControlPlane") | .spec' simple-cluster.yaml || echo "Could not extract KubeadmControlPlane spec" | |
| echo -e "\n=== Creating workload cluster ===" | |
| kubectl apply -f simple-cluster.yaml | |
| echo -e "\n=== Cluster resources created ===" | |
| kubectl get cluster,dockercluster,kubeadmcontrolplane,machinedeployment -A | |
| - name: Get workload cluster kubeconfig | |
| run: | | |
| echo "=== Getting workload cluster kubeconfig ===" | |
| # Get kubeconfig from the cluster | |
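          # Cluster API publishes the workload cluster's admin kubeconfig in a
          # Secret named <cluster-name>-kubeconfig, base64-encoded under .data.value.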
          kubectl get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath='{.data.value}' | base64 -d > ${CLUSTER_NAME}.kubeconfig
          echo "=== Testing kubeconfig ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig cluster-info || echo "Cluster API endpoint may not be ready yet"

      - name: Install CNI plugin (Calico) using Helm
        run: |
          echo "=== Installing Calico CNI plugin using Helm ==="
          # Add Calico Helm repository
          helm repo add projectcalico https://docs.tigera.io/calico/charts --kubeconfig=${CLUSTER_NAME}.kubeconfig
          helm repo update --kubeconfig=${CLUSTER_NAME}.kubeconfig
          # Install Calico using Helm with values from CAPI Azure provider
          helm install calico projectcalico/tigera-operator \
            --kubeconfig=${CLUSTER_NAME}.kubeconfig \
            -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/values.yaml \
            --namespace tigera-operator \
            --create-namespace \
            --wait \
            --timeout 5m
          echo "=== Waiting for Calico to be ready ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n tigera-operator -l app.kubernetes.io/name=tigera-operator
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n calico-system --all
          echo "=== Calico installation complete ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n tigera-operator
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n calico-system

      - name: Wait for nodes to be ready
        run: |
          echo "=== Waiting for control plane node to be ready ==="
          # Wait for the node to become ready after CNI installation
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s nodes --all
          echo "=== Checking node status ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get nodes -o wide
          echo "=== Waiting for control plane replicas ==="
          kubectl wait --for=jsonpath='{.status.readyReplicas}'=1 --timeout=300s kubeadmcontrolplane -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME}
          echo "=== Final cluster status ==="
          kubectl get cluster ${CLUSTER_NAME} -o wide
          kubectl get machines -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME}

      - name: Verify kubectl commands work on workload cluster
        run: |
          echo "=== Testing kubectl get po on workload cluster ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get po -A
          echo -e "\n=== Testing kubectl get nodes ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get nodes
          echo -e "\n=== Verifying CNI is working ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n calico-system
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n tigera-operator
          echo -e "\n=== Waiting for system pods to be ready ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n kube-system -l k8s-app=kube-proxy
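          # kubeadm labels its static control plane pods with component=<name>,
          # which is what the selectors below match.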
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-apiserver
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-controller-manager
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig wait --for=condition=Ready --timeout=300s pods -n kube-system -l component=kube-scheduler

      - name: Verify cluster functionality
        run: |
          echo "=== Final cluster verification ==="
          echo "Cluster nodes:"
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get nodes -o wide
          echo -e "\nAll pods:"
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get po -A
          echo -e "\nAll services:"
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get svc -A
          echo -e "\nCluster info:"
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig cluster-info

      - name: Collect debug information on failure
        if: failure()
        run: |
          echo "=== Events ==="
          kubectl get events -A --sort-by='.lastTimestamp' | tail -50
          echo -e "\n=== CAPI Operator Logs ==="
          kubectl logs -n capi-operator-system deployment/capi-operator-cluster-api-operator --tail=100 || true
          echo -e "\n=== Core Provider Logs ==="
          kubectl logs -n capi-system deployment/capi-controller-manager --tail=100 || true
          echo -e "\n=== Bootstrap Provider Logs ==="
          kubectl logs -n capi-kubeadm-bootstrap-system deployment/capi-kubeadm-bootstrap-controller-manager --tail=100 || true
          echo -e "\n=== Control Plane Provider Logs ==="
          kubectl logs -n capi-kubeadm-control-plane-system deployment/capi-kubeadm-control-plane-controller-manager --tail=100 || true
          echo -e "\n=== Infrastructure Provider Logs ==="
          kubectl logs -n capd-system deployment/capd-controller-manager --tail=100 || true
          echo -e "\n=== Webhook Services and Endpoints ==="
          kubectl get svc,endpoints -A | grep webhook || true
          echo -e "\n=== Webhook Certificates ==="
          kubectl get certificate,certificaterequest,secret -A | grep -E "(webhook|serving-cert)" || true
          echo -e "\n=== Cluster Resources ==="
          kubectl get cluster,dockercluster,kubeadmcontrolplane,machine,dockermachine -A -o wide || true
          echo -e "\n=== Describe Cluster ==="
          kubectl describe cluster ${CLUSTER_NAME} || true
          echo -e "\n=== Describe Machines ==="
          kubectl describe machines -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME} || true
          echo -e "\n=== Docker Containers ==="
          docker ps -a | grep -E "(smoke-test|kind)" || true
          echo -e "\n=== Kind Clusters ==="
          kind get clusters || true
          echo -e "\n=== Describe Failed Pods ==="
          kubectl get pods -A | grep -v Running | grep -v Completed | tail -n +2 | while read namespace name ready status restarts age; do
            echo "Describing pod $name in namespace $namespace"
            kubectl describe pod -n $namespace $name
            echo "---"
          done
          echo -e "\n=== CNI Diagnostics ==="
          echo "Checking Calico installation status..."
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n tigera-operator -o wide || true
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n calico-system -o wide || true
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig get pods -n calico-apiserver -o wide || true
          echo -e "\n=== Calico logs ==="
          kubectl --kubeconfig=${CLUSTER_NAME}.kubeconfig logs -n tigera-operator -l app.kubernetes.io/name=tigera-operator --tail=50 || true
          echo -e "\n=== Node CNI status ==="
          CONTROL_PLANE_CONTAINER=$(docker ps -a | grep ${CLUSTER_NAME}-controlplane | awk '{print $1}' | head -1)
          if [ -n "$CONTROL_PLANE_CONTAINER" ]; then
            echo "Control plane container: $CONTROL_PLANE_CONTAINER"
            echo "=== Checking CNI binaries ==="
            docker exec $CONTROL_PLANE_CONTAINER ls -la /opt/cni/bin/ || echo "CNI binaries directory not found"
            echo -e "\n=== Checking CNI configuration ==="
            docker exec $CONTROL_PLANE_CONTAINER ls -la /etc/cni/net.d/ || echo "CNI config directory not found"
            docker exec $CONTROL_PLANE_CONTAINER cat /etc/cni/net.d/* 2>/dev/null || echo "No CNI config files found"
            echo -e "\n=== Checking kubelet configuration ==="
            docker exec $CONTROL_PLANE_CONTAINER cat /var/lib/kubelet/kubeadm-flags.env || true
            docker exec $CONTROL_PLANE_CONTAINER ps aux | grep kubelet || true
            echo -e "\n=== Node status inside container ==="
            docker exec $CONTROL_PLANE_CONTAINER kubectl --kubeconfig=/etc/kubernetes/admin.conf get nodes -o wide || true
            docker exec $CONTROL_PLANE_CONTAINER kubectl --kubeconfig=/etc/kubernetes/admin.conf describe nodes || true
          fi
          echo -e "\n=== CAPD Provider Configuration ==="
          kubectl get dockercluster ${CLUSTER_NAME} -o yaml || true
          kubectl get dockermachinetemplate -A -o yaml || true
          echo -e "\n=== Helm releases ==="
          helm list --all-namespaces --kubeconfig=${CLUSTER_NAME}.kubeconfig || true

      - name: Clean up
        if: always()
        run: |
          echo "=== Cleaning up kind clusters ==="
          # List all kind clusters before cleanup
          echo "Current kind clusters:"
          kind get clusters || true
          # Delete workload cluster if it exists
          echo "Deleting workload cluster: ${CLUSTER_NAME}"
          kind delete cluster --name ${CLUSTER_NAME} || true
          # Delete management cluster
          echo "Deleting management cluster: capi-operator-smoke-test"
          kind delete cluster --name capi-operator-smoke-test || true
          # Verify all clusters are deleted
          echo "Remaining kind clusters:"
          kind get clusters || true
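For local debugging, roughly the same flow can be reproduced by hand (a sketch, assuming the repository's Makefile targets and hack scripts behave as they do in CI; <tag> stands for the HELM_CHART_TAG value the Makefile reports, and the kind cluster needs the same Docker socket mounts shown above):

  CONTROLLER_IMG=cluster-api-operator TAG=smoke-test make docker-build
  docker tag cluster-api-operator-amd64:smoke-test cluster-api-operator:smoke-test
  make release-chart
  kind create cluster --name capi-operator-smoke-test --config /tmp/kind-config.yaml
  kind load docker-image cluster-api-operator:smoke-test --name capi-operator-smoke-test
  helm install cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set installCRDs=true --wait
  helm install capi-operator out/package/cluster-api-operator-<tag>.tgz -n capi-operator-system --create-namespace \
    --set image.manager.repository=cluster-api-operator \
    --set image.manager.tag=smoke-test \
    --set image.manager.pullPolicy=IfNotPresent --wait
  helm install capi-providers out/package/cluster-api-operator-providers-<tag>.tgz -f /tmp/providers-values.yaml --wait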