From 0bd1909878c43e4e7836b45f708a465b23f8fb70 Mon Sep 17 00:00:00 2001
From: Salvador Fuentes
Date: Wed, 21 Nov 2018 12:13:30 -0600
Subject: [PATCH 1/5] cri-o: modify crio.conf to match new format

crio.conf has changed its format in version 1.12. Change our scripts
to match the new format.

Signed-off-by: Salvador Fuentes
---
 .ci/install_crio.sh | 40 ++++++++++++++++++----------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/.ci/install_crio.sh b/.ci/install_crio.sh
index 9ab628d2e..10923d2e3 100755
--- a/.ci/install_crio.sh
+++ b/.ci/install_crio.sh
@@ -5,7 +5,9 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-set -e
+set -o errexit
+set -o nounset
+set -o pipefail
 
 cidir=$(dirname "$0")
 source "${cidir}/lib.sh"
@@ -13,7 +15,9 @@ source /etc/os-release || source /usr/lib/os-release
 
 echo "Get CRI-O sources"
 kubernetes_sigs_org="github.com/kubernetes-sigs"
+ghprbGhRepository="${ghprbGhRepository:-}"
 crio_repo="${kubernetes_sigs_org}/cri-o"
+
 go get -d "$crio_repo" || true
 
 pushd "${GOPATH}/src/${crio_repo}"
@@ -93,33 +97,25 @@ sudo -E install -D -m0755 runc "/usr/local/bin/crio-runc"
 popd
 
 crio_config_file="/etc/crio/crio.conf"
-echo "Set runc as default runtime in CRI-O for trusted workloads"
-sudo sed -i 's/^runtime =.*/runtime = "\/usr\/local\/bin\/crio-runc"/' "$crio_config_file"
-echo "Change stream_port where cri-o will listen"
-sudo sed -i 's/^stream_port.*/stream_port = "10020"/' "$crio_config_file"
+echo "Set manage_network_ns_lifecycle to true"
+network_ns_flag="manage_network_ns_lifecycle"
+sudo sed -i "/\[crio.runtime\]/a$network_ns_flag = true" "$crio_config_file"
 
 echo "Add docker.io registry to pull images"
-# Matches cri-o 1.9 file format
-sudo sed -i 's/^registries = \[/registries = \[ "docker.io"/' "$crio_config_file"
 # Matches cri-o 1.10 file format
+sudo sed -i 's/^registries = \[/registries = \[ "docker.io"/' "$crio_config_file"
+# Matches cri-o 1.12 file format
 sudo sed -i 's/^#registries = \[/registries = \[ "docker.io" \] /' "$crio_config_file"
 
-echo "Set manage_network_ns_lifecycle to true"
-network_ns_flag="manage_network_ns_lifecycle"
-
-# Check if flag is already defined in the CRI-O config file.
-# If it is already defined, then just change the value to true,
-# else, add the flag with the value.
-if grep "$network_ns_flag" "$crio_config_file"; then
-	sudo sed -i "s/^$network_ns_flag.*/$network_ns_flag = true/" "$crio_config_file"
-else
-	sudo sed -i "/\[crio.runtime\]/a$network_ns_flag = true" "$crio_config_file"
-fi
+echo "Change stream_port where cri-o will listen"
+sudo sed -i 's/^stream_port.*/stream_port = "10020"/' "$crio_config_file"
 
-echo "Set Kata containers as default runtime in CRI-O for untrusted workloads"
-sudo sed -i 's/default_workload_trust = "trusted"/default_workload_trust = "untrusted"/' "$crio_config_file"
-sudo sed -i 's/runtime_untrusted_workload = ""/runtime_untrusted_workload = "\/usr\/local\/bin\/kata-runtime"/' "$crio_config_file"
+echo "Configure runtimes for trusted/untrusted annotations"
+sudo sed -i 's/^#* *runtime =.*/runtime = "\/usr\/local\/bin\/crio-runc"/' "$crio_config_file"
+sudo sed -i 's/^default_runtime/# default_runtime/' "$crio_config_file"
+sudo sed -i 's/^#*runtime_untrusted_workload = ""/runtime_untrusted_workload = "\/usr\/local\/bin\/kata-runtime"/' "$crio_config_file"
+sudo sed -i 's/#*default_workload_trust = ""/default_workload_trust = "trusted"/' "$crio_config_file"
 
 service_path="/etc/systemd/system"
 crio_service_file="${cidir}/data/crio.service"
@@ -127,7 +123,7 @@ crio_service_file="${cidir}/data/crio.service"
 echo "Install crio service (${crio_service_file})"
 sudo install -m0444 "${crio_service_file}" "${service_path}"
 
-kubelet_service_dir="/etc/systemd/system/kubelet.service.d/"
+kubelet_service_dir="${service_path}/kubelet.service.d/"
 
 sudo mkdir -p "${kubelet_service_dir}"

From b0b57cf474321d7c39c54481cc4edf4d849d35d0 Mon Sep 17 00:00:00 2001
From: Salvador Fuentes
Date: Wed, 21 Nov 2018 12:27:43 -0600
Subject: [PATCH 2/5] k8s: Update scripts for Kubernetes 1.12

Update init and cleanup scripts.

- Use the updated flannel config file from the Kubernetes 1.12
  documentation, which is now downloaded from the coreos/flannel
  repository.
- Use a yaml configuration file to initialize kubeadm.
- Remove workaround to delete pods after the kubeadm reset.

Fixes: #925.
Fixes: #296.
Signed-off-by: Salvador Fuentes
---
 integration/kubernetes/cleanup_env.sh         | 33 ++++---
 .../kubernetes/data/kube-flannel-rbac.yml     | 40 --------
 integration/kubernetes/data/kube-flannel.yml  | 95 -------------------
 integration/kubernetes/init.sh                | 46 +++++----
 integration/kubernetes/kubeadm/config.yaml    | 12 +++
 5 files changed, 55 insertions(+), 171 deletions(-)
 delete mode 100644 integration/kubernetes/data/kube-flannel-rbac.yml
 delete mode 100644 integration/kubernetes/data/kube-flannel.yml
 create mode 100644 integration/kubernetes/kubeadm/config.yaml

diff --git a/integration/kubernetes/cleanup_env.sh b/integration/kubernetes/cleanup_env.sh
index e1dcf7363..61ab3c448 100755
--- a/integration/kubernetes/cleanup_env.sh
+++ b/integration/kubernetes/cleanup_env.sh
@@ -4,31 +4,34 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 #
+# This script is used to reset the kubernetes cluster
 
 SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
 source "${SCRIPT_PATH}/../../lib/common.bash"
 
-export KUBECONFIG=/etc/kubernetes/admin.conf
-sudo -E kubeadm reset --cri-socket=/var/run/crio/crio.sock
+cri_runtime="${CRI_RUNTIME:-crio}"
+
+case "${cri_runtime}" in
+containerd)
+	cri_runtime_socket="/run/containerd/containerd.sock"
+	;;
+crio)
+	cri_runtime_socket="/var/run/crio/crio.sock"
+	;;
+*)
+	echo "Runtime ${cri_runtime} not supported"
+	;;
+esac
 
-# Workaround to delete pods using crictl
-# Needed until https://github.com/kubernetes/kubeadm/issues/748
-# gets fixed
-for ctr in $(sudo crictl ps --quiet); do
-	sudo crictl stop "$ctr"
-	sudo crictl rm "$ctr"
-done
-for pod in $(sudo crictl pods --quiet); do
-	sudo crictl stopp "$pod"
-	sudo crictl rmp "$pod"
-done
+export KUBECONFIG=/etc/kubernetes/admin.conf
+sudo -E kubeadm reset -f --cri-socket="${cri_runtime_socket}"
 
-sudo systemctl stop crio
+sudo systemctl stop "${cri_runtime}"
 
 sudo ip link set dev cni0 down
 sudo ip link set dev flannel.1 down
 sudo ip link del cni0
 sudo ip link del flannel.1
 
-# Check no processes are left behind
+# Check no kata processes are left behind after resetting kubernetes
 check_processes

diff --git a/integration/kubernetes/data/kube-flannel-rbac.yml b/integration/kubernetes/data/kube-flannel-rbac.yml
deleted file mode 100644
index 1d8fa74c8..000000000
--- a/integration/kubernetes/data/kube-flannel-rbac.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file was pulled from:
-# https://github.com/coreos/flannel (HEAD at time of pull was 4973e02e539378)
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: flannel
-  namespace: kube-system

diff --git a/integration/kubernetes/data/kube-flannel.yml b/integration/kubernetes/data/kube-flannel.yml
deleted file mode 100644
index 118a9ba2e..000000000
--- a/integration/kubernetes/data/kube-flannel.yml
+++ /dev/null
@@ -1,95 +0,0 @@
-# This file was pulled from:
-# https://github.com/coreos/flannel (HEAD at time of pull was 4973e02e539378)
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: flannel
-  namespace: kube-system
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kube-flannel-cfg
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-data:
-  cni-conf.json: |
-    {
-      "name": "cbr0",
-      "type": "flannel",
-      "delegate": {
-        "isDefaultGateway": true
-      }
-    }
-  net-conf.json: |
-    {
-      "Network": "10.244.0.0/16",
-      "Backend": {
-        "Type": "vxlan"
-      }
-    }
----
-apiVersion: extensions/v1beta1
-kind: DaemonSet
-metadata:
-  name: kube-flannel-ds
-  namespace: kube-system
-  labels:
-    tier: node
-    app: flannel
-spec:
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: flannel
-    spec:
-      hostNetwork: true
-      nodeSelector:
-        beta.kubernetes.io/arch: amd64
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
-      serviceAccountName: flannel
-      containers:
-      - name: kube-flannel
-        image: quay.io/coreos/flannel:v0.8.0-amd64
-        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
-        securityContext:
-          privileged: true
-        env:
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      - name: install-cni
-        image: quay.io/coreos/flannel:v0.8.0-amd64
-        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
-        volumeMounts:
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      volumes:
-        - name: run
-          hostPath:
-            path: /run
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg

diff --git a/integration/kubernetes/init.sh b/integration/kubernetes/init.sh
index b677f099a..c1ad847ee 100755
--- a/integration/kubernetes/init.sh
+++ b/integration/kubernetes/init.sh
@@ -15,27 +15,19 @@ cri_runtime="${CRI_RUNTIME:-crio}"
 
 case "${cri_runtime}" in
 containerd)
-        cri_runtime_socket="/run/containerd/containerd.sock"
-        ;;
+	cri_runtime_socket="/run/containerd/containerd.sock"
+	;;
 crio)
-        cri_runtime_socket="/var/run/crio/crio.sock"
-        ;;
+	cri_runtime_socket="/var/run/crio/crio.sock"
+	;;
 *)
-        echo "Runtime ${cri_runtime} not supported"
-
-        ;;
+	echo "Runtime ${cri_runtime} not supported"
+	;;
 esac
 
-# Check no processes are left behind
+# Check there are no kata processes from previous tests.
 check_processes
 
-# The next workaround is to be able to communicate between pods
-# Issue: https://github.com/kubernetes/kubernetes/issues/40182
-# Fix is ready for K8s 1.9, but still need to investigate why it does not
-# work by default.
-# FIXME: Issue: https://github.com/clearcontainers/tests/issues/934
-sudo iptables -P FORWARD ACCEPT
-
 # Remove existing CNI configurations:
 sudo rm -rf /var/lib/cni/networks/*
 sudo rm -rf /etc/cni/net.d/*
@@ -49,18 +41,30 @@ echo "Start ${cri_runtime} service"
 sudo systemctl start ${cri_runtime}
 
 echo "Init cluster using ${cri_runtime_socket}"
-sudo -E kubeadm init --pod-network-cidr 10.244.0.0/16 --cri-socket="unix://${cri_runtime_socket}"
+kubeadm_config_template="${SCRIPT_PATH}/kubeadm/config.yaml"
+kubeadm_config_file="$(mktemp --tmpdir kubeadm_config.XXXXXX.yaml)"
+
+sed -e "s|CRI_RUNTIME_SOCKET|${cri_runtime_socket}|" "${kubeadm_config_template}" > "${kubeadm_config_file}"
+
+sudo -E kubeadm init --config "${kubeadm_config_file}"
+
 export KUBECONFIG=/etc/kubernetes/admin.conf
 sudo -E kubectl get nodes
 sudo -E kubectl get pods
 
-sudo -E kubectl create -f "${SCRIPT_PATH}/data/kube-flannel-rbac.yml"
-sudo -E kubectl create --namespace kube-system -f "${SCRIPT_PATH}/data/kube-flannel.yml"
+# kube-flannel config file taken from k8s 1.12 documentation:
+flannel_config="https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml"
+
+sudo -E kubectl apply -f "$flannel_config"
 
-# The kube-dns pod usually takes around 30 seconds to get ready
+# The coredns pod usually takes around 120 seconds to get ready
 # This instruction will wait until it is up and running, so we can
 # start creating our containers.
-dns_wait_time=300
+dns_wait_time=120
 sleep_time=5
-cmd="sudo -E kubectl get pods --all-namespaces | grep 'dns.*3/3.*Running'"
+cmd="sudo -E kubectl get pods --all-namespaces | grep 'coredns.*1/1.*Running'"
 waitForProcess "$dns_wait_time" "$sleep_time" "$cmd"
+
+# Enable the master node to be able to schedule pods.
+sudo -E kubectl taint nodes "$(hostname)" node-role.kubernetes.io/master:NoSchedule-

diff --git a/integration/kubernetes/kubeadm/config.yaml b/integration/kubernetes/kubeadm/config.yaml
new file mode 100644
index 000000000..b90559dfd
--- /dev/null
+++ b/integration/kubernetes/kubeadm/config.yaml
@@ -0,0 +1,12 @@
+apiVersion: kubeadm.k8s.io/v1alpha3
+kind: InitConfiguration
+nodeRegistration:
+  criSocket: CRI_RUNTIME_SOCKET
+---
+apiVersion: kubeadm.k8s.io/v1alpha3
+kind: ClusterConfiguration
+kubernetesVersion: v1.12.0
+networking:
+  dnsDomain: cluster.local
+  podSubnet: 10.244.0.0/16
+  serviceSubnet: 10.96.0.0/12

From 0d2426c1852507266b9f010cd9166ef6b97b159a Mon Sep 17 00:00:00 2001
From: Salvador Fuentes
Date: Wed, 21 Nov 2018 12:52:53 -0600
Subject: [PATCH 3/5] k8s: use annotation tags and new kubectl wait

Add untrusted annotation tags to the pod configuration files and use
`kubectl wait` instead of our `waitForProcess` script to know when a
pod is running.
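Note: as an illustration (not itself part of the patch; the names come
from the diffs below), the old polling pattern

    pod_status_cmd="sudo -E kubectl get pods -a | grep $pod_name | grep Running"
    waitForProcess "$wait_time" "$sleep_time" "$pod_status_cmd"

becomes a single blocking call; kubectl wait returns once the condition
is met or fails after its timeout, so no polling loop is needed:

    sudo -E kubectl wait --for=condition=Ready pod "$pod_name"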
Depends-on: github.com/kata-containers/runtime#928

Signed-off-by: Salvador Fuentes
---
 integration/kubernetes/k8s-cpu-ns.bats        | 14 ++---
 integration/kubernetes/k8s-memory.bats        | 18 +++---
 integration/kubernetes/k8s-pid-ns.bats        |  3 +-
 integration/kubernetes/k8s-uts+ipc-ns.bats    | 57 ++++++-----------
 integration/kubernetes/nginx.bats             | 36 +++++------
 .../busybox-pod.yaml}                         |  3 +
 .../untrusted_workloads/busybox-template.yaml | 15 +++++
 .../untrusted_workloads/nginx-deployment.yaml | 22 +++++++
 .../{ => untrusted_workloads}/pod-cpu.yaml    |  3 +
 .../pod-memory-limit.yaml                     |  3 +
 10 files changed, 98 insertions(+), 76 deletions(-)
 rename integration/kubernetes/{pod.yaml => untrusted_workloads/busybox-pod.yaml} (74%)
 create mode 100644 integration/kubernetes/untrusted_workloads/busybox-template.yaml
 create mode 100644 integration/kubernetes/untrusted_workloads/nginx-deployment.yaml
 rename integration/kubernetes/{ => untrusted_workloads}/pod-cpu.yaml (69%)
 rename integration/kubernetes/{ => untrusted_workloads}/pod-memory-limit.yaml (74%)

diff --git a/integration/kubernetes/k8s-cpu-ns.bats b/integration/kubernetes/k8s-cpu-ns.bats
index 5ebc92dcb..4028c235b 100644
--- a/integration/kubernetes/k8s-cpu-ns.bats
+++ b/integration/kubernetes/k8s-cpu-ns.bats
@@ -17,20 +17,18 @@ setup() {
 	total_cpus=2
 	total_requests=512
 	total_cpu_container=1
+	pod_config_dir="${BATS_TEST_DIRNAME}/untrusted_workloads"
 }
 
 @test "Check CPU constraints" {
 	issue="https://github.com/kata-containers/tests/issues/794"
 	[ "${CRI_RUNTIME}" == "containerd" ] && skip "test not working with ${CRI_RUNTIME} see: ${issue}"
-	wait_time=120
-	sleep_time=5
 
 	# Create the pod
-	sudo -E kubectl create -f pod-cpu.yaml
+	sudo -E kubectl create -f "${pod_config_dir}/pod-cpu.yaml"
 
 	# Check pod creation
-	pod_status_cmd="sudo -E kubectl get pods -a | grep $pod_name | grep Running"
-	waitForProcess "$wait_time" "$sleep_time" "$pod_status_cmd"
+	sudo -E kubectl wait --for=condition=Ready pod "$pod_name"
 
 	# Check the total of cpus
 	total_cpus_container=$(sudo -E kubectl exec $pod_name -c $container_name nproc)
@@ -54,9 +52,5 @@ setup() {
 }
 
 teardown() {
-	sudo -E kubectl delete deployment "$pod_name"
-	# Wait for the pods to be deleted
-	cmd="sudo -E kubectl get pods | grep found."
-	waitForProcess "$wait_time" "$sleep_time" "$cmd"
-	sudo -E kubectl get pods
+	sudo -E kubectl delete pod "$pod_name"
 }

diff --git a/integration/kubernetes/k8s-memory.bats b/integration/kubernetes/k8s-memory.bats
index 32c811a0f..d1fe2e9ce 100644
--- a/integration/kubernetes/k8s-memory.bats
+++ b/integration/kubernetes/k8s-memory.bats
@@ -10,6 +10,7 @@ load "${BATS_TEST_DIRNAME}/../../.ci/lib.sh"
 setup() {
 	export KUBECONFIG=/etc/kubernetes/admin.conf
 	pod_name="memory-test"
+	pod_config_dir="${BATS_TEST_DIRNAME}/untrusted_workloads"
 }
 
 @test "Exceeding memory constraints" {
 	sed \
 		-e "s/\${memory_size}/${memory_limit_size}/" \
 		-e "s/\${memory_allocated}/${allocated_size}/" \
-		pod-memory-limit.yaml > test_exceed_memory.yaml
+		"${pod_config_dir}/pod-memory-limit.yaml" > "${pod_config_dir}/test_exceed_memory.yaml"
 
 	# Create the pod exceeding memory constraints
-	run sudo -E kubectl create -f test_exceed_memory.yaml
+	run sudo -E kubectl create -f "${pod_config_dir}/test_exceed_memory.yaml"
 	[ "$status" -ne 0 ]
 
-	rm -f test_exceed_memory.yaml
+	rm -f "${pod_config_dir}/test_exceed_memory.yaml"
 }
 
 @test "Running within memory constraints" {
 	memory_limit_size="200Mi"
 	allocated_size="100M"
-	wait_time=300
-	sleep_time=5
 
 	# Create test .yaml
 	sed \
 		-e "s/\${memory_size}/${memory_limit_size}/" \
 		-e "s/\${memory_allocated}/${allocated_size}/" \
-		pod-memory-limit.yaml > test_within_memory.yaml
+		"${pod_config_dir}/pod-memory-limit.yaml" > "${pod_config_dir}/test_within_memory.yaml"
 
 	# Create the pod within memory constraints
-	sudo -E kubectl create -f test_within_memory.yaml
+	sudo -E kubectl create -f "${pod_config_dir}/test_within_memory.yaml"
 
 	# Check pod creation
-	pod_status_cmd="sudo -E kubectl get pods -a | grep $pod_name | grep Running"
-	waitForProcess "$wait_time" "$sleep_time" "$pod_status_cmd"
+	sudo -E kubectl wait --for=condition=Ready pod "$pod_name"
 
-	rm -f test_within_memory.yaml
+	rm -f "${pod_config_dir}/test_within_memory.yaml"
 	sudo -E kubectl delete pod "$pod_name"
 }

diff --git a/integration/kubernetes/k8s-pid-ns.bats b/integration/kubernetes/k8s-pid-ns.bats
index 97cd3dd18..b307ee81b 100644
--- a/integration/kubernetes/k8s-pid-ns.bats
+++ b/integration/kubernetes/k8s-pid-ns.bats
@@ -13,6 +13,7 @@ setup() {
 	pod_name="busybox"
 	first_container_name="first-test-container"
 	second_container_name="second-test-container"
+	pod_config_dir="${BATS_TEST_DIRNAME}/untrusted_workloads"
 }
 
 @test "Check PID namespaces" {
 	wait_time=120
 	sleep_time=5
 
 	# Create the pod
-	sudo -E kubectl create -f pod.yaml
+	sudo -E kubectl create -f "${pod_config_dir}/busybox-pod.yaml"
 
 	# Check pod creation
 	pod_status_cmd="sudo -E kubectl get pods -a | grep $pod_name | grep Running"

diff --git a/integration/kubernetes/k8s-uts+ipc-ns.bats b/integration/kubernetes/k8s-uts+ipc-ns.bats
index 81050df45..c27e94e93 100644
--- a/integration/kubernetes/k8s-uts+ipc-ns.bats
+++ b/integration/kubernetes/k8s-uts+ipc-ns.bats
@@ -12,49 +12,42 @@ setup() {
 	export KUBECONFIG=/etc/kubernetes/admin.conf
 	first_pod_name="first-test"
 	second_pod_name="second-test"
-	sleep_cmd="sleep 30"
 
-	# Pull the images before launching workload. This is mainly because we use
-	# a timeout and in slow networks it may result in not been able to pull the image
-	# successfully.
+	# Pull the images before launching workload.
 	sudo -E crictl pull "$busybox_image"
+	pod_config_dir="${BATS_TEST_DIRNAME}/untrusted_workloads"
+
+	uts_cmd="ls -la /proc/self/ns/uts"
+	ipc_cmd="ls -la /proc/self/ns/ipc"
 }
 
 @test "Check UTS and IPC namespaces" {
 	issue="https://github.com/kata-containers/tests/issues/793"
 	[ "${CRI_RUNTIME}" == "containerd" ] && skip "test not working with ${CRI_RUNTIME} see: ${issue}"
-	wait_time=120
-	sleep_time=5
 
 	# Run the first pod
-	sudo -E kubectl run $first_pod_name --image=$busybox_image -- sh -c "eval $sleep_cmd"
-	first_pod_status_cmd="sudo -E kubectl get pods -a | grep $first_pod_name | grep Running"
-	waitForProcess "$wait_time" "$sleep_time" "$first_pod_status_cmd"
+	first_pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
+	cp "$pod_config_dir/busybox-template.yaml" "$first_pod_config"
+	sed -i "s/NAME/${first_pod_name}/" "$first_pod_config"
+	sudo -E kubectl create -f "$first_pod_config"
+	sudo -E kubectl wait --for=condition=Ready pod "$first_pod_name"
+	first_pod_uts_ns=$(sudo -E kubectl exec "$first_pod_name" -- sh -c "$uts_cmd" | grep uts | cut -d ':' -f3)
+	first_pod_ipc_ns=$(sudo -E kubectl exec "$first_pod_name" -- sh -c "$ipc_cmd" | grep ipc | cut -d ':' -f3)
 
 	# Run the second pod
-	sudo -E kubectl run $second_pod_name --image=$busybox_image -- sh -c "eval $sleep_cmd"
-	second_pod_status_cmd="sudo -E kubectl get pods -a | grep $second_pod_name | grep Running"
-	waitForProcess "$wait_time" "$sleep_time" "$second_pod_status_cmd"
-
-	# Check UTS namespace
-	uts_cmd="ls -la /proc/self/ns/uts"
-	first_complete_pod_name=$(sudo -E kubectl get pods | grep "$first_pod_name" | cut -d ' ' -f1)
-	second_complete_pod_name=$(sudo -E kubectl get pods | grep "$second_pod_name" | cut -d ' ' -f1)
-	first_pod_uts_namespace=$(sudo -E kubectl exec "$first_complete_pod_name" -- sh -c "$uts_cmd" | grep uts | cut -d ':' -f3)
-	second_pod_uts_namespace=$(sudo -E kubectl exec "$second_complete_pod_name" -- sh -c "$uts_cmd" | grep uts | cut -d ':' -f3)
-	[ "$first_pod_uts_namespace" == "$second_pod_uts_namespace" ]
-
-	# Check IPC namespace
-	ipc_cmd="ls -la /proc/self/ns/ipc"
-	first_pod_ipc_namespace=$(sudo -E kubectl exec "$first_complete_pod_name" -- sh -c "$ipc_cmd" | grep ipc | cut -d ':' -f3)
-	second_pod_ipc_namespace=$(sudo -E kubectl exec "$second_complete_pod_name" -- sh -c "$ipc_cmd" | grep ipc | cut -d ':' -f3)
-	[ "$first_pod_ipc_namespace" == "$second_pod_ipc_namespace" ]
+	second_pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
+	cp "$pod_config_dir/busybox-template.yaml" "$second_pod_config"
+	sed -i "s/NAME/${second_pod_name}/" "$second_pod_config"
+	sudo -E kubectl create -f "$second_pod_config"
+	sudo -E kubectl wait --for=condition=Ready pod "$second_pod_name"
+	second_pod_uts_ns=$(sudo -E kubectl exec "$second_pod_name" -- sh -c "$uts_cmd" | grep uts | cut -d ':' -f3)
+	second_pod_ipc_ns=$(sudo -E kubectl exec "$second_pod_name" -- sh -c "$ipc_cmd" | grep ipc | cut -d ':' -f3)
+
+	# Check UTS and IPC namespaces
+	[ "$first_pod_uts_ns" == "$second_pod_uts_ns" ]
+	[ "$first_pod_ipc_ns" == "$second_pod_ipc_ns" ]
 }
 
 teardown() {
-	sudo -E kubectl delete deployment "$first_pod_name"
-	sudo -E kubectl delete deployment "$second_pod_name"
-	# Wait for the pods to be deleted
-	cmd="sudo -E kubectl get pods | grep found."
-	waitForProcess "$wait_time" "$sleep_time" "$cmd"
-	sudo -E kubectl get pods
+	sudo -E kubectl delete pod "$first_pod_name"
+	sudo -E kubectl delete pod "$second_pod_name"
 }

diff --git a/integration/kubernetes/nginx.bats b/integration/kubernetes/nginx.bats
index 611a8e68f..21577f851 100644
--- a/integration/kubernetes/nginx.bats
+++ b/integration/kubernetes/nginx.bats
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bats
 #
 # Copyright (c) 2018 Intel Corporation
 #
@@ -12,30 +12,24 @@ setup() {
 	nginx_version=$("${GOPATH}/bin/yq" read "$versions_file" "docker_images.nginx.version")
 	nginx_image="nginx:$nginx_version"
 	busybox_image="busybox"
-	service_name="nginx-service"
+	deployment="nginx-deployment"
 	export KUBECONFIG=/etc/kubernetes/admin.conf
-	master=$(hostname)
-	sudo -E kubectl taint nodes "$master" node-role.kubernetes.io/master:NoSchedule-
-	# Pull the images before launching workload. This is mainly because we use
-	# a timeout and in slow networks it may result in not been able to pull the image
-	# successfully.
+	# Pull the images before launching workload.
 	sudo -E crictl pull "$busybox_image"
 	sudo -E crictl pull "$nginx_image"
+	pod_config_dir="${BATS_TEST_DIRNAME}/untrusted_workloads"
 }
 
 @test "Verify nginx connectivity between pods" {
-	wait_time=120
-	sleep_time=5
-	cmd="sudo -E kubectl get pods | grep $service_name | grep Running"
-	sudo -E kubectl run "$service_name" --image="$nginx_image" --replicas=2
-	sudo -E kubectl expose deployment "$service_name" --port=80
-	sudo -E kubectl get svc,pod
-	# Wait for nginx service to come up
-	waitForProcess "$wait_time" "$sleep_time" "$cmd"
-	sudo -E kubectl describe service "$service_name"
+	wait_time=30
+	sleep_time=3
+	sudo -E kubectl create -f "${pod_config_dir}/${deployment}.yaml"
+	sudo -E kubectl wait --for=condition=Available deployment/${deployment}
+	sudo -E kubectl expose deployment/${deployment}
+
 	busybox_pod="test-nginx"
 	sudo -E kubectl run $busybox_pod --restart=Never --image="$busybox_image" \
 		-- wget --timeout=5 "$deployment"
 	cmd="sudo -E kubectl get pods -a | grep $busybox_pod | grep Completed"
 	waitForProcess "$wait_time" "$sleep_time" "$cmd"
 	sudo -E kubectl logs "$busybox_pod" | grep "index.html"
 }
 
 teardown() {
-	sudo -E kubectl delete deployment "$service_name"
-	sudo -E kubectl delete service "$service_name"
+	sudo -E kubectl delete deployment "$deployment"
+	sudo -E kubectl delete service "$deployment"
 	sudo -E kubectl delete pod "$busybox_pod"
-	# Wait for the pods to be deleted
-	cmd="sudo -E kubectl get pods | grep found."
-	waitForProcess "$wait_time" "$sleep_time" "$cmd"
-	sudo -E kubectl get pods
 }

diff --git a/integration/kubernetes/pod.yaml b/integration/kubernetes/untrusted_workloads/busybox-pod.yaml
similarity index 74%
rename from integration/kubernetes/pod.yaml
rename to integration/kubernetes/untrusted_workloads/busybox-pod.yaml
index e96851b66..713446bf2 100644
--- a/integration/kubernetes/pod.yaml
+++ b/integration/kubernetes/untrusted_workloads/busybox-pod.yaml
@@ -2,6 +2,9 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: busybox
+  annotations:
+    io.kubernetes.cri-o.TrustedSandbox: "false"
+    io.kubernetes.cri.untrusted-workload: "true"
 spec:
   shareProcessNamespace: true
   containers:

diff --git a/integration/kubernetes/untrusted_workloads/busybox-template.yaml b/integration/kubernetes/untrusted_workloads/busybox-template.yaml
new file mode 100644
index 000000000..f95af0b1a
--- /dev/null
+++ b/integration/kubernetes/untrusted_workloads/busybox-template.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: NAME
+  annotations:
+    io.kubernetes.cri-o.TrustedSandbox: "false"
+    io.kubernetes.cri.untrusted-workload: "true"
+spec:
+  shareProcessNamespace: true
+  containers:
+  - name: busybox
+    image: busybox
+    command:
+      - sleep
+      - "120"

diff --git a/integration/kubernetes/untrusted_workloads/nginx-deployment.yaml b/integration/kubernetes/untrusted_workloads/nginx-deployment.yaml
new file mode 100644
index 000000000..492e49684
--- /dev/null
+++ b/integration/kubernetes/untrusted_workloads/nginx-deployment.yaml
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: nginx
+      annotations:
+        io.kubernetes.cri-o.TrustedSandbox: "false"
+        io.kubernetes.cri.untrusted-workload: "true"
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14
+        ports:
+        - containerPort: 80

diff --git a/integration/kubernetes/pod-cpu.yaml b/integration/kubernetes/untrusted_workloads/pod-cpu.yaml
similarity index 69%
rename from integration/kubernetes/pod-cpu.yaml
rename to integration/kubernetes/untrusted_workloads/pod-cpu.yaml
index 2178cc3d1..0c90db792 100644
--- a/integration/kubernetes/pod-cpu.yaml
+++ b/integration/kubernetes/untrusted_workloads/pod-cpu.yaml
@@ -2,6 +2,9 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: constraints-cpu-test
+  annotations:
+    io.kubernetes.cri-o.TrustedSandbox: "false"
+    io.kubernetes.cri.untrusted-workload: "true"
 spec:
   containers:
   - name: first-cpu-container

diff --git a/integration/kubernetes/pod-memory-limit.yaml b/integration/kubernetes/untrusted_workloads/pod-memory-limit.yaml
similarity index 74%
rename from integration/kubernetes/pod-memory-limit.yaml
rename to integration/kubernetes/untrusted_workloads/pod-memory-limit.yaml
index 92d44da2b..6fc0e62ec 100644
--- a/integration/kubernetes/pod-memory-limit.yaml
+++ b/integration/kubernetes/untrusted_workloads/pod-memory-limit.yaml
@@ -2,6 +2,9 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: memory-test
+  annotations:
+    io.kubernetes.cri-o.TrustedSandbox: "false"
+    io.kubernetes.cri.untrusted-workload: "true"
 spec:
   containers:
   - name: memory-test-ctr

From 24bd3273f18c758f0262bdc8cc7b48cfe3df0c73 Mon Sep 17 00:00:00 2001
From: Salvador Fuentes
Date: Thu, 22 Nov 2018 09:57:49 -0600
Subject: [PATCH 4/5] k8s+containerd: remove skip on test

The tests that verify the UTS and IPC namespaces, as well as the CPU
test, now work well with containerd on k8s 1.12.

Fixes: #793.
Fixes: #794.
Signed-off-by: Salvador Fuentes
---
 integration/kubernetes/k8s-cpu-ns.bats     | 3 ---
 integration/kubernetes/k8s-uts+ipc-ns.bats | 3 ---
 2 files changed, 6 deletions(-)

diff --git a/integration/kubernetes/k8s-cpu-ns.bats b/integration/kubernetes/k8s-cpu-ns.bats
index 4028c235b..158529a56 100644
--- a/integration/kubernetes/k8s-cpu-ns.bats
+++ b/integration/kubernetes/k8s-cpu-ns.bats
@@ -21,9 +21,6 @@ setup() {
 }
 
 @test "Check CPU constraints" {
-	issue="https://github.com/kata-containers/tests/issues/794"
-	[ "${CRI_RUNTIME}" == "containerd" ] && skip "test not working with ${CRI_RUNTIME} see: ${issue}"
-
 	# Create the pod
 	sudo -E kubectl create -f "${pod_config_dir}/pod-cpu.yaml"
 
diff --git a/integration/kubernetes/k8s-uts+ipc-ns.bats b/integration/kubernetes/k8s-uts+ipc-ns.bats
index c27e94e93..9e54428ae 100644
--- a/integration/kubernetes/k8s-uts+ipc-ns.bats
+++ b/integration/kubernetes/k8s-uts+ipc-ns.bats
@@ -21,9 +21,6 @@ setup() {
 }
 
 @test "Check UTS and IPC namespaces" {
-	issue="https://github.com/kata-containers/tests/issues/793"
-	[ "${CRI_RUNTIME}" == "containerd" ] && skip "test not working with ${CRI_RUNTIME} see: ${issue}"
-
 	# Run the first pod
 	first_pod_config=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
 	cp "$pod_config_dir/busybox-template.yaml" "$first_pod_config"

From 4ff6bc68f8f83dbf0554f46f5235aff29e5c4f2f Mon Sep 17 00:00:00 2001
From: Salvador Fuentes
Date: Thu, 22 Nov 2018 12:56:53 -0600
Subject: [PATCH 5/5] k8s: remove kubernetes packages on cleanup

Remove kubernetes packages and configuration when running on baremetal
machines.

Signed-off-by: Salvador Fuentes
---
 .ci/lib.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.ci/lib.sh b/.ci/lib.sh
index 664782797..4c6f2646a 100755
--- a/.ci/lib.sh
+++ b/.ci/lib.sh
@@ -255,5 +255,10 @@ gen_clean_arch() {
 	delete_stale_kata_resource
 	info "Remove installed kata packages"
 	${GOPATH}/src/${tests_repo}/cmd/kata-manager/kata-manager.sh remove-packages
+	info "Remove installed kubernetes packages and configuration"
+	if [ "$ID" == ubuntu ]; then
+		sudo rm -rf /etc/systemd/system/kubelet.service.d
+		sudo apt-get purge kubeadm kubelet kubectl -y
+	fi
 }
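Note: as an illustration (not part of the patches), a quick way to
verify the new cleanup behavior on an Ubuntu baremetal runner after
gen_clean_arch finishes:

    # Should print nothing once kubeadm, kubelet and kubectl are purged
    dpkg -l | grep -E 'kube(adm|let|ctl)'
    # The kubelet drop-in directory removed above should be gone as well
    ls /etc/systemd/system/kubelet.service.d 2>/dev/null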