Test PR for Integration test #150

Open · wants to merge 1 commit into master
98 changes: 98 additions & 0 deletions .github/workflows/basic-ci.yaml
@@ -0,0 +1,98 @@
name: Harvester-Network-Controller CI

# "synchronize" re-runs the workflow whenever a pull request is updated
on:
  push:
    branches:
    - master
    - 'v**'
  pull_request:
    types: [opened, reopened, synchronize]

env:
  LIBVIRT_DEFAULT_URI: "qemu:///system"
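  # VM_DEPLOYED is flipped to "true" via $GITHUB_ENV once the Vagrant cluster
  # is up, so the log-collection and teardown steps know whether there is
  # anything to clean up.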
  VM_DEPLOYED: false

jobs:
  validation:
    runs-on:
    - self-hosted
    - golang
    steps:
    - name: "Clone and check"
      uses: actions/checkout@v3
    - name: "Run validations"
      run: |
        make validate
  job-new-installation:
    needs: validation
    runs-on:
    - self-hosted
    - golang
    steps:
    - name: "Clone and check"
      uses: actions/checkout@v3
    - name: "Build the Image for the Integration Test"
      run: |
        BUILD_FOR_CI=true make
        ./ci/scripts/patch-ttl-repo.sh
        echo "NCH override result as below:"
        cat ci/charts/nch-override.yaml
- name: "Local Deployment (Harvester+NetworkController) for testing"
id: vm_deploy
run: |
rm -rf nch-new-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s nch-new-vagrant-k3s
pushd nch-new-vagrant-k3s
yq e -i ".cluster_size = 1" settings.yaml
./new-cluster.sh
echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
./scripts/deploy_longhorn.sh
popd
- name: "Patch Image target"
run: |
./ci/scripts/patch-ttl-repo.sh
echo "NCH override result as below:"
cat ci/charts/nch-override.yaml
- name: "Deploy NCH"
run: |
pushd nch-new-vagrant-k3s
cp ../ci/scripts/deploy_nch_current.sh ./deploy_nch_current.sh
cp ../ci/charts/nch-override.yaml ./nch-override.yaml
./deploy_nch_current.sh
popd
- name: "Add disk"
run: |
pushd nch-new-vagrant-k3s
./scripts/attach-disk.sh node1 nch-new-vagrant-k3s
sleep 30
popd
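    # The next step exports the VM's ssh-config and kubeconfig into the
    # workspace so the Go integration tests can reach the cluster.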
- name: "Run Basic Test"
id: basic-test
run: |
pushd nch-new-vagrant-k3s
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
echo Running integration tests
NCH_HOME=`pwd` go test -v ./tests/...
- name: "Get NCH logs"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip getting logs"
exit 0
fi
./ci/scripts/get-debug-info.sh
- name: "Tear Down / Cleanup"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip VM destroy"
exit 0
fi
rm -rf /tmp/network-controller/nch-new-vagrant-k3s
pushd nch-new-vagrant-k3s
vagrant destroy -f --parallel
popd
17 changes: 17 additions & 0 deletions ci/charts/nch-override.yaml
@@ -0,0 +1,17 @@
image:
  repository: rancher/harvester-network-controller
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

webhook:
  repository: rancher/harvester-network-webhook
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

autoProvisionFilter: [/dev/sd*]
debug: true

# We inject the udev monitor error manually only once, so it can be exercised in CI.
injectUdevMonitorError: true
68 changes: 68 additions & 0 deletions ci/scripts/deploy_nch_chart.sh
@@ -0,0 +1,68 @@
#!/bin/bash -e

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

ensure_command() {
  local cmd=$1
  if ! which "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

ensure_network_controller_ready() {
  local retries=0
  while true; do
    running_num=$(kubectl get pods -n harvester-system | grep ^network-controller | grep -c Running)
    if [[ $running_num -eq ${cluster_nodes} ]]; then
      echo "network-controller pods are ready!"
      break
    fi
    if [[ $retries -ge 30 ]]; then
      echo "network-controller pods did not become ready in time."
      exit 1
    fi
    retries=$((retries + 1))
    sleep 10
  done
}

if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  $HELM version
else
  echo "Found helm, version info as below:"
  HELM=$(which helm)
  $HELM version
fi

pushd $TOP_DIR
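
# Seed a default override file so the install below always has one to apply;
# an existing nch-override.yaml (e.g. from patch-ttl-repo.sh) takes precedence.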

cat > nch-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f nch-override.yaml ]; then
mv nch-override.yaml.default nch-override.yaml
fi

#$HELM pull harvester-network-controller --repo https://charts.harvesterhci.io --untar
#$HELM install -f $TOP_DIR/nch-override.yaml harvester-network-controller ./harvester-network-controller --create-namespace -n harvester-system

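# Install the released chart from the upstream Harvester chart repo with the
# CI override values, after applying its dependency CRDs.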
$HELM repo add harvester https://charts.harvesterhci.io
$HELM repo update
kubectl apply -f https://raw.githubusercontent.com/harvester/network-controller-harvester/master/manifests/dependency_crds
$HELM install -f $TOP_DIR/nch-override.yaml harvester-network-controller harvester/harvester-network-controller --create-namespace -n harvester-system

cluster_nodes=$(yq -e e '.cluster_size' $TOP_DIR/settings.yaml)
echo "cluster nodes: $cluster_nodes"
ensure_network_controller_ready

popd
65 changes: 65 additions & 0 deletions ci/scripts/deploy_nch_current.sh
@@ -0,0 +1,65 @@
#!/bin/bash -e

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

ensure_command() {
  local cmd=$1
  if ! which "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

ensure_network_controller_ready() {
  local retries=0
  while true; do
    running_num=$(kubectl get pods -n harvester-system | grep ^network-controller | grep -c Running)
    if [[ $running_num -eq ${cluster_nodes} ]]; then
      echo "network-controller pods are ready!"
      break
    fi
    if [[ $retries -ge 30 ]]; then
      echo "network-controller pods did not become ready in time."
      exit 1
    fi
    retries=$((retries + 1))
    sleep 10
  done
}

if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  $HELM version
else
  echo "Found helm, version info as below:"
  HELM=$(which helm)
  $HELM version
fi

cluster_nodes=$(yq -e e '.cluster_size' $TOP_DIR/settings.yaml)
echo "cluster nodes: $cluster_nodes"

pushd $TOP_DIR
cat > nch-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f nch-override.yaml ]; then
mv nch-override.yaml.default nch-override.yaml
fi

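# Copy the chart bundled in this repo (assumed to live under deploy/charts/)
# and install it with the CI override values.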
cp -r ../deploy/charts/harvester-network-controller harvester-network-controller

target_img=$(yq -e .image.repository nch-override.yaml)
echo "install target image: ${target_img}"
$HELM install -f $TOP_DIR/nch-override.yaml harvester-network-controller ./harvester-network-controller --create-namespace -n harvester-system

ensure_network_controller_ready

popd
10 changes: 10 additions & 0 deletions ci/scripts/get-debug-info.sh
@@ -0,0 +1,10 @@
#!/bin/bash -e

TARGETNODE="node1"

export KUBECONFIG=kubeconfig

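# Pick the network-controller pod scheduled on the target node; webhook pods
# are excluded since we only want the controller's logs.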
NCHPOD=$(kubectl get pods -n harvester-system --field-selector spec.nodeName=$TARGETNODE | grep ^harvester-network-controller | grep -v webhook | awk '{print $1}')

# filter out the redundant Skip log
kubectl logs $NCHPOD -n harvester-system | grep -v Skip
10 changes: 10 additions & 0 deletions ci/scripts/patch-ttl-repo.sh
@@ -0,0 +1,10 @@
#!/bin/bash -e

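# ttl.sh is an anonymous, ephemeral registry: the image tag doubles as the
# image's time-to-live, so the "1h" tag below keeps it just long enough for CI.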
COMMIT=$(git rev-parse --short HEAD)
IMAGE=ttl.sh/network-controller-harvester-${COMMIT}
IMAGE_WEBHOOK=ttl.sh/network-controller-harvester-webhook-${COMMIT}

yq e -i ".image.repository = \"${IMAGE}\"" ci/charts/nch-override.yaml
yq e -i ".image.tag = \"1h\"" ci/charts/nch-override.yaml
yq e -i ".webhook.image.repository = \"${IMAGE_WEBHOOK}\"" ci/charts/nch-override.yaml
yq e -i ".webhook.image.tag = \"1h\"" ci/charts/nch-override.yaml
62 changes: 62 additions & 0 deletions ci/scripts/upgrade_nch.sh
@@ -0,0 +1,62 @@
#!/bin/bash -e

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

ensure_command() {
  local cmd=$1
  if ! which "$cmd" &> /dev/null; then
    echo 1
    return
  fi
  echo 0
}

if [ ! -f "$TOP_DIR/kubeconfig" ]; then
  echo "kubeconfig does not exist. Please create cluster first."
  echo "Maybe try new_cluster.sh"
  exit 1
fi
echo "$TOP_DIR/kubeconfig"
export KUBECONFIG=$TOP_DIR/kubeconfig

if [[ $(ensure_command helm) -eq 1 ]]; then
  echo "no helm, try to curl..."
  curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
  tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
  HELM=$TOP_DIR/linux-amd64/helm
  $HELM version
else
  echo "Found helm, version info as below:"
  HELM=$(which helm)
  $HELM version
fi

cluster_nodes=$(yq -e e '.cluster_size' $TOP_DIR/settings.yaml)
echo "cluster nodes: $cluster_nodes"

pushd $TOP_DIR
# cleanup first
rm -rf harvester-network-controller*
rm -rf nch-override.yaml

cp -r ../deploy/charts/harvester-network-controller harvester-network-controller
cp ../ci/charts/nch-override.yaml nch-override.yaml

target_img=$(yq -e .image.repository nch-override.yaml)
echo "upgrade target image: ${target_img}, upgrading ..."
$HELM upgrade -f $TOP_DIR/nch-override.yaml harvester-network-controller harvester-network-controller/ -n harvester-system

sleep 30 # wait 30 seconds for the network-controller pods to respawn

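# Verify the upgrade actually rolled the pods onto the new image by comparing
# the running container's image repository against the override file.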
pod_name=$(kubectl get pods -n harvester-system | grep Running | grep -v webhook | grep ^harvester-network-controller | head -n1 | awk '{print $1}')
container_img=$(kubectl get pods ${pod_name} -n harvester-system -o yaml | yq -e .spec.containers[0].image | tr ":" "\n")
yaml_img=$(yq -e .image.repository nch-override.yaml)
if grep -q ${yaml_img} <<< ${container_img}; then
  echo "Image is equal: ${yaml_img}"
else
  echo "Images are not equal, container: ${container_img}, yaml file: ${yaml_img}"
  exit 1
fi
echo "harvester-network-controller upgraded successfully!"
popd