Skip to content

Commit c86bc81

Browse files
authored
Merge pull request #95 from rancher/dpock/repo-updates
[main] helm-project-operator and helm-locker repo reorg updates
2 parents 72b296e + a97f78f commit c86bc81

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

59 files changed

+1682
-116
lines changed

.github/scripts/branch-tags.sh

+55
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
#!/bin/bash
#
# branch-tags.sh — compute container image tag names for the current
# branch/commit, for use in CI image builds.
#
# Outputs (one "key=value" per line, suitable for appending to $GITHUB_OUTPUT):
#   branch_tag        - floating tag for the branch (e.g. "head", "v0.1-head")
#   branch_static_tag - immutable tag pinned to the short commit sha
#   prev_static_tag   - most recently created static tag with the same prefix
#   prev_tag          - alias of prev_static_tag (key read by e2e-ci.yaml)

# Exit immediately if a command exits with a non-zero status
set -e

# Function to get the previous tag
#   $1 - tag prefix to match (e.g. "main-" or "v0.1-head-")
# Prints the newest matching tag by creation date, or nothing when no tag
# matches (grep's non-zero status is masked because `head` ends the pipeline).
getPreviousTag() {
  local tagPrefix="$1"
  # List all tags and filter ones that start with tagPrefix, sort by creation date
  git tag --sort=-creatordate | grep "^${tagPrefix}" | head -n 1
}

# Determine if we're in a GitHub Actions environment
if [ -n "$GITHUB_REF" ] && [ -n "$GITHUB_SHA" ]; then
  # Use GHA environment variables
  ref="$GITHUB_REF"
  commitSha="${GITHUB_SHA:0:7}"
else
  # Fallback to local Git repo. Ask git itself instead of testing for a
  # `.git` directory so worktrees and submodules (where `.git` is a file,
  # not a directory) are detected correctly.
  if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    echo "This script must be run from the root of a Git repository or GitHub Actions." >&2
    exit 1
  fi
  ref=$(git symbolic-ref HEAD)
  commitSha=$(git rev-parse --short HEAD)
fi

branchTag=""
branchStaticTag=""
prevTag=""

if [ "$ref" == "refs/heads/main" ]; then
  branchTag="head"
  branchStaticTag="main-${commitSha}"
  prevTag=$(getPreviousTag "main-")
elif [[ "$ref" == refs/heads/release/* ]]; then
  version="${ref#refs/heads/release/}" # Extract "vX.0"
  branchTag="${version}-head"
  branchStaticTag="${version}-head-${commitSha}"
  prevTag=$(getPreviousTag "${version}-head-")
else
  # Not on main or a release branch: prefer an exact tag on HEAD,
  # otherwise fall back to a commit-scoped dev tag.
  gitTag=$(git tag -l --contains HEAD | head -n 1)
  if [[ -n "$gitTag" ]]; then
    branchTag="${gitTag}"
    branchStaticTag="${gitTag}-${commitSha}"
  else
    branchTag="dev-${commitSha}"
    branchStaticTag="dev-${commitSha}"
  fi
fi

# Output the results
echo "branch_tag=${branchTag}"
echo "branch_static_tag=${branchStaticTag}"
echo "prev_static_tag=${prevTag}"
# Also emit "prev_tag": the e2e workflow reads
# `steps.set-vars.outputs.prev_tag`, which would otherwise be empty.
echo "prev_tag=${prevTag}"

.github/workflows/e2e-ci.yaml

+63-49
Original file line numberDiff line numberDiff line change
@@ -32,23 +32,41 @@ env:
3232
YQ_VERSION: v4.25.1
3333
E2E_CI: true
3434
REPO: rancher
35-
TAG: dev
3635
APISERVER_PORT: 8001
3736
DEFAULT_SLEEP_TIMEOUT_SECONDS: 10
3837
KUBECTL_WAIT_TIMEOUT: 300s
3938
DEBUG: ${{ github.event.inputs.debug || false }}
39+
CLUSTER_NAME: 'e2e-ci-prometheus-federator'
4040

4141
permissions:
4242
contents: write
4343

4444
jobs:
45+
prebuild-env:
46+
name: Prebuild needed Env vars
47+
runs-on: ubuntu-latest
48+
steps:
49+
- name: Check out the repository to the runner
50+
uses: actions/checkout@v4
51+
- name: Set Branch Tag and Other Variables
52+
id: set-vars
53+
run: bash ./.github/scripts/branch-tags.sh >> $GITHUB_OUTPUT
54+
outputs:
55+
branch_tag: ${{ steps.set-vars.outputs.branch_tag }}
56+
branch_static_tag: ${{ steps.set-vars.outputs.branch_static_tag }}
57+
prev_tag: ${{ steps.set-vars.outputs.prev_tag }}
4558
e2e-prometheus-federator:
59+
needs: [
60+
prebuild-env,
61+
]
4662
runs-on: ubuntu-latest
63+
env:
64+
TAG: ${{ needs.prebuild-env.outputs.branch_static_tag }}
4765
strategy:
4866
matrix:
4967
k3s_version:
5068
# k3d version list k3s | sed 's/+/-/' | sort -h
51-
- ${{ github.event.inputs.k3s_version || 'v1.28.4-k3s2' }}
69+
- ${{ github.event.inputs.k3s_version || 'v1.28.14-k3s1' }}
5270
steps:
5371
-
5472
uses: actions/checkout@v3
@@ -66,28 +84,24 @@ jobs:
6684
run: |
6785
sudo wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq;
6886
-
69-
name: Perform CI
87+
name: Perform pre-e2e image build
7088
run: |
71-
REPO=${REPO} TAG=${TAG} ./scripts/build;
72-
REPO=${REPO} TAG=${TAG} ./scripts/package;
89+
EMBEDED_CHART_VERSION=0.3.4 REPO=${REPO} TAG=${TAG} make build;
90+
REPO=${REPO} TAG=${TAG} make package;
7391
-
74-
name: Provision k3d Cluster
75-
uses: AbsaOSS/k3d-action@v2
76-
# k3d will automatically create a network named k3d-test-cluster-1 with the range 172.18.0.0/16
77-
with:
78-
cluster-name: "e2e-ci-prometheus-federator"
79-
args: >-
80-
--agents 1
81-
--network "nw01"
82-
--image docker.io/rancher/k3s:${{matrix.k3s_version}}
92+
name : Install k3d
93+
run : ./.github/workflows/e2e/scripts/install-k3d.sh
94+
-
95+
name : Setup k3d cluster
96+
run : K3S_VERSION=${{ matrix.k3s_version }} ./.github/workflows/e2e/scripts/setup-cluster.sh
8397
-
8498
name: Import Images Into k3d
8599
run: |
86-
k3d image import ${REPO}/prometheus-federator:${TAG} -c e2e-ci-prometheus-federator;
100+
k3d image import ${REPO}/prometheus-federator:${TAG} -c $CLUSTER_NAME;
87101
-
88102
name: Setup kubectl context
89103
run: |
90-
kubectl config use-context k3d-e2e-ci-prometheus-federator;
104+
kubectl config use-context "k3d-$CLUSTER_NAME";
91105
-
92106
name: Set Up Tmate Debug Session
93107
if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.enable_tmate == 'true' }}
@@ -110,39 +124,39 @@ jobs:
110124
-
111125
name: Check if Project Registration Namespace is auto-created on namespace detection
112126
run: ./.github/workflows/e2e/scripts/create-project-namespace.sh;
113-
114-
# Commenting out for failure in CI but not locally
115-
# -
116-
# name: Create Project Monitoring Stack via ProjectHelmChart CR
117-
# run: ./.github/workflows/e2e/scripts/create-projecthelmchart.sh;
118-
# -
119-
# name: Check if the Project Prometheus Stack is up
120-
# run: ./.github/workflows/e2e/scripts/validate-project-monitoring.sh;
121-
# -
122-
# name: Wait for 8 minutes for enough scraping to be done to continue
123-
# run: |
124-
# for i in {1..48}; do sleep 10; echo "Waited $((i*10)) seconds for metrics to be populated"...; done;
125-
# -
126-
# name: Validate Project Prometheus Targets
127-
# run: ./.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh;
128-
# -
129-
# name: Validate Project Grafana Datasources
130-
# run: ./.github/workflows/e2e/scripts/validate-project-grafana-datasource.sh;
131-
# -
132-
# name: Validate Project Grafana Dashboards
133-
# run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboards.sh;
134-
# #-
135-
# #name: Validate Project Grafana Dashboard Data
136-
# #run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboard-data.sh;
137-
# -
138-
# name: Validate Project Prometheus Alerts
139-
# run: ./.github/workflows/e2e/scripts/validate-project-prometheus-alerts.sh;
140-
# -
141-
# name: Validate Project Alertmanager
142-
# run: ./.github/workflows/e2e/scripts/validate-project-alertmanager.sh;
143-
# -
144-
# name: Delete Project Prometheus Stack
145-
# run: ./.github/workflows/e2e/scripts/delete-projecthelmchart.sh;
127+
-
128+
name: Create Project Monitoring Stack via ProjectHelmChart CR
129+
run: DEFAULT_SLEEP_TIMEOUT_SECONDS=20 ./.github/workflows/e2e/scripts/create-projecthelmchart.sh;
130+
-
131+
name: Check if the Project Prometheus Stack is up
132+
run: ./.github/workflows/e2e/scripts/validate-project-monitoring.sh;
133+
-
134+
name: Wait for 8 minutes for enough scraping to be done to continue
135+
run: |
136+
for i in {1..48}; do sleep 10; echo "Waited $((i*10)) seconds for metrics to be populated"...; done;
137+
-
138+
name: Validate Project Prometheus Targets
139+
run: ./.github/workflows/e2e/scripts/validate-project-prometheus-targets.sh;
140+
-
141+
name: Validate Project Grafana Datasources
142+
run: ./.github/workflows/e2e/scripts/validate-project-grafana-datasource.sh;
143+
-
144+
name: Validate Project Grafana Dashboards
145+
run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboards.sh;
146+
# Re-disable this as it's been broken since Jun 28, 2023
147+
# More context: https://github.com/rancher/prometheus-federator/pull/73
148+
# -
149+
# name: Validate Project Grafana Dashboard Data
150+
# run: ./.github/workflows/e2e/scripts/validate-project-grafana-dashboard-data.sh;
151+
-
152+
name: Validate Project Prometheus Alerts
153+
run: ./.github/workflows/e2e/scripts/validate-project-prometheus-alerts.sh;
154+
-
155+
name: Validate Project Alertmanager
156+
run: ./.github/workflows/e2e/scripts/validate-project-alertmanager.sh;
157+
-
158+
name: Delete Project Prometheus Stack
159+
run: ./.github/workflows/e2e/scripts/delete-projecthelmchart.sh;
146160
-
147161
name: Uninstall Prometheus Federator
148162
run: ./.github/workflows/e2e/scripts/uninstall-federator.sh;

.github/workflows/e2e/scripts/create-project-namespace.sh

+7-2
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,13 @@ source $(dirname $0)/entry
66

77
cd $(dirname $0)/../../../..
88

9-
kubectl create namespace e2e-prometheus-federator || true
10-
kubectl label namespace e2e-prometheus-federator field.cattle.io/projectId=p-example --overwrite
9+
USE_RANCHER=${USE_RANCHER:-"false"}
10+
if [ "$USE_RANCHER" = "true" ]; then
11+
kubectl apply -f ./examples/ci/project.yaml
12+
fi
13+
14+
kubectl apply -f ./examples/ci/namespace.yaml
15+
1116
sleep "${DEFAULT_SLEEP_TIMEOUT_SECONDS}"
1217
if ! kubectl get namespace cattle-project-p-example; then
1318
echo "ERROR: Expected cattle-project-p-example namespace to exist after ${DEFAULT_SLEEP_TIMEOUT_SECONDS} seconds, not found"

.github/workflows/e2e/scripts/create-projecthelmchart.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,14 @@ source $(dirname $0)/entry
77
cd $(dirname $0)/../../../..
88

99
if [[ "${E2E_CI}" == "true" ]]; then
10-
kubectl apply -f ./examples/ci-example.yaml
10+
kubectl apply -f ./examples/ci/project-helm-chart.yaml
1111
else
12-
kubectl apply -f ./examples/example.yaml
12+
kubectl apply -f ./examples/project-helm-chart.yaml
1313
fi
1414
sleep ${DEFAULT_SLEEP_TIMEOUT_SECONDS};
1515

1616
if ! kubectl get -n cattle-monitoring-system job/helm-install-cattle-project-p-example-monitoring; then
17-
echo "ERROR: Helm Install Job for Project Monitoring Stack was never created after ${KUBECTL_WAIT_TIMEOUT} seconds"
17+
echo "ERROR: Helm Install Job for Project Monitoring Stack was never created after ${DEFAULT_SLEEP_TIMEOUT_SECONDS} seconds"
1818
exit 1
1919
fi
2020

.github/workflows/e2e/scripts/delete-projecthelmchart.sh

+5-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,11 @@ source $(dirname $0)/entry
66

77
cd $(dirname $0)/../../../..
88

9-
kubectl delete -f ./examples/ci-example.yaml
9+
if [[ "${E2E_CI}" == "true" ]]; then
10+
kubectl delete -f ./examples/ci/project-helm-chart.yaml
11+
else
12+
kubectl delete -f ./examples/project-helm-chart.yaml
13+
fi
1014
if kubectl get -n cattle-monitoring-system job/helm-delete-cattle-project-p-example-monitoring --ignore-not-found; then
1115
if ! kubectl wait --for=condition=complete --timeout="${KUBECTL_WAIT_TIMEOUT}" -n cattle-monitoring-system job/helm-delete-cattle-project-p-example-monitoring; then
1216
echo "ERROR: Helm Uninstall Job for Project Monitoring Stack never completed after ${KUBECTL_WAIT_TIMEOUT}"

.github/workflows/e2e/scripts/generate-artifacts.sh

+31-7
Original file line numberDiff line numberDiff line change
@@ -38,17 +38,41 @@ MANIFEST_DIRECTORY=${ARTIFACT_DIRECTORY}/manifests
3838
LOG_DIRECTORY=${ARTIFACT_DIRECTORY}/logs
3939

4040
# Manifests
41-
4241
mkdir -p ${MANIFEST_DIRECTORY}
43-
kubectl get pods -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/monitoring_pods.yaml || true
44-
kubectl get pods -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/project_pods.yaml || true
42+
mkdir -p ${MANIFEST_DIRECTORY}/helmcharts
43+
mkdir -p ${MANIFEST_DIRECTORY}/helmreleases
44+
mkdir -p ${MANIFEST_DIRECTORY}/daemonsets
45+
mkdir -p ${MANIFEST_DIRECTORY}/deployments
46+
mkdir -p ${MANIFEST_DIRECTORY}/jobs
47+
mkdir -p ${MANIFEST_DIRECTORY}/statefulsets
48+
mkdir -p ${MANIFEST_DIRECTORY}/pods
49+
mkdir -p ${MANIFEST_DIRECTORY}/projecthelmcharts
50+
4551
kubectl get namespaces -o yaml > ${MANIFEST_DIRECTORY}/namespaces.yaml || true
46-
kubectl get projecthelmchart -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/projecthelmcharts.yaml || true
47-
kubectl get helmcharts -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmcharts.yaml || true
48-
kubectl get helmreleases -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmreleases.yaml || true
52+
kubectl get helmcharts -A > ${MANIFEST_DIRECTORY}/helmcharts-list.txt || true
53+
kubectl get services -A > ${MANIFEST_DIRECTORY}/services-list.txt || true
4954

50-
# Logs
55+
## cattle-monitoring-system ns manifests
56+
kubectl get helmcharts -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmcharts/cattle-monitoring-system.yaml || true
57+
kubectl get helmreleases -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/helmreleases/cattle-monitoring-system.yaml || true
58+
kubectl get daemonset -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/daemonsets/cattle-monitoring-system.yaml || true
59+
kubectl get deployment -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/deployments/cattle-monitoring-system.yaml || true
60+
kubectl get job -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/jobs/cattle-monitoring-system.yaml || true
61+
kubectl get statefulset -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-monitoring-system.yaml || true
62+
kubectl get pods -n cattle-monitoring-system -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-monitoring-system.yaml || true
5163

64+
## cattle-project-p-example ns manifests
65+
kubectl get deployment -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/deployments/cattle-project-p-example.yaml || true
66+
kubectl get projecthelmchart -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/projecthelmcharts/cattle-project-p-example.yaml || true
67+
kubectl get statefulset -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-project-p-example.yaml || true
68+
kubectl get pods -n cattle-project-p-example -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-project-p-example.yaml || true
69+
70+
## cattle-project-p-example-monitoring ns manifests
71+
kubectl get deployment -n cattle-project-p-example-monitoring -o yaml > ${MANIFEST_DIRECTORY}/deployments/cattle-project-p-example-monitoring.yaml || true
72+
kubectl get statefulset -n cattle-project-p-example-monitoring -o yaml > ${MANIFEST_DIRECTORY}/statefulsets/cattle-project-p-example-monitoring.yaml || true
73+
kubectl get pods -n cattle-project-p-example-monitoring -o yaml > ${MANIFEST_DIRECTORY}/pods/cattle-project-p-example-monitoring.yaml || true
74+
75+
# Logs
5276
mkdir -p ${LOG_DIRECTORY}/rancher-monitoring
5377

5478
## Rancher Monitoring

.github/workflows/e2e/scripts/install-federator.sh

+2-2
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ case "${KUBERNETES_DISTRIBUTION_TYPE}" in
2525
cluster_args="--set helmProjectOperator.helmController.enabled=false"
2626
fi
2727
;;
28-
*)
28+
v1.25.*)
2929
embedded_helm_controller_fixed_version="v1.25.4"
3030
if [[ $(echo ${kubernetes_version} ${embedded_helm_controller_fixed_version} | tr " " "\n" | sort -rV | head -n 1 ) == "${embedded_helm_controller_fixed_version}" ]]; then
3131
cluster_args="--set helmProjectOperator.helmController.enabled=false"
@@ -52,7 +52,7 @@ case "${KUBERNETES_DISTRIBUTION_TYPE}" in
5252
cluster_args="--set helmProjectOperator.helmController.enabled=false"
5353
fi
5454
;;
55-
*)
55+
v1.25.*)
5656
embedded_helm_controller_fixed_version="v1.25.4"
5757
if [[ $(echo ${kubernetes_version} ${embedded_helm_controller_fixed_version} | tr " " "\n" | sort -rV | head -n 1 ) == "${embedded_helm_controller_fixed_version}" ]]; then
5858
cluster_args="--set helmProjectOperator.helmController.enabled=false"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
#!/bin/bash
#
# install-k3d.sh — download and install k3d via its official install script.
#
# Env:
#   K3D_VERSION - k3d release tag to install (default: v5.7.4)

# Fail fast; trace commands so CI logs show exactly what ran.
set -e
set -x

readonly K3D_URL=https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh
readonly DEFAULT_K3D_VERSION=v5.7.4

# Fetch the upstream installer and run it pinned to the requested version.
install_k3d(){
  local k3dVersion="${K3D_VERSION:-${DEFAULT_K3D_VERSION}}"
  # printf instead of `echo -e`: the message contains no escapes and
  # printf's behavior is portable.
  printf 'Downloading k3d@%s see: %s\n' "${k3dVersion}" "${K3D_URL}"
  # --fail makes curl exit non-zero on HTTP errors so `set -e` aborts
  # instead of piping an HTML error page into bash.
  curl --silent --fail "${K3D_URL}" | TAG="${k3dVersion}" bash
}

install_k3d

# Sanity check: confirm the binary is on PATH and report what was installed.
k3d version

.github/workflows/e2e/scripts/install-monitoring.sh

+3
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,9 @@ helm version
1313
helm repo add ${HELM_REPO} https://charts.rancher.io
1414
helm repo update
1515

16+
echo "Create required \`cattle-fleet-system\` namespace"
17+
kubectl create namespace cattle-fleet-system
18+
1619
echo "Installing rancher monitoring crd with :\n"
1720

1821
helm search repo ${HELM_REPO}/rancher-monitoring-crd --versions --max-col-width=0 | head -n 2

0 commit comments

Comments
 (0)