diff --git a/gloo-gateway/1-17/enterprise-istio-ambient/default/README.md b/gloo-gateway/1-17/enterprise-istio-ambient/default/README.md
index be8a4e5d08..51e5c73815 100644
--- a/gloo-gateway/1-17/enterprise-istio-ambient/default/README.md
+++ b/gloo-gateway/1-17/enterprise-istio-ambient/default/README.md
@@ -969,6 +969,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 5 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -1042,13 +1043,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1125,13 +1119,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4664,13 +4651,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
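+  // The {{ ... }} tokens above are Liquid template tags: the workshop tooling
+  // renders this .liquid file into a plain test.js before mocha runs it.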
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
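+    // NOTE: this test sleeps for delaySeconds (default 5s), longer than mocha's
+    // default 2s timeout; the workshop invokes mocha with --timeout 10000.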
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
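For reference, this `.liquid` file is rendered into a plain `test.js` before mocha runs it, so the `{{ ... }}` tags are substituted at generation time. A sketch of what the header becomes with the Liquid defaults (assuming no `context` or `delay` values are supplied to the template):

```js
// Sketch of the rendered header with the Liquid defaults.
// Liquid substitutes the *name* of the environment variable, so the
// kubectl context is read from the CLUSTER1 env var at runtime.
let contextName = process.env.CLUSTER1; // e.g. "cluster1"
let delaySeconds = 5;                   // seconds between the two snapshot reads
```

The stability check then keys off `jest-diff` returning the literal string `Compared values have no visual difference.` when the two serialized snapshots match, which is exactly what the final assertion looks for.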
+
diff --git a/gloo-gateway/1-17/enterprise-istio-sidecar/default/README.md b/gloo-gateway/1-17/enterprise-istio-sidecar/default/README.md
index 047e9ac9df..78c7e997a1 100644
--- a/gloo-gateway/1-17/enterprise-istio-sidecar/default/README.md
+++ b/gloo-gateway/1-17/enterprise-istio-sidecar/default/README.md
@@ -920,6 +920,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 5 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -992,13 +993,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1075,13 +1069,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4614,13 +4601,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/enterprise-vm/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/enterprise-vm/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/enterprise-vm/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/enterprise/default/README.md b/gloo-gateway/1-17/enterprise/default/README.md
index d69bf3905e..b1316c6907 100644
--- a/gloo-gateway/1-17/enterprise/default/README.md
+++ b/gloo-gateway/1-17/enterprise/default/README.md
@@ -861,6 +861,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 4 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -932,13 +933,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1015,13 +1009,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4554,13 +4541,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/enterprise/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/enterprise/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/enterprise/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/gloo-edge/default/README.md b/gloo-gateway/1-17/gloo-edge/default/README.md
index 1701e6827a..69b03cd59a 100644
--- a/gloo-gateway/1-17/gloo-edge/default/README.md
+++ b/gloo-gateway/1-17/gloo-edge/default/README.md
@@ -2743,6 +2743,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 12 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -2814,13 +2815,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -2897,13 +2891,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -6436,13 +6423,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/gloo-edge/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/gloo-edge/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/gloo-edge/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/gloo-mesh/default/README.md b/gloo-gateway/1-17/gloo-mesh/default/README.md
index dd76680b2b..3a5749b4b1 100644
--- a/gloo-gateway/1-17/gloo-mesh/default/README.md
+++ b/gloo-gateway/1-17/gloo-mesh/default/README.md
@@ -3557,6 +3557,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 21 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -3630,13 +3631,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -3713,13 +3707,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -7252,13 +7239,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/gloo-mesh/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/gloo-mesh/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/gloo-mesh/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/openshift/README.md b/gloo-gateway/1-17/openshift/README.md
index 1f0eea45dd..b9772bf9af 100644
--- a/gloo-gateway/1-17/openshift/README.md
+++ b/gloo-gateway/1-17/openshift/README.md
@@ -1096,6 +1096,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 6 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -1168,13 +1169,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1251,13 +1245,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4799,13 +4786,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/openshift/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/openshift/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/openshift/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/oss-istio-ambient/README.md b/gloo-gateway/1-17/oss-istio-ambient/README.md
index ae580dea55..ea9639c66a 100644
--- a/gloo-gateway/1-17/oss-istio-ambient/README.md
+++ b/gloo-gateway/1-17/oss-istio-ambient/README.md
@@ -293,6 +293,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 4 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -364,13 +365,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -447,13 +441,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -2834,13 +2821,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/oss-istio-ambient/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/oss-istio-ambient/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/oss-istio-ambient/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-17/oss/default/README.md b/gloo-gateway/1-17/oss/default/README.md
index c6a5d69663..388589865d 100644
--- a/gloo-gateway/1-17/oss/default/README.md
+++ b/gloo-gateway/1-17/oss/default/README.md
@@ -202,6 +202,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
## Lab 3 - Deploy the httpbin demo app
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -273,13 +274,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -356,13 +350,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -2743,13 +2730,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
diff --git a/gloo-gateway/1-17/oss/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-17/oss/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-17/oss/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-18/enterprise-istio-ambient/default/README.md b/gloo-gateway/1-18/enterprise-istio-ambient/default/README.md
index 9186ccb81d..b5d8a7c5b0 100644
--- a/gloo-gateway/1-18/enterprise-istio-ambient/default/README.md
+++ b/gloo-gateway/1-18/enterprise-istio-ambient/default/README.md
@@ -881,7 +881,7 @@ helm repo update
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context $CLUSTER1 \
--set-string license_key=$LICENSE_KEY \
-f -<<EOF
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -1053,13 +1054,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1136,13 +1130,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4932,13 +4919,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -7014,7 +6994,7 @@ We can now configure the Gloo Gateway portal backend to use it:
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context ${CLUSTER1} \
--reuse-values \
-f -<<EOF
diff --git a/gloo-gateway/1-18/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-18/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise-istio-ambient/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-18/enterprise-istio-sidecar/default/README.md b/gloo-gateway/1-18/enterprise-istio-sidecar/default/README.md
index 13f7836eab..8dfc516ab2 100644
--- a/gloo-gateway/1-18/enterprise-istio-sidecar/default/README.md
+++ b/gloo-gateway/1-18/enterprise-istio-sidecar/default/README.md
@@ -826,7 +826,7 @@ helm repo update
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context $CLUSTER1 \
--set-string license_key=$LICENSE_KEY \
-f -<<EOF
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -1002,13 +1003,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1085,13 +1079,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4881,13 +4868,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -6963,7 +6943,7 @@ We can now configure the Gloo Gateway portal backend to use it:
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context ${CLUSTER1} \
--reuse-values \
-f -<<EOF
diff --git a/gloo-gateway/1-18/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-18/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise-istio-sidecar/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-18/enterprise/ai-gateway/README.md b/gloo-gateway/1-18/enterprise/ai-gateway/README.md
index 03befb3cc0..b1edb5be12 100644
--- a/gloo-gateway/1-18/enterprise/ai-gateway/README.md
+++ b/gloo-gateway/1-18/enterprise/ai-gateway/README.md
@@ -142,7 +142,7 @@ helm repo update
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context $CLUSTER1 \
--set-string license_key=$LICENSE_KEY \
-f -<<EOF
diff --git a/gloo-gateway/1-18/enterprise/ai-gateway/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-18/enterprise/ai-gateway/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/ai-gateway/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-18/enterprise/default/README.md b/gloo-gateway/1-18/enterprise/default/README.md
index 703869c8f6..61da976952 100644
--- a/gloo-gateway/1-18/enterprise/default/README.md
+++ b/gloo-gateway/1-18/enterprise/default/README.md
@@ -21,27 +21,28 @@ source ./scripts/assert.sh
* [Lab 4 - Deploy the httpbin demo app](#lab-4---deploy-the-httpbin-demo-app-)
* [Lab 5 - Expose the httpbin application through the gateway](#lab-5---expose-the-httpbin-application-through-the-gateway-)
* [Lab 6 - Delegate with control](#lab-6---delegate-with-control-)
-* [Lab 7 - Modify the requests and responses](#lab-7---modify-the-requests-and-responses-)
-* [Lab 8 - Split traffic between 2 backend services](#lab-8---split-traffic-between-2-backend-services-)
-* [Lab 9 - Securing the access with OAuth](#lab-9---securing-the-access-with-oauth-)
-* [Lab 10 - Use the transformation filter to manipulate headers](#lab-10---use-the-transformation-filter-to-manipulate-headers-)
-* [Lab 11 - Apply rate limiting to the Gateway](#lab-11---apply-rate-limiting-to-the-gateway-)
-* [Lab 12 - Use the JWT filter to validate JWT and create headers from claims](#lab-12---use-the-jwt-filter-to-validate-jwt-and-create-headers-from-claims-)
-* [Lab 13 - Use the Web Application Firewall filter](#lab-13---use-the-web-application-firewall-filter-)
-* [Lab 14 - Validate and authorize client certificates](#lab-14---validate-and-authorize-client-certificates-)
-* [Lab 15 - Use the `cache-control` response header to cache responses](#lab-15---use-the-`cache-control`-response-header-to-cache-responses-)
-* [Lab 16 - Deploy Argo Rollouts](#lab-16---deploy-argo-rollouts-)
-* [Lab 17 - Roll out a new app version using Argo Rollouts](#lab-17---roll-out-a-new-app-version-using-argo-rollouts-)
-* [Lab 18 - Expose a service through TCP](#lab-18---expose-a-service-through-tcp-)
-* [Lab 19 - Deploy the Bookinfo sample application](#lab-19---deploy-the-bookinfo-sample-application-)
-* [Lab 20 - Expose the productpage API securely](#lab-20---expose-the-productpage-api-securely-)
-* [Lab 21 - Expose an external API and stitch it with the productpage API](#lab-21---expose-an-external-api-and-stitch-it-with-the-productpage-api-)
-* [Lab 22 - Expose the dev portal backend](#lab-22---expose-the-dev-portal-backend-)
-* [Lab 23 - Deploy and expose the dev portal frontend](#lab-23---deploy-and-expose-the-dev-portal-frontend-)
-* [Lab 24 - Demonstrate the self service capabilities](#lab-24---demonstrate-the-self-service-capabilities-)
-* [Lab 25 - Dev portal monetization](#lab-25---dev-portal-monetization-)
-* [Lab 26 - Deploy Backstage with the backend plugin](#lab-26---deploy-backstage-with-the-backend-plugin-)
-* [Lab 27 - Deploy OpenTelemetry Collector](#lab-27---deploy-opentelemetry-collector-)
+* [Lab 7 - Direct response](#lab-7---direct-response-)
+* [Lab 8 - Modify the requests and responses](#lab-8---modify-the-requests-and-responses-)
+* [Lab 9 - Split traffic between 2 backend services](#lab-9---split-traffic-between-2-backend-services-)
+* [Lab 10 - Securing the access with OAuth](#lab-10---securing-the-access-with-oauth-)
+* [Lab 11 - Use the transformation filter to manipulate headers](#lab-11---use-the-transformation-filter-to-manipulate-headers-)
+* [Lab 12 - Apply rate limiting to the Gateway](#lab-12---apply-rate-limiting-to-the-gateway-)
+* [Lab 13 - Use the JWT filter to validate JWT and create headers from claims](#lab-13---use-the-jwt-filter-to-validate-jwt-and-create-headers-from-claims-)
+* [Lab 14 - Use the Web Application Firewall filter](#lab-14---use-the-web-application-firewall-filter-)
+* [Lab 15 - Validate and authorize client certificates](#lab-15---validate-and-authorize-client-certificates-)
+* [Lab 16 - Use the `cache-control` response header to cache responses](#lab-16---use-the-`cache-control`-response-header-to-cache-responses-)
+* [Lab 17 - Deploy Argo Rollouts](#lab-17---deploy-argo-rollouts-)
+* [Lab 18 - Roll out a new app version using Argo Rollouts](#lab-18---roll-out-a-new-app-version-using-argo-rollouts-)
+* [Lab 19 - Expose a service through TCP](#lab-19---expose-a-service-through-tcp-)
+* [Lab 20 - Deploy the Bookinfo sample application](#lab-20---deploy-the-bookinfo-sample-application-)
+* [Lab 21 - Expose the productpage API securely](#lab-21---expose-the-productpage-api-securely-)
+* [Lab 22 - Expose an external API and stitch it with the productpage API](#lab-22---expose-an-external-api-and-stitch-it-with-the-productpage-api-)
+* [Lab 23 - Expose the dev portal backend](#lab-23---expose-the-dev-portal-backend-)
+* [Lab 24 - Deploy and expose the dev portal frontend](#lab-24---deploy-and-expose-the-dev-portal-frontend-)
+* [Lab 25 - Demonstrate the self service capabilities](#lab-25---demonstrate-the-self-service-capabilities-)
+* [Lab 26 - Dev portal monetization](#lab-26---dev-portal-monetization-)
+* [Lab 27 - Deploy Backstage with the backend plugin](#lab-27---deploy-backstage-with-the-backend-plugin-)
+* [Lab 28 - Deploy OpenTelemetry Collector](#lab-28---deploy-opentelemetry-collector-)
@@ -774,7 +775,7 @@ helm repo update
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context $CLUSTER1 \
--set-string license_key=$LICENSE_KEY \
-f -<<EOF
+
We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
You can find more information about this application [here](http://httpbin.org/).
@@ -944,13 +946,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1027,13 +1022,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -1891,7 +1879,76 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
-## Lab 7 - Modify the requests and responses
+## Lab 7 - Direct response
+
+The Kubernetes Gateway API doesn't provide the ability to configure direct responses (yet), so we've added this capability with a custom filter.
+
+You need to create a `DirectResponse` object (in this example, to provide a health check endpoint).
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+EOF
+```
+
+<!--bash
+cat <<'EOF' > ./test.js
+const helpersHttp = require('./tests/chai-http');
+
+describe("Direct response returns 200", () => {
+ it('Checking \'200\' status code', () => helpersHttp.checkURL({ host: `https://httpbin.example.com`, path: '/health', retCode: 200 }));
+})
+EOF
+echo "executing test dist/gloo-gateway-workshop/build/templates/steps/apps/httpbin/direct-response/tests/direct-response.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+-->
+
+
+
+## Lab 8 - Modify the requests and responses
The Kubernetes Gateway API provides different options to add/update/remove request and response headers.
@@ -2369,7 +2426,7 @@ kubectl delete --context ${CLUSTER1} -n httpbin routeoption routeoption
-## Lab 8 - Split traffic between 2 backend services
+## Lab 9 - Split traffic between 2 backend services
You can split traffic between different backends, with different weights.
@@ -2442,7 +2499,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
-## Lab 9 - Securing the access with OAuth
+## Lab 10 - Securing the access with OAuth
In this step, we're going to secure the access to the `httpbin` service using OAuth.
@@ -2687,7 +2744,7 @@ If you open the browser in incognito and login using the username `user2` and th
-## Lab 10 - Use the transformation filter to manipulate headers
+## Lab 11 - Use the transformation filter to manipulate headers
In this step, we're going to use a regular expression to extract a part of an existing header and to create a new one:
@@ -2741,7 +2798,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
-## Lab 11 - Apply rate limiting to the Gateway
+## Lab 12 - Apply rate limiting to the Gateway
In this step, we're going to apply rate limiting to the Gateway to only allow 3 requests per minute for the users of the `solo.io` organization.
@@ -2852,7 +2909,7 @@ kubectl delete --context ${CLUSTER1} -n httpbin routeoption routeoption
-## Lab 12 - Use the JWT filter to validate JWT and create headers from claims
+## Lab 13 - Use the JWT filter to validate JWT and create headers from claims
In this step, we're going to validate the JWT token and to create a new header from the `email` claim.
@@ -3090,7 +3147,7 @@ kubectl --context ${CLUSTER1} -n gloo-system delete virtualhostoption jwt
-## Lab 13 - Use the Web Application Firewall filter
+## Lab 14 - Use the Web Application Firewall filter
A web application firewall (WAF) protects web applications by monitoring, filtering, and blocking potentially harmful traffic and attacks that can overtake or exploit them.
@@ -3175,7 +3232,7 @@ kubectl delete --context ${CLUSTER1} -n gloo-system routeoption waf
-## Lab 14 - Validate and authorize client certificates
+## Lab 15 - Validate and authorize client certificates
In this step, we're going to secure the access to the httpbin service using mutual TLS (mTLS), and apply further authorization based on information in the client certificate.
@@ -3594,7 +3651,7 @@ kubectl --context ${CLUSTER1} -n httpbin delete RouteOption routeoption
-## Lab 15 - Use the `cache-control` response header to cache responses
+## Lab 16 - Use the `cache-control` response header to cache responses
An HTTP or HTTPS listener on your gateway can be configured to cache responses for upstream services.
When the listener routes a request to an upstream service, the response from the upstream is automatically cached by the caching server if it contains a `cache-control` response header.
@@ -3851,7 +3908,7 @@ kubectl --context ${CLUSTER1} -n gloo-system delete httplisteneroption cache
-## Lab 16 - Deploy Argo Rollouts
+## Lab 17 - Deploy Argo Rollouts
[Argo Rollouts](https://argoproj.github.io/rollouts/) is a declarative progressive delivery tool for Kubernetes that we can use to update applications gradually, using a blue/green or canary strategy to manage the rollout.
@@ -3887,7 +3944,7 @@ Now we're ready to use Argo Rollouts to progressively update applications as par
-## Lab 17 - Roll out a new app version using Argo Rollouts
+## Lab 18 - Roll out a new app version using Argo Rollouts
We're going to use Argo Rollouts to gradually deliver an upgraded version of our httpbin application.
To do this, we'll define a resource that lets Argo Rollouts know how we want it to handle updates to our application,
@@ -4823,13 +4880,6 @@ spec:
httpGet:
path: /status/200
port: http
- resources:
- limits:
- cpu: 1
- memory: 512Mi
- requests:
- cpu: 100m
- memory: 256Mi
env:
- name: K8S_MEM_LIMIT
valueFrom:
@@ -4863,7 +4913,7 @@ EOF
-## Lab 18 - Expose a service through TCP
+## Lab 19 - Expose a service through TCP
Gloo Gateway allows you to expose TCP services using `TCPRoutes`.
@@ -4951,7 +5001,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail ||
-## Lab 19 - Deploy the Bookinfo sample application
+## Lab 20 - Deploy the Bookinfo sample application
[Video Link](https://youtu.be/nzYcrjalY5A "Video Link")
We're going to deploy the Bookinfo sample application to demonstrate several features of Gloo Gateway.
@@ -4994,7 +5044,7 @@ Configure your hosts file to resolve bookinfo.example.com with the IP address of
-## Lab 20 - Expose the productpage API securely
+## Lab 21 - Expose the productpage API securely
Gloo Gateway includes a developer portal, which provides a framework for managing API discovery, API client identity, and API policies.
@@ -5542,7 +5592,7 @@ EOF
-## Lab 21 - Expose an external API and stitch it with the productpage API
+## Lab 22 - Expose an external API and stitch it with the productpage API
You can also use Gloo Gateway to expose an API that is outside of the cluster. In this section, we will expose `https://openlibrary.org/search.json`
@@ -5809,7 +5859,7 @@ EOF
-## Lab 22 - Expose the dev portal backend
+## Lab 23 - Expose the dev portal backend
Now that your API has been exposed securely and our plans defined, let's advertise this API through a developer portal.
@@ -5971,7 +6021,7 @@ We'll create it later.
-## Lab 23 - Deploy and expose the dev portal frontend
+## Lab 24 - Deploy and expose the dev portal frontend
The developer frontend is provided as a fully functional template to allow you to customize it based on your own requirements.
@@ -6372,7 +6422,7 @@ kubectl --context ${CLUSTER1} -n gloo-system delete portalgroups.portal.gloo.sol
-## Lab 24 - Demonstrate the self service capabilities
+## Lab 25 - Demonstrate the self service capabilities
We're going to demonstrate how to allow users to create their own teams and applications, subscribe to API Products and get credentials.
@@ -6794,7 +6844,7 @@ We can now configure the Gloo Gateway portal backend to use it:
helm upgrade -i -n gloo-system \
gloo-gateway gloo-ee-helm/gloo-ee \
--create-namespace \
- --version 1.18.0-rc4 \
+ --version 1.18.0-rc6 \
--kube-context ${CLUSTER1} \
--reuse-values \
-f -<<EOF
-## Lab 25 - Dev portal monetization
+## Lab 26 - Dev portal monetization
The `portalMetadata` section of the `ApiProduct` objects we've created previously is used to add some metadata in the access logs.
@@ -7133,7 +7183,7 @@ timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=150 --bail ||
-## Lab 26 - Deploy Backstage with the backend plugin
+## Lab 27 - Deploy Backstage with the backend plugin
Let's deploy Backstage:
@@ -7309,7 +7359,7 @@ timeout --signal=INT 6m mocha ./test.js --timeout 10000 --retries=250 --bail ||
-## Lab 27 - Deploy OpenTelemetry Collector
+## Lab 28 - Deploy OpenTelemetry Collector
Having metrics is essential for running applications reliably, and gateways are no exceptions.
diff --git a/gloo-gateway/1-18/enterprise/default/tests/proxies-changes.test.js.liquid b/gloo-gateway/1-18/enterprise/default/tests/proxies-changes.test.js.liquid
new file mode 100644
index 0000000000..46bbe1422e
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/default/tests/proxies-changes.test.js.liquid
@@ -0,0 +1,57 @@
+const { execSync } = require('child_process');
+const { expect } = require('chai');
+const { diff } = require('jest-diff');
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+describe('Gloo snapshot stability test', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+
+ let firstSnapshot;
+
+ it('should retrieve initial snapshot', function() {
+ const output = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+
+ try {
+ firstSnapshot = JSON.parse(output);
+ } catch (err) {
+ throw new Error('Failed to parse JSON output from initial snapshot: ' + err.message);
+ }
+ expect(firstSnapshot).to.be.an('object');
+ });
+
+ it('should not change after the given delay', async function() {
+ await delay(delaySeconds * 1000);
+
+ let secondSnapshot;
+ try {
+ const output2 = execSync(
+ `kubectl --context ${contextName} -n gloo-system exec deploy/gloo -- wget -O - localhost:9095/snapshots/proxies -q`,
+ { encoding: 'utf8' }
+ );
+ secondSnapshot = JSON.parse(output2);
+ } catch (err) {
+ throw new Error('Failed to retrieve or parse the second snapshot: ' + err.message);
+ }
+
+ const firstJson = JSON.stringify(firstSnapshot, null, 2);
+ const secondJson = JSON.stringify(secondSnapshot, null, 2);
+
+ // Show only 2 lines of context around each change
+ const diffOutput = diff(firstJson, secondJson, { contextLines: 2, expand: false });
+
+ if (! diffOutput.includes("Compared values have no visual difference.")) {
+ console.error('Differences found between snapshots:\n' + diffOutput);
+ throw new Error('Snapshots differ after the delay.');
+ } else {
+ console.log('No differences found. The snapshots are stable.');
+ }
+ });
+});
+
diff --git a/gloo-gateway/1-18/enterprise/lambda/README.md b/gloo-gateway/1-18/enterprise/lambda/README.md
new file mode 100644
index 0000000000..8497dadbdd
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/README.md
@@ -0,0 +1,1097 @@
+
+
+
+
+
+
+
+
+
+# Gloo Gateway Workshop
+
+
+
+## Table of Contents
+* [Introduction](#introduction)
+* [Lab 1 - Deploy KinD Cluster(s)](#lab-1---deploy-kind-cluster(s)-)
+* [Lab 2 - Deploy the Amazon pod identity webhook](#lab-2---deploy-the-amazon-pod-identity-webhook-)
+* [Lab 3 - Deploy Gloo Gateway](#lab-3---deploy-gloo-gateway-)
+* [Lab 4 - Deploy the httpbin demo app](#lab-4---deploy-the-httpbin-demo-app-)
+* [Lab 5 - Expose the httpbin application through the gateway](#lab-5---expose-the-httpbin-application-through-the-gateway-)
+* [Lab 6 - Execute Lambda functions](#lab-6---execute-lambda-functions-)
+
+
+
+## Introduction
+
+Gloo Gateway is a feature-rich, fast, and flexible Kubernetes-native ingress controller and next-generation API gateway that is built on top of Envoy proxy and the Kubernetes Gateway API.
+
+Gloo Gateway is fully conformant with the Kubernetes Gateway API and extends its functionality with Solo’s custom Gateway APIs, such as `RouteOption`, `VirtualHostOption`, `Upstream`s, `RateLimitConfig`, or `AuthConfig`.
+These resources help to centrally configure routing, security, and resiliency rules for a specific component, such as a host, route, or gateway listener.
+
+These capabilities are grouped into two editions of Gloo Gateway:
+
+### Open source (OSS) Gloo Gateway
+
+Use Kubernetes Gateway API-native features and the following Gloo Gateway extensions to configure basic routing, security, and resiliency capabilities:
+
+* Access logging
+* Buffering
+* Cross-Origin Resource Sharing (CORS)
+* Cross-Site Request Forgery (CSRF)
+* Fault injection
+* Header control
+* Retries
+* Timeouts
+* Traffic tapping
+* Transformations
+
+### Gloo Gateway Enterprise Edition
+
+In addition to the features provided by the OSS edition, many more features are available in the Enterprise Edition, including:
+
+* External authentication and authorization
+* External processing
+* Data loss prevention
+* Developer portal
+* JSON web token (JWT)
+* Rate limiting
+* Response caching
+* Web Application Filters
+
+### Want to learn more about Gloo Gateway?
+
+In the labs that follow, we present some of the common patterns that our customers use and provide a good entry point into the workings of Gloo Gateway.
+
+You can find more information about Gloo Gateway in the official documentation.
+
+
+
+
+## Lab 1 - Deploy KinD Cluster(s)
+
+
+Clone this repository and go to the directory where this `README.md` file is.
+
+Set the context environment variables:
+
+```bash
+export CLUSTER1=cluster1
+```
+
+Deploy the KinD clusters:
+
+```bash
+bash ./data/steps/deploy-kind-clusters/deploy-cluster1.sh
+```
+Then run the following commands to wait for all the Pods to be ready:
+
+```bash
+./scripts/check.sh cluster1
+```
+
+**Note:** If you run the `check.sh` script immediately after the deploy script, you may see a jsonpath error. If that happens, simply wait a few seconds and try again.
+
+Once the `check.sh` script completes, execute the `kubectl get pods -A` command, and verify that all pods are in a running state.
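+
+If you prefer a single check, the following standard `kubectl` field selector lists only the pods that are not in the `Running` phase (completed pods also show up here); an empty result means the cluster is ready:
+
+```bash
+kubectl --context $CLUSTER1 get pods -A --field-selector=status.phase!=Running
+```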
+
+
+
+
+
+## Lab 2 - Deploy the Amazon pod identity webhook
+
+To use the AWS Lambda integration, we need to deploy the Amazon EKS pod identity webhook.
+
+A prerequisite is to install [Cert Manager](https://cert-manager.io/):
+
+```bash
+wget https://github.com/cert-manager/cert-manager/releases/download/v1.12.4/cert-manager.yaml
+
+kubectl --context ${CLUSTER1} apply -f cert-manager.yaml
+```
+
+Wait for cert-manager to be running:
+
+```bash
+kubectl --context ${CLUSTER1} -n cert-manager rollout status deploy cert-manager
+kubectl --context ${CLUSTER1} -n cert-manager rollout status deploy cert-manager-cainjector
+kubectl --context ${CLUSTER1} -n cert-manager rollout status deploy cert-manager-webhook
+```
+
+Now, you can install the Amazon EKS pod identity webhook:
+
+```bash
+kubectl --context ${CLUSTER1} apply -f data/steps/deploy-amazon-pod-identity-webhook
+```
+
+Wait for the pod identity webhook to be running:
+
+```bash
+kubectl --context ${CLUSTER1} rollout status deploy/pod-identity-webhook
+```
+
+
+
+
+## Lab 3 - Deploy Gloo Gateway
+
+You can deploy Gloo Gateway with the `glooctl` CLI or declaratively using Helm.
+
+We're going to use the Helm option.
+
+Install the Kubernetes Gateway API CRDs, as they are not installed by default on most Kubernetes clusters.
+
+```bash
+kubectl --context $CLUSTER1 apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.0/experimental-install.yaml
+```
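+
+You can confirm that the CRDs were installed, for example:
+
+```bash
+kubectl --context $CLUSTER1 get crd gateways.gateway.networking.k8s.io
+```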
+
+
+
+Next, install Gloo Gateway. This command installs the Gloo Gateway control plane into the namespace `gloo-system`.
+
+```bash
+helm repo add gloo-ee-helm https://storage.googleapis.com/gloo-ee-helm
+
+helm repo update
+
+helm upgrade -i -n gloo-system \
+ gloo-gateway gloo-ee-helm/gloo-ee \
+ --create-namespace \
+ --version 1.18.0-rc6 \
+ --kube-context $CLUSTER1 \
+ --set-string license_key=$LICENSE_KEY \
+ -f -<
+```
+
+Run the following command to check that the Gloo Gateway pods are running:
+
+```bash
+kubectl --context $CLUSTER1 -n gloo-system get pods
+```
+
+Here is the expected output:
+
+```,nocopy
+NAME READY STATUS RESTARTS AGE
+caching-service-79cf55ccbb-dcvgp 1/1 Running 0 69s
+extauth-58f68c5cd5-gxgxc 1/1 Running 0 69s
+gateway-portal-web-server-5c5d58d8d5-7lzwg 1/1 Running 0 69s
+gloo-7d8994697-lfg5x 1/1 Running 0 69s
+gloo-resource-rollout-check-x8b77 0/1 Completed 0 69s
+gloo-resource-rollout-cjtgh 0/1 Completed 0 69s
+rate-limit-6db9c67794-vf7h2 1/1 Running 0 69s
+redis-6c7c489d8c-g2dhc 1/1 Running 0 69s
+```
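+
+You can also check that the Kubernetes Gateway API implementation has been registered; the `gloo-gateway` `GatewayClass` name shown here is an assumption based on the chart defaults:
+
+```bash
+kubectl --context $CLUSTER1 get gatewayclass
+```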
+
+
+
+
+
+## Lab 4 - Deploy the httpbin demo app
+
+
+We're going to deploy the httpbin application to demonstrate several features of Gloo Gateway.
+
+You can find more information about this application [here](http://httpbin.org/).
+
+Run the following commands to deploy the httpbin app twice (`httpbin1` and `httpbin2`).
+
+```bash
+kubectl --context ${CLUSTER1} create ns httpbin
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
+You can follow the progress using the following command:
+
+```shell
+kubectl --context ${CLUSTER1} -n httpbin get pods
+```
+
+Here is the expected output when both Pods are ready:
+
+```,nocopy
+NAME READY STATUS RESTARTS AGE
+httpbin1-7fdbf6498-ms7qt 1/1 Running 0 94s
+httpbin2-655777b846-6nrms 1/1 Running 0 93s
+```
+
+
+
+
+
+
+## Lab 5 - Expose the httpbin application through the gateway
+
+
+
+
+The team in charge of the gateway can create a `Gateway` resource and configure an HTTP listener.
+
+
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
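+The manifest being applied is similar to this sketch (the listener layout and the `gloo-gateway` class name are assumptions based on the chart defaults):
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: http
+  namespace: gloo-system
+spec:
+  gatewayClassName: gloo-gateway
+  listeners:
+  - name: http
+    protocol: HTTP
+    port: 80
+    allowedRoutes:
+      namespaces:
+        from: All
+EOF
+```
+
+The `PROXY_IP` variable used below can then be populated from the proxy service created for this gateway (the `gloo-proxy-http` service name follows the `gloo-proxy-<gateway>` convention used by the proxy deployment later in this lab):
+
+```bash
+export PROXY_IP=$(kubectl --context ${CLUSTER1} -n gloo-system get svc gloo-proxy-http \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+```
+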
+Configure your hosts file to resolve httpbin.example.com with the IP address of the proxy by executing the following command:
+
+```bash
+./scripts/register-domain.sh httpbin.example.com ${PROXY_IP}
+```
+
+Try to access the application through HTTP:
+
+```shell
+curl http://httpbin.example.com/get
+```
+
+Here is the expected output:
+
+```json,nocopy
+{
+ "args": {},
+ "headers": {
+ "Accept": [
+ "*/*"
+ ],
+ "Host": [
+ "httpbin.example.com"
+ ],
+ "User-Agent": [
+ "curl/8.5.0"
+ ],
+ "X-Forwarded-Proto": [
+ "http"
+ ],
+ "X-Request-Id": [
+ "d0998a48-7532-4eeb-ab69-23cef22185cf"
+ ]
+ },
+ "method": "GET",
+ "origin": "127.0.0.6:38917",
+ "url": "http://httpbin.example.com/get"
+}
+```
+
+
+
+Now, let's secure the access through TLS.
+Let's first create a private key and a self-signed certificate:
+
+```bash
+openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
+ -keyout tls.key -out tls.crt -subj "/CN=*"
+```
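+
+You can inspect the generated certificate to confirm its subject and validity window:
+
+```bash
+openssl x509 -in tls.crt -noout -subject -dates
+```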
+
+Then, you have to store it in a Kubernetes secret by running the following command:
+
+```bash
+kubectl create --context ${CLUSTER1} -n gloo-system secret tls tls-secret --key tls.key \
+ --cert tls.crt
+```
+
+Update the `Gateway` resource to add HTTPS listeners.
+
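+The HTTPS listener references the `tls-secret` created above; here is a sketch of the updated manifest (the listener names are assumptions):
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: http
+  namespace: gloo-system
+spec:
+  gatewayClassName: gloo-gateway
+  listeners:
+  - name: http
+    protocol: HTTP
+    port: 80
+    allowedRoutes:
+      namespaces:
+        from: All
+  - name: https
+    protocol: HTTPS
+    port: 443
+    tls:
+      mode: Terminate
+      certificateRefs:
+      - name: tls-secret
+        kind: Secret
+    allowedRoutes:
+      namespaces:
+        from: All
+EOF
+```
+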
+```bash
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
+Try to access the application through HTTPS:
+
+```shell
+curl -k https://httpbin.example.com/get
+```
+
+Here is the expected output:
+
+```json,nocopy
+{
+ "args": {},
+ "headers": {
+ "Accept": [
+ "*/*"
+ ],
+ "Host": [
+ "httpbin.example.com"
+ ],
+ "User-Agent": [
+ "curl/8.5.0"
+ ],
+ "X-Forwarded-Proto": [
+ "https"
+ ],
+ "X-Request-Id": [
+ "8e61c480-6373-4c38-824b-2bfe89e79d0c"
+ ]
+ },
+ "method": "GET",
+ "origin": "127.0.0.6:52655",
+ "url": "https://httpbin.example.com/get"
+}
+```
+
+
+
+The team in charge of the gateway can create an `HTTPRoute` to automatically redirect HTTP to HTTPS:
+
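+A redirect of this kind uses the standard Gateway API `RequestRedirect` filter; here is a sketch of such a route (the route name and the `sectionName` targeting the HTTP listener are assumptions):
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: httpbin-https-redirect
+  namespace: gloo-system
+spec:
+  parentRefs:
+  - name: http
+    namespace: gloo-system
+    sectionName: http
+  hostnames:
+  - httpbin.example.com
+  rules:
+  - filters:
+    - type: RequestRedirect
+      requestRedirect:
+        scheme: https
+        statusCode: 301
+EOF
+```
+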
+```bash
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
+<!--bash
+cat <<'EOF' > ./test.js
+const helpersHttp = require('./tests/chai-http');
+
+describe("location header correctly set", () => {
+ it('Checking text \'location\'', () => helpersHttp.checkHeaders({ host: `http://httpbin.example.com`, path: '/get', expectedHeaders: [{'key': 'location', 'value': `https://httpbin.example.com/get`}]}));
+})
+EOF
+echo "executing test dist/gloo-gateway-workshop/build/templates/steps/apps/httpbin/expose-httpbin/tests/redirect-http-to-https.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+-->
+
+
+
+
+## Lab 6 - Execute Lambda functions
+[Video link](https://youtu.be/gD6GLMlP-Qc "Video Link")
+
+First of all, you need to create an `Upstream` object corresponding to the AWS destination:
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
+The Lambda function we're going to invoke first is a simple echo function:
+
+```js,nocopy
+export const handler = async (event) => {
+  return event;
+};
+```
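+
+For reference, the `Upstream` is similar to this sketch (the region and function names are placeholders, and the authentication details are omitted; check the Gloo documentation for the exact fields):
+
+```bash
+kubectl apply --context ${CLUSTER1} -f - <<EOF
+apiVersion: gloo.solo.io/v1
+kind: Upstream
+metadata:
+  name: lambda
+  namespace: httpbin
+spec:
+  aws:
+    region: us-east-1
+    lambdaFunctions:
+    - logicalName: echo
+      lambdaFunctionName: echo
+EOF
+```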
+
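+The gateway proxy also needs permission to invoke the function. With the pod identity webhook deployed in Lab 2, this is done by annotating the proxy's service account with the IAM role to assume and restarting the proxy (the role ARN below is a placeholder):
+
+```bash
+kubectl --context ${CLUSTER1} -n gloo-system annotate sa -l gloo=kube-gateway \
+  "eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/lambda-workshop" --overwrite
+kubectl --context ${CLUSTER1} -n gloo-system rollout restart deploy gloo-proxy-http
+```
+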
+You should now be able to invoke the Lambda function using the following command:
+
+```shell
+curl -k "https://httpbin.example.com/lambda" -d '{"foo":"bar"}'
+```
+
+You should get a response like below:
+
+```js,nocopy
+{
+ "headers": {
+ ":authority": "httpbin.example.com",
+ ":method": "GET",
+ ":path": "/lambda",
+ ":scheme": "https",
+ "accept": "*/*",
+ "user-agent": "curl/8.5.0",
+ "x-forwarded-proto": "https",
+ "x-request-id": "118e4181-d8d6-4a64-a304-ac247b2a7d84"
+ },
+ "httpMethod": "GET",
+ "multiValueHeaders": {},
+ "multiValueQueryStringParameters": {},
+ "path": "/lambda",
+ "queryString": "",
+ "queryStringParameters": {}
+}
+
+```
+
+It's very similar to what the `httpbin` application provides: it displays information about the request it has received.
+
+But when a Lambda function is exposed through an AWS API Gateway, the response of the function should be in a specific format (see this [example](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html)).
+
+The Gloo Gateway integration has the ability to understand this format and to process the response in the same way an AWS API gateway would.
+
+Here is the Node.js Lambda function we're going to use to demonstrate this capability:
+
+```js,nocopy
+export const handler = async(event) => {
+ const response = {
+ "statusCode": 201,
+ "headers": {
+ "key": "value"
+ },
+ "isBase64Encoded": false,
+ "multiValueHeaders": {
+ "X-Custom-Header": ["My value", "My other value"],
+ },
+ "body": JSON.stringify({TotalCodeSize: 104330022,FunctionCount: 26})
+ }
+ return response;
+};
+```
+Let's update the `Upstream`:
+```bash
+kubectl apply --context ${CLUSTER1} -f - <
+```
+
+<!--bash
+cat <<'EOF' > ./test.js
+const helpersHttp = require('./tests/chai-http');
+
+describe("Lambda integration is working properly", () => {
+ it('Checking text \'"TotalCodeSize": 104330022\' in ' + process.env.CLUSTER1, () => helpersHttp.checkBody({ host: `https://httpbin.example.com`, path: '/lambda', body: '"TotalCodeSize": 104330022', match: true }));
+ it('Checking headers in ' + process.env.CLUSTER1, () => helpersHttp.checkHeaders({ host: `https://httpbin.example.com`, path: '/lambda', expectedHeaders: [{key: "key", value: "value"}, {key: "x-custom-header", value: "My value,My other value"}], match: true }));
+})
+EOF
+echo "executing test dist/gloo-gateway-workshop/build/templates/steps/gateway-lambda/tests/check-lambda-api-gateway.test.js.liquid"
+timeout --signal=INT 3m mocha ./test.js --timeout 10000 --retries=120 --bail || { DEBUG_MODE=true mocha ./test.js --timeout 120000; exit 1; }
+-->
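+
+You can verify the unwrapped response yourself; based on the function above, the status code should be 201 and the custom headers should be present:
+
+```shell
+curl -k -i https://httpbin.example.com/lambda
+```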
+
+Let's remove the annotation and restart the pods:
+
+```bash
+kubectl --context ${CLUSTER1} -n gloo-system annotate sa -l gloo=kube-gateway "eks.amazonaws.com/role-arn-"
+kubectl --context ${CLUSTER1} -n gloo-system rollout restart deploy gloo-proxy-http
+```
+
+And also delete the different objects we've created:
+
+```bash
+kubectl --context ${CLUSTER1} -n httpbin delete upstream lambda
+```
+
+
+
+
+
+
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/.gitkeep b/gloo-gateway/1-18/enterprise/lambda/data/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/auth.yaml b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/auth.yaml
new file mode 100644
index 0000000000..369436c244
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/auth.yaml
@@ -0,0 +1,78 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - update
+ - patch
+ resourceNames:
+ - "pod-identity-webhook"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: pod-identity-webhook
+subjects:
+- kind: ServiceAccount
+ name: pod-identity-webhook
+ namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: pod-identity-webhook
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - get
+ - watch
+ - list
+- apiGroups:
+ - certificates.k8s.io
+ resources:
+ - certificatesigningrequests
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: pod-identity-webhook
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: pod-identity-webhook
+subjects:
+- kind: ServiceAccount
+ name: pod-identity-webhook
+ namespace: default
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/deployment-base.yaml b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/deployment-base.yaml
new file mode 100644
index 0000000000..d5c8e6c384
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/deployment-base.yaml
@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: pod-identity-webhook
+ template:
+ metadata:
+ labels:
+ app: pod-identity-webhook
+ spec:
+ serviceAccountName: pod-identity-webhook
+ containers:
+ - name: pod-identity-webhook
+ image: amazon/amazon-eks-pod-identity-webhook:v0.5.0
+ imagePullPolicy: Always
+ command:
+ - /webhook
+ - --in-cluster=false
+ - --namespace=default
+ - --service-name=pod-identity-webhook
+ - --annotation-prefix=eks.amazonaws.com
+ - --token-audience=sts.amazonaws.com
+ - --logtostderr
+ volumeMounts:
+ - name: cert
+ mountPath: "/etc/webhook/certs"
+ readOnly: true
+ volumes:
+ - name: cert
+ secret:
+ secretName: pod-identity-webhook-cert
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: selfsigned
+spec:
+ selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+spec:
+ secretName: pod-identity-webhook-cert
+ commonName: "pod-identity-webhook.default.svc"
+ dnsNames:
+ - "pod-identity-webhook"
+ - "pod-identity-webhook.default"
+ - "pod-identity-webhook.default.svc"
+ - "pod-identity-webhook.default.svc.local"
+ isCA: true
+ duration: 2160h # 90d
+ renewBefore: 360h # 15d
+ issuerRef:
+ name: selfsigned
+ kind: ClusterIssuer
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/mutatingwebhook.yaml b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/mutatingwebhook.yaml
new file mode 100644
index 0000000000..c8e66e7325
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/mutatingwebhook.yaml
@@ -0,0 +1,22 @@
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+ annotations:
+ cert-manager.io/inject-ca-from: default/pod-identity-webhook
+webhooks:
+- name: pod-identity-webhook.amazonaws.com
+ failurePolicy: Ignore
+ clientConfig:
+ service:
+ name: pod-identity-webhook
+ namespace: default
+ path: "/mutate"
+ rules:
+ - operations: [ "CREATE" ]
+ apiGroups: [""]
+ apiVersions: ["v1"]
+ resources: ["pods"]
+ sideEffects: None
+ admissionReviewVersions: ["v1beta1"]
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/service.yaml b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/service.yaml
new file mode 100644
index 0000000000..4db1f51448
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-amazon-pod-identity-webhook/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: pod-identity-webhook
+ namespace: default
+ annotations:
+ prometheus.io/port: "443"
+ prometheus.io/scheme: "https"
+ prometheus.io/scrape: "true"
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ app: pod-identity-webhook
diff --git a/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-kind-clusters/deploy-cluster1.sh b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-kind-clusters/deploy-cluster1.sh
new file mode 100644
index 0000000000..4f97e5f1cc
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/data/steps/deploy-kind-clusters/deploy-cluster1.sh
@@ -0,0 +1,289 @@
+#!/usr/bin/env bash
+set -o errexit
+
+number="1"
+name="cluster1"
+region=""
+zone=""
+twodigits=$(printf "%02d\n" $number)
+
+kindest_node=${KINDEST_NODE}
+
+if [ -z "$kindest_node" ]; then
+ export k8s_version="1.28.0"
+
+ [[ ${k8s_version::1} != 'v' ]] && export k8s_version=v${k8s_version}
+ kindest_node_ver=$(curl --silent "https://registry.hub.docker.com/v2/repositories/kindest/node/tags?page_size=100" \
+ | jq -r '.results | .[] | select(.name==env.k8s_version) | .name+"@"+.digest')
+
+ if [ -z "$kindest_node_ver" ]; then
+ echo "Incorrect Kubernetes version provided: ${k8s_version}."
+ exit 1
+ fi
+ kindest_node=kindest/node:${kindest_node_ver}
+fi
+echo "Using KinD image: ${kindest_node}"
+
+if [ -z "$3" ]; then
+ case $name in
+ cluster1)
+ region=us-west-1
+ ;;
+ cluster2)
+ region=us-west-2
+ ;;
+ *)
+ region=us-east-1
+ ;;
+ esac
+fi
+
+if [ -z "$4" ]; then
+ case $name in
+ cluster1)
+ zone=us-west-1a
+ ;;
+ cluster2)
+ zone=us-west-2a
+ ;;
+ *)
+ zone=us-east-1a
+ ;;
+ esac
+fi
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+mkdir -p /tmp/oidc
+
+cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA
+1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL
+395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw
+zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm
+5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8
+2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9
+ywIDAQAB
+-----END PUBLIC KEY-----
+EOF
+
+cat <<'EOF' >/tmp/oidc/sa-signer.key
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ
++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui
+PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6
++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+
+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5
+f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG
+el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY
+FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh
+SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc
+r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv
+z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn
+7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy
+3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8
+PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy
+72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw
+BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo
+hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn
+WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+
+y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39
+0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR
+f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN
+b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc
+Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd
+qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q==
+-----END RSA PRIVATE KEY-----
+EOF
+
+echo Contents of kind${number}.yaml
+cat << EOF | tee kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ extraMounts:
+ - containerPath: /etc/kubernetes/oidc
+ hostPath: /tmp/oidc
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+kubeadmConfigPatches:
+- |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ service-account-key-file: /etc/kubernetes/pki/sa.pub
+ service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub
+ service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key
+ service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com
+ api-audiences: sts.amazonaws.com
+ extraVolumes:
+ - name: oidc
+ hostPath: /etc/kubernetes/oidc
+ mountPath: /etc/kubernetes/oidc
+ readOnly: true
+ metadata:
+ name: config
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+EOF
+echo -----------------------------------------------------
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF | tee metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# connect the registry to the cluster network if not already connected
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/document-gloo-gateway.svg b/gloo-gateway/1-18/enterprise/lambda/images/document-gloo-gateway.svg
new file mode 100644
index 0000000000..322368db75
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/images/document-gloo-gateway.svg
@@ -0,0 +1,12 @@
+
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/enterprise-features.png b/gloo-gateway/1-18/enterprise/lambda/images/enterprise-features.png
new file mode 100644
index 0000000000..707c843e13
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/enterprise-features.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-dark.png b/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-dark.png
new file mode 100644
index 0000000000..0fa184c849
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-dark.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-resource-model.png b/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-resource-model.png
new file mode 100644
index 0000000000..0397ad2698
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/gateway-api-resource-model.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/gloo-edge-architecture.png b/gloo-gateway/1-18/enterprise/lambda/images/gloo-edge-architecture.png
new file mode 100644
index 0000000000..b2048a65fb
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/gloo-edge-architecture.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/gloo-gateway-dark.svg b/gloo-gateway/1-18/enterprise/lambda/images/gloo-gateway-dark.svg
new file mode 100644
index 0000000000..dbc20ca046
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/images/gloo-gateway-dark.svg
@@ -0,0 +1,12 @@
+
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/portal-apis.png b/gloo-gateway/1-18/enterprise/lambda/images/portal-apis.png
new file mode 100644
index 0000000000..76858d10b7
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/portal-apis.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/security-workflow.png b/gloo-gateway/1-18/enterprise/lambda/images/security-workflow.png
new file mode 100644
index 0000000000..5a2249e81e
Binary files /dev/null and b/gloo-gateway/1-18/enterprise/lambda/images/security-workflow.png differ
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-ai-gateway.svg b/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-ai-gateway.svg
new file mode 100644
index 0000000000..9cca3ca903
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-ai-gateway.svg
@@ -0,0 +1,14 @@
+
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-gateway.svg b/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-gateway.svg
new file mode 100644
index 0000000000..9ca81f8a17
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/images/track-gloo-gateway.svg
@@ -0,0 +1,12 @@
+
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/assert.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/assert.sh
new file mode 100755
index 0000000000..75ba95ac90
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/assert.sh
@@ -0,0 +1,252 @@
+#!/usr/bin/env bash
+
+#####################################################################
+##
+## title: Assert Extension
+##
+## description:
+## Assert extension of shell (bash, ...)
+## with the common assert functions
+## Function list based on:
+## http://junit.sourceforge.net/javadoc/org/junit/Assert.html
+## Log methods : inspired by
+## - https://natelandau.com/bash-scripting-utilities/
+## author: Mark Torok
+##
+## date: 07. Dec. 2016
+##
+## license: MIT
+##
+#####################################################################
+
+if command -v tput &>/dev/null && tty -s; then
+ RED=$(tput setaf 1)
+ GREEN=$(tput setaf 2)
+ MAGENTA=$(tput setaf 5)
+ NORMAL=$(tput sgr0)
+ BOLD=$(tput bold)
+else
+ RED=$(echo -en "\e[31m")
+ GREEN=$(echo -en "\e[32m")
+ MAGENTA=$(echo -en "\e[35m")
+ NORMAL=$(echo -en "\e[00m")
+ BOLD=$(echo -en "\e[01m")
+fi
+
+log_header() {
+ printf "\n${BOLD}${MAGENTA}========== %s ==========${NORMAL}\n" "$@" >&2
+}
+
+log_success() {
+ printf "${GREEN}✔ %s${NORMAL}\n" "$@" >&2
+}
+
+log_failure() {
+ printf "${RED}✖ %s${NORMAL}\n" "$@" >&2
+ file=.test-error.log
+ echo "$@" >> $file
+ echo "#############################################" >> $file
+ echo "#############################################" >> $file
+}
+
+
+assert_eq() {
+ local expected="$1"
+ local actual="$2"
+ local msg="${3-}"
+
+ if [ "$expected" == "$actual" ]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$expected == $actual :: $msg" || true
+ return 1
+ fi
+}
+
+assert_not_eq() {
+ local expected="$1"
+ local actual="$2"
+ local msg="${3-}"
+
+ if [ ! "$expected" == "$actual" ]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$expected != $actual :: $msg" || true
+ return 1
+ fi
+}
+
+assert_true() {
+ local actual="$1"
+ local msg="${2-}"
+
+ assert_eq true "$actual" "$msg"
+ return "$?"
+}
+
+assert_false() {
+ local actual="$1"
+ local msg="${2-}"
+
+ assert_eq false "$actual" "$msg"
+ return "$?"
+}
+
+assert_array_eq() {
+
+ declare -a expected=("${!1-}")
+ # echo "AAE ${expected[@]}"
+
+ declare -a actual=("${!2}")
+ # echo "AAE ${actual[@]}"
+
+ local msg="${3-}"
+
+ local return_code=0
+ if [ ! "${#expected[@]}" == "${#actual[@]}" ]; then
+ return_code=1
+ fi
+
+ local i
+ for (( i=1; i < ${#expected[@]} + 1; i+=1 )); do
+ if [ ! "${expected[$i-1]}" == "${actual[$i-1]}" ]; then
+ return_code=1
+ break
+ fi
+ done
+
+ if [ "$return_code" == 1 ]; then
+ [ "${#msg}" -gt 0 ] && log_failure "(${expected[*]}) != (${actual[*]}) :: $msg" || true
+ fi
+
+ return "$return_code"
+}
+
+assert_array_not_eq() {
+
+ declare -a expected=("${!1-}")
+ declare -a actual=("${!2}")
+
+ local msg="${3-}"
+
+ local return_code=1
+ if [ ! "${#expected[@]}" == "${#actual[@]}" ]; then
+ return_code=0
+ fi
+
+ local i
+ for (( i=1; i < ${#expected[@]} + 1; i+=1 )); do
+ if [ ! "${expected[$i-1]}" == "${actual[$i-1]}" ]; then
+ return_code=0
+ break
+ fi
+ done
+
+ if [ "$return_code" == 1 ]; then
+ [ "${#msg}" -gt 0 ] && log_failure "(${expected[*]}) == (${actual[*]}) :: $msg" || true
+ fi
+
+ return "$return_code"
+}
+
+assert_empty() {
+ local actual=$1
+ local msg="${2-}"
+
+ assert_eq "" "$actual" "$msg"
+ return "$?"
+}
+
+assert_not_empty() {
+ local actual=$1
+ local msg="${2-}"
+
+ assert_not_eq "" "$actual" "$msg"
+ return "$?"
+}
+
+assert_contain() {
+ local haystack="$1"
+ local needle="${2-}"
+ local msg="${3-}"
+
+ if [ -z "${needle:+x}" ]; then
+ return 0;
+ fi
+
+ if [ -z "${haystack##*$needle*}" ]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$haystack doesn't contain $needle :: $msg" || true
+ return 1
+ fi
+}
+
+assert_not_contain() {
+ local haystack="$1"
+ local needle="${2-}"
+ local msg="${3-}"
+
+ if [ -z "${needle:+x}" ]; then
+ return 0;
+ fi
+
+ if [ "${haystack##*$needle*}" ]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$haystack contains $needle :: $msg" || true
+ return 1
+ fi
+}
+
+assert_gt() {
+ local first="$1"
+ local second="$2"
+ local msg="${3-}"
+
+ if [[ "$first" -gt "$second" ]]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$first > $second :: $msg" || true
+ return 1
+ fi
+}
+
+assert_ge() {
+ local first="$1"
+ local second="$2"
+ local msg="${3-}"
+
+ if [[ "$first" -ge "$second" ]]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$first >= $second :: $msg" || true
+ return 1
+ fi
+}
+
+assert_lt() {
+ local first="$1"
+ local second="$2"
+ local msg="${3-}"
+
+ if [[ "$first" -lt "$second" ]]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$first < $second :: $msg" || true
+ return 1
+ fi
+}
+
+assert_le() {
+ local first="$1"
+ local second="$2"
+ local msg="${3-}"
+
+ if [[ "$first" -le "$second" ]]; then
+ return 0
+ else
+ [ "${#msg}" -gt 0 ] && log_failure "$first <= $second :: $msg" || true
+ return 1
+ fi
+}
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/check.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/check.sh
new file mode 100755
index 0000000000..fa52484b28
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/check.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+printf "Waiting for all the kube-system pods to become ready in context $1"
+until [ $(kubectl --context $1 -n kube-system get pods -o jsonpath='{range .items[*].status.containerStatuses[*]}{.ready}{"\n"}{end}' | grep false -c) -eq 0 ]; do
+ printf "%s" "."
+ sleep 1
+done
+printf "\n kube-system pods are now ready \n"
+
+printf "Waiting for all the metallb-system pods to become ready in context $1"
+until [ $(kubectl --context $1 -n metallb-system get pods -o jsonpath='{range .items[*].status.containerStatuses[*]}{.ready}{"\n"}{end}' | grep false -c) -eq 0 ]; do
+ printf "%s" "."
+ sleep 1
+done
+printf "\n metallb-system pods are now ready \n"
+
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/configure-domain-rewrite.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/configure-domain-rewrite.sh
new file mode 100755
index 0000000000..d6e684c9da
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/configure-domain-rewrite.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+
+set -x # Debug mode to show commands
+set -e # Stop on error
+
+hostname="$1"
+new_hostname="$2"
+
+## Install CoreDNS if not installed
+if ! command -v coredns &> /dev/null; then
+ wget https://github.com/coredns/coredns/releases/download/v1.8.3/coredns_1.8.3_linux_amd64.tgz
+ tar xvf coredns_1.8.3_linux_amd64.tgz
+ sudo mv coredns /usr/local/bin/
+ sudo rm -rf coredns_1.8.3_linux_amd64.tgz
+fi
+
+name="$(echo {a..z} | tr -d ' ' | fold -w1 | shuf | head -n3 | tr -d '\n')"
+tld=$(echo {a..z} | tr -d ' ' | fold -w1 | shuf | head -n2 | tr -d '\n')
+random_domain="$name.$tld"
+CONFIG_FILE=~/coredns.conf
+
+## Update coredns.conf with a rewrite rule
+if grep -q "rewrite name $hostname" $CONFIG_FILE; then
+ sed -i "s/rewrite name $hostname.*/rewrite name $hostname $new_hostname/" $CONFIG_FILE
+else
+ if [ ! -f "$CONFIG_FILE" ]; then
+ # Create a new config file if it doesn't exist
+    cat << EOF > $CONFIG_FILE
+.:5300 {
+ forward . 8.8.8.8 8.8.4.4
+ log
+}
+EOF
+ fi
+ # Append a new rewrite rule
+ sed -i "/log/i \ rewrite name $hostname $new_hostname" $CONFIG_FILE
+fi
+
+# Ensure the random domain rewrite rule is always present
+if grep -q "rewrite name .* httpbin.org" $CONFIG_FILE; then
+ sed -i "s/rewrite name .* httpbin.org/rewrite name $random_domain httpbin.org/" $CONFIG_FILE
+else
+ sed -i "/log/i \ rewrite name $random_domain httpbin.org" $CONFIG_FILE
+fi
+
+cat $CONFIG_FILE # Display the config for debugging
+
+## Check if CoreDNS is running and kill it
+if pgrep coredns; then
+ pkill coredns
+ # wait for the process to be terminated
+ sleep 10
+fi
+
+## Restart CoreDNS with the updated config
+nohup coredns -conf $CONFIG_FILE &> /dev/null &
+
+## Configure the system resolver
+sudo tee /etc/systemd/resolved.conf > /dev/null < /dev/null || ! command -v jq &> /dev/null; then
+ echo "Both openssl and jq are required to run this script."
+ exit 1
+fi
+
+PRIVATE_KEY_PATH=$1
+SUBJECT=$2
+TEAM=$3
+LLM=$4
+MODEL=$5
+
+if [ -z "$PRIVATE_KEY_PATH" ] || [ -z "$SUBJECT" ] || [ -z "$TEAM" ] || [ -z "$LLM" ] || [ -z "$MODEL" ]; then
+ echo "Usage: $0 "
+ exit 1
+fi
+
+
+if [[ "$LLM" != "openai" && "$LLM" != "mistral" ]]; then
+ echo "LLM must be either 'openai' or 'mistral'."
+ exit 1
+fi
+
+HEADER='{"alg":"RS256","typ":"JWT"}'
+PAYLOAD=$(jq -n --arg sub "$SUBJECT" --arg team "$TEAM" --arg llm "$LLM" --arg model "$MODEL" \
+'{
+ "iss": "solo.io",
+ "org": "solo.io",
+ "sub": $sub,
+ "team": $team,
+ "llms": {
+ ($llm): [$model]
+ }
+}')
+
+# Encode Base64URL function
+base64url_encode() {
+ openssl base64 -e | tr -d '=' | tr '/+' '_-' | tr -d '\n'
+}
+
+# Create JWT Header
+HEADER_BASE64=$(echo -n $HEADER | base64url_encode)
+
+# Create JWT Payload
+PAYLOAD_BASE64=$(echo -n $PAYLOAD | base64url_encode)
+
+# Create JWT Signature
+SIGNING_INPUT="${HEADER_BASE64}.${PAYLOAD_BASE64}"
+SIGNATURE=$(echo -n $SIGNING_INPUT | openssl dgst -sha256 -sign $PRIVATE_KEY_PATH | base64url_encode)
+
+# Combine all parts to get the final JWT token
+JWT_TOKEN="${SIGNING_INPUT}.${SIGNATURE}"
+
+# Output the JWT token
+echo $JWT_TOKEN
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/deploy-aws-with-calico.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/deploy-aws-with-calico.sh
new file mode 100755
index 0000000000..e4df4bcd38
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/deploy-aws-with-calico.sh
@@ -0,0 +1,254 @@
+#!/usr/bin/env bash
+set -o errexit
+
+number=$1
+name=$2
+region=$3
+zone=$4
+twodigits=$(printf "%02d\n" $number)
+kindest_node=${KINDEST_NODE:-kindest\/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31}
+
+if [ -z "$3" ]; then
+ region=us-east-1
+fi
+
+if [ -z "$4" ]; then
+ zone=us-east-1a
+fi
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+mkdir -p /tmp/oidc
+
+cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA
+1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL
+395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw
+zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm
+5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8
+2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9
+ywIDAQAB
+-----END PUBLIC KEY-----
+EOF
+
+cat <<'EOF' >/tmp/oidc/sa-signer.key
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ
++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui
+PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6
++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+
+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5
+f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG
+el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY
+FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh
+SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc
+r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv
+z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn
+7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy
+3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8
+PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy
+72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw
+BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo
+hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn
+WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+
+y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39
+0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR
+f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN
+b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc
+Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd
+qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q==
+-----END RSA PRIVATE KEY-----
+EOF
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ extraMounts:
+ - containerPath: /etc/kubernetes/oidc
+ hostPath: /tmp/oidc
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+kubeadmConfigPatches:
+- |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ service-account-key-file: /etc/kubernetes/pki/sa.pub
+ service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub
+ service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key
+ service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com
+ api-audiences: sts.amazonaws.com
+ extraVolumes:
+ - name: oidc
+ hostPath: /etc/kubernetes/oidc
+ mountPath: /etc/kubernetes/oidc
+ readOnly: true
+ metadata:
+ name: config
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f -
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+cat </dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+mkdir -p /tmp/oidc
+
+cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA
+1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL
+395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw
+zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm
+5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8
+2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9
+ywIDAQAB
+-----END PUBLIC KEY-----
+EOF
+
+cat <<'EOF' >/tmp/oidc/sa-signer.key
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ
++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui
+PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6
++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+
+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5
+f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG
+el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY
+FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh
+SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc
+r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv
+z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn
+7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy
+3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8
+PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy
+72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw
+BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo
+hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn
+WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+
+y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39
+0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR
+f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN
+b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc
+Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd
+qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q==
+-----END RSA PRIVATE KEY-----
+EOF
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ extraMounts:
+ - containerPath: /etc/kubernetes/oidc
+ hostPath: /tmp/oidc
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+kubeadmConfigPatches:
+- |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ service-account-key-file: /etc/kubernetes/pki/sa.pub
+ service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub
+ service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key
+ service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com
+ api-audiences: sts.amazonaws.com
+ extraVolumes:
+ - name: oidc
+ hostPath: /etc/kubernetes/oidc
+ mountPath: /etc/kubernetes/oidc
+ readOnly: true
+ metadata:
+ name: config
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+helm repo add cilium https://helm.cilium.io/
+
+helm --kube-context kind-kind${number} install cilium cilium/cilium --version 1.15.5 \
+ --namespace kube-system \
+ --set prometheus.enabled=true \
+ --set operator.prometheus.enabled=true \
+ --set hubble.enabled=true \
+ --set hubble.metrics.enabled="{dns:destinationContext=pod|ip;sourceContext=pod|ip,drop:destinationContext=pod|ip;sourceContext=pod|ip,tcp:destinationContext=pod|ip;sourceContext=pod|ip,flow:destinationContext=pod|ip;sourceContext=pod|ip,port-distribution:destinationContext=pod|ip;sourceContext=pod|ip}" \
+ --set hubble.relay.enabled=true \
+ --set hubble.ui.enabled=true \
+ --set kubeProxyReplacement=partial \
+ --set hostServices.enabled=false \
+ --set hostServices.protocols="tcp" \
+ --set externalIPs.enabled=true \
+ --set nodePort.enabled=true \
+ --set hostPort.enabled=true \
+ --set bpf.masquerade=false \
+ --set image.pullPolicy=IfNotPresent \
+ --set cni.exclusive=false \
+ --set ipam.mode=kubernetes
+kubectl --context=kind-kind${number} -n kube-system rollout status ds cilium || true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# connect the registry to the cluster network if not already connected
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat </dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries < ${HOME}/.${cache_name}-config.yml </dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+mkdir -p /tmp/oidc
+
+cat <<'EOF' >/tmp/oidc/sa-signer-pkcs8.pub
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA53YiBcrn7+ZK0Vb4odeA
+1riYdvEb8To4H6/HtF+OKzuCIXFQ+bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL
+395nvxdly83SUrdh7ItfOPRluuuiPHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0Zw
+zIM9OviX8iEF8xHWUtz4BAMDG8N6+zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm
+5X5uOKsCHMtNSjqYUNB1DxN6xxM+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD8
+2p/16KQKU6TkZSrldkYxiHIPhu+5f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9
+ywIDAQAB
+-----END PUBLIC KEY-----
+EOF
+
+cat <<'EOF' >/tmp/oidc/sa-signer.key
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA53YiBcrn7+ZK0Vb4odeA1riYdvEb8To4H6/HtF+OKzuCIXFQ
++bRy7yMrDGITYpfYPrTZOgfdeTLZqOiAj+cL395nvxdly83SUrdh7ItfOPRluuui
+PHnFn111wpyjBw5nut4Kx+M5MksNfA1hU0ZwzIM9OviX8iEF8xHWUtz4BAMDG8N6
++zpLo0pAzaei5hKuLZ9dZOzHBC8VOW82cQMm5X5uOKsCHMtNSjqYUNB1DxN6xxM+
+odGWT/6xthPGk6YCxmO28YHPFZfiS2eAIpD82p/16KQKU6TkZSrldkYxiHIPhu+5
+f9faZJG7dB9pLN1SfdTBio4PK5Mz9muLUCv9ywIDAQABAoIBAB8tro+RMYUDRHjG
+el9ypAxIeWEsQVNRQFYkW4ZUiNYSAgl3Ni0svX6xAg989peFVL+9pLVIcfDthJxY
+FVlNCjBxyQ/YmwHFC9vQkARJEd6eLUXsj8INtS0ubbp1VxCQRDDL0C/0z7OSoJJh
+SwboqjEiTJExA2a+RArmEDTBRzdi3t+kT8G23JcqOivrITt17K6bQYyJXw7/vUdc
+r/R+hfd5TqVq92VddzDT7RNJAxsbPPXjGnESlq1GALBDs+uBGYsP0fiEJb2nicSv
+z9fBnBeERhut1gcE0C0iLRQZb+3r8TitBtxrZv+0BHgXrkKtXDwWTqGEKOwC4dBn
+7nxkH2ECgYEA6+/DOTABGYOWOQftFkJMjcugzDrjoGpuXuVOTb65T+3FHAzU93zy
+3bt3wQxrlugluyy9Sc/PL3ck2LgUsPHZ+s7zsdGvvGALBD6bOSSKATz9JgjwifO8
+PgqUz1kXRwez2CtKLOOCFFtcIzEdWIzsa1ubNqLzgN7rD+XBkUc2uEcCgYEA+yTy
+72EDMQVoIZOygytHsDNdy0iS2RsBbdurT27wkYuFpFUVWdbNSL+8haE+wJHseHcw
+BD4WIMpU+hnS4p4OO8+6V7PiXOS5E/se91EJigZAoixgDUiC8ihojWgK9PYEavUo
+hULWbayO59SxYWeUI4Ze0GP8Jw8vdB86ib4ulF0CgYEAgyzRuLjk05+iZODwQyDn
+WSquov3W0rh51s7cw0LX2wWSQm8r9NGGYhs5kJ5sLwGxAKj2MNSWF4jBdrCZ6Gr+
+y4BGY0X209/+IAUC3jlfdSLIiF4OBlT6AvB1HfclhvtUVUp0OhLfnpvQ1UwYScRI
+KcRLvovIoIzP2g3emfwjAz8CgYEAxUHhOhm1mwRHJNBQTuxok0HVMrze8n1eov39
+0RcvBvJSVp+pdHXdqX1HwqHCmxhCZuAeq8ZkNP8WvZYY6HwCbAIdt5MHgbT4lXQR
+f2l8F5gPnhFCpExG5ZLNg/urV3oAQE4stHap21zEpdyOMhZb6Yc5424U+EzaFdgN
+b3EcPtUCgYAkKvUlSnBbgiJz1iaN6fuTqH0efavuFGMhjNmG7GtpNXdgyl1OWIuc
+Yu+tZtHXtKYf3B99GwPrFzw/7yfDwae5YeWmi2/pFTH96wv3brJBqkAWY8G5Rsmd
+qF50p34vIFqUBniNRwSArx8t2dq/CuAMgLAtSjh70Q6ZAnCF85PD8Q==
+-----END RSA PRIVATE KEY-----
+EOF
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ extraMounts:
+ - containerPath: /etc/kubernetes/oidc
+ hostPath: /tmp/oidc
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
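+# Per-cluster service/pod CIDRs derived from the cluster number to avoid overlap between kind clusters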
+networking:
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+kubeadmConfigPatches:
+- |
+ kind: ClusterConfiguration
+ apiServer:
+ extraArgs:
+ service-account-key-file: /etc/kubernetes/pki/sa.pub
+ service-account-key-file: /etc/kubernetes/oidc/sa-signer-pkcs8.pub
+ service-account-signing-key-file: /etc/kubernetes/oidc/sa-signer.key
+ service-account-issuer: https://solo-workshop-oidc.s3.us-east-1.amazonaws.com
+ api-audiences: sts.amazonaws.com
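+      # These flags make the API server issue tokens for the S3-hosted OIDC issuer (IRSA-style workload identity)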
+ extraVolumes:
+ - name: oidc
+ hostPath: /etc/kubernetes/oidc
+ mountPath: /etc/kubernetes/oidc
+ readOnly: true
+ metadata:
+ name: config
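+# Send image pulls through the local registry and the pull-through caches started above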
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
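+# Hand MetalLB a slice of the kind Docker network so LoadBalancer services get reachable addresses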
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+  if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ ipFamily: ipv6
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].GlobalIPv6Address')
+networkkind=$(echo ${ipkind} | rev | cut -d: -f2- | rev):
+
+#kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
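+# Carve the MetalLB range out of the node's global IPv6 prefix on the kind network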
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}${number}1-${networkkind}${number}9
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
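+# The cluster was created with disableDefaultCNI, so install Calico as the CNI, trimming its CPU requests (250m -> 50m) to fit kind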
+curl -sL https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml | sed 's/250m/50m/g' | kubectl --context kind-kind${number} apply -f -
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/cilium/cilium:v1.15.5
+quay.io/cilium/operator-generic:v1.15.5
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+
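+# The cluster was created with disableDefaultCNI, so install Cilium as the CNI (Hubble metrics, relay and UI enabled for observability)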
+helm repo add cilium https://helm.cilium.io/
+
+helm --kube-context kind-kind${number} install cilium cilium/cilium --version 1.15.5 \
+ --namespace kube-system \
+ --set prometheus.enabled=true \
+ --set operator.prometheus.enabled=true \
+ --set hubble.enabled=true \
+ --set hubble.metrics.enabled="{dns:destinationContext=pod|ip;sourceContext=pod|ip,drop:destinationContext=pod|ip;sourceContext=pod|ip,tcp:destinationContext=pod|ip;sourceContext=pod|ip,flow:destinationContext=pod|ip;sourceContext=pod|ip,port-distribution:destinationContext=pod|ip;sourceContext=pod|ip}" \
+ --set hubble.relay.enabled=true \
+ --set hubble.ui.enabled=true \
+ --set kubeProxyReplacement=partial \
+ --set hostServices.enabled=false \
+ --set hostServices.protocols="tcp" \
+ --set externalIPs.enabled=true \
+ --set nodePort.enabled=true \
+ --set hostPort.enabled=true \
+ --set bpf.masquerade=false \
+ --set image.pullPolicy=IfNotPresent \
+ --set cni.exclusive=false \
+ --set ipam.mode=kubernetes
+kubectl --context=kind-kind${number} -n kube-system rollout status ds cilium || true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+- role: worker
+ image: ${kindest_node}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+helm repo add cilium https://helm.cilium.io/
+
+helm --kube-context kind-kind${number} install cilium cilium/cilium --version 1.15.5 \
+ --namespace kube-system \
+ --set prometheus.enabled=true \
+ --set operator.prometheus.enabled=true \
+ --set hubble.enabled=true \
+ --set hubble.metrics.enabled="{dns:destinationContext=pod|ip;sourceContext=pod|ip,drop:destinationContext=pod|ip;sourceContext=pod|ip,tcp:destinationContext=pod|ip;sourceContext=pod|ip,flow:destinationContext=pod|ip;sourceContext=pod|ip,port-distribution:destinationContext=pod|ip;sourceContext=pod|ip}" \
+ --set hubble.relay.enabled=true \
+ --set hubble.ui.enabled=true \
+ --set kubeProxyReplacement=partial \
+ --set hostServices.enabled=false \
+ --set hostServices.protocols="tcp" \
+ --set externalIPs.enabled=true \
+ --set nodePort.enabled=true \
+ --set hostPort.enabled=true \
+ --set bpf.masquerade=false \
+ --set image.pullPolicy=IfNotPresent \
+ --set cni.exclusive=false \
+ --set ipam.mode=kubernetes
+kubectl --context=kind-kind${number} -n kube-system rollout status ds cilium || true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ disableDefaultCNI: true
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+
+if hostname -I 2>/dev/null; then
+ myip=$(hostname -I | awk '{ print $1 }')
+else
+ myip=$(ipconfig getifaddr en0)
+fi
+
+# Function to determine the next available cluster number
+get_next_cluster_number() {
+ if ! kind get clusters 2>&1 | grep "^kind" > /dev/null; then
+ echo 1
+ else
+ highest_num=$(kind get clusters | grep "^kind" | tail -1 | cut -c 5-)
+ echo $((highest_num + 1))
+ fi
+}
+
+if [ -f /.dockerenv ]; then
+myip=$HOST_IP
+container=$(docker inspect $(docker ps -q) | jq -r ".[] | select(.Config.Hostname == \"$HOSTNAME\") | .Name" | cut -d/ -f2)
+docker network connect "kind" $container || true
+number=$(get_next_cluster_number)
+twodigits=$(printf "%02d\n" $number)
+fi
+
+reg_name='kind-registry'
+reg_port='5000'
+docker start "${reg_name}" 2>/dev/null || \
+docker run -d --restart=always -p "0.0.0.0:${reg_port}:5000" --name "${reg_name}" registry:2
+
+cache_port='5000'
+cat > registries <<EOF
+docker https://registry-1.docker.io
+us-docker https://us-docker.pkg.dev
+us-central1-docker https://us-central1-docker.pkg.dev
+quay https://quay.io
+gcr https://gcr.io
+EOF
+
+cat registries | while read cache_name cache_url; do
+cat > ${HOME}/.${cache_name}-config.yml <<EOF
+version: 0.1
+proxy:
+  remoteurl: ${cache_url}
+EOF
+docker start "${cache_name}" 2>/dev/null || \
+docker run -d --restart=always ${DEPLOY_EXTRA_PARAMS} -v ${HOME}/.${cache_name}-config.yml:/etc/docker/registry/config.yml --name "${cache_name}" registry:2
+done
+
+cat << EOF > kind${number}.yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+ image: ${kindest_node}
+ extraPortMappings:
+ - containerPort: 6443
+ hostPort: 70${twodigits}
+ labels:
+ ingress-ready: true
+ topology.kubernetes.io/region: ${region}
+ topology.kubernetes.io/zone: ${zone}
+networking:
+ serviceSubnet: "10.$(echo $twodigits | sed 's/^0*//').0.0/16"
+ podSubnet: "10.1${twodigits}.0.0/16"
+containerdConfigPatches:
+- |-
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"]
+ endpoint = ["http://${reg_name}:${reg_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
+ endpoint = ["http://docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-docker.pkg.dev"]
+ endpoint = ["http://us-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."us-central1-docker.pkg.dev"]
+ endpoint = ["http://us-central1-docker:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
+ endpoint = ["http://quay:${cache_port}"]
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
+ endpoint = ["http://gcr:${cache_port}"]
+${KIND_ADDL_FEATURES}
+EOF
+
+kind create cluster --name kind${number} --config kind${number}.yaml
+
+ipkind=$(docker inspect kind${number}-control-plane | jq -r '.[0].NetworkSettings.Networks[].IPAddress')
+networkkind=$(echo ${ipkind} | awk -F. '{ print $1"."$2 }')
+
+kubectl config set-cluster kind-kind${number} --server=https://${myip}:70${twodigits} --insecure-skip-tls-verify=true
+
+docker network connect "kind" "${reg_name}" || true
+docker network connect "kind" docker || true
+docker network connect "kind" us-docker || true
+docker network connect "kind" us-central1-docker || true
+docker network connect "kind" quay || true
+docker network connect "kind" gcr || true
+
+# Preload images
+cat << EOF >> images.txt
+quay.io/metallb/controller:v0.13.12
+quay.io/metallb/speaker:v0.13.12
+EOF
+cat images.txt | while read image; do
+ docker pull $image || true
+ kind load docker-image $image --name kind${number} || true
+done
+for i in 1 2 3 4 5; do kubectl --context=kind-kind${number} apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml && break || sleep 15; done
+kubectl --context=kind-kind${number} create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
+kubectl --context=kind-kind${number} -n metallb-system rollout status deploy controller || true
+
+cat << EOF > metallb${number}.yaml
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+ name: first-pool
+ namespace: metallb-system
+spec:
+ addresses:
+ - ${networkkind}.1${twodigits}.1-${networkkind}.1${twodigits}.254
+---
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+ name: empty
+ namespace: metallb-system
+EOF
+
+printf "Create IPAddressPool in kind-kind${number}\n"
+for i in {1..10}; do
+kubectl --context=kind-kind${number} apply -f metallb${number}.yaml && break
+sleep 2
+done
+
+# Rename the kubectl context for this cluster to ${name}
+printf "Renaming context kind-kind${number} to ${name}\n"
+for i in {1..100}; do
+ (kubectl config get-contexts -oname | grep ${name}) && break
+ kubectl config rename-context kind-kind${number} ${name} && break
+ printf " $i"/100
+ sleep 2
+ [ $i -lt 100 ] || exit 1
+done
+
+# Document the local registry
+# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
+cat <<EOF | kubectl --context ${name} apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: local-registry-hosting
+  namespace: kube-public
+data:
+  localRegistryHosting.v1: |
+    host: "localhost:${reg_port}"
+    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
+EOF
+sed -n '/```bash/,/```/p; //p' | egrep -v '```|' | sed '/#IGNORE_ME/d'
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/register-domain.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/register-domain.sh
new file mode 100755
index 0000000000..1cb84cd86a
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/register-domain.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+# Check if the correct number of arguments is provided
+if [ "$#" -ne 2 ]; then
+ echo "Usage: $0 "
+ exit 1
+fi
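+# Example (illustrative values): ./register-domain.sh httpbin.example.com 172.18.1.1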
+
+# Variables
+hostname="$1"
+new_ip_or_domain="$2"
+hosts_file="/etc/hosts"
+
+# Function to check if the input is a valid IP address
+is_ip() {
+ if [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+ return 0 # 0 = true - valid IPv4 address
+ elif [[ $1 =~ ^[0-9a-f]+[:]+[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9a-f]*[:]*[0-9]*$ ]]; then
+ return 0 # 0 = true - valid IPv6 address
+ else
+ return 1 # 1 = false
+ fi
+}
+
+# Function to resolve domain to the first IPv4 address using dig
+resolve_domain() {
+ # Using dig to query A records, and awk to parse the first IPv4 address
+ dig +short A "$1" | awk '/^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/ {print; exit}'
+}
+
+# Validate new_ip_or_domain or resolve domain to IP
+if is_ip "$new_ip_or_domain"; then
+ new_ip="$new_ip_or_domain"
+else
+ new_ip=$(resolve_domain "$new_ip_or_domain")
+ if [ -z "$new_ip" ]; then
+ echo "Failed to resolve domain to an IPv4 address."
+ exit 1
+ fi
+fi
+
+# Check if the entry already exists
+if grep -q "$hostname\$" "$hosts_file"; then
+ # Update the existing entry with the new IP
+ tempfile=$(mktemp)
+ sed "s/^.*$hostname\$/$new_ip $hostname/" "$hosts_file" > "$tempfile"
+ sudo cp "$tempfile" "$hosts_file"
+ rm "$tempfile"
+ echo "Updated $hostname in $hosts_file with new IP: $new_ip"
+else
+ # Add a new entry if it doesn't exist
+ echo "$new_ip $hostname" | sudo tee -a "$hosts_file" > /dev/null
+ echo "Added $hostname to $hosts_file with IP: $new_ip"
+fi
diff --git a/gloo-gateway/1-18/enterprise/lambda/scripts/timestamped_output.sh b/gloo-gateway/1-18/enterprise/lambda/scripts/timestamped_output.sh
new file mode 100755
index 0000000000..b1f741613e
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/scripts/timestamped_output.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
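+# Usage: some_command | ./timestamped_output.sh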
+# Read input line by line and prepend a timestamp
+while IFS= read -r line; do
+ echo "$(date '+%Y-%m-%d %H:%M:%S') $line"
+done
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/can-resolve.test.js.liquid b/gloo-gateway/1-18/enterprise/lambda/tests/can-resolve.test.js.liquid
new file mode 100644
index 0000000000..7d1163da97
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/can-resolve.test.js.liquid
@@ -0,0 +1,17 @@
+const dns = require('dns');
+const chaiHttp = require("chai-http");
+const chai = require("chai");
+const expect = chai.expect;
+chai.use(chaiHttp);
+const { waitOnFailedTest } = require('./tests/utils');
+
+afterEach(function(done) { waitOnFailedTest(done, this.currentTest.currentRetry())});
+
+describe("Address '" + process.env.{{ to_resolve }} + "' can be resolved in DNS", () => {
+ it(process.env.{{ to_resolve }} + ' can be resolved', (done) => {
+ return dns.lookup(process.env.{{ to_resolve }}, (err, address, family) => {
+ expect(address).to.be.an.ip;
+ done();
+ });
+ });
+});
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/chai-exec.js b/gloo-gateway/1-18/enterprise/lambda/tests/chai-exec.js
new file mode 100644
index 0000000000..67ba62f095
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/chai-exec.js
@@ -0,0 +1,205 @@
+const jsYaml = require('js-yaml');
+const deepObjectDiff = require('deep-object-diff');
+const chaiExec = require("@jsdevtools/chai-exec");
+const chai = require("chai");
+const expect = chai.expect;
+const should = chai.should();
+chai.use(chaiExec);
+const utils = require('./utils');
+const { debugLog } = require('./utils/logging');
+chai.config.truncateThreshold = 4000; // length threshold for actual and expected values in assertion errors
+
+global = {
+ checkKubernetesObject: async ({ context, namespace, kind, k8sObj, yaml }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get " + kind + " " + k8sObj + " -o json";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+ let json = jsYaml.load(yaml)
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.should.exit.with.code(0);
+ cli.stderr.should.be.empty;
+ let data = JSON.parse(cli.stdout);
+ debugLog(`Parsed data from CLI: ${JSON.stringify(data)}`);
+
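+ // detailedDiff only reports keys that were updated or deleted relative to the
+ // expected YAML; extra server-populated fields (status, managedFields, ...)
+ // land under "added" and are deliberately ignored by the check below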
+ let diff = deepObjectDiff.detailedDiff(json, data);
+ debugLog(`Diff between expected and actual object: ${JSON.stringify(diff)}`);
+
+ let expectedObject = false;
+ if (Object.keys(diff.updated).length === 0 && Object.keys(diff.deleted).length === 0) {
+ expectedObject = true;
+ }
+ debugLog(`Expected object found: ${expectedObject}`);
+ expect(expectedObject, "The following object can't be found or is not as expected:\n" + yaml).to.be.true;
+ },
+
+ checkDeployment: async ({ context, namespace, k8sObj }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get deploy " + k8sObj + " -o jsonpath='{.status}'";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+ let readyReplicas = JSON.parse(cli.stdout.slice(1, -1)).readyReplicas || 0;
+ let replicas = JSON.parse(cli.stdout.slice(1, -1)).replicas;
+ debugLog(`Ready replicas: ${readyReplicas}, Total replicas: ${replicas}`);
+
+ if (readyReplicas != replicas) {
+ debugLog(`Deployment ${k8sObj} in ${context} not ready, retrying...`);
+ await utils.sleep(1000);
+ }
+ cli.should.exit.with.code(0);
+ readyReplicas.should.equal(replicas);
+ },
+
+ checkDeploymentHasPod: async ({ context, namespace, deployment }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get deploy " + deployment + " -o name";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+ cli.stdout.should.not.be.empty;
+ cli.stdout.should.contain(deployment);
+ },
+
+ checkDeploymentsWithLabels: async ({ context, namespace, labels, instances }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get deploy -l " + labels + " -o jsonpath='{.items}'";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+ let deployments = JSON.parse(cli.stdout.slice(1, -1));
+ debugLog(`Found deployments: ${JSON.stringify(deployments)}`);
+
+ expect(deployments).to.have.lengthOf(instances);
+    for (const deployment of deployments) {
+      let readyReplicas = deployment.status.readyReplicas || 0;
+      let replicas = deployment.status.replicas;
+      debugLog(`Deployment ${deployment.metadata.name} - Ready replicas: ${readyReplicas}, Total replicas: ${replicas}`);
+
+      if (readyReplicas != replicas) {
+        debugLog(`Deployment ${deployment.metadata.name} in ${context} not ready, retrying...`);
+        await utils.sleep(1000);
+      }
+      cli.should.exit.with.code(0);
+      readyReplicas.should.equal(replicas);
+    }
+ },
+
+ checkStatefulSet: async ({ context, namespace, k8sObj }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get sts " + k8sObj + " -o jsonpath='{.status}'";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+    let status = JSON.parse(cli.stdout.slice(1, -1));
+    let readyReplicas = status.readyReplicas || 0;
+    let replicas = status.replicas;
+ debugLog(`StatefulSet ${k8sObj} - Ready replicas: ${readyReplicas}, Total replicas: ${replicas}`);
+
+ if (readyReplicas != replicas) {
+ debugLog(`StatefulSet ${k8sObj} in ${context} not ready, retrying...`);
+ await utils.sleep(1000);
+ }
+ cli.should.exit.with.code(0);
+ readyReplicas.should.equal(replicas);
+ },
+
+ checkDaemonSet: async ({ context, namespace, k8sObj }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get ds " + k8sObj + " -o jsonpath='{.status}'";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+    let status = JSON.parse(cli.stdout.slice(1, -1));
+    let readyReplicas = status.numberReady || 0;
+    let replicas = status.desiredNumberScheduled;
+ debugLog(`DaemonSet ${k8sObj} - Ready replicas: ${readyReplicas}, Total replicas: ${replicas}`);
+
+ if (readyReplicas != replicas) {
+ debugLog(`DaemonSet ${k8sObj} in ${context} not ready, retrying...`);
+ await utils.sleep(1000);
+ }
+ cli.should.exit.with.code(0);
+ readyReplicas.should.equal(replicas);
+ },
+
+ k8sObjectIsPresent: ({ context, namespace, k8sType, k8sObj }) => {
+ let command = "kubectl --context " + context + " -n " + namespace + " get " + k8sType + " " + k8sObj + " -o name";
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+ cli.should.exit.with.code(0);
+ },
+
+ genericCommand: async ({ command, responseContains = "" }) => {
+ debugLog(`Executing generic command: ${command}`);
+ let cli = chaiExec(command);
+
+    if (cli.stderr && cli.stderr != "") {
+      // Pause before the assertions below fail, so that mocha retries are spaced out
+      debugLog(`Command ${command} not successful: ${cli.stderr}`);
+      await utils.sleep(1000);
+    }
+
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ debugLog(`Command error (stderr): ${cli.stderr}`);
+
+ cli.stderr.should.be.empty;
+ cli.should.exit.with.code(0);
+ if (responseContains != "") {
+ debugLog(`Checking if stdout contains: ${responseContains}`);
+ cli.stdout.should.contain(responseContains);
+ }
+ },
+
+ getOutputForCommand: ({ command }) => {
+ debugLog(`Executing command: ${command}`);
+ let cli = chaiExec(command);
+ debugLog(`Command output (stdout): ${cli.stdout}`);
+ return cli.stdout;
+ },
+
+ curlInPod: ({ curlCommand, podName, namespace }) => {
+ debugLog(`Executing curl command: ${curlCommand} on pod: ${podName} in namespace: ${namespace}`);
+ const cli = chaiExec(curlCommand);
+ debugLog(`Curl command output (stdout): ${cli.stdout}`);
+ return cli.stdout;
+ },
+ curlInDeployment: async ({ curlCommand, deploymentName, namespace, context }) => {
+ debugLog(`Executing curl command: ${curlCommand} on deployment: ${deploymentName} in namespace: ${namespace} and context: ${context}`);
+ let getPodCommand = `kubectl --context ${context} -n ${namespace} get pods -l app=${deploymentName} -o jsonpath='{.items[0].metadata.name}'`;
+ let podName = chaiExec(getPodCommand).stdout.trim();
+ debugLog(`Pod selected for curl command: ${podName}`);
+ let execCommand = `kubectl --context ${context} -n ${namespace} exec ${podName} -- ${curlCommand}`;
+ const cli = chaiExec(execCommand);
+ debugLog(`Curl command output (stdout): ${cli.stdout}`);
+ return cli.stdout;
+ },
+};
+
+module.exports = global;
+
+afterEach(function (done) {
+ if (this.currentTest.currentRetry() > 0 && this.currentTest.currentRetry() % 5 === 0) {
+ debugLog(`Test "${this.currentTest.fullTitle()}" retry: ${this.currentTest.currentRetry()}`);
+ }
+  utils.waitOnFailedTest(done, this.currentTest.currentRetry());
+});
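+
+// Usage sketch (hypothetical test, for illustration only; names and values are
+// assumptions, not part of the workshop): a mocha test requires this module and
+// calls the helpers it exports.
+//
+// const helpers = require('./chai-exec');
+//
+// describe("httpbin", () => {
+//   it("deployment is ready", () => helpers.checkDeployment({
+//     context: process.env.CLUSTER1,
+//     namespace: "httpbin",
+//     k8sObj: "httpbin1",
+//   }));
+// });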
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/chai-http.js b/gloo-gateway/1-18/enterprise/lambda/tests/chai-http.js
new file mode 100644
index 0000000000..67f43db003
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/chai-http.js
@@ -0,0 +1,139 @@
+const chaiHttp = require("chai-http");
+const chai = require("chai");
+const expect = chai.expect;
+chai.use(chaiHttp);
+const utils = require('./utils');
+const fs = require("fs");
+const { debugLog } = require('./utils/logging');
+
+// Accept self-signed certificates during tests and suppress Node's warnings
+process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
+process.env.NODE_NO_WARNINGS = '1';
+chai.config.truncateThreshold = 4000; // length threshold for actual and expected values in assertion errors
+
+global = {
+ checkURL: ({ host, path = "", headers = [], certFile = '', keyFile = '', retCode }) => {
+ debugLog(`Checking URL: ${host}${path} with expected return code: ${retCode}`);
+
+ let cert = certFile ? fs.readFileSync(certFile) : '';
+ let key = keyFile ? fs.readFileSync(keyFile) : '';
+ let request = chai.request(host).head(path).redirects(0).cert(cert).key(key);
+
+ debugLog(`Setting headers: ${JSON.stringify(headers)}`);
+ headers.forEach(header => request.set(header.key, header.value));
+
+ return request
+ .send()
+ .then(async function (res) {
+ debugLog(`Response status code: ${res.status}`);
+ expect(res).to.have.status(retCode);
+ });
+ },
+
+ checkBody: ({ host, path = "", headers = [], body = '', certFile = '', keyFile = '', method = "get", data = "", match = true }) => {
+ debugLog(`Checking body at ${host}${path} with method: ${method} and match condition: ${match}`);
+
+ let cert = certFile ? fs.readFileSync(certFile) : '';
+ let key = keyFile ? fs.readFileSync(keyFile) : '';
+ let request = chai.request(host);
+
+ switch (method) {
+ case "get":
+ request = request.get(path).redirects(0).cert(cert).key(key);
+ break;
+ case "post":
+ request = request.post(path).redirects(0);
+ break;
+ case "put":
+ request = request.put(path).redirects(0);
+ break;
+ case "head":
+ request = request.head(path).redirects(0);
+ break;
+ default:
+        throw new Error('The requested method is not implemented.');
+ }
+
+ debugLog(`Setting headers: ${JSON.stringify(headers)}`);
+ headers.forEach(header => request.set(header.key, header.value));
+
+ debugLog(`Sending data: ${data}`);
+ return request
+ .send(data)
+ .then(async function (res) {
+ debugLog(`Response body: ${res.text}`);
+ if (match) {
+ expect(res.text).to.contain(body);
+ } else {
+ expect(res.text).not.to.contain(body);
+ }
+ });
+ },
+
+ checkHeaders: ({ host, path = "", headers = [], certFile = '', keyFile = '', expectedHeaders = [] }) => {
+ debugLog(`Checking headers for URL: ${host}${path}`);
+
+ let cert = certFile ? fs.readFileSync(certFile) : '';
+ let key = keyFile ? fs.readFileSync(keyFile) : '';
+ let request = chai.request(host).get(path).redirects(0).cert(cert).key(key);
+
+ debugLog(`Setting headers: ${JSON.stringify(headers)}`);
+ headers.forEach(header => request.set(header.key, header.value));
+
+ return request
+ .send()
+ .then(async function (res) {
+ debugLog(`Response headers: ${JSON.stringify(res.header)}`);
+ expectedHeaders.forEach(header => {
+ debugLog(`Checking header ${header.key} with expected value: ${header.value}`);
+ if (header.value === '*') {
+ expect(res.header).to.have.property(header.key);
+ } else {
+ expect(res.header[header.key]).to.equal(header.value);
+ }
+ });
+ });
+ },
+
+ checkWithMethod: ({ host, path, headers = [], method = "get", certFile = '', keyFile = '', retCode }) => {
+ debugLog(`Checking URL: ${host}${path} with method: ${method} and expected return code: ${retCode}`);
+
+ let cert = certFile ? fs.readFileSync(certFile) : '';
+ let key = keyFile ? fs.readFileSync(keyFile) : '';
+ let request = chai.request(host);
+
+ switch (method) {
+ case 'get':
+ request = request.get(path);
+ break;
+ case 'post':
+ request = request.post(path);
+ break;
+ case 'put':
+ request = request.put(path);
+ break;
+ default:
+        throw new Error('The requested method is not implemented.');
+ }
+
+ request.cert(cert).key(key).redirects(0);
+
+ debugLog(`Setting headers: ${JSON.stringify(headers)}`);
+ headers.forEach(header => request.set(header.key, header.value));
+
+ return request
+ .send()
+ .then(async function (res) {
+ debugLog(`Response status code: ${res.status}`);
+ expect(res).to.have.status(retCode);
+ });
+ }
+};
+
+module.exports = global;
+
+afterEach(function (done) {
+ if (this.currentTest.currentRetry() > 0 && this.currentTest.currentRetry() % 5 === 0) {
+ console.log(`Test "${this.currentTest.fullTitle()}" retry: ${this.currentTest.currentRetry()}`);
+ }
+ utils.waitOnFailedTest(done, this.currentTest.currentRetry());
+});
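+
+// Usage sketch (hypothetical test; host and path are assumptions): with mocha
+// retries enabled, the afterEach hook above spaces out attempts until the
+// endpoint answers as expected.
+//
+// const helpersHttp = require('./chai-http');
+//
+// describe("httpbin", () => {
+//   it("returns 200", () => helpersHttp.checkURL({
+//     host: `https://${process.env.HOST_GW_CLUSTER1}`,
+//     path: "/get",
+//     retCode: 200,
+//   }));
+// });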
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.js b/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.js
new file mode 100644
index 0000000000..07b7202922
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.js
@@ -0,0 +1,248 @@
+// k8s-changes.js: watches Gloo, Istio and Gateway API custom resources and reports any change
+
+const k8s = require('@kubernetes/client-node');
+const yaml = require('js-yaml');
+const diff = require('deep-diff').diff;
+
+function delay(ms) {
+ return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+function sanitizeObject(obj) {
+ const sanitized = JSON.parse(JSON.stringify(obj));
+ if (sanitized.metadata) {
+ delete sanitized.metadata.managedFields;
+ delete sanitized.metadata.generation;
+ delete sanitized.metadata.resourceVersion;
+ delete sanitized.metadata.creationTimestamp;
+ }
+ return sanitized;
+}
+
+function getValueAtPath(obj, pathArray) {
+ return pathArray.reduce((acc, key) => (acc && acc[key] !== undefined) ? acc[key] : undefined, obj);
+}
+
+// Helper function to format differences into a human-readable string
+function formatDifferences(differences, previousObj, currentObj) {
+ let output = '';
+ const handledArrayPaths = new Set();
+
+ differences.forEach(d => {
+ const path = d.path.join('.');
+ if (d.kind === 'A') {
+ const arrayPath = d.path.join('.');
+ if (!handledArrayPaths.has(arrayPath)) {
+ const beforeArray = getValueAtPath(previousObj, d.path);
+ const afterArray = getValueAtPath(currentObj, d.path);
+
+        output += `• ${arrayPath}:\n\nBefore:\n${yaml.dump(beforeArray).trim()}\nAfter:\n${yaml.dump(afterArray).trim()}\n`;
+ handledArrayPaths.add(arrayPath);
+ }
+ } else {
+ // Check if this change is part of an already handled array
+ const isPartOfHandledArray = Array.from(handledArrayPaths).some(arrayPath => path.startsWith(arrayPath));
+
+ if (!isPartOfHandledArray) {
+ switch (d.kind) {
+ case 'E': // Edit
+ output += `• ${path}: '${JSON.stringify(d.lhs)}' => '${JSON.stringify(d.rhs)}'\n`;
+ break;
+ case 'N': // New
+ output += `• ${path}: Added '${JSON.stringify(d.rhs)}'\n`;
+ break;
+ case 'D': // Deleted
+ output += `• ${path}: Removed '${JSON.stringify(d.lhs)}'\n`;
+ break;
+ default:
+ output += `• ${path}: Changed\n`;
+ }
+ }
+ }
+ });
+
+ return output;
+}
+
+// Function to extract change information from an event
+function extractChangeInfo(type, apiObj, previousObj, currentObj) {
+ const name = apiObj.metadata.name;
+ const namespace = apiObj.metadata.namespace;
+ const kind = apiObj.kind;
+ const apiVersion = apiObj.apiVersion;
+
+ let changeInfo = `${type}: ${kind} "${name}"`;
+ if (namespace) {
+ changeInfo += ` in namespace "${namespace}"`;
+ }
+ changeInfo += ` (apiVersion: ${apiVersion})`;
+
+ if (type === 'MODIFIED' && previousObj) {
+ const differences = diff(previousObj, apiObj);
+ if (differences && differences.length > 0) {
+ // Filter out non-essential diffs
+ const essentialDifferences = differences.filter(d => {
+ const path = d.path.join('.');
+ return !path.startsWith('metadata.generation') &&
+ !path.startsWith('metadata.resourceVersion') &&
+ !path.startsWith('metadata.creationTimestamp');
+ });
+
+ if (essentialDifferences.length > 0) {
+ changeInfo += '\n\nDifferences:\n' + formatDifferences(essentialDifferences, previousObj, apiObj);
+ } else {
+ changeInfo += '\n\nNo meaningful differences detected';
+ }
+ } else {
+ changeInfo += '\n\nNo differences detected';
+ }
+ }
+
+ return changeInfo;
+}
+
+async function watchCRs(contextName, delaySeconds, durationSeconds) {
+ let changeCount = 0;
+ let isWatchSetupComplete = false;
+
+ console.log(`Waiting for ${delaySeconds} seconds before starting the test...`);
+ await delay(delaySeconds * 1000);
+ console.log('Delay complete. Starting the test.');
+
+ const kc = new k8s.KubeConfig();
+ kc.loadFromDefault();
+
+  const contexts = kc.getContexts();
+  const context = contexts.find(c => c.name === contextName);
+  if (!context) {
+    throw new Error(`Context "${contextName}" not found in the kubeconfig`);
+  }
+
+  kc.setCurrentContext(contextName);
+
+ const k8sApi = kc.makeApiClient(k8s.CustomObjectsApi);
+ const apisApi = kc.makeApiClient(k8s.ApisApi);
+
+ async function getResources(group, version) {
+ try {
+      // An empty plural makes the client call /apis/<group>/<version>/, which returns the APIResourceList for that group/version
+      const { body } = await k8sApi.listClusterCustomObject(group, version, '');
+ return body.resources || [];
+ } catch (error) {
+ console.error(`Error getting resources for ${group}/${version}: ${error}`);
+ return [];
+ }
+ }
+
+ // Function to watch a specific CR
+ async function watchCR(group, version, plural, abortController) {
+ const watch = new k8s.Watch(kc);
+ let resourceVersion;
+
+ try {
+ // Get the latest resourceVersion
+ const listResponse = await k8sApi.listClusterCustomObject(group, version, plural);
+ resourceVersion = listResponse.body.metadata.resourceVersion;
+
+ // Cache of previous objects (sanitized)
+ const objectCache = {};
+
+ // Initialize the object cache
+ if (listResponse.body.items) {
+ listResponse.body.items.forEach(item => {
+ objectCache[item.metadata.uid] = sanitizeObject(item);
+ });
+ }
+
+ await watch.watch(
+ `/apis/${group}/${version}/${plural}`,
+ {
+ abortSignal: abortController.signal,
+ allowWatchBookmarks: true,
+ resourceVersion: resourceVersion
+ },
+ (type, apiObj) => {
+ if (isWatchSetupComplete) {
+ const uid = apiObj.metadata.uid;
+
+ // Sanitize the current object by removing non-essential metadata
+ const sanitizedObj = sanitizeObject(apiObj);
+
+ let previousObj = objectCache[uid];
+
+ if (previousObj) {
+ // Clone previousObj to avoid mutation
+ previousObj = JSON.parse(JSON.stringify(previousObj));
+ }
+
+ if (type === 'ADDED' || type === 'MODIFIED' || type === 'DELETED') {
+ const changeInfo = extractChangeInfo(type, sanitizedObj, previousObj, sanitizedObj);
+
+ // Only log meaningful changes
+ if (type === 'MODIFIED' && changeInfo.includes('No meaningful differences detected')) {
+ // Skip logging if there are no meaningful changes
+ return;
+ }
+
+ console.log(changeInfo);
+ console.log('---');
+ console.log(yaml.dump(sanitizedObj).trim()); // Display the full object in YAML
+ console.log('---');
+
+ if (type === 'DELETED') {
+ delete objectCache[uid];
+ } else {
+ objectCache[uid] = sanitizedObj;
+ }
+
+ changeCount++;
+ }
+ }
+ },
+ (err) => {
+ if (err && err.message !== 'aborted') {
+ console.error(`Error watching ${group}/${version}/${plural}: ${err}`);
+ }
+ }
+ );
+ } catch (error) {
+ if (error.message !== 'aborted') {
+ console.error(`Error setting up watch for ${group}/${version}/${plural}: ${error}`);
+ }
+ }
+ }
+
+ console.log(`Using context: ${contextName}`);
+ console.log(`Watching for CR changes with apiVersion containing "istio", "gloo", "solo" or "gateway.networking.k8s.io" for ${durationSeconds} seconds...`);
+
+ const abortController = new AbortController();
+ const watchPromises = [];
+
+ const { body: apiGroups } = await apisApi.getAPIVersions();
+
+ for (const group of apiGroups.groups) {
+ if (group.name.includes('istio') || group.name.includes('gloo') || group.name.includes('solo') || group.name.includes('gateway.networking.k8s.io')) {
+ const latestVersion = group.preferredVersion || group.versions[0];
+ const resources = await getResources(group.name, latestVersion.version);
+
+ for (const resource of resources) {
+        // Skip subresources such as "gateways/status", whose names contain a slash
+        if (resource.kind && resource.name && !resource.name.includes('/')) {
+ watchPromises.push(watchCR(group.name, latestVersion.version, resource.name, abortController));
+ }
+ }
+ }
+ }
+
+ console.log("Watch setup complete. Listening for changes...");
+ console.log('---');
+
+ isWatchSetupComplete = true;
+
+ await new Promise(resolve => setTimeout(resolve, durationSeconds * 1000));
+
+ abortController.abort();
+ console.log(`Watch completed after ${durationSeconds} seconds.`);
+ console.log(`Total changes detected: ${changeCount}`);
+
+ await Promise.allSettled(watchPromises);
+
+ return changeCount;
+}
+
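+// Usage sketch (hypothetical context name and durations): resolves with the
+// number of CR changes observed during the watch window.
+//
+// const { watchCRs } = require('./k8s-changes');
+// watchCRs('cluster1', 5, 10).then(count => console.log(`${count} change(s)`));
+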
+module.exports = { watchCRs };
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.test.js.liquid b/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.test.js.liquid
new file mode 100644
index 0000000000..85ff59def2
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/k8s-changes.test.js.liquid
@@ -0,0 +1,25 @@
+const assert = require('assert');
+const { watchCRs } = require('./tests/k8s-changes');
+
+describe('Kubernetes CR Watcher', function() {
+ let contextName = process.env.{{ context | default: "CLUSTER1" }};
+ let delaySeconds = {{ delay | default: 5 }};
+ let durationSeconds = {{ duration | default: 10 }};
+ let changeCount = 0;
+
+ it(`No CR changed in context ${contextName} for ${durationSeconds} seconds`, async function() {
+ this.timeout((durationSeconds + delaySeconds + 10) * 1000);
+
+ changeCount = await watchCRs(contextName, delaySeconds, durationSeconds);
+
+ assert.strictEqual(changeCount, 0, `Test failed: ${changeCount} changes were detected`);
+ });
+
+  after(function(done) {
+    // Exit with the number of detected changes so that any change yields a
+    // non-zero exit code, giving pending watch handlers a second to flush
+    setTimeout(() => {
+      process.exit(changeCount);
+    }, 1000);
+
+    done();
+  });
+});
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/keycloak-token.js b/gloo-gateway/1-18/enterprise/lambda/tests/keycloak-token.js
new file mode 100644
index 0000000000..3ac1a691db
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/keycloak-token.js
@@ -0,0 +1,4 @@
+const keycloak = require('./keycloak');
+const { argv } = require('node:process');
+
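+// Usage sketch (hypothetical values): prints the Keycloak session cookie for the
+// given login URL and user, e.g.
+//   node keycloak-token.js "https://portal.example.com/v1/login" user1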
+keycloak.getKeyCloakCookie(argv[2], argv[3]);
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/keycloak.js b/gloo-gateway/1-18/enterprise/lambda/tests/keycloak.js
new file mode 100644
index 0000000000..3af51e31c1
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/keycloak.js
@@ -0,0 +1,48 @@
+const puppeteer = require('puppeteer');
+
+global = {
+ getKeyCloakCookie: async (url, user) => {
+ const browser = await puppeteer.launch({
+ headless: "new",
+ slowMo: 40,
+ ignoreHTTPSErrors: true,
+ args: ['--no-sandbox', '--disable-setuid-sandbox'], // needed for instruqt
+ });
+ // Create a new browser context
+ const context = await browser.createBrowserContext();
+ const page = await context.newPage();
+ await page.goto(url);
+    await page.waitForNetworkIdle({ timeout: 1000 });
+
+ // Enter credentials
+ await page.screenshot({path: 'screenshot.png'});
+    await page.waitForSelector('#username', { timeout: 1000 });
+    await page.waitForSelector('#password', { timeout: 1000 });
+ await page.type('#username', user);
+ await page.type('#password', 'password');
+ await page.click('#kc-login');
+    await page.waitForNetworkIdle({ timeout: 1000 });
+
+ // Retrieve session cookie
+ const cookies = await page.cookies();
+ const sessionCookie = cookies.find(cookie => cookie.name === 'keycloak-session');
+ let ret;
+ if (sessionCookie) {
+ ret = `${sessionCookie.name}=${sessionCookie.value}`; // Construct the cookie string
+ } else {
+ // console.error(await page.content()); // very verbose
+ await page.screenshot({path: 'screenshot.png'});
+ console.error(` No session cookie found for ${user}`);
+ ret = "keycloak-session=dummy";
+ }
+ await context.close();
+ await browser.close();
+ console.log(ret);
+ return ret;
+ }
+};
+
+module.exports = global;
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/base.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/base.js
new file mode 100644
index 0000000000..af140b84f5
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/base.js
@@ -0,0 +1,28 @@
+const { debugLog } = require('../utils/logging');
+
+class BasePage {
+ constructor(page) {
+ this.page = page;
+ }
+
+ async navigateTo(url) {
+ debugLog(`Navigating to ${url}`);
+ await this.page.goto(url, { waitUntil: 'networkidle2' });
+ debugLog('Navigation complete');
+ }
+
+ async findVisibleSelector(selectors) {
+ for (const selector of selectors) {
+ const element = await this.page.$(selector);
+ if (element) {
+ const visible = await this.page.evaluate(el => !!(el.offsetWidth || el.offsetHeight || el.getClientRects().length), element);
+ if (visible) {
+ return selector;
+ }
+ }
+ }
+ throw new Error('No visible selector found for the provided options.');
+ }
+}
+
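+// Usage sketch (hypothetical selectors): pick whichever variant of an element is
+// actually rendered before interacting with it.
+//
+// const selector = await basePage.findVisibleSelector(['#login', '.login-button']);
+// await basePage.page.click(selector);
+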
+module.exports = BasePage;
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/constants.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/constants.js
new file mode 100644
index 0000000000..17068fbf55
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/constants.js
@@ -0,0 +1,13 @@
+const InsightType = {
+ BP: 'BP',
+ CFG: 'CFG',
+ HLT: 'HLT',
+ ING: 'ING',
+ RES: 'RES',
+ RTE: 'RTE',
+ SEC: 'SEC',
+};
+
+module.exports = {
+ InsightType,
+};
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-apps-page.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-apps-page.js
new file mode 100644
index 0000000000..35e4ea7de5
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-apps-page.js
@@ -0,0 +1,36 @@
+const BasePage = require("../base");
+
+class DeveloperPortalAdminAppsPage extends BasePage {
+ constructor(page) {
+ super(page);
+ // Metadata selectors
+ this.editMetadataButton = 'button ::-p-text("Edit Custom Metadata")';
+ this.metadataKeyInput = '#meta-key-input';
+ this.metadataValueInput = '#meta-value-input';
+ this.addMetadataButton = 'button[type="submit"] ::-p-text("Add Metadata")';
+ this.saveMetadataButton = 'button[type="button"] ::-p-text("Save")';
+ }
+
+ async addCustomMetadata(key, value) {
+ // Click the edit metadata button
+ await this.page.waitForSelector(this.editMetadataButton, { visible: true });
+ await this.page.locator(this.editMetadataButton).click();
+
+ // Fill in key and value
+ await this.page.waitForSelector(this.metadataKeyInput, { visible: true });
+ await this.page.type(this.metadataKeyInput, key);
+
+ await this.page.waitForSelector(this.metadataValueInput, { visible: true });
+ await this.page.type(this.metadataValueInput, value);
+
+ // Click add metadata button
+ await this.page.waitForSelector(this.addMetadataButton, { visible: true });
+ await this.page.locator(this.addMetadataButton).click();
+
+ // Click save button
+ await this.page.waitForSelector(this.saveMetadataButton, { visible: true });
+ await this.page.click(this.saveMetadataButton);
+ }
+}
+
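+// Usage sketch (hypothetical puppeteer page, portal host and metadata values):
+//
+// const adminApps = new DeveloperPortalAdminAppsPage(page);
+// await adminApps.navigateTo(`https://${portalHost}/admin/apps`);
+// await adminApps.addCustomMetadata('team', 'dev');
+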
+module.exports = DeveloperPortalAdminAppsPage;
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-subscriptions-page.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-subscriptions-page.js
new file mode 100644
index 0000000000..0b53432f99
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/admin-subscriptions-page.js
@@ -0,0 +1,85 @@
+const BasePage = require("../base");
+
+class DeveloperPortalAdminSubscriptionPage extends BasePage {
+ constructor(page) {
+ super(page);
+
+ // Subscription management selectors
+ this.approveButton = 'button ::-p-text("Approve")';
+ this.confirmApproveButton = 'button[type="submit"] ::-p-text("Approve Subscription")';
+
+ // Metadata selectors
+ this.editMetadataButton = 'button ::-p-text("Edit Custom Metadata")';
+ this.metadataKeyInput = '#meta-key-input';
+ this.metadataValueInput = '#meta-value-input';
+ this.addMetadataButton = 'button[type="submit"] ::-p-text("Add Metadata")';
+ this.saveMetadataButton = 'button[type="button"] ::-p-text("Save")';
+
+ // Rate limit selectors
+ this.editRateLimitButton = 'button ::-p-text("Edit Rate Limit")';
+ this.requestsPerUnitInput = '#rpu-input';
+ this.unitSelect = '#unit-input';
+ this.saveRateLimitButton = 'button[type="submit"] ::-p-text("Save")';
+ }
+
+ async approveSubscription() {
+ // Click the initial approve button
+ await this.page.waitForSelector(this.approveButton, { visible: true });
+ await this.page.locator(this.approveButton).click();
+
+ // Wait for and click the confirm approve button in the modal
+ await this.page.waitForSelector(this.confirmApproveButton, { visible: true });
+ await this.page.locator(this.confirmApproveButton).click();
+
+ // Wait for approve button to become disabled
+ await this.page.waitForFunction(() => {
+ const button = document.querySelector('button[data-disabled="true"]');
+ return button && button.innerText.includes("Approve");
+ }, { timeout: 3000 });
+ }
+
+ async addCustomMetadata(key, value) {
+ // Click the edit metadata button
+ await this.page.waitForSelector(this.editMetadataButton, { visible: true });
+ await this.page.locator(this.editMetadataButton).click();
+
+ // Fill in key and value
+ await this.page.waitForSelector(this.metadataKeyInput, { visible: true });
+ await this.page.type(this.metadataKeyInput, key);
+
+ await this.page.waitForSelector(this.metadataValueInput, { visible: true });
+ await this.page.type(this.metadataValueInput, value);
+
+ // Click add metadata button
+ await this.page.waitForSelector(this.addMetadataButton, { visible: true });
+ await this.page.locator(this.addMetadataButton).click();
+
+ // Click save button
+ await this.page.waitForSelector(this.saveMetadataButton, { visible: true });
+ await this.page.click(this.saveMetadataButton);
+ }
+
+ async setRateLimit(requests, unit) {
+ // Click edit rate limit button
+ await this.page.waitForSelector(this.editRateLimitButton, { visible: true });
+ await this.page.locator(this.editRateLimitButton).click();
+
+ // Set requests per unit
+ await this.page.waitForSelector(this.requestsPerUnitInput, { visible: true });
+ await this.page.type(this.requestsPerUnitInput, requests.toString());
+
+ // Click unit select to open dropdown
+ await this.page.click(this.unitSelect);
+
+    // Select the unit from the dropdown; the selection is positional (two
+    // ArrowDown presses), so the `unit` argument is currently unused
+ await this.page.keyboard.press('ArrowDown');
+ await this.page.keyboard.press('ArrowDown');
+ await this.page.keyboard.press('Enter');
+
+ // Click save button
+ await this.page.waitForSelector(this.saveRateLimitButton, { visible: true });
+ await this.page.click(this.saveRateLimitButton);
+ }
+}
+
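+// Usage sketch (hypothetical puppeteer page): approve the pending subscription,
+// then cap it at 5 requests per the unit picked in the dropdown.
+//
+// const adminSubs = new DeveloperPortalAdminSubscriptionPage(page);
+// await adminSubs.approveSubscription();
+// await adminSubs.setRateLimit(5, 'MINUTE');
+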
+module.exports = DeveloperPortalAdminSubscriptionPage;
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/api-page.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/api-page.js
new file mode 100644
index 0000000000..9322071376
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/api-page.js
@@ -0,0 +1,25 @@
+const BasePage = require("../base");
+
+class DeveloperPortalAPIPage extends BasePage {
+ constructor(page) {
+ super(page)
+
+ // Selectors
+ this.apiBlocksSelector = 'a[href^="/apis/"]';
+ }
+
+ async getAPIProducts() {
+ const apiBlocks = await this.page.evaluate((selector) => {
+ const blocks = document.querySelectorAll(selector);
+
+      return Array.from(blocks).map(block => block.outerHTML);
+ }, this.apiBlocksSelector);
+
+ return apiBlocks;
+ }
+}
+
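+// Usage sketch (hypothetical puppeteer page and portal host): list the API
+// product cards rendered on the APIs page.
+//
+// const apiPage = new DeveloperPortalAPIPage(page);
+// await apiPage.navigateTo(`https://${portalHost}/apis`);
+// const products = await apiPage.getAPIProducts();
+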
+module.exports = DeveloperPortalAPIPage;
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/apps-page.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/apps-page.js
new file mode 100644
index 0000000000..1cbfe085fa
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/apps-page.js
@@ -0,0 +1,190 @@
+const BasePage = require("../base");
+
+class DeveloperPortalAppsPage extends BasePage {
+ constructor(page) {
+ super(page);
+
+ // App creation selectors
+ this.createAppButton = 'button ::-p-text("CREATE NEW APP")';
+ this.teamSelectInput = '#app-team-select';
+ this.appNameInput = '#app-name-input';
+ this.appDescriptionInput = '#app-description-input';
+ this.createAppSubmitButton = 'button[type="submit"] ::-p-text("Create App")';
+
+ // App details and subscription selectors
+ this.detailsLink = 'a::-p-text("DETAILS")';
+ this.addSubscriptionButton = 'div::-p-text("ADD SUBSCRIPTION")';
+ this.apiProductSelect = '#api-product-select';
+ this.createSubscriptionButton = 'button[type="submit"] ::-p-text("Create Subscription")';
+
+ // API Key selectors
+ this.addApiKeyButton = 'div::-p-text("ADD API KEY")';
+ this.apiKeyNameInput = '#api-key-name-input';
+ this.submitApiKeyButton = 'button[type="submit"] ::-p-text("ADD API Key")';
+ this.copyApiKeyButton = 'button[aria-label="Copy this API Key"]';
+ this.closeModalButton = 'button ::-p-text("Close")';
+
+ // OAuth client selectors
+ this.createOAuthClientButton = 'button ::-p-text("Create OAuth Client")';
+ this.confirmOAuthClientButton = 'button[type="submit"] ::-p-text("Create OAuth Client")';
+ this.copyOAuthClientButton = 'button[aria-label="Copy this Client Secret"]';
+ }
+
+ async clickCreateNewApp() {
+ await this.page.locator(this.createAppButton).click();
+ }
+
+ async selectTeam(teamName) {
+ await this.page.waitForSelector(this.teamSelectInput);
+ await this.page.click(this.teamSelectInput);
+
+ const teamOption = `div[role="option"]::-p-text("${teamName}")`;
+ await this.page.waitForSelector(teamOption);
+ await this.page.click(teamOption);
+ }
+
+ async fillAppDetails(name, description) {
+ await this.page.waitForSelector(this.appNameInput, { visible: true });
+ await this.page.type(this.appNameInput, name);
+
+ await this.page.waitForSelector(this.appDescriptionInput, { visible: true });
+ await this.page.type(this.appDescriptionInput, description);
+ }
+
+ async submitAppCreation() {
+ await this.page.locator(this.createAppSubmitButton).click();
+ }
+
+ async createNewApp(teamName, appName, appDescription) {
+ await this.clickCreateNewApp();
+ await this.selectTeam(teamName);
+ await this.fillAppDetails(appName, appDescription);
+ await this.submitAppCreation();
+ }
+
+ async navigateToAppDetails() {
+ await this.page.locator(this.detailsLink).click();
+ }
+
+ async clickAddSubscription() {
+ await this.page.locator(this.addSubscriptionButton).click();
+ }
+
+ async selectApiProduct(productName) {
+ await this.page.waitForSelector(this.apiProductSelect);
+ await this.page.click(this.apiProductSelect);
+
+ const productOption = `div[role="option"]::-p-text("${productName}")`;
+ await this.page.waitForSelector(productOption);
+ await this.page.click(productOption);
+ }
+
+ async submitSubscriptionCreation() {
+ await this.page.locator(this.createSubscriptionButton).click();
+ }
+
+ async createSubscription(apiProductName) {
+ await this.clickAddSubscription();
+ await this.selectApiProduct(apiProductName);
+ await this.submitSubscriptionCreation();
+ }
+
+ async createAppAndSubscribe(teamName, appName, appDescription, apiProductName) {
+ await this.createNewApp(teamName, appName, appDescription);
+ await this.navigateToAppDetails();
+ await this.createSubscription(apiProductName);
+ }
+
+ async createApiKey(keyName) {
+ // Click ADD API KEY button
+ await this.page.locator(this.addApiKeyButton).click();
+
+ // Wait for and fill in the name input
+ await this.page.waitForSelector(this.apiKeyNameInput, { visible: true });
+ await this.page.type(this.apiKeyNameInput, keyName);
+
+ // Click create button
+ await this.page.locator(this.submitApiKeyButton).click();
+
+ // Get API key value from clipboard
+ await this.page.waitForSelector(this.copyApiKeyButton, { visible: true });
+ await this.page.click(this.copyApiKeyButton);
+
+ const clipboardContent = await this.page.evaluate(() => navigator.clipboard.readText());
+
+ // Close the modal
+ await this.page.locator(this.closeModalButton).click();
+
+ return clipboardContent;
+ }
+
+ async createOAuthClient() {
+ // Click initial Create OAuth Client button
+ await this.page.click(this.createOAuthClientButton);
+
+ // Wait for and click confirm button in modal
+ await this.page.waitForSelector(this.confirmOAuthClientButton, { visible: true });
+ await this.page.locator(this.confirmOAuthClientButton).click();
+
+ // Wait for and click copy button
+ await this.page.waitForSelector(this.copyOAuthClientButton, { visible: true });
+ await this.page.click(this.copyOAuthClientButton);
+
+ // Wait for the 'Client ID' label to appear in the modal using page.waitForFunction with XPath
+ await this.page.waitForFunction(() => {
+ return document.evaluate(
+ '//div[text()="Client ID"]',
+ document,
+ null,
+ XPathResult.FIRST_ORDERED_NODE_TYPE,
+ null
+ ).singleNodeValue !== null;
+ });
+
+ // Get the Client ID
+ const clientId = await this.page.evaluate(() => {
+ const clientIdLabel = document.evaluate(
+ '//div[text()="Client ID"]',
+ document,
+ null,
+ XPathResult.FIRST_ORDERED_NODE_TYPE,
+ null
+ ).singleNodeValue;
+
+ if (clientIdLabel && clientIdLabel.nextElementSibling) {
+ return clientIdLabel.nextElementSibling.textContent.trim();
+ }
+ return null;
+ });
+
+ // Get the Client Secret
+ const clientSecret = await this.page.evaluate(() => {
+ const clientSecretLabel = document.evaluate(
+ '//div[text()="Client Secret"]',
+ document,
+ null,
+ XPathResult.FIRST_ORDERED_NODE_TYPE,
+ null
+ ).singleNodeValue;
+
+ if (clientSecretLabel && clientSecretLabel.nextElementSibling) {
+ const button = clientSecretLabel.nextElementSibling;
+ // The secret value is inside the button's inner text
+ const secretText = button.innerText.trim().split('\n')[0];
+ return secretText;
+ }
+ return null;
+ });
+
+ // Close the modal
+ await this.page.click(this.closeModalButton);
+
+ return {
+ clientId,
+ clientSecret,
+ };
+ }
+
+}
+
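+// Usage sketch (hypothetical puppeteer page and values):
+//
+// const appsPage = new DeveloperPortalAppsPage(page);
+// await appsPage.createAppAndSubscribe('dev-team', 'app1', 'demo app', 'Bookinfo');
+// const apiKey = await appsPage.createApiKey('key1');
+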
+module.exports = DeveloperPortalAppsPage;
\ No newline at end of file
diff --git a/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/home-page.js b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/home-page.js
new file mode 100644
index 0000000000..8957364a95
--- /dev/null
+++ b/gloo-gateway/1-18/enterprise/lambda/tests/pages/dev-portal/home-page.js
@@ -0,0 +1,31 @@
+const BasePage = require("../base");
+
+class DeveloperPortalHomePage extends BasePage {
+ constructor(page) {
+ super(page)
+
+ // Selectors
+ this.loginLink = 'a[href="/v1/login"]';
+ this.userHolder = '.userHolder';
+ this.logoutLink = 'a[href="/v1/logout"]';
+ }
+
+ async clickLogin() {
+ await this.page.waitForSelector(this.loginLink, { visible: true });
+ await this.page.click(this.loginLink);
+ }
+
+ async getLoggedInUserName() {
+ await this.page.waitForSelector(this.userHolder, { visible: true });
+
+ const username = await this.page.evaluate(() => {
+ const userHolderDiv = document.querySelector('.userHolder');
+ const text = userHolderDiv ? userHolderDiv.textContent.trim() : '';
+ return text.replace(/