
Commit 65d190f

e2e test for heterogeneous cluster

1 parent: 47d41ae

File tree: 4 files changed (+236, -36)

- .github/workflows/e2e_tests.yaml (+4)
- KinD heterogeneous-cluster test (new file, +70)
- OpenShift heterogeneous-cluster test (new file, +72)
- tests/e2e/support.py (+90, -36)

.github/workflows/e2e_tests.yaml (+4)

@@ -64,6 +64,8 @@ jobs:
 
       - name: Setup and start KinD cluster
         uses: ./common/github-actions/kind
+        with:
+          worker-nodes: 1
 
       - name: Install NVidia GPU operator for KinD
         uses: ./common/github-actions/nvidia-gpu-operator

@@ -102,6 +104,8 @@ jobs:
           kubectl create clusterrolebinding sdk-user-localqueue-creator --clusterrole=localqueue-creator --user=sdk-user
           kubectl create clusterrole list-secrets --verb=get,list --resource=secrets
           kubectl create clusterrolebinding sdk-user-list-secrets --clusterrole=list-secrets --user=sdk-user
+          kubectl create clusterrole pod-creator --verb=get,list --resource=pods
+          kubectl create clusterrolebinding sdk-user-pod-creator --clusterrole=pod-creator --user=sdk-user
           kubectl config use-context sdk-user
 
       - name: Run e2e tests
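Two notes on this change: worker-nodes: 1 gives the KinD cluster a second schedulable node (control plane plus one worker), which the heterogeneous test needs so each resource flavor can claim a distinct node; and the new pod-creator role (despite its name it only grants get/list on pods) lets the restricted sdk-user context read pod placement via the new get_pod_node helper. As a rough illustration, the same role and binding could be created from Python with the kubernetes client; this is a sketch, not part of the commit:

# Sketch only: equivalent of the two new kubectl commands, using the
# kubernetes Python client with plain-dict bodies (not part of the commit).
from kubernetes import client, config

config.load_kube_config()
rbac = client.RbacAuthorizationV1Api()

# kubectl create clusterrole pod-creator --verb=get,list --resource=pods
rbac.create_cluster_role(
    body={
        "metadata": {"name": "pod-creator"},
        "rules": [
            {"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "list"]}
        ],
    }
)

# kubectl create clusterrolebinding sdk-user-pod-creator --clusterrole=pod-creator --user=sdk-user
rbac.create_cluster_role_binding(
    body={
        "metadata": {"name": "sdk-user-pod-creator"},
        "roleRef": {
            "apiGroup": "rbac.authorization.k8s.io",
            "kind": "ClusterRole",
            "name": "pod-creator",
        },
        "subjects": [
            {
                "kind": "User",
                "name": "sdk-user",
                "apiGroup": "rbac.authorization.k8s.io",
            }
        ],
    }
)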
KinD heterogeneous-cluster test (new file, +70)

import time

from codeflare_sdk import (
    Cluster,
    ClusterConfiguration,
)

from codeflare_sdk.common.kueue.kueue import list_local_queues

import pytest

from support import *


@pytest.mark.skip(reason="Skipping heterogeneous cluster KinD test")
class TestHeterogeneousClustersKind:
    def setup_method(self):
        initialize_kubernetes_client(self)

    def teardown_method(self):
        delete_namespace(self)
        delete_kueue_resources(self)

    @pytest.mark.nvidia_gpu
    def test_heterogeneous_clusters(self):
        create_namespace(self)
        create_kueue_resources(self, 2)
        self.run_heterogeneous_clusters()

    def run_heterogeneous_clusters(
        self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0
    ):
        used_nodes = []

        # One Ray cluster per resource flavor; each flavor's queue pins
        # workloads to a different node pool, so no node may repeat.
        for flavor in self.resource_flavors:
            cluster_name = f"test-ray-cluster-li-{flavor[-5:]}"
            queues = list_local_queues(namespace=self.namespace, flavors=[flavor])
            queue_name = queues[0]["name"] if queues else None
            print(f"Using flavor: {flavor}, Queue: {queue_name}")
            cluster = Cluster(
                ClusterConfiguration(
                    name=cluster_name,
                    namespace=self.namespace,
                    num_workers=1,
                    head_cpu_requests="500m",
                    head_cpu_limits="500m",
                    head_memory_requests=2,
                    head_memory_limits=2,
                    worker_cpu_requests="500m",
                    worker_cpu_limits=1,
                    worker_memory_requests=1,
                    worker_memory_limits=4,
                    worker_extended_resource_requests={
                        gpu_resource_name: number_of_gpus
                    },
                    write_to_file=True,
                    verify_tls=False,
                    local_queue=queue_name,
                )
            )
            cluster.up()
            time.sleep(2)
            node_name = get_pod_node(self, self.namespace, cluster_name)
            print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}")
            time.sleep(2)
            assert (
                node_name not in used_nodes
            ), f"Node {node_name} was already used by another flavor."
            used_nodes.append(node_name)
            cluster.down()
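The queue selection above leans on list_local_queues returning one dict per LocalQueue whose ClusterQueue admits the given flavor, with at least a "name" key; anything beyond that shape is an assumption here. The logic in isolation:

# Sketch of the selection logic; the return shape of list_local_queues
# beyond the "name" key is an assumption for illustration.
queues = [{"name": "test-local-queue-ab1cd"}]  # hypothetical result for one flavor
queue_name = queues[0]["name"] if queues else None
# With queue_name=None, Kueue's default LocalQueue (annotated in
# support.py below) presumably picks the workload up instead.
assert queue_name == "test-local-queue-ab1cd"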
OpenShift heterogeneous-cluster test (new file, +72)

import time

from codeflare_sdk import (
    Cluster,
    ClusterConfiguration,
    TokenAuthentication,
)

from codeflare_sdk.common.kueue.kueue import list_local_queues

import pytest

from support import *


@pytest.mark.openshift
class TestHeterogeneousClustersOauth:
    def setup_method(self):
        initialize_kubernetes_client(self)

    def teardown_method(self):
        delete_namespace(self)
        delete_kueue_resources(self)

    def test_heterogeneous_clusters(self):
        create_namespace(self)
        create_kueue_resources(self, 2)
        self.run_heterogeneous_clusters()

    def run_heterogeneous_clusters(
        self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0
    ):
        ray_image = get_ray_image()

        # Authenticate against the cluster with the current oc session.
        auth = TokenAuthentication(
            token=run_oc_command(["whoami", "--show-token=true"]),
            server=run_oc_command(["whoami", "--show-server=true"]),
            skip_tls=True,
        )
        auth.login()

        used_nodes = []

        for flavor in self.resource_flavors:
            cluster_name = f"test-ray-cluster-li-{flavor[-5:]}"
            queues = list_local_queues(namespace=self.namespace, flavors=[flavor])
            queue_name = queues[0]["name"] if queues else None
            print(f"Using flavor: {flavor}, Queue: {queue_name}")
            cluster = Cluster(
                ClusterConfiguration(
                    namespace=self.namespace,
                    name=cluster_name,
                    num_workers=1,
                    worker_cpu_requests=1,
                    worker_cpu_limits=1,
                    worker_memory_requests=1,
                    worker_memory_limits=4,
                    image=ray_image,
                    verify_tls=False,
                    local_queue=queue_name,
                )
            )
            cluster.up()
            time.sleep(5)
            node_name = get_pod_node(self, self.namespace, cluster_name)
            print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}")
            time.sleep(5)
            assert (
                node_name not in used_nodes
            ), f"Node {node_name} was already used by another flavor."
            used_nodes.append(node_name)
            cluster.down()
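run_oc_command comes from the shared test helpers (it is not part of this diff); presumably it shells out to oc and returns stdout, along these lines:

# Assumed shape of the run_oc_command helper (defined in the e2e support
# code, not in this diff). A sketch, not the actual source.
import subprocess

def run_oc_command(args):
    # e.g. run_oc_command(["whoami", "--show-token=true"]) -> session token
    return subprocess.run(
        ["oc"] + args, capture_output=True, text=True, check=True
    ).stdout.strip()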

tests/e2e/support.py (+90, -36)
@@ -1,3 +1,4 @@
+import json
 import os
 import random
 import string
@@ -65,19 +66,30 @@ def create_namespace(self):
         return RuntimeError(e)
 
 
-def create_new_resource_flavor(self):
-    self.resource_flavor = f"test-resource-flavor-{random_choice()}"
-    create_resource_flavor(self, self.resource_flavor)
+def create_new_resource_flavor(self, num_flavors):
+    self.resource_flavors = []
+    for i in range(num_flavors):
+        default = i == 0
+        resource_flavor = f"test-resource-flavor-{random_choice()}"
+        create_resource_flavor(self, resource_flavor, default)
+        self.resource_flavors.append(resource_flavor)
 
 
-def create_new_cluster_queue(self):
-    self.cluster_queue = f"test-cluster-queue-{random_choice()}"
-    create_cluster_queue(self, self.cluster_queue, self.resource_flavor)
+def create_new_cluster_queue(self, num_queues):
+    self.cluster_queues = []
+    for i in range(num_queues):
+        cluster_queue_name = f"test-cluster-queue-{random_choice()}"
+        create_cluster_queue(self, cluster_queue_name, self.resource_flavors[i])
+        self.cluster_queues.append(cluster_queue_name)
 
 
-def create_new_local_queue(self):
-    self.local_queue = f"test-local-queue-{random_choice()}"
-    create_local_queue(self, self.cluster_queue, self.local_queue)
+def create_new_local_queue(self, num_queues):
+    self.local_queues = []
+    for i in range(num_queues):
+        is_default = i == 0
+        local_queue_name = f"test-local-queue-{random_choice()}"
+        create_local_queue(self, self.cluster_queues[i], local_queue_name, is_default)
+        self.local_queues.append(local_queue_name)
 
 
 def create_namespace_with_name(self, namespace_name):
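After this change the fixtures line up by index: flavor i feeds cluster queue i, which backs local queue i, and only index 0 is the default. A toy illustration (real names carry random five-character suffixes):

# Toy model of the index pairing create_kueue_resources(self, 2) sets up.
num = 2
resource_flavors = [f"test-resource-flavor-{i}" for i in range(num)]
cluster_queues = [f"test-cluster-queue-{i}" for i in range(num)]  # queue i uses flavor i
local_queues = [f"test-local-queue-{i}" for i in range(num)]      # index 0 is the default

for i in range(num):
    print(f"{local_queues[i]} -> {cluster_queues[i]} -> {resource_flavors[i]}")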
@@ -132,7 +144,7 @@ def create_cluster_queue(self, cluster_queue, flavor):
                         {"name": "memory", "nominalQuota": "36Gi"},
                         {"name": "nvidia.com/gpu", "nominalQuota": 1},
                     ],
-                }
+                },
             ],
         }
     ],
@@ -161,11 +173,33 @@ def create_cluster_queue(self, cluster_queue, flavor):
     self.cluster_queue = cluster_queue
 
 
-def create_resource_flavor(self, flavor):
+def create_resource_flavor(self, flavor, default=True):
+    worker_label, worker_value = os.getenv("WORKER_LABEL", "worker-1=true").split("=")
+    control_label, control_value = os.getenv(
+        "CONTROL_LABEL", "ingress-ready=true"
+    ).split("=")
+    toleration_key = os.getenv(
+        "TOLERATION_KEY", "node-role.kubernetes.io/control-plane"
+    )
+
+    node_labels = (
+        {worker_label: worker_value} if default else {control_label: control_value}
+    )
+
     resource_flavor_json = {
         "apiVersion": "kueue.x-k8s.io/v1beta1",
         "kind": "ResourceFlavor",
         "metadata": {"name": flavor},
+        "spec": {
+            "nodeLabels": node_labels,
+            "tolerations": [
+                {
+                    "key": toleration_key,
+                    "operator": "Exists",
+                    "effect": "NoSchedule",
+                }
+            ],
+        },
     }
 
     try:
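With the defaults above, the default flavor pins pods to nodes labeled worker-1=true, while non-default flavors target ingress-ready=true (the KinD control-plane node) and tolerate its NoSchedule taint; that is what turns a one-worker KinD cluster into two distinct node pools. For a non-default flavor the helper therefore builds, assuming no overriding env vars and a hypothetical flavor name:

# What resource_flavor_json evaluates to for default=False with the
# helper's fallback env values (flavor name is hypothetical).
resource_flavor_json = {
    "apiVersion": "kueue.x-k8s.io/v1beta1",
    "kind": "ResourceFlavor",
    "metadata": {"name": "test-resource-flavor-xy2zw"},
    "spec": {
        "nodeLabels": {"ingress-ready": "true"},  # KinD control-plane label
        "tolerations": [
            {
                "key": "node-role.kubernetes.io/control-plane",
                "operator": "Exists",
                "effect": "NoSchedule",
            }
        ],
    },
}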
@@ -190,14 +224,14 @@ def create_resource_flavor(self, flavor):
     self.resource_flavor = flavor
 
 
-def create_local_queue(self, cluster_queue, local_queue):
+def create_local_queue(self, cluster_queue, local_queue, is_default=True):
     local_queue_json = {
         "apiVersion": "kueue.x-k8s.io/v1beta1",
         "kind": "LocalQueue",
         "metadata": {
             "namespace": self.namespace,
             "name": local_queue,
-            "annotations": {"kueue.x-k8s.io/default-queue": "true"},
+            "annotations": {"kueue.x-k8s.io/default-queue": str(is_default).lower()},
         },
         "spec": {"clusterQueue": cluster_queue},
     }
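Only the first LocalQueue is now annotated kueue.x-k8s.io/default-queue: "true"; the rest get "false", so a namespace still has exactly one default queue for workloads that do not name one.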
@@ -226,34 +260,54 @@ def create_local_queue(self, cluster_queue, local_queue):
     self.local_queue = local_queue
 
 
-def create_kueue_resources(self):
+def create_kueue_resources(self, resource_amount=1):
     print("creating Kueue resources ...")
-    create_new_resource_flavor(self)
-    create_new_cluster_queue(self)
-    create_new_local_queue(self)
+    create_new_resource_flavor(self, resource_amount)
+    create_new_cluster_queue(self, resource_amount)
+    create_new_local_queue(self, resource_amount)
 
 
 def delete_kueue_resources(self):
     # Delete if given cluster-queue exists
-    try:
-        self.custom_api.delete_cluster_custom_object(
-            group="kueue.x-k8s.io",
-            plural="clusterqueues",
-            version="v1beta1",
-            name=self.cluster_queue,
-        )
-        print(f"\n'{self.cluster_queue}' cluster-queue deleted")
-    except Exception as e:
-        print(f"\nError deleting cluster-queue '{self.cluster_queue}' : {e}")
+    for cq in self.cluster_queues:
+        try:
+            self.custom_api.delete_cluster_custom_object(
+                group="kueue.x-k8s.io",
+                plural="clusterqueues",
+                version="v1beta1",
+                name=cq,
+            )
+            print(f"\n'{cq}' cluster-queue deleted")
+        except Exception as e:
+            print(f"\nError deleting cluster-queue '{cq}' : {e}")
 
     # Delete if given resource-flavor exists
-    try:
-        self.custom_api.delete_cluster_custom_object(
-            group="kueue.x-k8s.io",
-            plural="resourceflavors",
-            version="v1beta1",
-            name=self.resource_flavor,
-        )
-        print(f"'{self.resource_flavor}' resource-flavor deleted")
-    except Exception as e:
-        print(f"\nError deleting resource-flavor '{self.resource_flavor}' : {e}")
+    for flavor in self.resource_flavors:
+        try:
+            self.custom_api.delete_cluster_custom_object(
+                group="kueue.x-k8s.io",
+                plural="resourceflavors",
+                version="v1beta1",
+                name=flavor,
+            )
+            print(f"'{flavor}' resource-flavor deleted")
+        except Exception as e:
+            print(f"\nError deleting resource-flavor '{flavor}': {e}")
+
+
+def get_pod_node(self, namespace, name):
+    label_selector = f"ray.io/cluster={name}"
+    pods = self.api_instance.list_namespaced_pod(
+        namespace, label_selector=label_selector
+    )
+    if not pods.items:
+        raise ValueError(
+            f"Unable to retrieve node name for pod '{name}' in namespace '{namespace}'"
+        )
+    pod = pods.items[0]
+    node_name = pod.spec.node_name
+    if node_name is None:
+        raise ValueError(
+            f"No node selected for pod '{name}' in namespace '{namespace}'"
+        )
+    return node_name
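get_pod_node resolves which node a RayCluster's pods landed on via the ray.io/cluster=<name> label that KubeRay puts on its pods. Outside the test harness the same lookup looks roughly like this (namespace and cluster name are placeholders):

# Standalone sketch of the get_pod_node lookup (the test version uses
# self.api_instance set up by initialize_kubernetes_client).
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()

pods = v1.list_namespaced_pod(
    "my-namespace", label_selector="ray.io/cluster=test-ray-cluster-li-ab1cd"
)
for pod in pods.items:
    print(pod.metadata.name, "->", pod.spec.node_name)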
