
Commit 2fcd048

e2e test for heterogeneous cluster
1 parent 2e28f8a commit 2fcd048

File tree

4 files changed: +215 −32 lines changed

.github/workflows/e2e_tests.yaml (+4)
@@ -73,6 +73,8 @@ jobs:
 
       - name: Setup and start KinD cluster
         uses: ./common/github-actions/kind
+        with:
+          worker-nodes: 1
 
       - name: Install NVidia GPU operator for KinD
         uses: ./common/github-actions/nvidia-gpu-operator
@@ -111,6 +113,8 @@
           kubectl create clusterrolebinding sdk-user-localqueue-creator --clusterrole=localqueue-creator --user=sdk-user
           kubectl create clusterrole list-secrets --verb=get,list --resource=secrets
           kubectl create clusterrolebinding sdk-user-list-secrets --clusterrole=list-secrets --user=sdk-user
+          kubectl create clusterrole pod-creator --verb=get,list --resource=pods
+          kubectl create clusterrolebinding sdk-user-pod-creator --clusterrole=pod-creator --user=sdk-user
           kubectl config use-context sdk-user
 
       - name: Run e2e tests
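
The two new kubectl lines grant the sdk-user context get and list on pods (despite its name, the pod-creator role only grants read verbs); the e2e tests need this to discover which node each Ray pod was scheduled on via get_pod_node in tests/e2e/support.py below. A minimal sketch of the kind of lookup that permission enables, assuming a kubeconfig whose current context is sdk-user and a hypothetical namespace name:

# Sketch: the pod lookup that the new get/list-on-pods grant permits.
# Assumes the kubeconfig's current context is "sdk-user"; the namespace
# name is hypothetical, the label selector mirrors what the tests use.
from kubernetes import client, config

config.load_kube_config()  # picks up the sdk-user context
v1 = client.CoreV1Api()
pods = v1.list_namespaced_pod("my-test-namespace", label_selector="ray.io/cluster")
for pod in pods.items:
    print(pod.metadata.name, "->", pod.spec.node_name)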
New file (+70)

@@ -0,0 +1,70 @@
+from time import sleep
+import time
+from codeflare_sdk import (
+    Cluster,
+    ClusterConfiguration,
+)
+
+from codeflare_sdk.common.kueue.kueue import list_local_queues
+
+import pytest
+
+from support import *
+
+
+@pytest.mark.kind
+class TestRayLocalInteractiveOauth:
+    def setup_method(self):
+        initialize_kubernetes_client(self)
+
+    def teardown_method(self):
+        delete_namespace(self)
+        delete_kueue_resources(self)
+
+    @pytest.mark.nvidia_gpu
+    def test_heterogeneous_clusters(self):
+        create_namespace(self)
+        create_kueue_resources(self)
+        self.run_heterogeneous_clusters()
+
+    def run_heterogeneous_clusters(
+        self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0
+    ):
+        used_nodes = []
+
+        for flavor in self.resource_flavors:
+            cluster_name = f"test-ray-cluster-li-{flavor[-5:]}"
+            queues = list_local_queues(namespace=self.namespace, flavors=[flavor])
+            queue_name = queues[0]["name"] if queues else None
+            print(f"Using flavor: {flavor}, Queue: {queue_name}")
+            cluster = Cluster(
+                ClusterConfiguration(
+                    name=cluster_name,
+                    namespace=self.namespace,
+                    num_workers=1,
+                    head_cpu_requests="500m",
+                    head_cpu_limits="500m",
+                    head_memory_requests=2,
+                    head_memory_limits=2,
+                    worker_cpu_requests="500m",
+                    worker_cpu_limits=1,
+                    worker_memory_requests=1,
+                    worker_memory_limits=4,
+                    worker_extended_resource_requests={
+                        gpu_resource_name: number_of_gpus
+                    },
+                    write_to_file=True,
+                    verify_tls=False,
+                    local_queue=queue_name,
+                )
+            )
+            cluster.up()
+            time.sleep(2)
+            node_name = get_pod_node(self, self.namespace, cluster_name)
+            print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}")
+            time.sleep(2)
+            assert (
+                node_name not in used_nodes
+            ), f"Node {node_name} was already used by another flavor."
+            used_nodes.append(node_name)
+            cluster.down()
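
The test picks one LocalQueue per ResourceFlavor via list_local_queues(namespace, flavors=[flavor]) and asserts that clusters submitted through different queues land on different nodes. A hedged sketch of that selection logic, with hypothetical queue dicts standing in for the Kueue API response (the real return shape is only known here to include a "name" key, as used above):

# Hedged sketch of the flavor-to-queue selection used in the test.
# The queue dicts below are hypothetical stand-ins for list_local_queues output.
queues_by_flavor = {
    "test-resource-flavor-abcde": [{"name": "test-local-queue-abcde"}],
    "test-resource-flavor-fghij": [{"name": "test-local-queue-fghij"}],
}

def pick_queue(flavor):
    queues = queues_by_flavor.get(flavor, [])
    return queues[0]["name"] if queues else None  # same guard as the test

assert pick_queue("test-resource-flavor-fghij") == "test-local-queue-fghij"
assert pick_queue("unknown-flavor") is None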
New file (+72)

@@ -0,0 +1,72 @@
+from time import sleep
+import time
+from codeflare_sdk import (
+    Cluster,
+    ClusterConfiguration,
+    TokenAuthentication,
+)
+
+from codeflare_sdk.common.kueue.kueue import list_local_queues
+
+import pytest
+
+from support import *
+
+
+@pytest.mark.openshift
+class TestRayLocalInteractiveOauth:
+    def setup_method(self):
+        initialize_kubernetes_client(self)
+
+    def teardown_method(self):
+        delete_namespace(self)
+        delete_kueue_resources(self)
+
+    def test_heterogeneous_clusters(self):
+        create_namespace(self)
+        create_kueue_resources(self)
+        self.run_heterogeneous_clusters()
+
+    def run_heterogeneous_clusters(
+        self, gpu_resource_name="nvidia.com/gpu", number_of_gpus=0
+    ):
+        ray_image = get_ray_image()
+
+        auth = TokenAuthentication(
+            token=run_oc_command(["whoami", "--show-token=true"]),
+            server=run_oc_command(["whoami", "--show-server=true"]),
+            skip_tls=True,
+        )
+        auth.login()
+
+        used_nodes = []
+
+        for flavor in self.resource_flavors:
+            cluster_name = f"test-ray-cluster-li-{flavor[-5:]}"
+            queues = list_local_queues(namespace=self.namespace, flavors=[flavor])
+            queue_name = queues[0]["name"] if queues else None
+            print(f"Using flavor: {flavor}, Queue: {queue_name}")
+            cluster = Cluster(
+                ClusterConfiguration(
+                    namespace=self.namespace,
+                    name=cluster_name,
+                    num_workers=1,
+                    worker_cpu_requests=1,
+                    worker_cpu_limits=1,
+                    worker_memory_requests=1,
+                    worker_memory_limits=4,
+                    image=ray_image,
+                    verify_tls=False,
+                    local_queue=queue_name,
+                )
+            )
+            cluster.up()
+            time.sleep(5)
+            node_name = get_pod_node(self, self.namespace, cluster_name)
+            print(f"Cluster {cluster_name}-{flavor} is running on node: {node_name}")
+            time.sleep(5)
+            assert (
+                node_name not in used_nodes
+            ), f"Node {node_name} was already used by another flavor."
+            used_nodes.append(node_name)
+            cluster.down()
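
The OpenShift variant differs from the KinD one mainly in authentication: it logs in with the current oc user's token and uses the image returned by get_ray_image(). run_oc_command is a support.py helper whose body is not part of this diff; a plausible sketch, labeled hypothetical since the real helper may differ:

# Hypothetical sketch of run_oc_command; the real helper lives in
# tests/e2e/support.py and is not shown in this commit.
import subprocess

def run_oc_command(args):
    # Run `oc <args>` and return trimmed stdout, or None if oc fails.
    try:
        return subprocess.check_output(["oc"] + args, text=True).strip()
    except subprocess.CalledProcessError:
        return None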

tests/e2e/support.py (+69 −32)
@@ -65,19 +65,30 @@ def create_namespace(self):
         return RuntimeError(e)
 
 
-def create_new_resource_flavor(self):
-    self.resource_flavor = f"test-resource-flavor-{random_choice()}"
-    create_resource_flavor(self, self.resource_flavor)
+def create_new_resource_flavor(self, num_flavors=2):
+    self.resource_flavors = []
+    for i in range(num_flavors):
+        default = i < 1
+        resource_flavor = f"test-resource-flavor-{random_choice()}"
+        create_resource_flavor(self, resource_flavor, default)
+        self.resource_flavors.append(resource_flavor)
 
 
-def create_new_cluster_queue(self):
-    self.cluster_queue = f"test-cluster-queue-{random_choice()}"
-    create_cluster_queue(self, self.cluster_queue, self.resource_flavor)
+def create_new_cluster_queue(self, num_queues=2):
+    self.cluster_queues = []
+    for i in range(num_queues):
+        cluster_queue_name = f"test-cluster-queue-{random_choice()}"
+        create_cluster_queue(self, cluster_queue_name, self.resource_flavors[i])
+        self.cluster_queues.append(cluster_queue_name)
 
 
-def create_new_local_queue(self):
-    self.local_queue = f"test-local-queue-{random_choice()}"
-    create_local_queue(self, self.cluster_queue, self.local_queue)
+def create_new_local_queue(self, num_queues=2):
+    self.local_queues = []
+    for i in range(num_queues):
+        is_default = i == 0
+        local_queue_name = f"test-local-queue-{random_choice()}"
+        create_local_queue(self, self.cluster_queues[i], local_queue_name, is_default)
+        self.local_queues.append(local_queue_name)
 
 
 def create_namespace_with_name(self, namespace_name):
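
Each helper now creates one resource per index and records the names on self; only index 0 is marked default (the guards default = i < 1 and is_default = i == 0 are equivalent). A sketch of the paired naming this produces for the default num_flavors=2, with hypothetical suffixes in place of random_choice():

# Sketch of the paired Kueue resources built by the helpers above.
# The suffixes are hypothetical; random_choice() supplies them in real runs.
suffixes = ["abcde", "fghij"]
for i, s in enumerate(suffixes):
    flavor = f"test-resource-flavor-{s}"
    cluster_queue = f"test-cluster-queue-{s}"
    local_queue = f"test-local-queue-{s}"
    print(f"{local_queue} -> {cluster_queue} -> {flavor} (default={i == 0})")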
@@ -132,7 +143,7 @@ def create_cluster_queue(self, cluster_queue, flavor):
                         {"name": "memory", "nominalQuota": "36Gi"},
                         {"name": "nvidia.com/gpu", "nominalQuota": 1},
                     ],
-                }
+                },
             ],
         }
     ],
@@ -161,11 +172,21 @@ def create_cluster_queue(self, cluster_queue, flavor):
     self.cluster_queue = cluster_queue
 
 
-def create_resource_flavor(self, flavor):
+def create_resource_flavor(self, flavor, default=True):
     resource_flavor_json = {
         "apiVersion": "kueue.x-k8s.io/v1beta1",
         "kind": "ResourceFlavor",
         "metadata": {"name": flavor},
+        "spec": {
+            "nodeLabels": {"worker-1" if default else "ingress-ready": "true"},
+            "tolerations": [
+                {
+                    "key": "node-role.kubernetes.io/control-plane",
+                    "operator": "Exists",
+                    "effect": "NoSchedule",
+                }
+            ],
+        },
     }
 
     try:
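
The nodeLabels entry uses a conditional dict key: the default flavor targets nodes labeled worker-1=true, every other flavor targets ingress-ready=true, which, given the accompanying control-plane toleration, evidently selects the KinD control-plane node. A quick illustration of that expression:

# The conditional-key expression from create_resource_flavor above:
# the default flavor targets a worker node, the rest the control plane.
for default in (True, False):
    node_labels = {"worker-1" if default else "ingress-ready": "true"}
    print(default, node_labels)
# True  {'worker-1': 'true'}
# False {'ingress-ready': 'true'}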
@@ -190,14 +211,14 @@ def create_resource_flavor(self, flavor):
     self.resource_flavor = flavor
 
 
-def create_local_queue(self, cluster_queue, local_queue):
+def create_local_queue(self, cluster_queue, local_queue, is_default=True):
     local_queue_json = {
         "apiVersion": "kueue.x-k8s.io/v1beta1",
         "kind": "LocalQueue",
         "metadata": {
             "namespace": self.namespace,
             "name": local_queue,
-            "annotations": {"kueue.x-k8s.io/default-queue": "true"},
+            "annotations": {"kueue.x-k8s.io/default-queue": str(is_default).lower()},
         },
         "spec": {"clusterQueue": cluster_queue},
     }
@@ -235,25 +256,41 @@ def create_kueue_resources(self):
 
 def delete_kueue_resources(self):
     # Delete if given cluster-queue exists
-    try:
-        self.custom_api.delete_cluster_custom_object(
-            group="kueue.x-k8s.io",
-            plural="clusterqueues",
-            version="v1beta1",
-            name=self.cluster_queue,
-        )
-        print(f"\n'{self.cluster_queue}' cluster-queue deleted")
-    except Exception as e:
-        print(f"\nError deleting cluster-queue '{self.cluster_queue}' : {e}")
+    for cq in self.cluster_queues:
+        try:
+            self.custom_api.delete_cluster_custom_object(
+                group="kueue.x-k8s.io",
+                plural="clusterqueues",
+                version="v1beta1",
+                name=cq,
+            )
+            print(f"\n'{cq}' cluster-queue deleted")
+        except Exception as e:
+            print(f"\nError deleting cluster-queue '{cq}' : {e}")
 
     # Delete if given resource-flavor exists
-    try:
-        self.custom_api.delete_cluster_custom_object(
-            group="kueue.x-k8s.io",
-            plural="resourceflavors",
-            version="v1beta1",
-            name=self.resource_flavor,
-        )
-        print(f"'{self.resource_flavor}' resource-flavor deleted")
-    except Exception as e:
-        print(f"\nError deleting resource-flavor '{self.resource_flavor}' : {e}")
+    for flavor in self.resource_flavors:
+        try:
+            self.custom_api.delete_cluster_custom_object(
+                group="kueue.x-k8s.io",
+                plural="resourceflavors",
+                version="v1beta1",
+                name=flavor,
+            )
+            print(f"'{flavor}' resource-flavor deleted")
+        except Exception as e:
+            print(f"\nError deleting resource-flavor '{flavor}': {e}")
+
+
+def get_pod_node(self, namespace, name):
+    label_selector = f"ray.io/cluster={name}"
+    pods = self.api_instance.list_namespaced_pod(
+        namespace, label_selector=label_selector
+    )
+    if not pods.items:
+        raise ValueError(
+            f"Unable to retrieve node name for pod '{name}' in namespace '{namespace}'"
+        )
+    pod = pods.items[0]
+    node_name = pod.spec.node_name
+    return node_name
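
get_pod_node returns the node hosting the first pod labeled ray.io/cluster=<name>; the heterogeneous-cluster tests call it once per flavor and assert the nodes differ. A follow-up sketch for checking the flavor-to-node mapping directly, assuming a reachable cluster and the node labels set by create_resource_flavor above:

# Hedged sketch: list the nodes carrying each flavor's target label.
# Assumes kubeconfig access; label keys come from create_resource_flavor.
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()
for label in ("worker-1", "ingress-ready"):
    nodes = v1.list_node(label_selector=f"{label}=true")
    print(label, "->", [n.metadata.name for n in nodes.items])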
