21
21
22
22
import logging
23
23
import time
24
+ from collections .abc import Iterable
24
25
25
26
from marvin .cloudstackTestCase import cloudstackTestCase
27
+ from marvin .cloudstackAPI import (migrateSystemVm , listRouters , listSystemVms )
26
28
from marvin .lib .base import (Cluster , Configurations , Host , Network , NetworkOffering , ServiceOffering , VirtualMachine ,
27
29
Zone )
28
30
from marvin .lib .common import (get_domain , get_zone , get_template )
@@ -98,6 +100,41 @@ def setUpClass(cls):
98
100
)
99
101
cls ._cleanup .append (cls .network )
100
102
103
+ cls .migrateSvms (cls .cluster )
104
+
105
@classmethod
def migrateSvms(cls, cluster):
    """
    Migrate every system VM and virtual router onto host[0].

    For testing the balanced algorithm we must make sure there is at least
    as much free memory on host[1] as on host[0]. As a crude measure we
    migrate any and all system VMs to host[0] before the testing commences.

    :param cluster: the cluster to check (NOTE(review): currently unused by
        the body -- kept for interface compatibility; confirm whether it
        should scope the system-VM/router listings)
    :return: None
    """
    svm_ids = []

    # Collect system VMs that are not already on host[0]. The API may
    # return a non-iterable (e.g. None) when there are no results, hence
    # the isinstance guard.
    list_svms_cmd = listSystemVms.listSystemVmsCmd()
    svms = cls.apiclient.listSystemVms(list_svms_cmd)
    if isinstance(svms, Iterable):
        for svm in svms:
            if svm.hostid != cls.hosts[0].id:
                svm_ids.append(svm.id)

    # Collect virtual routers that are not already on host[0].
    list_routers_cmd = listRouters.listRoutersCmd()
    routers = cls.apiclient.listRouters(list_routers_cmd)
    if isinstance(routers, Iterable):
        for router in routers:
            if router.hostid != cls.hosts[0].id:
                svm_ids.append(router.id)

    # Lazy %-style args so the message is only formatted when DEBUG is on.
    cls.logger.debug('system vms and routers to migrate -- %d', len(svm_ids))

    # Reuse one command object; only the vm id changes between migrations.
    migrate_cmd = migrateSystemVm.migrateSystemVmCmd()
    migrate_cmd.hostId = cls.hosts[0].id
    # 'vm_id' instead of 'id' so the builtin is not shadowed.
    for vm_id in svm_ids:
        migrate_cmd.virtualmachineid = vm_id
        response = cls.apiclient.migrateSystemVm(migrate_cmd)
        cls.logger.debug('migrated %s', response)
136
+
137
+
101
138
@classmethod
def tearDownClass(cls):
    """Class-level cleanup: delegate to the base test case's tearDownClass."""
    super(TestClusterDRS, cls).tearDownClass()
@@ -111,7 +148,6 @@ def setUp(self):
111
148
def tearDown(self):
    """Per-test cleanup: delegate to the base test case's tearDown."""
    super(TestClusterDRS, self).tearDown()
113
150
114
- @classmethod
115
151
def get_vm_host_id (cls , vm_id ):
116
152
list_vms = VirtualMachine .list (cls .apiclient , id = vm_id )
117
153
vm = list_vms [0 ]
@@ -188,8 +224,8 @@ def test_01_condensed_drs_algorithm(self):
188
224
serviceofferingid = self .service_offering .id ,
189
225
templateid = self .template .id , zoneid = self .zone .id ,
190
226
networkids = self .network .id , hostid = self .hosts [1 ].id )
191
- vm_2_host_id = self .get_vm_host_id (self .virtual_machine_2 .id )
192
227
self .cleanup .append (self .virtual_machine_2 )
228
+ vm_2_host_id = self .get_vm_host_id (self .virtual_machine_2 .id )
193
229
194
230
self .assertNotEqual (vm_1_host_id , vm_2_host_id , msg = "Both VMs should be on different hosts" )
195
231
self .wait_for_vm_start (self .virtual_machine_1 )
@@ -216,13 +252,15 @@ def test_01_condensed_drs_algorithm(self):
216
252
217
253
@attr (tags = ["advanced" ], required_hardware = "false" )
218
254
def test_02_balanced_drs_algorithm (self ):
219
- """ Verify DRS algorithm - balanced"""
220
-
221
- # 1. Deploy vm-1 on host 1
222
- # 2. Deploy vm-2 on host 2
223
- # 3. Execute DRS to move all VMs on different hosts
255
+ """
256
+ Verify DRS algorithm - balanced
224
257
258
+ # 1. Deploy vm-1 on host 1
259
+ # 2. Deploy vm-2 on host 2
260
+ # 3. Execute DRS to move all VMs on different hosts
261
+ """
225
262
self .logger .debug ("=== Running test_02_balanced_drs_algorithm ===" )
263
+
226
264
# 1. Deploy vm-1 on host 1
227
265
self .services ["virtual_machine" ]["name" ] = "virtual-machine-1"
228
266
self .services ["virtual_machine" ]["displayname" ] = "virtual-machine-1"
@@ -240,8 +278,8 @@ def test_02_balanced_drs_algorithm(self):
240
278
serviceofferingid = self .service_offering .id ,
241
279
templateid = self .template .id , zoneid = self .zone .id ,
242
280
networkids = self .network .id , hostid = self .hosts [0 ].id )
243
- vm_2_host_id = self .get_vm_host_id (self .virtual_machine_2 .id )
244
281
self .cleanup .append (self .virtual_machine_2 )
282
+ vm_2_host_id = self .get_vm_host_id (self .virtual_machine_2 .id )
245
283
246
284
self .assertEqual (vm_1_host_id , vm_2_host_id , msg = "Both VMs should be on same hosts" )
247
285
self .wait_for_vm_start (self .virtual_machine_1 )
@@ -256,12 +294,15 @@ def test_02_balanced_drs_algorithm(self):
256
294
migration ["virtualmachineid" ]: migration ["destinationhostid" ] for migration in migrations
257
295
}
258
296
259
- self .assertEqual (len (vm_to_dest_host_map ), 1 , msg = "DRS plan should have 1 migrations" )
297
+ # this is one if no svm is considered to be migrated, it might be higher
298
+ self .assertTrue (len (vm_to_dest_host_map ) >= 1 , msg = "DRS plan should have at least 1 migrations" )
260
299
261
300
executed_plan = self .cluster .executeDrsPlan (self .apiclient , vm_to_dest_host_map )
262
301
self .wait_for_plan_completion (executed_plan )
263
302
264
303
vm_1_host_id = self .get_vm_host_id (self .virtual_machine_1 .id )
265
304
vm_2_host_id = self .get_vm_host_id (self .virtual_machine_2 .id )
266
305
267
- self .assertNotEqual (vm_1_host_id , vm_2_host_id , msg = "Both VMs should be on different hosts" )
306
+ self .assertTrue (
307
+ vm_1_host_id != self .virtual_machine_1 .hostid or vm_2_host_id != self .virtual_machine_2 .hostid ,
308
+ msg = "At least one VM should have been migrated to a different host" )
0 commit comments