Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 89 additions & 0 deletions KVM/qemu/multi_vms.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,50 @@
auto_cpu_model = "no"
cpu_model = host
variants:
- 1vm:
vms = "vm1"
variants:
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
- 1td:
vms = "vm1"
machine_type_extra_params = "kernel-irqchip=split"
vm_secure_guest_type = tdx
variants:
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
- 1td_1vm:
machine_type_extra_params_vm2 = "kernel-irqchip=split"
vm_secure_guest_type_vm2 = tdx
variants:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this patch remove the original case, multi_vms.1td_1vm?

Same comments for below cases: 4td, 4vm.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, thanks, updated, added default for them.

- @default:
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
divide_host_mem_limit_by_vm_count = yes
- 2td:
machine_type_extra_params = "kernel-irqchip=split"
vm_secure_guest_type = tdx
Expand All @@ -38,10 +79,34 @@
- 1G_2G:
mem_vm1 = 1024
mem_vm2 = 2048
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
divide_host_mem_limit_by_vm_count = yes
- 4td:
machine_type_extra_params = "kernel-irqchip=split"
vm_secure_guest_type = tdx
vms = "vm1 vm2 vm3 vm4"
variants:
- @default:
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
divide_host_mem_limit_by_vm_count = yes
- 2vm:
variants:
- 2vcpu:
Expand All @@ -66,5 +131,29 @@
- 1G_2G:
mem_vm1 = 1024
mem_vm2 = 2048
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
divide_host_mem_limit_by_vm_count = yes
- 4vm:
vms = "vm1 vm2 vm3 vm4"
variants:
- @default:
- from1G_toall:
type = multi_vms_multi_boot
start_vm = no
start_mem = 1024
mem_generator = random_32g_window
random_min = 0
random_max = 64
random_unit = 511
samples_per_window = 2
window_size = 32768
divide_host_mem_limit_by_vm_count = yes
178 changes: 178 additions & 0 deletions KVM/qemu/tests/multi_vms_multi_boot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,178 @@
#!/usr/bin/python3

# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2026 Intel Corporation

import logging
import random

from virttest import env_process
from virttest import error_context
from virttest import utils_misc
from provider import dmesg_router # pylint: disable=unused-import

LOG = logging.getLogger("avocado.test")


def _calc_default_mem_series(params, vm_names):
    """
    Calculate a default memory series (in MB) based on the generator type.

    Supported generators:
    - linear: increments by mem_step from start_mem to memory_limit
    - random_32g_window: random samples within sliding fixed-size windows

    :param params: test parameters object (provides get/get_numeric/objects)
    :param vm_names: list of VM names that share the host memory budget
    :return: sorted list of unique memory sizes in MB (may be empty)
    """
    # Supported: "linear" (default fallback) or "random_32g_window" (used by current cfg)
    mem_generator = params.get("mem_generator", "linear")
    # Default 1024MB; override via cfg "start_mem = <value>"
    start_mem = params.get_numeric("start_mem", 1024)
    # Only used by the linear generator; ignored by random_32g_window
    mem_step = params.get_numeric("mem_step", 128)
    # Optional cap on memory_limit; if not set in cfg, defaults to host free memory
    max_mem_raw = params.get("max_mem")
    divide_host_mem_limit_by_vm_count = (
        params.get("divide_host_mem_limit_by_vm_count", "yes") == "yes"
    )

    # Host usable memory is the upper bound; optionally split it evenly
    # across the VMs that will be booted together in one iteration.
    memory_limit = int(utils_misc.get_usable_memory_size())
    if divide_host_mem_limit_by_vm_count:
        memory_limit //= max(1, len(vm_names))

    if max_mem_raw:
        memory_limit = min(memory_limit, int(max_mem_raw))

    if start_mem > memory_limit:
        LOG.warning("start_mem (%s) > memory_limit (%s), no iterations",
                    start_mem, memory_limit)
        return []

    if mem_generator == "random_32g_window":
        # All params below are read from cfg; values here are fallback defaults
        random_min = params.get_numeric("random_min", 0)
        random_max = params.get_numeric("random_max", 64)
        random_unit = params.get_numeric("random_unit", 511)
        samples_per_window = params.get_numeric("samples_per_window", 2)
        window_size = params.get_numeric("window_size", 32768)
        random_seed = params.get("random_seed")
        # Use a dedicated RNG when a seed is supplied so reproducibility
        # neither depends on nor clobbers the global random module state.
        # random.Random(seed) yields the same sequence as random.seed(seed).
        rng = random.Random(int(random_seed)) if random_seed is not None else random

        series = []
        base = start_mem
        while base <= memory_limit:
            # Largest step count keeping base + step * random_unit within
            # both the current window and the overall memory limit.
            max_steps = (
                (min(base + window_size, memory_limit) - base) // random_unit
                if random_unit > 0 else 0
            )
            effective_max = min(random_max, max_steps)
            # min() guarantees effective_min <= effective_max, so randint()
            # below always receives a valid range.
            effective_min = min(random_min, effective_max)
            for _ in range(samples_per_window):
                random_step = rng.randint(effective_min, effective_max)
                series.append(base + random_step * random_unit)
            base += window_size

        # Always exercise both extremes of the allowed range.
        if start_mem not in series:
            series.insert(0, start_mem)
        if memory_limit not in series:
            series.append(memory_limit)

        return sorted(set(series))

    # Fallback: simple linear ramp from start_mem up to memory_limit.
    series = []
    current_mem = start_mem
    while current_mem <= memory_limit:
        series.append(current_mem)
        current_mem += mem_step
    return series


def _resolve_iteration_plan(params, vm_names):
    """
    Build a list of per-iteration memory overrides for each VM.

    Returns a list of dicts:
        [ {vm_name: {"mem": value}}, ... ]
    """
    mem_series = _calc_default_mem_series(params, vm_names)
    if not mem_series:
        return []

    # Every VM currently follows the same memory series: one plan entry per
    # series value, mapping each VM name to its stringified "mem" override.
    return [
        {vm_name: {"mem": str(mem_value)} for vm_name in vm_names}
        for mem_value in mem_series
    ]


@error_context.context_aware
def run(test, params, env):
    """
    Boot multiple VMs with per-iteration parameter overrides:
    1) Boot all VMs with current iteration parameters
    2) Verify all guests can login
    3) Destroy all VMs
    4) Repeat for all iterations

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    timeout = params.get_numeric("login_timeout", 240)

    vm_names = params.objects("vms")
    if not vm_names:
        test.cancel("No VMs configured for multi_vms_multi_boot")

    iteration_plan = _resolve_iteration_plan(params, vm_names)

    if not iteration_plan:
        test.cancel("No valid iterations resolved for multi_vms_multi_boot")

    test.log.info("Total iterations: %s, VMs per iteration: %s",
                  len(iteration_plan), len(vm_names))

    for iteration, vm_param_overrides in enumerate(iteration_plan, start=1):
        started_vms = []
        try:
            override_desc = ", ".join(
                "%s(mem=%s)" % (vm_name, vm_param_overrides.get(vm_name, {}).get("mem", "default"))
                for vm_name in vm_names
            )
            error_context.context(
                "Iteration %s/%s: %s"
                % (iteration, len(iteration_plan), override_desc),
                test.log.info,
            )

            for vm_name in vm_names:
                # object_params() returns a per-VM copy, so mutating it here
                # cannot leak overrides into later iterations.
                vm_params = params.object_params(vm_name)
                vm_params["start_vm"] = "yes"
                for key, value in vm_param_overrides.get(vm_name, {}).items():
                    vm_params[key] = value
                env_process.preprocess_vm(test, vm_params, env, vm_name)
                vm = env.get_vm(vm_name)
                if vm is None:
                    # Fail loudly here rather than crashing with an
                    # AttributeError later in the login loop or in cleanup.
                    test.error("VM %s missing from env after preprocess" % vm_name)
                started_vms.append(vm)

            for vm in started_vms:
                vm.verify_alive()
                session = vm.wait_for_login(timeout=timeout)
                session.close()
        finally:
            # Tear down whatever actually got started, even on failure,
            # so the next iteration begins from a clean slate.
            for vm in started_vms:
                vm.destroy(gracefully=False)
Loading