Skip to content

Commit 5eaa199

Browse files
JamesC1305 authored and roypat committed
Add seccomp filters to VMM
Add seccompiler filters as a field of the VMM. This is required to ensure that the newly created vCPU threads have the same filters as the currently existing vCPU threads. Signed-off-by: James Curtis <[email protected]>
1 parent 98f5c05 commit 5eaa199

File tree

2 files changed

+79
-0
lines changed

2 files changed

+79
-0
lines changed

src/vmm/src/builder.rs

+10
Original file line numberDiff line numberDiff line change
@@ -139,13 +139,15 @@ impl std::convert::From<linux_loader::cmdline::Error> for StartMicrovmError {
139139
}
140140

141141
#[cfg_attr(target_arch = "aarch64", allow(unused))]
142+
#[allow(clippy::too_many_arguments)]
142143
fn create_vmm_and_vcpus(
143144
instance_info: &InstanceInfo,
144145
event_manager: &mut EventManager,
145146
guest_memory: GuestMemoryMmap,
146147
uffd: Option<Uffd>,
147148
track_dirty_pages: bool,
148149
vcpu_count: u8,
150+
#[cfg(target_arch = "x86_64")] seccomp_filters: BpfThreadMap,
149151
kvm_capabilities: Vec<KvmCapability>,
150152
) -> Result<(Vmm, Vec<Vcpu>), StartMicrovmError> {
151153
use self::StartMicrovmError::*;
@@ -222,6 +224,8 @@ fn create_vmm_and_vcpus(
222224
uffd,
223225
vcpus_handles: Vec::new(),
224226
vcpus_exit_evt,
227+
#[cfg(target_arch = "x86_64")]
228+
seccomp_filters,
225229
resource_allocator,
226230
mmio_device_manager,
227231
#[cfg(target_arch = "x86_64")]
@@ -301,6 +305,8 @@ pub fn build_microvm_for_boot(
301305
None,
302306
track_dirty_pages,
303307
vm_resources.vm_config.vcpu_count,
308+
#[cfg(target_arch = "x86_64")]
309+
seccomp_filters.clone(),
304310
cpu_template.kvm_capabilities.clone(),
305311
)?;
306312

@@ -468,6 +474,8 @@ pub fn build_microvm_from_snapshot(
468474
uffd,
469475
vm_resources.vm_config.track_dirty_pages,
470476
vm_resources.vm_config.vcpu_count,
477+
#[cfg(target_arch = "x86_64")]
478+
seccomp_filters.clone(),
471479
microvm_state.vm_state.kvm_cap_modifiers.clone(),
472480
)?;
473481

@@ -1142,6 +1150,8 @@ pub mod tests {
11421150
uffd: None,
11431151
vcpus_handles: Vec::new(),
11441152
vcpus_exit_evt,
1153+
#[cfg(target_arch = "x86_64")]
1154+
seccomp_filters: crate::seccomp_filters::get_empty_filters(),
11451155
resource_allocator: ResourceAllocator::new().unwrap(),
11461156
mmio_device_manager,
11471157
#[cfg(target_arch = "x86_64")]

src/vmm/src/lib.rs

+69
Original file line numberDiff line numberDiff line change
@@ -122,11 +122,17 @@ use device_manager::resources::ResourceAllocator;
122122
use devices::acpi::vmgenid::VmGenIdError;
123123
use event_manager::{EventManager as BaseEventManager, EventOps, Events, MutEventSubscriber};
124124
use seccompiler::BpfProgram;
125+
#[cfg(target_arch = "x86_64")]
126+
use seccompiler::BpfThreadMap;
125127
use userfaultfd::Uffd;
126128
use utils::epoll::EventSet;
127129
use utils::eventfd::EventFd;
128130
use utils::terminal::Terminal;
129131
use utils::u64_to_usize;
132+
#[cfg(target_arch = "x86_64")]
133+
use vmm_config::hotplug::{HotplugVcpuConfig, HotplugVcpuError};
134+
#[cfg(target_arch = "x86_64")]
135+
use vmm_config::machine_config::{MachineConfigUpdate, MAX_SUPPORTED_VCPUS};
130136
use vstate::vcpu::{self, KvmVcpuConfigureError, StartThreadedError, VcpuSendEventError};
131137

132138
use crate::arch::DeviceType;
@@ -314,6 +320,9 @@ pub struct Vmm {
314320
vcpus_handles: Vec<VcpuHandle>,
315321
// Used by Vcpus and devices to initiate teardown; Vmm should never write here.
316322
vcpus_exit_evt: EventFd,
323+
// seccomp_filters are only needed in VMM for hotplugging vCPUs.
324+
#[cfg(target_arch = "x86_64")]
325+
seccomp_filters: BpfThreadMap,
317326

318327
// Allocator for guest resources
319328
resource_allocator: ResourceAllocator,
@@ -594,6 +603,66 @@ impl Vmm {
594603
Ok(cpu_configs)
595604
}
596605

606+
/// Adds new vCPUs to VMM.
607+
#[cfg(target_arch = "x86_64")]
608+
pub fn hotplug_vcpus(
609+
&mut self,
610+
config: HotplugVcpuConfig,
611+
) -> Result<MachineConfigUpdate, HotplugVcpuError> {
612+
use crate::logger::IncMetric;
613+
if config.vcpu_count < 1 {
614+
return Err(HotplugVcpuError::VcpuCountTooLow);
615+
} else if self
616+
.vcpus_handles
617+
.len()
618+
.checked_add(config.vcpu_count.into())
619+
.ok_or(HotplugVcpuError::VcpuCountTooHigh)?
620+
> MAX_SUPPORTED_VCPUS.into()
621+
{
622+
return Err(HotplugVcpuError::VcpuCountTooHigh);
623+
}
624+
625+
// Create and start new vcpus
626+
let mut vcpus = Vec::with_capacity(config.vcpu_count.into());
627+
628+
#[allow(clippy::cast_possible_truncation)]
629+
let start_idx = self.vcpus_handles.len().try_into().unwrap();
630+
for cpu_idx in start_idx..(start_idx + config.vcpu_count) {
631+
let exit_evt = self
632+
.vcpus_exit_evt
633+
.try_clone()
634+
.map_err(HotplugVcpuError::EventFd)?;
635+
let vcpu =
636+
Vcpu::new(cpu_idx, &self.vm, exit_evt).map_err(HotplugVcpuError::VcpuCreate)?;
637+
vcpus.push(vcpu);
638+
}
639+
640+
self.start_vcpus(
641+
vcpus,
642+
self.seccomp_filters
643+
.get("vcpu")
644+
.ok_or_else(|| HotplugVcpuError::MissingSeccompFilters("vcpu".to_string()))?
645+
.clone(),
646+
)
647+
.map_err(HotplugVcpuError::VcpuStart)?;
648+
649+
#[allow(clippy::cast_lossless)]
650+
METRICS.hotplug.vcpus_added.add(config.vcpu_count.into());
651+
652+
// Update VM config to reflect new CPUs added
653+
#[allow(clippy::cast_possible_truncation)]
654+
let new_machine_config = MachineConfigUpdate {
655+
vcpu_count: Some(self.vcpus_handles.len() as u8),
656+
mem_size_mib: None,
657+
smt: None,
658+
cpu_template: None,
659+
track_dirty_pages: None,
660+
huge_pages: None,
661+
};
662+
663+
Ok(new_machine_config)
664+
}
665+
597666
/// Retrieves the KVM dirty bitmap for each of the guest's memory regions.
598667
pub fn reset_dirty_bitmap(&self) {
599668
self.guest_memory

0 commit comments

Comments
 (0)