From 87c691294fbad7f1d9ea42305aef9d400885d9fa Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sun, 30 Jan 2022 11:51:37 +0100
Subject: [PATCH 1/2] rework `UsedLevel4Entries`

---
 src/binary/level_4_entries.rs | 93 ++++++++++++++++++++++++++++-------
 src/binary/load_kernel.rs     | 20 +++-----
 src/binary/mod.rs             | 41 +++++++++------
 src/config.rs                 |  8 +++
 4 files changed, 118 insertions(+), 44 deletions(-)

diff --git a/src/binary/level_4_entries.rs b/src/binary/level_4_entries.rs
index 447e07a8..f30e5a45 100644
--- a/src/binary/level_4_entries.rs
+++ b/src/binary/level_4_entries.rs
@@ -1,10 +1,16 @@
-use core::convert::TryInto;
+use core::{alloc::Layout, convert::TryInto};
+use usize_conversions::IntoUsize;
 use x86_64::{
-    structures::paging::{Page, PageTableIndex},
-    VirtAddr,
+    structures::paging::{Page, PageTableIndex, Size4KiB},
+    PhysAddr, VirtAddr,
 };
 use xmas_elf::program::ProgramHeader;
 
+use crate::{
+    binary::{MemoryRegion, CONFIG},
+    BootInfo,
+};
+
 /// Keeps track of used entries in a level 4 page table.
 ///
 /// Useful for determining a free virtual memory block, e.g. for mapping additional data.
@@ -13,35 +19,86 @@ pub struct UsedLevel4Entries {
 }
 
 impl UsedLevel4Entries {
-    /// Initializes a new instance from the given ELF program segments.
+    /// Initializes a new instance.
     ///
-    /// Marks the virtual address range of all segments as used.
-    pub fn new<'a>(
-        segments: impl Iterator<Item = ProgramHeader<'a>>,
-        virtual_address_offset: u64,
-    ) -> Self {
+    /// Marks the statically configured virtual address ranges from the config as used.
+    pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
         let mut used = UsedLevel4Entries {
             entry_state: [false; 512],
         };
 
         used.entry_state[0] = true; // TODO: Can we do this dynamically?
 
-        for segment in segments {
-            let start_page: Page = Page::containing_address(VirtAddr::new(
-                segment.virtual_addr() + virtual_address_offset,
-            ));
-            let end_page: Page = Page::containing_address(VirtAddr::new(
-                segment.virtual_addr() + virtual_address_offset + segment.mem_size(),
-            ));
+        // Mark the statically configured ranges from the config as used.
+
+        if let Some(physical_memory_offset) = CONFIG.physical_memory_offset {
+            used.mark_range_as_used(physical_memory_offset, max_phys_addr.as_u64().into_usize());
+        }
+
+        if CONFIG.map_page_table_recursively {
+            if let Some(recursive_index) = CONFIG.recursive_index {
+                used.mark_p4_index_as_used(PageTableIndex::new(recursive_index));
+            }
+        }
+
+        if let Some(kernel_stack_address) = CONFIG.kernel_stack_address {
+            used.mark_range_as_used(kernel_stack_address, CONFIG.kernel_stack_size());
+        }
+
+        if let Some(boot_info_address) = CONFIG.boot_info_address {
+            let boot_info_layout = Layout::new::<BootInfo>();
+            let regions = regions_len + 1; // one region might be split into used/unused
+            let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+            let (combined, _) = boot_info_layout.extend(memory_regions_layout).unwrap();
+
+            used.mark_range_as_used(boot_info_address, combined.size());
+        }
 
-            for p4_index in u64::from(start_page.p4_index())..=u64::from(end_page.p4_index()) {
-                used.entry_state[p4_index as usize] = true;
+        if CONFIG.map_framebuffer {
+            if let Some(framebuffer_address) = CONFIG.framebuffer_address {
+                used.mark_range_as_used(framebuffer_address, framebuffer_size);
             }
         }
 
         used
     }
 
+    /// Mark all p4 entries in the range `[address..address+size)` as used.
+    ///
+    /// `size` can be a `u64` or `usize`.
+    fn mark_range_as_used<S>(&mut self, address: u64, size: S)
+    where
+        VirtAddr: core::ops::Add<S, Output = VirtAddr>,
+    {
+        let start = VirtAddr::new(address);
+        let end_inclusive = (start + size) - 1usize;
+        let start_page = Page::<Size4KiB>::containing_address(start);
+        let end_page_inclusive = Page::<Size4KiB>::containing_address(end_inclusive);
+
+        for p4_index in u16::from(start_page.p4_index())..=u16::from(end_page_inclusive.p4_index())
+        {
+            self.mark_p4_index_as_used(PageTableIndex::new(p4_index));
+        }
+    }
+
+    fn mark_p4_index_as_used(&mut self, p4_index: PageTableIndex) {
+        self.entry_state[usize::from(p4_index)] = true;
+    }
+
+    /// Marks the virtual address range of all segments as used.
+    pub fn mark_segments<'a>(
+        &mut self,
+        segments: impl Iterator<Item = ProgramHeader<'a>>,
+        virtual_address_offset: u64,
+    ) {
+        for segment in segments.filter(|s| s.mem_size() > 0) {
+            self.mark_range_as_used(
+                segment.virtual_addr() + virtual_address_offset,
+                segment.mem_size(),
+            );
+        }
+    }
+
     /// Returns a unused level 4 entry and marks it as used.
     ///
     /// Since this method marks each returned index as used, it can be used multiple times
diff --git a/src/binary/load_kernel.rs b/src/binary/load_kernel.rs
index 4be7b8e5..e07eb114 100644
--- a/src/binary/load_kernel.rs
+++ b/src/binary/load_kernel.rs
@@ -43,6 +43,7 @@ where
         bytes: &'a [u8],
         page_table: &'a mut M,
         frame_allocator: &'a mut F,
+        used_entries: &mut UsedLevel4Entries,
     ) -> Result<Self, &'static str> {
         log::info!("Elf file loaded at {:#p}", bytes);
         let kernel_offset = PhysAddr::new(&bytes[0] as *const u8 as u64);
@@ -56,11 +57,13 @@ where
             header::Type::None => unimplemented!(),
             header::Type::Relocatable => unimplemented!(),
             header::Type::Executable => 0,
-            header::Type::SharedObject => 0x400000,
+            header::Type::SharedObject => used_entries.get_free_address().as_u64(),
             header::Type::Core => unimplemented!(),
             header::Type::ProcessorSpecific(_) => unimplemented!(),
         };
 
+        used_entries.mark_segments(elf_file.program_iter(), virtual_address_offset);
+
         header::sanity_check(&elf_file)?;
         let loader = Loader {
             elf_file,
@@ -120,13 +123,6 @@ where
     fn entry_point(&self) -> VirtAddr {
         VirtAddr::new(self.elf_file.header.pt2.entry_point() + self.inner.virtual_address_offset)
     }
-
-    fn used_level_4_entries(&self) -> UsedLevel4Entries {
-        UsedLevel4Entries::new(
-            self.elf_file.program_iter(),
-            self.inner.virtual_address_offset,
-        )
-    }
 }
 
 impl<'a, M, F> Inner<'a, M, F>
@@ -513,10 +509,10 @@ pub fn load_kernel(
     bytes: &[u8],
     page_table: &mut (impl MapperAllSizes + Translate),
     frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) -> Result<(VirtAddr, Option<TlsTemplate>, UsedLevel4Entries), &'static str> {
-    let mut loader = Loader::new(bytes, page_table, frame_allocator)?;
+    used_entries: &mut UsedLevel4Entries,
+) -> Result<(VirtAddr, Option<TlsTemplate>), &'static str> {
+    let mut loader = Loader::new(bytes, page_table, frame_allocator, used_entries)?;
     let tls_template = loader.load_segments()?;
-    let used_entries = loader.used_level_4_entries();
 
-    Ok((loader.entry_point(), tls_template, used_entries))
+    Ok((loader.entry_point(), tls_template))
 }
diff --git a/src/binary/mod.rs b/src/binary/mod.rs
index 9a415ecf..ef0c6cd3 100644
--- a/src/binary/mod.rs
+++ b/src/binary/mod.rs
@@ -2,11 +2,7 @@ use crate::{
     binary::legacy_memory_region::{LegacyFrameAllocator, LegacyMemoryRegion},
     boot_info::{BootInfo, FrameBuffer, FrameBufferInfo, MemoryRegion, TlsTemplate},
 };
-use core::{
-    arch::asm,
-    mem::{self, MaybeUninit},
-    slice,
-};
+use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
 use level_4_entries::UsedLevel4Entries;
 use parsed_config::CONFIG;
 use usize_conversions::FromUsize;
@@ -127,14 +123,24 @@ where
 {
     let kernel_page_table = &mut page_tables.kernel;
 
+    let mut used_entries = UsedLevel4Entries::new(
+        frame_allocator.max_phys_addr(),
+        frame_allocator.len(),
+        framebuffer_size,
+    );
+
     // Enable support for the no-execute bit in page tables.
     enable_nxe_bit();
     // Make the kernel respect the write-protection bits even when in ring 0 by default
     enable_write_protect_bit();
 
-    let (entry_point, tls_template, mut used_entries) =
-        load_kernel::load_kernel(kernel_bytes, kernel_page_table, frame_allocator)
-            .expect("no entry point");
+    let (entry_point, tls_template) = load_kernel::load_kernel(
+        kernel_bytes,
+        kernel_page_table,
+        frame_allocator,
+        &mut used_entries,
+    )
+    .expect("no entry point");
     log::info!("Entry point at: {:#x}", entry_point.as_u64());
 
     // create a stack
@@ -310,13 +316,20 @@ where
 
     // allocate and map space for the boot info
     let (boot_info, memory_regions) = {
-        let boot_info_addr = boot_info_location(&mut mappings.used_entries);
-        let boot_info_end = boot_info_addr + mem::size_of::<BootInfo>();
-        let memory_map_regions_addr =
-            boot_info_end.align_up(u64::from_usize(mem::align_of::<MemoryRegion>()));
+        let boot_info_layout = Layout::new::<BootInfo>();
         let regions = frame_allocator.len() + 1; // one region might be split into used/unused
-        let memory_map_regions_end =
-            memory_map_regions_addr + regions * mem::size_of::<MemoryRegion>();
+        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+        let (combined, memory_regions_offset) =
+            boot_info_layout.extend(memory_regions_layout).unwrap();
+
+        let boot_info_addr = boot_info_location(&mut mappings.used_entries);
+        assert!(
+            boot_info_addr.is_aligned(u64::from_usize(combined.align())),
+            "boot info addr is not properly aligned"
+        );
+
+        let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
+        let memory_map_regions_end = boot_info_addr + combined.size();
 
         let start_page = Page::containing_address(boot_info_addr);
         let end_page = Page::containing_address(memory_map_regions_end - 1u64);
diff --git a/src/config.rs b/src/config.rs
index 4f6ee1a4..d0b3b095 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,3 +1,5 @@
+const PAGE_SIZE: u64 = 4096;
+
 /// Allows configuring the bootloader behavior.
 ///
 /// To control these, use a `[package.metadata.bootloader]` table in the `Cargo.toml` of
@@ -88,3 +90,9 @@ pub struct Config {
     /// fits them if 1 or more is set.
     pub minimum_framebuffer_width: Option<usize>,
 }
+
+impl Config {
+    pub(crate) fn kernel_stack_size(&self) -> u64 {
+        self.kernel_stack_size.unwrap_or(20 * PAGE_SIZE)
+    }
+}

From fd0419846810a43d7d159cb2104f3af80c7ca58b Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Thu, 3 Feb 2022 12:15:06 +0100
Subject: [PATCH 2/2] put constant and method behind feature gate

---
 src/config.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/config.rs b/src/config.rs
index d0b3b095..c07a4c4c 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,3 +1,4 @@
+#[cfg(feature = "binary")]
 const PAGE_SIZE: u64 = 4096;
 
 /// Allows configuring the bootloader behavior.
@@ -91,6 +92,7 @@ pub struct Config {
     pub minimum_framebuffer_width: Option<usize>,
 }
 
+#[cfg(feature = "binary")]
 impl Config {
     pub(crate) fn kernel_stack_size(&self) -> u64 {
         self.kernel_stack_size.unwrap_or(20 * PAGE_SIZE)