rework UsedLevel4Entries #219

Merged · 2 commits · Feb 3, 2022
93 changes: 75 additions & 18 deletions src/binary/level_4_entries.rs
@@ -1,10 +1,16 @@
use core::convert::TryInto;
use core::{alloc::Layout, convert::TryInto};
use usize_conversions::IntoUsize;
use x86_64::{
structures::paging::{Page, PageTableIndex},
VirtAddr,
structures::paging::{Page, PageTableIndex, Size4KiB},
PhysAddr, VirtAddr,
};
use xmas_elf::program::ProgramHeader;

use crate::{
binary::{MemoryRegion, CONFIG},
BootInfo,
};

/// Keeps track of used entries in a level 4 page table.
///
/// Useful for determining a free virtual memory block, e.g. for mapping additional data.
@@ -13,35 +19,86 @@ pub struct UsedLevel4Entries {
}

impl UsedLevel4Entries {
/// Initializes a new instance from the given ELF program segments.
/// Initializes a new instance.
///
/// Marks the virtual address range of all segments as used.
pub fn new<'a>(
segments: impl Iterator<Item = ProgramHeader<'a>>,
virtual_address_offset: u64,
) -> Self {
/// Marks the statically configured virtual address ranges from the config as used.
pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
let mut used = UsedLevel4Entries {
entry_state: [false; 512],
};

used.entry_state[0] = true; // TODO: Can we do this dynamically?

for segment in segments {
let start_page: Page = Page::containing_address(VirtAddr::new(
segment.virtual_addr() + virtual_address_offset,
));
let end_page: Page = Page::containing_address(VirtAddr::new(
segment.virtual_addr() + virtual_address_offset + segment.mem_size(),
));
// Mark the statically configured ranges from the config as used.

if let Some(physical_memory_offset) = CONFIG.physical_memory_offset {
used.mark_range_as_used(physical_memory_offset, max_phys_addr.as_u64().into_usize());
}

if CONFIG.map_page_table_recursively {
if let Some(recursive_index) = CONFIG.recursive_index {
used.mark_p4_index_as_used(PageTableIndex::new(recursive_index));
}
}

if let Some(kernel_stack_address) = CONFIG.kernel_stack_address {
used.mark_range_as_used(kernel_stack_address, CONFIG.kernel_stack_size());
}

if let Some(boot_info_address) = CONFIG.boot_info_address {
let boot_info_layout = Layout::new::<BootInfo>();
let regions = regions_len + 1; // one region might be split into used/unused
let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
let (combined, _) = boot_info_layout.extend(memory_regions_layout).unwrap();

used.mark_range_as_used(boot_info_address, combined.size());
}

for p4_index in u64::from(start_page.p4_index())..=u64::from(end_page.p4_index()) {
used.entry_state[p4_index as usize] = true;
if CONFIG.map_framebuffer {
if let Some(framebuffer_address) = CONFIG.framebuffer_address {
used.mark_range_as_used(framebuffer_address, framebuffer_size);
}
}

used
}

/// Mark all p4 entries in the range `[address..address+size)` as used.
///
/// `size` can be a `u64` or `usize`.
fn mark_range_as_used<S>(&mut self, address: u64, size: S)
where
VirtAddr: core::ops::Add<S, Output = VirtAddr>,
{
let start = VirtAddr::new(address);
let end_inclusive = (start + size) - 1usize;
let start_page = Page::<Size4KiB>::containing_address(start);
let end_page_inclusive = Page::<Size4KiB>::containing_address(end_inclusive);

for p4_index in u16::from(start_page.p4_index())..=u16::from(end_page_inclusive.p4_index())
{
self.mark_p4_index_as_used(PageTableIndex::new(p4_index));
}
}

fn mark_p4_index_as_used(&mut self, p4_index: PageTableIndex) {
self.entry_state[usize::from(p4_index)] = true;
}

/// Marks the virtual address range of all segments as used.
pub fn mark_segments<'a>(
&mut self,
segments: impl Iterator<Item = ProgramHeader<'a>>,
virtual_address_offset: u64,
) {
for segment in segments.filter(|s| s.mem_size() > 0) {
self.mark_range_as_used(
segment.virtual_addr() + virtual_address_offset,
segment.mem_size(),
);
}
}

/// Returns an unused level 4 entry and marks it as used.
///
/// Since this method marks each returned index as used, it can be used multiple times
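
A level 4 entry covers 512 GiB of virtual address space, selected by bits 39..47 of the address. The following is a minimal standalone sketch of the index math that `mark_range_as_used` performs, with a made-up helper and example values (it does not use the `x86_64` crate types):

```rust
/// Level 4 page table index of a virtual address: bits 39..47 (9 bits).
fn p4_index(addr: u64) -> usize {
    ((addr >> 39) & 0o777) as usize
}

/// Mark every level 4 entry touched by `[address, address + size)` as used,
/// mirroring what `UsedLevel4Entries::mark_range_as_used` does.
fn mark_range(entry_state: &mut [bool; 512], address: u64, size: u64) {
    assert!(size > 0);
    let end_inclusive = address + size - 1;
    for idx in p4_index(address)..=p4_index(end_inclusive) {
        entry_state[idx] = true;
    }
}

fn main() {
    let mut entries = [false; 512];
    // A 40 KiB range at an example address stays within one 512 GiB region,
    // so exactly one level 4 entry gets marked.
    mark_range(&mut entries, 0xFFF0_0000_0000, 40 * 4096);
    assert_eq!(entries.iter().filter(|&&e| e).count(), 1);
}
```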
20 changes: 8 additions & 12 deletions src/binary/load_kernel.rs
@@ -43,6 +43,7 @@ where
bytes: &'a [u8],
page_table: &'a mut M,
frame_allocator: &'a mut F,
used_entries: &mut UsedLevel4Entries,
) -> Result<Self, &'static str> {
log::info!("Elf file loaded at {:#p}", bytes);
let kernel_offset = PhysAddr::new(&bytes[0] as *const u8 as u64);
@@ -56,11 +57,13 @@
header::Type::None => unimplemented!(),
header::Type::Relocatable => unimplemented!(),
header::Type::Executable => 0,
header::Type::SharedObject => 0x400000,
header::Type::SharedObject => used_entries.get_free_address().as_u64(),
header::Type::Core => unimplemented!(),
header::Type::ProcessorSpecific(_) => unimplemented!(),
};

used_entries.mark_segments(elf_file.program_iter(), virtual_address_offset);

header::sanity_check(&elf_file)?;
let loader = Loader {
elf_file,
@@ -120,13 +123,6 @@ where
fn entry_point(&self) -> VirtAddr {
VirtAddr::new(self.elf_file.header.pt2.entry_point() + self.inner.virtual_address_offset)
}

fn used_level_4_entries(&self) -> UsedLevel4Entries {
UsedLevel4Entries::new(
self.elf_file.program_iter(),
self.inner.virtual_address_offset,
)
}
}

impl<'a, M, F> Inner<'a, M, F>
@@ -513,10 +509,10 @@ pub fn load_kernel(
bytes: &[u8],
page_table: &mut (impl MapperAllSizes + Translate),
frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(VirtAddr, Option<TlsTemplate>, UsedLevel4Entries), &'static str> {
let mut loader = Loader::new(bytes, page_table, frame_allocator)?;
used_entries: &mut UsedLevel4Entries,
) -> Result<(VirtAddr, Option<TlsTemplate>), &'static str> {
let mut loader = Loader::new(bytes, page_table, frame_allocator, used_entries)?;
let tls_template = loader.load_segments()?;
let used_entries = loader.used_level_4_entries();

Ok((loader.entry_point(), tls_template, used_entries))
Ok((loader.entry_point(), tls_template))
}
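
With this change, a `SharedObject` (position-independent) kernel is no longer loaded at the fixed offset `0x400000`; its offset comes from `used_entries.get_free_address()`, which already knows about the config-reserved ranges. The body of `get_free_address` is not shown in this excerpt; the sketch below is only a rough standalone illustration of the idea (first-fit over the `entry_state` array, returning the canonical start address of the chosen 512 GiB slot):

```rust
/// Rough sketch of what `get_free_address` amounts to: claim the first unused
/// level 4 entry and return the canonical start address of its 512 GiB region.
fn get_free_address(entry_state: &mut [bool; 512]) -> u64 {
    let index = entry_state
        .iter()
        .position(|&used| !used)
        .expect("no usable level 4 entries left");
    entry_state[index] = true;

    // Bits 39..47 hold the level 4 index; sign-extend bit 47 so addresses in
    // the upper half stay canonical.
    let addr = (index as u64) << 39;
    if addr & (1 << 47) != 0 {
        addr | 0xFFFF_0000_0000_0000
    } else {
        addr
    }
}

fn main() {
    let mut entries = [false; 512];
    entries[0] = true; // entry 0 is always marked used by the bootloader
    assert_eq!(get_free_address(&mut entries), 1u64 << 39); // the 512 GiB mark
    assert!(entries[1]);
}
```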
41 changes: 27 additions & 14 deletions src/binary/mod.rs
@@ -2,11 +2,7 @@ use crate::{
binary::legacy_memory_region::{LegacyFrameAllocator, LegacyMemoryRegion},
boot_info::{BootInfo, FrameBuffer, FrameBufferInfo, MemoryRegion, TlsTemplate},
};
use core::{
arch::asm,
mem::{self, MaybeUninit},
slice,
};
use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
use level_4_entries::UsedLevel4Entries;
use parsed_config::CONFIG;
use usize_conversions::FromUsize;
@@ -127,14 +123,24 @@
{
let kernel_page_table = &mut page_tables.kernel;

let mut used_entries = UsedLevel4Entries::new(
frame_allocator.max_phys_addr(),
frame_allocator.len(),
framebuffer_size,
);

// Enable support for the no-execute bit in page tables.
enable_nxe_bit();
// Make the kernel respect the write-protection bits even when in ring 0 by default
enable_write_protect_bit();

let (entry_point, tls_template, mut used_entries) =
load_kernel::load_kernel(kernel_bytes, kernel_page_table, frame_allocator)
.expect("no entry point");
let (entry_point, tls_template) = load_kernel::load_kernel(
kernel_bytes,
kernel_page_table,
frame_allocator,
&mut used_entries,
)
.expect("no entry point");
log::info!("Entry point at: {:#x}", entry_point.as_u64());

// create a stack
@@ -310,13 +316,20 @@

// allocate and map space for the boot info
let (boot_info, memory_regions) = {
let boot_info_addr = boot_info_location(&mut mappings.used_entries);
let boot_info_end = boot_info_addr + mem::size_of::<BootInfo>();
let memory_map_regions_addr =
boot_info_end.align_up(u64::from_usize(mem::align_of::<MemoryRegion>()));
let boot_info_layout = Layout::new::<BootInfo>();
let regions = frame_allocator.len() + 1; // one region might be split into used/unused
let memory_map_regions_end =
memory_map_regions_addr + regions * mem::size_of::<MemoryRegion>();
let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
let (combined, memory_regions_offset) =
boot_info_layout.extend(memory_regions_layout).unwrap();

let boot_info_addr = boot_info_location(&mut mappings.used_entries);
assert!(
boot_info_addr.is_aligned(u64::from_usize(combined.align())),
"boot info addr is not properly aligned"
);

let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
let memory_map_regions_end = boot_info_addr + combined.size();

let start_page = Page::containing_address(boot_info_addr);
let end_page = Page::containing_address(memory_map_regions_end - 1u64);
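
The boot info reservation is now sized with `core::alloc::Layout` instead of manual `size_of`/`align_up` arithmetic. A self-contained example of how `Layout::extend` combines a header layout with a trailing array, using stand-in types with made-up field layouts:

```rust
use core::alloc::Layout;

// Stand-in types; the real BootInfo / MemoryRegion layouts differ,
// but the arithmetic is the same.
#[repr(C)]
struct Header {
    a: u64,
    b: u32,
} // 12 bytes of fields, padded to size 16, align 8

#[repr(C)]
struct Region {
    start: u64,
    end: u64,
    kind: u64,
} // size 24, align 8

fn main() {
    let boot_info_layout = Layout::new::<Header>();
    let memory_regions_layout = Layout::array::<Region>(3).unwrap();

    // `extend` appends the array after the header, inserting padding so the
    // array is properly aligned, and returns the combined layout plus the
    // byte offset at which the array starts.
    let (combined, regions_offset) = boot_info_layout.extend(memory_regions_layout).unwrap();

    assert_eq!(regions_offset, 16); // no extra padding needed here
    assert_eq!(combined.size(), 16 + 3 * 24);
    assert_eq!(combined.align(), 8);
}
```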
10 changes: 10 additions & 0 deletions src/config.rs
@@ -1,3 +1,6 @@
#[cfg(feature = "binary")]
const PAGE_SIZE: u64 = 4096;

/// Allows configuring the bootloader behavior.
///
/// To control these, use a `[package.metadata.bootloader]` table in the `Cargo.toml` of
@@ -88,3 +91,10 @@ pub struct Config {
/// fits them if 1 or more is set.
pub minimum_framebuffer_width: Option<usize>,
}

#[cfg(feature = "binary")]
impl Config {
pub(crate) fn kernel_stack_size(&self) -> u64 {
self.kernel_stack_size.unwrap_or(20 * PAGE_SIZE)
}
}
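
The new default can be checked in isolation; the free function below merely mirrors the `Option` handling in `Config::kernel_stack_size` and is not part of the crate:

```rust
const PAGE_SIZE: u64 = 4096;

// Mirrors Config::kernel_stack_size(): fall back to 20 pages when the
// config does not set an explicit stack size.
fn kernel_stack_size(configured: Option<u64>) -> u64 {
    configured.unwrap_or(20 * PAGE_SIZE)
}

fn main() {
    assert_eq!(kernel_stack_size(None), 80 * 1024); // 80 KiB default
    assert_eq!(kernel_stack_size(Some(1 << 20)), 1 << 20); // explicit value wins
}
```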