
Commit 1ee7c35

Authored Feb 3, 2022
Merge pull request #219 from Freax13/new-level-four-entries
rework `UsedLevel4Entries`
2 parents: fa61731 + fd04198

4 files changed: +120 −44 lines

src/binary/level_4_entries.rs (+75 −18)
```diff
@@ -1,10 +1,16 @@
-use core::convert::TryInto;
+use core::{alloc::Layout, convert::TryInto};
+use usize_conversions::IntoUsize;
 use x86_64::{
-    structures::paging::{Page, PageTableIndex},
-    VirtAddr,
+    structures::paging::{Page, PageTableIndex, Size4KiB},
+    PhysAddr, VirtAddr,
 };
 use xmas_elf::program::ProgramHeader;
 
+use crate::{
+    binary::{MemoryRegion, CONFIG},
+    BootInfo,
+};
+
 /// Keeps track of used entries in a level 4 page table.
 ///
 /// Useful for determining a free virtual memory block, e.g. for mapping additional data.
@@ -13,35 +19,86 @@ pub struct UsedLevel4Entries {
 }
 
 impl UsedLevel4Entries {
-    /// Initializes a new instance from the given ELF program segments.
+    /// Initializes a new instance.
     ///
-    /// Marks the virtual address range of all segments as used.
-    pub fn new<'a>(
-        segments: impl Iterator<Item = ProgramHeader<'a>>,
-        virtual_address_offset: u64,
-    ) -> Self {
+    /// Marks the statically configured virtual address ranges from the config as used.
+    pub fn new(max_phys_addr: PhysAddr, regions_len: usize, framebuffer_size: usize) -> Self {
         let mut used = UsedLevel4Entries {
             entry_state: [false; 512],
         };
 
         used.entry_state[0] = true; // TODO: Can we do this dynamically?
 
-        for segment in segments {
-            let start_page: Page = Page::containing_address(VirtAddr::new(
-                segment.virtual_addr() + virtual_address_offset,
-            ));
-            let end_page: Page = Page::containing_address(VirtAddr::new(
-                segment.virtual_addr() + virtual_address_offset + segment.mem_size(),
-            ));
+        // Mark the statically configured ranges from the config as used.
+
+        if let Some(physical_memory_offset) = CONFIG.physical_memory_offset {
+            used.mark_range_as_used(physical_memory_offset, max_phys_addr.as_u64().into_usize());
+        }
+
+        if CONFIG.map_page_table_recursively {
+            if let Some(recursive_index) = CONFIG.recursive_index {
+                used.mark_p4_index_as_used(PageTableIndex::new(recursive_index));
+            }
+        }
+
+        if let Some(kernel_stack_address) = CONFIG.kernel_stack_address {
+            used.mark_range_as_used(kernel_stack_address, CONFIG.kernel_stack_size());
+        }
+
+        if let Some(boot_info_address) = CONFIG.boot_info_address {
+            let boot_info_layout = Layout::new::<BootInfo>();
+            let regions = regions_len + 1; // one region might be split into used/unused
+            let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+            let (combined, _) = boot_info_layout.extend(memory_regions_layout).unwrap();
+
+            used.mark_range_as_used(boot_info_address, combined.size());
+        }
 
-            for p4_index in u64::from(start_page.p4_index())..=u64::from(end_page.p4_index()) {
-                used.entry_state[p4_index as usize] = true;
+        if CONFIG.map_framebuffer {
+            if let Some(framebuffer_address) = CONFIG.framebuffer_address {
+                used.mark_range_as_used(framebuffer_address, framebuffer_size);
             }
         }
 
        used
    }
 
+    /// Mark all p4 entries in the range `[address..address+size)` as used.
+    ///
+    /// `size` can be a `u64` or `usize`.
+    fn mark_range_as_used<S>(&mut self, address: u64, size: S)
+    where
+        VirtAddr: core::ops::Add<S, Output = VirtAddr>,
+    {
+        let start = VirtAddr::new(address);
+        let end_inclusive = (start + size) - 1usize;
+        let start_page = Page::<Size4KiB>::containing_address(start);
+        let end_page_inclusive = Page::<Size4KiB>::containing_address(end_inclusive);
+
+        for p4_index in u16::from(start_page.p4_index())..=u16::from(end_page_inclusive.p4_index())
+        {
+            self.mark_p4_index_as_used(PageTableIndex::new(p4_index));
+        }
+    }
+
+    fn mark_p4_index_as_used(&mut self, p4_index: PageTableIndex) {
+        self.entry_state[usize::from(p4_index)] = true;
+    }
+
+    /// Marks the virtual address range of all segments as used.
+    pub fn mark_segments<'a>(
+        &mut self,
+        segments: impl Iterator<Item = ProgramHeader<'a>>,
+        virtual_address_offset: u64,
+    ) {
+        for segment in segments.filter(|s| s.mem_size() > 0) {
+            self.mark_range_as_used(
+                segment.virtual_addr() + virtual_address_offset,
+                segment.mem_size(),
+            );
+        }
+    }
+
     /// Returns a unused level 4 entry and marks it as used.
     ///
     /// Since this method marks each returned index as used, it can be used multiple times
```

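The new `mark_range_as_used` converts a byte range into an inclusive range of level 4 indices. A minimal sketch of the underlying index math, using a hypothetical standalone `p4_index` helper rather than the `x86_64` crate's `Page::p4_index` (not part of the commit): on x86_64 with 4-level paging, each level 4 entry spans 2^39 bytes (512 GiB), so the index of an address is bits 39..=47.

```rust
/// Hypothetical helper mirroring `Page::p4_index`: the level 4 index of a
/// virtual address is bits 39..=47, since each entry covers 512 GiB.
fn p4_index(addr: u64) -> usize {
    ((addr >> 39) & 0o777) as usize
}

fn main() {
    // A range can straddle a 512 GiB boundary, which is why the diff walks
    // from the start page's index through the *inclusive* end page's index.
    // Using `end_inclusive = start + size - 1` also avoids marking one entry
    // too many when a range ends exactly on an entry boundary.
    let start = (1u64 << 39) - 0x1000; // last page covered by entry 0
    let size = 0x2000u64; // two pages, crossing into entry 1
    let end_inclusive = start + size - 1;
    assert_eq!(p4_index(start), 0);
    assert_eq!(p4_index(end_inclusive), 1);
}
```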
src/binary/load_kernel.rs (+8 −12)
```diff
@@ -43,6 +43,7 @@ where
         bytes: &'a [u8],
         page_table: &'a mut M,
         frame_allocator: &'a mut F,
+        used_entries: &mut UsedLevel4Entries,
     ) -> Result<Self, &'static str> {
         log::info!("Elf file loaded at {:#p}", bytes);
         let kernel_offset = PhysAddr::new(&bytes[0] as *const u8 as u64);
@@ -56,11 +57,13 @@ where
             header::Type::None => unimplemented!(),
             header::Type::Relocatable => unimplemented!(),
             header::Type::Executable => 0,
-            header::Type::SharedObject => 0x400000,
+            header::Type::SharedObject => used_entries.get_free_address().as_u64(),
             header::Type::Core => unimplemented!(),
             header::Type::ProcessorSpecific(_) => unimplemented!(),
         };
 
+        used_entries.mark_segments(elf_file.program_iter(), virtual_address_offset);
+
         header::sanity_check(&elf_file)?;
         let loader = Loader {
             elf_file,
@@ -120,13 +123,6 @@ where
     fn entry_point(&self) -> VirtAddr {
         VirtAddr::new(self.elf_file.header.pt2.entry_point() + self.inner.virtual_address_offset)
     }
-
-    fn used_level_4_entries(&self) -> UsedLevel4Entries {
-        UsedLevel4Entries::new(
-            self.elf_file.program_iter(),
-            self.inner.virtual_address_offset,
-        )
-    }
 }
 
 impl<'a, M, F> Inner<'a, M, F>
@@ -513,10 +509,10 @@ pub fn load_kernel(
     bytes: &[u8],
     page_table: &mut (impl MapperAllSizes + Translate),
     frame_allocator: &mut impl FrameAllocator<Size4KiB>,
-) -> Result<(VirtAddr, Option<TlsTemplate>, UsedLevel4Entries), &'static str> {
-    let mut loader = Loader::new(bytes, page_table, frame_allocator)?;
+    used_entries: &mut UsedLevel4Entries,
+) -> Result<(VirtAddr, Option<TlsTemplate>), &'static str> {
+    let mut loader = Loader::new(bytes, page_table, frame_allocator, used_entries)?;
     let tls_template = loader.load_segments()?;
-    let used_entries = loader.used_level_4_entries();
 
-    Ok((loader.entry_point(), tls_template, used_entries))
+    Ok((loader.entry_point(), tls_template))
 }
```

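Position-independent (`SharedObject`) kernels no longer load at the fixed base `0x400000`; their base now comes from `used_entries.get_free_address()`, i.e. the start of a level 4 region that nothing else claims. A sketch of how such an address can be derived from a free index (hypothetical `address_of_p4_entry` helper; the commit's actual `get_free_address` lives in `level_4_entries.rs` and is not shown in this diff):

```rust
/// Hypothetical sketch: the canonical virtual address at which the 512 GiB
/// region of a given level 4 entry begins.
fn address_of_p4_entry(index: u64) -> u64 {
    assert!(index < 512);
    let addr = index << 39;
    // On x86_64, bits 48..=63 must be copies of bit 47 ("canonical" form),
    // so sign-extend from bit 47.
    (((addr << 16) as i64) >> 16) as u64
}

fn main() {
    assert_eq!(address_of_p4_entry(1), 0x0000_0080_0000_0000);
    assert_eq!(address_of_p4_entry(511), 0xFFFF_FF80_0000_0000);
}
```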
src/binary/mod.rs (+27 −14)
```diff
@@ -2,11 +2,7 @@ use crate::{
     binary::legacy_memory_region::{LegacyFrameAllocator, LegacyMemoryRegion},
     boot_info::{BootInfo, FrameBuffer, FrameBufferInfo, MemoryRegion, TlsTemplate},
 };
-use core::{
-    arch::asm,
-    mem::{self, MaybeUninit},
-    slice,
-};
+use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
 use level_4_entries::UsedLevel4Entries;
 use parsed_config::CONFIG;
 use usize_conversions::FromUsize;
@@ -127,14 +123,24 @@
 {
     let kernel_page_table = &mut page_tables.kernel;
 
+    let mut used_entries = UsedLevel4Entries::new(
+        frame_allocator.max_phys_addr(),
+        frame_allocator.len(),
+        framebuffer_size,
+    );
+
     // Enable support for the no-execute bit in page tables.
     enable_nxe_bit();
     // Make the kernel respect the write-protection bits even when in ring 0 by default
     enable_write_protect_bit();
 
-    let (entry_point, tls_template, mut used_entries) =
-        load_kernel::load_kernel(kernel_bytes, kernel_page_table, frame_allocator)
-            .expect("no entry point");
+    let (entry_point, tls_template) = load_kernel::load_kernel(
+        kernel_bytes,
+        kernel_page_table,
+        frame_allocator,
+        &mut used_entries,
+    )
+    .expect("no entry point");
     log::info!("Entry point at: {:#x}", entry_point.as_u64());
 
     // create a stack
@@ -310,13 +316,20 @@
 
     // allocate and map space for the boot info
     let (boot_info, memory_regions) = {
-        let boot_info_addr = boot_info_location(&mut mappings.used_entries);
-        let boot_info_end = boot_info_addr + mem::size_of::<BootInfo>();
-        let memory_map_regions_addr =
-            boot_info_end.align_up(u64::from_usize(mem::align_of::<MemoryRegion>()));
+        let boot_info_layout = Layout::new::<BootInfo>();
         let regions = frame_allocator.len() + 1; // one region might be split into used/unused
-        let memory_map_regions_end =
-            memory_map_regions_addr + regions * mem::size_of::<MemoryRegion>();
+        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
+        let (combined, memory_regions_offset) =
+            boot_info_layout.extend(memory_regions_layout).unwrap();
+
+        let boot_info_addr = boot_info_location(&mut mappings.used_entries);
+        assert!(
+            boot_info_addr.is_aligned(u64::from_usize(combined.align())),
+            "boot info addr is not properly aligned"
+        );
+
+        let memory_map_regions_addr = boot_info_addr + memory_regions_offset;
+        let memory_map_regions_end = boot_info_addr + combined.size();
 
         let start_page = Page::containing_address(boot_info_addr);
         let end_page = Page::containing_address(memory_map_regions_end - 1u64);
```

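Both `UsedLevel4Entries::new` and the boot info mapping now compute the reservation size with the same `Layout` expression, so the two can't drift apart. A short sketch of what `Layout::extend` returns, using stand-in types (the `Header` and `Region` structs below are hypothetical; the real `BootInfo` and `MemoryRegion` have different fields):

```rust
use core::alloc::Layout;

// Stand-ins for BootInfo and MemoryRegion, purely for illustration.
#[allow(dead_code)]
#[repr(C)]
struct Header { version: u64, flag: u8 }
#[allow(dead_code)]
#[repr(C)]
struct Region { start: u64, end: u64, kind: u64 }

fn main() {
    let head = Layout::new::<Header>(); // size 16 (padded), align 8
    let tail = Layout::array::<Region>(3).unwrap(); // size 72, align 8
    // `extend` places `tail` after `head`, padding so the array starts at
    // an offset aligned for `Region`, and returns the combined layout plus
    // that offset; the diff uses these values for `memory_map_regions_addr`
    // and `memory_map_regions_end`.
    let (combined, offset) = head.extend(tail).unwrap();
    assert_eq!(offset, 16);
    assert_eq!(combined.size(), 16 + 72);
    assert_eq!(combined.align(), 8);
}
```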
src/config.rs (+10 −0)
```diff
@@ -1,3 +1,6 @@
+#[cfg(feature = "binary")]
+const PAGE_SIZE: u64 = 4096;
+
 /// Allows configuring the bootloader behavior.
 ///
 /// To control these, use a `[package.metadata.bootloader]` table in the `Cargo.toml` of
@@ -88,3 +91,10 @@ pub struct Config {
     /// fits them if 1 or more is set.
     pub minimum_framebuffer_width: Option<usize>,
 }
+
+#[cfg(feature = "binary")]
+impl Config {
+    pub(crate) fn kernel_stack_size(&self) -> u64 {
+        self.kernel_stack_size.unwrap_or(20 * PAGE_SIZE)
+    }
+}
```

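The new accessor makes the fallback explicit: when no `kernel-stack-size` is configured, the bootloader reserves 20 pages of 4096 bytes, i.e. 80 KiB. A free-function sketch of the same behavior (hypothetical mirror of the `Config::kernel_stack_size` method above):

```rust
const PAGE_SIZE: u64 = 4096;

/// Hypothetical free-function mirror of `Config::kernel_stack_size`.
fn kernel_stack_size(configured: Option<u64>) -> u64 {
    // Fall back to 20 pages (80 KiB) when no explicit size is set.
    configured.unwrap_or(20 * PAGE_SIZE)
}

fn main() {
    assert_eq!(kernel_stack_size(None), 80 * 1024);
    assert_eq!(kernel_stack_size(Some(4096)), 4096);
}
```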