Commit 5578fea
Dynamically map stack, boot info, physical memory, recursive table
1 parent 0ffbebd commit 5578fea

5 files changed: +170 −46 lines


build.rs
Lines changed: 49 additions & 17 deletions

@@ -1,6 +1,39 @@
 #[cfg(not(feature = "binary"))]
 fn main() {}
 
+#[cfg(feature = "binary")]
+fn address_from_env(env: &'static str) -> Option<u64> {
+    use std::env;
+    match env::var(env) {
+        Err(env::VarError::NotPresent) => None,
+        Err(env::VarError::NotUnicode(_)) => {
+            panic!("The `{}` environment variable must be valid unicode", env,)
+        }
+        Ok(s) => {
+            let addr = if s.starts_with("0x") {
+                u64::from_str_radix(&s[2..], 16)
+            } else {
+                u64::from_str_radix(&s, 10)
+            };
+
+            let addr = addr.expect(&format!(
+                "The `{}` environment variable must be an integer\
+                 (is `{}`).",
+                env, s
+            ));
+
+            if addr % 0x1000 != 0 {
+                panic!(
+                    "The `{}` environment variable must be aligned to 0x1000 (is `{:#x}`).",
+                    env, addr
+                );
+            }
+
+            Some(addr)
+        }
+    }
+}
+
 #[cfg(feature = "binary")]
 fn main() {
     use std::{
@@ -146,31 +179,29 @@ fn main() {
     // create a file with the `PHYSICAL_MEMORY_OFFSET` constant
     let file_path = out_dir.join("physical_memory_offset.rs");
     let mut file = File::create(file_path).expect("failed to create physical_memory_offset.rs");
-    let physical_memory_offset = match env::var("BOOTLOADER_PHYSICAL_MEMORY_OFFSET") {
-        Err(env::VarError::NotPresent) => 0o_177777_770_000_000_000_0000u64,
-        Err(env::VarError::NotUnicode(_)) => panic!(
-            "The `BOOTLOADER_PHYSICAL_MEMORY_OFFSET` environment variable must be valid unicode"
-        ),
-        Ok(s) => if s.starts_with("0x") {
-            u64::from_str_radix(&s[2..], 16)
-        } else {
-            u64::from_str_radix(&s, 10)
-        }
-        .expect(&format!(
-            "The `BOOTLOADER_PHYSICAL_MEMORY_OFFSET` environment variable must be an integer\
-             (is `{}`).",
-            s
-        )),
-    };
+    let physical_memory_offset = address_from_env("BOOTLOADER_PHYSICAL_MEMORY_OFFSET");
     file.write_all(
         format!(
-            "const PHYSICAL_MEMORY_OFFSET: u64 = {:#x};",
+            "const PHYSICAL_MEMORY_OFFSET: Option<u64> = {:?};",
             physical_memory_offset
         )
        .as_bytes(),
    )
    .expect("write to physical_memory_offset.rs failed");
 
+    // create a file with the `KERNEL_STACK_ADDRESS` constant
+    let file_path = out_dir.join("kernel_stack_address.rs");
+    let mut file = File::create(file_path).expect("failed to create kernel_stack_address.rs");
+    let kernel_stack_address = address_from_env("BOOTLOADER_KERNEL_STACK_ADDRESS");
+    file.write_all(
+        format!(
+            "const KERNEL_STACK_ADDRESS: Option<u64> = {:?};",
+            kernel_stack_address,
+        )
+        .as_bytes(),
+    )
+    .expect("write to kernel_stack_address.rs failed");
+
     // pass link arguments to rustc
     println!("cargo:rustc-link-search=native={}", out_dir.display());
     println!(
@@ -180,6 +211,7 @@ fn main() {
 
     println!("cargo:rerun-if-env-changed=KERNEL");
     println!("cargo:rerun-if-env-changed=BOOTLOADER_PHYSICAL_MEMORY_OFFSET");
+    println!("cargo:rerun-if-env-changed=BOOTLOADER_KERNEL_STACK_ADDRESS");
     println!("cargo:rerun-if-changed={}", kernel.display());
     println!("cargo:rerun-if-changed=build.rs");
 }

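For reference, a sketch of what the two generated include files would contain for a hypothetical build where `BOOTLOADER_PHYSICAL_MEMORY_OFFSET` is unset and `BOOTLOADER_KERNEL_STACK_ADDRESS=0x10000000000` (the address is an assumed example; note that `{:?}` writes the value in decimal):

```rust
// OUT_DIR/physical_memory_offset.rs (variable unset)
const PHYSICAL_MEMORY_OFFSET: Option<u64> = None;

// OUT_DIR/kernel_stack_address.rs (0x10000000000 = 1099511627776, 4 KiB aligned)
const KERNEL_STACK_ADDRESS: Option<u64> = Some(1099511627776);
```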
src/level4_entries.rs
Lines changed: 43 additions & 0 deletions

@@ -0,0 +1,43 @@
+use core::convert::TryInto;
+use fixedvec::FixedVec;
+use x86_64::ux;
+use x86_64::{structures::paging::Page, VirtAddr};
+use xmas_elf::program::ProgramHeader64;
+
+pub struct UsedLevel4Entries {
+    entry_state: [bool; 512], // whether an entry is in use by the kernel
+}
+
+impl UsedLevel4Entries {
+    pub fn new(segments: &FixedVec<ProgramHeader64>) -> Self {
+        let mut used = UsedLevel4Entries {
+            entry_state: [false; 512],
+        };
+
+        used.entry_state[0] = true; // TODO: Can we do this dynamically?
+
+        for segment in segments {
+            let start_page: Page = Page::containing_address(VirtAddr::new(segment.virtual_addr));
+            let end_page: Page =
+                Page::containing_address(VirtAddr::new(segment.virtual_addr + segment.mem_size));
+
+            for p4_index in u64::from(start_page.p4_index())..u64::from(end_page.p4_index()) {
+                used.entry_state[p4_index as usize] = true;
+            }
+        }
+
+        used
+    }
+
+    pub fn get_free_entry(&mut self) -> ux::u9 {
+        let (idx, entry) = self
+            .entry_state
+            .iter_mut()
+            .enumerate()
+            .find(|(_, &mut entry)| entry == false)
+            .expect("no usable level 4 entries found");
+
+        *entry = true;
+        ux::u9::new(idx.try_into().unwrap())
+    }
+}

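As a reading aid for `UsedLevel4Entries`: each level 4 index selects a 512 GiB slice of the 48-bit virtual address space, so a loaded segment occupies the indices between its start and end pages. A minimal standalone sketch (not part of the commit; `p4_index` here is an illustrative helper mirroring `Page::p4_index`):

```rust
// The level 4 index of a virtual address is bits 39..48; each index covers
// 2^39 bytes (512 GiB) of address space.
fn p4_index(virt_addr: u64) -> u64 {
    (virt_addr >> 39) & 0o777 // 9 bits, 0..=511
}

fn main() {
    // A kernel segment loaded at 0xFFFF_8000_0000_0000 (an assumed example
    // address) falls into level 4 entry 256, which UsedLevel4Entries would
    // mark as used so no dynamic mapping lands on top of it.
    assert_eq!(p4_index(0xFFFF_8000_0000_0000), 256);
}
```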
src/main.rs
Lines changed: 69 additions & 20 deletions

@@ -11,13 +11,15 @@
 compile_error!("The bootloader crate must be compiled for the `x86_64-bootloader.json` target");
 
 use bootloader::bootinfo::{BootInfo, FrameRange};
+use core::convert::TryInto;
 use core::panic::PanicInfo;
 use core::{mem, slice};
 use fixedvec::alloc_stack;
 use usize_conversions::usize_from;
+use x86_64::instructions::tlb;
 use x86_64::structures::paging::{
-    frame::PhysFrameRange, Mapper, Page, PageTableFlags, PhysFrame, RecursivePageTable, Size2MiB,
-    Size4KiB,
+    frame::PhysFrameRange, page_table::PageTableEntry, Mapper, Page, PageTable, PageTableFlags,
+    PhysFrame, RecursivePageTable, Size2MiB, Size4KiB,
 };
 use x86_64::ux::u9;
 use x86_64::{PhysAddr, VirtAddr};
@@ -26,6 +28,9 @@ use x86_64::{PhysAddr, VirtAddr};
 // the `map_physical_memory` is activated. Set by the build script.
 include!(concat!(env!("OUT_DIR"), "/physical_memory_offset.rs"));
 
+// The virtual address of the kernel stack. Set by the build script.
+include!(concat!(env!("OUT_DIR"), "/kernel_stack_address.rs"));
+
 global_asm!(include_str!("stage_1.s"));
 global_asm!(include_str!("stage_2.s"));
 global_asm!(include_str!("e820.s"));
@@ -44,6 +49,7 @@ unsafe fn context_switch(boot_info: VirtAddr, entry_point: VirtAddr, stack_point
 
 mod boot_info;
 mod frame_allocator;
+mod level4_entries;
 mod page_table;
 mod printer;
 
@@ -74,6 +80,7 @@ extern "C" {
     static __page_table_end: usize;
     static __bootloader_end: usize;
     static __bootloader_start: usize;
+    static _p4: usize;
 }
 
 #[no_mangle]
@@ -90,6 +97,7 @@ pub unsafe extern "C" fn stage_4() -> ! {
     let page_table_end = &__page_table_end as *const _ as u64;
     let bootloader_start = &__bootloader_start as *const _ as u64;
     let bootloader_end = &__bootloader_end as *const _ as u64;
+    let p4_physical = &_p4 as *const _ as u64;
 
     load_elf(
         IdentityMappedAddr(PhysAddr::new(kernel_start)),
@@ -100,6 +108,7 @@ pub unsafe extern "C" fn stage_4() -> ! {
         PhysAddr::new(page_table_end),
         PhysAddr::new(bootloader_start),
         PhysAddr::new(bootloader_end),
+        PhysAddr::new(p4_physical),
     )
 }
 
@@ -112,6 +121,7 @@ fn load_elf(
     page_table_end: PhysAddr,
     bootloader_start: PhysAddr,
     bootloader_end: PhysAddr,
+    p4_physical: PhysAddr,
 ) -> ! {
     use bootloader::bootinfo::{MemoryRegion, MemoryRegionType};
     use fixedvec::FixedVec;
@@ -149,11 +159,25 @@ fn load_elf(
         }
     }
 
+    // Mark used virtual addresses
+    let mut level4_entries = level4_entries::UsedLevel4Entries::new(&segments);
+
     // Enable support for the no-execute bit in page tables.
     enable_nxe_bit();
 
-    // Create a RecursivePageTable
-    let recursive_index = u9::new(511);
+    // Create a recursive page table entry
+    let recursive_index = u9::new(level4_entries.get_free_entry().try_into().unwrap());
+    let mut entry = PageTableEntry::new();
+    entry.set_addr(
+        p4_physical,
+        PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+    );
+
+    // Write the recursive entry into the page table
+    let page_table = unsafe { &mut *(p4_physical.as_u64() as *mut PageTable) };
+    page_table[recursive_index] = entry;
+    tlb::flush_all();
+
     let recursive_page_table_addr = Page::from_page_table_indices(
         recursive_index,
         recursive_index,
@@ -211,18 +235,14 @@ fn load_elf(
         rec_page_table.unmap(page).expect("dealloc error").1.flush();
     }
 
-    // Map kernel segments.
-    let stack_end = page_table::map_kernel(
-        kernel_start.phys(),
-        &segments,
-        &mut rec_page_table,
-        &mut frame_allocator,
-    )
-    .expect("kernel mapping failed");
-
     // Map a page for the boot info structure
     let boot_info_page = {
-        let page: Page = Page::containing_address(VirtAddr::new(0xb0071f0000));
+        let page: Page = Page::from_page_table_indices(
+            level4_entries.get_free_entry(),
+            u9::new(0),
+            u9::new(0),
+            u9::new(0),
+        );
         let frame = frame_allocator
             .allocate_frame(MemoryRegionType::BootInfo)
             .expect("frame allocation failed");
@@ -241,13 +261,38 @@ fn load_elf(
         page
     };
 
-    if cfg!(feature = "map_physical_memory") {
-        fn virt_for_phys(phys: PhysAddr) -> VirtAddr {
-            VirtAddr::new(phys.as_u64() + PHYSICAL_MEMORY_OFFSET)
-        }
+    // If no kernel stack address is provided, map the kernel stack after the boot info page
+    let kernel_stack_address = match KERNEL_STACK_ADDRESS {
+        Some(addr) => Page::containing_address(VirtAddr::new(addr)),
+        None => boot_info_page + 1,
+    };
+
+    // Map kernel segments.
+    let stack_end = page_table::map_kernel(
+        kernel_start.phys(),
+        kernel_stack_address,
+        &segments,
+        &mut rec_page_table,
+        &mut frame_allocator,
+    )
+    .expect("kernel mapping failed");
+
+    let physical_memory_offset = if cfg!(feature = "map_physical_memory") {
+        let physical_memory_offset = PHYSICAL_MEMORY_OFFSET.unwrap_or_else(|| {
+            // If offset not manually provided, find a free p4 entry and map memory here.
+            // One level 4 entry spans 2^48/512 bytes (over 500gib) so this should suffice.
+            assert!(max_phys_addr < (1 << 48) / 512);
+            Page::from_page_table_indices_1gib(level4_entries.get_free_entry(), u9::new(0))
+                .start_address()
+                .as_u64()
+        });
+
+        let virt_for_phys =
+            |phys: PhysAddr| -> VirtAddr { VirtAddr::new(phys.as_u64() + physical_memory_offset) };
 
         let start_frame = PhysFrame::<Size2MiB>::containing_address(PhysAddr::new(0));
         let end_frame = PhysFrame::<Size2MiB>::containing_address(PhysAddr::new(max_phys_addr));
+
         for frame in PhysFrame::range_inclusive(start_frame, end_frame) {
             let page = Page::containing_address(virt_for_phys(frame.start_address()));
             let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
@@ -263,13 +308,17 @@ fn load_elf(
                 .expect("Mapping of bootinfo page failed")
                 .flush();
         }
-    }
+
+        physical_memory_offset
+    } else {
+        0 // Value is unused by BootInfo::new, so this doesn't matter
+    };
 
     // Construct boot info structure.
     let mut boot_info = BootInfo::new(
         memory_map,
         recursive_page_table_addr.as_u64(),
-        PHYSICAL_MEMORY_OFFSET,
+        physical_memory_offset,
     );
     boot_info.memory_map.sort();

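The recursive-mapping setup in src/main.rs above writes the p4 frame's own physical address into a free entry and then builds a virtual address from that index repeated four times. A standalone sketch (not part of the commit) of why that address reaches the p4 table itself:

```rust
// With a recursive entry at index r, translating the address built from the
// indices (r, r, r, r) makes every step of the 4-level walk follow that entry
// back into the p4 frame, so the p4 table becomes readable and writable at
// this address; this is what RecursivePageTable relies on.
fn recursive_p4_addr(r: u64) -> u64 {
    assert!(r < 512);
    let addr = (r << 39) | (r << 30) | (r << 21) | (r << 12);
    // sign-extend bit 47 so the result is a canonical address
    if addr & (1 << 47) != 0 {
        addr | 0xFFFF_0000_0000_0000
    } else {
        addr
    }
}

fn main() {
    // Index 511, the value hard-coded before this commit, gives the classic
    // recursive-mapping address:
    assert_eq!(recursive_p4_addr(511), 0xFFFF_FFFF_FFFF_F000);
    // A dynamically chosen index (5 is purely illustrative) works the same way:
    assert_eq!(recursive_p4_addr(5), 0x0000_0281_40A0_5000);
}
```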
src/page_table.rs
Lines changed: 5 additions & 4 deletions

@@ -1,4 +1,5 @@
 use crate::frame_allocator::FrameAllocator;
+use crate::level4_entries::UsedLevel4Entries;
 use bootloader::bootinfo::MemoryRegionType;
 use fixedvec::FixedVec;
 use x86_64::structures::paging::mapper::{MapToError, MapperFlush, UnmapError};
@@ -10,6 +11,7 @@ use xmas_elf::program::{self, ProgramHeader64};
 
 pub(crate) fn map_kernel(
     kernel_start: PhysAddr,
+    stack_start: Page,
     segments: &FixedVec<ProgramHeader64>,
     page_table: &mut RecursivePageTable,
     frame_allocator: &mut FrameAllocator,
@@ -18,16 +20,15 @@ pub(crate) fn map_kernel(
         map_segment(segment, kernel_start, page_table, frame_allocator)?;
     }
 
-    // create a stack
-    // TODO create a stack range dynamically (based on where the kernel is loaded)
-    let stack_start = Page::containing_address(VirtAddr::new(0x57AC_0000_0000));
+    // Create a stack
     let stack_size: u64 = 512; // in pages
     let stack_end = stack_start + stack_size;
 
     let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
     let region_type = MemoryRegionType::KernelStack;
 
-    for page in Page::range(stack_start, stack_end) {
+    // Leave the first page unmapped as a 'guard page'
+    for page in Page::range(stack_start + 1, stack_end) {
         let frame = frame_allocator
             .allocate_frame(region_type)
             .ok_or(MapToError::FrameAllocationFailed)?;

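To make the new guard page concrete, a small standalone sketch of the resulting stack layout (values taken from map_kernel above; 4 KiB pages assumed):

```rust
// stack_start          unmapped guard page: an overflow past the lowest stack
//                      page faults here instead of silently corrupting memory
// stack_start + 1
//  .. stack_start+511  mapped, writable KernelStack pages
// stack_end            = stack_start + 512 (exclusive); the stack grows
//                      downward from this end of the region
fn main() {
    const PAGE_SIZE: u64 = 4096;
    let stack_size: u64 = 512; // in pages, as in map_kernel
    let usable_bytes = (stack_size - 1) * PAGE_SIZE; // minus the guard page
    assert_eq!(usable_bytes, 2_093_056); // just under 2 MiB
}
```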
src/stage_3.s
Lines changed: 4 additions & 5 deletions

@@ -36,9 +36,6 @@ set_up_page_tables:
     rep stosd
 
     # p4
-    lea eax, [_p4]
-    or eax, (1 | 2)
-    mov [_p4 + 511 * 8], eax # recursive mapping
     lea eax, [_p3]
     or eax, (1 | 2)
     mov [_p4], eax
@@ -63,10 +60,12 @@ set_up_page_tables:
     cmp ecx, edx
     jb map_p2_table
     # p1
-    lea eax, __bootloader_start
+    # start mapping from __page_table_start, as we need to be able to access
+    # the p4 table from rust. stop mapping at __bootloader_end
+    lea eax, __page_table_start
     and eax, 0xfffff000
     or eax, (1 | 2)
-    lea ecx, __bootloader_start
+    lea ecx, __page_table_start
     shr ecx, 12 # start page number
     lea edx, __bootloader_end
     add edx, 4096 - 1 # align up
