@@ -10,11 +10,11 @@ use bootloader_api::{
};
use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
use level_4_entries::UsedLevel4Entries;
- use usize_conversions::FromUsize;
+ use usize_conversions::{FromUsize, IntoUsize};
use x86_64::{
    structures::paging::{
        page_table::PageTableLevel, FrameAllocator, Mapper, OffsetPageTable, Page, PageSize,
-         PageTableFlags, PageTableIndex, PhysFrame, Size2MiB, Size4KiB,
+         PageTable, PageTableFlags, PageTableIndex, PhysFrame, Size2MiB, Size4KiB,
    },
    PhysAddr, VirtAddr,
};
@@ -136,6 +136,7 @@ where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
+     let bootloader_page_table = &mut page_tables.bootloader;
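+     // (Used further down to map the context switch trampoline into the
+     // bootloader's, i.e. the currently active, address space.)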
    let kernel_page_table = &mut page_tables.kernel;

    let mut used_entries = UsedLevel4Entries::new(
@@ -183,23 +184,6 @@ where
        }
    }

-     // identity-map context switch function, so that we don't get an immediate pagefault
-     // after switching the active page table
-     let context_switch_function = PhysAddr::new(context_switch as *const () as u64);
-     let context_switch_function_start_frame: PhysFrame =
-         PhysFrame::containing_address(context_switch_function);
-     for frame in PhysFrame::range_inclusive(
-         context_switch_function_start_frame,
-         context_switch_function_start_frame + 1,
-     ) {
-         match unsafe {
-             kernel_page_table.identity_map(frame, PageTableFlags::PRESENT, frame_allocator)
-         } {
-             Ok(tlb) => tlb.flush(),
-             Err(err) => panic!("failed to identity map frame {:?}: {:?}", frame, err),
-         }
-     }
-
    // create, load, and identity-map GDT (required for working `iretq`)
    let gdt_frame = frame_allocator
        .allocate_frame()
@@ -305,6 +289,151 @@ where
        None
    };

+     // Set up memory for the context switch.
+     // We set up two regions of memory:
+     // 1. "context switch page" - This page contains only a single instruction
+     //    to switch to the kernel's page table. It's placed right before the
+     //    kernel's entrypoint, so that the last instruction the bootloader
+     //    executes is the page table switch and we don't need to jump to the
+     //    entrypoint.
+     // 2. "trampoline" - The "context switch page" might overlap with the
+     //    bootloader's memory, so we can't map it into the bootloader's address
+     //    space. Instead we map a trampoline at an address of our choosing and
+     //    jump to it instead. The trampoline will then switch to a new page
+     //    table (context switch page table) that contains the "context switch
+     //    page" and jump to it.
+
+     let phys_offset = kernel_page_table.phys_offset();
+     let translate_frame_to_virt = |frame: PhysFrame| phys_offset + frame.start_address().as_u64();
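+     // The `OffsetPageTable` contract requires all physical memory to be mapped
+     // at `phys_offset`, so adding the offset to a frame's start address yields
+     // a virtual address through which the frame can be written directly.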
+
+     // Switching the page table is a 3-byte instruction.
+     // Check that subtracting 3 from the entrypoint doesn't cross the gap in the
+     // address space: 0xffff_8000_0000_0000 is the lowest canonical higher-half
+     // address, so for entrypoints up to ..._0002 the result would be non-canonical.
+     if (0xffff_8000_0000_0000..=0xffff_8000_0000_0002).contains(&entry_point.as_u64()) {
+         panic!("The kernel's entrypoint must not be located between 0xffff_8000_0000_0000 and 0xffff_8000_0000_0002");
+     }
+     // Determine the address where we should place the page table switch instruction.
+     let entrypoint_page: Page = Page::containing_address(entry_point);
+     let addr_just_before_entrypoint = entry_point.as_u64().wrapping_sub(3);
+     let context_switch_addr = VirtAddr::new(addr_just_before_entrypoint);
+     let context_switch_page: Page = Page::containing_address(context_switch_addr);
+
+     // Choose the address for the trampoline. The address shouldn't overlap
+     // with the bootloader's memory or the context switch page.
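+     // The two candidates are spaced far enough apart that whenever one of them
+     // collides with the context switch page or the entrypoint page (which are
+     // at most adjacent), the other one is guaranteed to be free.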
+     let trampoline_page_candidate1: Page =
+         Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap();
+     let trampoline_page_candidate2: Page =
+         Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_c000)).unwrap();
+     let trampoline_page = if context_switch_page != trampoline_page_candidate1
+         && entrypoint_page != trampoline_page_candidate1
+     {
+         trampoline_page_candidate1
+     } else {
+         trampoline_page_candidate2
+     };
+
+     // Prepare the trampoline.
+     let trampoline_frame = frame_allocator
+         .allocate_frame()
+         .expect("Failed to allocate memory for trampoline");
+     // Write two instructions to the trampoline:
+     // 1. Load the context switch page table
+     // 2. Jump to the context switch
+     unsafe {
+         let trampoline: *mut u8 = translate_frame_to_virt(trampoline_frame).as_mut_ptr();
+         // mov cr3, rdx
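+         // (0f 22 /r is MOV CRn, r64; the ModRM byte 0xda selects CR3 as the
+         // destination and rdx as the source.)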
+         trampoline.add(0).write(0x0f);
+         trampoline.add(1).write(0x22);
+         trampoline.add(2).write(0xda);
+         // jmp r13
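+         // (0x41 is the REX.B prefix needed to address r13; ff /4 encodes a
+         // near indirect jump.)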
+         trampoline.add(3).write(0x41);
+         trampoline.add(4).write(0xff);
+         trampoline.add(5).write(0xe5);
+     }
+
+     // Write the instruction that switches to the final kernel page table into
+     // the context switch page.
+     let context_switch_frame = frame_allocator
+         .allocate_frame()
+         .expect("Failed to allocate memory for context switch page");
+     // mov cr3, rax
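+     // (Same 0f 22 opcode; ModRM 0xd8 selects CR3 as the destination and rax
+     // as the source.)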
+     let instruction_bytes = [0x0f, 0x22, 0xd8];
+     let context_switch_ptr: *mut u8 = translate_frame_to_virt(context_switch_frame).as_mut_ptr();
+     for (i, b) in instruction_bytes.into_iter().enumerate() {
+         // We can let the offset wrap around because we map the frame twice
+         // if the context switch is near a page boundary.
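+         // For example, if the entrypoint is at offset 0x001 of its page, the
+         // three bytes land at frame offsets 0xffe, 0xfff, and 0x000; mapping
+         // the frame at both the context switch page and the entrypoint page
+         // makes them contiguous again in virtual memory.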
+         let offset = (context_switch_addr.as_u64().into_usize()).wrapping_add(i) % 4096;
+
+         unsafe {
+             // Write the instruction byte.
+             context_switch_ptr.add(offset).write(b);
+         }
+     }
+
+     // Create a new page table for use during the context switch.
+     let context_switch_page_table_frame = frame_allocator
+         .allocate_frame()
+         .expect("Failed to allocate frame for context switch page table");
+     let context_switch_page_table: &mut PageTable = {
+         let ptr: *mut PageTable =
+             translate_frame_to_virt(context_switch_page_table_frame).as_mut_ptr();
+         // create a new, empty page table
+         unsafe {
+             ptr.write(PageTable::new());
+             &mut *ptr
+         }
+     };
+     let mut context_switch_page_table =
+         unsafe { OffsetPageTable::new(context_switch_page_table, phys_offset) };
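+     // Wrapping the fresh level 4 table in an `OffsetPageTable` lets us populate
+     // it with `map_to` just like the other page tables.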
+
+     // Map the trampoline and the context switch.
+     unsafe {
+         // Map the trampoline page into both the bootloader's page table and
+         // the context switch page table.
+         bootloader_page_table
+             .map_to(
+                 trampoline_page,
+                 trampoline_frame,
+                 PageTableFlags::PRESENT,
+                 frame_allocator,
+             )
+             .expect("Failed to map trampoline into main page table")
+             .ignore();
+         context_switch_page_table
+             .map_to(
+                 trampoline_page,
+                 trampoline_frame,
+                 PageTableFlags::PRESENT,
+                 frame_allocator,
+             )
+             .expect("Failed to map trampoline into context switch page table")
+             .ignore();
+
+         // Map the context switch only into the context switch page table.
+         context_switch_page_table
+             .map_to(
+                 context_switch_page,
+                 context_switch_frame,
+                 PageTableFlags::PRESENT,
+                 frame_allocator,
+             )
+             .expect("Failed to map context switch into context switch page table")
+             .ignore();
+
+         // If the context switch is near a page boundary, map the entrypoint
+         // page to the same frame in case the page table switch instruction
+         // crosses a page boundary.
+         if context_switch_page != entrypoint_page {
+             context_switch_page_table
+                 .map_to(
+                     entrypoint_page,
+                     context_switch_frame,
+                     PageTableFlags::PRESENT,
+                     frame_allocator,
+                 )
+                 .expect("Failed to map context switch into context switch page table")
+                 .ignore();
+         }
+     }
+

    Mappings {
        framebuffer: framebuffer_virt_addr,
        entry_point,
@@ -313,6 +442,10 @@ where
        physical_memory_offset,
        recursive_index,
        tls_template,
+         context_switch_trampoline: trampoline_page.start_address(),
+         context_switch_page_table,
+         context_switch_page_table_frame,
+         context_switch_addr,
    }
}

@@ -333,6 +466,14 @@ pub struct Mappings {
    pub recursive_index: Option<PageTableIndex>,
    /// The thread local storage template of the kernel executable, if it contains one.
    pub tls_template: Option<TlsTemplate>,
+     /// The address of the context switch trampoline in the bootloader's address space.
+     pub context_switch_trampoline: VirtAddr,
+     /// The page table used for the context switch from the bootloader to the kernel.
+     pub context_switch_page_table: OffsetPageTable<'static>,
+     /// The physical frame where the level 4 page table of the context switch address space is stored.
+     pub context_switch_page_table_frame: PhysFrame,
+     /// The address just before the kernel's entrypoint.
+     pub context_switch_addr: VirtAddr,
}

/// Allocates and initializes the boot info struct and the memory map.
@@ -450,15 +591,17 @@ pub fn switch_to_kernel(
        ..
    } = page_tables;
    let addresses = Addresses {
+         context_switch_trampoline: mappings.context_switch_trampoline,
+         context_switch_page_table: mappings.context_switch_page_table_frame,
+         context_switch_addr: mappings.context_switch_addr,
        page_table: kernel_level_4_frame,
        stack_top: mappings.stack_end.start_address(),
-         entry_point: mappings.entry_point,
        boot_info,
    };

    log::info!(
-         "Jumping to kernel entry point at {:?}",
-         addresses.entry_point
+         "Switching to kernel entry point at {:?}",
+         mappings.entry_point
    );

    unsafe {
@@ -484,21 +627,25 @@ pub struct PageTables {
unsafe fn context_switch(addresses: Addresses) -> ! {
    unsafe {
        asm!(
-             "mov cr3, {}; mov rsp, {}; push 0; jmp {}",
-             in(reg) addresses.page_table.start_address().as_u64(),
+             "mov rsp, {}; sub rsp, 8; jmp {}",
            in(reg) addresses.stack_top.as_u64(),
-             in(reg) addresses.entry_point.as_u64(),
+             in(reg) addresses.context_switch_trampoline.as_u64(),
+             in("rdx") addresses.context_switch_page_table.start_address().as_u64(),
+             in("r13") addresses.context_switch_addr.as_u64(),
+             in("rax") addresses.page_table.start_address().as_u64(),
            in("rdi") addresses.boot_info as *const _ as usize,
+             options(noreturn),
        );
    }
-     unreachable!();
}

/// Memory addresses required for the context switch.
struct Addresses {
+     context_switch_trampoline: VirtAddr,
+     context_switch_page_table: PhysFrame,
+     context_switch_addr: VirtAddr,
    page_table: PhysFrame,
    stack_top: VirtAddr,
-     entry_point: VirtAddr,
    boot_info: &'static mut BootInfo,
}