fix: 🐛 Move reset function to assembly for chainboot
berkus committed Jan 30, 2025
1 parent 88c088b commit 82c98b9
Showing 4 changed files with 77 additions and 81 deletions.
79 changes: 3 additions & 76 deletions bin/chainboot/src/boot.rs
@@ -28,81 +28,8 @@ pub unsafe extern "C" fn _start() -> ! {
unsafe { reset() };
}

#[unsafe(no_mangle)]
#[unsafe(link_section = ".text.chainboot")]
pub unsafe extern "C" fn reset() -> ! {
use core::{
cell::UnsafeCell,
sync::{atomic, atomic::Ordering},
};

// These are a problem because they are not interpreted as constants here.
// Consequently, this code tries to read values from data locations that do not exist yet.
unsafe extern "Rust" {
// Boundaries of the .bss section, provided by the linker script
static __BSS_START: UnsafeCell<()>;
static __BSS_SIZE_U64S: UnsafeCell<()>;
// Load address of the kernel binary
static __binary_nonzero_lma: UnsafeCell<()>;
// Address to relocate to and image size
static __binary_nonzero_vma: UnsafeCell<()>;
static __binary_nonzero_vma_end_exclusive: UnsafeCell<()>;
// Stack top
static __boot_core_stack_end_exclusive: UnsafeCell<()>;
}

// This tries to call memcpy() at the wrong link address - the function is in the relocated area!

// Relocate the code.
// Emulate
// core::ptr::copy_nonoverlapping(
// __binary_nonzero_lma.get() as *const u64,
// __binary_nonzero_vma.get() as *mut u64,
// __binary_nonzero_vma_end_exclusive.get() as usize - __binary_nonzero_vma.get() as usize,
// );
let binary_size = unsafe { __binary_nonzero_vma_end_exclusive.get() } as usize
- unsafe { __binary_nonzero_vma.get() } as usize;
unsafe {
local_memcpy(
__binary_nonzero_vma.get() as *mut u8,
__binary_nonzero_lma.get() as *const u8,
binary_size,
)
};

// This tries to call memset() at the wrong link address - the function is in the relocated area!

// Zeroes the .bss section
// Emulate
// crate::stdmem::local_memset(__bss_start.get() as *mut u8, 0u8, __bss_size.get() as usize);
let bss = unsafe {
core::slice::from_raw_parts_mut(
__BSS_START.get() as *mut u64,
__BSS_SIZE_U64S.get() as usize,
)
};
for i in bss {
*i = 0;
}

// Don't cross this line with loads and stores. The initializations
// done above could be "invisible" to the compiler, because we write to the
// same memory location that is used by statics after this point.
// Additionally, we assume that no statics are accessed before this point.
atomic::compiler_fence(Ordering::SeqCst);

let max_kernel_size = unsafe { __binary_nonzero_vma.get() } as u64
- unsafe { __boot_core_stack_end_exclusive.get() } as u64;
unsafe { crate::kernel_init(max_kernel_size) }
unsafe extern "Rust" {
fn reset() -> !;
}

#[inline(always)]
#[unsafe(link_section = ".text.chainboot")]
unsafe fn local_memcpy(mut dest: *mut u8, mut src: *const u8, n: usize) {
let dest_end = unsafe { dest.add(n) };
while dest < dest_end {
unsafe { *dest = *src };
dest = unsafe { dest.add(1) };
src = unsafe { src.add(1) };
}
}
core::arch::global_asm!(include_str!("boot.s"));
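For orientation, the Rust side of the boot path after this commit reduces to roughly the following sketch, reconstructed from the hunks above (the attributes on `_start` are not visible in this hunk and are assumed here):

    // bin/chainboot/src/boot.rs, approximate shape after this commit.
    #[unsafe(no_mangle)]
    pub unsafe extern "C" fn _start() -> ! {
        // Nothing that touches statics may run before `reset` has relocated
        // the binary and zeroed .bss, so `_start` only calls the assembly routine.
        unsafe { reset() };
    }

    // Implemented in boot.s, which is pulled into the crate by global_asm! below.
    unsafe extern "Rust" {
        fn reset() -> !;
    }

    core::arch::global_asm!(include_str!("boot.s"));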
68 changes: 68 additions & 0 deletions bin/chainboot/src/boot.s
@@ -0,0 +1,68 @@
/*
* Pre-boot code.
* Used only because Rust's abstract machine (AM) considers any access to statics
* before they have been initialized to be undefined behavior. This is exactly
* the situation in the boot code. So we avoid referencing any statics from the
* Rust code and delegate the task to this assembly stub instead.
*/

// Load the address of a symbol into a register, PC-relative.
//
// The symbol must lie within +/- 4 GiB of the Program Counter.
//
// # Resources
//
// - https://sourceware.org/binutils/docs-2.36/as/AArch64_002dRelocations.html
.macro ADR_REL register, symbol
adrp \register, \symbol
add \register, \register, #:lo12:\symbol
.endm
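// For example, `ADR_REL x1, __binary_nonzero_lma` below expands to
//     adrp x1, __binary_nonzero_lma             // 4 KiB page of the symbol, relative to the PC
//     add  x1, x1, #:lo12:__binary_nonzero_lma  // plus the low 12 bits of the symbol address
// so the computed address follows where the code is actually executing,
// not where it was linked to run.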

// .section .text.chainboot.entry

.section .text.chainboot
/// Reset function.
///
/// Sets up the stack, relocates the binary, and zeroes the .bss section before branching to `kernel_init()`.
///
/// # Safety
///
/// We assume that no statics are accessed before this function transfers control to `kernel_init`.
///
/// We are guaranteed to be in EL1 non-secure mode here.
reset:
ADR_REL x0, __boot_core_stack_end_exclusive
mov sp, x0

// Relocate the code from __binary_nonzero_lma to __binary_nonzero_vma
ADR_REL x1, __binary_nonzero_lma // Load address of the kernel binary
ADR_REL x2, __binary_nonzero_vma // Address to relocate to
ADR_REL x3, __binary_nonzero_vma_end_exclusive // To calculate image size

sub x0, x2, x0 // max loadable kernel size = VMA - SP

// Relocate the code.
sub x4, x3, x2 // x4 = Image size

.L__relocate_loop:
ldp x5, x6, [x1], #16
stp x5, x6, [x2], #16
subs x4, x4, #16 // subtract and set flags so the branch below sees the remaining byte count
b.ne .L__relocate_loop

// Initialize BSS
// Assumptions: BSS start is u64-aligned, BSS end is u128-aligned.
// __BSS_START and __BSS_END are defined in the linker script
ADR_REL x1, __BSS_START
ADR_REL x2, __BSS_END
.L__bss_init_loop:
stp xzr, xzr, [x1], #16
cmp x1, x2
b.lo .L__bss_init_loop // unsigned address comparison

// max_kernel_size is already in x0 here
b kernel_init

.size reset, . - reset
.type reset, function
.global reset
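For readers who do not work in AArch64 assembly every day, the two loops above correspond roughly to the Rust sketch below. It is an illustration only (the helper name is made up): doing this from Rust is exactly what the commit moves away from, and the 16-byte strides rely on the ALIGN(16) directives added to link.ld in the next file.

    /// Rough Rust rendering of the relocation and .bss-zeroing loops in boot.s.
    /// Assumes all pointers are 16-byte aligned and both spans are multiples of
    /// 16 bytes, which the linker script guarantees.
    unsafe fn reset_loops_sketch(
        lma: *const u128,     // __binary_nonzero_lma: where the binary was loaded
        vma: *mut u128,       // __binary_nonzero_vma: where it is linked to run
        vma_end: *const u128, // __binary_nonzero_vma_end_exclusive
        bss_start: *mut u128, // __BSS_START
        bss_end: *const u128, // __BSS_END
    ) {
        // ldp/stp copy 16 bytes per iteration.
        let mut src = lma;
        let mut dst = vma;
        while (dst as *const u128) < vma_end {
            unsafe {
                dst.write(src.read());
                src = src.add(1);
                dst = dst.add(1);
            }
        }
        // stp xzr, xzr zeroes 16 bytes per iteration.
        let mut p = bss_start;
        while (p as *const u128) < bss_end {
            unsafe {
                p.write(0);
                p = p.add(1);
            }
        }
    }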
10 changes: 5 additions & 5 deletions bin/chainboot/src/link.ld
@@ -55,8 +55,8 @@ SECTIONS
*(.text.chainboot)
} :segment_start_code

/* Align to 8 bytes, b/c relocating the binary is done in u64 chunks */
. = ALIGN(8);
/* Align to 16 bytes, b/c relocating the binary is done in u128 chunks */
. = ALIGN(16);

__binary_nonzero_lma = .;

@@ -88,8 +88,8 @@ SECTIONS
***********************************************************************************************/
.data : { *(.data*) } :segment_data

/* Fill up to 8 bytes, b/c relocating the binary is done in u64 chunks */
. = ALIGN(8);
/* Fill up to 16 bytes, b/c relocating the binary is done in u128 chunks */
. = ALIGN(16);
__binary_nonzero_vma_end_exclusive = .;

/* Section is zeroed in pairs of u64. Align start and end to 16 bytes at least */
@@ -99,7 +99,7 @@
*(.bss .bss.*)
*(COMMON)
. = ALIGN(16);
__BSS_SIZE_U64S = (. - __BSS_START) / 8;
__BSS_END = .;
} :segment_data

/DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) *(.text.boot*)}
1 change: 1 addition & 0 deletions bin/chainboot/src/main.rs
@@ -23,6 +23,7 @@ mod boot;
///
/// - Only a single core must be active and running this function.
/// - The init calls in this function must appear in the correct order.
#[unsafe(no_mangle)]
unsafe fn kernel_init(max_kernel_size: u64) -> ! {
#[cfg(feature = "jtag")]
machine::debug::jtag::wait_debugger();
