mem: use slab allocator when allocating smol stuff #51

Merged
merged 1 commit into from
Feb 3, 2022
4 changes: 2 additions & 2 deletions src/aero_kernel/src/arch/x86_64/mod.rs
@@ -170,10 +170,10 @@ extern "C" fn x86_64_aero_main(boot_info: &'static StivaleStruct) -> ! {
gdt::init_boot();
log::info!("loaded bootstrap GDT");

let mut offset_table = paging::init(mmap_tag).unwrap();
paging::init(mmap_tag).unwrap();
log::info!("loaded paging");

alloc::init_heap(&mut offset_table).expect("failed to initialize the kernel heap");
alloc::init_heap();
log::info!("loaded heap");

paging::init_vm_frames();
328 changes: 239 additions & 89 deletions src/aero_kernel/src/mem/alloc.rs
@@ -21,93 +21,238 @@ use core::alloc;
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::NonNull;

use crate::utils::sync::Mutex;
use linked_list_allocator::{align_up, Heap};
use linked_list_allocator::Heap;

use crate::mem::paging::*;
use crate::utils::sync::Mutex;
use crate::AERO_SYSTEM_ALLOCATOR;

use super::paging::FRAME_ALLOCATOR;
use super::AddressSpace;
use crate::mem::paging::*;

const HEAP_MAX_SIZE: usize = 128 * 1024 * 1024; // 128 MiB
const HEAP_START: usize = 0xfffff80000000000;
const HEAP_END: usize = HEAP_START + HEAP_MAX_SIZE;

pub struct LockedHeap(Mutex<Heap>);
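/// Header placed at the very start of every slab page. It points back to the
/// `Slab` (size class) that owns the page so that `dealloc` can recover it by
/// masking the pointer down to its page boundary.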
#[repr(C)]
struct SlabHeader {
ptr: *mut Slab,
}

impl LockedHeap {
/// Creates a new uninitialized instance of the kernel
/// global allocator.
#[inline]
pub const fn new_uninit() -> Self {
Self(Mutex::new(Heap::empty()))
/// The slab is the primary unit of currency in the slab allocator.
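/// Each slab manages a single size class: it hands out fixed-size buffers of
/// `size` bytes carved out of 4KiB pages, with the free buffers threaded into
/// an intrusive free list headed by `first_free`.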
struct Slab {
size: usize,
first_free: usize,
}

impl Slab {
const fn new(size: usize) -> Self {
Self {
size,
first_free: 0,
}
}

/// Allocate memory as described by the given `layout`.
///
/// Returns a pointer to newly-allocated memory, or null to indicate
/// allocation failure.
unsafe fn allocate(&self, layout: alloc::Layout) -> Result<NonNull<u8>, ()> {
// SAFETY: We need to be careful not to cause a deadlock, as the interrupt
// handlers utilize the heap and might interrupt an in-progress allocation. So, we
// lock the interrupts during the allocation.
let mut heap = self.0.lock_irq();
fn init(&mut self) {
unsafe {
let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR
.allocate_frame()
.expect("slab_init: failed to allocate frame");

self.first_free = frame.start_address().as_u64() as usize;
self.first_free += crate::PHYSICAL_MEMORY_OFFSET.as_u64() as usize;
}

let hdr_size = core::mem::size_of::<SlabHeader>() as u64;
let aligned_hdr_size = align_up(hdr_size, self.size as u64) as usize;

heap.allocate_first_fit(layout).or_else(|_| {
let heap_top = heap.top();
let size = align_up(layout.size(), 0x1000);
let avl_size = Size4KiB::SIZE as usize - aligned_hdr_size;

// Check that our heap has not grown beyond the maximum allowed size.
if heap_top + size > HEAP_END {
panic!("The heap size has increased more than {:#x}", HEAP_END)
let slab_ptr = unsafe { &mut *(self.first_free as *mut SlabHeader) };
slab_ptr.ptr = self as *mut Slab;

self.first_free += aligned_hdr_size;

let arr_ptr = self.first_free as *mut usize;
// The free list is threaded through the page as `usize` words.
let array = unsafe { core::slice::from_raw_parts_mut(arr_ptr, avl_size / core::mem::size_of::<usize>()) };

// A slab is built by allocating a 4KiB page, placing the slab header at
// the start, and dividing the rest into equal-size buffers:
//
// ------------------------------------------------------
// | slab header | buffer | buffer | buffer | buffer |
// ------------------------------------------------------
// one page
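//
// Worked example (assuming the 8-byte `SlabHeader` above): for the 64-byte
// slab, `aligned_hdr_size` is 64 and `avl_size` is 4096 - 64 = 4032, so the
// page yields 63 buffers; the loop below threads a free list through them
// and terminates it with 0.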
let max = avl_size / self.size - 1;
let fact = self.size / 8;

for i in 0..max {
unsafe {
array[i * fact] = array.as_ptr().add((i + 1) * fact) as usize;
}
}

array[max * fact] = 0;
}

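/// Pops the next buffer off the slab's free list, lazily initializing the
/// slab (allocating a fresh backing page) on first use or once the current
/// page has been exhausted.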
fn alloc(&mut self) -> *mut u8 {
if self.first_free == 0 {
self.init();
}

let old_free = self.first_free as *mut usize;

unsafe {
self.first_free = *old_free;
}

old_free as *mut u8
}

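/// Pushes the freed buffer back onto the head of the slab's free list.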
fn dealloc(&mut self, ptr: *mut u8) {
if ptr.is_null() {
panic!("dealloc: attempted to free a null pointer")
}

let new_head = ptr as *mut usize;

unsafe {
*new_head = self.first_free;
}

self.first_free = new_head as usize;
}
}

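/// Allocator state guarded by the lock in [`Allocator`]: one `Slab` per size
/// class (8 bytes up to 1024 bytes) plus the linked-list heap that services
/// larger allocations.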
struct ProtectedAllocator {
slabs: [Slab; 10],
linked_list_heap: Heap,
}

struct Allocator {
inner: Mutex<ProtectedAllocator>,
}

// Else we just have to extend the heap.
let mut address_space = AddressSpace::this();
let mut offset_table = address_space.offset_page_table();

let page_range = {
let heap_start = VirtAddr::new(heap_top as _);
let heap_end = heap_start + size - 1u64;

let heap_start_page: Page = Page::containing_address(heap_start);
let heap_end_page = Page::containing_address(heap_end);

Page::range_inclusive(heap_start_page, heap_end_page)
};

for page in page_range {
let frame = unsafe {
FRAME_ALLOCATOR
.allocate_frame()
.expect("Failed to allocate frame to extend heap")
};

unsafe {
offset_table.map_to(
page,
frame,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
&mut FRAME_ALLOCATOR,
)
}
.expect("Failed to map frame to extend the heap")
.flush();
impl Allocator {
const fn new() -> Self {
Self {
inner: Mutex::new(ProtectedAllocator {
slabs: [
Slab::new(8),
Slab::new(16),
Slab::new(24),
Slab::new(32),
Slab::new(48),
Slab::new(64),
Slab::new(128),
Slab::new(256),
Slab::new(512),
Slab::new(1024),
],

linked_list_heap: Heap::empty(),
}),
}
}

fn alloc(&self, layout: Layout) -> *mut u8 {
let mut inner = self.inner.lock_irq();

let slab = inner
.slabs
.iter_mut()
.find(|slab| slab.size >= layout.size());

if let Some(slab) = slab {
slab.alloc()
} else {
inner
.linked_list_heap
.allocate_first_fit(layout)
.or_else(|_| {
let heap_top = inner.linked_list_heap.top();
let size = align_up(layout.size() as u64, 0x1000);

// Check that our heap has not grown beyond the maximum allowed size.
if heap_top + size as usize > HEAP_END {
panic!("the heap size has increased more than {:#x}", HEAP_END)
}

// Else we just have to extend the heap.
let mut address_space = AddressSpace::this();
let mut offset_table = address_space.offset_page_table();

let page_range = {
let heap_start = VirtAddr::new(heap_top as _);
let heap_end = heap_start + size - 1u64;

let heap_start_page: Page = Page::containing_address(heap_start);
let heap_end_page = Page::containing_address(heap_end);

Page::range_inclusive(heap_start_page, heap_end_page)
};

for page in page_range {
let frame = unsafe {
FRAME_ALLOCATOR
.allocate_frame()
.expect("Failed to allocate frame to extend heap")
};

unsafe {
offset_table.map_to(
page,
frame,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
&mut FRAME_ALLOCATOR,
)
}
.expect("Failed to map frame to extend the heap")
.flush();
}

unsafe {
inner.linked_list_heap.extend(size as usize); // Now extend the heap.
inner.linked_list_heap.allocate_first_fit(layout) // And try again.
}
})
.expect("alloc: memory exhausted")
.as_ptr()
}
}

fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let mut inner = self.inner.lock_irq();

let size = layout.size();

// Anything larger than the biggest slab size class came from the linked-list heap.
if size > 1024 {
unsafe {
inner
.linked_list_heap
.deallocate(NonNull::new_unchecked(ptr), layout);
}

heap.extend(size); // Now extend the heap.
heap.allocate_first_fit(layout) // And try again.
})
return;
}

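// Every slab page begins with a `SlabHeader` pointing back at its owning
// `Slab`, so masking off the low 12 bits of the pointer recovers the header
// of the page this allocation came from.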
let slab_header = (ptr as usize & !(0xfff)) as *mut SlabHeader;

let slab_header = unsafe { &mut *slab_header };
let slab = unsafe { &mut *slab_header.ptr };

slab.dealloc(ptr);
}
}

pub struct LockedHeap(Allocator);

/// Initializes an empty heap.
///
/// ## Safety
/// This function should only be called once and the provided `start` address
/// should be a valid address.
unsafe fn init(&self, start: usize, size: usize) {
self.0.lock().init(start, size);
impl LockedHeap {
/// Creates a new uninitialized instance of the kernel
/// global allocator.
#[inline]
pub const fn new_uninit() -> Self {
Self(Allocator::new())
}
}

@@ -220,7 +365,10 @@ unsafe impl GlobalAlloc for LockedHeap {
// necessary and sufficient.
debug_assert!(layout.size() < usize::MAX - (layout.align() - 1));

let ptr = self.allocate(layout).unwrap().as_ptr();
// SAFETY: We need to be careful not to cause a deadlock, as the interrupt
// handlers utilize the heap and might interrupt an in-progress allocation. So, we
// lock the interrupts during the allocation.
let ptr = self.0.alloc(layout);

#[cfg(feature = "kmemleak")]
kmemleak::MEM_LEAK_CATCHER.track_caller(ptr, layout);
@@ -235,9 +383,7 @@ unsafe impl GlobalAlloc for LockedHeap {
#[cfg(feature = "kmemleak")]
kmemleak::MEM_LEAK_CATCHER.unref(ptr);

self.0
.lock_irq()
.deallocate(NonNull::new_unchecked(ptr), layout)
self.0.dealloc(ptr, layout)
}
}

@@ -251,29 +397,33 @@ fn alloc_error_handler(layout: alloc::Layout) -> ! {
}

/// Initialize the heap at the [HEAP_START].
pub fn init_heap(offset_table: &mut OffsetPageTable) -> Result<(), MapToError<Size4KiB>> {
let frame: PhysFrame = unsafe {
FRAME_ALLOCATOR
.allocate_frame()
.ok_or(MapToError::FrameAllocationFailed)?
};

pub fn init_heap() {
unsafe {
offset_table.map_to(
Page::containing_address(VirtAddr::new(HEAP_START as _)),
frame,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
&mut FRAME_ALLOCATOR,
)
}?
.flush();
let mut address_space = AddressSpace::this();
let mut offset_table = address_space.offset_page_table();

unsafe {
AERO_SYSTEM_ALLOCATOR.init(HEAP_START, 4096);
let frame: PhysFrame = FRAME_ALLOCATOR
.allocate_frame()
.expect("init_heap: failed to allocate frame for the linked list allocator");

offset_table
.map_to(
Page::containing_address(VirtAddr::new(HEAP_START as _)),
frame,
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
&mut FRAME_ALLOCATOR,
)
.expect("init_heap: failed to initialize the heap")
.flush();

AERO_SYSTEM_ALLOCATOR
.0
.inner
.lock_irq()
.linked_list_heap
.init(HEAP_START, Size4KiB::SIZE as usize);
}

#[cfg(feature = "kmemleak")]
kmemleak::MEM_LEAK_CATCHER.init();

Ok(())
}
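
For reference, the size-class selection in `Allocator::alloc` simply picks the first slab whose buffer size can hold the requested layout, and anything larger than 1024 bytes falls through to the linked-list heap. A minimal, self-contained sketch of that lookup (the `SLAB_SIZES` constant and `size_class` function are illustrative names, not part of this change):

const SLAB_SIZES: [usize; 10] = [8, 16, 24, 32, 48, 64, 128, 256, 512, 1024];

/// Returns the slab buffer size that would service an allocation of `size`
/// bytes, or `None` if the request is routed to the linked-list heap.
fn size_class(size: usize) -> Option<usize> {
    SLAB_SIZES.iter().copied().find(|&slab_size| slab_size >= size)
}

fn main() {
    assert_eq!(size_class(20), Some(24)); // rounded up to the 24-byte slab
    assert_eq!(size_class(1024), Some(1024)); // largest slab-backed size
    assert_eq!(size_class(4096), None); // served by the linked-list heap
}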