@@ -21,93 +21,238 @@ use core::alloc;
 use core::alloc::{GlobalAlloc, Layout};
 use core::ptr::NonNull;
 
-use crate::utils::sync::Mutex;
-use linked_list_allocator::{align_up, Heap};
+use linked_list_allocator::Heap;
 
-use crate::mem::paging::*;
+use crate::utils::sync::Mutex;
 
 use crate::AERO_SYSTEM_ALLOCATOR;
 
 use super::paging::FRAME_ALLOCATOR;
 use super::AddressSpace;
+use crate::mem::paging::*;
 
 const HEAP_MAX_SIZE: usize = 128 * 1024 * 1024; // 128 MiB
 const HEAP_START: usize = 0xfffff80000000000;
 const HEAP_END: usize = HEAP_START + HEAP_MAX_SIZE;
 
-pub struct LockedHeap(Mutex<Heap>);
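+/// Header placed at the base of every slab page; `dealloc` recovers it by
+/// masking an object pointer down to its 4 KiB page boundary, and `ptr`
+/// points back to the owning [`Slab`].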
+#[repr(C)]
+struct SlabHeader {
+    ptr: *mut Slab,
+}
 
-impl LockedHeap {
-    /// Creates a new uninitialized instance of the kernel
-    /// global allocator.
-    #[inline]
-    pub const fn new_uninit() -> Self {
-        Self(Mutex::new(Heap::empty()))
+/// The slab is the primary unit of currency in the slab allocator.
+struct Slab {
+    size: usize,
+    first_free: usize,
+}
+
+impl Slab {
+    const fn new(size: usize) -> Self {
+        Self {
+            size,
+            first_free: 0,
+        }
     }
 
-    /// Allocate memory as described by the given `layout`.
-    ///
-    /// Returns a pointer to newly-allocated memory, or null to indicate
-    /// allocation failure.
-    unsafe fn allocate(&self, layout: alloc::Layout) -> Result<NonNull<u8>, ()> {
-        // SAFETY: We need to be careful to not cause a deadlock as the interrupt
-        // handlers utilize the heap and might interrupt an in-progress allocation.
-        // So, we lock the interrupts during the allocation.
-        let mut heap = self.0.lock_irq();
+    fn init(&mut self) {
+        unsafe {
+            let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR
+                .allocate_frame()
+                .expect("slab_init: failed to allocate frame");
+
+            self.first_free = frame.start_address().as_u64() as usize;
+            self.first_free += crate::PHYSICAL_MEMORY_OFFSET.as_u64() as usize;
+        }
+
+        let hdr_size = core::mem::size_of::<SlabHeader>() as u64;
+        let aligned_hdr_size = align_up(hdr_size, self.size as u64) as usize;
 
-        heap.allocate_first_fit(layout).or_else(|_| {
-            let heap_top = heap.top();
-            let size = align_up(layout.size(), 0x1000);
+        let avl_size = Size4KiB::SIZE as usize - aligned_hdr_size;
 
-            // Check if our heap has not increased beyond the maximum allowed size.
-            if heap_top + size > HEAP_END {
-                panic!("The heap size has increased more than {:#x}", HEAP_END)
+        let slab_ptr = unsafe { &mut *(self.first_free as *mut SlabHeader) };
+        slab_ptr.ptr = self as *mut Slab;
+
+        self.first_free += aligned_hdr_size;
+
+        let arr_ptr = self.first_free as *mut usize;
+        let array = unsafe { core::slice::from_raw_parts_mut(arr_ptr, avl_size / 8) };
+
+        // A slab is built by allocating a 4KiB page, placing the slab header
+        // at its start, and dividing the rest into equal-size buffers:
+        //
+        // ------------------------------------------------------
+        // | slab header | buffer | buffer | buffer | buffer |
+        // ------------------------------------------------------
+        //                        one page
+        let max = avl_size / self.size - 1;
+        let fact = self.size / 8;
+
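+        // Thread the free list through the buffers: each free buffer's first
+        // word (counted in `usize` units, hence `fact = size / 8`) stores the
+        // address of the next free buffer, and the last one stores 0.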
+        for i in 0..max {
+            unsafe {
+                array[i * fact] = array.as_ptr().add((i + 1) * fact) as usize;
             }
+        }
+
+        array[max * fact] = 0;
+    }
+
+    fn alloc(&mut self) -> *mut u8 {
+        if self.first_free == 0 {
+            self.init();
+        }
+
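+        // Pop the head of the free list; its first word holds the address of
+        // the next free buffer (0 once the slab page is exhausted).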
+        let old_free = self.first_free as *mut usize;
+
+        unsafe {
+            self.first_free = *old_free;
+        }
+
+        old_free as *mut u8
+    }
+
+    fn dealloc(&mut self, ptr: *mut u8) {
+        if ptr.is_null() {
+            panic!("dealloc: attempted to free a nullptr")
+        }
+
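+        // Push the buffer back: write the old head into its first word and
+        // make it the new head of the free list.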
+        let new_head = ptr as *mut usize;
+
+        unsafe {
+            *new_head = self.first_free;
+        }
+
+        self.first_free = new_head as usize;
+    }
+}
+
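+/// Allocator state guarded by a single lock: fixed size-class slabs for small
+/// objects and a linked list heap for everything larger.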
+struct ProtectedAllocator {
+    slabs: [Slab; 10],
+    linked_list_heap: Heap,
+}
+
+struct Allocator {
+    inner: Mutex<ProtectedAllocator>,
+}
 
-            // Else we just have to extend the heap.
-            let mut address_space = AddressSpace::this();
-            let mut offset_table = address_space.offset_page_table();
-
-            let page_range = {
-                let heap_start = VirtAddr::new(heap_top as _);
-                let heap_end = heap_start + size - 1u64;
-
-                let heap_start_page: Page = Page::containing_address(heap_start);
-                let heap_end_page = Page::containing_address(heap_end);
-
-                Page::range_inclusive(heap_start_page, heap_end_page)
-            };
-
-            for page in page_range {
-                let frame = unsafe {
-                    FRAME_ALLOCATOR
-                        .allocate_frame()
-                        .expect("Failed to allocate frame to extend heap")
-                };
-
-                unsafe {
-                    offset_table.map_to(
-                        page,
-                        frame,
-                        PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-                        &mut FRAME_ALLOCATOR,
-                    )
-                }
-                .expect("Failed to map frame to extend the heap")
-                .flush();
+impl Allocator {
+    const fn new() -> Self {
+        Self {
+            inner: Mutex::new(ProtectedAllocator {
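+                // Size classes from 8 bytes up to 1 KiB; anything larger is
+                // served by the linked list heap.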
+                slabs: [
+                    Slab::new(8),
+                    Slab::new(16),
+                    Slab::new(24),
+                    Slab::new(32),
+                    Slab::new(48),
+                    Slab::new(64),
+                    Slab::new(128),
+                    Slab::new(256),
+                    Slab::new(512),
+                    Slab::new(1024),
+                ],
+
+                linked_list_heap: Heap::empty(),
+            }),
+        }
+    }
+
+    fn alloc(&self, layout: Layout) -> *mut u8 {
+        let mut inner = self.inner.lock_irq();
+
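+        // Route the request to the smallest size class that fits; requests
+        // larger than the biggest slab fall through to the linked list heap.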
+        let slab = inner
+            .slabs
+            .iter_mut()
+            .find(|slab| slab.size >= layout.size());
+
+        if let Some(slab) = slab {
+            slab.alloc()
+        } else {
+            inner
+                .linked_list_heap
+                .allocate_first_fit(layout)
+                .or_else(|_| {
+                    let heap_top = inner.linked_list_heap.top();
+                    let size = align_up(layout.size() as u64, 0x1000);
+
+                    // Check that the heap has not grown beyond the maximum allowed size.
+                    if heap_top + size as usize > HEAP_END {
+                        panic!("the heap size has increased beyond {:#x}", HEAP_END)
+                    }
+
+                    // Else we just have to extend the heap.
+                    let mut address_space = AddressSpace::this();
+                    let mut offset_table = address_space.offset_page_table();
+
+                    let page_range = {
+                        let heap_start = VirtAddr::new(heap_top as _);
+                        let heap_end = heap_start + size - 1u64;
+
+                        let heap_start_page: Page = Page::containing_address(heap_start);
+                        let heap_end_page = Page::containing_address(heap_end);
+
+                        Page::range_inclusive(heap_start_page, heap_end_page)
+                    };
+
+                    for page in page_range {
+                        let frame = unsafe {
+                            FRAME_ALLOCATOR
+                                .allocate_frame()
+                                .expect("Failed to allocate frame to extend heap")
+                        };
+
+                        unsafe {
+                            offset_table.map_to(
+                                page,
+                                frame,
+                                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+                                &mut FRAME_ALLOCATOR,
+                            )
+                        }
+                        .expect("Failed to map frame to extend the heap")
+                        .flush();
+                    }
+
+                    unsafe {
+                        inner.linked_list_heap.extend(size as usize); // Now extend the heap.
+                        inner.linked_list_heap.allocate_first_fit(layout) // And try again.
+                    }
+                })
+                .expect("alloc: memory exhausted")
+                .as_ptr()
+        }
+    }
+
+    fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let mut inner = self.inner.lock_irq();
+
+        let size = layout.size();
+
+        // Mirror the routing in `alloc`: anything too large for the biggest
+        // slab (1024 bytes) was served by the linked list heap.
+        if size > 1024 {
+            unsafe {
+                inner
+                    .linked_list_heap
+                    .deallocate(NonNull::new_unchecked(ptr), layout);
             }
 
-            heap.extend(size); // Now extend the heap.
-            heap.allocate_first_fit(layout) // And try again.
-        })
+            return;
+        }
+
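+        // Slab buffers never straddle a page boundary, so masking off the low
+        // 12 bits of the pointer lands on the slab page's header, which points
+        // back to the owning slab.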
+        let slab_header = (ptr as usize & !(0xfff)) as *mut SlabHeader;
+
+        let slab_header = unsafe { &mut *slab_header };
+        let slab = unsafe { &mut *slab_header.ptr };
+
+        slab.dealloc(ptr);
     }
+}
+
+pub struct LockedHeap(Allocator);
 
-    /// Initializes an empty heap.
-    ///
-    /// ## Safety
-    /// This function should only be called once and the provided `start` address
-    /// should be a valid address.
-    unsafe fn init(&self, start: usize, size: usize) {
-        self.0.lock().init(start, size);
+impl LockedHeap {
+    /// Creates a new uninitialized instance of the kernel
+    /// global allocator.
+    #[inline]
+    pub const fn new_uninit() -> Self {
+        Self(Allocator::new())
     }
 }
 
@@ -220,7 +365,10 @@ unsafe impl GlobalAlloc for LockedHeap {
         // necessary and sufficient.
         debug_assert!(layout.size() < usize::MAX - (layout.align() - 1));
 
-        let ptr = self.allocate(layout).unwrap().as_ptr();
+        // SAFETY: We need to be careful to not cause a deadlock as the interrupt
+        // handlers utilize the heap and might interrupt an in-progress allocation.
+        // So, we lock the interrupts during the allocation.
+        let ptr = self.0.alloc(layout);
 
         #[cfg(feature = "kmemleak")]
         kmemleak::MEM_LEAK_CATCHER.track_caller(ptr, layout);
@@ -235,9 +383,7 @@ unsafe impl GlobalAlloc for LockedHeap {
         #[cfg(feature = "kmemleak")]
         kmemleak::MEM_LEAK_CATCHER.unref(ptr);
 
-        self.0
-            .lock_irq()
-            .deallocate(NonNull::new_unchecked(ptr), layout)
+        self.0.dealloc(ptr, layout)
     }
 }
 
@@ -251,29 +397,33 @@ fn alloc_error_handler(layout: alloc::Layout) -> ! {
 }
 
 /// Initialize the heap at the [HEAP_START].
-pub fn init_heap(offset_table: &mut OffsetPageTable) -> Result<(), MapToError<Size4KiB>> {
-    let frame: PhysFrame = unsafe {
-        FRAME_ALLOCATOR
-            .allocate_frame()
-            .ok_or(MapToError::FrameAllocationFailed)?
-    };
-
+pub fn init_heap() {
     unsafe {
-        offset_table.map_to(
-            Page::containing_address(VirtAddr::new(HEAP_START as _)),
-            frame,
-            PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-            &mut FRAME_ALLOCATOR,
-        )
-    }?
-    .flush();
+        let mut address_space = AddressSpace::this();
+        let mut offset_table = address_space.offset_page_table();
 
-    unsafe {
-        AERO_SYSTEM_ALLOCATOR.init(HEAP_START, 4096);
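+        // Map a single bootstrap page for the linked list heap; `alloc` grows
+        // the mapping on demand, up to HEAP_MAX_SIZE.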
+        let frame: PhysFrame = FRAME_ALLOCATOR
+            .allocate_frame()
+            .expect("init_heap: failed to allocate frame for the linked list allocator");
+
+        offset_table
+            .map_to(
+                Page::containing_address(VirtAddr::new(HEAP_START as _)),
+                frame,
+                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+                &mut FRAME_ALLOCATOR,
+            )
+            .expect("init_heap: failed to initialize the heap")
+            .flush();
+
+        AERO_SYSTEM_ALLOCATOR
+            .0
+            .inner
+            .lock_irq()
+            .linked_list_heap
+            .init(HEAP_START, Size4KiB::SIZE as usize);
     }
 
     #[cfg(feature = "kmemleak")]
     kmemleak::MEM_LEAK_CATCHER.init();
-
-    Ok(())
 }
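For reference, here is a minimal, self-contained sketch of the intrusive free-list technique the new `Slab` relies on. This is an editor's illustration, not code from the commit: `OBJ_WORDS` is a hypothetical stand-in for `fact = size / 8`, and a `Vec<usize>` stands in for the mapped 4 KiB frame.

```rust
// One 16-byte object = two usize-sized words on x86_64.
const OBJ_WORDS: usize = 2;

fn main() {
    // Backing storage standing in for the mapped frame.
    let mut page = vec![0usize; 8 * OBJ_WORDS]; // room for 8 objects
    let objects = page.len() / OBJ_WORDS;
    let base = page.as_mut_ptr();

    // init(): thread the free list through the buffers. Each free object's
    // first word holds the address of the next free object; the last holds 0.
    unsafe {
        for i in 0..objects - 1 {
            *base.add(i * OBJ_WORDS) = base.add((i + 1) * OBJ_WORDS) as usize;
        }
        *base.add((objects - 1) * OBJ_WORDS) = 0;
    }

    let mut first_free = base as usize;

    // alloc(): pop the head of the list.
    let obj = first_free as *mut usize;
    first_free = unsafe { *obj };
    assert_eq!(first_free, unsafe { base.add(OBJ_WORDS) } as usize);

    // dealloc(): push the object back onto the list.
    unsafe { *obj = first_free };
    first_free = obj as usize;
    assert_eq!(first_free, base as usize);
}
```

Allocation and free are both O(1) pointer swaps, which is why the commit only falls back to the linked list heap for sizes no slab class covers.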