@@ -21,93 +21,241 @@ use core::alloc;
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::NonNull;

-use crate::utils::sync::Mutex;
-use linked_list_allocator::{align_up, Heap};
+use linked_list_allocator::Heap;

-use crate::mem::paging::*;
+use crate::utils::sync::Mutex;
use crate::AERO_SYSTEM_ALLOCATOR;

use super::paging::FRAME_ALLOCATOR;
use super::AddressSpace;
+use crate::mem::paging::*;

const HEAP_MAX_SIZE: usize = 128 * 1024 * 1024; // 128 MiB
const HEAP_START: usize = 0xfffff80000000000;
const HEAP_END: usize = HEAP_START + HEAP_MAX_SIZE;

-pub struct LockedHeap(Mutex<Heap>);
+#[repr(C)]
+struct SlabHeader {
+    ptr: *mut Slab,
+}
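+
+// Every page handed out by a `Slab` begins with a `SlabHeader` whose `ptr` field points back
+// at the owning `Slab`; `Allocator::dealloc` below recovers it by masking a freed pointer
+// down to its 4 KiB page boundary.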

-impl LockedHeap {
-    /// Creates a new uninitialized instance of the kernel
-    /// global allocator.
-    #[inline]
-    pub const fn new_uninit() -> Self {
-        Self(Mutex::new(Heap::empty()))
+/// The slab is the primary unit of currency in the slab allocator.
+///
+/// A slab consists of one or more pages of virtually contiguous memory carved up into equal-size
+/// chunks, with a reference count indicating how many of those chunks have been allocated.
+struct Slab {
+    size: usize,
+    first_free: usize,
+}
+
+impl Slab {
+    const fn new(size: usize) -> Self {
+        Self {
+            size,
+            first_free: 0,
+        }
    }

-    /// Allocate memory as described by the given `layout`.
-    ///
-    /// Returns a pointer to newly-allocated memory, or null to indicate
-    /// allocation failure.
-    unsafe fn allocate(&self, layout: alloc::Layout) -> Result<NonNull<u8>, ()> {
-        // SAFETY: We need to be careful to not cause a deadlock as the interrupt
-        // handlers utilize the heap and might interrupt an in-progress allocation. So, we
-        // lock the interrupts during the allocation.
-        let mut heap = self.0.lock_irq();
+    fn init(&mut self) {
+        unsafe {
+            let frame: PhysFrame<Size4KiB> = FRAME_ALLOCATOR
+                .allocate_frame()
+                .expect("slab_init: failed to allocate frame");
+
+            self.first_free = frame.start_address().as_u64() as usize;
+            self.first_free += crate::PHYSICAL_MEMORY_OFFSET.as_u64() as usize;
+        }
+
+        let hdr_size = core::mem::size_of::<SlabHeader>() as u64;
+        let aligned_hdr_size = align_up(hdr_size, self.size as u64) as usize;

-        heap.allocate_first_fit(layout).or_else(|_| {
-            let heap_top = heap.top();
-            let size = align_up(layout.size(), 0x1000);
+        let avl_size = Size4KiB::SIZE as usize - aligned_hdr_size;

-            // Check if our heap has not increased beyond the maximum allowed size.
-            if heap_top + size > HEAP_END {
-                panic!("The heap size has increased more than {:#x}", HEAP_END)
+        let slab_ptr = unsafe { &mut *(self.first_free as *mut SlabHeader) };
+        slab_ptr.ptr = self as *mut Slab;
+
+        self.first_free += aligned_hdr_size;
+
+        let arr_ptr = self.first_free as *mut usize;
+        let array = unsafe { core::slice::from_raw_parts_mut(arr_ptr, avl_size) };
+
+        // A slab is built by allocating a 4 KiB page, placing the slab header at
+        // the start, and dividing the rest into equal-size buffers:
+        //
+        // ------------------------------------------------------
+        // | slab header | buffer | buffer | buffer | buffer |
+        // ------------------------------------------------------
+        //                        one page
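+        //
+        // For example, with the 64-byte slab: `hdr_size` is 8, `aligned_hdr_size` is 64 and
+        // `avl_size` is 4032, so the page is carved into 63 chunks of 64 bytes. Each free
+        // chunk stores the address of the next free chunk in its first word, forming the
+        // free list that `alloc` pops from and `dealloc` pushes onto.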
+        let max = avl_size / self.size - 1;
+        let fact = self.size / 8;
+
+        for i in 0..max {
+            unsafe {
+                array[i * fact] = array.as_ptr().add((i + 1) * fact) as usize;
            }
+        }
+
+        array[max * fact] = 0;
+    }
+
+    fn alloc(&mut self) -> *mut u8 {
+        if self.first_free == 0 {
+            self.init();
+        }
+
+        let old_free = self.first_free as *mut usize;
+
+        unsafe {
+            self.first_free = *old_free;
+        }
+
+        old_free as *mut u8
+    }
+
+    fn dealloc(&mut self, ptr: *mut u8) {
+        if ptr == core::ptr::null_mut() {
+            panic!("dealloc: attempted to free a nullptr")
+        }
+
+        let new_head = ptr as *mut usize;
+
+        unsafe {
+            *new_head = self.first_free;
+        }
+
+        self.first_free = new_head as usize;
+    }
+}
+
+struct ProtectedAllocator {
+    slabs: [Slab; 10],
+    linked_list_heap: Heap,
+}
+
+struct Allocator {
+    inner: Mutex<ProtectedAllocator>,
+}
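+
+// A single irq-safe `Mutex` wraps both the slab array and the fallback linked-list heap, so
+// one `lock_irq()` in `alloc`/`dealloc` guards all allocator state; interrupt handlers also
+// allocate, so interrupts stay disabled while the lock is held.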

-            // Else we just have to extend the heap.
-            let mut address_space = AddressSpace::this();
-            let mut offset_table = address_space.offset_page_table();
-
-            let page_range = {
-                let heap_start = VirtAddr::new(heap_top as _);
-                let heap_end = heap_start + size - 1u64;
-
-                let heap_start_page: Page = Page::containing_address(heap_start);
-                let heap_end_page = Page::containing_address(heap_end);
-
-                Page::range_inclusive(heap_start_page, heap_end_page)
-            };
-
-            for page in page_range {
-                let frame = unsafe {
-                    FRAME_ALLOCATOR
-                        .allocate_frame()
-                        .expect("Failed to allocate frame to extend heap")
-                };
-
-                unsafe {
-                    offset_table.map_to(
-                        page,
-                        frame,
-                        PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-                        &mut FRAME_ALLOCATOR,
-                    )
-                }
-                .expect("Failed to map frame to extend the heap")
-                .flush();
+impl Allocator {
+    const fn new() -> Self {
+        Self {
+            inner: Mutex::new(ProtectedAllocator {
+                slabs: [
+                    Slab::new(8),
+                    Slab::new(16),
+                    Slab::new(24),
+                    Slab::new(32),
+                    Slab::new(48),
+                    Slab::new(64),
+                    Slab::new(128),
+                    Slab::new(256),
+                    Slab::new(512),
+                    Slab::new(1024),
+                ],
+
+                linked_list_heap: Heap::empty(),
+            }),
+        }
+    }
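+
+    // The size classes above run from 8 to 1024 bytes; `alloc` picks the first slab whose
+    // chunk size can hold the request, and anything larger falls through to the
+    // linked-list heap.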
+
+    fn alloc(&self, layout: Layout) -> *mut u8 {
+        let mut inner = self.inner.lock_irq();
+
+        let slab = inner
+            .slabs
+            .iter_mut()
+            .find(|slab| slab.size >= layout.size());
+
+        if let Some(slab) = slab {
+            slab.alloc()
+        } else {
+            inner
+                .linked_list_heap
+                .allocate_first_fit(layout)
+                .or_else(|_| {
+                    let heap_top = inner.linked_list_heap.top();
+                    let size = align_up(layout.size() as u64, 0x1000);
+
+                    // Check that the heap has not grown beyond the maximum allowed size.
+                    if heap_top + size as usize > HEAP_END {
+                        panic!("the heap size has increased more than {:#x}", HEAP_END)
+                    }
+
+                    // Else we just have to extend the heap.
+                    let mut address_space = AddressSpace::this();
+                    let mut offset_table = address_space.offset_page_table();
+
+                    let page_range = {
+                        let heap_start = VirtAddr::new(heap_top as _);
+                        let heap_end = heap_start + size - 1u64;
+
+                        let heap_start_page: Page = Page::containing_address(heap_start);
+                        let heap_end_page = Page::containing_address(heap_end);
+
+                        Page::range_inclusive(heap_start_page, heap_end_page)
+                    };
+
+                    for page in page_range {
+                        let frame = unsafe {
+                            FRAME_ALLOCATOR
+                                .allocate_frame()
+                                .expect("Failed to allocate frame to extend heap")
+                        };
+
+                        unsafe {
+                            offset_table.map_to(
+                                page,
+                                frame,
+                                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+                                &mut FRAME_ALLOCATOR,
+                            )
+                        }
+                        .expect("Failed to map frame to extend the heap")
+                        .flush();
+                    }
+
+                    unsafe {
+                        inner.linked_list_heap.extend(size as usize); // Now extend the heap.
+                        inner.linked_list_heap.allocate_first_fit(layout) // And try again.
+                    }
+                })
+                .expect("alloc: memory exhausted")
+                .as_ptr()
+        }
+    }
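+
+    // `dealloc` hands chunks of a full page (4 KiB) or more back to the linked-list heap;
+    // smaller chunks are pushed onto the free list of the slab named by the `SlabHeader`
+    // at the start of their page.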
+
+    fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        let mut inner = self.inner.lock_irq();
+
+        let size = layout.size();
+
+        if size >= Size4KiB::SIZE as usize {
+            unsafe {
+                inner
+                    .linked_list_heap
+                    .deallocate(NonNull::new_unchecked(ptr), layout);
            }

-            heap.extend(size); // Now extend the heap.
-            heap.allocate_first_fit(layout) // And try again.
-        })
+            return;
+        }
+
+        let slab_header = (ptr as usize & !(0xfff)) as *mut SlabHeader;
+
+        let slab_header = unsafe { &mut *slab_header };
+        let slab = unsafe { &mut *slab_header.ptr };
+
+        slab.dealloc(ptr);
    }
+}
+
+pub struct LockedHeap(Allocator);
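+
+// `LockedHeap` is the type of `AERO_SYSTEM_ALLOCATOR`, the kernel's global allocator;
+// presumably it is registered with `#[global_allocator]` elsewhere in the crate.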

-    /// Initializes an empty heap.
-    ///
-    /// ## Safety
-    /// This function should only be called once and the provided `start` address
-    /// should be a valid address.
-    unsafe fn init(&self, start: usize, size: usize) {
-        self.0.lock().init(start, size);
+impl LockedHeap {
+    /// Creates a new uninitialized instance of the kernel
+    /// global allocator.
+    #[inline]
+    pub const fn new_uninit() -> Self {
+        Self(Allocator::new())
    }
}
@@ -220,7 +368,10 @@ unsafe impl GlobalAlloc for LockedHeap {
        // necessary and sufficient.
        debug_assert!(layout.size() < usize::MAX - (layout.align() - 1));

-        let ptr = self.allocate(layout).unwrap().as_ptr();
+        // SAFETY: We need to be careful to not cause a deadlock as the interrupt
+        // handlers utilize the heap and might interrupt an in-progress allocation. So, we
+        // lock the interrupts during the allocation.
+        let ptr = self.0.alloc(layout);

        #[cfg(feature = "kmemleak")]
        kmemleak::MEM_LEAK_CATCHER.track_caller(ptr, layout);
@@ -235,9 +386,7 @@ unsafe impl GlobalAlloc for LockedHeap {
        #[cfg(feature = "kmemleak")]
        kmemleak::MEM_LEAK_CATCHER.unref(ptr);

-        self.0
-            .lock_irq()
-            .deallocate(NonNull::new_unchecked(ptr), layout)
+        self.0.dealloc(ptr, layout)
    }
}
@@ -251,29 +400,33 @@ fn alloc_error_handler(layout: alloc::Layout) -> ! {
}

/// Initialize the heap at the [HEAP_START].
-pub fn init_heap(offset_table: &mut OffsetPageTable) -> Result<(), MapToError<Size4KiB>> {
-    let frame: PhysFrame = unsafe {
-        FRAME_ALLOCATOR
-            .allocate_frame()
-            .ok_or(MapToError::FrameAllocationFailed)?
-    };
-
+pub fn init_heap() {
    unsafe {
-        offset_table.map_to(
-            Page::containing_address(VirtAddr::new(HEAP_START as _)),
-            frame,
-            PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
-            &mut FRAME_ALLOCATOR,
-        )
-    }?
-    .flush();
+        let mut address_space = AddressSpace::this();
+        let mut offset_table = address_space.offset_page_table();

-    unsafe {
-        AERO_SYSTEM_ALLOCATOR.init(HEAP_START, 4096);
+        let frame: PhysFrame = FRAME_ALLOCATOR
+            .allocate_frame()
+            .expect("init_heap: failed to allocate frame for the linked list allocator");
+
+        offset_table
+            .map_to(
+                Page::containing_address(VirtAddr::new(HEAP_START as _)),
+                frame,
+                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
+                &mut FRAME_ALLOCATOR,
+            )
+            .expect("init_heap: failed to initialize the heap")
+            .flush();
+
+        AERO_SYSTEM_ALLOCATOR
+            .0
+            .inner
+            .lock_irq()
+            .linked_list_heap
+            .init(HEAP_START, Size4KiB::SIZE as usize);
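+
+        // Only the linked-list heap needs eager setup here (a single page mapped at
+        // HEAP_START); the slabs map their backing pages lazily on first use, when
+        // `first_free` is still 0.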
    }

    #[cfg(feature = "kmemleak")]
    kmemleak::MEM_LEAK_CATCHER.init();
-
-    Ok(())
}