@@ -425,10 +425,7 @@ impl BootServices {
425
425
///
426
426
/// * [`uefi::Status::BUFFER_TOO_SMALL`]
427
427
/// * [`uefi::Status::INVALID_PARAMETER`]
428
- pub fn memory_map < ' buf > (
429
- & self ,
430
- buffer : & ' buf mut [ u8 ] ,
431
- ) -> Result < ( MemoryMapKey , MemoryMapIter < ' buf > ) > {
428
+ pub fn memory_map < ' buf > ( & self , buffer : & ' buf mut [ u8 ] ) -> Result < MemoryMap < ' buf > > {
432
429
let mut map_size = buffer. len ( ) ;
433
430
MemoryDescriptor :: assert_aligned ( buffer) ;
434
431
let map_buffer = buffer. as_mut_ptr ( ) . cast :: < MemoryDescriptor > ( ) ;
@@ -453,13 +450,13 @@ impl BootServices {
453
450
}
454
451
. into_with_val ( move || {
455
452
let len = map_size / entry_size;
456
- let iter = MemoryMapIter {
457
- buffer,
453
+
454
+ MemoryMap {
455
+ key : map_key,
456
+ buf : buffer,
458
457
entry_size,
459
- index : 0 ,
460
458
len,
461
- } ;
462
- ( map_key, iter)
459
+ }
463
460
} )
464
461
}
465
462
@@ -1993,6 +1990,108 @@ pub struct MemoryMapSize {
1993
1990
pub map_size : usize ,
1994
1991
}
1995
1992
1993
/// A UEFI memory map: the raw descriptor buffer together with the unique
/// [`MemoryMapKey`] that was returned alongside it.
///
/// To iterate over the entries, call [`MemoryMap::entries`]. To get a sorted
/// map, you manually have to call [`MemoryMap::sort`] first.
pub struct MemoryMap<'buf> {
    // Unique key identifying this snapshot of the memory map.
    key: MemoryMapKey,
    // Raw backing storage holding `len` descriptors laid out `entry_size`
    // bytes apart.
    buf: &'buf mut [u8],
    // Stride in bytes between consecutive descriptors; presumably may be
    // larger than `size_of::<MemoryDescriptor>()` — do not assume tight
    // packing.
    entry_size: usize,
    // Number of descriptors stored in `buf`.
    len: usize,
}
2004
+
2005
+ impl < ' buf > MemoryMap < ' buf > {
2006
+ #[ must_use]
2007
+ /// Returns the unique [`MemoryMapKey`] associated with the memory map.
2008
+ pub fn key ( & self ) -> MemoryMapKey {
2009
+ self . key
2010
+ }
2011
+
2012
+ /// Sorts the memory map by physical address in place.
2013
+ /// This operation is optional and should be invoked only once.
2014
+ pub fn sort ( & mut self ) {
2015
+ unsafe {
2016
+ self . qsort ( 0 , self . len - 1 ) ;
2017
+ }
2018
+ }
2019
+
2020
+ /// Hoare partition scheme for quicksort.
2021
+ /// Must be called with `low` and `high` being indices within bounds.
2022
+ unsafe fn qsort ( & mut self , low : usize , high : usize ) {
2023
+ if low >= high {
2024
+ return ;
2025
+ }
2026
+
2027
+ let p = self . partition ( low, high) ;
2028
+ self . qsort ( low, p) ;
2029
+ self . qsort ( p + 1 , high) ;
2030
+ }
2031
+
2032
+ unsafe fn partition ( & mut self , low : usize , high : usize ) -> usize {
2033
+ let pivot = self . get_element_phys_addr ( low + ( high - low) / 2 ) ;
2034
+
2035
+ let mut left_index = low. wrapping_sub ( 1 ) ;
2036
+ let mut right_index = high. wrapping_add ( 1 ) ;
2037
+
2038
+ loop {
2039
+ while {
2040
+ left_index = left_index. wrapping_add ( 1 ) ;
2041
+
2042
+ self . get_element_phys_addr ( left_index) < pivot
2043
+ } { }
2044
+
2045
+ while {
2046
+ right_index = right_index. wrapping_sub ( 1 ) ;
2047
+
2048
+ self . get_element_phys_addr ( right_index) > pivot
2049
+ } { }
2050
+
2051
+ if left_index >= right_index {
2052
+ return right_index;
2053
+ }
2054
+
2055
+ self . swap ( left_index, right_index) ;
2056
+ }
2057
+ }
2058
+
2059
+ /// Indices must be smaller than len.
2060
+ unsafe fn swap ( & mut self , index1 : usize , index2 : usize ) {
2061
+ if index1 == index2 {
2062
+ return ;
2063
+ }
2064
+
2065
+ let base = self . buf . as_mut_ptr ( ) ;
2066
+
2067
+ unsafe {
2068
+ ptr:: swap_nonoverlapping (
2069
+ base. add ( index1 * self . entry_size ) ,
2070
+ base. add ( index2 * self . entry_size ) ,
2071
+ self . entry_size ,
2072
+ ) ;
2073
+ }
2074
+ }
2075
+
2076
+ fn get_element_phys_addr ( & self , index : usize ) -> PhysicalAddress {
2077
+ let offset = index. checked_mul ( self . entry_size ) . unwrap ( ) ;
2078
+ let elem = unsafe { & * self . buf . as_ptr ( ) . add ( offset) . cast :: < MemoryDescriptor > ( ) } ;
2079
+ elem. phys_start
2080
+ }
2081
+
2082
+ /// Returns an iterator over the contained memory map. To get a sorted map,
2083
+ /// call [`MemoryMap::sort`] first.
2084
+ #[ must_use]
2085
+ pub fn entries ( & self ) -> MemoryMapIter {
2086
+ MemoryMapIter {
2087
+ buffer : self . buf ,
2088
+ entry_size : self . entry_size ,
2089
+ index : 0 ,
2090
+ len : self . len ,
2091
+ }
2092
+ }
2093
+ }
2094
+
1996
2095
/// An iterator of [`MemoryDescriptor`]. The underlying memory map is always
1997
2096
/// associated with a unique [`MemoryMapKey`].
1998
2097
#[ derive( Debug , Clone ) ]
@@ -2014,12 +2113,16 @@ impl<'buf> Iterator for MemoryMapIter<'buf> {
2014
2113
2015
2114
fn next ( & mut self ) -> Option < Self :: Item > {
2016
2115
if self . index < self . len {
2017
- let ptr = self . buffer . as_ptr ( ) as usize + self . entry_size * self . index ;
2116
+ let descriptor = unsafe {
2117
+ & * self
2118
+ . buffer
2119
+ . as_ptr ( )
2120
+ . add ( self . entry_size * self . index )
2121
+ . cast :: < MemoryDescriptor > ( )
2122
+ } ;
2018
2123
2019
2124
self . index += 1 ;
2020
2125
2021
- let descriptor = unsafe { & * ( ptr as * const MemoryDescriptor ) } ;
2022
-
2023
2126
Some ( descriptor)
2024
2127
} else {
2025
2128
None
@@ -2197,3 +2300,93 @@ pub enum InterfaceType: i32 => {
2197
2300
#[ derive( Debug , Clone , Copy ) ]
2198
2301
#[ repr( transparent) ]
2199
2302
pub struct ProtocolSearchKey ( NonNull < c_void > ) ;
2303
+
2304
#[cfg(test)]
mod tests {
    use core::mem::size_of;

    use crate::table::boot::{MemoryAttribute, MemoryMap, MemoryMapKey, MemoryType};

    use super::{MemoryDescriptor, MemoryMapIter};

    #[test]
    fn mem_map_sorting() {
        // Doesn't matter what type it is.
        const TY: MemoryType = MemoryType::RESERVED;

        const BASE: MemoryDescriptor = MemoryDescriptor {
            ty: TY,
            phys_start: 0,
            virt_start: 0,
            page_count: 0,
            att: MemoryAttribute::empty(),
        };

        // Deliberately out of order so `sort` has real work to do.
        let mut descriptors = [2000, 3000, 0, 1000].map(|phys_start| MemoryDescriptor {
            phys_start,
            ..BASE
        });

        let desc_count = descriptors.len();
        let byte_len = desc_count * size_of::<MemoryDescriptor>();

        // Reinterpret the descriptor array as its raw bytes, as the real
        // memory map buffer would be.
        let byte_buffer = unsafe {
            core::slice::from_raw_parts_mut(descriptors.as_mut_ptr() as *mut u8, byte_len)
        };

        let mut mem_map = MemoryMap {
            // Key doesn't matter
            key: MemoryMapKey(0),
            len: desc_count,
            buf: byte_buffer,
            entry_size: size_of::<MemoryDescriptor>(),
        };

        mem_map.sort();

        assert!(
            is_sorted(&mem_map.entries()),
            "mem_map is not sorted: {}",
            mem_map
        );
    }

    // Added for debug purposes on test failure
    impl core::fmt::Display for MemoryMap<'_> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            writeln!(f)?;
            self.entries().try_for_each(|desc| writeln!(f, "{:?}", desc))
        }
    }

    // Returns `true` iff the physical start addresses yielded by the iterator
    // are strictly increasing (an empty map counts as sorted).
    fn is_sorted(iter: &MemoryMapIter) -> bool {
        let mut it = iter.clone();

        let mut prev = match it.next() {
            Some(first) => first.phys_start,
            None => return true,
        };

        it.all(|desc| {
            let ascending = desc.phys_start > prev;
            prev = desc.phys_start;
            ascending
        })
    }
}
0 commit comments