@@ -1,6 +1,8 @@
 //! This module is responsible for managing the absolute addresses that allocations are located at,
 //! and for casting between pointers and integers based on those addresses.
 
+mod reuse_pool;
+
 use std::cell::RefCell;
 use std::cmp::max;
 use std::collections::hash_map::Entry;
@@ -9,9 +11,10 @@ use rand::Rng;
 
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_span::Span;
-use rustc_target::abi::{HasDataLayout, Size};
+use rustc_target::abi::{Align, HasDataLayout, Size};
 
 use crate::*;
+use reuse_pool::ReusePool;
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum ProvenanceMode {
@@ -26,7 +29,7 @@ pub enum ProvenanceMode {
 
 pub type GlobalState = RefCell<GlobalStateInner>;
 
-#[derive(Clone, Debug)]
+#[derive(Debug)]
 pub struct GlobalStateInner {
     /// This is used as a map between the address of each allocation and its `AllocId`. It is always
     /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
@@ -38,6 +41,8 @@ pub struct GlobalStateInner {
     /// they do not have an `AllocExtra`.
     /// This is the inverse of `int_to_ptr_map`.
     base_addr: FxHashMap<AllocId, u64>,
+    /// A pool of addresses we can reuse for future allocations.
+    reuse: ReusePool,
     /// Whether an allocation has been exposed or not. This cannot be put
     /// into `AllocExtra` for the same reason as `base_addr`.
     exposed: FxHashSet<AllocId>,
@@ -53,6 +58,7 @@ impl VisitProvenance for GlobalStateInner {
         let GlobalStateInner {
             int_to_ptr_map: _,
             base_addr: _,
+            reuse: _,
             exposed: _,
             next_base_addr: _,
             provenance_mode: _,
@@ -71,6 +77,7 @@ impl GlobalStateInner {
         GlobalStateInner {
             int_to_ptr_map: Vec::default(),
             base_addr: FxHashMap::default(),
+            reuse: ReusePool::new(),
             exposed: FxHashSet::default(),
             next_base_addr: stack_addr,
             provenance_mode: config.provenance_mode,
@@ -142,6 +149,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         Ok(match global_state.base_addr.entry(alloc_id) {
             Entry::Occupied(entry) => *entry.get(),
             Entry::Vacant(entry) => {
+                let mut rng = ecx.machine.rng.borrow_mut();
                 let (size, align, kind) = ecx.get_alloc_info(alloc_id);
                 // This is either called immediately after allocation (and then cached), or when
                 // adjusting `tcx` pointers (which never get freed). So assert that we are looking
@@ -150,44 +158,63 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 // information was removed.
                 assert!(!matches!(kind, AllocKind::Dead));
 
-                // This allocation does not have a base address yet, pick one.
-                // Leave some space to the previous allocation, to give it some chance to be less aligned.
-                let slack = {
-                    let mut rng = ecx.machine.rng.borrow_mut();
-                    // This means that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
-                    rng.gen_range(0..16)
+                // This allocation does not have a base address yet, pick or reuse one.
+                let base_addr = if let Some(reuse_addr) =
+                    global_state.reuse.take_addr(&mut *rng, size, align)
+                {
+                    reuse_addr
+                } else {
+                    // We have to pick a fresh address.
+                    // Leave some space to the previous allocation, to give it some chance to be less aligned.
+                    // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
+                    let slack = rng.gen_range(0..16);
+                    // From next_base_addr + slack, round up to adjust for alignment.
+                    let base_addr = global_state
+                        .next_base_addr
+                        .checked_add(slack)
+                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+                    let base_addr = align_addr(base_addr, align.bytes());
+
+                    // Remember next base address. If this allocation is zero-sized, leave a gap
+                    // of at least 1 to avoid two allocations having the same base address.
+                    // (The logic in `alloc_id_from_addr` assumes unique addresses, and different
+                    // function/vtable pointers need to be distinguishable!)
+                    global_state.next_base_addr = base_addr
+                        .checked_add(max(size.bytes(), 1))
+                        .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+                    // Even if `Size` didn't overflow, we might still have filled up the address space.
+                    if global_state.next_base_addr > ecx.target_usize_max() {
+                        throw_exhaust!(AddressSpaceFull);
+                    }
+
+                    base_addr
                 };
-                // From next_base_addr + slack, round up to adjust for alignment.
-                let base_addr = global_state
-                    .next_base_addr
-                    .checked_add(slack)
-                    .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                let base_addr = align_addr(base_addr, align.bytes());
-                entry.insert(base_addr);
                 trace!(
-                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {}, slack: {})",
+                    "Assigning base address {:#x} to allocation {:?} (size: {}, align: {})",
                     base_addr,
                     alloc_id,
                     size.bytes(),
                     align.bytes(),
-                    slack,
                 );
 
-                // Remember next base address. If this allocation is zero-sized, leave a gap
-                // of at least 1 to avoid two allocations having the same base address.
-                // (The logic in `alloc_id_from_addr` assumes unique addresses, and different
-                // function/vtable pointers need to be distinguishable!)
-                global_state.next_base_addr = base_addr
-                    .checked_add(max(size.bytes(), 1))
-                    .ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
-                // Even if `Size` didn't overflow, we might still have filled up the address space.
-                if global_state.next_base_addr > ecx.target_usize_max() {
-                    throw_exhaust!(AddressSpaceFull);
-                }
-                // Also maintain the opposite mapping in `int_to_ptr_map`.
-                // Given that `next_base_addr` increases in each allocation, pushing the
-                // corresponding tuple keeps `int_to_ptr_map` sorted
-                global_state.int_to_ptr_map.push((base_addr, alloc_id));
+                // Store address in cache.
+                entry.insert(base_addr);
+
+                // Also maintain the opposite mapping in `int_to_ptr_map`, ensuring we keep it sorted.
+                // We have a fast-path for the common case that this address is bigger than all previous ones.
+                let pos = if global_state
+                    .int_to_ptr_map
+                    .last()
+                    .is_some_and(|(last_addr, _)| *last_addr < base_addr)
+                {
+                    global_state.int_to_ptr_map.len()
+                } else {
+                    global_state
+                        .int_to_ptr_map
+                        .binary_search_by_key(&base_addr, |(addr, _)| *addr)
+                        .unwrap_err()
+                };
+                global_state.int_to_ptr_map.insert(pos, (base_addr, alloc_id));
 
                 base_addr
             }
@@ -302,7 +329,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 }
 
 impl GlobalStateInner {
-    pub fn free_alloc_id(&mut self, dead_id: AllocId) {
+    pub fn free_alloc_id(
+        &mut self,
+        rng: &mut impl Rng,
+        dead_id: AllocId,
+        size: Size,
+        align: Align,
+    ) {
         // We can *not* remove this from `base_addr`, since the interpreter design requires that we
         // be able to retrieve an AllocId + offset for any memory access *before* we check if the
         // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
@@ -322,6 +355,8 @@ impl GlobalStateInner {
         // We can also remove it from `exposed`, since this allocation can anyway not be returned by
         // `alloc_id_from_addr` any more.
         self.exposed.remove(&dead_id);
+        // Also remember this address for future reuse.
+        self.reuse.add_addr(rng, addr, size, align)
     }
 }
 
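Note: the new `reuse_pool` module itself is not included in this diff. As a rough orientation only, an interface compatible with the call sites above (`ReusePool::new`, `take_addr`, `add_addr`) could look like the sketch below; the policy shown here (keeping a freed address with probability 1/2, scanning linearly for an entry of matching size and sufficient alignment) is an illustrative assumption, not the actual implementation.

use rand::Rng;
use rustc_target::abi::{Align, Size};

/// Hypothetical sketch only: the shape of the interface the calls above assume.
/// The real `reuse_pool.rs` decides for itself which addresses to remember and hand out.
#[derive(Debug)]
pub struct ReusePool {
    /// Addresses of dead allocations, together with the size/alignment they had.
    pool: Vec<(u64, Size, Align)>,
}

impl ReusePool {
    pub fn new() -> Self {
        ReusePool { pool: Vec::new() }
    }

    /// Offer an address for later reuse. As an illustrative policy, keep it with
    /// probability 1/2 so the pool does not grow without bound.
    pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
        if rng.gen_bool(0.5) {
            self.pool.push((addr, size, align));
        }
    }

    /// Hand out a previously freed address that fits `size` and `align`, if any,
    /// picking uniformly among the candidates.
    pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
        // Collect indices of pool entries that can satisfy this request.
        let mut candidates = Vec::new();
        for (i, &(_, s, a)) in self.pool.iter().enumerate() {
            if s == size && a.bytes() >= align.bytes() {
                candidates.push(i);
            }
        }
        if candidates.is_empty() {
            return None;
        }
        let idx = candidates[rng.gen_range(0..candidates.len())];
        let (addr, _, _) = self.pool.swap_remove(idx);
        Some(addr)
    }
}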
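For reference, the sorted insert into `int_to_ptr_map` above relies on `binary_search_by_key` returning `Err(pos)` when the key is absent, where `pos` is exactly the index at which the new entry must go to keep the vector sorted; since base addresses are unique, the `Ok` case cannot occur and `unwrap_err()` is safe. A standalone illustration with made-up addresses:

fn main() {
    // A vector kept sorted by its first element, like `int_to_ptr_map`.
    let mut map: Vec<(u64, &str)> = vec![(0x100, "a"), (0x200, "b"), (0x400, "c")];
    let new_addr = 0x300;
    let pos = if map.last().is_some_and(|(last, _)| *last < new_addr) {
        // Fast path: strictly larger than everything so far, append at the end.
        map.len()
    } else {
        // Slow path: the key is not present, so binary search yields Err(insertion_position).
        map.binary_search_by_key(&new_addr, |(addr, _)| *addr).unwrap_err()
    };
    map.insert(pos, (new_addr, "d"));
    assert_eq!(map.iter().map(|(a, _)| *a).collect::<Vec<_>>(), [0x100, 0x200, 0x300, 0x400]);
}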