@@ -69,6 +69,14 @@ pub trait SFT {
     /// object - the sanity checker will fail if an object is not sane.
     #[cfg(feature = "sanity")]
     fn is_sane(&self) -> bool;
+    /// Is the object managed by MMTk? In most cases, if we can find the SFT for an object, the
+    /// object is in the space and managed by MMTk. However, for some spaces, like MallocSpace,
+    /// we mark the entire chunk in the SFT table as a malloc space, but only some of the addresses
+    /// in the space contain actual MMTk objects, so a further check is needed.
+    #[inline(always)]
+    fn is_mmtk_object(&self, _object: ObjectReference) -> bool {
+        true
+    }
     /// Initialize object metadata (in the header, or in the side metadata).
     fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool);
 }
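The default implementation returning `true` means most policies keep the existing behaviour ("an SFT hit implies an MMTk object"), and only spaces whose SFT entries can cover non-object addresses, such as MallocSpace, need to override it. The following self-contained sketch (simplified stand-in types, not the real MMTk definitions) illustrates the pattern:

```rust
use std::collections::HashSet;

// Sketch only: models how the default `is_mmtk_object` lets most spaces answer
// `true` for free, while a malloc-backed space overrides it with a per-address
// metadata check. Types and addresses here are illustrative, not MMTk's.
trait Sft {
    fn name(&self) -> &str;

    // Default: an address that maps to this space's SFT entry is an MMTk object.
    fn is_mmtk_object(&self, _addr: usize) -> bool {
        true
    }
}

struct ImmortalLike;

impl Sft for ImmortalLike {
    fn name(&self) -> &str {
        "ImmortalLike"
    }
    // Inherits the default `is_mmtk_object`.
}

struct MallocLike {
    // Stand-in for malloc side metadata: addresses known to hold objects.
    allocated: HashSet<usize>,
}

impl Sft for MallocLike {
    fn name(&self) -> &str {
        "MallocLike"
    }

    // The whole chunk is tagged as this space, so consult per-address metadata.
    fn is_mmtk_object(&self, addr: usize) -> bool {
        self.allocated.contains(&addr)
    }
}

fn main() {
    let malloc = MallocLike {
        allocated: [0x1000usize].into_iter().collect(),
    };
    assert!(ImmortalLike.is_mmtk_object(0x2000));
    assert!(malloc.is_mmtk_object(0x1000));
    assert!(!malloc.is_mmtk_object(0x2000));
    println!("{} and {} behave as expected", ImmortalLike.name(), malloc.name());
}
```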
@@ -106,6 +114,10 @@ impl SFT for EmptySpaceSFT {
         */
         false
     }
+    #[inline(always)]
+    fn is_mmtk_object(&self, _object: ObjectReference) -> bool {
+        false
+    }
 
     fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
         panic!(
@@ -153,19 +165,20 @@ impl<'a> SFTMap<'a> {
         res
     }
 
-    fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, chunks: usize) {
+    fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) {
+        debug!(
+            "Update SFT for [{}, {}) as {}",
+            start,
+            start + bytes,
+            space.name()
+        );
         let first = start.chunk_index();
-        let end = start + (chunks << LOG_BYTES_IN_CHUNK);
-        debug!("Update SFT for [{}, {}) as {}", start, end, space.name());
+        let last = conversions::chunk_align_up(start + bytes).chunk_index();
         let start_chunk = chunk_index_to_address(first);
-        let end_chunk = chunk_index_to_address(first + chunks);
+        let end_chunk = chunk_index_to_address(last);
         debug!(
-            "Update SFT for {} chunks of [{} #{}, {} #{})",
-            chunks,
-            start_chunk,
-            first,
-            end_chunk,
-            first + chunks
+            "Update SFT for {} bytes of [{} #{}, {} #{})",
+            bytes, start_chunk, first, end_chunk, last
         );
     }
 
@@ -188,13 +201,15 @@ impl<'a> SFTMap<'a> {
     }
 
     /// Update SFT map for the given address range.
-    /// It should be used in these cases: 1. when a space grows, 2. when initializing a contiguous space, 3. when ensure_mapped() is called on a space.
+    /// It should be used when we acquire new memory and use it as part of a space. For example, the cases include:
+    /// 1. when a space grows, 2. when initializing a contiguous space, 3. when ensure_mapped() is called on a space.
+    pub fn update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) {
-    pub fn update(&self, space: &(dyn SFT + Sync + 'static), start: Address, chunks: usize) {
         if DEBUG_SFT {
-            self.log_update(space, start, chunks);
+            self.log_update(space, start, bytes);
         }
         let first = start.chunk_index();
-        for chunk in first..(first + chunks) {
+        let last = conversions::chunk_align_up(start + bytes).chunk_index();
+        for chunk in first..last {
             self.set(chunk, space);
         }
         if DEBUG_SFT {
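Because `update` now takes a byte count instead of a chunk count, the map computes the covering chunk range itself: from the chunk containing `start` up to the chunk boundary at or above `start + bytes`, so a partially covered trailing chunk is still included. Below is a small standalone sketch of that arithmetic with an assumed 4 MiB chunk size; the real code uses `conversions::chunk_align_up` and `Address::chunk_index`.

```rust
// Assumed chunk size for illustration; MMTk's actual constants live in
// util::conversions and util::heap::layout.
const LOG_BYTES_IN_CHUNK: usize = 22; // 4 MiB chunks
const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;

// Index of the chunk containing an address.
fn chunk_index(addr: usize) -> usize {
    addr >> LOG_BYTES_IN_CHUNK
}

// Round an address up to the next chunk boundary.
fn chunk_align_up(addr: usize) -> usize {
    (addr + BYTES_IN_CHUNK - 1) & !(BYTES_IN_CHUNK - 1)
}

fn main() {
    let start = 3 * BYTES_IN_CHUNK;
    let bytes = BYTES_IN_CHUNK + 1; // spills one byte into the next chunk
    let first = chunk_index(start);
    let last = chunk_index(chunk_align_up(start + bytes));
    // Covers chunks 3 and 4: the partially used trailing chunk is included.
    assert_eq!((first, last), (3, 5));
    for chunk in first..last {
        println!("would set SFT entry for chunk #{}", chunk);
    }
}
```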
@@ -204,10 +219,18 @@ impl<'a> SFTMap<'a> {
 
     // TODO: We should clear an SFT entry when a space releases a chunk.
     #[allow(dead_code)]
-    pub fn clear(&self, chunk_idx: usize) {
+    pub fn clear(&self, chunk_start: Address) {
+        assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
+        let chunk_idx = chunk_start.chunk_index();
         self.set(chunk_idx, &EMPTY_SPACE_SFT);
     }
 
+    // Currently only used by the 32-bit VM map.
+    #[allow(dead_code)]
+    pub fn clear_by_index(&self, chunk_idx: usize) {
+        self.set(chunk_idx, &EMPTY_SPACE_SFT)
+    }
+
     fn set(&self, chunk: usize, sft: &(dyn SFT + Sync + 'static)) {
         /*
          * This is safe (only) because a) this is only called during the
@@ -227,7 +250,8 @@ impl<'a> SFTMap<'a> {
         // in which case, we still set SFT map again.
         debug_assert!(
             old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new,
-            "attempt to overwrite a non-empty chunk in SFT map (from {} to {})",
+            "attempt to overwrite a non-empty chunk {} in SFT map (from {} to {})",
+            chunk,
             old,
             new
         );
@@ -236,16 +260,10 @@ impl<'a> SFTMap<'a> {
     }
 
     pub fn is_in_space(&self, object: ObjectReference) -> bool {
-        let not_in_space = object.to_address().chunk_index() >= self.sft.len()
-            || self.get(object.to_address()).name() == EMPTY_SPACE_SFT.name();
-
-        if not_in_space {
-            // special case - we do not yet have SFT entries for malloc space
-            use crate::policy::mallocspace::is_alloced_by_malloc;
-            is_alloced_by_malloc(object)
-        } else {
-            true
+        if object.to_address().chunk_index() >= self.sft.len() {
+            return false;
         }
+        self.get(object.to_address()).is_mmtk_object(object)
     }
 }
 
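With the bounds check hoisted out, `is_in_space` no longer hard-codes a MallocSpace special case: it asks the per-chunk SFT entry via `is_mmtk_object`, and `EmptySpaceSFT` answering `false` covers unmapped chunks. Reusing the toy `Sft` trait from the first sketch above, the dispatch looks roughly like this (simplified types rather than the real `SFTMap`):

```rust
// Sketch of the reshaped `is_in_space` logic. The real code indexes `self.sft`
// by `object.to_address().chunk_index()`.
fn is_in_space(sft_table: &[&dyn Sft], chunk_index: usize, addr: usize) -> bool {
    // Out-of-range chunk index: the address lies outside the mapped SFT table.
    if chunk_index >= sft_table.len() {
        return false;
    }
    // Empty entries answer false, malloc-like entries check per-address
    // metadata, and every other space answers true via the default method.
    sft_table[chunk_index].is_mmtk_object(addr)
}
```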
@@ -361,8 +379,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         //     "should only grow space for new chunks at chunk-aligned start address"
         // );
         if new_chunk {
-            let chunks = conversions::bytes_to_chunks_up(bytes);
-            SFT_MAP.update(self.as_sft(), start, chunks);
+            SFT_MAP.update(self.as_sft(), start, bytes);
         }
     }
 
@@ -371,7 +388,6 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
      * mapped (e.g. for a vm image which is externally mmapped.)
      */
     fn ensure_mapped(&self) {
-        let chunks = conversions::bytes_to_chunks_up(self.common().extent);
         if self
             .common()
             .metadata
@@ -381,7 +397,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
             // TODO(Javad): handle meta space allocation failure
             panic!("failed to mmap meta memory");
         }
-        SFT_MAP.update(self.as_sft(), self.common().start, chunks);
+        SFT_MAP.update(self.as_sft(), self.common().start, self.common().extent);
         use crate::util::heap::layout::mmapper::Mmapper;
         self.common()
             .mmapper
@@ -602,7 +618,7 @@ impl<VM: VMBinding> CommonSpace<VM> {
             // TODO(Javad): handle meta space allocation failure
             panic!("failed to mmap meta memory");
         }
-        SFT_MAP.update(space.as_sft(), self.start, bytes_to_chunks_up(self.extent));
+        SFT_MAP.update(space.as_sft(), self.start, self.extent);
     }
 }