Commit e63457c

Set SFT for MallocSpace (#476)
Set SFT entries for MallocSpace (closes #473), and use the newly added `is_mmtk_object` SFT method to correctly check whether an object in a MallocSpace chunk was allocated by `malloc`. Fix an issue where, if the address is chunk aligned, the SFT entries may not be set properly (closes #374). The `update` method of the SFT map now takes its size argument in bytes rather than in chunks.
1 parent 983c0f0 · commit e63457c
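The commit message's last point is the core mechanical change: `SFTMap::update` now receives a size in bytes and derives the covered chunk range itself, rounding the end of the range up to a chunk boundary before taking its chunk index, so every chunk the byte range touches gets an entry. The following is a minimal, self-contained sketch of that computation, not mmtk-core's actual `Address`/`conversions` API; the helper names and the 4 MB chunk size are assumptions for illustration.

const LOG_BYTES_IN_CHUNK: usize = 22; // assumed 4 MB chunks
const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK;

// Index of the chunk containing `addr`.
fn chunk_index(addr: usize) -> usize {
    addr >> LOG_BYTES_IN_CHUNK
}

// Round `addr` up to the next chunk boundary (no-op if already aligned).
fn chunk_align_up(addr: usize) -> usize {
    (addr + BYTES_IN_CHUNK - 1) & !(BYTES_IN_CHUNK - 1)
}

// Chunk indices covered by the byte range [start, start + bytes):
// every chunk the range touches gets an SFT entry.
fn chunks_to_update(start: usize, bytes: usize) -> std::ops::Range<usize> {
    let first = chunk_index(start);
    let last = chunk_index(chunk_align_up(start + bytes));
    first..last
}

fn main() {
    // One chunk-aligned chunk maps to exactly one SFT entry...
    assert_eq!(chunks_to_update(BYTES_IN_CHUNK, BYTES_IN_CHUNK), 1..2);
    // ...and a small range straddling a chunk boundary maps to two.
    assert_eq!(chunks_to_update(BYTES_IN_CHUNK - 8, 16), 0..2);
}

The `first..last` range is what `update` iterates over when installing entries in the diff below, and `log_update` uses the same bounds for its debug output.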

6 files changed (+67, -42 lines)

src/plan/marksweep/global.rs

Lines changed: 2 additions & 0 deletions
@@ -61,6 +61,8 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
+        self.common()
+            .schedule_common::<MSProcessEdges<VM>>(&MS_CONSTRAINTS, scheduler);
         // Stop & scan mutators (mutator scanning can happen before STW)
         scheduler.work_buckets[WorkBucketStage::Unconstrained]
             .add(StopMutators::<MSProcessEdges<VM>>::new());

src/policy/lockfreeimmortalspace.rs

Lines changed: 1 addition & 6 deletions
@@ -1,7 +1,6 @@
 use crate::mmtk::SFT_MAP;
 use crate::policy::space::{CommonSpace, Space, SFT};
 use crate::util::address::Address;
-use crate::util::conversions::bytes_to_chunks_up;
 use crate::util::heap::PageResource;
 
 use crate::util::ObjectReference;
@@ -104,11 +103,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
             // TODO(Javad): handle meta space allocation failure
             panic!("failed to mmap meta memory");
         }
-        SFT_MAP.update(
-            self.as_sft(),
-            AVAILABLE_START,
-            bytes_to_chunks_up(total_bytes),
-        );
+        SFT_MAP.update(self.as_sft(), AVAILABLE_START, total_bytes);
     }
 
     fn reserved_pages(&self) -> usize {

src/policy/mallocspace/global.rs

Lines changed: 17 additions & 4 deletions
@@ -71,6 +71,11 @@ impl<VM: VMBinding> SFT for MallocSpace<VM> {
         true
     }
 
+    // For malloc space, we need to further check the alloc bit.
+    fn is_mmtk_object(&self, object: ObjectReference) -> bool {
+        is_alloced_by_malloc(object)
+    }
+
     fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
         trace!("initialize_object_metadata for object {}", object);
         let page_addr = conversions::page_align_down(object.to_address());
@@ -204,6 +209,8 @@ impl<VM: VMBinding> MallocSpace<VM> {
         if !is_meta_space_mapped(address, actual_size) {
             // Map the metadata space for the associated chunk
             self.map_metadata_and_update_bound(address, actual_size);
+            // Update SFT
+            crate::mmtk::SFT_MAP.update(self, address, actual_size);
         }
         self.active_bytes.fetch_add(actual_size, Ordering::SeqCst);
 
@@ -327,6 +334,14 @@ impl<VM: VMBinding> MallocSpace<VM> {
         }
     }
 
+    /// Clean up for an empty chunk
+    fn clean_up_empty_chunk(&self, chunk_start: Address) {
+        // Since the chunk mark metadata is a byte, we don't need synchronization
+        unsafe { unset_chunk_mark_unsafe(chunk_start) };
+        // Clear the SFT entry
+        crate::mmtk::SFT_MAP.clear(chunk_start);
+    }
+
     /// This function is called when the mark bits sit on the side metadata.
     /// This has been optimized with the use of bulk loading and bulk zeroing of
     /// metadata.
@@ -495,8 +510,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
         bzero_metadata(&mark_bit_spec, chunk_start, BYTES_IN_CHUNK);
 
         if chunk_is_empty {
-            // Since the chunk mark metadata is a byte, we don't need synchronization
-            unsafe { unset_chunk_mark_unsafe(chunk_start) };
+            self.clean_up_empty_chunk(chunk_start);
         }
 
         debug!(
@@ -614,8 +628,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
         }
 
         if chunk_is_empty {
-            // Since the chunk mark metadata is a byte, we don't need synchronization
-            unsafe { unset_chunk_mark_unsafe(chunk_start) };
+            self.clean_up_empty_chunk(chunk_start);
        }
 
         debug!(

src/policy/mallocspace/mod.rs

Lines changed: 0 additions & 1 deletion
@@ -3,4 +3,3 @@ mod global;
 pub mod metadata;
 
 pub use global::*;
-pub(crate) use metadata::is_alloced_by_malloc;

src/policy/space.rs

Lines changed: 46 additions & 30 deletions
@@ -69,6 +69,14 @@ pub trait SFT {
     /// object - the sanity checker will fail if an object is not sane.
     #[cfg(feature = "sanity")]
     fn is_sane(&self) -> bool;
+    /// Is the object managed by MMTk? For most cases, if we find the sft for an object, that means
+    /// the object is in the space and managed by MMTk. However, for some spaces, like MallocSpace,
+    /// we mark the entire chunk in the SFT table as a malloc space, but only some of the addresses
+    /// in the space contain actual MMTk objects. So they need a further check.
+    #[inline(always)]
+    fn is_mmtk_object(&self, _object: ObjectReference) -> bool {
+        true
+    }
     /// Initialize object metadata (in the header, or in the side metadata).
     fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool);
 }
@@ -106,6 +114,10 @@ impl SFT for EmptySpaceSFT {
      */
         false
     }
+    #[inline(always)]
+    fn is_mmtk_object(&self, _object: ObjectReference) -> bool {
+        false
+    }
 
     fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) {
         panic!(
@@ -153,19 +165,20 @@ impl<'a> SFTMap<'a> {
         res
     }
 
-    fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, chunks: usize) {
+    fn log_update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) {
+        debug!(
+            "Update SFT for [{}, {}) as {}",
+            start,
+            start + bytes,
+            space.name()
+        );
         let first = start.chunk_index();
-        let end = start + (chunks << LOG_BYTES_IN_CHUNK);
-        debug!("Update SFT for [{}, {}) as {}", start, end, space.name());
+        let last = conversions::chunk_align_up(start + bytes).chunk_index();
         let start_chunk = chunk_index_to_address(first);
-        let end_chunk = chunk_index_to_address(first + chunks);
+        let end_chunk = chunk_index_to_address(last);
         debug!(
-            "Update SFT for {} chunks of [{} #{}, {} #{})",
-            chunks,
-            start_chunk,
-            first,
-            end_chunk,
-            first + chunks
+            "Update SFT for {} bytes of [{} #{}, {} #{})",
+            bytes, start_chunk, first, end_chunk, last
         );
     }
@@ -188,13 +201,15 @@ impl<'a> SFTMap<'a> {
     }
 
     /// Update SFT map for the given address range.
-    /// It should be used in these cases: 1. when a space grows, 2. when initializing a contiguous space, 3. when ensure_mapped() is called on a space.
-    pub fn update(&self, space: &(dyn SFT + Sync + 'static), start: Address, chunks: usize) {
+    /// It should be used when we acquire new memory and use it as part of a space. For example, the cases include:
+    /// 1. when a space grows, 2. when initializing a contiguous space, 3. when ensure_mapped() is called on a space.
+    pub fn update(&self, space: &(dyn SFT + Sync + 'static), start: Address, bytes: usize) {
         if DEBUG_SFT {
-            self.log_update(space, start, chunks);
+            self.log_update(space, start, bytes);
         }
         let first = start.chunk_index();
-        for chunk in first..(first + chunks) {
+        let last = conversions::chunk_align_up(start + bytes).chunk_index();
+        for chunk in first..last {
             self.set(chunk, space);
         }
         if DEBUG_SFT {
@@ -204,10 +219,18 @@ impl<'a> SFTMap<'a> {
 
     // TODO: We should clear a SFT entry when a space releases a chunk.
     #[allow(dead_code)]
-    pub fn clear(&self, chunk_idx: usize) {
+    pub fn clear(&self, chunk_start: Address) {
+        assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
+        let chunk_idx = chunk_start.chunk_index();
         self.set(chunk_idx, &EMPTY_SPACE_SFT);
     }
 
+    // Currently only used by 32 bits vm map
+    #[allow(dead_code)]
+    pub fn clear_by_index(&self, chunk_idx: usize) {
+        self.set(chunk_idx, &EMPTY_SPACE_SFT)
+    }
+
     fn set(&self, chunk: usize, sft: &(dyn SFT + Sync + 'static)) {
         /*
          * This is safe (only) because a) this is only called during the
@@ -227,7 +250,8 @@ impl<'a> SFTMap<'a> {
         // in which case, we still set SFT map again.
         debug_assert!(
             old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new,
-            "attempt to overwrite a non-empty chunk in SFT map (from {} to {})",
+            "attempt to overwrite a non-empty chunk {} in SFT map (from {} to {})",
+            chunk,
            old,
            new
        );
@@ -236,16 +260,10 @@ impl<'a> SFTMap<'a> {
     }
 
     pub fn is_in_space(&self, object: ObjectReference) -> bool {
-        let not_in_space = object.to_address().chunk_index() >= self.sft.len()
-            || self.get(object.to_address()).name() == EMPTY_SPACE_SFT.name();
-
-        if not_in_space {
-            // special case - we do not yet have SFT entries for malloc space
-            use crate::policy::mallocspace::is_alloced_by_malloc;
-            is_alloced_by_malloc(object)
-        } else {
-            true
+        if object.to_address().chunk_index() >= self.sft.len() {
+            return false;
         }
+        self.get(object.to_address()).is_mmtk_object(object)
     }
 }
@@ -361,8 +379,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         // "should only grow space for new chunks at chunk-aligned start address"
        // );
        if new_chunk {
-            let chunks = conversions::bytes_to_chunks_up(bytes);
-            SFT_MAP.update(self.as_sft(), start, chunks);
+            SFT_MAP.update(self.as_sft(), start, bytes);
        }
    }
@@ -371,7 +388,6 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
      * mapped (e.g. for a vm image which is externally mmapped.)
      */
     fn ensure_mapped(&self) {
-        let chunks = conversions::bytes_to_chunks_up(self.common().extent);
         if self
             .common()
             .metadata
@@ -381,7 +397,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
             // TODO(Javad): handle meta space allocation failure
             panic!("failed to mmap meta memory");
         }
-        SFT_MAP.update(self.as_sft(), self.common().start, chunks);
+        SFT_MAP.update(self.as_sft(), self.common().start, self.common().extent);
         use crate::util::heap::layout::mmapper::Mmapper;
         self.common()
             .mmapper
@@ -602,7 +618,7 @@ impl<VM: VMBinding> CommonSpace<VM> {
             // TODO(Javad): handle meta space allocation failure
             panic!("failed to mmap meta memory");
         }
-        SFT_MAP.update(space.as_sft(), self.start, bytes_to_chunks_up(self.extent));
+        SFT_MAP.update(space.as_sft(), self.start, self.extent);
     }
 }
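The `is_mmtk_object` hook added to the `SFT` trait above is what lets `SFTMap::is_in_space` drop its old malloc-space special case: the per-chunk table answers which space owns an address, and the space itself decides whether that address really holds an MMTk object. Below is a condensed sketch of that dispatch using simplified stand-in types (not mmtk-core's real `SFT`, `Address`, or side-metadata APIs); the `HashSet` merely stands in for the alloc bit that `is_alloced_by_malloc` consults.

use std::collections::HashSet;

const LOG_BYTES_IN_CHUNK: usize = 22; // assumed 4 MB chunks

// Simplified stand-in for the SFT trait: by default, being inside the space's
// chunks is enough to count as an MMTk object.
trait Sft {
    fn is_mmtk_object(&self, _addr: usize) -> bool {
        true
    }
}

// Stand-in for EmptySpaceSFT: an unclaimed chunk never holds an MMTk object.
struct EmptySpace;
impl Sft for EmptySpace {
    fn is_mmtk_object(&self, _addr: usize) -> bool {
        false
    }
}

// Stand-in for MallocSpace: only addresses whose "alloc bit" is set are objects.
struct MallocLikeSpace {
    alloc_bits: HashSet<usize>, // stands in for the alloc-bit side metadata
}
impl Sft for MallocLikeSpace {
    fn is_mmtk_object(&self, addr: usize) -> bool {
        self.alloc_bits.contains(&addr)
    }
}

// Roughly what SFTMap::is_in_space does now: bounds-check the chunk index,
// look up the chunk's entry, then let that entry make the final call.
fn is_in_space(table: &[&dyn Sft], addr: usize) -> bool {
    let chunk = addr >> LOG_BYTES_IN_CHUNK;
    if chunk >= table.len() {
        return false;
    }
    table[chunk].is_mmtk_object(addr)
}

fn main() {
    let empty = EmptySpace;
    let obj = 5 << LOG_BYTES_IN_CHUNK;
    let malloc = MallocLikeSpace {
        alloc_bits: HashSet::from([obj]),
    };
    // Chunks 0..5 are unclaimed; chunk 5 belongs to the malloc-like space.
    let mut table: Vec<&dyn Sft> = vec![&empty as &dyn Sft; 5];
    table.push(&malloc);
    assert!(is_in_space(&table, obj)); // alloc bit set
    assert!(!is_in_space(&table, obj + 8)); // in the chunk, but not an object
    assert!(!is_in_space(&table, 99 << LOG_BYTES_IN_CHUNK)); // outside the map
}

In the commit itself, MallocSpace installs its SFT entry when it maps metadata for a newly used chunk and clears the entry again in `clean_up_empty_chunk` once the chunk becomes empty.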

src/util/heap/layout/map32.rs

Lines changed: 1 addition & 1 deletion
@@ -269,7 +269,7 @@ impl Map32 {
         self.next_link[chunk as usize] = 0;
         for offset in 0..chunks {
             self.descriptor_map[(chunk + offset) as usize] = SpaceDescriptor::UNINITIALIZED;
-            SFT_MAP.clear((chunk + offset) as usize);
+            SFT_MAP.clear_by_index((chunk + offset) as usize);
             // VM.barriers.objectArrayStoreNoGCBarrier(spaceMap, chunk + offset, null);
         }
         chunks as _
