Commit 4ca3d2b

Fix a concurrency bug in mapping chunk side metadata for mallocspace (#404)
1 parent c4c2dcd

File tree

src/policy/mallocspace/global.rs
src/policy/mallocspace/metadata.rs

2 files changed (+21 -10 lines changed)

src/policy/mallocspace/global.rs

Lines changed: 1 addition & 0 deletions

@@ -201,6 +201,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
 
         if !address.is_zero() {
             let actual_size = unsafe { malloc_usable_size(raw) };
+            // If the side metadata for the address has not yet been mapped, we will map all the side metadata for the address.
             if !is_meta_space_mapped(address) {
                 let chunk_start = conversions::chunk_align_down(address);
                 debug!(
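
For context, the hunk above sits in MallocSpace's allocation path: a freshly malloc'ed address is checked against the side metadata, and if its chunk has never been seen, metadata for the whole chunk is mapped. A condensed sketch of that path follows (the calloc call, the debug! message, and the active_bytes accounting are assumptions inferred from the visible context lines, not verbatim source):

    // Sketch of the allocation path (assumed shape, not the exact function body).
    pub fn alloc(&self, size: usize) -> Address {
        let raw = unsafe { calloc(1, size) };
        let address = Address::from_mut_ptr(raw);
        if !address.is_zero() {
            let actual_size = unsafe { malloc_usable_size(raw) };
            // If the side metadata for the address has not yet been mapped,
            // map all the side metadata for the address's chunk.
            if !is_meta_space_mapped(address) {
                let chunk_start = conversions::chunk_align_down(address);
                debug!("Mapping metadata for chunk {}", chunk_start); // assumed message
                map_meta_space_for_chunk(&self.metadata, chunk_start);
            }
            self.active_bytes.fetch_add(actual_size, Ordering::SeqCst); // assumed bookkeeping
        }
        address
    }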

src/policy/mallocspace/metadata.rs

Lines changed: 20 additions & 10 deletions

@@ -20,7 +20,7 @@ lazy_static! {
         local: vec![],
     };
 
-    // lock to synchronize the mapping for the active chunk space
+    // lock to synchronize the mapping of side metadata for a newly allocated chunk by malloc
     static ref CHUNK_MAP_LOCK: Mutex<()> = Mutex::new(());
 }
 

@@ -110,27 +110,37 @@ fn map_active_chunk_metadata(chunk_start: Address) {
 // We map the active chunk metadata (if not previously mapped), as well as the alloc bit metadata
 // and active page metadata here
 pub fn map_meta_space_for_chunk(metadata: &SideMetadataContext, chunk_start: Address) {
-    {
-        // In order to prevent race conditions, we synchronize on the lock first and then
-        // check if we need to map the active chunk metadata for `chunk_start`
-        let _lock = CHUNK_MAP_LOCK.lock().unwrap();
-        if !is_chunk_mapped(chunk_start) {
-            map_active_chunk_metadata(chunk_start);
-        }
+    // In order to prevent race conditions, we synchronize on the lock first and then
+    // check if we need to map the active chunk metadata for `chunk_start`
+    let _lock = CHUNK_MAP_LOCK.lock().unwrap();
+
+    // Check if the chunk bit metadata is mapped. If it is not mapped, map it.
+    // Note that the chunk bit metadata is global: it may already have been mapped because another policy mapped it.
+    if !is_chunk_mapped(chunk_start) {
+        map_active_chunk_metadata(chunk_start);
     }
 
+    // If the chunk bit is already set, return. This is needed in case another thread did this work
+    // before we could acquire the lock.
     if is_chunk_marked(chunk_start) {
         return;
     }
 
-    set_chunk_mark(chunk_start);
+    // Attempt to map the local metadata for the policy. Note that this might fail. For example, we may
+    // have marked a chunk as active but later freed all the objects in it and unset its chunk bit.
+    // However, we do not free its metadata. So for such a chunk, its chunk bit is mapped but not marked,
+    // and all its local metadata is also mapped.
     let mmap_metadata_result = metadata.try_map_metadata_space(chunk_start, BYTES_IN_CHUNK);
-    trace!("set chunk mark bit for {}", chunk_start);
     debug_assert!(
         mmap_metadata_result.is_ok(),
         "mmap sidemetadata failed for chunk_start ({})",
         chunk_start
     );
+
+    // Set the chunk mark at the end, so that if the chunk mark is set, we know we have mapped the side
+    // metadata for the chunk.
+    trace!("set chunk mark bit for {}", chunk_start);
+    set_chunk_mark(chunk_start);
 }
 
 // Check if a given object was allocated by malloc
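
The bug fixed here is a lock-scope error: in the old code the MutexGuard was confined to an inner { .. } block, so CHUNK_MAP_LOCK was released before the is_chunk_marked check and the local-metadata mapping ran. Two threads could race past the check, and because set_chunk_mark ran before try_map_metadata_space, a thread could observe the chunk mark while the chunk's side metadata was still unmapped. Below is a minimal, self-contained sketch of the corrected discipline, with hypothetical names (the ChunkMeta struct and its fields; std atomics stand in for MMTk's side metadata bits), not MMTk's actual implementation:

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Mutex;

    /// Hypothetical per-chunk state: the atomics model the global chunk bit
    /// and the chunk mark, and the Mutex models CHUNK_MAP_LOCK.
    struct ChunkMeta {
        chunk_bit_mapped: AtomicBool, // models is_chunk_mapped()
        chunk_marked: AtomicBool,     // models is_chunk_marked()
        lock: Mutex<()>,
    }

    impl ChunkMeta {
        fn map_meta_space_for_chunk(&self) {
            // Hold the lock across the WHOLE operation. The buggy version
            // dropped the guard at the end of an inner scope, leaving
            // everything below this point unsynchronized.
            let _lock = self.lock.lock().unwrap();

            // Map the global chunk-bit metadata if nobody has yet.
            if !self.chunk_bit_mapped.load(Ordering::SeqCst) {
                // ... mmap the chunk-bit metadata here ...
                self.chunk_bit_mapped.store(true, Ordering::SeqCst);
            }

            // Another thread may have completed everything while we waited
            // for the lock; if so, there is nothing left to do.
            if self.chunk_marked.load(Ordering::SeqCst) {
                return;
            }

            // ... map the policy-local side metadata here ...

            // Publish the mark only after the metadata is mapped, so any
            // thread that sees the mark can safely use the metadata.
            self.chunk_marked.store(true, Ordering::SeqCst);
        }
    }

The ordering matters as much as the lock scope: set_chunk_mark now runs only after try_map_metadata_space succeeds, so the chunk mark acts as a "fully initialized" flag rather than merely an "initialization started" flag.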
