
Commit cd2fe83

Annotate mmap ranges using PR_SET_VMA (#1236)

We require every invocation of `mmap` within mmtk-core to be accompanied by an "annotation" describing the purpose of the mmap. On Linux, we use `PR_SET_VMA_ANON_NAME` to set the attribute after `mmap` so that the name is visible in `/proc/pid/maps`. This greatly improves the debugging experience.

1 parent 5bc6ce5 commit cd2fe83
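For context, the annotation relies on the Linux-only `prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...)` call (available since kernel 5.17). The sketch below is not mmtk-core code; it only illustrates the underlying mechanism, with the prctl constants defined locally in case the `libc` crate in use does not export them:

```rust
use std::ffi::CString;

// prctl option values from <linux/prctl.h>, defined here so this sketch does not
// depend on a particular `libc` crate version exporting them.
const PR_SET_VMA: libc::c_int = 0x53564d41;
const PR_SET_VMA_ANON_NAME: libc::c_ulong = 0;

/// Create an anonymous mapping and give it a human-readable name.
/// Illustrative only; mmtk-core wraps this behind its own `MmapAnnotation` API.
unsafe fn mmap_named(len: usize, name: &str) -> *mut libc::c_void {
    let addr = libc::mmap(
        std::ptr::null_mut(),
        len,
        libc::PROT_READ | libc::PROT_WRITE,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
        -1,
        0,
    );
    if addr != libc::MAP_FAILED {
        let cname = CString::new(name).unwrap();
        // Best effort: kernels older than 5.17 reject this with EINVAL, which we ignore.
        let _ = libc::prctl(
            PR_SET_VMA,
            PR_SET_VMA_ANON_NAME,
            addr as libc::c_ulong,
            len as libc::c_ulong,
            cname.as_ptr() as libc::c_ulong,
        );
    }
    addr
}
```

A range named this way shows up in `/proc/<pid>/maps` as `[anon:<name>]`, which is what makes the per-space annotations in the diffs below useful when inspecting a running process.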

File tree

19 files changed: +467, -143 lines


Cargo.toml

Lines changed: 7 additions & 0 deletions
@@ -179,6 +179,13 @@ count_live_bytes_in_gc = []
 # capture the type names of work packets.
 bpftrace_workaround = []
 
+# Disable mmap annotations.
+# All invocations of `mmap` in mmtk-core are accompanied by calls of `prctl` with
+# `PR_SET_VMA_ANON_NAME` to annotate the mmap ranges with human-readable names. It is enabled by
+# default and should work fine even with large heap sizes. However, if this is causing problems,
+# users can disable such annotations by enabling this Cargo feature.
+no_mmap_annotation = []
+
 # Do not modify the following line - ci-common.sh matches it
 # -- Mutally exclusive features --
 # Only one feature from each group can be provided. Otherwise build will fail.
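The hunk above makes annotation the default and uses a Cargo feature only to opt out. A hypothetical binding crate that wants the old behaviour would enable the feature from its own manifest; the version below is a placeholder:

```toml
[dependencies]
# Placeholder version; enables the opt-out feature described in the hunk above.
mmtk = { version = "0.30", features = ["no_mmap_annotation"] }
```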

docs/userguide/src/migration/prefix.md

Lines changed: 16 additions & 0 deletions
@@ -30,6 +30,22 @@ Notes for the mmtk-core developers:
 
 <!-- Insert new versions here -->
 
+## 0.30.0
+
+### mmap-related functions require annotation
+
+```admonish tldr
+Memory-mapping functions in `mmtk::util::memory` now take an additional `MmapAnnotation` argument.
+```
+
+API changes:
+
+- module `util::memory`
+  + The following functions take an additional `MmapAnnotation` argument.
+    * `dzmmap`
+    * `dzmmap_noreplace`
+    * `mmap_noreserve`
+
 ## 0.28.0
 
 ### `handle_user_collection_request` returns `bool`
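For binding code that calls these functions directly, migration looks roughly like the sketch below. The import paths, variable names, and the `"my-space"` string are assumptions for illustration; `MmapAnnotation::Space` is the variant this commit uses for space memory:

```rust
use mmtk::util::memory::{dzmmap_noreplace, MmapAnnotation, MmapStrategy};
use mmtk::util::Address;

fn map_space_memory(start: Address, bytes: usize, strategy: MmapStrategy) {
    // Before 0.30.0 the call took only (start, bytes, strategy):
    //     dzmmap_noreplace(start, bytes, strategy).unwrap();
    // From 0.30.0 on, the caller must also say what the mapping is for:
    dzmmap_noreplace(
        start,
        bytes,
        strategy,
        &MmapAnnotation::Space { name: "my-space" },
    )
    .unwrap();
}
```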

src/policy/lockfreeimmortalspace.rs

Lines changed: 16 additions & 8 deletions
@@ -13,6 +13,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
 use crate::util::heap::layout::vm_layout::vm_layout;
 use crate::util::heap::PageResource;
 use crate::util::heap::VMRequest;
+use crate::util::memory::MmapAnnotation;
 use crate::util::memory::MmapStrategy;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -241,15 +242,22 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
             *args.options.transparent_hugepages,
             crate::util::memory::MmapProtection::ReadWrite,
         );
-        crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
-        if space
+        crate::util::memory::dzmmap_noreplace(
+            start,
+            aligned_total_bytes,
+            strategy,
+            &MmapAnnotation::Space {
+                name: space.get_name(),
+            },
+        )
+        .unwrap();
+        space
             .metadata
-            .try_map_metadata_space(start, aligned_total_bytes)
-            .is_err()
-        {
-            // TODO(Javad): handle meta space allocation failure
-            panic!("failed to mmap meta memory");
-        }
+            .try_map_metadata_space(start, aligned_total_bytes, space.get_name())
+            .unwrap_or_else(|e| {
+                // TODO(Javad): handle meta space allocation failure
+                panic!("failed to mmap meta memory: {e}")
+            });
 
         space
     }

src/policy/marksweepspace/malloc_ms/global.rs

Lines changed: 1 addition & 1 deletion
@@ -439,7 +439,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
 
     fn map_metadata_and_update_bound(&self, addr: Address, size: usize) {
         // Map the metadata space for the range [addr, addr + size)
-        map_meta_space(&self.metadata, addr, size);
+        map_meta_space(&self.metadata, addr, size, self.get_name());
 
         // Update the bounds of the max and min chunk addresses seen -- this is used later in the sweep
         // Lockless compare-and-swap loops perform better than a locking variant

src/policy/marksweepspace/malloc_ms/metadata.rs

Lines changed: 13 additions & 8 deletions
@@ -76,7 +76,7 @@ fn is_meta_space_mapped_for_address(address: Address) -> bool {
 }
 
 /// Eagerly map the active chunk metadata surrounding `chunk_start`
-fn map_active_chunk_metadata(chunk_start: Address) {
+fn map_active_chunk_metadata(chunk_start: Address, space_name: &str) {
     debug_assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
     // We eagerly map 16Gb worth of space for the chunk mark bytes on 64-bits
     // We require saturating subtractions in order to not overflow the chunk_start by
@@ -99,16 +99,20 @@ fn map_active_chunk_metadata(chunk_start: Address) {
         chunk_start + (size / 2)
     );
 
-    assert!(
-        CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(),
-        "failed to mmap meta memory"
-    );
+    CHUNK_METADATA
+        .try_map_metadata_space(start, size, space_name)
+        .unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}"));
 }
 
 /// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata
 /// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we
 /// will map for each chunk.
-pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
+pub(super) fn map_meta_space(
+    metadata: &SideMetadataContext,
+    addr: Address,
+    size: usize,
+    space_name: &str,
+) {
     // In order to prevent race conditions, we synchronize on the lock first and then
     // check if we need to map the active chunk metadata for `chunk_start`
     let _lock = CHUNK_MAP_LOCK.lock().unwrap();
@@ -118,7 +122,7 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
     // Check if the chunk bit metadata is mapped. If it is not mapped, map it.
     // Note that the chunk bit metadata is global. It may have been mapped because other policy mapped it.
     if !is_chunk_mapped(start) {
-        map_active_chunk_metadata(start);
+        map_active_chunk_metadata(start, space_name);
    }
 
     // If we have set the chunk bit, return. This is needed just in case another thread has done this before
@@ -131,7 +135,8 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
     // Note that this might fail. For example, we have marked a chunk as active but later we freed all
     // the objects in it, and unset its chunk bit. However, we do not free its metadata. So for the chunk,
     // its chunk bit is mapped, but not marked, and all its local metadata is also mapped.
-    let mmap_metadata_result = metadata.try_map_metadata_space(start, BYTES_IN_CHUNK);
+    let mmap_metadata_result =
+        metadata.try_map_metadata_space(start, BYTES_IN_CHUNK, space_name);
     debug_assert!(
         mmap_metadata_result.is_ok(),
         "mmap sidemetadata failed for chunk_start ({})",

src/policy/sft_map.rs

Lines changed: 5 additions & 3 deletions
@@ -378,9 +378,11 @@ mod dense_chunk_map {
                 global: vec![SFT_DENSE_CHUNK_MAP_INDEX],
                 local: vec![],
             };
-            if context.try_map_metadata_space(start, bytes).is_err() {
-                panic!("failed to mmap metadata memory");
-            }
+            context
+                .try_map_metadata_space(start, bytes, "SFTDenseChunkMap")
+                .unwrap_or_else(|e| {
+                    panic!("failed to mmap metadata memory: {e}");
+                });
 
             self.update(space, start, bytes);
         }

src/policy/space.rs

Lines changed: 24 additions & 21 deletions
@@ -144,12 +144,19 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         if let Err(mmap_error) = self
             .common()
             .mmapper
-            .ensure_mapped(res.start, res.pages, self.common().mmap_strategy())
-            .and(
-                self.common()
-                    .metadata
-                    .try_map_metadata_space(res.start, bytes),
+            .ensure_mapped(
+                res.start,
+                res.pages,
+                self.common().mmap_strategy(),
+                &memory::MmapAnnotation::Space {
+                    name: self.get_name(),
+                },
             )
+            .and(self.common().metadata.try_map_metadata_space(
+                res.start,
+                bytes,
+                self.get_name(),
+            ))
         {
             memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
         }
@@ -293,15 +300,13 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
     /// Ensure this space is marked as mapped -- used when the space is already
     /// mapped (e.g. for a vm image which is externally mmapped.)
     fn ensure_mapped(&self) {
-        if self
-            .common()
+        self.common()
             .metadata
-            .try_map_metadata_space(self.common().start, self.common().extent)
-            .is_err()
-        {
-            // TODO(Javad): handle meta space allocation failure
-            panic!("failed to mmap meta memory");
-        }
+            .try_map_metadata_space(self.common().start, self.common().extent, self.get_name())
+            .unwrap_or_else(|e| {
+                // TODO(Javad): handle meta space allocation failure
+                panic!("failed to mmap meta memory: {e}");
+            });
 
         self.common()
             .mmapper
@@ -609,14 +614,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
         }
 
         // For contiguous space, we know its address range so we reserve metadata memory for its range.
-        if rtn
-            .metadata
-            .try_map_metadata_address_range(rtn.start, rtn.extent)
-            .is_err()
-        {
-            // TODO(Javad): handle meta space allocation failure
-            panic!("failed to mmap meta memory");
-        }
+        rtn.metadata
+            .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
+            .unwrap_or_else(|e| {
+                // TODO(Javad): handle meta space allocation failure
+                panic!("failed to mmap meta memory: {e}");
+            });
 
         debug!(
             "Created space {} [{}, {}) for {} bytes",

src/policy/vmspace.rs

Lines changed: 1 addition & 1 deletion
@@ -229,7 +229,7 @@ impl<VM: VMBinding> VMSpace<VM> {
         // Map side metadata
         self.common
             .metadata
-            .try_map_metadata_space(chunk_start, chunk_size)
+            .try_map_metadata_space(chunk_start, chunk_size, self.get_name())
             .unwrap();
         // Insert to vm map: it would be good if we can make VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
         // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);

src/util/heap/layout/byte_map_mmapper.rs

Lines changed: 35 additions & 9 deletions
@@ -1,5 +1,6 @@
 use super::mmapper::MapState;
 use super::Mmapper;
+use crate::util::memory::MmapAnnotation;
 use crate::util::Address;
 
 use crate::util::constants::*;
@@ -44,7 +45,13 @@ impl Mmapper for ByteMapMmapper {
         }
     }
 
-    fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()> {
+    fn ensure_mapped(
+        &self,
+        start: Address,
+        pages: usize,
+        strategy: MmapStrategy,
+        anno: &MmapAnnotation,
+    ) -> Result<()> {
         let start_chunk = Self::address_to_mmap_chunks_down(start);
         let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
         trace!(
@@ -62,7 +69,8 @@ impl Mmapper for ByteMapMmapper {
 
             let mmap_start = Self::mmap_chunks_to_address(chunk);
             let _guard = self.lock.lock().unwrap();
-            MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy).unwrap();
+            MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy, anno)
+                .unwrap();
         }
 
         Ok(())
@@ -73,6 +81,7 @@ impl Mmapper for ByteMapMmapper {
         start: Address,
         pages: usize,
         strategy: MmapStrategy,
+        anno: &MmapAnnotation,
     ) -> Result<()> {
         let start_chunk = Self::address_to_mmap_chunks_down(start);
         let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
@@ -91,7 +100,8 @@ impl Mmapper for ByteMapMmapper {
 
             let mmap_start = Self::mmap_chunks_to_address(chunk);
             let _guard = self.lock.lock().unwrap();
-            MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy).unwrap();
+            MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy, anno)
+                .unwrap();
         }
 
         Ok(())
@@ -172,6 +182,7 @@ impl Default for ByteMapMmapper {
 #[cfg(test)]
 mod tests {
     use super::ByteMapMmapper;
+    use crate::mmap_anno_test;
     use crate::util::heap::layout::Mmapper;
     use crate::util::Address;
 
@@ -237,7 +248,7 @@ mod tests {
             || {
                 let mmapper = ByteMapMmapper::new();
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
+                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                    .unwrap();
 
                 for chunk in start_chunk..end_chunk {
@@ -266,7 +277,7 @@ mod tests {
             || {
                 let mmapper = ByteMapMmapper::new();
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
+                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                     .unwrap();
 
                 for chunk in start_chunk..end_chunk {
@@ -295,7 +306,7 @@ mod tests {
             || {
                 let mmapper = ByteMapMmapper::new();
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
+                    .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
                    .unwrap();
 
                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
@@ -329,7 +340,12 @@
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST)
+                    .ensure_mapped(
+                        FIXED_ADDRESS,
+                        test_memory_pages,
+                        MmapStrategy::TEST,
+                        mmap_anno_test!(),
+                    )
                     .unwrap();
 
                 // protect 1 chunk
@@ -364,7 +380,12 @@
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST)
+                    .ensure_mapped(
+                        FIXED_ADDRESS,
+                        test_memory_pages,
+                        MmapStrategy::TEST,
+                        mmap_anno_test!(),
+                    )
                     .unwrap();
 
                 // protect 1 chunk
@@ -382,7 +403,12 @@
 
                 // ensure mapped - this will unprotect the previously protected chunk
                 mmapper
-                    .ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2, MmapStrategy::TEST)
+                    .ensure_mapped(
+                        FIXED_ADDRESS,
+                        protect_memory_pages_2,
+                        MmapStrategy::TEST,
+                        mmap_anno_test!(),
+                    )
                     .unwrap();
                 assert_eq!(
                     mmapper.mapped[chunk].load(Ordering::Relaxed),
