
8352292: Implement NMT tags using hashtables #24142


Closed
wants to merge 3 commits
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp
@@ -70,7 +70,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
"Invalid immediate %d " UINT64_FORMAT, index, immediate);
result = os::attempt_reserve_memory_at((char*)immediate, size, false);
result = os::attempt_reserve_memory_at((char*)immediate, size, mtNone, false);
if (result == nullptr) {
log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
}
@@ -114,7 +114,7 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size
if (result == nullptr) {
constexpr size_t alignment = nth_bit(32);
log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
result = os::reserve_memory_aligned(size, alignment, false);
result = os::reserve_memory_aligned(size, alignment, mtMetaspace, false);
}

return result;
2 changes: 1 addition & 1 deletion src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
@@ -78,7 +78,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
_initialized(false) {

// Reserve address space for backing memory
_base = (uintptr_t)os::reserve_memory(max_capacity, false, mtJavaHeap);
_base = (uintptr_t)os::reserve_memory(max_capacity, mtJavaHeap, false);
if (_base == 0) {
// Failed
ZInitialize::error("Failed to reserve address space for backing memory");
4 changes: 2 additions & 2 deletions src/hotspot/os/posix/os_posix.cpp
@@ -491,9 +491,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
char* os::reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool exec) {
size_t extra_size = calculate_aligned_extra_size(size, alignment);
char* extra_base = os::reserve_memory(extra_size, exec);
char* extra_base = os::reserve_memory(extra_size, mem_tag, exec);
if (extra_base == nullptr) {
return nullptr;
}
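For reference, a caller-side sketch of the reordered POSIX reservation entry points shown above, where the NMT tag now precedes the exec flag. The helper name and the tag choice are illustrative only; the two os:: signatures are the ones visible in this hunk.

```cpp
// Sketch only: assumes os::reserve_memory(size, mem_tag, exec) and
// os::reserve_memory_aligned(size, alignment, mem_tag, exec) as declared above.
#include "runtime/os.hpp"
#include "utilities/align.hpp"

static char* reserve_tagged(size_t size, size_t alignment, MemTag mem_tag) {
  // Plain reservation; NMT records the region under mem_tag at reserve time.
  char* base = os::reserve_memory(size, mem_tag, /*exec=*/false);
  if (base != nullptr && !is_aligned(base, alignment)) {
    // Base not aligned as hoped: release and retry with the aligned variant,
    // which takes the tag in the same position, ahead of the exec flag.
    os::release_memory(base, size);
    base = os::reserve_memory_aligned(size, alignment, mem_tag, /*exec=*/false);
  }
  return base;
}
```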
2 changes: 1 addition & 1 deletion src/hotspot/os/posix/perfMemory_posix.cpp
@@ -64,7 +64,7 @@ static char* backing_store_file_name = nullptr; // name of the backing store
static char* create_standard_memory(size_t size) {

// allocate an aligned chuck of memory
char* mapAddress = os::reserve_memory(size);
char* mapAddress = os::reserve_memory(size, mtInternal);

if (mapAddress == nullptr) {
return nullptr;
14 changes: 7 additions & 7 deletions src/hotspot/os/windows/os_windows.cpp
@@ -3028,7 +3028,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return null
if (p_buf == nullptr) return nullptr;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC, mtNone);
os::release_memory(p_buf, bytes + chunk_size);

// we still need to round up to a page boundary (in case we are using large pages)
@@ -3089,7 +3089,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, CALLER_PC);
bytes_to_release, CALLER_PC, mtNone);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
@@ -3107,9 +3107,9 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC, mtNone);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC, mtNone);
}

// made it this far, success
@@ -3249,7 +3249,7 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag = mtNone) {
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag) {
assert(is_aligned(alignment, os::vm_allocation_granularity()),
"Alignment must be a multiple of allocation granularity (page size)");
assert(is_aligned(size, os::vm_allocation_granularity()),
@@ -3291,7 +3291,7 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi

char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
// exec can be ignored
return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */, mtNone);
}

char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag) {
@@ -5196,7 +5196,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
}

// Record virtual memory allocation
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC, mtNone);

DWORD bytes_read;
OVERLAPPED overlapped;
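The Windows hunks above all make the same adjustment to the NMT bookkeeping calls: the MemTag is passed explicitly instead of being defaulted. A small sketch of that pattern; the wrapper function is hypothetical and the include path is an assumption.

```cpp
// Illustrative wrapper around the record calls used above; mtNone mirrors the
// os_windows.cpp call sites, and any other MemTag would be passed the same way.
#include "nmt/memTracker.hpp"  // assumed path; some trees keep this under services/

static void record_windows_reservation(address base, size_t bytes, bool committed) {
  if (committed) {
    MemTracker::record_virtual_memory_reserve_and_commit(base, bytes, CALLER_PC, mtNone);
  } else {
    MemTracker::record_virtual_memory_reserve(base, bytes, CALLER_PC, mtNone);
  }
}
```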
3 changes: 2 additions & 1 deletion src/hotspot/share/cds/archiveBuilder.cpp
@@ -309,7 +309,8 @@ address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M);
ReservedSpace rs = MemoryReserver::reserve(buffer_size,
MetaspaceShared::core_region_alignment(),
os::vm_page_size());
os::vm_page_size(),
mtClassShared);
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve %zu bytes of output buffer.", buffer_size);
MetaspaceShared::unrecoverable_writing_error();
10 changes: 5 additions & 5 deletions src/hotspot/share/cds/filemap.cpp
@@ -1066,10 +1066,10 @@ void FileMapInfo::close() {
*/
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec, MemTag mem_tag = mtNone) {
bool allow_exec, MemTag mem_tag) {
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
AlwaysPreTouch ? false : read_only,
allow_exec, mem_tag);
mem_tag, AlwaysPreTouch ? false : read_only,
allow_exec);
if (mem != nullptr && AlwaysPreTouch) {
os::pretouch_memory(mem, mem + bytes);
}
@@ -1094,7 +1094,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
assert(WINDOWS_ONLY(false) NOT_WINDOWS(true), "Don't call on Windows");
// Replace old mapping with new one that is writable.
char *base = os::map_memory(_fd, _full_path, r->file_offset(),
addr, size, false /* !read_only */,
addr, size, mtNone, false /* !read_only */,
r->allow_exec());
close();
// These have to be errors because the shared region is now unmapped.
@@ -1620,7 +1620,7 @@ bool FileMapInfo::map_heap_region_impl() {
} else {
base = map_memory(_fd, _full_path, r->file_offset(),
addr, _mapped_heap_memregion.byte_size(), r->read_only(),
r->allow_exec());
r->allow_exec(), mtJavaHeap);
if (base == nullptr || base != addr) {
dealloc_heap_region();
log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. "
12 changes: 8 additions & 4 deletions src/hotspot/share/cds/metaspaceShared.cpp
@@ -1478,7 +1478,8 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size());
os::vm_page_size(),
mtMetaspace);
if (archive_space_rs.is_reserved()) {
assert(base_address == nullptr ||
(address)archive_space_rs.base() == base_address, "Sanity");
@@ -1546,11 +1547,13 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
archive_space_rs = MemoryReserver::reserve((char*)base_address,
archive_space_size,
archive_space_alignment,
os::vm_page_size());
os::vm_page_size(),
mtClassShared);
class_space_rs = MemoryReserver::reserve((char*)ccs_base,
class_space_size,
class_space_alignment,
os::vm_page_size());
os::vm_page_size(),
mtClass);
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
@@ -1564,7 +1567,8 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
total_space_rs = MemoryReserver::reserve((char*) base_address,
total_range_size,
base_address_alignment,
os::vm_page_size());
os::vm_page_size(),
mtClassShared);
} else {
// We did not manage to reserve at the preferred address, or were instructed to relocate. In that
// case we reserve wherever possible, but the start address needs to be encodable as narrow Klass
2 changes: 1 addition & 1 deletion src/hotspot/share/classfile/compactHashtable.cpp
@@ -226,7 +226,7 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
if (_fd < 0) {
quit("Unable to open hashtable dump file", filename);
}
_base = os::map_memory(_fd, filename, 0, nullptr, _size, true, false);
_base = os::map_memory(_fd, filename, 0, nullptr, _size, mtSymbol, true, false);
if (_base == nullptr) {
quit("Unable to map hashtable dump file", filename);
}
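As in the filemap.cpp hunks earlier, os::map_memory now takes the tag between the size and the read_only/allow_exec flags. A minimal sketch of a caller, assuming the reordered signature shown in this diff; the helper name is made up and mtSymbol simply mirrors the call above.

```cpp
// Sketch assuming: os::map_memory(fd, name, offset, addr, bytes, mem_tag,
//                                 read_only, allow_exec)
#include "runtime/os.hpp"

static char* map_dump_file(int fd, const char* filename, size_t size) {
  return os::map_memory(fd, filename, /*file_offset=*/0,
                        /*addr=*/nullptr, size,
                        mtSymbol,              // tag now precedes the flags
                        /*read_only=*/true,
                        /*allow_exec=*/false); // returns nullptr on failure, as before
}
```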
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -1230,7 +1230,8 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs = MemoryReserver::reserve(size,
alignment,
preferred_page_size);
preferred_page_size,
mtGC);

size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/parallel/parMarkBitMap.cpp
@@ -48,7 +48,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)

ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
rs_align,
page_sz);
page_sz,
mtNone);

if (!rs.is_reserved()) {
// Failed to reserve memory for the bitmap,
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -246,7 +246,8 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)

ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
rs_align,
page_sz);
page_sz,
mtGC);

if (!rs.is_reserved()) {
// Failed to reserve memory.
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shared/cardTable.cpp
@@ -80,7 +80,7 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
HeapWord* high_bound = _whole_heap.end();

const size_t rs_align = MAX2(_page_size, os::vm_allocation_granularity());
ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGC);

if (!rs.is_reserved()) {
vm_exit_during_initialization("Could not reserve enough space for the "
4 changes: 2 additions & 2 deletions src/hotspot/share/gc/shenandoah/shenandoahCardTable.cpp
@@ -44,7 +44,7 @@ void ShenandoahCardTable::initialize() {
// ReservedSpace constructor would assert rs_align >= os::vm_page_size().
const size_t rs_align = MAX2(_page_size, granularity);

ReservedSpace write_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
ReservedSpace write_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGC);
initialize(write_space);

// The assembler store_check code will do an unsigned shift of the oop,
@@ -59,7 +59,7 @@ void ShenandoahCardTable::initialize() {
_write_byte_map = _byte_map;
_write_byte_map_base = _byte_map_base;

ReservedSpace read_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
ReservedSpace read_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGCCardSet);
initialize(read_space);

_read_byte_map = (CardValue*) read_space.base();
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -166,7 +166,7 @@ static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
size = align_up(size, alignment);
}

const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size);
const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
if (!reserved.is_reserved()) {
vm_exit_during_initialization("Could not reserve space");
}
@@ -380,7 +380,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size);
cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -389,7 +389,7 @@ jint ShenandoahHeap::initialize() {
}

if (_collection_set == nullptr) {
cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
if (!cset_rs.is_reserved()) {
vm_exit_during_initialization("Cannot reserve memory for collection set");
}
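The Shenandoah changes above follow a pattern that recurs throughout this PR: probe a preferred address first, then fall back to an anonymous reservation, with an explicit tag on both MemoryReserver::reserve overloads. A condensed sketch under those assumptions; the helper name is invented and mtGC matches the hunk.

```cpp
// Sketch of the probe-then-fallback pattern above; both overloads now take
// the MemTag explicitly rather than defaulting to mtNone.
#include "memory/memoryReserver.hpp"
#include "runtime/os.hpp"

static ReservedSpace reserve_cset(char* preferred, size_t size,
                                  size_t alignment, size_t page_size) {
  ReservedSpace rs = MemoryReserver::reserve(preferred, size, alignment, page_size, mtGC);
  if (!rs.is_reserved()) {
    // Preferred address unavailable: reserve anywhere, still tagged mtGC.
    rs = MemoryReserver::reserve(size, alignment, os::vm_page_size(), mtGC);
  }
  return rs;
}
```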
3 changes: 2 additions & 1 deletion src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
@@ -106,7 +106,8 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = MemoryReserver::reserve(reservation_size_request_bytes,
os::vm_allocation_granularity(),
os::vm_page_size());
os::vm_page_size(),
mtTracing);
if (!_rs.is_reserved()) {
return false;
}
4 changes: 2 additions & 2 deletions src/hotspot/share/memory/allocation.inline.hpp
@@ -58,7 +58,7 @@ template <class E>
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MemTag mem_tag) {
size_t size = size_for(length);

char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
char* addr = os::reserve_memory(size, mem_tag, !ExecMem);
if (addr == nullptr) {
return nullptr;
}
@@ -75,7 +75,7 @@ template <class E>
E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
size_t size = size_for(length);

char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
char* addr = os::reserve_memory(size, mem_tag, !ExecMem);
if (addr == nullptr) {
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
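A trimmed sketch of the allocate() path above under the new (size, tag, exec) argument order. The page-aligned size computation stands in for size_for(), which is not shown in this hunk, so treat it as an assumption.

```cpp
// Sketch only: mirrors MmapArrayAllocator<E>::allocate with the reordered
// os::reserve_memory(size, mem_tag, exec); the size computation is a stand-in.
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

template <class E>
static E* mmap_allocate(size_t length, MemTag mem_tag) {
  size_t size = align_up(length * sizeof(E), os::vm_page_size());
  char* addr = os::reserve_memory(size, mem_tag, !ExecMem);   // tag, then exec
  if (addr == nullptr) {
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }
  os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
  return (E*)addr;
}
```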
8 changes: 4 additions & 4 deletions src/hotspot/share/memory/memoryReserver.cpp
@@ -90,13 +90,13 @@ static char* reserve_memory_inner(char* requested_address,
assert(is_aligned(requested_address, alignment),
"Requested address " PTR_FORMAT " must be aligned to %zu",
p2i(requested_address), alignment);
return os::attempt_reserve_memory_at(requested_address, size, exec, mem_tag);
return os::attempt_reserve_memory_at(requested_address, size, mem_tag, exec);
}

// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
char* base = os::reserve_memory(size, exec, mem_tag);
char* base = os::reserve_memory(size, mem_tag, exec);
if (is_aligned(base, alignment)) {
return base;
}
@@ -107,7 +107,7 @@ static char* reserve_memory_inner(char* requested_address,
}

// Map using the requested alignment.
return os::reserve_memory_aligned(size, alignment, exec);
return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
}

ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
@@ -261,7 +261,7 @@ static char* map_memory_to_file(char* requested_address,
// Optimistically assume that the OS returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
char* base = os::map_memory_to_file(size, fd);
char* base = os::map_memory_to_file(size, fd, mem_tag);
if (is_aligned(base, alignment)) {
return base;
}
4 changes: 2 additions & 2 deletions src/hotspot/share/memory/memoryReserver.hpp
@@ -58,12 +58,12 @@ class MemoryReserver : AllStatic {
size_t size,
size_t alignment,
size_t page_size,
MemTag mem_tag = mtNone);
MemTag mem_tag);

static ReservedSpace reserve(size_t size,
size_t alignment,
size_t page_size,
MemTag mem_tag = mtNone);
MemTag mem_tag);

static ReservedSpace reserve(size_t size,
MemTag mem_tag);
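With the mtNone defaults removed from these declarations, every caller now names a tag at the reservation site. A minimal hedged example of the four-argument overload; the function and the mtTracing choice are illustrative, not taken from this patch.

```cpp
// Sketch only: reserve(size, alignment, page_size, MemTag) no longer has a
// default tag, so the tag must be spelled out at the call site.
#include "memory/memoryReserver.hpp"
#include "runtime/os.hpp"

static ReservedSpace reserve_for_component(size_t size) {
  // Caller checks is_reserved(); an unreserved ReservedSpace signals failure.
  return MemoryReserver::reserve(size,
                                 os::vm_allocation_granularity(),
                                 os::vm_page_size(),
                                 mtTracing);   // explicit tag required
}
```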
5 changes: 3 additions & 2 deletions src/hotspot/share/memory/metaspace.cpp
@@ -593,7 +593,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
if (result == nullptr) {
// Fallback: reserve anywhere
log_debug(metaspace, map)("Trying anywhere...");
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), mtClass, false);
}

// Wrap resulting range in ReservedSpace
@@ -766,7 +766,8 @@ void Metaspace::global_initialize() {
rs = MemoryReserver::reserve((char*)base,
size,
Metaspace::reserve_alignment(),
os::vm_page_size());
os::vm_page_size(),
mtClass);

if (rs.is_reserved()) {
log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));