@@ -1077,6 +1077,14 @@ static void assert_no_oops_or_metadata(nmethod* nm) {
 }
 #endif
 
+static int required_mutable_data_space(CodeBuffer* code_buffer,
+                                       int jvmci_data_size = 0) {
+  return align_up(code_buffer->total_relocation_size(), oopSize) +
+         align_up(code_buffer->total_oop_size(), oopSize) +
+         align_up(jvmci_data_size, oopSize) +
+         align_up(code_buffer->total_metadata_size(), oopSize);
+}
+
 nmethod* nmethod::new_native_nmethod(const methodHandle& method,
   int compile_id,
   CodeBuffer *code_buffer,
@@ -1101,6 +1109,8 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
     offsets.set_value(CodeOffsets::Exceptions, exception_handler);
   }
 
+  int mutable_data_size = required_mutable_data_space(code_buffer);
+
   // MH intrinsics are dispatch stubs which are compatible with NonNMethod space.
   // IsUnloadingBehaviour::is_unloading needs to handle them separately.
   bool allow_NonNMethod_space = method->can_be_allocated_in_NonNMethod_space();
@@ -1110,7 +1120,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
             code_buffer, frame_size,
             basic_lock_owner_sp_offset,
             basic_lock_sp_offset,
-            oop_maps);
+            oop_maps, mutable_data_size);
     DEBUG_ONLY( if (allow_NonNMethod_space) assert_no_oops_or_metadata(nm); )
     NOT_PRODUCT(if (nm != nullptr) native_nmethod_stats.note_native_nmethod(nm));
   }
@@ -1148,7 +1158,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
   code_buffer->finalize_oop_references(method);
   // create nmethod
   nmethod* nm = nullptr;
-  int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod), true);
+  int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
 
   int immutable_data_size =
       adjust_pcs_size(debug_info->pcs_size())
@@ -1170,27 +1180,15 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
     }
   }
 
-  int mutable_data_size = align_up(code_buffer->total_relocation_size(), oopSize) +
-                          + align_up(code_buffer->total_oop_size(), oopSize) +
-                          + align_up(code_buffer->total_metadata_size(), oopSize);
-#if INCLUDE_JVMCI
-  mutable_data_size += align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize);
-#endif
-  address mutable_data = nullptr;
-  if (mutable_data_size > 0) {
-    mutable_data = (address)os::malloc(mutable_data_size, mtCode);
-    if (mutable_data == nullptr) {
-      vm_exit_out_of_memory(mutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for mutable data");
-      return nullptr;
-    }
-  }
+  int mutable_data_size = required_mutable_data_space(code_buffer
+                          JVMCI_ONLY(COMMA (compiler->is_jvmci() ? jvmci_data->size() : 0)));
 
   {
     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     nm = new (nmethod_size, comp_level)
     nmethod(method(), compiler->type(), nmethod_size, immutable_data_size, mutable_data_size,
-            compile_id, entry_bci, immutable_data, mutable_data, offsets, orig_pc_offset,
+            compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
             debug_info, dependencies, code_buffer, frame_size, oop_maps,
             handler_table, nul_chk_table, compiler, comp_level
 #if INCLUDE_JVMCI
@@ -1292,9 +1290,10 @@ nmethod::nmethod(
   int frame_size,
   ByteSize basic_lock_owner_sp_offset,
   ByteSize basic_lock_sp_offset,
-  OopMapSet* oop_maps)
+  OopMapSet* oop_maps,
+  int mutable_data_size)
   : CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
-             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
+             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
   _deoptimization_generation(0),
   _gc_epoch(CodeCache::gc_epoch()),
   _method(method),
@@ -1328,14 +1327,17 @@ nmethod::nmethod(
   _deopt_mh_handler_offset = 0;
   _unwind_handler_offset = 0;
 
-  CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
-  int data_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
+  int reloc_size = align_up(code_buffer->total_relocation_size(), oopSize);
+  int oop_size = align_up(code_buffer->total_oop_size(), oopSize);
+  int metadata_size = align_up(code_buffer->total_metadata_size(), wordSize);
+  CHECKED_CAST(_metadata_offset, uint16_t, reloc_size + oop_size);
+  int data_end_offset = _metadata_offset + metadata_size;
 #if INCLUDE_JVMCI
   // jvmci_data_size is 0 in native wrapper but we need to set offset
   // to correctly calculate metadata_end address
   CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset);
 #endif
-  assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset));
+  assert(data_end_offset <= mutable_data_size, "wrong nmutable_data_size: %d < %d", data_end_offset, mutable_data_size);
 
   // native wrapper does not have read-only data but we need unique not null address
   _immutable_data = blob_end();
@@ -1419,7 +1421,6 @@ nmethod::nmethod(
   int compile_id,
   int entry_bci,
   address immutable_data,
-  address mutable_data,
   CodeOffsets* offsets,
   int orig_pc_offset,
   DebugInformationRecorder* debug_info,
@@ -1438,7 +1439,7 @@ nmethod::nmethod(
 #endif
   )
   : CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
-             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true),
+             offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, mutable_data_size),
   _deoptimization_generation(0),
   _gc_epoch(CodeCache::gc_epoch()),
   _method(method),
@@ -1514,9 +1515,9 @@ nmethod::nmethod(
   CHECKED_CAST(_jvmci_data_offset, uint16_t, _metadata_offset + metadata_size);
   jvmci_data_size = align_up(compiler->is_jvmci() ? jvmci_data->size() : 0, oopSize);
 #endif
-  assert(mutable_data_size == reloc_size + oop_size + metadata_size + jvmci_data_size,
+  assert(_mutable_data_size == reloc_size + oop_size + metadata_size + jvmci_data_size,
          "wrong mutable data size: %d != %d + %d + %d + %d",
-         mutable_data_size, reloc_size, oop_size, metadata_size, jvmci_data_size);
+         _mutable_data_size, reloc_size, oop_size, metadata_size, jvmci_data_size);
   assert(nmethod_size == code_end() - header_begin(), "wrong nmethod size: %d != %d",
          nmethod_size, (int)(code_end() - header_begin()));
 
@@ -1528,14 +1529,6 @@ nmethod::nmethod(
     // We need unique not null address
     _immutable_data = blob_end();
   }
-  _mutable_data_size = mutable_data_size;
-  if (mutable_data_size > 0) {
-    assert(mutable_data != nullptr, "required");
-    _mutable_data = mutable_data;
-  } else {
-    // We need unique not null address
-    _mutable_data = blob_end();
-  }
   CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
   CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
   _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);