Upgrade to rust nightly-2021-10-29 (#491)
qinsoon authored Oct 31, 2021
1 parent bf54dd3 commit 62f5d17
Showing 12 changed files with 62 additions and 78 deletions.
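Most of the hunks below apply one of two mechanical refactorings prompted by the toolchain upgrade: guards of the form `if cond { panic!(...) }` become `assert!` calls, and manual `is_err()` checks that re-return a `Result` become `?` propagation. The motivation is presumably newer clippy lints (such as `manual_assert`); that is an assumption, not something stated in the commit. A minimal sketch of the first pattern, with hypothetical names:

```rust
// Before: an explicit guard that panics.
fn check_aligned_old(extent: usize, chunk: usize) {
    if extent % chunk != 0 {
        panic!("requested non-aligned extent: {} bytes", extent);
    }
}

// After: the same always-on check expressed as an assertion.
fn check_aligned_new(extent: usize, chunk: usize) {
    assert!(
        extent % chunk == 0,
        "requested non-aligned extent: {} bytes",
        extent
    );
}

fn main() {
    check_aligned_old(4096, 4096); // passes silently
    check_aligned_new(4096, 4096); // passes silently
}
```

Unlike `debug_assert!`, `assert!` is also compiled into release builds, so the rewritten checks keep the original always-on panic behaviour.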
2 changes: 1 addition & 1 deletion rust-toolchain
@@ -1 +1 @@
-nightly-2021-09-17
+nightly-2021-10-29
1 change: 0 additions & 1 deletion src/lib.rs
@@ -11,7 +11,6 @@
#![feature(associated_type_defaults)]
#![feature(specialization)]
#![feature(trait_alias)]
-#![feature(const_panic)]
#![feature(step_trait)]
#![feature(adt_const_params)]
#![feature(generic_const_exprs)]
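The only change in src/lib.rs is dropping the `const_panic` feature gate. Panicking in const contexts with a string literal was stabilized around this toolchain version (it ships in stable Rust 1.57), so the newer nightly no longer needs the gate. A hypothetical illustration, not code from this repository:

```rust
// Compiles on nightly-2021-10-29 (and later stable releases) without
// #![feature(const_panic)]; older toolchains required the gate.
const fn checked_shift(bits: u32) -> u32 {
    if bits >= 32 {
        panic!("shift amount out of range");
    }
    1 << bits
}

const ONE_KIB: u32 = checked_shift(10);

fn main() {
    println!("{}", ONE_KIB); // 1024, evaluated at compile time
}
```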
11 changes: 5 additions & 6 deletions src/plan/global.rs
@@ -354,12 +354,11 @@ pub trait Plan: 'static + Sync + Downcast {
fn force_full_heap_collection(&self) {}

fn modify_check(&self, object: ObjectReference) {
-if self.base().gc_in_progress_proper() && object.is_movable() {
-panic!(
-"GC modifying a potentially moving object via Java (i.e. not magic) obj= {}",
-object
-);
-}
+assert!(
+!(self.base().gc_in_progress_proper() && object.is_movable()),
+"GC modifying a potentially moving object via Java (i.e. not magic) obj= {}",
+object
+);
}
}

2 changes: 1 addition & 1 deletion src/plan/mutator_context.rs
@@ -21,7 +21,7 @@ pub struct MutatorConfig<VM: VMBinding> {
pub allocator_mapping: &'static EnumMap<AllocationType, AllocatorSelector>,
/// Mapping between allocator selector and spaces. Each pair represents a mapping.
/// Put this behind a box, so it is a pointer-sized field.
-#[allow(clippy::box_vec)]
+#[allow(clippy::box_collection)]
pub space_mapping: Box<SpaceMapping<VM>>,
/// Plan-specific code for mutator prepare. The VMWorkerThread is the worker thread that executes this prepare function.
pub prepare_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
7 changes: 4 additions & 3 deletions src/policy/mallocspace/metadata.rs
@@ -101,9 +101,10 @@ fn map_active_chunk_metadata(chunk_start: Address) {
chunk_start + (size / 2)
);

-if CHUNK_METADATA.try_map_metadata_space(start, size).is_err() {
-panic!("failed to mmap meta memory");
-}
+assert!(
+CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(),
+"failed to mmap meta memory"
+);
}

/// We map the active chunk metadata (if not previously mapped), as well as the alloc bit metadata
32 changes: 17 additions & 15 deletions src/policy/space.rs
@@ -292,9 +292,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {

if should_poll && VM::VMActivePlan::global().poll(false, self.as_space()) {
debug!("Collection required");
-if !allow_gc {
-panic!("GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
-}
+assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
pr.clear_request(pages_reserved);
VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
unsafe { Address::zero() }
@@ -334,9 +332,10 @@
}
Err(_) => {
// We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
-if !allow_gc {
-panic!("Physical allocation failed when GC is not allowed!");
-}
+assert!(
+allow_gc,
+"Physical allocation failed when GC is not allowed!"
+);

let gc_performed = VM::VMActivePlan::global().poll(true, self.as_space());
debug_assert!(gc_performed, "GC not performed when forced.");
@@ -571,12 +570,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
_ => unreachable!(),
};

-if extent != raw_align_up(extent, BYTES_IN_CHUNK) {
-panic!(
-"{} requested non-aligned extent: {} bytes",
-rtn.name, extent
-);
-}
+assert!(
+extent == raw_align_up(extent, BYTES_IN_CHUNK),
+"{} requested non-aligned extent: {} bytes",
+rtn.name,
+extent
+);

let start: Address;
if let VMRequest::Fixed { start: _start, .. } = vmrequest {
@@ -586,9 +585,12 @@
//if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
start = heap.reserve(extent, top);
}
-if start != chunk_align_up(start) {
-panic!("{} starting on non-aligned boundary: {}", rtn.name, start);
-}
+assert!(
+start == chunk_align_up(start),
+"{} starting on non-aligned boundary: {}",
+rtn.name,
+start
+);

rtn.contiguous = true;
rtn.start = start;
15 changes: 7 additions & 8 deletions src/util/heap/heap_meta.rs
@@ -27,14 +27,13 @@ impl HeapMeta {
start
};

-if self.heap_cursor > self.heap_limit {
-panic!(
-"Out of virtual address space at {} ({} > {})",
-self.heap_cursor - extent,
-self.heap_cursor,
-self.heap_limit
-);
-}
+assert!(
+self.heap_cursor <= self.heap_limit,
+"Out of virtual address space at {} ({} > {})",
+self.heap_cursor - extent,
+self.heap_cursor,
+self.heap_limit
+);

ret
}
5 changes: 1 addition & 4 deletions src/util/heap/layout/fragmented_mapper.rs
@@ -137,10 +137,7 @@ impl Mmapper for FragmentedMapper {

let mmap_start = Self::chunk_index_to_address(base, chunk);
let _guard = self.lock.lock().unwrap();
-let res = MapState::transition_to_mapped(entry, mmap_start);
-if res.is_err() {
-return res;
-}
+MapState::transition_to_mapped(entry, mmap_start)?;
}
start = high;
}
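The fragmented_mapper.rs hunk above, like several hunks in sanity.rs below, collapses a bind-check-return sequence into the `?` operator, which early-returns the `Err` value unchanged. A minimal sketch of the equivalence, with hypothetical names:

```rust
fn transition(ok: bool) -> Result<(), String> {
    if ok {
        Ok(())
    } else {
        Err(String::from("transition failed"))
    }
}

// Before: bind the Result, test it, and re-return it on error.
fn ensure_mapped_old(ok: bool) -> Result<(), String> {
    let res = transition(ok);
    if res.is_err() {
        return res;
    }
    Ok(())
}

// After: `?` propagates the Err and discards the Ok value in one step.
fn ensure_mapped_new(ok: bool) -> Result<(), String> {
    transition(ok)?;
    Ok(())
}

fn main() {
    assert_eq!(ensure_mapped_old(false), ensure_mapped_new(false));
    assert!(ensure_mapped_new(true).is_ok());
}
```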
54 changes: 22 additions & 32 deletions src/util/metadata/side_metadata/sanity.rs
@@ -185,18 +185,12 @@ fn verify_no_overlap_chunked(spec_1: &SideMetadataSpec, spec_2: &SideMetadataSpe
/// * `g_specs`: the slice of global specifications to be checked
///
fn verify_global_specs(g_specs: &[SideMetadataSpec]) -> Result<()> {
-let v = verify_global_specs_total_size(g_specs);
-if v.is_err() {
-return v;
-}
+verify_global_specs_total_size(g_specs)?;

for spec_1 in g_specs {
for spec_2 in g_specs {
if spec_1 != spec_2 {
-let v = verify_no_overlap_contiguous(spec_1, spec_2);
-if v.is_err() {
-return v;
-}
+verify_no_overlap_contiguous(spec_1, spec_2)?;
}
}
}
@@ -245,21 +239,15 @@ impl SideMetadataSanity {
fn verify_local_specs(&self) -> Result<()> {
let local_specs = self.get_all_specs(false);

-let v = verify_local_specs_size(&local_specs);
-if v.is_err() {
-return v;
-}
+verify_local_specs_size(&local_specs)?;

for spec_1 in &local_specs {
for spec_2 in &local_specs {
if spec_1 != spec_2 {
#[cfg(target_pointer_width = "64")]
-let v = verify_no_overlap_contiguous(spec_1, spec_2);
+verify_no_overlap_contiguous(spec_1, spec_2)?;
#[cfg(target_pointer_width = "32")]
-let v = verify_no_overlap_chunked(spec_1, spec_2);
-if v.is_err() {
-return v;
-}
+verify_no_overlap_chunked(spec_1, spec_2)?;
}
}
}
@@ -306,12 +294,11 @@ impl SideMetadataSanity {

for spec in &metadata_context.global {
// Make sure all input global specs are actually global
-if !spec.is_global {
-panic!(
-"Policy-specific spec {:#?} detected in the global specs: {:#?}",
-spec, metadata_context.global
-);
-}
+assert!(
+spec.is_global,
+"Policy-specific spec {:#?} detected in the global specs: {:#?}",
+spec, metadata_context.global
+);
// On the first call to the function, initialise the content sanity map, and
// on the future calls, checks the global metadata specs have not changed
if first_call {
@@ -337,12 +324,11 @@

for spec in &metadata_context.local {
// Make sure all input local specs are actually local
-if spec.is_global {
-panic!(
-"Global spec {:#?} detected in the policy-specific specs: {:#?}",
-spec, metadata_context.local
-);
-}
+assert!(
+!spec.is_global,
+"Global spec {:#?} detected in the policy-specific specs: {:#?}",
+spec, metadata_context.local
+);
// The first call from each policy inserts the relevant (spec, hashmap) pair.
// Future calls only check that the metadata specs have not changed.
// This should work with multi mmtk instances, because the local side metadata specs are assumed to be constant per policy.
@@ -408,9 +394,13 @@ fn verify_metadata_address_bound(spec: &SideMetadataSpec, data_addr: Address) {
unreachable!()
}
};
-if metadata_addr >= metadata_addr_bound {
-panic!("We try access metadata address for address {} of spec {} that is not within the bound {}.", data_addr, spec.name, metadata_addr_bound);
-}
+assert!(
+metadata_addr < metadata_addr_bound,
+"We try access metadata address for address {} of spec {} that is not within the bound {}.",
+data_addr,
+spec.name,
+metadata_addr_bound
+);
}

/// Commits a side metadata bulk zero operation.
5 changes: 2 additions & 3 deletions src/util/raw_memory_freelist.rs
@@ -194,9 +194,8 @@ impl RawMemoryFreeList {
}

fn mmap(&self, start: Address, bytes: usize) {
-if super::memory::dzmmap_noreplace(start, bytes).is_err() {
-panic!("Can't get more space with mmap()");
-}
+let res = super::memory::dzmmap_noreplace(start, bytes);
+assert!(res.is_ok(), "Can't get more space with mmap()");
}
pub fn get_limit(&self) -> Address {
self.limit
4 changes: 1 addition & 3 deletions src/util/sanity/sanity_checker.rs
@@ -192,9 +192,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
let mut sanity_checker = self.mmtk().sanity_checker.lock().unwrap();
if !sanity_checker.refs.contains(&object) {
// FIXME steveb consider VM-specific integrity check on reference.
-if !object.is_sane() {
-panic!("Invalid reference {:?}", object);
-}
+assert!(object.is_sane(), "Invalid reference {:?}", object);
// Object is not "marked"
sanity_checker.refs.insert(object); // "Mark" it
ProcessEdgesWork::process_node(self, object);
2 changes: 1 addition & 1 deletion src/util/statistics/counter/mod.rs
@@ -103,6 +103,6 @@ impl Diffable for MonotoneNanoTime {
}

fn print_diff(val: u64) {
print!("{}", format!("{:.*}", 2, val as f64 / 1e6f64));
print!("{:.*}", 2, val as f64 / 1e6f64);
}
}
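The final hunk drops a redundant nested `format!`: `print!` accepts the same format string and arguments directly (presumably flagged by a newer clippy lint about `format!` inside formatting macros; that is an assumption). For reference, `{:.*}` takes its precision from the preceding argument:

```rust
fn main() {
    let val: u64 = 1_234_567;
    // Precision 2 comes from the first argument to `{:.*}`, so this prints "1.23".
    print!("{:.*}", 2, val as f64 / 1e6f64);
}
```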
