
Commit 62f5d17

Upgrade to rust nightly-2021-10-29 (#491)
1 parent bf54dd3 commit 62f5d17

File tree: 12 files changed (+62, -78 lines)
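
Most of the diff below is mechanical cleanup requested by the Clippy that ships with the newer nightly: if cond { panic!(...) } blocks become assert!(...) calls with the condition inverted (in the spirit of clippy::manual_assert), an explicit is_err()-then-return sequence becomes the ? operator, a redundant format! inside print! is dropped, and clippy::box_vec is now spelled clippy::box_collection. As a standalone sketch of the dominant rewrite, using a hypothetical function rather than code from this repository:

// Hypothetical example of the if-panic -> assert! rewrite applied throughout this commit.
fn check_alignment(addr: usize) {
    // Before: a branch whose only job is to panic.
    // if addr % 8 != 0 {
    //     panic!("unaligned address: {:#x}", addr);
    // }

    // After: assert! states the invariant directly; note the inverted condition.
    assert!(addr % 8 == 0, "unaligned address: {:#x}", addr);
}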


rust-toolchain

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-nightly-2021-09-17
+nightly-2021-10-29

src/lib.rs

Lines changed: 0 additions & 1 deletion
@@ -11,7 +11,6 @@
 #![feature(associated_type_defaults)]
 #![feature(specialization)]
 #![feature(trait_alias)]
-#![feature(const_panic)]
 #![feature(step_trait)]
 #![feature(adt_const_params)]
 #![feature(generic_const_exprs)]
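
The #![feature(const_panic)] gate is dropped here, presumably because panicking in const contexts no longer requires a feature gate on this nightly, which makes the attribute redundant. A rough standalone sketch (not code from this crate) of what compiles without the gate on a sufficiently recent toolchain:

// Hypothetical example: assert!/panic! with a literal message works in const
// evaluation without #![feature(const_panic)] on newer toolchains.
const fn checked_shift(x: u32, by: u32) -> u32 {
    assert!(by < 32, "shift amount out of range");
    x << by
}

// Evaluated at compile time; a bad shift amount would fail the build.
const VALUE: u32 = checked_shift(1, 4);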

src/plan/global.rs

Lines changed: 5 additions & 6 deletions
@@ -354,12 +354,11 @@ pub trait Plan: 'static + Sync + Downcast {
     fn force_full_heap_collection(&self) {}

     fn modify_check(&self, object: ObjectReference) {
-        if self.base().gc_in_progress_proper() && object.is_movable() {
-            panic!(
-                "GC modifying a potentially moving object via Java (i.e. not magic) obj= {}",
-                object
-            );
-        }
+        assert!(
+            !(self.base().gc_in_progress_proper() && object.is_movable()),
+            "GC modifying a potentially moving object via Java (i.e. not magic) obj= {}",
+            object
+        );
     }
 }

src/plan/mutator_context.rs

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ pub struct MutatorConfig<VM: VMBinding> {
     pub allocator_mapping: &'static EnumMap<AllocationType, AllocatorSelector>,
     /// Mapping between allocator selector and spaces. Each pair represents a mapping.
     /// Put this behind a box, so it is a pointer-sized field.
-    #[allow(clippy::box_vec)]
+    #[allow(clippy::box_collection)]
     pub space_mapping: Box<SpaceMapping<VM>>,
     /// Plan-specific code for mutator prepare. The VMWorkerThread is the worker thread that executes this prepare function.
     pub prepare_func: &'static (dyn Fn(&mut Mutator<VM>, VMWorkerThread) + Send + Sync),
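
The only change here is a lint rename: what older Clippy versions called clippy::box_vec is reported as clippy::box_collection by the Clippy bundled with the newer nightly. The lint itself still flags boxing a collection that already allocates on the heap; a hypothetical illustration (not from this crate):

// Hypothetical example: Box<Vec<T>> triggers the (renamed) box_collection lint,
// since Vec already stores its elements on the heap. Here the Box is kept on
// purpose so the field stays pointer-sized, mirroring MutatorConfig::space_mapping.
#[allow(clippy::box_collection)]
struct Config {
    values: Box<Vec<u64>>,
}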

src/policy/mallocspace/metadata.rs

Lines changed: 4 additions & 3 deletions
@@ -101,9 +101,10 @@ fn map_active_chunk_metadata(chunk_start: Address) {
         chunk_start + (size / 2)
     );

-    if CHUNK_METADATA.try_map_metadata_space(start, size).is_err() {
-        panic!("failed to mmap meta memory");
-    }
+    assert!(
+        CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(),
+        "failed to mmap meta memory"
+    );
 }

 /// We map the active chunk metadata (if not previously mapped), as well as the alloc bit metadata

src/policy/space.rs

Lines changed: 17 additions & 15 deletions
@@ -292,9 +292,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {

         if should_poll && VM::VMActivePlan::global().poll(false, self.as_space()) {
             debug!("Collection required");
-            if !allow_gc {
-                panic!("GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
-            }
+            assert!(allow_gc, "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
             pr.clear_request(pages_reserved);
             VM::VMCollection::block_for_gc(VMMutatorThread(tls)); // We have checked that this is mutator
             unsafe { Address::zero() }
@@ -334,9 +332,10 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
                 }
                 Err(_) => {
                     // We thought we had memory to allocate, but somehow failed the allocation. Will force a GC.
-                    if !allow_gc {
-                        panic!("Physical allocation failed when GC is not allowed!");
-                    }
+                    assert!(
+                        allow_gc,
+                        "Physical allocation failed when GC is not allowed!"
+                    );

                     let gc_performed = VM::VMActivePlan::global().poll(true, self.as_space());
                     debug_assert!(gc_performed, "GC not performed when forced.");
@@ -571,12 +570,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
             _ => unreachable!(),
         };

-        if extent != raw_align_up(extent, BYTES_IN_CHUNK) {
-            panic!(
-                "{} requested non-aligned extent: {} bytes",
-                rtn.name, extent
-            );
-        }
+        assert!(
+            extent == raw_align_up(extent, BYTES_IN_CHUNK),
+            "{} requested non-aligned extent: {} bytes",
+            rtn.name,
+            extent
+        );

         let start: Address;
         if let VMRequest::Fixed { start: _start, .. } = vmrequest {
@@ -586,9 +585,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
             //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
             start = heap.reserve(extent, top);
         }
-        if start != chunk_align_up(start) {
-            panic!("{} starting on non-aligned boundary: {}", rtn.name, start);
-        }
+        assert!(
+            start == chunk_align_up(start),
+            "{} starting on non-aligned boundary: {}",
+            rtn.name,
+            start
+        );

         rtn.contiguous = true;
         rtn.start = start;

src/util/heap/heap_meta.rs

Lines changed: 7 additions & 8 deletions
@@ -27,14 +27,13 @@ impl HeapMeta {
             start
         };

-        if self.heap_cursor > self.heap_limit {
-            panic!(
-                "Out of virtual address space at {} ({} > {})",
-                self.heap_cursor - extent,
-                self.heap_cursor,
-                self.heap_limit
-            );
-        }
+        assert!(
+            self.heap_cursor <= self.heap_limit,
+            "Out of virtual address space at {} ({} > {})",
+            self.heap_cursor - extent,
+            self.heap_cursor,
+            self.heap_limit
+        );

         ret
     }

src/util/heap/layout/fragmented_mapper.rs

Lines changed: 1 addition & 4 deletions
@@ -137,10 +137,7 @@ impl Mmapper for FragmentedMapper {

                 let mmap_start = Self::chunk_index_to_address(base, chunk);
                 let _guard = self.lock.lock().unwrap();
-                let res = MapState::transition_to_mapped(entry, mmap_start);
-                if res.is_err() {
-                    return res;
-                }
+                MapState::transition_to_mapped(entry, mmap_start)?;
             }
             start = high;
         }
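
This hunk, like several in the sanity-checking module below, replaces the store-the-Result, check is_err(), return-it sequence with the ? operator, which early-returns the Err value and yields the Ok value in one step. A minimal standalone sketch of the idiom (hypothetical function, not from this repository):

// Hypothetical example of the `?` rewrite used in this commit.
fn read_marker(path: &str) -> std::io::Result<String> {
    // Before:
    //     let res = std::fs::read_to_string(path);
    //     if res.is_err() {
    //         return res;
    //     }
    //     res
    // After: `?` propagates the Err variant and unwraps the Ok value.
    let contents = std::fs::read_to_string(path)?;
    Ok(contents)
}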

src/util/metadata/side_metadata/sanity.rs

Lines changed: 22 additions & 32 deletions
@@ -185,18 +185,12 @@ fn verify_no_overlap_chunked(spec_1: &SideMetadataSpec, spec_2: &SideMetadataSpe
 /// * `g_specs`: the slice of global specifications to be checked
 ///
 fn verify_global_specs(g_specs: &[SideMetadataSpec]) -> Result<()> {
-    let v = verify_global_specs_total_size(g_specs);
-    if v.is_err() {
-        return v;
-    }
+    verify_global_specs_total_size(g_specs)?;

     for spec_1 in g_specs {
         for spec_2 in g_specs {
             if spec_1 != spec_2 {
-                let v = verify_no_overlap_contiguous(spec_1, spec_2);
-                if v.is_err() {
-                    return v;
-                }
+                verify_no_overlap_contiguous(spec_1, spec_2)?;
             }
         }
     }
@@ -245,21 +239,15 @@ impl SideMetadataSanity {
     fn verify_local_specs(&self) -> Result<()> {
         let local_specs = self.get_all_specs(false);

-        let v = verify_local_specs_size(&local_specs);
-        if v.is_err() {
-            return v;
-        }
+        verify_local_specs_size(&local_specs)?;

         for spec_1 in &local_specs {
             for spec_2 in &local_specs {
                 if spec_1 != spec_2 {
                     #[cfg(target_pointer_width = "64")]
-                    let v = verify_no_overlap_contiguous(spec_1, spec_2);
+                    verify_no_overlap_contiguous(spec_1, spec_2)?;
                     #[cfg(target_pointer_width = "32")]
-                    let v = verify_no_overlap_chunked(spec_1, spec_2);
-                    if v.is_err() {
-                        return v;
-                    }
+                    verify_no_overlap_chunked(spec_1, spec_2)?;
                 }
             }
         }
@@ -306,12 +294,11 @@ impl SideMetadataSanity {

         for spec in &metadata_context.global {
             // Make sure all input global specs are actually global
-            if !spec.is_global {
-                panic!(
-                    "Policy-specific spec {:#?} detected in the global specs: {:#?}",
-                    spec, metadata_context.global
-                );
-            }
+            assert!(
+                spec.is_global,
+                "Policy-specific spec {:#?} detected in the global specs: {:#?}",
+                spec, metadata_context.global
+            );
             // On the first call to the function, initialise the content sanity map, and
             // on the future calls, checks the global metadata specs have not changed
             if first_call {
@@ -337,12 +324,11 @@ impl SideMetadataSanity {

         for spec in &metadata_context.local {
             // Make sure all input local specs are actually local
-            if spec.is_global {
-                panic!(
-                    "Global spec {:#?} detected in the policy-specific specs: {:#?}",
-                    spec, metadata_context.local
-                );
-            }
+            assert!(
+                !spec.is_global,
+                "Global spec {:#?} detected in the policy-specific specs: {:#?}",
+                spec, metadata_context.local
+            );
             // The first call from each policy inserts the relevant (spec, hashmap) pair.
             // Future calls only check that the metadata specs have not changed.
             // This should work with multi mmtk instances, because the local side metadata specs are assumed to be constant per policy.
@@ -408,9 +394,13 @@ fn verify_metadata_address_bound(spec: &SideMetadataSpec, data_addr: Address) {
             unreachable!()
         }
     };
-    if metadata_addr >= metadata_addr_bound {
-        panic!("We try access metadata address for address {} of spec {} that is not within the bound {}.", data_addr, spec.name, metadata_addr_bound);
-    }
+    assert!(
+        metadata_addr < metadata_addr_bound,
+        "We try access metadata address for address {} of spec {} that is not within the bound {}.",
+        data_addr,
+        spec.name,
+        metadata_addr_bound
+    );
 }

 /// Commits a side metadata bulk zero operation.

src/util/raw_memory_freelist.rs

Lines changed: 2 additions & 3 deletions
@@ -194,9 +194,8 @@ impl RawMemoryFreeList {
     }

     fn mmap(&self, start: Address, bytes: usize) {
-        if super::memory::dzmmap_noreplace(start, bytes).is_err() {
-            panic!("Can't get more space with mmap()");
-        }
+        let res = super::memory::dzmmap_noreplace(start, bytes);
+        assert!(res.is_ok(), "Can't get more space with mmap()");
     }
     pub fn get_limit(&self) -> Address {
         self.limit

src/util/sanity/sanity_checker.rs

Lines changed: 1 addition & 3 deletions
@@ -192,9 +192,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
         let mut sanity_checker = self.mmtk().sanity_checker.lock().unwrap();
         if !sanity_checker.refs.contains(&object) {
             // FIXME steveb consider VM-specific integrity check on reference.
-            if !object.is_sane() {
-                panic!("Invalid reference {:?}", object);
-            }
+            assert!(object.is_sane(), "Invalid reference {:?}", object);
             // Object is not "marked"
             sanity_checker.refs.insert(object); // "Mark" it
             ProcessEdgesWork::process_node(self, object);

src/util/statistics/counter/mod.rs

Lines changed: 1 addition & 1 deletion
@@ -103,6 +103,6 @@ impl Diffable for MonotoneNanoTime {
     }

     fn print_diff(val: u64) {
-        print!("{}", format!("{:.*}", 2, val as f64 / 1e6f64));
+        print!("{:.*}", 2, val as f64 / 1e6f64);
     }
 }
