From 42262c296e164b5b1d05103e527ce5a9aee7cfd2 Mon Sep 17 00:00:00 2001
From: Yi Lin
Date: Wed, 16 Feb 2022 10:06:45 +1300
Subject: [PATCH] Bump version to v0.10. Update Rust to nightly-2022-02-11 (#547)

---
 CHANGELOG.md                              | 45 +++++++++++++++++++++++
 Cargo.toml                                |  2 +-
 rust-toolchain                            |  2 +-
 src/lib.rs                                |  3 --
 src/policy/copyspace.rs                   | 10 ++---
 src/policy/largeobjectspace.rs            |  4 +-
 src/policy/space.rs                       |  9 ++---
 src/scheduler/stat.rs                     |  2 +-
 src/util/analysis/obj_size.rs             |  2 +-
 src/util/heap/freelistpageresource.rs     |  3 +-
 src/util/metadata/side_metadata/global.rs | 16 ++++--
 src/util/options.rs                       |  2 +-
 src/util/reference_processor.rs           |  2 +-
 src/util/rust_util.rs                     |  2 +-
 src/util/treadmill.rs                     |  6 +--
 15 files changed, 75 insertions(+), 35 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f63f846cf7..f5d68db79a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,48 @@
+0.10.0 (2022-02-14)
+===
+
+GC Plans
+---
+* Removed plan-specific copy contexts. Now each plan needs to provide a configuration for
+  `GCWorkerCopyContext` (similar to how they configure `Mutator`).
+* Fixed a bug where `needs_log_bit` was always set to `true` for generational plans, regardless of
+  whether their barrier used the log bit.
+* Fixed a bug where we may overflow when calculating `get_available_pages()`.
+
+Policies
+---
+* Refactored copy context. Now a copying policy provides its copy context.
+* Mark sweep and mark compact now use `ObjectIterator` for linear scan.
+
+Scheduler
+---
+* Introduced `GCController`, a counterpart of `GCWorker`, for the controller thread.
+* Refactored `GCWorker`. Now `GCWorker` is separated into two parts, a thread-local part `GCWorker`
+  which is owned by GC threads, and a shared part `GCWorkerShared` that is shared between GC threads
+  and the scheduler.
+* Refactored the creation of the scheduler and the workers to remove some unnecessary `Option` and `RwLock`.
+
+API
+---
+* Added `process_bulk()` that allows bindings to pass options as a string of key-value pairs.
+* `ObjectModel::copy()` now takes `CopySemantics` as a parameter.
+* Renamed `Collection::spawn_worker_thread()` to `spawn_gc_thread()`, which is now used to spawn both GC workers and
+  the GC controller.
+* `Collection::out_of_memory()` now takes `AllocationError` as a parameter which hints to the binding
+  how to handle the OOM error.
+* `Collection::out_of_memory()` now allows a binding to return from the method in the case of a non-critical OOM.
+  If a binding returns, `alloc()` will return a zero address.
+
+Misc
+---
+* Added `ObjectIterator` that provides linear scanning through a region to iterate
+  objects using the alloc bit.
+* Added a feature `work_packet_stats` to optionally collect work packet statistics. Note that
+  MMTk used to always collect work packet statistics.
+* Optimized the access to the SFT map.
+* Fixed a few issues with documentation.
+* The example header file `mmtk.h` now uses the prefix `mmtk_` for all the functions.
+
 0.9.0 (2021-12-16)
 ===
 
diff --git a/Cargo.toml b/Cargo.toml
index a2ed1345fb..35c3afbff7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "mmtk"
-version = "0.9.0"
+version = "0.10.0"
 authors = ["The MMTk Developers <>"]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/rust-toolchain b/rust-toolchain
index c740dfcf86..8445feb8bd 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-nightly-2021-12-05
+nightly-2022-02-11
diff --git a/src/lib.rs b/src/lib.rs
index 9d3c29ff28..94b0d0314b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,12 +1,9 @@
 #![allow(incomplete_features)]
-#![feature(asm)]
 #![feature(integer_atomics)]
 #![feature(is_sorted)]
 #![feature(drain_filter)]
 #![feature(nll)]
 #![feature(box_syntax)]
-#![feature(maybe_uninit_extra)]
-#![feature(get_mut_unchecked)]
 #![feature(arbitrary_self_types)]
 #![feature(associated_type_defaults)]
 #![feature(specialization)]
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 57148b62cd..3b658a4e91 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -34,7 +34,7 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
     }
 
     fn is_live(&self, object: ObjectReference) -> bool {
-        !self.from_space() || object_forwarding::is_forwarded::<VM>(object)
+        !self.is_from_space() || object_forwarding::is_forwarded::<VM>(object)
     }
 
     fn is_movable(&self) -> bool {
@@ -43,7 +43,7 @@
 
     #[cfg(feature = "sanity")]
     fn is_sane(&self) -> bool {
-        !self.from_space()
+        !self.is_from_space()
     }
 
     fn initialize_object_metadata(&self, _object: ObjectReference, _alloc: bool) {
@@ -53,7 +53,7 @@
 
     #[inline(always)]
     fn get_forwarded_object(&self, object: ObjectReference) -> Option<ObjectReference> {
-        if !self.from_space() {
+        if !self.is_from_space() {
             return None;
         }
 
@@ -179,7 +179,7 @@ impl<VM: VMBinding> CopySpace<VM> {
         }
     }
 
-    fn from_space(&self) -> bool {
+    fn is_from_space(&self) -> bool {
         self.from_space.load(Ordering::SeqCst)
     }
 
@@ -193,7 +193,7 @@
     ) -> ObjectReference {
         trace!("copyspace.trace_object(, {:?}, {:?})", object, semantics,);
         debug_assert!(
-            self.from_space(),
+            self.is_from_space(),
             "Trace object called for object ({:?}) in to-space",
             object
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 1137bea6e4..0059beb239 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -153,7 +153,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
 
     pub fn prepare(&mut self, full_heap: bool) {
         if full_heap {
-            debug_assert!(self.treadmill.from_space_empty());
+            debug_assert!(self.treadmill.is_from_space_empty());
             self.mark_state = MARK_BIT - self.mark_state;
         }
         self.treadmill.flip(full_heap);
@@ -162,7 +162,7 @@
 
     pub fn release(&mut self, full_heap: bool) {
         self.sweep_large_pages(true);
-        debug_assert!(self.treadmill.nursery_empty());
+        debug_assert!(self.treadmill.is_nursery_empty());
         if full_heap {
             self.sweep_large_pages(false);
         }
diff --git a/src/policy/space.rs b/src/policy/space.rs
index ed32ecfce5..a92fdb75b8 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -578,14 +578,13 @@ impl<VM: VMBinding> CommonSpace<VM> {
             extent
         );
 
-        let start: Address;
-        if let VMRequest::Fixed { start: _start, .. } = vmrequest {
-            start = _start;
+        let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest {
+            _start
         } else {
             // FIXME
             //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name);
-            start = heap.reserve(extent, top);
-        }
+            heap.reserve(extent, top)
+        };
         assert!(
             start == chunk_align_up(start),
             "{} starting on non-aligned boundary: {}",
diff --git a/src/scheduler/stat.rs b/src/scheduler/stat.rs
index fa0c27c7de..fc6ef8256d 100644
--- a/src/scheduler/stat.rs
+++ b/src/scheduler/stat.rs
@@ -34,7 +34,7 @@ impl SchedulerStat {
     /// Extract the work-packet name from the full type name.
     /// i.e. simplifies `crate::scheduler::gc_work::SomeWorkPacket` to `SomeWorkPacket`.
     fn work_name(&self, name: &str) -> String {
-        let end_index = name.find('<').unwrap_or_else(|| name.len());
+        let end_index = name.find('<').unwrap_or(name.len());
         let name = name[..end_index].to_owned();
         match name.rfind(':') {
             Some(start_index) => name[(start_index + 1)..end_index].to_owned(),
diff --git a/src/util/analysis/obj_size.rs b/src/util/analysis/obj_size.rs
index 2be0713417..6ad2b1715c 100644
--- a/src/util/analysis/obj_size.rs
+++ b/src/util/analysis/obj_size.rs
@@ -58,7 +58,7 @@ impl<VM: VMBinding> RtAnalysis<VM> for PerSizeClassObjectCounter {
         match c {
             None => {
                 // Create (and increment) the counter associated with the size class if it doesn't exist
-                let ctr = new_ctr!(stats, size_classes, &size_class);
+                let ctr = new_ctr!(stats, size_classes, size_class);
                 ctr.lock().unwrap().inc();
             }
             Some(ctr) => {
diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs
index 231879b8ca..334ce646ef 100644
--- a/src/util/heap/freelistpageresource.rs
+++ b/src/util/heap/freelistpageresource.rs
@@ -271,11 +271,10 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
         self.free_list.set_uncoalescable(region_start as _);
         self.free_list.set_uncoalescable(region_end as i32 + 1);
         for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
-            let liberated;
             if p != region_start {
                 self.free_list.clear_uncoalescable(p as _);
             }
-            liberated = self.free_list.free(p as _, true); // add chunk to our free list
+            let liberated = self.free_list.free(p as _, true); // add chunk to our free list
             debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
             if self.meta_data_pages_per_region > 1 {
                 let meta_data_pages_per_region = self.meta_data_pages_per_region;
diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs
index af54cb4099..e6d37f5a82 100644
--- a/src/util/metadata/side_metadata/global.rs
+++ b/src/util/metadata/side_metadata/global.rs
@@ -580,13 +580,13 @@ pub fn fetch_add_atomic(
         (old_val & mask) as usize
     } else if bits_num_log == 3 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU8>()).fetch_add(val as u8, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU8>()).fetch_add(val as u8, order) as usize }
     } else if bits_num_log == 4 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU16>()).fetch_add(val as u16, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU16>()).fetch_add(val as u16, order) as usize }
     } else if bits_num_log == 5 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU32>()).fetch_add(val as u32, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU32>()).fetch_add(val as u32, order) as usize }
     } else if bits_num_log == 6 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicUsize>()).fetch_add(val, order) }
+        unsafe { (*meta_addr.to_ptr::<AtomicUsize>()).fetch_add(val, order) }
     } else {
         unreachable!(
             "side metadata > {}-bits is not supported!",
@@ -639,13 +639,13 @@ pub fn fetch_sub_atomic(
         (old_val & mask) as usize
     } else if bits_num_log == 3 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU8>()).fetch_sub(val as u8, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU8>()).fetch_sub(val as u8, order) as usize }
     } else if bits_num_log == 4 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU16>()).fetch_sub(val as u16, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU16>()).fetch_sub(val as u16, order) as usize }
     } else if bits_num_log == 5 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicU32>()).fetch_sub(val as u32, order) as usize }
+        unsafe { (*meta_addr.to_ptr::<AtomicU32>()).fetch_sub(val as u32, order) as usize }
     } else if bits_num_log == 6 {
-        unsafe { (&*meta_addr.to_ptr::<AtomicUsize>()).fetch_sub(val, order) }
+        unsafe { (*meta_addr.to_ptr::<AtomicUsize>()).fetch_sub(val, order) }
     } else {
         unreachable!(
             "side metadata > {}-bits is not supported!",
diff --git a/src/util/options.rs b/src/util/options.rs
index b49c4cd654..2463b6ecaa 100644
--- a/src/util/options.rs
+++ b/src/util/options.rs
@@ -102,7 +102,7 @@ impl UnsafeOptionsWrapper {
     /// This method is not thread safe, as internally it acquires a mutable reference to self.
     /// It is supposed to be used by one thread during boot time.
     pub unsafe fn process(&self, name: &str, value: &str) -> bool {
-        (&mut *self.0.get()).set_from_command_line(name, value)
+        (*self.0.get()).set_from_command_line(name, value)
     }
 
     /// Bulk process options. Returns true if all the options are processed successfully.
diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs
index 46aa8a681e..17d279e15e 100644
--- a/src/util/reference_processor.rs
+++ b/src/util/reference_processor.rs
@@ -172,7 +172,7 @@ impl ReferenceProcessor {
     // TODO: We may need to rework on this to remove the unsafety.
     #[allow(clippy::mut_from_ref)]
     unsafe fn sync_mut(&self) -> &mut ReferenceProcessorSync {
-        (&mut *self.sync.get()).get_mut().unwrap()
+        (*self.sync.get()).get_mut().unwrap()
     }
 
     pub fn clear(&self) {
diff --git a/src/util/rust_util.rs b/src/util/rust_util.rs
index 065eabb86f..8f75123a6f 100644
--- a/src/util/rust_util.rs
+++ b/src/util/rust_util.rs
@@ -48,7 +48,7 @@ impl<T> InitializeOnce<T> {
     pub fn get_ref(&self) -> &T {
         // We only assert in debug builds.
         debug_assert!(self.once.is_completed());
-        unsafe { (&*self.v.get()).assume_init_ref() }
+        unsafe { (*self.v.get()).assume_init_ref() }
     }
 }
 
diff --git a/src/util/treadmill.rs b/src/util/treadmill.rs
index 3573c917ec..aa7ce94919 100644
--- a/src/util/treadmill.rs
+++ b/src/util/treadmill.rs
@@ -81,15 +81,15 @@ impl TreadMill {
         self.to_space.lock().unwrap().insert(cell);
     }
 
-    pub fn to_space_empty(&self) -> bool {
+    pub fn is_to_space_empty(&self) -> bool {
         self.to_space.lock().unwrap().is_empty()
     }
 
-    pub fn from_space_empty(&self) -> bool {
+    pub fn is_from_space_empty(&self) -> bool {
         self.from_space.lock().unwrap().is_empty()
     }
 
-    pub fn nursery_empty(&self) -> bool {
+    pub fn is_nursery_empty(&self) -> bool {
         self.collect_nursery.lock().unwrap().is_empty()
     }