diff --git a/docs/tutorial/code/mygc_semispace/global.rs b/docs/tutorial/code/mygc_semispace/global.rs
index c10693932f..12ffcfc1b5 100644
--- a/docs/tutorial/code/mygc_semispace/global.rs
+++ b/docs/tutorial/code/mygc_semispace/global.rs
@@ -136,17 +136,17 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
 
     // Modify
     // ANCHOR: plan_get_collection_reserve
-    fn get_collection_reserve(&self) -> usize {
+    fn get_collection_reserved_pages(&self) -> usize {
         self.tospace().reserved_pages()
     }
     // ANCHOR_END: plan_get_collection_reserve
 
     // Modify
-    // ANCHOR: plan_get_pages_used
-    fn get_pages_used(&self) -> usize {
-        self.tospace().reserved_pages() + self.common.get_pages_used()
+    // ANCHOR: plan_get_used_pages
+    fn get_used_pages(&self) -> usize {
+        self.tospace().reserved_pages() + self.common.get_used_pages()
     }
-    // ANCHOR_END: plan_get_pages_used
+    // ANCHOR_END: plan_get_used_pages
 
     // Modify
     // ANCHOR: plan_base
diff --git a/docs/tutorial/src/mygc/ss/alloc.md b/docs/tutorial/src/mygc/ss/alloc.md
index 7a16e6ed34..ae0a43ab03 100644
--- a/docs/tutorial/src/mygc/ss/alloc.md
+++ b/docs/tutorial/src/mygc/ss/alloc.md
@@ -147,7 +147,7 @@ correctly count the pages contained in the tospace and the common plan spaces
 (which will be explained later).
 
 ```rust
-{{#include ../../../code/mygc_semispace/global.rs:plan_get_pages_used}}
+{{#include ../../../code/mygc_semispace/global.rs:plan_get_used_pages}}
 ```
 
 Add and override the following helper function:
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index e77a2ab002..6c3c7c8d67 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -264,7 +264,7 @@ pub fn process_bulk<VM: VMBinding>(mmtk: &'static MMTK<VM>, options: &str) -> bo
 /// Arguments:
 /// * `mmtk`: A reference to an MMTk instance.
 pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
-    mmtk.plan.get_pages_used() << LOG_BYTES_IN_PAGE
+    mmtk.plan.get_used_pages() << LOG_BYTES_IN_PAGE
 }
 
 /// Return free memory in bytes.
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index 36c82aacde..628c0bf0e3 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -122,18 +122,21 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
             .set_next_gc_full_heap(Gen::should_next_gc_be_full_heap(self));
     }
 
-    fn get_collection_reserve(&self) -> usize {
-        self.gen.get_collection_reserve() + self.tospace().reserved_pages()
+    fn get_collection_reserved_pages(&self) -> usize {
+        self.gen.get_collection_reserved_pages() + self.tospace().reserved_pages()
     }
 
-    fn get_pages_used(&self) -> usize {
-        self.gen.get_pages_used() + self.tospace().reserved_pages()
+    fn get_used_pages(&self) -> usize {
+        self.gen.get_used_pages() + self.tospace().reserved_pages()
     }
 
     /// Return the number of pages available for allocation. This assumes all future allocations go to the nursery.
-    fn get_pages_avail(&self) -> usize {
+    fn get_available_pages(&self) -> usize {
         // super.get_pages_avail() / 2 to reserve pages for copying
-        (self.get_total_pages() - self.get_pages_reserved()) >> 1
+        (self
+            .get_total_pages()
+            .saturating_sub(self.get_reserved_pages()))
+            >> 1
     }
 
     fn base(&self) -> &BasePlan<VM> {
@@ -216,7 +219,7 @@ impl<VM: VMBinding> GenCopy<VM> {
 
     fn request_full_heap_collection(&self) -> bool {
         self.gen
-            .request_full_heap_collection(self.get_total_pages(), self.get_pages_reserved())
+            .request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
     }
 
     pub fn tospace(&self) -> &CopySpace<VM> {
diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs
index 6146537274..492ef8e5fb 100644
--- a/src/plan/generational/global.rs
+++ b/src/plan/generational/global.rs
@@ -209,7 +209,8 @@ impl<VM: VMBinding> Gen<VM> {
 
     /// Check a plan to see if the next GC should be a full heap GC.
     pub fn should_next_gc_be_full_heap(plan: &dyn Plan<VM = VM>) -> bool {
-        plan.get_pages_avail() < conversions::bytes_to_pages_up(*plan.base().options.min_nursery)
+        plan.get_available_pages()
+            < conversions::bytes_to_pages_up(*plan.base().options.min_nursery)
     }
 
     /// Set next_gc_full_heap to the given value.
@@ -220,13 +221,13 @@ impl<VM: VMBinding> Gen<VM> {
 
     /// Get pages reserved for the collection by a generational plan. A generational plan should
     /// add its own reservation to the value returned by this method.
-    pub fn get_collection_reserve(&self) -> usize {
+    pub fn get_collection_reserved_pages(&self) -> usize {
         self.nursery.reserved_pages()
     }
 
     /// Get pages used by a generational plan. A generational plan should add its own used pages
     /// to the value returned by this method.
-    pub fn get_pages_used(&self) -> usize {
-        self.nursery.reserved_pages() + self.common.get_pages_used()
+    pub fn get_used_pages(&self) -> usize {
+        self.nursery.reserved_pages() + self.common.get_used_pages()
     }
 }
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index f55624fd5d..63c5ca8b34 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -165,18 +165,21 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
             .store(full_heap, Ordering::Relaxed);
     }
 
-    fn get_collection_reserve(&self) -> usize {
-        self.gen.get_collection_reserve() + self.immix.defrag_headroom_pages()
+    fn get_collection_reserved_pages(&self) -> usize {
+        self.gen.get_collection_reserved_pages() + self.immix.defrag_headroom_pages()
     }
 
-    fn get_pages_used(&self) -> usize {
-        self.gen.get_pages_used() + self.immix.reserved_pages()
+    fn get_used_pages(&self) -> usize {
+        self.gen.get_used_pages() + self.immix.reserved_pages()
     }
 
     /// Return the number of pages available for allocation. This assumes all future allocations go to the nursery.
-    fn get_pages_avail(&self) -> usize {
+    fn get_available_pages(&self) -> usize {
         // super.get_pages_avail() / 2 to reserve pages for copying
-        (self.get_total_pages() - self.get_pages_reserved()) >> 1
+        (self
+            .get_total_pages()
+            .saturating_sub(self.get_reserved_pages()))
+            >> 1
     }
 
     fn base(&self) -> &BasePlan<VM> {
@@ -248,6 +251,6 @@ impl<VM: VMBinding> GenImmix<VM> {
 
     fn request_full_heap_collection(&self) -> bool {
         self.gen
-            .request_full_heap_collection(self.get_total_pages(), self.get_pages_reserved())
+            .request_full_heap_collection(self.get_total_pages(), self.get_reserved_pages())
     }
 }
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 3b8d2b4bd8..851de095d0 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -248,30 +248,54 @@ pub trait Plan: 'static + Sync + Downcast {
      */
     fn collection_required(&self, space_full: bool, _space: &dyn Space<Self::VM>) -> bool;
 
-    fn get_pages_reserved(&self) -> usize {
-        self.get_pages_used() + self.get_collection_reserve()
+    // Note: The following methods are about page accounting. The default implementations should
+    // work fine for non-copying plans. A copying plan should override any of these methods
+    // if necessary.
+
+    /// Get the number of pages that are reserved, including used pages and pages that will
+    /// be used (e.g. for copying).
+    fn get_reserved_pages(&self) -> usize {
+        self.get_used_pages() + self.get_collection_reserved_pages()
     }
 
+    /// Get the total number of pages for the heap.
     fn get_total_pages(&self) -> usize {
         self.base().heap.get_total_pages()
     }
 
-    fn get_pages_avail(&self) -> usize {
-        self.get_total_pages() - self.get_pages_reserved()
-    }
-
-    fn get_collection_reserve(&self) -> usize {
+    /// Get the number of pages that are still available for use. The number of available
+    /// pages is always non-negative.
+    fn get_available_pages(&self) -> usize {
+        // It is possible for the reserved pages to exceed the total pages, so we use a
+        // saturating subtraction to make sure we return a non-negative number.
+        // For example,
+        // 1. our GC trigger checks whether the reserved pages exceed the total pages.
+        // 2. when the heap is almost full of live objects (such as in an OOM situation) and we are doing a copying GC,
+        //    the reserved pages may exceed the total pages after the copying GC (the reservation after a GC
+        //    may be larger than the reservation before the GC, as we may end up using more memory for the
+        //    thread-local buffers of the copy allocators).
+        self.get_total_pages()
+            .saturating_sub(self.get_reserved_pages())
+    }
+
+    /// Get the number of pages that are reserved for collection. By default, we return 0.
+    /// A copying plan needs to override this and calculate the pages required to complete
+    /// a copying GC.
+    fn get_collection_reserved_pages(&self) -> usize {
         0
     }
 
-    fn get_pages_used(&self) -> usize;
+    /// Get the number of pages that are used.
+    fn get_used_pages(&self) -> usize;
 
-    fn is_emergency_collection(&self) -> bool {
-        self.base().emergency_collection.load(Ordering::Relaxed)
+    /// Get the number of pages that are NOT used. Note that this is different from the available pages:
+    /// free pages are unused, but some of them may have been reserved (e.g. as the collection copy reserve).
+    fn get_free_pages(&self) -> usize {
+        self.get_total_pages() - self.get_used_pages()
     }
 
-    fn get_free_pages(&self) -> usize {
-        self.get_total_pages() - self.get_pages_used()
+    fn is_emergency_collection(&self) -> bool {
+        self.base().emergency_collection.load(Ordering::Relaxed)
     }
 
     /// The application code has requested a collection.
@@ -537,7 +561,7 @@ impl<VM: VMBinding> BasePlan<VM> {
     }
 
     // Depends on what base spaces we use, unsync may be unused.
-    pub fn get_pages_used(&self) -> usize {
+    pub fn get_used_pages(&self) -> usize {
         // Depends on what base spaces we use, pages may be unchanged.
         #[allow(unused_mut)]
         let mut pages = 0;
@@ -766,11 +790,13 @@ impl<VM: VMBinding> BasePlan<VM> {
         }
 
         debug!(
-            "self.get_pages_reserved()={}, self.get_total_pages()={}",
-            plan.get_pages_reserved(),
+            "self.get_reserved_pages()={}, self.get_total_pages()={}",
+            plan.get_reserved_pages(),
             plan.get_total_pages()
         );
-        let heap_full = plan.get_pages_reserved() > plan.get_total_pages();
+        // Check if we reserved more pages (including the collection copy reserve)
+        // than the heap's total pages. In that case, we will have to do a GC.
+        let heap_full = plan.get_reserved_pages() > plan.get_total_pages();
 
         space_full || stress_force_gc || heap_full
     }
@@ -849,8 +875,8 @@ impl<VM: VMBinding> CommonPlan<VM> {
         self.los.init(vm_map);
     }
 
-    pub fn get_pages_used(&self) -> usize {
-        self.immortal.reserved_pages() + self.los.reserved_pages() + self.base.get_pages_used()
+    pub fn get_used_pages(&self) -> usize {
+        self.immortal.reserved_pages() + self.los.reserved_pages() + self.base.get_used_pages()
     }
 
     pub fn trace_object<T: TransitiveClosure>(
diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs
index c58af2bb30..71643c0065 100644
--- a/src/plan/immix/global.rs
+++ b/src/plan/immix/global.rs
@@ -110,12 +110,12 @@ impl<VM: VMBinding> Plan for Immix<VM> {
             .store(self.immix_space.release(true), Ordering::Relaxed);
     }
 
-    fn get_collection_reserve(&self) -> usize {
+    fn get_collection_reserved_pages(&self) -> usize {
         self.immix_space.defrag_headroom_pages()
     }
 
-    fn get_pages_used(&self) -> usize {
-        self.immix_space.reserved_pages() + self.common.get_pages_used()
+    fn get_used_pages(&self) -> usize {
+        self.immix_space.reserved_pages() + self.common.get_used_pages()
     }
 
     fn base(&self) -> &BasePlan<VM> {
diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs
index 5b7852b4c1..bbc45a4223 100644
--- a/src/plan/markcompact/global.rs
+++ b/src/plan/markcompact/global.rs
@@ -140,11 +140,11 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
         self.base().collection_required(self, space_full, space)
     }
 
-    fn get_pages_used(&self) -> usize {
-        self.mc_space.reserved_pages() + self.common.get_pages_used()
+    fn get_used_pages(&self) -> usize {
+        self.mc_space.reserved_pages() + self.common.get_used_pages()
     }
 
-    fn get_collection_reserve(&self) -> usize {
+    fn get_collection_reserved_pages(&self) -> usize {
         0
     }
 }
diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs
index e29a922a19..1bf8ef0611 100644
--- a/src/plan/marksweep/global.rs
+++ b/src/plan/marksweep/global.rs
@@ -71,12 +71,8 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
         self.base().collection_required(self, space_full, space)
     }
 
-    fn get_collection_reserve(&self) -> usize {
-        0
-    }
-
-    fn get_pages_used(&self) -> usize {
-        self.common.get_pages_used() + self.ms.reserved_pages()
+    fn get_used_pages(&self) -> usize {
+        self.common.get_used_pages() + self.ms.reserved_pages()
     }
 
     fn base(&self) -> &BasePlan<VM> {
diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs
index 428d2f0306..74bd1efd33 100644
--- a/src/plan/nogc/global.rs
+++ b/src/plan/nogc/global.rs
@@ -72,11 +72,11 @@ impl<VM: VMBinding> Plan for NoGC<VM> {
         unreachable!("GC triggered in nogc")
     }
 
-    fn get_pages_used(&self) -> usize {
+    fn get_used_pages(&self) -> usize {
         self.nogc_space.reserved_pages()
             + self.immortal.reserved_pages()
             + self.los.reserved_pages()
-            + self.base.get_pages_used()
+            + self.base.get_used_pages()
     }
 
     fn handle_user_collection_request(&self, _tls: VMMutatorThread, _force: bool) {
diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs
index 96226c7ff3..08ecff151e 100644
--- a/src/plan/pageprotect/global.rs
+++ b/src/plan/pageprotect/global.rs
@@ -79,12 +79,8 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
         self.base().collection_required(self, space_full, space)
     }
 
-    fn get_collection_reserve(&self) -> usize {
-        0
-    }
-
-    fn get_pages_used(&self) -> usize {
-        self.space.reserved_pages() + self.common.get_pages_used()
+    fn get_used_pages(&self) -> usize {
+        self.space.reserved_pages() + self.common.get_used_pages()
     }
 
     fn base(&self) -> &BasePlan<VM> {
diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs
index ea3e37f8cb..dacd2fc836 100644
--- a/src/plan/semispace/global.rs
+++ b/src/plan/semispace/global.rs
@@ -105,12 +105,12 @@ impl<VM: VMBinding> Plan for SemiSpace<VM> {
         self.base().collection_required(self, space_full, space)
     }
 
-    fn get_collection_reserve(&self) -> usize {
+    fn get_collection_reserved_pages(&self) -> usize {
         self.tospace().reserved_pages()
     }
 
-    fn get_pages_used(&self) -> usize {
-        self.tospace().reserved_pages() + self.common.get_pages_used()
+    fn get_used_pages(&self) -> usize {
+        self.tospace().reserved_pages() + self.common.get_used_pages()
    }
 
     fn base(&self) -> &BasePlan<VM> {
diff --git a/src/policy/immix/defrag.rs b/src/policy/immix/defrag.rs
index 500f116c39..0d1313e1d1 100644
--- a/src/policy/immix/defrag.rs
+++ b/src/policy/immix/defrag.rs
@@ -111,7 +111,7 @@ impl Defrag {
 
         let mut available_clean_pages_for_defrag = VM::VMActivePlan::global().get_total_pages()
             as isize
-            - VM::VMActivePlan::global().get_pages_reserved() as isize
+            - VM::VMActivePlan::global().get_reserved_pages() as isize
             + self.defrag_headroom_pages(space) as isize;
         if available_clean_pages_for_defrag < 0 {
            available_clean_pages_for_defrag = 0
@@ -126,7 +126,7 @@ impl Defrag {
 
         self.available_clean_pages_for_defrag.store(
             available_clean_pages_for_defrag as usize
-                + VM::VMActivePlan::global().get_collection_reserve(),
+                + VM::VMActivePlan::global().get_collection_reserved_pages(),
             Ordering::Release,
         );
     }
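
As a quick sanity check of the renamed accounting methods, the sketch below restates the default relationships from `src/plan/global.rs` as free functions. This is illustrative only and not part of the patch; the function names and the numbers in `main` are made up for the example.

```rust
// Illustrative sketch (not part of the patch): the default relationships between
// the renamed page-accounting methods on the `Plan` trait, restated as free functions.

/// get_reserved_pages() = get_used_pages() + get_collection_reserved_pages()
fn reserved_pages(used: usize, collection_reserved: usize) -> usize {
    used + collection_reserved
}

/// get_available_pages() = total - reserved, clamped at zero: a copying GC can
/// transiently push the reservation above the total heap size.
fn available_pages(total: usize, reserved: usize) -> usize {
    total.saturating_sub(reserved)
}

/// get_free_pages() = total - used: free pages are merely unused; some of them
/// may still be reserved (e.g. as the collection copy reserve).
fn free_pages(total: usize, used: usize) -> usize {
    total - used
}

fn main() {
    // Made-up numbers: a semispace-like plan whose copy reserve equals its used pages.
    let (total, used, copy_reserve) = (1024, 700, 700);
    let reserved = reserved_pages(used, copy_reserve);
    assert_eq!(reserved, 1400); // exceeds `total`, hence the saturating subtraction
    assert_eq!(available_pages(total, reserved), 0);
    assert_eq!(free_pages(total, used), 324);
}
```

The saturating subtraction in `get_available_pages` is what lets the GC trigger treat an over-reserved heap as simply having zero available pages instead of underflowing.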