Remove cast ref to mut everywhere #893

Merged
merged 12 commits on Aug 30, 2023
2 changes: 1 addition & 1 deletion .github/scripts/ci-style.sh
@@ -1,6 +1,6 @@
. $(dirname "$0")/ci-common.sh

export RUSTFLAGS="-D warnings"
export RUSTFLAGS="-D warnings -A unknown-lints"

# --- Check main crate ---

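A plausible reading of the `-A unknown-lints` addition (an inference, not stated in the diff): the PR swaps `#[allow(clippy::cast_ref_to_mut)]` for newer lint names such as `clippy::needless_pass_by_ref_mut`, and toolchains that predate those names emit an `unknown_lints` warning for the attribute itself, which `-D warnings` would turn into a CI failure. A minimal Rust sketch of that interaction:

```rust
// On an older toolchain the lint name below is unrecognised, so the
// attribute itself raises the `unknown_lints` warning; under
// RUSTFLAGS="-D warnings" that warning becomes a hard error unless
// unknown-lints is explicitly allowed, as the CI script now does.
#[allow(clippy::needless_pass_by_ref_mut)]
fn peek(counter: &mut u64) -> u64 {
    // Takes &mut but only reads it, which is what the (newer) lint flags.
    *counter
}

fn main() {
    let mut c = 41;
    println!("c + 1 = {}", peek(&mut c) + 1);
}
```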
37 changes: 23 additions & 14 deletions src/memory_manager.rs
@@ -88,7 +88,12 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {

#[cfg(feature = "vm_space")]
pub fn lazy_init_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
mmtk.plan.base_mut().vm_space.lazy_initialize(start, size);
unsafe {
mmtk.get_plan_mut()
.base_mut()
.vm_space
.lazy_initialize(start, size);
}
}

/// Request MMTk to create a mutator for the given thread. The ownership
@@ -345,7 +350,7 @@ pub fn get_allocator_mapping<VM: VMBinding>(
mmtk: &MMTK<VM>,
semantics: AllocationSemantics,
) -> AllocatorSelector {
mmtk.plan.get_allocator_mapping()[semantics]
mmtk.get_plan().get_allocator_mapping()[semantics]
}

/// The standard malloc. MMTk either uses its own allocator, or forward the call to a
@@ -467,11 +472,14 @@ pub fn start_worker<VM: VMBinding>(
/// Collection::spawn_gc_thread() so that the VM knows the context.
pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThread) {
assert!(
!mmtk.plan.is_initialized(),
!mmtk.get_plan().is_initialized(),
"MMTk collection has been initialized (was initialize_collection() already called before?)"
);
mmtk.scheduler.spawn_gc_threads(mmtk, tls);
mmtk.plan.base().initialized.store(true, Ordering::SeqCst);
mmtk.get_plan()
.base()
.initialized
.store(true, Ordering::SeqCst);
probe!(mmtk, collection_initialized);
}

@@ -483,10 +491,10 @@ pub fn initialize_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>, tls: VMThre
/// * `mmtk`: A reference to an MMTk instance.
pub fn enable_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
debug_assert!(
!mmtk.plan.should_trigger_gc_when_heap_is_full(),
!mmtk.get_plan().should_trigger_gc_when_heap_is_full(),
"enable_collection() is called when GC is already enabled."
);
mmtk.plan
mmtk.get_plan()
.base()
.trigger_gc_when_heap_is_full
.store(true, Ordering::SeqCst);
@@ -504,10 +512,10 @@ pub fn enable_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
/// * `mmtk`: A reference to an MMTk instance.
pub fn disable_collection<VM: VMBinding>(mmtk: &'static MMTK<VM>) {
debug_assert!(
mmtk.plan.should_trigger_gc_when_heap_is_full(),
mmtk.get_plan().should_trigger_gc_when_heap_is_full(),
"disable_collection() is called when GC is not enabled."
);
mmtk.plan
mmtk.get_plan()
.base()
.trigger_gc_when_heap_is_full
.store(false, Ordering::SeqCst);
@@ -538,7 +546,7 @@ pub fn process_bulk(builder: &mut MMTKBuilder, options: &str) -> bool {
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
mmtk.plan.get_used_pages() << LOG_BYTES_IN_PAGE
mmtk.get_plan().get_used_pages() << LOG_BYTES_IN_PAGE
}

/// Return free memory in bytes. MMTk accounts for memory in pages, thus this method always returns a value in
@@ -547,7 +555,7 @@ pub fn used_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
mmtk.plan.get_free_pages() << LOG_BYTES_IN_PAGE
mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE
}

/// Return the size of all the live objects in bytes in the last GC. MMTk usually accounts for memory in pages.
@@ -558,7 +566,7 @@ pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
/// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads).
#[cfg(feature = "count_live_bytes_in_gc")]
pub fn live_bytes_in_last_gc<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
mmtk.plan
mmtk.get_plan()
.base()
.live_bytes_in_last_gc
.load(Ordering::SeqCst)
@@ -581,7 +589,7 @@ pub fn last_heap_address() -> Address {
/// Arguments:
/// * `mmtk`: A reference to an MMTk instance.
pub fn total_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
mmtk.plan.get_total_pages() << LOG_BYTES_IN_PAGE
mmtk.get_plan().get_total_pages() << LOG_BYTES_IN_PAGE
}

/// Trigger a garbage collection as requested by the user.
@@ -590,7 +598,8 @@ pub fn total_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
/// * `mmtk`: A reference to an MMTk instance.
/// * `tls`: The thread that triggers this collection request.
pub fn handle_user_collection_request<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
mmtk.plan.handle_user_collection_request(tls, false, false);
mmtk.get_plan()
.handle_user_collection_request(tls, false, false);
}

/// Is the object alive?
@@ -709,7 +718,7 @@ pub fn is_mapped_address(address: Address) -> bool {
/// * `mmtk`: A reference to an MMTk instance.
/// * `object`: The object to check.
pub fn modify_check<VM: VMBinding>(mmtk: &MMTK<VM>, object: ObjectReference) {
mmtk.plan.modify_check(object);
mmtk.get_plan().modify_check(object);
}

/// Add a reference to the list of weak references. A binding may
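The byte-reporting functions above (`used_bytes`, `free_bytes`, `total_bytes`) are page counts shifted into bytes, so every figure is page-granular. A tiny worked sketch, assuming the common 4 KiB page size (`LOG_BYTES_IN_PAGE = 12` is an assumption for illustration; the real constant lives in `util::constants`):

```rust
// Assumed for illustration: 4 KiB pages, i.e. a page log of 12.
const LOG_BYTES_IN_PAGE: usize = 12;

// Same shape as used_bytes()/free_bytes(): page count shifted by the page log.
fn pages_to_bytes(pages: usize) -> usize {
    pages << LOG_BYTES_IN_PAGE
}

fn main() {
    assert_eq!(pages_to_bytes(1), 4096);
    assert_eq!(pages_to_bytes(256), 1 << 20); // 256 pages = 1 MiB
    println!("256 pages = {} bytes", pages_to_bytes(256));
}
```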
31 changes: 23 additions & 8 deletions src/mmtk.rs
@@ -15,6 +15,7 @@ use crate::util::reference_processor::ReferenceProcessors;
use crate::util::sanity::sanity_checker::SanityChecker;
use crate::vm::ReferenceGlue;
use crate::vm::VMBinding;
use std::cell::UnsafeCell;
use std::default::Default;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
@@ -30,10 +31,10 @@ lazy_static! {
// TODO: We should refactor this when we know more about how multiple MMTK instances work.

/// A global VMMap that manages the mapping of spaces to virtual memory ranges.
pub static ref VM_MAP: Box<dyn VMMap> = layout::create_vm_map();
pub static ref VM_MAP: Box<dyn VMMap + Send + Sync> = layout::create_vm_map();

/// A global Mmapper for mmaping and protection of virtual memory.
pub static ref MMAPPER: Box<dyn Mmapper> = layout::create_mmapper();
pub static ref MMAPPER: Box<dyn Mmapper + Send + Sync> = layout::create_mmapper();
}

use crate::util::rust_util::InitializeOnce;
@@ -88,7 +89,7 @@ impl Default for MMTKBuilder {
/// *Note that multi-instances is not fully supported yet*
pub struct MMTK<VM: VMBinding> {
pub(crate) options: Arc<Options>,
pub(crate) plan: Box<dyn Plan<VM = VM>>,
pub(crate) plan: UnsafeCell<Box<dyn Plan<VM = VM>>>,
pub(crate) reference_processors: ReferenceProcessors,
pub(crate) finalizable_processor:
Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
@@ -100,6 +101,9 @@ pub struct MMTK<VM: VMBinding> {
inside_harness: AtomicBool,
}

unsafe impl<VM: VMBinding> Sync for MMTK<VM> {}
unsafe impl<VM: VMBinding> Send for MMTK<VM> {}

impl<VM: VMBinding> MMTK<VM> {
pub fn new(options: Arc<Options>) -> Self {
// Initialize SFT first in case we need to use this in the constructor.
@@ -136,7 +140,7 @@ impl<VM: VMBinding> MMTK<VM> {

MMTK {
options,
plan,
plan: UnsafeCell::new(plan),
reference_processors: ReferenceProcessors::new(),
finalizable_processor: Mutex::new(FinalizableProcessor::<
<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
@@ -152,20 +156,31 @@ impl<VM: VMBinding> MMTK<VM> {

pub fn harness_begin(&self, tls: VMMutatorThread) {
probe!(mmtk, harness_begin);
self.plan.handle_user_collection_request(tls, true, true);
self.get_plan()
.handle_user_collection_request(tls, true, true);
self.inside_harness.store(true, Ordering::SeqCst);
self.plan.base().stats.start_all();
self.get_plan().base().stats.start_all();
self.scheduler.enable_stat();
}

pub fn harness_end(&'static self) {
self.plan.base().stats.stop_all(self);
self.get_plan().base().stats.stop_all(self);
self.inside_harness.store(false, Ordering::SeqCst);
probe!(mmtk, harness_end);
}

pub fn get_plan(&self) -> &dyn Plan<VM = VM> {
self.plan.as_ref()
unsafe { &**(self.plan.get()) }
}

/// Get the plan as mutable reference.
///
/// # Safety
///
/// This is unsafe because the caller must ensure that the plan is not used by other threads.
#[allow(clippy::mut_from_ref)]
pub unsafe fn get_plan_mut(&self) -> &mut dyn Plan<VM = VM> {
&mut **(self.plan.get())
}

pub fn get_options(&self) -> &Options {
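The two accessors above are the core of the change: the plan moves into an `UnsafeCell`, all reads go through `get_plan()`, and the mutable path becomes an explicit `unsafe fn` instead of a `&`-to-`&mut` cast. A self-contained sketch of the same shape (simplified names, not the real MMTk types):

```rust
use std::cell::UnsafeCell;

struct Plan {
    used_pages: usize,
}

struct Mmtk {
    plan: UnsafeCell<Plan>,
}

// UnsafeCell is !Sync, so the container opts back in explicitly,
// mirroring the `unsafe impl Sync/Send for MMTK` added in this diff.
unsafe impl Sync for Mmtk {}
unsafe impl Send for Mmtk {}

impl Mmtk {
    fn get_plan(&self) -> &Plan {
        unsafe { &*self.plan.get() }
    }

    /// # Safety
    /// The caller must guarantee that no other thread reads or writes the
    /// plan for the duration of the returned borrow.
    #[allow(clippy::mut_from_ref)]
    unsafe fn get_plan_mut(&self) -> &mut Plan {
        &mut *self.plan.get()
    }
}

fn main() {
    let mmtk = Mmtk {
        plan: UnsafeCell::new(Plan { used_pages: 0 }),
    };
    // Read path: safe accessor.
    assert_eq!(mmtk.get_plan().used_pages, 0);
    // Write path: the caller takes responsibility, as lazy_init_vm_space() now does.
    unsafe { mmtk.get_plan_mut().used_pages = 42 };
    assert_eq!(mmtk.get_plan().used_pages, 42);
}
```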
9 changes: 6 additions & 3 deletions src/plan/generational/copying/mutator.rs
@@ -32,16 +32,19 @@ pub fn create_gencopy_mutator<VM: VMBinding>(
mutator_tls: VMMutatorThread,
mmtk: &'static MMTK<VM>,
) -> Mutator<VM> {
let gencopy = mmtk.plan.downcast_ref::<GenCopy<VM>>().unwrap();
let gencopy = mmtk.get_plan().downcast_ref::<GenCopy<VM>>().unwrap();
let config = MutatorConfig {
allocator_mapping: &ALLOCATOR_MAPPING,
space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &gencopy.gen.nursery)),
space_mapping: Box::new(create_gen_space_mapping(
mmtk.get_plan(),
&gencopy.gen.nursery,
)),
prepare_func: &gencopy_mutator_prepare,
release_func: &gencopy_mutator_release,
};

Mutator {
allocators: Allocators::<VM>::new(mutator_tls, &*mmtk.plan, &config.space_mapping),
allocators: Allocators::<VM>::new(mutator_tls, mmtk.get_plan(), &config.space_mapping),
barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new(
mmtk, gencopy,
))),
14 changes: 12 additions & 2 deletions src/plan/generational/gc_work.rs
@@ -102,7 +102,12 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for ProcessModBuf<E> {
);
}
// scan modbuf only if the current GC is a nursery GC
if mmtk.plan.generational().unwrap().is_current_gc_nursery() {
if mmtk
.get_plan()
.generational()
.unwrap()
.is_current_gc_nursery()
{
// Scan objects in the modbuf and forward pointers
let modbuf = std::mem::take(&mut self.modbuf);
GCWork::do_work(
@@ -135,7 +140,12 @@ impl<E: ProcessEdgesWork> ProcessRegionModBuf<E> {
impl<E: ProcessEdgesWork> GCWork<E::VM> for ProcessRegionModBuf<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
// Scan modbuf only if the current GC is a nursery GC
if mmtk.plan.generational().unwrap().is_current_gc_nursery() {
if mmtk
.get_plan()
.generational()
.unwrap()
.is_current_gc_nursery()
{
// Collect all the entries in all the slices
let mut edges = vec![];
for slice in &self.modbuf {
9 changes: 6 additions & 3 deletions src/plan/generational/immix/mutator.rs
@@ -30,16 +30,19 @@ pub fn create_genimmix_mutator<VM: VMBinding>(
mutator_tls: VMMutatorThread,
mmtk: &'static MMTK<VM>,
) -> Mutator<VM> {
let genimmix = mmtk.plan.downcast_ref::<GenImmix<VM>>().unwrap();
let genimmix = mmtk.get_plan().downcast_ref::<GenImmix<VM>>().unwrap();
let config = MutatorConfig {
allocator_mapping: &ALLOCATOR_MAPPING,
space_mapping: Box::new(create_gen_space_mapping(&*mmtk.plan, &genimmix.gen.nursery)),
space_mapping: Box::new(create_gen_space_mapping(
mmtk.get_plan(),
&genimmix.gen.nursery,
)),
prepare_func: &genimmix_mutator_prepare,
release_func: &genimmix_mutator_release,
};

Mutator {
allocators: Allocators::<VM>::new(mutator_tls, &*mmtk.plan, &config.space_mapping),
allocators: Allocators::<VM>::new(mutator_tls, mmtk.get_plan(), &config.space_mapping),
barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new(
mmtk, genimmix,
))),
17 changes: 10 additions & 7 deletions src/plan/global.rs
@@ -40,9 +40,9 @@ pub fn create_mutator<VM: VMBinding>(
mmtk: &'static MMTK<VM>,
) -> Box<Mutator<VM>> {
Box::new(match *mmtk.options.plan {
PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, &*mmtk.plan),
PlanSelector::NoGC => crate::plan::nogc::mutator::create_nogc_mutator(tls, mmtk.get_plan()),
PlanSelector::SemiSpace => {
crate::plan::semispace::mutator::create_ss_mutator(tls, &*mmtk.plan)
crate::plan::semispace::mutator::create_ss_mutator(tls, mmtk.get_plan())
}
PlanSelector::GenCopy => {
crate::plan::generational::copying::mutator::create_gencopy_mutator(tls, mmtk)
@@ -51,14 +51,16 @@ pub fn create_mutator<VM: VMBinding>(
crate::plan::generational::immix::mutator::create_genimmix_mutator(tls, mmtk)
}
PlanSelector::MarkSweep => {
crate::plan::marksweep::mutator::create_ms_mutator(tls, &*mmtk.plan)
crate::plan::marksweep::mutator::create_ms_mutator(tls, mmtk.get_plan())
}
PlanSelector::Immix => {
crate::plan::immix::mutator::create_immix_mutator(tls, mmtk.get_plan())
}
PlanSelector::Immix => crate::plan::immix::mutator::create_immix_mutator(tls, &*mmtk.plan),
PlanSelector::PageProtect => {
crate::plan::pageprotect::mutator::create_pp_mutator(tls, &*mmtk.plan)
crate::plan::pageprotect::mutator::create_pp_mutator(tls, mmtk.get_plan())
}
PlanSelector::MarkCompact => {
crate::plan::markcompact::mutator::create_markcompact_mutator(tls, &*mmtk.plan)
crate::plan::markcompact::mutator::create_markcompact_mutator(tls, mmtk.get_plan())
}
PlanSelector::StickyImmix => {
crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk)
@@ -137,7 +139,7 @@ pub fn create_gc_worker_context<VM: VMBinding>(
tls: VMWorkerThread,
mmtk: &'static MMTK<VM>,
) -> GCWorkerCopyContext<VM> {
GCWorkerCopyContext::<VM>::new(tls, &*mmtk.plan, mmtk.plan.create_copy_config())
GCWorkerCopyContext::<VM>::new(tls, mmtk.get_plan(), mmtk.get_plan().create_copy_config())
}

/// A plan describes the global core functionality for all memory management schemes.
@@ -857,6 +859,7 @@ impl<VM: VMBinding> BasePlan<VM> {
}

#[allow(unused_variables)] // depending on the enabled features, base may not be used.
#[allow(clippy::needless_pass_by_ref_mut)] // depending on the enabled features, base may not be used.
pub(crate) fn verify_side_metadata_sanity(
&self,
side_metadata_sanity_checker: &mut SideMetadataSanity,
2 changes: 1 addition & 1 deletion src/plan/markcompact/gc_work.rs
@@ -39,7 +39,7 @@ impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
// The following needs to be done right before the second round of root scanning
VM::VMScanning::prepare_for_roots_re_scanning();
mmtk.plan.base().prepare_for_stack_scanning();
mmtk.get_plan().base().prepare_for_stack_scanning();
#[cfg(feature = "extreme_assertions")]
mmtk.edge_logger.reset();

8 changes: 4 additions & 4 deletions src/plan/sticky/immix/mutator.rs
@@ -24,12 +24,12 @@ pub fn create_stickyimmix_mutator<VM: VMBinding>(
mutator_tls: VMMutatorThread,
mmtk: &'static MMTK<VM>,
) -> Mutator<VM> {
let stickyimmix = mmtk.plan.downcast_ref::<StickyImmix<VM>>().unwrap();
let stickyimmix = mmtk.get_plan().downcast_ref::<StickyImmix<VM>>().unwrap();
let config = MutatorConfig {
allocator_mapping: &ALLOCATOR_MAPPING,
space_mapping: Box::new({
let mut vec =
create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, &*mmtk.plan);
create_space_mapping(immix::mutator::RESERVED_ALLOCATORS, true, mmtk.get_plan());
vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space()));
vec
}),
@@ -38,13 +38,13 @@ pub fn create_stickyimmix_mutator<VM: VMBinding>(
};

Mutator {
allocators: Allocators::<VM>::new(mutator_tls, &*mmtk.plan, &config.space_mapping),
allocators: Allocators::<VM>::new(mutator_tls, mmtk.get_plan(), &config.space_mapping),
barrier: Box::new(ObjectBarrier::new(GenObjectBarrierSemantics::new(
mmtk,
stickyimmix,
))),
mutator_tls,
config,
plan: &*mmtk.plan,
plan: mmtk.get_plan(),
}
}
2 changes: 1 addition & 1 deletion src/policy/immix/immixspace.rs
@@ -195,7 +195,7 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmixSpace
if Block::containing::<VM>(object).is_defrag_source() {
debug_assert!(self.in_defrag());
debug_assert!(
!crate::plan::is_nursery_gc(&*worker.mmtk.plan),
!crate::plan::is_nursery_gc(worker.mmtk.get_plan()),
"Calling PolicyTraceObject on Immix in nursery GC"
);
self.trace_object_with_opportunistic_copy(
7 changes: 6 additions & 1 deletion src/scheduler/controller.rs
@@ -130,7 +130,12 @@ impl<VM: VMBinding> GCController<VM> {
self.scheduler.deactivate_all();

// Tell GC trigger that GC ended - this happens before EndOfGC where we resume mutators.
self.mmtk.plan.base().gc_trigger.policy.on_gc_end(self.mmtk);
self.mmtk
.get_plan()
.base()
.gc_trigger
.policy
.on_gc_end(self.mmtk);

// Finalization: Resume mutators, reset gc states
// Note: Resume-mutators must happen after all work buckets are closed.
61 changes: 32 additions & 29 deletions src/scheduler/gc_work.rs
@@ -14,14 +14,14 @@ pub struct ScheduleCollection;

impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
mmtk.plan.schedule_collection(worker.scheduler());
mmtk.get_plan().schedule_collection(worker.scheduler());

// Tell GC trigger that GC started.
// We now know what kind of GC this is (e.g. nursery vs mature in gen copy, defrag vs fast in Immix)
// TODO: Depending on the OS scheduling, other workers can run so fast that they can finish
// everything in the `Unconstrained` and the `Prepare` buckets before we execute the next
// statement. Consider if there is a better place to call `on_gc_start`.
mmtk.plan.base().gc_trigger.policy.on_gc_start(mmtk);
mmtk.get_plan().base().gc_trigger.policy.on_gc_start(mmtk);
}
}

@@ -33,11 +33,13 @@ impl<VM: VMBinding> GCWork<VM> for ScheduleCollection {
/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
/// be a race condition.
pub struct Prepare<C: GCWorkContext> {
pub plan: &'static C::PlanType,
pub plan: *const C::PlanType,
}

unsafe impl<C: GCWorkContext> Send for Prepare<C> {}

impl<C: GCWorkContext> Prepare<C> {
pub fn new(plan: &'static C::PlanType) -> Self {
pub fn new(plan: *const C::PlanType) -> Self {
Self { plan }
}
}
@@ -46,7 +48,6 @@ impl<C: GCWorkContext + 'static> GCWork<C::VM> for Prepare<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("Prepare Global");
// We assume this is the only running work packet that accesses plan at the point of execution
#[allow(clippy::cast_ref_to_mut)]
let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
plan_mut.prepare(worker.tls);

@@ -89,7 +90,7 @@ impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
trace!("Prepare Collector");
worker.get_copy_context_mut().prepare();
mmtk.plan.prepare_worker(worker);
mmtk.get_plan().prepare_worker(worker);
}
}

@@ -101,23 +102,26 @@ impl<VM: VMBinding> GCWork<VM> for PrepareCollector {
/// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
/// be a race condition.
pub struct Release<C: GCWorkContext> {
pub plan: &'static C::PlanType,
pub plan: *const C::PlanType,
}

impl<C: GCWorkContext> Release<C> {
pub fn new(plan: &'static C::PlanType) -> Self {
pub fn new(plan: *const C::PlanType) -> Self {
Self { plan }
}
}

unsafe impl<C: GCWorkContext> Send for Release<C> {}

impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
trace!("Release Global");

self.plan.base().gc_trigger.policy.on_gc_release(mmtk);

unsafe {
(*self.plan).base().gc_trigger.policy.on_gc_release(mmtk);
}
// We assume this is the only running work packet that accesses plan at the point of execution
#[allow(clippy::cast_ref_to_mut)]

let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
plan_mut.release(worker.tls);

@@ -136,7 +140,7 @@ impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
.scheduler
.worker_group
.get_and_clear_worker_live_bytes();
self.plan
mmtk.get_plan()
.base()
.live_bytes_in_last_gc
.store(live_bytes, std::sync::atomic::Ordering::SeqCst);
@@ -190,7 +194,7 @@ impl<ScanEdges: ProcessEdgesWork> StopMutators<ScanEdges> {
impl<E: ProcessEdgesWork> GCWork<E::VM> for StopMutators<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
trace!("stop_all_mutators start");
mmtk.plan.base().prepare_for_stack_scanning();
mmtk.get_plan().base().prepare_for_stack_scanning();
<E::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
// TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused).
// Should we push to Unconstrained instead?
@@ -212,20 +216,20 @@ impl<VM: VMBinding> GCWork<VM> for EndOfGC {
fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
info!(
"End of GC ({}/{} pages, took {} ms)",
mmtk.plan.get_reserved_pages(),
mmtk.plan.get_total_pages(),
mmtk.get_plan().get_reserved_pages(),
mmtk.get_plan().get_total_pages(),
self.elapsed.as_millis()
);

#[cfg(feature = "count_live_bytes_in_gc")]
{
let live_bytes = mmtk
.plan
.get_plan()
.base()
.live_bytes_in_last_gc
.load(std::sync::atomic::Ordering::SeqCst);
let used_bytes =
mmtk.plan.get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE;
mmtk.get_plan().get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE;
debug_assert!(
live_bytes <= used_bytes,
"Live bytes of all live objects ({} bytes) is larger than used pages ({} bytes), something is wrong.",
@@ -235,25 +239,24 @@ impl<VM: VMBinding> GCWork<VM> for EndOfGC {
"Live objects = {} bytes ({:04.1}% of {} used pages)",
live_bytes,
live_bytes as f64 * 100.0 / used_bytes as f64,
mmtk.plan.get_used_pages()
mmtk.get_plan().get_used_pages()
);
}

// We assume this is the only running work packet that accesses plan at the point of execution
#[allow(clippy::cast_ref_to_mut)]
let plan_mut: &mut dyn Plan<VM = VM> = unsafe { &mut *(&*mmtk.plan as *const _ as *mut _) };
let plan_mut: &mut dyn Plan<VM = VM> = unsafe { mmtk.get_plan_mut() };
plan_mut.end_of_gc(worker.tls);

#[cfg(feature = "extreme_assertions")]
if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) {
if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) {
// reset the logging info at the end of each GC
mmtk.edge_logger.reset();
}

mmtk.plan.base().set_gc_status(GcStatus::NotInGC);
mmtk.get_plan().base().set_gc_status(GcStatus::NotInGC);

// Reset the triggering information.
mmtk.plan.base().reset_collection_trigger();
mmtk.get_plan().base().reset_collection_trigger();

<VM as VMBinding>::VMCollection::resume_mutators(worker.tls);
}
@@ -448,7 +451,7 @@ pub struct ScanMutatorRoots<Edges: ProcessEdgesWork>(pub &'static mut Mutator<Ed
impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanMutatorRoots<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
trace!("ScanMutatorRoots for mutator {:?}", self.0.get_tls());
let base = &mmtk.plan.base();
let base = mmtk.get_plan().base();
let mutators = <E::VM as VMBinding>::VMActivePlan::number_of_mutators();
let factory = ProcessEdgesWorkRootsWorkFactory::<E>::new(mmtk);
<E::VM as VMBinding>::VMScanning::scan_roots_in_mutator_thread(
@@ -458,7 +461,7 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for ScanMutatorRoots<E> {
);
self.0.flush();

if mmtk.plan.base().inform_stack_scanned(mutators) {
if mmtk.get_plan().base().inform_stack_scanned(mutators) {
<E::VM as VMBinding>::VMScanning::notify_initial_thread_scan_complete(
false, worker.tls,
);
@@ -501,7 +504,7 @@ impl<VM: VMBinding> ProcessEdgesBase<VM> {
// at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced.
pub fn new(edges: Vec<VM::VMEdge>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
#[cfg(feature = "extreme_assertions")]
if crate::util::edge_logger::should_check_duplicate_edges(&*mmtk.plan) {
if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) {
for edge in &edges {
// log edge, panic if already logged
mmtk.edge_logger.log_edge(*edge);
@@ -528,7 +531,7 @@ impl<VM: VMBinding> ProcessEdgesBase<VM> {
}

pub fn plan(&self) -> &'static dyn Plan<VM = VM> {
&*self.mmtk.plan
self.mmtk.get_plan()
}

/// Pop all nodes from nodes, and clear nodes to an empty vector.
@@ -640,7 +643,7 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for E {
self.flush();
}
#[cfg(feature = "sanity")]
if self.roots && !_mmtk.plan.is_in_sanity() {
if self.roots && !_mmtk.get_plan().is_in_sanity() {
self.cache_roots_for_sanity_gc();
}
trace!("ProcessEdgesWork End");
@@ -760,7 +763,7 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {

#[cfg(feature = "sanity")]
{
if self.roots() && !mmtk.plan.is_in_sanity() {
if self.roots() && !mmtk.get_plan().is_in_sanity() {
mmtk.sanity_checker
.lock()
.unwrap()
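The `Prepare`/`Release` change above is the other recurring shape in this PR: instead of holding `&'static Plan` and casting the shared reference to `&mut` (the removed `#[allow(clippy::cast_ref_to_mut)]` pattern, which is undefined behaviour), the work packet holds a raw pointer, asserts `Send` manually, and re-creates the `&mut` under the documented single-packet assumption. A rough standalone sketch of that shape (in the real code the plan additionally sits inside MMTK's `UnsafeCell`, which is what makes the mutation defensible):

```rust
struct Plan {
    prepared: bool,
}

impl Plan {
    fn prepare(&mut self) {
        self.prepared = true;
    }
}

// The packet stores a raw pointer rather than a shared reference, so a &mut
// can later be produced without casting away a & borrow.
struct Prepare {
    plan: *const Plan,
}

// Raw pointers are !Send; the scheduler moves packets across threads, so
// Send is asserted manually, exactly as the diff does for Prepare/Release.
unsafe impl Send for Prepare {}

impl Prepare {
    fn new(plan: *const Plan) -> Self {
        Self { plan }
    }

    fn do_work(&mut self) {
        // SAFETY (assumed, per the original comment): this is the only work
        // packet touching the plan at this point in the GC schedule.
        let plan_mut: &mut Plan = unsafe { &mut *(self.plan as *mut Plan) };
        plan_mut.prepare();
    }
}

fn main() {
    let mut plan = Plan { prepared: false };
    let mut packet = Prepare::new(&mut plan as *mut Plan as *const Plan);
    packet.do_work();
    assert!(plan.prepared);
}
```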
2 changes: 1 addition & 1 deletion src/scheduler/mod.rs
@@ -7,7 +7,7 @@ mod scheduler;
pub(crate) use scheduler::GCWorkScheduler;

mod stat;
pub(self) mod work_counter;
mod work_counter;

mod work;
pub use work::GCWork;
4 changes: 2 additions & 2 deletions src/scheduler/scheduler.rs
@@ -93,7 +93,7 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
);
let gc_controller = GCController::new(
mmtk,
mmtk.plan.base().gc_requester.clone(),
mmtk.get_plan().base().gc_requester.clone(),
self.clone(),
coordinator_worker,
);
@@ -399,7 +399,7 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
}

pub fn notify_mutators_paused(&self, mmtk: &'static MMTK<VM>) {
mmtk.plan.base().gc_requester.clear_request();
mmtk.get_plan().base().gc_requester.clear_request();
let first_stw_bucket = &self.work_buckets[WorkBucketStage::first_stw_stage()];
debug_assert!(!first_stw_bucket.is_activated());
// Note: This is the only place where a non-coordinator thread opens a bucket.
2 changes: 1 addition & 1 deletion src/util/analysis/mod.rs
@@ -33,7 +33,7 @@ pub struct GcHookWork;

impl<VM: VMBinding> GCWork<VM> for GcHookWork {
fn do_work(&mut self, _worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
let base = &mmtk.plan.base();
let base = &mmtk.get_plan().base();
base.analysis_manager.gc_hook(mmtk);
}
}
6 changes: 3 additions & 3 deletions src/util/finalizable_processor.rs
@@ -149,7 +149,7 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for Finalization<E> {

let mut w = E::new(vec![], false, mmtk);
w.set_worker(worker);
finalizable_processor.scan(worker.tls, &mut w, is_nursery_gc(&*mmtk.plan));
finalizable_processor.scan(worker.tls, &mut w, is_nursery_gc(mmtk.get_plan()));
debug!(
"Finished finalization, {} objects in candidates, {} objects ready to finalize",
finalizable_processor.candidates.len(),
@@ -172,9 +172,9 @@ impl<E: ProcessEdgesWork> GCWork<E::VM> for ForwardFinalization<E> {
let mut finalizable_processor = mmtk.finalizable_processor.lock().unwrap();
let mut w = E::new(vec![], false, mmtk);
w.set_worker(worker);
finalizable_processor.forward_candidate(&mut w, is_nursery_gc(&*mmtk.plan));
finalizable_processor.forward_candidate(&mut w, is_nursery_gc(mmtk.get_plan()));

finalizable_processor.forward_finalizable(&mut w, is_nursery_gc(&*mmtk.plan));
finalizable_processor.forward_finalizable(&mut w, is_nursery_gc(mmtk.get_plan()));
trace!("Finished forwarding finalizable");
}
}
139 changes: 93 additions & 46 deletions src/util/heap/freelistpageresource.rs
@@ -1,3 +1,4 @@
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::{Mutex, MutexGuard};

@@ -37,14 +38,21 @@ impl CommonFreeListPageResource {
}

pub struct FreeListPageResource<VM: VMBinding> {
common: CommonPageResource,
common_flpr: Box<CommonFreeListPageResource>,
inner: UnsafeCell<FreeListPageResourceInner>,
sync: Mutex<FreeListPageResourceSync>,
_p: PhantomData<VM>,
/// Protect memory on release, and unprotect on re-allocate.
pub(crate) protect_memory_on_release: bool,
}

unsafe impl<VM: VMBinding> Send for FreeListPageResource<VM> {}
unsafe impl<VM: VMBinding> Sync for FreeListPageResource<VM> {}

struct FreeListPageResourceInner {
common: CommonPageResource,
common_flpr: Box<CommonFreeListPageResource>,
}

struct FreeListPageResourceSync {
pages_currently_on_freelist: usize,
highwater_mark: i32,
@@ -54,34 +62,49 @@ impl<VM: VMBinding> Deref for FreeListPageResource<VM> {
type Target = CommonFreeListPageResource;

fn deref(&self) -> &CommonFreeListPageResource {
&self.common_flpr
&self.inner().common_flpr
}
}

impl<VM: VMBinding> DerefMut for FreeListPageResource<VM> {
fn deref_mut(&mut self) -> &mut CommonFreeListPageResource {
&mut self.inner.get_mut().common_flpr
}
}

impl Deref for FreeListPageResourceInner {
type Target = CommonFreeListPageResource;

fn deref(&self) -> &CommonFreeListPageResource {
&self.common_flpr
}
}

impl DerefMut for FreeListPageResourceInner {
fn deref_mut(&mut self) -> &mut CommonFreeListPageResource {
&mut self.common_flpr
}
}

impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
fn common(&self) -> &CommonPageResource {
&self.common
&self.inner().common
}
fn common_mut(&mut self) -> &mut CommonPageResource {
&mut self.common
&mut self.inner.get_mut().common
}

fn get_available_physical_pages(&self) -> usize {
let mut rtn = self.sync.lock().unwrap().pages_currently_on_freelist;
if !self.common.contiguous {
if !self.inner().common.contiguous {
let chunks: usize = self
.inner()
.common
.vm_map
.get_available_discontiguous_chunks()
.saturating_sub(self.common.vm_map.get_chunk_consumer_count());
.saturating_sub(self.inner().common.vm_map.get_chunk_consumer_count());
rtn += chunks * PAGES_IN_CHUNK;
} else if self.common.growable && cfg!(target_pointer_width = "64") {
} else if self.inner().common.growable && cfg!(target_pointer_width = "64") {
rtn = vm_layout().pages_in_space64() - self.reserved_pages();
}

@@ -96,14 +119,14 @@ impl<VM: VMBinding> PageResource<VM> for FreeListPageResource<VM> {
tls: VMThread,
) -> Result<PRAllocResult, PRAllocFail> {
// FIXME: We need a safe implementation
#[allow(clippy::cast_ref_to_mut)]
let self_mut: &mut Self = unsafe { &mut *(self as *const _ as *mut _) };
let self_mut = unsafe { self.inner_mut() };
let mut sync = self.sync.lock().unwrap();
let mut new_chunk = false;
let mut page_offset = self_mut.free_list.alloc(required_pages as _);
if page_offset == freelist::FAILURE && self.common.growable {
page_offset =
self_mut.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync);
if page_offset == freelist::FAILURE && self.inner().common.growable {
page_offset = unsafe {
self.allocate_contiguous_chunks(space_descriptor, required_pages, &mut sync)
};
new_chunk = true;
}

@@ -157,15 +180,17 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
// `CommonFreeListPageResource` lives as a member in space instances.
// Since `Space` instances are always stored as global variables, it is okay here
// to turn `&CommonFreeListPageResource` into `&'static CommonFreeListPageResource`
vm_map.bind_freelist(unsafe {
&*(&common_flpr as &CommonFreeListPageResource as *const _)
});
unsafe {
vm_map.bind_freelist(&*(&common_flpr as &CommonFreeListPageResource as *const _));
}
common_flpr
};
let growable = cfg!(target_pointer_width = "64");
FreeListPageResource {
common: CommonPageResource::new(true, growable, vm_map),
common_flpr,
inner: UnsafeCell::new(FreeListPageResourceInner {
common: CommonPageResource::new(true, growable, vm_map),
common_flpr,
}),
sync: Mutex::new(FreeListPageResourceSync {
pages_currently_on_freelist: if growable { 0 } else { pages },
highwater_mark: UNINITIALIZED_WATER_MARK,
@@ -185,14 +210,16 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
// `CommonFreeListPageResource` lives as a member in space instances.
// Since `Space` instances are always stored as global variables, it is okay here
// to turn `&CommonFreeListPageResource` into `&'static CommonFreeListPageResource`
vm_map.bind_freelist(unsafe {
&*(&common_flpr as &CommonFreeListPageResource as *const _)
});
unsafe {
vm_map.bind_freelist(&*(&common_flpr as &CommonFreeListPageResource as *const _));
}
common_flpr
};
FreeListPageResource {
common: CommonPageResource::new(false, true, vm_map),
common_flpr,
inner: UnsafeCell::new(FreeListPageResourceInner {
common: CommonPageResource::new(false, true, vm_map),
common_flpr,
}),
sync: Mutex::new(FreeListPageResourceSync {
pages_currently_on_freelist: 0,
highwater_mark: UNINITIALIZED_WATER_MARK,
@@ -202,6 +229,14 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
}
}

fn inner(&self) -> &FreeListPageResourceInner {
unsafe { &*self.inner.get() }
}
#[allow(clippy::mut_from_ref)]
unsafe fn inner_mut(&self) -> &mut FreeListPageResourceInner {
&mut *self.inner.get()
}

/// Protect the memory
fn mprotect(&self, start: Address, pages: usize) {
// We may fail here for ENOMEM, especially in PageProtect plan.
@@ -236,13 +271,11 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
&self,
space_descriptor: SpaceDescriptor,
) -> Result<PRAllocResult, PRAllocFail> {
assert!(self.common.growable);
assert!(self.inner().common.growable);
// FIXME: We need a safe implementation
#[allow(clippy::cast_ref_to_mut)]
let self_mut: &mut Self = unsafe { &mut *(self as *const _ as *mut _) };
let mut sync = self.sync.lock().unwrap();
let page_offset =
self_mut.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync);
unsafe { self.allocate_contiguous_chunks(space_descriptor, PAGES_IN_CHUNK, &mut sync) };

if page_offset == freelist::FAILURE {
return Result::Err(PRAllocFail);
@@ -261,44 +294,53 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
})
}

fn allocate_contiguous_chunks(
&mut self,
unsafe fn allocate_contiguous_chunks(
&self,
space_descriptor: SpaceDescriptor,
pages: usize,
sync: &mut MutexGuard<FreeListPageResourceSync>,
) -> i32 {
let mut rtn = freelist::FAILURE;
let required_chunks = crate::policy::space::required_chunks(pages);
let region = self
.inner()
.common
.grow_discontiguous_space(space_descriptor, required_chunks);

if !region.is_zero() {
let region_start = conversions::bytes_to_pages(region - self.start);
let region_end = region_start + (required_chunks * PAGES_IN_CHUNK) - 1;
self.free_list.set_uncoalescable(region_start as _);
self.free_list.set_uncoalescable(region_end as i32 + 1);
self.inner_mut()
.free_list
.set_uncoalescable(region_start as _);
self.inner_mut()
.free_list
.set_uncoalescable(region_end as i32 + 1);
for p in (region_start..region_end).step_by(PAGES_IN_CHUNK) {
if p != region_start {
self.free_list.clear_uncoalescable(p as _);
self.inner_mut().free_list.clear_uncoalescable(p as _);
}
let liberated = self.free_list.free(p as _, true); // add chunk to our free list
let liberated = self.inner_mut().free_list.free(p as _, true); // add chunk to our free list
debug_assert!(liberated as usize == PAGES_IN_CHUNK + (p - region_start));
sync.pages_currently_on_freelist += PAGES_IN_CHUNK;
}
rtn = self.free_list.alloc(pages as _); // re-do the request which triggered this call
rtn = self.inner_mut().free_list.alloc(pages as _); // re-do the request which triggered this call
}

rtn
}

fn free_contiguous_chunk(&mut self, chunk: Address, sync: &mut FreeListPageResourceSync) {
unsafe fn free_contiguous_chunk(&self, chunk: Address, sync: &mut FreeListPageResourceSync) {
let num_chunks = self.vm_map().get_contiguous_region_chunks(chunk);
/* nail down all pages associated with the chunk, so it is no longer on our free list */
let mut chunk_start = conversions::bytes_to_pages(chunk - self.start);
let chunk_end = chunk_start + (num_chunks * PAGES_IN_CHUNK);
while chunk_start < chunk_end {
self.free_list.set_uncoalescable(chunk_start as _);
self.inner_mut()
.free_list
.set_uncoalescable(chunk_start as _);
let tmp = self
.inner_mut()
.free_list
.alloc_from_unit(PAGES_IN_CHUNK as _, chunk_start as _)
as usize; // then alloc the entire chunk
@@ -307,7 +349,8 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
sync.pages_currently_on_freelist -= PAGES_IN_CHUNK;
}
/* now return the address space associated with the chunk for global reuse */
self.common.release_discontiguous_chunks(chunk);

self.inner_mut().common.release_discontiguous_chunks(chunk);
}

pub fn release_pages(&self, first: Address) {
@@ -316,27 +359,26 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
let pages = self.free_list.size(page_offset as _);
// if (VM.config.ZERO_PAGES_ON_RELEASE)
// VM.memory.zero(false, first, Conversions.pagesToBytes(pages));
debug_assert!(pages as usize <= self.common.accounting.get_committed_pages());
debug_assert!(pages as usize <= self.inner().common.accounting.get_committed_pages());

if self.protect_memory_on_release {
self.mprotect(first, pages as _);
}

let mut sync = self.sync.lock().unwrap();
// FIXME
#[allow(clippy::cast_ref_to_mut)]
let me = unsafe { &mut *(self as *const _ as *mut Self) };
self.common.accounting.release(pages as _);
// FIXME: We need a safe implementation
let me = unsafe { self.inner_mut() };
self.inner().common.accounting.release(pages as _);
let freed = me.free_list.free(page_offset as _, true);
sync.pages_currently_on_freelist += pages as usize;
if !self.common.contiguous {
if !self.inner().common.contiguous {
// only discontiguous spaces use chunks
me.release_free_chunks(first, freed as _, &mut sync);
self.release_free_chunks(first, freed as _, &mut sync);
}
}

fn release_free_chunks(
&mut self,
&self,
freed_page: Address,
pages_freed: usize,
sync: &mut FreeListPageResourceSync,
@@ -362,7 +404,12 @@ impl<VM: VMBinding> FreeListPageResource<VM> {
debug_assert!(next_region_start < freelist::MAX_UNITS as usize);
if pages_freed == next_region_start - region_start {
let start = self.start;
self.free_contiguous_chunk(start + conversions::pages_to_bytes(region_start), sync);
unsafe {
self.free_contiguous_chunk(
start + conversions::pages_to_bytes(region_start),
sync,
);
}
}
}
}
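The page resource refactor above follows a split that recurs in the rest of the PR: state that is mutated only on paths the caller promises are exclusive moves into an `UnsafeCell`-wrapped inner struct behind `inner()`/`inner_mut()` accessors, while genuinely contended counters stay behind the `Mutex`. A much-reduced sketch of that split (the real type also derefs to `CommonFreeListPageResource`, tracks a high-water mark, and so on):

```rust
use std::cell::UnsafeCell;
use std::sync::Mutex;

// The half that is only touched on paths the caller promises are exclusive.
struct Inner {
    free_pages: Vec<usize>,
}

// The half that genuinely needs a lock.
struct SyncState {
    pages_currently_on_freelist: usize,
}

struct PageResource {
    inner: UnsafeCell<Inner>,
    sync: Mutex<SyncState>,
}

unsafe impl Sync for PageResource {}

impl PageResource {
    fn inner(&self) -> &Inner {
        unsafe { &*self.inner.get() }
    }

    /// # Safety
    /// The caller must have exclusive access to the inner half; in the real
    /// code this usually means holding `sync` or running single-threaded.
    #[allow(clippy::mut_from_ref)]
    unsafe fn inner_mut(&self) -> &mut Inner {
        &mut *self.inner.get()
    }

    fn alloc_page(&self) -> Option<usize> {
        let mut sync = self.sync.lock().unwrap();
        // SAFETY: the mutation happens while the sync lock is held.
        let page = unsafe { self.inner_mut() }.free_pages.pop();
        if page.is_some() {
            sync.pages_currently_on_freelist -= 1;
        }
        page
    }
}

fn main() {
    let pr = PageResource {
        inner: UnsafeCell::new(Inner { free_pages: vec![0, 1, 2] }),
        sync: Mutex::new(SyncState { pages_currently_on_freelist: 3 }),
    };
    assert_eq!(pr.alloc_page(), Some(2));
    assert_eq!(pr.sync.lock().unwrap().pages_currently_on_freelist, 2);
    assert_eq!(pr.inner().free_pages.len(), 2);
}
```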
22 changes: 11 additions & 11 deletions src/util/heap/gc_trigger.rs
@@ -286,22 +286,22 @@ impl MemBalancerStats {

fn non_generational_mem_stats_on_gc_start<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
self.allocation_pages = mmtk
.plan
.get_plan()
.get_reserved_pages()
.saturating_sub(self.gc_end_live_pages) as f64;
trace!(
"allocated pages = used {} - live in last gc {} = {}",
mmtk.plan.get_reserved_pages(),
mmtk.get_plan().get_reserved_pages(),
self.gc_end_live_pages,
self.allocation_pages
);
}
fn non_generational_mem_stats_on_gc_release<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
self.gc_release_live_pages = mmtk.plan.get_reserved_pages();
self.gc_release_live_pages = mmtk.get_plan().get_reserved_pages();
trace!("live before release = {}", self.gc_release_live_pages);
}
fn non_generational_mem_stats_on_gc_end<VM: VMBinding>(&mut self, mmtk: &'static MMTK<VM>) {
self.gc_end_live_pages = mmtk.plan.get_reserved_pages();
self.gc_end_live_pages = mmtk.get_plan().get_reserved_pages();
trace!("live pages = {}", self.gc_end_live_pages);
self.collection_pages = self
.gc_release_live_pages
@@ -331,7 +331,7 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
stats.allocation_time
);

if let Some(plan) = mmtk.plan.generational() {
if let Some(plan) = mmtk.get_plan().generational() {
stats.generational_mem_stats_on_gc_start(plan);
} else {
stats.non_generational_mem_stats_on_gc_start(mmtk);
@@ -342,7 +342,7 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
fn on_gc_release(&self, mmtk: &'static MMTK<VM>) {
trace!("=== on_gc_release ===");
self.access_stats(|stats| {
if let Some(plan) = mmtk.plan.generational() {
if let Some(plan) = mmtk.get_plan().generational() {
stats.generational_mem_stats_on_gc_release(plan);
} else {
stats.non_generational_mem_stats_on_gc_release(mmtk);
@@ -361,22 +361,22 @@ impl<VM: VMBinding> GCTriggerPolicy<VM> for MemBalancerTrigger {
stats.collection_time
);

if let Some(plan) = mmtk.plan.generational() {
if let Some(plan) = mmtk.get_plan().generational() {
if stats.generational_mem_stats_on_gc_end(plan) {
self.compute_new_heap_limit(
mmtk.plan.get_reserved_pages(),
mmtk.get_plan().get_reserved_pages(),
// We reserve an extra of min nursery. This ensures that we will not trigger
// a full heap GC in the next GC (if available pages is smaller than min nursery, we will force a full heap GC)
mmtk.plan.get_collection_reserved_pages()
mmtk.get_plan().get_collection_reserved_pages()
+ mmtk.options.get_min_nursery_pages(),
stats,
);
}
} else {
stats.non_generational_mem_stats_on_gc_end(mmtk);
self.compute_new_heap_limit(
mmtk.plan.get_reserved_pages(),
mmtk.plan.get_collection_reserved_pages(),
mmtk.get_plan().get_reserved_pages(),
mmtk.get_plan().get_collection_reserved_pages(),
stats,
);
}
74 changes: 45 additions & 29 deletions src/util/heap/layout/fragmented_mapper.rs
@@ -6,6 +6,7 @@ use crate::util::heap::layout::vm_layout::*;
use crate::util::memory::MmapStrategy;
use crate::util::Address;
use atomic::{Atomic, Ordering};
use std::cell::UnsafeCell;
use std::fmt;
use std::io::Result;
use std::mem::transmute;
@@ -46,6 +47,13 @@ type Slab = [Atomic<MapState>; MMAP_NUM_CHUNKS];

pub struct FragmentedMapper {
lock: Mutex<()>,
inner: UnsafeCell<InnerFragmentedMapper>,
}

unsafe impl Send for FragmentedMapper {}
unsafe impl Sync for FragmentedMapper {}

struct InnerFragmentedMapper {
free_slab_index: usize,
free_slabs: Vec<Option<Box<Slab>>>,
slab_table: Vec<Option<Box<Slab>>>,
@@ -61,7 +69,7 @@ impl fmt::Debug for FragmentedMapper {

impl Mmapper for FragmentedMapper {
fn set_mmap_strategy(&self, strategy: MmapStrategy) {
self.strategy.store(strategy, Ordering::Relaxed);
self.inner().strategy.store(strategy, Ordering::Relaxed);
}

fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) {}
@@ -132,7 +140,7 @@ impl Mmapper for FragmentedMapper {
MapState::bulk_transition_to_quarantined(
state_slices.as_slice(),
mmap_start,
self.strategy.load(Ordering::Relaxed),
self.inner().strategy.load(Ordering::Relaxed),
)?;
}

@@ -167,7 +175,7 @@ impl Mmapper for FragmentedMapper {
MapState::transition_to_mapped(
entry,
mmap_start,
self.strategy.load(Ordering::Relaxed),
self.inner().strategy.load(Ordering::Relaxed),
)?;
}
start = high;
@@ -223,11 +231,13 @@ impl FragmentedMapper {
pub fn new() -> Self {
Self {
lock: Mutex::new(()),
free_slab_index: 0,
free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(),
slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(),
slab_map: vec![SENTINEL; SLAB_TABLE_SIZE],
strategy: Atomic::new(MmapStrategy::Normal),
inner: UnsafeCell::new(InnerFragmentedMapper {
free_slab_index: 0,
free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(),
slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(),
slab_map: vec![SENTINEL; SLAB_TABLE_SIZE],
strategy: Atomic::new(MmapStrategy::Normal),
}),
}
}

@@ -248,23 +258,24 @@ impl FragmentedMapper {
}

fn slab_table(&self, addr: Address) -> Option<&Slab> {
unsafe { self.mut_self() }.get_or_optionally_allocate_slab_table(addr, false)
self.get_or_optionally_allocate_slab_table(addr, false)
}

fn get_or_allocate_slab_table(&self, addr: Address) -> &Slab {
unsafe { self.mut_self() }
.get_or_optionally_allocate_slab_table(addr, true)
self.get_or_optionally_allocate_slab_table(addr, true)
.unwrap()
}

#[allow(clippy::cast_ref_to_mut)]
fn inner(&self) -> &InnerFragmentedMapper {
unsafe { &*self.inner.get() }
}
#[allow(clippy::mut_from_ref)]
unsafe fn mut_self(&self) -> &mut Self {
&mut *(self as *const _ as *mut _)
fn inner_mut(&self) -> &mut InnerFragmentedMapper {
unsafe { &mut *self.inner.get() }
}

fn get_or_optionally_allocate_slab_table(
&mut self,
&self,
addr: Address,
allocate: bool,
) -> Option<&Slab> {
@@ -274,25 +285,27 @@ impl FragmentedMapper {
let mut index = hash; // Use 'index' to iterate over the hash table so that we remember where we started
loop {
/* Check for a hash-table hit. Should be the frequent case. */
if base == self.slab_map[index] {
if base == self.inner().slab_map[index] {
return self.slab_table_for(addr, index);
}
let _guard = self.lock.lock().unwrap();

/* Check whether another thread has allocated a slab while we were acquiring the lock */
if base == self.slab_map[index] {
if base == self.inner().slab_map[index] {
// drop(guard);
return self.slab_table_for(addr, index);
}

/* Check for a free slot */
if self.slab_map[index] == SENTINEL {
if self.inner().slab_map[index] == SENTINEL {
if !allocate {
// drop(guard);
return None;
}
unsafe { self.mut_self() }.commit_free_slab(index);
self.slab_map[index] = base;
unsafe {
self.commit_free_slab(index);
}
self.inner_mut().slab_map[index] = base;
return self.slab_table_for(addr, index);
}
// lock.release();
@@ -303,27 +316,30 @@ impl FragmentedMapper {
}

fn slab_table_for(&self, _addr: Address, index: usize) -> Option<&Slab> {
debug_assert!(self.slab_table[index].is_some());
self.slab_table[index].as_ref().map(|x| x as &Slab)
debug_assert!(self.inner().slab_table[index].is_some());
self.inner().slab_table[index].as_ref().map(|x| x as &Slab)
}

/**
* Take a free slab of chunks from the freeSlabs array, and insert it
* at the correct index in the slabTable.
* @param index slab table index
*/
fn commit_free_slab(&mut self, index: usize) {
/// # Safety
///
/// Caller must ensure that only one thread is calling this function at a time.
unsafe fn commit_free_slab(&self, index: usize) {
assert!(
self.free_slab_index < MAX_SLABS,
self.inner().free_slab_index < MAX_SLABS,
"All free slabs used: virtual address space is exhausted."
);
debug_assert!(self.slab_table[index].is_none());
debug_assert!(self.free_slabs[self.free_slab_index].is_some());
debug_assert!(self.inner().slab_table[index].is_none());
debug_assert!(self.inner().free_slabs[self.inner().free_slab_index].is_some());
::std::mem::swap(
&mut self.slab_table[index],
&mut self.free_slabs[self.free_slab_index],
&mut self.inner_mut().slab_table[index],
&mut self.inner_mut().free_slabs[self.inner().free_slab_index],
);
self.free_slab_index += 1;
self.inner_mut().free_slab_index += 1;
}

fn chunk_index_to_address(base: Address, chunk: usize) -> Address {
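`FragmentedMapper` keeps its original double-checked lookup: a lock-free probe of the slab map for the common hit, then the lock, then a re-check before committing a free slab (now through an explicitly `unsafe` helper). A toy sketch of that control flow, with the caveat that the unsynchronised fast-path read is inherited from the original design rather than introduced here; a strictly race-free variant would use atomics:

```rust
use std::cell::UnsafeCell;
use std::sync::Mutex;

const TABLE_SIZE: usize = 8;
const SENTINEL: usize = usize::MAX;

struct Inner {
    slab_map: [usize; TABLE_SIZE],
}

struct Mapper {
    lock: Mutex<()>,
    inner: UnsafeCell<Inner>,
}

unsafe impl Sync for Mapper {}

impl Mapper {
    fn inner(&self) -> &Inner {
        unsafe { &*self.inner.get() }
    }

    #[allow(clippy::mut_from_ref)]
    unsafe fn inner_mut(&self) -> &mut Inner {
        &mut *self.inner.get()
    }

    /// Return the table slot for `base`, installing it on first use.
    fn get_or_install(&self, base: usize) -> usize {
        let index = base % TABLE_SIZE;
        // Fast path: hash hit without taking the lock (the frequent case).
        if self.inner().slab_map[index] == base {
            return index;
        }
        let _guard = self.lock.lock().unwrap();
        // Re-check: another thread may have installed it while we waited.
        if self.inner().slab_map[index] == base {
            return index;
        }
        if self.inner().slab_map[index] == SENTINEL {
            // SAFETY (assumed): mutation only happens while `lock` is held.
            unsafe { self.inner_mut().slab_map[index] = base };
            return index;
        }
        panic!("hash collision handling omitted from this sketch");
    }
}

fn main() {
    let mapper = Mapper {
        lock: Mutex::new(()),
        inner: UnsafeCell::new(Inner { slab_map: [SENTINEL; TABLE_SIZE] }),
    };
    assert_eq!(mapper.get_or_install(3), 3);
    assert_eq!(mapper.get_or_install(3), 3); // second call takes the fast path
}
```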
19 changes: 15 additions & 4 deletions src/util/heap/layout/map.rs
@@ -17,9 +17,17 @@ pub trait VMMap: Sync {

/// Bind a created freelist with the page resource.
/// This must called after create_freelist() or create_parent_freelist().
fn bind_freelist(&self, pr: &'static CommonFreeListPageResource);

fn allocate_contiguous_chunks(
///
/// # Safety
///
/// * `pr` must be a valid pointer to a CommonFreeListPageResource and be alive
/// for the duration of the VMMap.
unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource);

/// # Safety
///
/// Caller must ensure that only one thread is calling this method.
unsafe fn allocate_contiguous_chunks(
&self,
descriptor: SpaceDescriptor,
chunks: usize,
@@ -42,7 +50,10 @@ pub trait VMMap: Sync {

fn free_all_chunks(&self, any_chunk: Address);

fn free_contiguous_chunks(&self, start: Address) -> usize;
/// # Safety
///
/// Caller must ensure that only one thread is calling this method.
unsafe fn free_contiguous_chunks(&self, start: Address) -> usize;

fn boot(&self) {}

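The `VMMap` trait changes above are the interface-level version of the same move: operations whose soundness depends on the caller (pointer validity, single-threaded use) become `unsafe fn` with a written `# Safety` contract, and the obligation shows up as an `unsafe` block at each call site instead of a silent cast inside the implementation. A toy sketch of an unsafe trait method and its caller (hypothetical names, not the real `VMMap`):

```rust
use std::cell::UnsafeCell;

struct Freelist {
    ordinal: usize,
}

trait VmMapLike {
    /// # Safety
    /// `fl` must point to a live `Freelist` that outlives this map, and only
    /// one thread may call this at a time.
    unsafe fn bind_freelist(&self, fl: *const Freelist);
}

struct ToyMap {
    bound: UnsafeCell<Vec<*const Freelist>>,
}

unsafe impl Sync for ToyMap {}

impl VmMapLike for ToyMap {
    unsafe fn bind_freelist(&self, fl: *const Freelist) {
        // The contract is the caller's responsibility; the body just records
        // the pointer, much as Map32/Map64 now store a NonNull.
        unsafe { (*self.bound.get()).push(fl) };
    }
}

fn main() {
    let fl = Freelist { ordinal: 7 };
    let map = ToyMap { bound: UnsafeCell::new(Vec::new()) };
    // Caller side: the promise is made in an explicit unsafe block.
    unsafe { map.bind_freelist(&fl as *const Freelist) };
    let recorded = unsafe { &*map.bound.get() };
    assert_eq!(unsafe { (*recorded[0]).ordinal }, 7);
}
```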
134 changes: 75 additions & 59 deletions src/util/heap/layout/map32.rs
@@ -8,19 +8,26 @@ use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::int_array_freelist::IntArrayFreeList;
use crate::util::Address;
use std::cell::UnsafeCell;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, MutexGuard};

pub struct Map32 {
sync: Mutex<()>,
inner: UnsafeCell<Map32Inner>,
}

#[doc(hidden)]
pub struct Map32Inner {
prev_link: Vec<i32>,
next_link: Vec<i32>,
region_map: IntArrayFreeList,
global_page_map: IntArrayFreeList,
shared_discontig_fl_count: usize,
shared_fl_map: Vec<Option<&'static CommonFreeListPageResource>>,
shared_fl_map: Vec<Option<NonNull<CommonFreeListPageResource>>>,
total_available_discontiguous_chunks: usize,
finalized: bool,
sync: Mutex<()>,
descriptor_map: Vec<SpaceDescriptor>,

// TODO: Is this the right place for this field?
@@ -30,30 +37,42 @@ pub struct Map32 {
cumulative_committed_pages: AtomicUsize,
}

unsafe impl Send for Map32 {}
unsafe impl Sync for Map32 {}

impl Map32 {
pub fn new() -> Self {
let max_chunks = vm_layout().max_chunks();
Map32 {
prev_link: vec![0; max_chunks],
next_link: vec![0; max_chunks],
region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1),
global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES),
shared_discontig_fl_count: 0,
shared_fl_map: vec![None; MAX_SPACES],
total_available_discontiguous_chunks: 0,
finalized: false,
inner: UnsafeCell::new(Map32Inner {
prev_link: vec![0; max_chunks],
next_link: vec![0; max_chunks],
region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1),
global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES),
shared_discontig_fl_count: 0,
shared_fl_map: vec![None; MAX_SPACES],
total_available_discontiguous_chunks: 0,
finalized: false,
descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks],
cumulative_committed_pages: AtomicUsize::new(0),
}),
sync: Mutex::new(()),
descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks],
cumulative_committed_pages: AtomicUsize::new(0),
}
}
}

impl std::ops::Deref for Map32 {
type Target = Map32Inner;
fn deref(&self) -> &Self::Target {
unsafe { &*self.inner.get() }
}
}

impl VMMap for Map32 {
fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) {
// Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map,
// as each space will update different indices.
let self_mut: &mut Self = unsafe { self.mut_self() };
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
let mut e = 0;
while e < extent {
let index = (start + e).chunk_index();
@@ -88,17 +107,17 @@ impl VMMap for Map32 {
Box::new(IntArrayFreeList::new(units, grain, 1))
}

fn bind_freelist(&self, pr: &'static CommonFreeListPageResource) {
let ordinal: usize = pr
unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) {
let ordinal: usize = (*pr)
.free_list
.downcast_ref::<IntArrayFreeList>()
.unwrap()
.get_ordinal() as usize;
let self_mut: &mut Self = unsafe { self.mut_self() };
self_mut.shared_fl_map[ordinal] = Some(pr);
let self_mut: &mut Map32Inner = self.mut_self();
self_mut.shared_fl_map[ordinal] = Some(NonNull::new_unchecked(pr as *mut _));
}

fn allocate_contiguous_chunks(
unsafe fn allocate_contiguous_chunks(
&self,
descriptor: SpaceDescriptor,
chunks: usize,
@@ -108,7 +127,7 @@ impl VMMap for Map32 {
let chunk = self_mut.region_map.alloc(chunks as _);
debug_assert!(chunk != 0);
if chunk == -1 {
return unsafe { Address::zero() };
return Address::zero();
}
self_mut.total_available_discontiguous_chunks -= chunks;
let rtn = conversions::chunk_index_to_address(chunk as _);
@@ -151,7 +170,7 @@ impl VMMap for Map32 {
fn get_chunk_consumer_count(&self) -> usize {
self.shared_discontig_fl_count
}

#[allow(clippy::while_immutable_condition)]
fn free_all_chunks(&self, any_chunk: Address) {
debug!("free_all_chunks: {}", any_chunk);
let (_sync, self_mut) = self.mut_self_with_sync();
@@ -160,28 +179,28 @@ impl VMMap for Map32 {
let chunk = any_chunk.chunk_index();
while self_mut.next_link[chunk] != 0 {
let x = self_mut.next_link[chunk];
self_mut.free_contiguous_chunks_no_lock(x);
self.free_contiguous_chunks_no_lock(x);
}
while self_mut.prev_link[chunk] != 0 {
let x = self_mut.prev_link[chunk];
self_mut.free_contiguous_chunks_no_lock(x);
self.free_contiguous_chunks_no_lock(x);
}
self_mut.free_contiguous_chunks_no_lock(chunk as _);
self.free_contiguous_chunks_no_lock(chunk as _);
}
}

fn free_contiguous_chunks(&self, start: Address) -> usize {
unsafe fn free_contiguous_chunks(&self, start: Address) -> usize {
debug!("free_contiguous_chunks: {}", start);
let (_sync, self_mut) = self.mut_self_with_sync();
let (_sync, _) = self.mut_self_with_sync();
debug_assert!(start == conversions::chunk_align_down(start));
let chunk = start.chunk_index();
self_mut.free_contiguous_chunks_no_lock(chunk as _)
self.free_contiguous_chunks_no_lock(chunk as _)
}

fn finalize_static_space_map(&self, from: Address, to: Address) {
// This is only called during boot process by a single thread.
// It is fine to get a mutable reference.
let self_mut: &mut Self = unsafe { self.mut_self() };
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
/* establish bounds of discontiguous space */
let start_address = from;
let first_chunk = start_address.chunk_index();
@@ -197,13 +216,9 @@ impl VMMap for Map32 {
// Yi: I am not doing this refactoring right now, as I am not familiar with flatten() and
// there is no test to ensure the refactoring will be correct.
#[allow(clippy::manual_flatten)]
for fl in self_mut.shared_fl_map.iter() {
if let Some(fl) = fl {
#[allow(clippy::cast_ref_to_mut)]
let fl_mut: &mut CommonFreeListPageResource = unsafe {
&mut *(*fl as *const CommonFreeListPageResource
as *mut CommonFreeListPageResource)
};
for fl in self_mut.shared_fl_map.iter().copied() {
if let Some(mut fl) = fl {
let fl_mut = unsafe { fl.as_mut() };
fl_mut.resize_freelist(start_address);
}
}
@@ -262,43 +277,44 @@ impl Map32 {
/// The caller needs to guarantee there is no race condition. Either only one single thread
/// is using this method, or multiple threads are accessing mutually exclusive data (e.g. different indices in arrays).
/// In other cases, use mut_self_with_sync().
#[allow(clippy::cast_ref_to_mut)]
#[allow(clippy::mut_from_ref)]
unsafe fn mut_self(&self) -> &mut Self {
&mut *(self as *const _ as *mut _)
unsafe fn mut_self(&self) -> &mut Map32Inner {
&mut *self.inner.get()
}

fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Self) {
fn mut_self_with_sync(&self) -> (MutexGuard<()>, &mut Map32Inner) {
let guard = self.sync.lock().unwrap();
(guard, unsafe { self.mut_self() })
}

fn free_contiguous_chunks_no_lock(&mut self, chunk: i32) -> usize {
let chunks = self.region_map.free(chunk, false);
self.total_available_discontiguous_chunks += chunks as usize;
let next = self.next_link[chunk as usize];
let prev = self.prev_link[chunk as usize];
if next != 0 {
self.prev_link[next as usize] = prev
};
if prev != 0 {
self.next_link[prev as usize] = next
};
self.prev_link[chunk as usize] = 0;
self.next_link[chunk as usize] = 0;
for offset in 0..chunks {
let index = (chunk + offset) as usize;
let chunk_start = conversions::chunk_index_to_address(index);
debug!("Clear descriptor for Chunk {}", chunk_start);
self.descriptor_map[index] = SpaceDescriptor::UNINITIALIZED;
unsafe { SFT_MAP.clear(chunk_start) };
fn free_contiguous_chunks_no_lock(&self, chunk: i32) -> usize {
unsafe {
let chunks = self.mut_self().region_map.free(chunk, false);
self.mut_self().total_available_discontiguous_chunks += chunks as usize;
let next = self.next_link[chunk as usize];
let prev = self.prev_link[chunk as usize];
if next != 0 {
self.mut_self().prev_link[next as usize] = prev
};
if prev != 0 {
self.mut_self().next_link[prev as usize] = next
};
self.mut_self().prev_link[chunk as usize] = 0;
self.mut_self().next_link[chunk as usize] = 0;
for offset in 0..chunks {
let index = (chunk + offset) as usize;
let chunk_start = conversions::chunk_index_to_address(index);
debug!("Clear descriptor for Chunk {}", chunk_start);
self.mut_self().descriptor_map[index] = SpaceDescriptor::UNINITIALIZED;
SFT_MAP.clear(chunk_start);
}
chunks as _
}
chunks as _
}

fn get_discontig_freelist_pr_ordinal(&self) -> usize {
// This is only called during creating a page resource/space/plan/mmtk instance, which is single threaded.
let self_mut: &mut Self = unsafe { self.mut_self() };
let self_mut: &mut Map32Inner = unsafe { self.mut_self() };
self_mut.shared_discontig_fl_count += 1;
self.shared_discontig_fl_count
}
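Storing `NonNull<CommonFreeListPageResource>` in `shared_fl_map` instead of a fabricated `&'static` reference (the same change Map64 applies below) keeps the real nature of the value, a raw pointer with a liveness contract, visible in the type, and lets the map produce a `&mut` later via `as_mut()` without another ref-to-mut cast. A small sketch of that shape (hypothetical names):

```rust
use std::ptr::NonNull;

struct FreelistResource {
    start: usize,
}

struct Registry {
    slots: Vec<Option<NonNull<FreelistResource>>>,
}

impl Registry {
    fn new(n: usize) -> Self {
        Self { slots: vec![None; n] }
    }

    /// # Safety
    /// `pr` must stay alive, and must not be aliased while registered here.
    unsafe fn bind(&mut self, index: usize, pr: *mut FreelistResource) {
        self.slots[index] = Some(unsafe { NonNull::new_unchecked(pr) });
    }

    /// # Safety
    /// The caller must ensure exclusive access to every registered resource,
    /// as finalize_static_space_map() does during single-threaded boot.
    unsafe fn resize_all(&mut self, new_start: usize) {
        for slot in self.slots.iter_mut().flatten() {
            let pr = unsafe { slot.as_mut() };
            pr.start = new_start;
        }
    }
}

fn main() {
    let mut pr = FreelistResource { start: 0 };
    let mut registry = Registry::new(4);
    unsafe {
        registry.bind(2, &mut pr as *mut FreelistResource);
        registry.resize_all(0x1000);
    }
    assert_eq!(pr.start, 0x1000);
}
```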
111 changes: 63 additions & 48 deletions src/util/heap/layout/map64.rs
@@ -10,13 +10,19 @@ use crate::util::memory::MmapStrategy;
use crate::util::raw_memory_freelist::RawMemoryFreeList;
use crate::util::rust_util::zeroed_alloc::new_zeroed_vec;
use crate::util::Address;
use std::cell::UnsafeCell;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};

const NON_MAP_FRACTION: f64 = 1.0 - 8.0 / 4096.0;

pub struct Map64 {
fl_page_resources: Vec<Option<&'static CommonFreeListPageResource>>,
fl_map: Vec<Option<&'static RawMemoryFreeList>>,
inner: UnsafeCell<Map64Inner>,
}

struct Map64Inner {
fl_page_resources: Vec<Option<NonNull<CommonFreeListPageResource>>>,
fl_map: Vec<Option<NonNull<RawMemoryFreeList>>>,
finalized: bool,
descriptor_map: Vec<SpaceDescriptor>,
base_address: Vec<Address>,
@@ -29,6 +35,9 @@ pub struct Map64 {
cumulative_committed_pages: AtomicUsize,
}

unsafe impl Send for Map64 {}
unsafe impl Sync for Map64 {}

impl Map64 {
pub fn new() -> Self {
let mut high_water = vec![Address::ZERO; MAX_SPACES];
@@ -41,18 +50,20 @@ impl Map64 {
}

Self {
// Note: descriptor_map is very large. Although it is initialized to
// SpaceDescriptor(0), the compiler and the standard library are not smart enough to
// elide the storing of 0 for each element. Using standard vector creation,
// such as `vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS]`, will cause severe
// slowdown during start-up.
descriptor_map: unsafe { new_zeroed_vec::<SpaceDescriptor>(vm_layout().max_chunks()) },
high_water,
base_address,
fl_page_resources: vec![None; MAX_SPACES],
fl_map: vec![None; MAX_SPACES],
finalized: false,
cumulative_committed_pages: AtomicUsize::new(0),
inner: UnsafeCell::new(Map64Inner {
// Note: descriptor_map is very large. Although it is initialized to
// SpaceDescriptor(0), the compiler and the standard library are not smart enough to
// elide the storing of 0 for each element. Using standard vector creation,
// such as `vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS]`, will cause severe
// slowdown during start-up.
descriptor_map: unsafe { new_zeroed_vec::<SpaceDescriptor>(vm_layout().max_chunks()) },
high_water,
base_address,
fl_page_resources: vec![None; MAX_SPACES],
fl_map: vec![None; MAX_SPACES],
finalized: false,
cumulative_committed_pages: AtomicUsize::new(0),
}),
}
}
}
@@ -89,7 +100,7 @@ impl VMMap for Map64 {

let heads = 1;
let pages_per_block = RawMemoryFreeList::default_block_size(units as _, heads);
let list = Box::new(RawMemoryFreeList::new(
let mut list = Box::new(RawMemoryFreeList::new(
start,
start + list_extent,
pages_per_block,
@@ -99,9 +110,10 @@ impl VMMap for Map64 {
MmapStrategy::Normal,
));

self_mut.fl_map[index] =
/*self_mut.fl_map[index] =
Some(unsafe { &*(&list as &RawMemoryFreeList as *const RawMemoryFreeList) });

*/
self_mut.fl_map[index] = unsafe { Some(NonNull::new_unchecked(&mut *list)) };
/* Adjust the base address and highwater to account for the allocated chunks for the map */
let base = conversions::chunk_align_up(start + list_extent);

@@ -110,13 +122,16 @@ impl VMMap for Map64 {
list
}

fn bind_freelist(&self, pr: &'static CommonFreeListPageResource) {
let index = Self::space_index(pr.get_start()).unwrap();
let self_mut = unsafe { self.mut_self() };
self_mut.fl_page_resources[index] = Some(pr);
unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) {
let index = Self::space_index((*pr).get_start()).unwrap();
let self_mut = self.mut_self();
self_mut.fl_page_resources[index] = Some(NonNull::new_unchecked(pr as _));
}

fn allocate_contiguous_chunks(
/// # Safety
///
/// Caller must ensure that only one thread is calling this method.
unsafe fn allocate_contiguous_chunks(
&self,
descriptor: SpaceDescriptor,
chunks: usize,
@@ -125,20 +140,19 @@ impl VMMap for Map64 {
debug_assert!(Self::space_index(descriptor.get_start()).unwrap() == descriptor.get_index());
// Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map,
// as each space will update different indices.
let self_mut = unsafe { self.mut_self() };
let self_mut = self.mut_self();

let index = descriptor.get_index();
let rtn = self.high_water[index];
let rtn = self.inner().high_water[index];
let extent = chunks << LOG_BYTES_IN_CHUNK;
self_mut.high_water[index] = rtn + extent;

/* Grow the free list to accommodate the new chunks */
let free_list = self.fl_map[Self::space_index(descriptor.get_start()).unwrap()];
if let Some(free_list) = free_list {
let free_list =
unsafe { &mut *(free_list as *const _ as usize as *mut RawMemoryFreeList) };
let free_list = self.inner().fl_map[Self::space_index(descriptor.get_start()).unwrap()];
if let Some(mut free_list) = free_list {
let free_list = free_list.as_mut();
free_list.grow_freelist(conversions::bytes_to_pages(extent) as _);
let base_page = conversions::bytes_to_pages(rtn - self.base_address[index]);
let base_page = conversions::bytes_to_pages(rtn - self.inner().base_address[index]);
for offset in (0..(chunks * PAGES_IN_CHUNK)).step_by(PAGES_IN_CHUNK) {
free_list.set_uncoalescable((base_page + offset) as _);
/* The 32-bit implementation requires that pages are returned allocated to the caller */
@@ -172,18 +186,17 @@ impl VMMap for Map64 {
unreachable!()
}

fn free_contiguous_chunks(&self, _start: Address) -> usize {
unsafe fn free_contiguous_chunks(&self, _start: Address) -> usize {
unreachable!()
}

fn boot(&self) {
// This is only called during boot process by a single thread.
// It is fine to get a mutable reference.
let self_mut: &mut Self = unsafe { self.mut_self() };
let self_mut: &mut Map64Inner = unsafe { self.mut_self() };
for pr in 0..MAX_SPACES {
if let Some(fl) = self_mut.fl_map[pr] {
#[allow(clippy::cast_ref_to_mut)]
let fl_mut: &mut RawMemoryFreeList = unsafe { &mut *(fl as *const _ as *mut _) };
if let Some(mut fl) = self_mut.fl_map[pr] {
let fl_mut: &mut RawMemoryFreeList = unsafe { fl.as_mut() };
fl_mut.grow_freelist(0);
}
}
@@ -192,31 +205,30 @@ impl VMMap for Map64 {
fn finalize_static_space_map(&self, _from: Address, _to: Address) {
// This is only called during boot process by a single thread.
// It is fine to get a mutable reference.
let self_mut: &mut Self = unsafe { self.mut_self() };
let self_mut: &mut Map64Inner = unsafe { self.mut_self() };
for pr in 0..MAX_SPACES {
if let Some(fl) = self_mut.fl_page_resources[pr] {
#[allow(clippy::cast_ref_to_mut)]
let fl_mut: &mut CommonFreeListPageResource =
unsafe { &mut *(fl as *const _ as *mut _) };
fl_mut.resize_freelist(conversions::chunk_align_up(
self.fl_map[pr].unwrap().get_limit(),
));
if let Some(mut fl) = self_mut.fl_page_resources[pr] {
let fl_mut = unsafe { fl.as_mut() };
fl_mut.resize_freelist(conversions::chunk_align_up(unsafe {
self.inner().fl_map[pr].unwrap().as_ref().get_limit()
}));
}
}
self_mut.finalized = true;
}

fn is_finalized(&self) -> bool {
self.finalized
self.inner().finalized
}

fn get_descriptor_for_address(&self, address: Address) -> SpaceDescriptor {
let index = Self::space_index(address).unwrap();
self.descriptor_map[index]
self.inner().descriptor_map[index]
}

fn add_to_cumulative_committed_pages(&self, pages: usize) {
self.cumulative_committed_pages
self.inner()
.cumulative_committed_pages
.fetch_add(pages, Ordering::Relaxed);
}
}
@@ -227,10 +239,13 @@ impl Map64 {
/// The caller needs to guarantee there is no race condition. Either only one single thread
is using this method, or multiple threads are accessing mutually exclusive data (e.g. different indices in arrays).
/// In other cases, use mut_self_with_sync().
#[allow(clippy::cast_ref_to_mut)]
#[allow(clippy::mut_from_ref)]
unsafe fn mut_self(&self) -> &mut Self {
&mut *(self as *const _ as *mut _)
unsafe fn mut_self(&self) -> &mut Map64Inner {
&mut *self.inner.get()
}

fn inner(&self) -> &Map64Inner {
unsafe { &*self.inner.get() }
}

fn space_index(addr: Address) -> Option<usize> {
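
The map64.rs changes also swap the `&'static CommonFreeListPageResource` / `&'static RawMemoryFreeList` fields for `Option<NonNull<...>>`, so later mutation goes through NonNull::as_mut() rather than a const-to-mut cast on a shared reference. Below is a sketch of that storage pattern under invented names (FreeList, Registry); it is not the crate's real API.

use std::ptr::NonNull;

struct FreeList {
    limit: usize,
}

impl FreeList {
    fn grow(&mut self, pages: usize) {
        self.limit += pages;
    }
}

struct Registry {
    // None until a free list is bound; NonNull keeps the pointer non-null
    // without claiming a Rust borrow for the rest of the program.
    slots: Vec<Option<NonNull<FreeList>>>,
}

impl Registry {
    fn new(n: usize) -> Self {
        Self {
            slots: vec![None; n],
        }
    }

    /// # Safety
    /// `fl` must outlive the registry and must not be aliased by another
    /// mutable reference while the registry uses it.
    unsafe fn bind(&mut self, index: usize, fl: *mut FreeList) {
        self.slots[index] = Some(NonNull::new_unchecked(fl));
    }

    /// # Safety
    /// The caller guarantees exclusive access to every bound free list.
    unsafe fn grow_all(&mut self, pages: usize) {
        for slot in self.slots.iter_mut() {
            if let Some(fl) = slot {
                // NonNull::as_mut() yields &mut FreeList without casting a
                // shared reference to a mutable one.
                fl.as_mut().grow(pages);
            }
        }
    }
}

fn main() {
    let mut list = Box::new(FreeList { limit: 0 });
    let mut reg = Registry::new(1);
    unsafe {
        reg.bind(0, &mut *list);
        reg.grow_all(8);
    }
    assert_eq!(list.limit, 8);
}

Because the registry holds only raw pointers, the borrow checker no longer ties its lifetime to the free lists; the `# Safety` contracts carry those obligations instead.
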
8 changes: 4 additions & 4 deletions src/util/heap/layout/mod.rs
Original file line number Diff line number Diff line change
@@ -15,12 +15,12 @@ mod map32;
mod map64;

#[cfg(target_pointer_width = "32")]
pub fn create_vm_map() -> Box<dyn VMMap> {
pub fn create_vm_map() -> Box<dyn VMMap + Send + Sync> {
Box::new(map32::Map32::new())
}

#[cfg(target_pointer_width = "64")]
pub fn create_vm_map() -> Box<dyn VMMap> {
pub fn create_vm_map() -> Box<dyn VMMap + Send + Sync> {
if !vm_layout().force_use_contiguous_spaces {
Box::new(map32::Map32::new())
} else {
@@ -29,12 +29,12 @@ pub fn create_vm_map() -> Box<dyn VMMap> {
}

#[cfg(target_pointer_width = "32")]
pub fn create_mmapper() -> Box<dyn Mmapper> {
pub fn create_mmapper() -> Box<dyn Mmapper + Send + Sync> {
Box::new(byte_map_mmapper::ByteMapMmapper::new())
}

#[cfg(target_pointer_width = "64")]
pub fn create_mmapper() -> Box<dyn Mmapper> {
pub fn create_mmapper() -> Box<dyn Mmapper + Send + Sync> {
// TODO: ByteMapMmapper for 39-bit or less virtual space
Box::new(fragmented_mapper::FragmentedMapper::new())
}
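
create_vm_map() and create_mmapper() now return `Box<dyn ... + Send + Sync>`. A plain `Box<dyn Trait>` erases the auto traits along with the concrete type, so the boxed map could not be handed to GC threads even if every implementor is thread-safe. A small illustration with a placeholder trait (VMMapLike and DummyMap are stand-ins, not mmtk-core names):

use std::sync::Arc;
use std::thread;

trait VMMapLike {
    fn is_finalized(&self) -> bool;
}

struct DummyMap;

impl VMMapLike for DummyMap {
    fn is_finalized(&self) -> bool {
        true
    }
}

// Mirrors the factory signatures above: the auto traits must be written into
// the trait-object type, because Box<dyn VMMapLike> alone erases them.
fn create_map() -> Box<dyn VMMapLike + Send + Sync> {
    Box::new(DummyMap)
}

fn main() {
    let map: Arc<dyn VMMapLike + Send + Sync> = Arc::from(create_map());
    let worker = {
        let map = Arc::clone(&map);
        // Without `+ Send + Sync` on the trait object this spawn would not compile.
        thread::spawn(move || map.is_finalized())
    };
    assert!(worker.join().unwrap());
}
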
16 changes: 10 additions & 6 deletions src/util/heap/pageresource.rs
Original file line number Diff line number Diff line change
@@ -148,11 +148,13 @@ impl CommonPageResource {
) -> Address {
let mut head_discontiguous_region = self.head_discontiguous_region.lock().unwrap();

let new_head: Address = self.vm_map.allocate_contiguous_chunks(
space_descriptor,
chunks,
*head_discontiguous_region,
);
let new_head: Address = unsafe {
self.vm_map.allocate_contiguous_chunks(
space_descriptor,
chunks,
*head_discontiguous_region,
)
};
if new_head.is_zero() {
return Address::ZERO;
}
@@ -169,7 +171,9 @@ impl CommonPageResource {
if chunk == *head_discontiguous_region {
*head_discontiguous_region = self.vm_map.get_next_contiguous_region(chunk);
}
self.vm_map.free_contiguous_chunks(chunk);
unsafe {
self.vm_map.free_contiguous_chunks(chunk);
}
}

pub fn release_all_chunks(&self) {
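
With allocate_contiguous_chunks() and free_contiguous_chunks() now declared `unsafe fn`, the page resource discharges their safety contract by holding its own lock around the calls (the head_discontiguous_region mutex above). A compact sketch of that caller-side convention, using invented ChunkMap/PageResource types rather than the real ones:

use std::cell::UnsafeCell;
use std::sync::Mutex;

struct ChunkMap {
    next_free: UnsafeCell<usize>,
}

// Needed because UnsafeCell removes Sync; all mutation is guarded by the
// caller, per the safety contract on allocate_chunks().
unsafe impl Sync for ChunkMap {}

impl ChunkMap {
    /// # Safety
    /// The caller must ensure no other thread calls this concurrently.
    unsafe fn allocate_chunks(&self, chunks: usize) -> usize {
        let next = &mut *self.next_free.get();
        let start = *next;
        *next += chunks;
        start
    }
}

struct PageResource {
    map: ChunkMap,
    // Serializes every allocate_chunks() call, discharging its safety contract.
    alloc_lock: Mutex<()>,
}

impl PageResource {
    fn allocate(&self, chunks: usize) -> usize {
        let _guard = self.alloc_lock.lock().unwrap();
        // The mutex guarantees exclusive access for the duration of the call.
        unsafe { self.map.allocate_chunks(chunks) }
    }
}

fn main() {
    let pr = PageResource {
        map: ChunkMap {
            next_free: UnsafeCell::new(0),
        },
        alloc_lock: Mutex::new(()),
    };
    assert_eq!(pr.allocate(4), 0);
    assert_eq!(pr.allocate(2), 4);
}
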
17 changes: 8 additions & 9 deletions src/util/int_array_freelist.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,17 @@
use super::freelist::*;
use std::mem;
use std::{mem, ptr::NonNull};

#[derive(Debug)]
pub struct IntArrayFreeList {
pub head: i32,
pub heads: i32,
pub table: Option<Vec<i32>>,
parent: Option<&'static IntArrayFreeList>,
parent: Option<NonNull<IntArrayFreeList>>,
}

unsafe impl Send for IntArrayFreeList {}
unsafe impl Sync for IntArrayFreeList {}

impl FreeList for IntArrayFreeList {
fn head(&self) -> i32 {
self.head
@@ -53,20 +56,16 @@ impl IntArrayFreeList {
}
fn table(&self) -> &Vec<i32> {
match self.parent {
Some(p) => p.table(),
Some(p) => unsafe { p.as_ref().table() },
None => self.table.as_ref().unwrap(),
}
}

// FIXME: We need a safe implementation
#[allow(clippy::cast_ref_to_mut)]

fn table_mut(&mut self) -> &mut Vec<i32> {
match self.parent {
Some(p) => {
let parent_mut: &mut Self =
unsafe { &mut *(p as *const IntArrayFreeList as *mut IntArrayFreeList) };
parent_mut.table_mut()
}
Some(mut p) => unsafe { p.as_mut().table_mut() },
None => self.table.as_mut().unwrap(),
}
}
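
Storing a NonNull parent pointer costs IntArrayFreeList its automatic Send and Sync, which is why the two `unsafe impl` lines appear in the hunk above. A toy example of the same effect; Node and its fields are illustrative only, not a crate type:

use std::ptr::NonNull;

struct Node {
    value: i32,
    // Raw back-pointer into a parent node owned elsewhere.
    parent: Option<NonNull<Node>>,
}

// NonNull<T> is neither Send nor Sync, so the struct loses both auto traits;
// asserting them back is the programmer's responsibility, not the compiler's.
unsafe impl Send for Node {}
unsafe impl Sync for Node {}

fn value_of(node: &Node) -> i32 {
    match node.parent {
        Some(p) => unsafe { p.as_ref().value },
        None => node.value,
    }
}

fn main() {
    let root = Node { value: 7, parent: None };
    let child = Node {
        value: 0,
        parent: Some(NonNull::from(&root)),
    };
    assert_eq!(value_of(&child), 7);
}
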
10 changes: 5 additions & 5 deletions src/util/malloc/mod.rs
Original file line number Diff line number Diff line change
@@ -27,7 +27,7 @@ pub fn malloc(size: usize) -> Address {
pub fn counted_malloc<VM: VMBinding>(mmtk: &MMTK<VM>, size: usize) -> Address {
let res = malloc(size);
if !res.is_zero() {
mmtk.plan.base().increase_malloc_bytes_by(size);
mmtk.get_plan().base().increase_malloc_bytes_by(size);
}
res
}
@@ -40,7 +40,7 @@ pub fn calloc(num: usize, size: usize) -> Address {
pub fn counted_calloc<VM: VMBinding>(mmtk: &MMTK<VM>, num: usize, size: usize) -> Address {
let res = calloc(num, size);
if !res.is_zero() {
mmtk.plan.base().increase_malloc_bytes_by(num * size);
mmtk.get_plan().base().increase_malloc_bytes_by(num * size);
}
res
}
@@ -59,10 +59,10 @@ pub fn realloc_with_old_size<VM: VMBinding>(
let res = realloc(addr, size);

if !addr.is_zero() {
mmtk.plan.base().decrease_malloc_bytes_by(old_size);
mmtk.get_plan().base().decrease_malloc_bytes_by(old_size);
}
if size != 0 && !res.is_zero() {
mmtk.plan.base().increase_malloc_bytes_by(size);
mmtk.get_plan().base().increase_malloc_bytes_by(size);
}

res
@@ -76,6 +76,6 @@ pub fn free(addr: Address) {
pub fn free_with_size<VM: VMBinding>(mmtk: &MMTK<VM>, addr: Address, old_size: usize) {
free(addr);
if !addr.is_zero() {
mmtk.plan.base().decrease_malloc_bytes_by(old_size);
mmtk.get_plan().base().decrease_malloc_bytes_by(old_size);
}
}
22 changes: 13 additions & 9 deletions src/util/reference_processor.rs
Original file line number Diff line number Diff line change
@@ -67,12 +67,15 @@ impl ReferenceProcessors {
/// plans, this separate step is required.
pub fn forward_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
debug_assert!(
mmtk.plan.constraints().needs_forward_after_liveness,
mmtk.get_plan().constraints().needs_forward_after_liveness,
"A plan with needs_forward_after_liveness=false does not need a separate forward step"
);
self.soft.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.weak.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.phantom.forward::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.soft
.forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
self.weak
.forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
self.phantom
.forward::<E>(trace, is_nursery_gc(mmtk.get_plan()));
}

// Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom
@@ -81,18 +84,18 @@ impl ReferenceProcessors {
pub fn scan_soft_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
// For soft refs, it is up to the VM to decide when to reclaim this.
// If this is not an emergency collection, we have no heap stress. We simply retain soft refs.
if !mmtk.plan.is_emergency_collection() {
if !mmtk.get_plan().is_emergency_collection() {
// This step only retains the referents (keep the referents alive), it does not update its addresses.
// We will call soft.scan() again with retain=false to update its addresses based on liveness.
self.soft.retain::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.soft.retain::<E>(trace, is_nursery_gc(mmtk.get_plan()));
}
// This will update the references (and the referents).
self.soft.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.soft.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
}

/// Scan weak references.
pub fn scan_weak_refs<E: ProcessEdgesWork>(&self, trace: &mut E, mmtk: &'static MMTK<E::VM>) {
self.weak.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.weak.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
}

/// Scan phantom references.
@@ -101,7 +104,8 @@ impl ReferenceProcessors {
trace: &mut E,
mmtk: &'static MMTK<E::VM>,
) {
self.phantom.scan::<E>(trace, is_nursery_gc(&*mmtk.plan));
self.phantom
.scan::<E>(trace, is_nursery_gc(mmtk.get_plan()));
}
}

8 changes: 4 additions & 4 deletions src/util/sanity/sanity_checker.rs
Original file line number Diff line number Diff line change
@@ -63,7 +63,7 @@ impl<P: Plan> ScheduleSanityGC<P> {
impl<P: Plan> GCWork<P::VM> for ScheduleSanityGC<P> {
fn do_work(&mut self, worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
let scheduler = worker.scheduler();
let plan = &mmtk.plan;
let plan = mmtk.get_plan();

scheduler.reset_state();

@@ -122,7 +122,7 @@ impl<P: Plan> SanityPrepare<P> {
impl<P: Plan> GCWork<P::VM> for SanityPrepare<P> {
fn do_work(&mut self, _worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
info!("Sanity GC prepare");
mmtk.plan.enter_sanity();
mmtk.get_plan().enter_sanity();
{
let mut sanity_checker = mmtk.sanity_checker.lock().unwrap();
sanity_checker.refs.clear();
@@ -151,7 +151,7 @@ impl<P: Plan> SanityRelease<P> {
impl<P: Plan> GCWork<P::VM> for SanityRelease<P> {
fn do_work(&mut self, _worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
info!("Sanity GC release");
mmtk.plan.leave_sanity();
mmtk.get_plan().leave_sanity();
mmtk.sanity_checker.lock().unwrap().clear_roots_cache();
for mutator in <P::VM as VMBinding>::VMActivePlan::mutators() {
mmtk.scheduler.work_buckets[WorkBucketStage::Release]
@@ -205,7 +205,7 @@ impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {

// Let plan check object
assert!(
self.mmtk().plan.sanity_check_object(object),
self.mmtk().get_plan().sanity_check_object(object),
"Invalid reference {:?}",
object
);