
Commit f010965

Rename all occurrences of (gc) works to work (#233)

1 parent 63258bc · 24 files changed: +49 -49 lines

src/lib.rs (+2 -2)

@@ -24,12 +24,12 @@
 //! * [Allocators](util/alloc/allocator/trait.Allocator.html): handlers of allocation requests which allocate objects to the bound space.
 //! * [Policies](policy/space/trait.Space.html): definitions of semantics and behaviors for memory regions.
 //! Each space is an instance of a policy, and takes up a unique proportion of the heap.
-//! * [Work packets](scheduler/work/trait.GCWork.html): units of GC works scheduled by the MMTk's scheduler.
+//! * [Work packets](scheduler/work/trait.GCWork.html): units of GC work scheduled by the MMTk's scheduler.
 //! * [GC plans](plan/global/trait.Plan.html): GC algorithms composed from components.
 //! *Note that currently the choice of plans is made through Rust features, which is a build-time config, so only one plan is present in the generated binary
 //! and in this documentation. We plan to make this a run-time config so that users can choose GC plans at boot time.*
 //! * [Heap implementations](util/heap/index.html): the underlying implementations of memory resources that support spaces.
-//! * [Scheduler](scheduler/scheduler/struct.Scheduler.html): the MMTk scheduler to allow flexible and parallel execution of GC works.
+//! * [Scheduler](scheduler/scheduler/struct.Scheduler.html): the MMTk scheduler to allow flexible and parallel execution of GC work.
 //! * Interfaces: bi-directional interfaces between MMTk and language implementations
 //! i.e. [the memory manager API](memory_manager/index.html) that allows a language's memory manager to use MMTk
 //! and [the VMBinding trait](vm/trait.VMBinding.html) that allows MMTk to call the language implementation.
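The doc comment above introduces work packets (trait `GCWork`) as the scheduler's unit of GC work. As a rough, self-contained sketch of the idea only — the `Work` trait and `ZeroRegion` packet below are invented for illustration and are not MMTk's API:

// Minimal sketch of the work-packet idea, with invented names; MMTk's
// real trait is `GCWork` and takes worker/MMTK parameters.
trait Work: Send {
    // Perform one bounded unit of GC work.
    fn do_work(&mut self);
}

// A toy packet that zeroes a buffer, standing in for the kind of bounded
// task a Prepare phase might schedule.
struct ZeroRegion(Vec<u8>);

impl Work for ZeroRegion {
    fn do_work(&mut self) {
        for byte in self.0.iter_mut() {
            *byte = 0;
        }
    }
}

fn main() {
    // The scheduler would hand boxed packets like this to parallel workers.
    let mut packet: Box<dyn Work> = Box::new(ZeroRegion(vec![0xAA; 4096]));
    packet.do_work();
}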

src/plan/barriers.rs (+1 -1)

@@ -1,5 +1,5 @@
 use crate::policy::space::Space;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::WorkBucketStage;
 use crate::util::*;
 use crate::MMTK;

src/plan/controller_collector_context.rs (+1 -1)

@@ -1,4 +1,4 @@
-use crate::scheduler::gc_works::ScheduleCollection;
+use crate::scheduler::gc_work::ScheduleCollection;
 use crate::scheduler::*;
 use crate::util::OpaquePointer;
 use crate::vm::VMBinding;

src/plan/gencopy/gc_works.rs renamed to src/plan/gencopy/gc_work.rs (+1 -1)

@@ -2,7 +2,7 @@ use super::global::GenCopy;
 use crate::plan::CopyContext;
 use crate::plan::PlanConstraints;
 use crate::policy::space::Space;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::WorkerLocal;
 use crate::scheduler::{GCWork, GCWorker, WorkBucketStage};
 use crate::util::alloc::{Allocator, BumpAllocator};

src/plan/gencopy/global.rs (+2 -2)

@@ -1,4 +1,4 @@
-use super::gc_works::{GenCopyCopyContext, GenCopyMatureProcessEdges, GenCopyNurseryProcessEdges};
+use super::gc_work::{GenCopyCopyContext, GenCopyMatureProcessEdges, GenCopyNurseryProcessEdges};
 use super::mutator::ALLOCATOR_MAPPING;
 use crate::mmtk::MMTK;
 use crate::plan::global::BasePlan;
@@ -9,7 +9,7 @@ use crate::plan::Plan;
 use crate::plan::PlanConstraints;
 use crate::policy::copyspace::CopySpace;
 use crate::policy::space::Space;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::*;
 use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::constants::LOG_BYTES_IN_PAGE;

src/plan/gencopy/mod.rs (+1 -1)

@@ -1,4 +1,4 @@
-pub(super) mod gc_works;
+pub(super) mod gc_work;
 pub(super) mod global;
 pub(super) mod mutator;

src/plan/gencopy/mutator.rs (+1 -1)

@@ -1,4 +1,4 @@
-use super::gc_works::*;
+use super::gc_work::*;
 use super::GenCopy;
 use crate::plan::barriers::*;
 use crate::plan::mutator_context::Mutator;

src/plan/semispace/gc_works.rs renamed to src/plan/semispace/gc_work.rs (+1 -1)

@@ -2,7 +2,7 @@ use super::global::SemiSpace;
 use crate::plan::CopyContext;
 use crate::plan::PlanConstraints;
 use crate::policy::space::Space;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::WorkerLocal;
 use crate::util::alloc::{Allocator, BumpAllocator};
 use crate::util::forwarding_word;

src/plan/semispace/global.rs (+2 -2)

@@ -1,4 +1,4 @@
-use super::gc_works::{SSCopyContext, SSProcessEdges};
+use super::gc_work::{SSCopyContext, SSProcessEdges};
 use crate::mmtk::MMTK;
 use crate::plan::global::BasePlan;
 use crate::plan::global::CommonPlan;
@@ -9,7 +9,7 @@ use crate::plan::Plan;
 use crate::plan::PlanConstraints;
 use crate::policy::copyspace::CopySpace;
 use crate::policy::space::Space;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::*;
 use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::heap::layout::heap_layout::Mmapper;

src/plan/semispace/mod.rs (+1 -1)

@@ -1,4 +1,4 @@
-pub(super) mod gc_works;
+pub(super) mod gc_work;
 pub(super) mod global;
 pub(super) mod mutator;

src/plan/transitive_closure.rs (+1 -1)

@@ -1,4 +1,4 @@
-use crate::scheduler::gc_works::ProcessEdgesWork;
+use crate::scheduler::gc_work::ProcessEdgesWork;
 use crate::util::{Address, ObjectReference};

 /// This trait is the fundamental mechanism for performing a

src/scheduler/gc_works.rs renamed to src/scheduler/gc_work.rs (+4 -4)

@@ -47,7 +47,7 @@ impl<P: Plan, W: CopyContext + WorkerLocal> GCWork<P::VM> for Prepare<P, W> {
                 .add(PrepareMutator::<P::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_works.add(PrepareCollector::<W>::new());
+            w.local_work_bucket.add(PrepareCollector::<W>::new());
         }
     }
 }
@@ -115,7 +115,7 @@ impl<P: Plan, W: CopyContext + WorkerLocal> GCWork<P::VM> for Release<P, W> {
                 .add(ReleaseMutator::<P::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_works.add(ReleaseCollector::<W>(PhantomData));
+            w.local_work_bucket.add(ReleaseCollector::<W>(PhantomData));
         }
         // TODO: Process weak references properly
         mmtk.reference_processors.clear();
@@ -381,9 +381,9 @@ pub trait ProcessEdgesWork:
         if Self::SCAN_OBJECTS_IMMEDIATELY {
             // We execute this `scan_objects_work` immediately.
             // This is expected to be a useful optimization because,
-            // say for _pmd_ with 200M heap, we're likely to have 50000~60000 `ScanObjects` works
+            // say for _pmd_ with 200M heap, we're likely to have 50000~60000 `ScanObjects` work packets
             // being dispatched (similar amount to `ProcessEdgesWork`).
-            // Executing these works now can remarkably reduce the global synchronization time.
+            // Executing these work packets now can remarkably reduce the global synchronization time.
             self.worker().do_work(scan_objects_work);
         } else {
             self.mmtk.scheduler.work_buckets[WorkBucketStage::Closure].add(scan_objects_work);
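The last hunk above documents a real scheduling trade-off: a flushing `ProcessEdgesWork` can run its follow-up `ScanObjects` packet inline on the current worker instead of pushing it through the shared Closure bucket. A self-contained sketch of that decision, where only `SCAN_OBJECTS_IMMEDIATELY` echoes a name from the diff and `Packet` plus the `Vec` bucket are invented stand-ins:

// Sketch only: running the packet inline skips the shared bucket's lock
// and notify round-trip, at the cost of less load balancing.
const SCAN_OBJECTS_IMMEDIATELY: bool = true;

struct Packet(&'static str);

fn execute_on_this_worker(p: Packet) {
    println!("executing {} inline", p.0);
}

fn flush(closure_bucket: &mut Vec<Packet>) {
    let scan_objects = Packet("ScanObjects");
    if SCAN_OBJECTS_IMMEDIATELY {
        execute_on_this_worker(scan_objects);
    } else {
        // Otherwise another worker picks it up from the Closure stage later.
        closure_bucket.push(scan_objects);
    }
}

fn main() {
    let mut closure_bucket = Vec::new();
    flush(&mut closure_bucket);
}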

src/scheduler/mod.rs (+2 -2)

@@ -1,4 +1,4 @@
-//! A general scheduler implementation. MMTk uses it to schedule GC-related works.
+//! A general scheduler implementation. MMTk uses it to schedule GC-related work.

 mod context;
 mod mmtk_context;
@@ -16,4 +16,4 @@ pub use work::*;
 pub use work_bucket::WorkBucketStage;
 pub use worker::*;

-pub mod gc_works;
+pub mod gc_work;

src/scheduler/scheduler.rs (+9 -9)

@@ -20,15 +20,15 @@ pub enum CoordinatorMessage<C: Context> {

 pub struct Scheduler<C: Context> {
     pub work_buckets: EnumMap<WorkBucketStage, WorkBucket<C>>,
-    /// Works for the coordinator thread
-    pub coordinator_works: WorkBucket<C>,
+    /// Work for the coordinator thread
+    pub coordinator_work: WorkBucket<C>,
     /// workers
     worker_group: Option<Arc<WorkerGroup<C>>>,
     /// Condition Variable for worker synchronization
     pub worker_monitor: Arc<(Mutex<()>, Condvar)>,
     context: Option<&'static C>,
     coordinator_worker: Option<RwLock<Worker<C>>>,
-    /// A message channel to send new coordinator works and other actions to the coordinator thread
+    /// A message channel to send new coordinator work and other actions to the coordinator thread
     pub channel: (
         Sender<CoordinatorMessage<C>>,
         Receiver<CoordinatorMessage<C>>,
@@ -51,7 +51,7 @@ impl<C: Context> Scheduler<C> {
                 WorkBucketStage::Release => WorkBucket::new(false, worker_monitor.clone()),
                 WorkBucketStage::Final => WorkBucket::new(false, worker_monitor.clone()),
             },
-            coordinator_works: WorkBucket::new(true, worker_monitor.clone()),
+            coordinator_work: WorkBucket::new(true, worker_monitor.clone()),
             worker_group: None,
             worker_monitor,
             context: None,
@@ -142,20 +142,20 @@ impl<C: Context> Scheduler<C> {
             buckets_updated |= bucket.update();
         }
         if buckets_updated {
-            // Notify the workers for new works
+            // Notify the workers for new work
            let _guard = self.worker_monitor.0.lock().unwrap();
            self.worker_monitor.1.notify_all();
         }
     }

-    /// Execute coordinator works, in the controller thread
+    /// Execute coordinator work, in the controller thread
     fn process_coordinator_work(&self, mut work: Box<dyn CoordinatorWork<C>>) {
         let mut coordinator_worker = self.coordinator_worker.as_ref().unwrap().write().unwrap();
         let context = self.context.unwrap();
         work.do_work_with_stat(&mut coordinator_worker, context);
     }

-    /// Drain the message queue and execute coordinator works
+    /// Drain the message queue and execute coordinator work
     pub fn wait_for_completion(&self) {
         // At the start of a GC, we probably already have received a `ScheduleCollection` work. Run it now.
         if let Some(initializer) = self.startup.lock().unwrap().take() {
@@ -219,8 +219,8 @@ impl<C: Context> Scheduler<C> {

     #[inline]
     fn pop_scheduable_work(&self, worker: &Worker<C>) -> Option<(Box<dyn Work<C>>, bool)> {
-        if let Some(work) = worker.local_works.poll() {
-            return Some((work, worker.local_works.is_empty()));
+        if let Some(work) = worker.local_work_bucket.poll() {
+            return Some((work, worker.local_work_bucket.is_empty()));
         }
         for work_bucket in self.work_buckets.values() {
             if let Some(work) = work_bucket.poll() {
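The final hunk shows the polling order in `pop_scheduable_work`: a worker drains its own `local_work_bucket` before competing for packets in the shared stage buckets. A simplified model of that order, with assumed queue types (the real `WorkBucket` is prioritized and synchronized):

use std::collections::VecDeque;

// Stand-in for `WorkBucket`: a plain FIFO of packet names.
struct Bucket(VecDeque<&'static str>);

impl Bucket {
    fn poll(&mut self) -> Option<&'static str> {
        self.0.pop_front()
    }
}

// Mirrors the order in the diff: the local bucket first, then the shared
// stage buckets in order.
fn pop_work(local: &mut Bucket, shared: &mut [Bucket]) -> Option<&'static str> {
    if let Some(work) = local.poll() {
        return Some(work);
    }
    shared.iter_mut().find_map(|bucket| bucket.poll())
}

fn main() {
    let mut local = Bucket(VecDeque::from(vec!["PrepareCollector"]));
    let mut shared = [Bucket(VecDeque::from(vec!["ScanObjects"]))];
    // Local work wins even though shared work is available.
    assert_eq!(pop_work(&mut local, &mut shared), Some("PrepareCollector"));
}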

src/scheduler/stat.rs (+7 -7)

@@ -12,7 +12,7 @@ pub struct SchedulerStat {

 impl SchedulerStat {
     /// Extract the work-packet name from the full type name.
-    /// i.e. simplifies `crate::scheduler::gc_works::SomeWorkPacket<Semispace>` to `SomeWorkPacket`.
+    /// i.e. simplifies `crate::scheduler::gc_work::SomeWorkPacket<Semispace>` to `SomeWorkPacket`.
     fn work_name(&self, name: &str) -> String {
         let end_index = name.find('<').unwrap_or_else(|| name.len());
         let name = name[..end_index].to_owned();
@@ -55,11 +55,11 @@ impl SchedulerStat {
             total_count += c;
             let n = self.work_id_name_map[t];
             stat.insert(
-                format!("works.{}.count", self.work_name(n)),
+                format!("work.{}.count", self.work_name(n)),
                 format!("{}", c),
             );
         }
-        stat.insert("total-works.count".to_owned(), format!("{}", total_count));
+        stat.insert("total-work.count".to_owned(), format!("{}", total_count));
         // Work execution times
         let mut total_durations = vec![];
         for (t, durations) in &self.work_durations {
@@ -74,7 +74,7 @@ impl SchedulerStat {
                     .collect::<Vec<_>>(),
             );
             stat.insert(
-                format!("works.{}.time.geomean", self.work_name(n)),
+                format!("work.{}.time.geomean", self.work_name(n)),
                 format!("{:.2}", geomean),
             );
         }
@@ -84,15 +84,15 @@ impl SchedulerStat {
             .collect::<Vec<_>>();
         if !durations.is_empty() {
             stat.insert(
-                "total-works.time.geomean".to_owned(),
+                "total-work.time.geomean".to_owned(),
                 format!("{:.2}", self.geomean(&durations)),
             );
             stat.insert(
-                "total-works.time.min".to_owned(),
+                "total-work.time.min".to_owned(),
                 format!("{:.2}", self.min(&durations)),
             );
             stat.insert(
-                "total-works.time.max".to_owned(),
+                "total-work.time.max".to_owned(),
                 format!("{:.2}", self.max(&durations)),
             );
         }
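The renamed keys (`works.*` to `work.*`) are produced via `work_name`, whose doc comment is updated in the first hunk. A standalone sketch of the documented simplification; the diff shows only the `<`-truncation step, so the trailing `::`-segment split here is an assumption taken from the doc comment's example:

fn work_name(full: &str) -> &str {
    // Drop the generic parameters, then keep the last path segment.
    let no_params = &full[..full.find('<').unwrap_or(full.len())];
    no_params.rsplit("::").next().unwrap()
}

fn main() {
    let name = work_name("crate::scheduler::gc_work::SomeWorkPacket<Semispace>");
    assert_eq!(name, "SomeWorkPacket");
    // A stat key after this commit then reads, e.g.:
    println!("work.{}.count", name);
}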

src/scheduler/work.rs (+1 -1)

@@ -19,7 +19,7 @@ pub trait Work<C: Context>: 'static + Send + Sync {
 /// A special kind of work that will execute on the coorddinator (i.e. controller) thread
 ///
 /// The coorddinator thread holds the global monitor lock when executing `CoordinatorWork`s.
-/// So, directly adding new works to any buckets will cause dead lock.
+/// So, directly adding new work to any buckets will cause dead lock.
 /// For this case, use `WorkBucket::add_with_priority_unsync` instead.
 pub trait CoordinatorWork<C: Context>: 'static + Send + Sync + Work<C> {}
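The updated doc comment states why the coordinator must not add work through the normal synchronized path: it already holds the global monitor lock, and the synchronized add re-takes that lock to notify workers. A minimal sketch of the hazard, assuming illustrative names (`std::sync::Mutex` is not reentrant, so the second `lock()` would block forever):

use std::sync::Mutex;

// Stand-in for the synchronized add path: locks the worker monitor to
// notify sleeping workers.
fn add_and_notify(monitor: &Mutex<()>) {
    let _guard = monitor.lock().unwrap(); // blocks if the caller holds it
    // condvar.notify_all() would go here
}

fn main() {
    let monitor = Mutex::new(());
    let _held = monitor.lock().unwrap(); // the coordinator holds the monitor
    // add_and_notify(&monitor);         // uncommenting this deadlocks
    // An `_unsync` variant that skips the lock+notify avoids the hazard.
    println!("done");
}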

src/scheduler/work_bucket.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -115,10 +115,10 @@ impl<C: Context> WorkBucket<C> {
115115
pub fn add<W: Work<C>>(&self, work: W) {
116116
self.add_with_priority(1000, box work);
117117
}
118-
pub fn bulk_add(&self, priority: usize, works: Vec<Box<dyn Work<C>>>) {
118+
pub fn bulk_add(&self, priority: usize, work_vec: Vec<Box<dyn Work<C>>>) {
119119
{
120120
let mut queue = self.queue.write();
121-
for w in works {
121+
for w in work_vec {
122122
queue.push(PrioritizedWork::new(priority, w));
123123
}
124124
}
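`bulk_add` wraps each packet in a `PrioritizedWork` and pushes the whole batch under a single write-lock scope. A reduced model of that prioritized queue; only the `PrioritizedWork` name comes from the diff, and the `BinaryHeap` plus ordering impls are assumptions about its shape:

use std::cmp::Ordering;
use std::collections::BinaryHeap;

struct PrioritizedWork {
    priority: usize,
    name: &'static str,
}

// Order packets by priority so the heap pops the highest first.
impl PartialEq for PrioritizedWork {
    fn eq(&self, other: &Self) -> bool {
        self.priority == other.priority
    }
}
impl Eq for PrioritizedWork {}
impl PartialOrd for PrioritizedWork {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for PrioritizedWork {
    fn cmp(&self, other: &Self) -> Ordering {
        self.priority.cmp(&other.priority)
    }
}

fn bulk_add(queue: &mut BinaryHeap<PrioritizedWork>, priority: usize, work_vec: Vec<&'static str>) {
    // One batch insert, mirroring the single write-lock scope in the diff.
    for name in work_vec {
        queue.push(PrioritizedWork { priority, name });
    }
}

fn main() {
    let mut queue = BinaryHeap::new();
    bulk_add(&mut queue, 1000, vec!["Prepare"]);
    bulk_add(&mut queue, 500, vec!["Release"]);
    assert_eq!(queue.pop().unwrap().name, "Prepare"); // higher priority first
}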

src/scheduler/worker.rs (+2 -2)

@@ -50,7 +50,7 @@ pub struct Worker<C: Context> {
     pub parked: AtomicBool,
     scheduler: Arc<Scheduler<C>>,
     local: WorkerLocalPtr,
-    pub local_works: WorkBucket<C>,
+    pub local_work_bucket: WorkBucket<C>,
     pub sender: Sender<CoordinatorMessage<C>>,
     pub stat: WorkerLocalStat,
     context: Option<&'static C>,
@@ -71,7 +71,7 @@ impl<C: Context> Worker<C> {
             ordinal,
             parked: AtomicBool::new(true),
             local: WorkerLocalPtr::UNINITIALIZED,
-            local_works: WorkBucket::new(true, scheduler.worker_monitor.clone()),
+            local_work_bucket: WorkBucket::new(true, scheduler.worker_monitor.clone()),
             sender: scheduler.channel.0.clone(),
             scheduler,
             stat: Default::default(),

src/util/sanity/sanity_checker.rs (+3 -3)

@@ -1,6 +1,6 @@
 use crate::plan::global::CopyContext;
 use crate::plan::Plan;
-use crate::scheduler::gc_works::*;
+use crate::scheduler::gc_work::*;
 use crate::scheduler::*;
 use crate::util::{Address, ObjectReference};
 use crate::vm::*;
@@ -94,7 +94,7 @@ impl<P: Plan, W: CopyContext + WorkerLocal> GCWork<P::VM> for SanityPrepare<P, W
                 .add(PrepareMutator::<P::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_works.add(PrepareCollector::<W>::new());
+            w.local_work_bucket.add(PrepareCollector::<W>::new());
         }
     }
 }
@@ -123,7 +123,7 @@ impl<P: Plan, W: CopyContext + WorkerLocal> GCWork<P::VM> for SanityRelease<P, W
                 .add(ReleaseMutator::<P::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_works.add(ReleaseCollector::<W>::new());
+            w.local_work_bucket.add(ReleaseCollector::<W>::new());
         }
     }
 }

src/vm/collection.rs (+1 -1)

@@ -1,5 +1,5 @@
 use crate::plan::MutatorContext;
-use crate::scheduler::gc_works::ProcessEdgesWork;
+use crate::scheduler::gc_work::ProcessEdgesWork;
 use crate::scheduler::*;
 use crate::util::OpaquePointer;
 use crate::vm::VMBinding;

src/vm/scanning.rs (+1 -1)

@@ -1,5 +1,5 @@
 use crate::plan::{Mutator, TransitiveClosure};
-use crate::scheduler::gc_works::ProcessEdgesWork;
+use crate::scheduler::gc_work::ProcessEdgesWork;
 use crate::scheduler::GCWorker;
 use crate::util::ObjectReference;
 use crate::util::OpaquePointer;

tests/scheduler.rs (+1 -1)

@@ -23,7 +23,7 @@ impl Work<()> for Sort {

 /// A work-packet to do array partition
 ///
-/// Recursively generates `Sort` works for partitioned sub-arrays.
+/// Recursively generates `Sort` work for partitioned sub-arrays.
 struct Partition(&'static mut [usize]);

 impl Work<()> for Partition {
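For context, this test drives the scheduler with a parallel quicksort: each `Partition` packet splits a sub-array and spawns further packets. A sequential sketch of that recursion, with an invented Lomuto-style `lomuto_partition` helper (the real test submits each recursive call to the scheduler as a new work packet instead):

// Sequential stand-in: each recursive call here corresponds to one new
// `Partition`/`Sort` packet in the test.
fn partition_work(slice: &mut [usize]) {
    if slice.len() <= 1 {
        return; // a trivial `Sort` packet: nothing to do
    }
    let pivot_index = lomuto_partition(slice);
    let (left, right) = slice.split_at_mut(pivot_index);
    partition_work(left);            // would become one new packet
    partition_work(&mut right[1..]); // and another
}

// Lomuto partition around the last element; returns the pivot's final index.
fn lomuto_partition(slice: &mut [usize]) -> usize {
    let pivot = slice.len() - 1;
    let mut store = 0;
    for i in 0..pivot {
        if slice[i] < slice[pivot] {
            slice.swap(i, store);
            store += 1;
        }
    }
    slice.swap(store, pivot);
    store
}

fn main() {
    let mut data = vec![5, 3, 8, 1, 9, 2];
    partition_work(&mut data);
    assert_eq!(data, vec![1, 2, 3, 5, 8, 9]);
}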

vmbindings/dummyvm/src/collection.rs (+1 -1)

@@ -3,7 +3,7 @@ use mmtk::MutatorContext;
 use mmtk::util::OpaquePointer;
 use mmtk::MMTK;
 use mmtk::scheduler::*;
-use mmtk::scheduler::gc_works::*;
+use mmtk::scheduler::gc_work::*;
 use DummyVM;

 pub struct VMCollection {}

vmbindings/dummyvm/src/scanning.rs (+1 -1)

@@ -2,7 +2,7 @@ use mmtk::vm::Scanning;
 use mmtk::{TransitiveClosure, Mutator};
 use mmtk::util::ObjectReference;
 use mmtk::util::OpaquePointer;
-use mmtk::scheduler::gc_works::*;
+use mmtk::scheduler::gc_work::*;
 use mmtk::scheduler::GCWorker;
 use crate::DummyVM;
