
Commit bbf5f49

Refactor: Use Atomic<Address> where appropriate (#843)
Use `Atomic<Address>` instead of `AtomicUsize` where appropriate. Closes #121

Signed-off-by: ClSlaid <[email protected]>
Signed-off-by: 蔡略 <[email protected]>
1 parent 0d6ef2c commit bbf5f49

File tree

3 files changed: +30 −25 lines changed
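Before the diffs, a brief primer: the `atomic` crate's `Atomic<T>` generalizes `AtomicUsize` to arbitrary `Copy` types, which is what makes a typed atomic cursor possible. Below is a minimal illustrative sketch, not MMTk code; `Addr` is a hypothetical word-sized newtype standing in for MMTk's `Address`, assuming the crate's `Copy`-based API.

```rust
use atomic::{Atomic, Ordering};

// Hypothetical stand-in for MMTk's `Address` (a word-sized newtype).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Addr(usize);

fn main() {
    let cell = Atomic::new(Addr(0x1000));
    cell.store(Addr(0x2000), Ordering::Relaxed);
    assert_eq!(cell.load(Ordering::Relaxed), Addr(0x2000));
    // Compare-and-swap works on the typed value directly; no casting
    // through `usize` and no `unsafe` reconstruction of the address.
    let _ = cell.compare_exchange(
        Addr(0x2000),
        Addr(0x3000),
        Ordering::AcqRel,
        Ordering::Relaxed,
    );
    assert_eq!(cell.load(Ordering::Relaxed), Addr(0x3000));
}
```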

src/policy/lockfreeimmortalspace.rs

Lines changed: 17 additions & 12 deletions

@@ -1,19 +1,22 @@
+use atomic::Atomic;
+
+use std::marker::PhantomData;
+use std::sync::atomic::Ordering;
+
 use crate::mmtk::SFT_MAP;
+use crate::policy::sft::GCWorkerMutRef;
 use crate::policy::sft::SFT;
 use crate::policy::space::{CommonSpace, Space};
 use crate::util::address::Address;
-use crate::util::heap::PageResource;
-use crate::util::ObjectReference;
 
-use crate::policy::sft::GCWorkerMutRef;
 use crate::util::conversions;
 use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, AVAILABLE_START};
+use crate::util::heap::PageResource;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::metadata::side_metadata::SideMetadataSanity;
 use crate::util::opaque_pointer::*;
+use crate::util::ObjectReference;
 use crate::vm::VMBinding;
-use std::marker::PhantomData;
-use std::sync::atomic::{AtomicUsize, Ordering};
 
 /// This type implements a lock free version of the immortal collection
 /// policy. This is close to the OpenJDK's epsilon GC.
@@ -25,10 +28,7 @@ pub struct LockFreeImmortalSpace<VM: VMBinding> {
     #[allow(unused)]
     name: &'static str,
     /// Heap range start
-    ///
-    /// We use `AtomicUsize` instead of `Address` here to atomically bumping this cursor.
-    /// TODO: Better address type here (Atomic<Address>?)
-    cursor: AtomicUsize,
+    cursor: Atomic<Address>,
     /// Heap range end
     limit: Address,
     /// start of this space
@@ -108,15 +108,20 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
     }
 
     fn reserved_pages(&self) -> usize {
-        let cursor = unsafe { Address::from_usize(self.cursor.load(Ordering::Relaxed)) };
+        let cursor = self.cursor.load(Ordering::Relaxed);
         let data_pages = conversions::bytes_to_pages_up(self.limit - cursor);
         let meta_pages = self.metadata.calculate_reserved_pages(data_pages);
         data_pages + meta_pages
     }
 
     fn acquire(&self, _tls: VMThread, pages: usize) -> Address {
         let bytes = conversions::pages_to_bytes(pages);
-        let start = unsafe { Address::from_usize(self.cursor.fetch_add(bytes, Ordering::Relaxed)) };
+        let start = self
+            .cursor
+            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
+                Some(addr.add(bytes))
+            })
+            .expect("update cursor failed");
         if start + bytes > self.limit {
             panic!("OutOfMemory")
         }
@@ -182,7 +187,7 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
         // https://github.com/mmtk/mmtk-core/issues/314
         let space = Self {
             name: args.name,
-            cursor: AtomicUsize::new(AVAILABLE_START.as_usize()),
+            cursor: Atomic::new(AVAILABLE_START),
            limit: AVAILABLE_START + total_bytes,
             start: AVAILABLE_START,
             extent: total_bytes,
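The `fetch_update` call above is the typed replacement for the old `fetch_add` on a raw `usize` cursor. Here is a self-contained sketch of the same lock-free bump-pointer allocation pattern; it is illustrative only, with plain `usize` standing in for `Address` and `BumpSpace` as a hypothetical type, not MMTk's.

```rust
use atomic::{Atomic, Ordering};

struct BumpSpace {
    cursor: Atomic<usize>,
    limit: usize,
}

impl BumpSpace {
    fn acquire(&self, bytes: usize) -> Option<usize> {
        // `fetch_update` retries the closure under contention and returns
        // the value observed *before* the successful update.
        let start = self
            .cursor
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |addr| {
                Some(addr + bytes)
            })
            .expect("update cursor failed");
        // As in the real code, the bound is checked after bumping.
        if start + bytes > self.limit {
            None // the real space panics with "OutOfMemory" here
        } else {
            Some(start)
        }
    }
}

fn main() {
    let space = BumpSpace {
        cursor: Atomic::new(0x1000),
        limit: 0x2000,
    };
    assert_eq!(space.acquire(0x100), Some(0x1000));
    assert_eq!(space.acquire(0x100), Some(0x1100));
}
```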

src/policy/marksweepspace/malloc_ms/global.rs

Lines changed: 12 additions & 13 deletions

@@ -1,3 +1,5 @@
+use atomic::Atomic;
+
 use super::metadata::*;
 use crate::plan::ObjectQueue;
 use crate::plan::VectorObjectQueue;
@@ -39,8 +41,8 @@ pub struct MallocSpace<VM: VMBinding> {
     phantom: PhantomData<VM>,
     active_bytes: AtomicUsize,
     active_pages: AtomicUsize,
-    pub chunk_addr_min: AtomicUsize, // XXX: have to use AtomicUsize to represent an Address
-    pub chunk_addr_max: AtomicUsize,
+    pub chunk_addr_min: Atomic<Address>,
+    pub chunk_addr_max: Atomic<Address>,
     metadata: SideMetadataContext,
     /// Work packet scheduler
     scheduler: Arc<GCWorkScheduler<VM>>,
@@ -255,8 +257,8 @@ impl<VM: VMBinding> MallocSpace<VM> {
             phantom: PhantomData,
             active_bytes: AtomicUsize::new(0),
             active_pages: AtomicUsize::new(0),
-            chunk_addr_min: AtomicUsize::new(usize::max_value()), // XXX: have to use AtomicUsize to represent an Address
-            chunk_addr_max: AtomicUsize::new(0),
+            chunk_addr_min: Atomic::new(Address::MAX),
+            chunk_addr_max: Atomic::new(Address::ZERO),
             metadata: SideMetadataContext {
                 global: args.global_side_metadata_specs.clone(),
                 local: metadata::extract_side_metadata(&[
@@ -428,12 +430,11 @@ impl<VM: VMBinding> MallocSpace<VM> {
         // Update chunk_addr_min, basing on the start of the allocation: addr.
         {
             let min_chunk_start = conversions::chunk_align_down(addr);
-            let min_chunk_usize = min_chunk_start.as_usize();
             let mut min = self.chunk_addr_min.load(Ordering::Relaxed);
-            while min_chunk_usize < min {
+            while min_chunk_start < min {
                 match self.chunk_addr_min.compare_exchange_weak(
                     min,
-                    min_chunk_usize,
+                    min_chunk_start,
                     Ordering::AcqRel,
                     Ordering::Relaxed,
                 ) {
@@ -446,12 +447,11 @@ impl<VM: VMBinding> MallocSpace<VM> {
         // Update chunk_addr_max, basing on the end of the allocation: addr + size.
         {
             let max_chunk_start = conversions::chunk_align_down(addr + size);
-            let max_chunk_usize = max_chunk_start.as_usize();
             let mut max = self.chunk_addr_max.load(Ordering::Relaxed);
-            while max_chunk_usize > max {
+            while max_chunk_start > max {
                 match self.chunk_addr_max.compare_exchange_weak(
                     max,
-                    max_chunk_usize,
+                    max_chunk_start,
                     Ordering::AcqRel,
                     Ordering::Relaxed,
                 ) {
@@ -467,9 +467,8 @@ impl<VM: VMBinding> MallocSpace<VM> {
     pub fn release(&mut self) {
         use crate::scheduler::WorkBucketStage;
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
-        let mut chunk = unsafe { Address::from_usize(self.chunk_addr_min.load(Ordering::Relaxed)) }; // XXX: have to use AtomicUsize to represent an Address
-        let end = unsafe { Address::from_usize(self.chunk_addr_max.load(Ordering::Relaxed)) }
-            + BYTES_IN_CHUNK;
+        let mut chunk = self.chunk_addr_min.load(Ordering::Relaxed);
+        let end = self.chunk_addr_max.load(Ordering::Relaxed) + BYTES_IN_CHUNK;
 
         // Since only a single thread generates the sweep work packets as well as it is a Stop-the-World collector,
         // we can assume that the chunk mark metadata is not being accessed by anything else and hence we use
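The `chunk_addr_min`/`chunk_addr_max` updates above follow the standard CAS retry loop for maintaining a running extremum. A self-contained sketch of the minimum case follows; `update_min` is hypothetical and `usize` stands in for `Address`.

```rust
use atomic::{Atomic, Ordering};

fn update_min(min_cell: &Atomic<usize>, candidate: usize) {
    let mut min = min_cell.load(Ordering::Relaxed);
    // Retry until we either install the smaller value or observe that a
    // concurrent update has made the stored minimum smaller than ours.
    while candidate < min {
        match min_cell.compare_exchange_weak(
            min,
            candidate,
            Ordering::AcqRel,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,                // installed the new minimum
            Err(current) => min = current, // lost the race; re-check
        }
    }
}

fn main() {
    let min_cell = Atomic::new(usize::MAX);
    update_min(&min_cell, 0x4000);
    update_min(&min_cell, 0x2000);
    update_min(&min_cell, 0x3000); // not smaller: leaves 0x2000 in place
    assert_eq!(min_cell.load(Ordering::Relaxed), 0x2000);
}
```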

src/util/address.rs

Lines changed: 1 addition & 0 deletions

@@ -1,4 +1,5 @@
 use atomic_traits::Atomic;
+
 use std::fmt;
 use std::mem;
 use std::ops::*;
