@@ -1,3 +1,5 @@
+use atomic::Atomic;
+
 use super::metadata::*;
 use crate::plan::ObjectQueue;
 use crate::plan::VectorObjectQueue;
@@ -39,8 +41,8 @@ pub struct MallocSpace<VM: VMBinding> {
     phantom: PhantomData<VM>,
     active_bytes: AtomicUsize,
     active_pages: AtomicUsize,
-    pub chunk_addr_min: AtomicUsize, // XXX: have to use AtomicUsize to represent an Address
-    pub chunk_addr_max: AtomicUsize,
+    pub chunk_addr_min: Atomic<Address>,
+    pub chunk_addr_max: Atomic<Address>,
     metadata: SideMetadataContext,
     /// Work packet scheduler
     scheduler: Arc<GCWorkScheduler<VM>>,
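For context, a minimal sketch of what the `atomic` crate's `Atomic<T>` buys here: a typed atomic slot over any word-sized `Copy` value, so the fields above can hold addresses directly instead of round-tripping through `AtomicUsize`. `Addr` below is a hypothetical stand-in for mmtk's `Address`, and this assumes the crate's 0.5-era API where `T: Copy` suffices.

```rust
// Sketch only: `Addr` is a hypothetical stand-in for mmtk's Address.
// Assumes atomic 0.5, where Atomic<T> supports load/store for Copy types.
use atomic::Atomic;
use std::sync::atomic::Ordering;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Addr(usize);

fn main() {
    // A typed atomic slot: no unsafe usize <-> address conversions needed.
    let slot = Atomic::new(Addr(0x1000));
    slot.store(Addr(0x2000), Ordering::Relaxed);
    assert_eq!(slot.load(Ordering::Relaxed), Addr(0x2000));
}
```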
@@ -255,8 +257,8 @@ impl<VM: VMBinding> MallocSpace<VM> {
             phantom: PhantomData,
             active_bytes: AtomicUsize::new(0),
             active_pages: AtomicUsize::new(0),
-            chunk_addr_min: AtomicUsize::new(usize::max_value()), // XXX: have to use AtomicUsize to represent an Address
-            chunk_addr_max: AtomicUsize::new(0),
+            chunk_addr_min: Atomic::new(Address::MAX),
+            chunk_addr_max: Atomic::new(Address::ZERO),
             metadata: SideMetadataContext {
                 global: args.global_side_metadata_specs.clone(),
                 local: metadata::extract_side_metadata(&[
@@ -428,12 +430,11 @@ impl<VM: VMBinding> MallocSpace<VM> {
         // Update chunk_addr_min, based on the start of the allocation: addr.
         {
             let min_chunk_start = conversions::chunk_align_down(addr);
-            let min_chunk_usize = min_chunk_start.as_usize();
             let mut min = self.chunk_addr_min.load(Ordering::Relaxed);
-            while min_chunk_usize < min {
+            while min_chunk_start < min {
                 match self.chunk_addr_min.compare_exchange_weak(
                     min,
-                    min_chunk_usize,
+                    min_chunk_start,
                     Ordering::AcqRel,
                     Ordering::Relaxed,
                 ) {
@@ -446,12 +447,11 @@ impl<VM: VMBinding> MallocSpace<VM> {
         // Update chunk_addr_max, based on the end of the allocation: addr + size.
         {
             let max_chunk_start = conversions::chunk_align_down(addr + size);
-            let max_chunk_usize = max_chunk_start.as_usize();
             let mut max = self.chunk_addr_max.load(Ordering::Relaxed);
-            while max_chunk_usize > max {
+            while max_chunk_start > max {
                 match self.chunk_addr_max.compare_exchange_weak(
                     max,
-                    max_chunk_usize,
+                    max_chunk_start,
                     Ordering::AcqRel,
                     Ordering::Relaxed,
                 ) {
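The two loops above are the standard lock-free min/max update: read the current value, then CAS in the candidate only while it still improves on what is stored, re-reading on failure. A self-contained sketch of the same pattern, written against plain `AtomicUsize` so it compiles on its own:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Atomically lower `slot` to `candidate` if `candidate` is smaller.
/// Same shape as the chunk_addr_min loop in the diff above.
fn fetch_min(slot: &AtomicUsize, candidate: usize) {
    let mut current = slot.load(Ordering::Relaxed);
    while candidate < current {
        // compare_exchange_weak may fail spuriously or because another
        // thread won the race; either way Err carries the fresh value.
        match slot.compare_exchange_weak(
            current,
            candidate,
            Ordering::AcqRel,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,
            Err(observed) => current = observed,
        }
    }
}

fn main() {
    let min = AtomicUsize::new(usize::MAX);
    fetch_min(&min, 0x40_0000);
    fetch_min(&min, 0x80_0000); // no effect: not smaller
    assert_eq!(min.load(Ordering::Relaxed), 0x40_0000);
}
```

Note the loop condition doubles as the exit test: if another thread has already stored something smaller than the candidate, the refreshed read falls out of the `while` without a redundant CAS.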
@@ -467,9 +467,8 @@ impl<VM: VMBinding> MallocSpace<VM> {
     pub fn release(&mut self) {
         use crate::scheduler::WorkBucketStage;
         let mut work_packets: Vec<Box<dyn GCWork<VM>>> = vec![];
-        let mut chunk = unsafe { Address::from_usize(self.chunk_addr_min.load(Ordering::Relaxed)) }; // XXX: have to use AtomicUsize to represent an Address
-        let end = unsafe { Address::from_usize(self.chunk_addr_max.load(Ordering::Relaxed)) }
-            + BYTES_IN_CHUNK;
+        let mut chunk = self.chunk_addr_min.load(Ordering::Relaxed);
+        let end = self.chunk_addr_max.load(Ordering::Relaxed) + BYTES_IN_CHUNK;
 
         // Since only a single thread generates the sweep work packets, and this is a Stop-the-World collector,
         // we can assume that the chunk mark metadata is not being accessed by anything else and hence we use
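`release()` then walks every chunk between the recorded bounds and emits one unit of sweep work per chunk. A hypothetical sketch of that walk; the 4 MiB chunk size and the `emit` callback are assumptions for illustration, standing in for mmtk's `BYTES_IN_CHUNK` constant and its sweep work packets:

```rust
// Sketch only: chunk size and `emit` are placeholders, not mmtk's API.
const BYTES_IN_CHUNK: usize = 4 << 20;

fn for_each_chunk(chunk_min: usize, chunk_max: usize, mut emit: impl FnMut(usize)) {
    // chunk_max is the *start* of the last chunk, so the walk ends one
    // chunk past it, mirroring `end = chunk_addr_max + BYTES_IN_CHUNK`.
    let mut chunk = chunk_min;
    let end = chunk_max + BYTES_IN_CHUNK;
    while chunk < end {
        emit(chunk); // e.g. schedule a sweep work packet for this chunk
        chunk += BYTES_IN_CHUNK;
    }
}

fn main() {
    let mut chunks = vec![];
    for_each_chunk(0x0040_0000, 0x00C0_0000, |c| chunks.push(c));
    assert_eq!(chunks, vec![0x0040_0000, 0x0080_0000, 0x00C0_0000]);
}
```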