Merged
src/policy/lockfreeimmortalspace.rs (1 addition, 1 deletion)
@@ -86,7 +86,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
);
self.limit = AVAILABLE_START + total_bytes;
// Eagerly memory map the entire heap (also zero all the memory)
-crate::util::memory::dzmmap(AVAILABLE_START, total_bytes).unwrap();
+crate::util::memory::dzmmap_noreplace(AVAILABLE_START, total_bytes).unwrap();
if try_map_metadata_space(
AVAILABLE_START,
total_bytes,
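The substance of this change is the switch from `dzmmap` to `dzmmap_noreplace`: a demand-zero mmap that fails instead of silently overwriting an existing mapping at the requested address. The helper itself is not shown in this diff; the sketch below is a hedged guess at its shape, assuming the conventional Linux implementation via `MAP_FIXED_NOREPLACE` (kernel 4.17+). The real function in `crate::util::memory` may differ.

```rust
// Hedged sketch only: the real dzmmap_noreplace lives in crate::util::memory.
use std::io::{Error, Result};

/// Demand-zero mmap of `size` bytes at exactly `start`, failing instead of
/// replacing any mapping that already covers that range.
pub fn dzmmap_noreplace(start: usize, size: usize) -> Result<()> {
    let prot = libc::PROT_READ | libc::PROT_WRITE;
    let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED_NOREPLACE;
    let res = unsafe { libc::mmap(start as *mut libc::c_void, size, prot, flags, -1, 0) };
    if res == libc::MAP_FAILED {
        return Err(Error::last_os_error());
    }
    // Kernels that predate MAP_FIXED_NOREPLACE ignore the flag and may map
    // elsewhere; treat that as a failure too rather than clobbering memory.
    if res as usize != start {
        unsafe { libc::munmap(res, size) };
        return Err(Error::from_raw_os_error(libc::EEXIST));
    }
    Ok(())
}
```

With plain `MAP_FIXED` (presumably the old `dzmmap` behaviour), the same call would silently unmap whatever was already there, which is exactly the hazard the rename guards against.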
src/util/address.rs (9 additions, 0 deletions)
@@ -204,6 +204,15 @@ impl Address {
Address(self.0 + size)
}

+// We implemented the Sub trait but we still keep this sub function.
+// The sub() function is const fn, and we can use it to declare Address constants.
+// The Sub trait function cannot be const.
+#[allow(clippy::should_implement_trait)]
+#[inline(always)]
+pub const fn sub(self, size: usize) -> Address {
+Address(self.0 - size)
+}
+
/// Loads a value of type T from the address
/// # Safety
/// This could cause a segmentation fault if the address is invalid
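The comment in the added lines gives the rationale: trait methods cannot be `const fn`, so an inherent `sub` is kept alongside the `Sub` impl purely so that `Address` constants can be computed at compile time. A hypothetical illustration (the constant names and values here are invented, not from the codebase):

```rust
use crate::util::Address;

// Invented value for illustration; from_usize is unsafe because an
// arbitrary usize is not necessarily a valid address.
const HEAP_END: Address = unsafe { Address::from_usize(0x8000_0000) };

// Legal: Address::sub is a const fn, so this is evaluated at compile time.
const GUARD_PAGE: Address = HEAP_END.sub(4096);

// Would NOT compile as a constant: `-` goes through the Sub trait, and
// trait methods cannot currently be called in const contexts.
// const GUARD_PAGE: Address = HEAP_END - 4096;
```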
src/util/heap/layout/byte_map_mmapper.rs (135 additions, 72 deletions)
@@ -10,7 +10,7 @@ use std::sync::atomic::AtomicU8;
use std::sync::atomic::Ordering;
use std::sync::Mutex;

-use crate::util::memory::{dzmmap, mprotect, munprotect};
+use crate::util::memory::{dzmmap_noreplace, mprotect, munprotect};
use std::mem::transmute;

const UNMAPPED: u8 = 0;
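For context on the hunk that follows: `ensure_mapped` uses a double-checked pattern over these per-chunk state bytes, so only one thread actually mmaps a given chunk. A distilled, hypothetical sketch of that shape (the real method also maps metadata, handles errors, and covers the PROTECTED state):

```rust
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Mutex;

const UNMAPPED: u8 = 0;
const MAPPED: u8 = 1;

struct ChunkStates {
    mapped: Vec<AtomicU8>,
    lock: Mutex<()>,
}

impl ChunkStates {
    /// `map_chunk` stands in for the dzmmap_noreplace call in the real code.
    fn ensure_mapped(&self, chunk: usize, map_chunk: impl FnOnce() -> std::io::Result<()>) {
        // Fast path: no lock taken if the chunk is already mapped.
        if self.mapped[chunk].load(Ordering::Relaxed) != UNMAPPED {
            return;
        }
        let _guard = self.lock.lock().unwrap();
        // Re-check under the lock: as the diff's comment puts it, the chunk
        // "might have become MAPPED here".
        if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED {
            map_chunk().unwrap();
            self.mapped[chunk].store(MAPPED, Ordering::Relaxed);
        }
    }
}
```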
@@ -74,7 +74,7 @@ impl Mmapper for ByteMapMmapper {
let guard = self.lock.lock().unwrap();
// might have become MAPPED here
if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED {
-match dzmmap(mmap_start, MMAP_CHUNK_BYTES) {
+match dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) {
Ok(_) => {
self.map_metadata(
mmap_start,
@@ -211,21 +211,20 @@ impl Default for ByteMapMmapper {
#[cfg(test)]
mod tests {
use crate::util::heap::layout::{ByteMapMmapper, Mmapper};
-use crate::util::{conversions, Address};
+use crate::util::Address;

use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::conversions::pages_to_bytes;
use crate::util::heap::layout::byte_map_mmapper::{MAPPED, PROTECTED};
use crate::util::heap::layout::vm_layout_constants::MMAP_CHUNK_BYTES;
+use crate::util::memory;
+use crate::util::test_util::BYTE_MAP_MMAPPER_TEST_REGION;
+use crate::util::test_util::{serial_test, with_cleanup};
use std::sync::atomic::Ordering;

const CHUNK_SIZE: usize = 1 << 22;
-#[cfg(target_os = "linux")]
-const FIXED_ADDRESS: Address =
-unsafe { conversions::chunk_align_down(Address::from_usize(0x6000_0000)) };
-#[cfg(target_os = "macos")]
-const FIXED_ADDRESS: Address =
-unsafe { conversions::chunk_align_down(Address::from_usize(0x0001_3500_0000)) };
+const FIXED_ADDRESS: Address = BYTE_MAP_MMAPPER_TEST_REGION.start;
+const MAX_SIZE: usize = BYTE_MAP_MMAPPER_TEST_REGION.size;
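The rewritten tests below all share this one fixed address region, so they can no longer run on cargo's default parallel test threads, and each must unmap the region afterwards even when an assertion fails. That is what the two new `test_util` helpers provide. Their implementations are not part of this diff; a minimal sketch of what they plausibly look like:

```rust
// Hedged sketch; the real helpers live in crate::util::test_util.
use std::panic;
use std::sync::Mutex;

static LOCK: Mutex<()> = Mutex::new(());

/// Run `f` while holding a global lock, serializing tests that would
/// otherwise race on the shared fixed address region.
pub fn serial_test<F: FnOnce()>(f: F) {
    // Recover from poisoning so one failed test doesn't wedge the rest.
    let _guard = LOCK.lock().unwrap_or_else(|e| e.into_inner());
    f();
}

/// Run `test`, then always run `cleanup` (here, the munmap of the test
/// region), re-raising any panic so failures still surface.
pub fn with_cleanup<T, C>(test: T, cleanup: C)
where
    T: FnOnce() + panic::UnwindSafe,
    C: FnOnce(),
{
    let result = panic::catch_unwind(test);
    cleanup();
    if let Err(err) = result {
        panic::resume_unwind(err);
    }
}
```

Presumably `BYTE_MAP_MMAPPER_TEST_REGION` bundles a fixed start address with a size large enough for every test here, which is why each cleanup closure can `munmap(FIXED_ADDRESS, MAX_SIZE)` unconditionally.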

#[test]
fn address_to_mmap_chunks() {
@@ -266,84 +265,148 @@ mod tests {

#[test]
fn ensure_mapped_1page() {
-let mmapper = ByteMapMmapper::new();
-let pages = 1;
-let empty_vec = vec![];
-mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-let end_chunk =
-ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-for chunk in start_chunk..end_chunk {
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-}
+serial_test(|| {
+with_cleanup(
+|| {
+let mmapper = ByteMapMmapper::new();
+let pages = 1;
+let empty_vec = vec![];
+mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+FIXED_ADDRESS + pages_to_bytes(pages),
+);
+for chunk in start_chunk..end_chunk {
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+}
+},
+|| {
+memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+},
+)
+})
}

#[test]
fn ensure_mapped_1chunk() {
-let mmapper = ByteMapMmapper::new();
-let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-let empty_vec = vec![];
-mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-let end_chunk =
-ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-for chunk in start_chunk..end_chunk {
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-}
+serial_test(|| {
+with_cleanup(
+|| {
+let mmapper = ByteMapMmapper::new();
+let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+let empty_vec = vec![];
+mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+FIXED_ADDRESS + pages_to_bytes(pages),
+);
+for chunk in start_chunk..end_chunk {
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+}
+},
+|| {
+memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+},
+)
+})
}

#[test]
fn ensure_mapped_more_than_1chunk() {
-let mmapper = ByteMapMmapper::new();
-let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
-let empty_vec = vec![];
-mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-let end_chunk =
-ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-assert_eq!(end_chunk - start_chunk, 2);
-for chunk in start_chunk..end_chunk {
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-}
+serial_test(|| {
+with_cleanup(
+|| {
+let mmapper = ByteMapMmapper::new();
+let pages =
+(MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
+let empty_vec = vec![];
+mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+FIXED_ADDRESS + pages_to_bytes(pages),
+);
+assert_eq!(end_chunk - start_chunk, 2);
+for chunk in start_chunk..end_chunk {
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+}
+},
+|| {
+memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+},
+)
+})
}

#[test]
fn protect() {
-// map 2 chunks
-let mmapper = ByteMapMmapper::new();
-let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-let empty_vec = vec![];
-mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-
-// protect 1 chunk
-mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
-
-let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
-assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+serial_test(|| {
+with_cleanup(
+|| {
+// map 2 chunks
+let mmapper = ByteMapMmapper::new();
+let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+let empty_vec = vec![];
+mmapper.ensure_mapped(
+FIXED_ADDRESS,
+pages_per_chunk * 2,
+&empty_vec,
+&empty_vec,
+);
+
+// protect 1 chunk
+mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+
+let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
+assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+},
+|| {
+memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+},
+)
+})
}

#[test]
fn ensure_mapped_on_protected_chunks() {
-// map 2 chunks
-let mmapper = ByteMapMmapper::new();
-let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-let empty_vec = vec![];
-mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-
-// protect 1 chunk
-mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
-
-let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
-assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
-
-// ensure mapped - this will unprotect the previously protected chunk
-mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+serial_test(|| {
+with_cleanup(
+|| {
+// map 2 chunks
+let mmapper = ByteMapMmapper::new();
+let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+let empty_vec = vec![];
+mmapper.ensure_mapped(
+FIXED_ADDRESS,
+pages_per_chunk * 2,
+&empty_vec,
+&empty_vec,
+);
+
+// protect 1 chunk
+mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+
+let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
+assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+
+// ensure mapped - this will unprotect the previously protected chunk
+mmapper.ensure_mapped(
+FIXED_ADDRESS,
+pages_per_chunk * 2,
+&empty_vec,
+&empty_vec,
+);
+assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+},
+|| {
+memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+},
+)
+})
}
}