
Commit 4e8d32c

Mmap refactoring (#296)
* Add unit tests for mmap and some refactoring.
* Replace dzmmap with dzmmap_noreplace in almost all use cases except the ones in side metadata.
* Make dzmmap unsafe. Add some comments.
* mmap_noreserve maps memory with PROT_NONE. We have to do dzmmap before actually using it.
1 parent eb949af commit 4e8d32c

10 files changed: +592 -259 lines changed

src/policy/lockfreeimmortalspace.rs

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
         );
         self.limit = AVAILABLE_START + total_bytes;
         // Eagerly memory map the entire heap (also zero all the memory)
-        crate::util::memory::dzmmap(AVAILABLE_START, total_bytes).unwrap();
+        crate::util::memory::dzmmap_noreplace(AVAILABLE_START, total_bytes).unwrap();
         if try_map_metadata_space(
             AVAILABLE_START,
             total_bytes,
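
The change above swaps the eager heap mapping from dzmmap to dzmmap_noreplace. As a rough sketch of the distinction the commit message draws (this uses raw libc calls on Linux and a made-up helper name, map_at; it is not the crate::util::memory implementation):

// Sketch only: not the crate::util::memory implementation. `map_at` is a
// hypothetical helper used to illustrate the difference in mmap flags.
use std::io::{Error, Result};

unsafe fn map_at(addr: usize, size: usize, prot: libc::c_int, flags: libc::c_int) -> Result<()> {
    let ret = libc::mmap(
        addr as *mut libc::c_void,
        size,
        prot,
        libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | flags,
        -1,
        0,
    );
    if ret == libc::MAP_FAILED {
        Err(Error::last_os_error())
    } else {
        Ok(())
    }
}

// dzmmap-style: MAP_FIXED silently replaces whatever is already mapped at
// `addr`, which is why this commit turns dzmmap into an unsafe function:
//     map_at(addr, size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_FIXED)
// dzmmap_noreplace-style: MAP_FIXED_NOREPLACE fails instead of clobbering an
// existing mapping, so it becomes the default for the spaces and the mmapper:
//     map_at(addr, size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_FIXED_NOREPLACE)
// mmap_noreserve-style: the range is only reserved with PROT_NONE, so a
// dzmmap* call has to make it accessible before the memory is actually used:
//     map_at(addr, size, libc::PROT_NONE, libc::MAP_FIXED_NOREPLACE | libc::MAP_NORESERVE)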

src/util/address.rs

Lines changed: 9 additions & 0 deletions
@@ -204,6 +204,15 @@ impl Address {
         Address(self.0 + size)
     }
 
+    // We implemented the Sub trait but we still keep this sub function.
+    // The sub() function is const fn, and we can use it to declare Address constants.
+    // The Sub trait function cannot be const.
+    #[allow(clippy::should_implement_trait)]
+    #[inline(always)]
+    pub const fn sub(self, size: usize) -> Address {
+        Address(self.0 - size)
+    }
+
     /// loads a value of type T from the address
     /// # Safety
     /// This could throw a segment fault if the address is invalid
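
Because the new sub is a const fn, it can be used in constant expressions, which the Sub trait impl cannot. A minimal, hypothetical illustration (the constant names are invented; Address::from_usize is assumed to be a const unsafe fn, as in the test constants elsewhere in this commit):

// Hypothetical constants, for illustration only.
const REGION_END: Address = unsafe { Address::from_usize(0x7000_0000) };
// `sub` is a const fn, so it is usable in a constant initializer;
// the `Sub` trait impl could not be called here.
const REGION_LAST_WORD: Address = REGION_END.sub(8);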

src/util/heap/layout/byte_map_mmapper.rs

Lines changed: 135 additions & 72 deletions
@@ -10,7 +10,7 @@ use std::sync::atomic::AtomicU8;
 use std::sync::atomic::Ordering;
 use std::sync::Mutex;
 
-use crate::util::memory::{dzmmap, mprotect, munprotect};
+use crate::util::memory::{dzmmap_noreplace, mprotect, munprotect};
 use std::mem::transmute;
 
 const UNMAPPED: u8 = 0;
@@ -74,7 +74,7 @@ impl Mmapper for ByteMapMmapper {
             let guard = self.lock.lock().unwrap();
             // might have become MAPPED here
             if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED {
-                match dzmmap(mmap_start, MMAP_CHUNK_BYTES) {
+                match dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) {
                     Ok(_) => {
                         self.map_metadata(
                             mmap_start,
@@ -211,21 +211,20 @@ impl Default for ByteMapMmapper {
 #[cfg(test)]
 mod tests {
     use crate::util::heap::layout::{ByteMapMmapper, Mmapper};
-    use crate::util::{conversions, Address};
+    use crate::util::Address;
 
     use crate::util::constants::LOG_BYTES_IN_PAGE;
     use crate::util::conversions::pages_to_bytes;
     use crate::util::heap::layout::byte_map_mmapper::{MAPPED, PROTECTED};
     use crate::util::heap::layout::vm_layout_constants::MMAP_CHUNK_BYTES;
+    use crate::util::memory;
+    use crate::util::test_util::BYTE_MAP_MMAPPER_TEST_REGION;
+    use crate::util::test_util::{serial_test, with_cleanup};
     use std::sync::atomic::Ordering;
 
     const CHUNK_SIZE: usize = 1 << 22;
-    #[cfg(target_os = "linux")]
-    const FIXED_ADDRESS: Address =
-        unsafe { conversions::chunk_align_down(Address::from_usize(0x6000_0000)) };
-    #[cfg(target_os = "macos")]
-    const FIXED_ADDRESS: Address =
-        unsafe { conversions::chunk_align_down(Address::from_usize(0x0001_3500_0000)) };
+    const FIXED_ADDRESS: Address = BYTE_MAP_MMAPPER_TEST_REGION.start;
+    const MAX_SIZE: usize = BYTE_MAP_MMAPPER_TEST_REGION.size;
 
     #[test]
     fn address_to_mmap_chunks() {
@@ -266,84 +265,148 @@ mod tests {
 
     #[test]
     fn ensure_mapped_1page() {
-        let mmapper = ByteMapMmapper::new();
-        let pages = 1;
-        let empty_vec = vec![];
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-        let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-        let end_chunk =
-            ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-        for chunk in start_chunk..end_chunk {
-            assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-        }
+        serial_test(|| {
+            with_cleanup(
+                || {
+                    let mmapper = ByteMapMmapper::new();
+                    let pages = 1;
+                    let empty_vec = vec![];
+                    mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+                    let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+                        FIXED_ADDRESS + pages_to_bytes(pages),
+                    );
+                    for chunk in start_chunk..end_chunk {
+                        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+                    }
+                },
+                || {
+                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                },
+            )
+        })
     }
 
     #[test]
     fn ensure_mapped_1chunk() {
-        let mmapper = ByteMapMmapper::new();
-        let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-        let empty_vec = vec![];
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-        let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-        let end_chunk =
-            ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-        for chunk in start_chunk..end_chunk {
-            assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-        }
+        serial_test(|| {
+            with_cleanup(
+                || {
+                    let mmapper = ByteMapMmapper::new();
+                    let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+                    let empty_vec = vec![];
+                    mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+                    let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+                        FIXED_ADDRESS + pages_to_bytes(pages),
+                    );
+                    for chunk in start_chunk..end_chunk {
+                        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+                    }
+                },
+                || {
+                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                },
+            )
+        })
     }
 
     #[test]
     fn ensure_mapped_more_than_1chunk() {
-        let mmapper = ByteMapMmapper::new();
-        let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
-        let empty_vec = vec![];
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
-
-        let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-        let end_chunk =
-            ByteMapMmapper::address_to_mmap_chunks_up(FIXED_ADDRESS + pages_to_bytes(pages));
-        assert_eq!(end_chunk - start_chunk, 2);
-        for chunk in start_chunk..end_chunk {
-            assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-        }
+        serial_test(|| {
+            with_cleanup(
+                || {
+                    let mmapper = ByteMapMmapper::new();
+                    let pages =
+                        (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
+                    let empty_vec = vec![];
+                    mmapper.ensure_mapped(FIXED_ADDRESS, pages, &empty_vec, &empty_vec);
+
+                    let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+                    let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
+                        FIXED_ADDRESS + pages_to_bytes(pages),
+                    );
+                    assert_eq!(end_chunk - start_chunk, 2);
+                    for chunk in start_chunk..end_chunk {
+                        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+                    }
+                },
+                || {
+                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                },
+            )
+        })
     }
 
     #[test]
     fn protect() {
-        // map 2 chunks
-        let mmapper = ByteMapMmapper::new();
-        let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-        let empty_vec = vec![];
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-
-        // protect 1 chunk
-        mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
-
-        let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
-        assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+        serial_test(|| {
+            with_cleanup(
+                || {
+                    // map 2 chunks
+                    let mmapper = ByteMapMmapper::new();
+                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+                    let empty_vec = vec![];
+                    mmapper.ensure_mapped(
+                        FIXED_ADDRESS,
+                        pages_per_chunk * 2,
+                        &empty_vec,
+                        &empty_vec,
+                    );
+
+                    // protect 1 chunk
+                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+
+                    let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+                    assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
+                    assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+                },
+                || {
+                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                },
+            )
+        })
     }
 
     #[test]
     fn ensure_mapped_on_protected_chunks() {
-        // map 2 chunks
-        let mmapper = ByteMapMmapper::new();
-        let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-        let empty_vec = vec![];
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-
-        // protect 1 chunk
-        mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
-
-        let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
-        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
-        assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
-
-        // ensure mapped - this will unprotect the previously protected chunk
-        mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &empty_vec, &empty_vec);
-        assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
-        assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+        serial_test(|| {
+            with_cleanup(
+                || {
+                    // map 2 chunks
+                    let mmapper = ByteMapMmapper::new();
+                    let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
+                    let empty_vec = vec![];
+                    mmapper.ensure_mapped(
+                        FIXED_ADDRESS,
+                        pages_per_chunk * 2,
+                        &empty_vec,
+                        &empty_vec,
+                    );
+
+                    // protect 1 chunk
+                    mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
+
+                    let chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
+                    assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), PROTECTED);
+                    assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+
+                    // ensure mapped - this will unprotect the previously protected chunk
+                    mmapper.ensure_mapped(
+                        FIXED_ADDRESS,
+                        pages_per_chunk * 2,
+                        &empty_vec,
+                        &empty_vec,
+                    );
+                    assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
+                    assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
+                },
+                || {
+                    memory::munmap(FIXED_ADDRESS, MAX_SIZE).unwrap();
+                },
+            )
+        })
     }
 }
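
Each test now maps the shared BYTE_MAP_MMAPPER_TEST_REGION, so the tests have to run serially and unmap the region when they finish. The real serial_test and with_cleanup helpers live in crate::util::test_util and are not shown in this diff; the sketch below is only an assumption of what such helpers could look like: a global lock to serialize the test bodies, and a catch_unwind so cleanup (the munmap) runs even when an assertion fails.

// Sketch only: not the mmtk-core implementation of these helpers.
use std::panic;
use std::sync::Mutex;

// One global lock so tests that map the same fixed region never overlap.
static TEST_LOCK: Mutex<()> = Mutex::new(());

fn serial_test<F: FnOnce()>(f: F) {
    // Ignore poisoning: a previously panicking test should not block the rest.
    let _guard = TEST_LOCK.lock().unwrap_or_else(|e| e.into_inner());
    f();
}

fn with_cleanup<T, C>(test: T, cleanup: C)
where
    T: FnOnce() + panic::UnwindSafe,
    C: FnOnce(),
{
    // Run the test body, then always run the cleanup (e.g. munmap), and only
    // afterwards re-raise any panic so the assertion failure is still reported.
    let result = panic::catch_unwind(test);
    cleanup();
    if let Err(err) = result {
        panic::resume_unwind(err);
    }
}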
