Skip to content

Commit dee7fa5

Browse files
emberian authored and alexcrichton committed
Use mmap to map in task stacks and guard page
Also implement caching of stacks.
1 parent 462f09e commit dee7fa5

File tree

6 files changed

+144
-58
lines changed

6 files changed

+144
-58
lines changed

src/libgreen/context.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,10 +12,9 @@ use std::libc::c_void;
1212
use std::uint;
1313
use std::cast::{transmute, transmute_mut_unsafe,
1414
transmute_region, transmute_mut_region};
15+
use stack::Stack;
1516
use std::unstable::stack;
1617

17-
use stack::StackSegment;
18-
1918
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
2019
// SSE regs. It would be marginally better not to do this. In C++ we
2120
// use an attribute on a struct.
@@ -41,7 +40,7 @@ impl Context {
4140
}
4241

4342
/// Create a new context that will resume execution by running proc()
44-
pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
43+
pub fn new(start: proc(), stack: &mut Stack) -> Context {
4544
// The C-ABI function that is the task entry point
4645
//
4746
// Note that this function is a little sketchy. We're taking a
@@ -79,6 +78,7 @@ impl Context {
7978
// be passed to the spawn function. Another unfortunate
8079
// allocation
8180
let start = ~start;
81+
8282
initialize_call_frame(&mut *regs,
8383
task_start_wrapper as *c_void,
8484
unsafe { transmute(&*start) },

src/libgreen/coroutine.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
use std::rt::env;
1515

1616
use context::Context;
17-
use stack::{StackPool, StackSegment};
17+
use stack::{StackPool, Stack};
1818

1919
/// A coroutine is nothing more than a (register context, stack) pair.
2020
pub struct Coroutine {
@@ -24,7 +24,7 @@ pub struct Coroutine {
2424
///
2525
/// Servo needs this to be public in order to tell SpiderMonkey
2626
/// about the stack bounds.
27-
current_stack_segment: StackSegment,
27+
current_stack_segment: Stack,
2828

2929
/// Always valid if the task is alive and not running.
3030
saved_context: Context
@@ -39,7 +39,7 @@ impl Coroutine {
3939
Some(size) => size,
4040
None => env::min_stack()
4141
};
42-
let mut stack = stack_pool.take_segment(stack_size);
42+
let mut stack = stack_pool.take_stack(stack_size);
4343
let initial_context = Context::new(start, &mut stack);
4444
Coroutine {
4545
current_stack_segment: stack,
@@ -49,14 +49,14 @@ impl Coroutine {
4949

5050
pub fn empty() -> Coroutine {
5151
Coroutine {
52-
current_stack_segment: StackSegment::new(0),
52+
current_stack_segment: Stack::new(0),
5353
saved_context: Context::empty()
5454
}
5555
}
5656

5757
/// Destroy coroutine and try to reuse std::stack segment.
5858
pub fn recycle(self, stack_pool: &mut StackPool) {
5959
let Coroutine { current_stack_segment, .. } = self;
60-
stack_pool.give_segment(current_stack_segment);
60+
stack_pool.give_stack(current_stack_segment);
6161
}
6262
}

src/libgreen/stack.rs

Lines changed: 95 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -8,46 +8,101 @@
88
// option. This file may not be copied, modified, or distributed
99
// except according to those terms.
1010

11-
use std::vec;
12-
use std::libc::{c_uint, uintptr_t};
11+
use std::rt::env::max_cached_stacks;
12+
use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable, MapNonStandardFlags};
13+
#[cfg(not(windows))]
14+
use std::libc::{MAP_STACK, MAP_PRIVATE, MAP_ANON};
15+
use std::libc::{c_uint, c_int, c_void, uintptr_t};
1316

14-
pub struct StackSegment {
15-
priv buf: ~[u8],
16-
priv valgrind_id: c_uint
17+
/// A task's stack. The name "Stack" is a vestige of segmented stacks.
18+
pub struct Stack {
19+
priv buf: MemoryMap,
20+
priv min_size: uint,
21+
priv valgrind_id: c_uint,
1722
}
1823

19-
impl StackSegment {
20-
pub fn new(size: uint) -> StackSegment {
21-
unsafe {
22-
// Crate a block of uninitialized values
23-
let mut stack = vec::with_capacity(size);
24-
stack.set_len(size);
24+
// Try to use MAP_STACK on platforms that support it (it's what we're doing
25+
// anyway), but some platforms don't support it at all. For example, it appears
26+
// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
27+
// fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
28+
#[cfg(not(windows), not(target_os = "freebsd"))]
29+
static STACK_FLAGS: c_int = MAP_STACK | MAP_PRIVATE | MAP_ANON;
30+
#[cfg(target_os = "freebsd")]
31+
static STACK_FLAGS: c_int = MAP_PRIVATE | MAP_ANON;
32+
#[cfg(windows)]
33+
static STACK_FLAGS: c_int = 0;
2534

26-
let mut stk = StackSegment {
27-
buf: stack,
28-
valgrind_id: 0
29-
};
35+
impl Stack {
36+
pub fn new(size: uint) -> Stack {
37+
// Map in a stack. Eventually we might be able to handle stack allocation failure, which
38+
// would fail to spawn the task. But there's not many sensible things to do on OOM.
39+
// Failure seems fine (and is what the old stack allocation did).
40+
let stack = match MemoryMap::new(size, [MapReadable, MapWritable,
41+
MapNonStandardFlags(STACK_FLAGS)]) {
42+
Ok(map) => map,
43+
Err(e) => fail!("Creating memory map for stack of size {} failed: {}", size, e)
44+
};
3045

31-
// XXX: Using the FFI to call a C macro. Slow
32-
stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end());
33-
return stk;
46+
// Change the last page to be inaccessible. This is to provide safety; when an FFI
47+
// function overflows it will (hopefully) hit this guard page. It isn't guaranteed, but
48+
// that's why FFI is unsafe. buf.data is guaranteed to be aligned properly.
49+
if !protect_last_page(&stack) {
50+
fail!("Could not memory-protect guard page. stack={:?}, errno={}",
51+
stack, errno());
3452
}
53+
54+
let mut stk = Stack {
55+
buf: stack,
56+
min_size: size,
57+
valgrind_id: 0
58+
};
59+
60+
// XXX: Using the FFI to call a C macro. Slow
61+
stk.valgrind_id = unsafe { rust_valgrind_stack_register(stk.start(), stk.end()) };
62+
return stk;
3563
}
3664

3765
/// Point to the low end of the allocated stack
3866
pub fn start(&self) -> *uint {
39-
self.buf.as_ptr() as *uint
67+
self.buf.data as *uint
4068
}
4169

4270
/// Point one word beyond the high end of the allocated stack
4371
pub fn end(&self) -> *uint {
4472
unsafe {
45-
self.buf.as_ptr().offset(self.buf.len() as int) as *uint
73+
self.buf.data.offset(self.buf.len as int) as *uint
4674
}
4775
}
4876
}
4977

50-
impl Drop for StackSegment {
78+
// These use ToPrimitive so that we never need to worry about the sizes of whatever types these
79+
// (which we would with scalar casts). It's either a wrapper for a scalar cast or failure: fast, or
80+
// will fail during compilation.
81+
#[cfg(unix)]
82+
fn protect_last_page(stack: &MemoryMap) -> bool {
83+
use std::libc::{mprotect, PROT_NONE, size_t};
84+
unsafe {
85+
// This may seem backwards: the start of the segment is the last page? Yes! The stack grows
86+
// from higher addresses (the end of the allocated block) to lower addresses (the start of
87+
// the allocated block).
88+
let last_page = stack.data as *c_void;
89+
mprotect(last_page, page_size() as size_t, PROT_NONE) != -1
90+
}
91+
}
92+
93+
#[cfg(windows)]
94+
fn protect_last_page(stack: &MemoryMap) -> bool {
95+
use std::libc::{VirtualProtect, PAGE_NOACCESS, SIZE_T, LPDWORD, DWORD};
96+
unsafe {
97+
// see above
98+
let last_page = stack.data as *mut c_void;
99+
let mut old_prot: DWORD = 0;
100+
VirtualProtect(last_page, page_size() as SIZE_T, PAGE_NOACCESS,
101+
&mut old_prot as LPDWORD) != 0
102+
}
103+
}
104+
105+
impl Drop for Stack {
51106
fn drop(&mut self) {
52107
unsafe {
53108
// XXX: Using the FFI to call a C macro. Slow
@@ -56,16 +111,30 @@ impl Drop for StackSegment {
56111
}
57112
}
58113

59-
pub struct StackPool(());
114+
pub struct StackPool {
115+
// Ideally this would be some datastructure that preserved ordering on Stack.min_size.
116+
priv stacks: ~[Stack],
117+
}
60118

61119
impl StackPool {
62-
pub fn new() -> StackPool { StackPool(()) }
120+
pub fn new() -> StackPool {
121+
StackPool {
122+
stacks: ~[],
123+
}
124+
}
63125

64-
pub fn take_segment(&self, min_size: uint) -> StackSegment {
65-
StackSegment::new(min_size)
126+
pub fn take_stack(&mut self, min_size: uint) -> Stack {
127+
// Ideally this would be a binary search
128+
match self.stacks.iter().position(|s| s.min_size < min_size) {
129+
Some(idx) => self.stacks.swap_remove(idx),
130+
None => Stack::new(min_size)
131+
}
66132
}
67133

68-
pub fn give_segment(&self, _stack: StackSegment) {
134+
pub fn give_stack(&mut self, stack: Stack) {
135+
if self.stacks.len() <= max_cached_stacks() {
136+
self.stacks.push(stack)
137+
}
69138
}
70139
}
71140

src/libstd/libc.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2863,6 +2863,7 @@ pub mod consts {
28632863
pub static MAP_PRIVATE : c_int = 0x0002;
28642864
pub static MAP_FIXED : c_int = 0x0010;
28652865
pub static MAP_ANON : c_int = 0x1000;
2866+
pub static MAP_STACK : c_int = 0;
28662867

28672868
pub static MAP_FAILED : *c_void = -1 as *c_void;
28682869

src/libstd/os.rs

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ use os;
3939
use prelude::*;
4040
use ptr;
4141
use str;
42-
use to_str;
42+
use fmt;
4343
use unstable::finally::Finally;
4444
use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
4545

@@ -871,7 +871,7 @@ pub enum MapOption {
871871
MapOffset(uint),
872872
/// On POSIX, this can be used to specify the default flags passed to `mmap`. By default it uses
873873
/// `MAP_PRIVATE` and, if not using `MapFd`, `MAP_ANON`. This will override both of those. This
874-
/// is platform-specific (the exact values used) and unused on Windows.
874+
/// is platform-specific (the exact values used) and ignored on Windows.
875875
MapNonStandardFlags(c_int),
876876
}
877877

@@ -911,23 +911,29 @@ pub enum MapError {
911911
ErrMapViewOfFile(uint)
912912
}
913913

914-
impl to_str::ToStr for MapError {
915-
fn to_str(&self) -> ~str {
916-
match *self {
917-
ErrFdNotAvail => ~"fd not available for reading or writing",
918-
ErrInvalidFd => ~"Invalid fd",
919-
ErrUnaligned => ~"Unaligned address, invalid flags, \
920-
negative length or unaligned offset",
921-
ErrNoMapSupport=> ~"File doesn't support mapping",
922-
ErrNoMem => ~"Invalid address, or not enough available memory",
923-
ErrUnknown(code) => format!("Unknown error={}", code),
924-
ErrUnsupProt => ~"Protection mode unsupported",
925-
ErrUnsupOffset => ~"Offset in virtual memory mode is unsupported",
926-
ErrAlreadyExists => ~"File mapping for specified file already exists",
927-
ErrVirtualAlloc(code) => format!("VirtualAlloc failure={}", code),
928-
ErrCreateFileMappingW(code) => format!("CreateFileMappingW failure={}", code),
929-
ErrMapViewOfFile(code) => format!("MapViewOfFile failure={}", code)
930-
}
914+
impl fmt::Default for MapError {
915+
fn fmt(val: &MapError, out: &mut fmt::Formatter) {
916+
let str = match *val {
917+
ErrFdNotAvail => "fd not available for reading or writing",
918+
ErrInvalidFd => "Invalid fd",
919+
ErrUnaligned => "Unaligned address, invalid flags, negative length or unaligned offset",
920+
ErrNoMapSupport=> "File doesn't support mapping",
921+
ErrNoMem => "Invalid address, or not enough available memory",
922+
ErrUnsupProt => "Protection mode unsupported",
923+
ErrUnsupOffset => "Offset in virtual memory mode is unsupported",
924+
ErrAlreadyExists => "File mapping for specified file already exists",
925+
ErrUnknown(code) => { write!(out.buf, "Unknown error = {}", code); return },
926+
ErrVirtualAlloc(code) => { write!(out.buf, "VirtualAlloc failure = {}", code); return },
927+
ErrCreateFileMappingW(code) => {
928+
format!("CreateFileMappingW failure = {}", code);
929+
return
930+
},
931+
ErrMapViewOfFile(code) => {
932+
write!(out.buf, "MapViewOfFile failure = {}", code);
933+
return
934+
}
935+
};
936+
write!(out.buf, "{}", str);
931937
}
932938
}
933939

@@ -1130,8 +1136,7 @@ impl Drop for MemoryMap {
11301136
unsafe {
11311137
match self.kind {
11321138
MapVirtual => {
1133-
if libc::VirtualFree(self.data as *mut c_void,
1134-
self.len as size_t,
1139+
if libc::VirtualFree(self.data as *mut c_void, 0,
11351140
libc::MEM_RELEASE) == FALSE {
11361141
error!("VirtualFree failed: {}", errno());
11371142
}
@@ -1487,7 +1492,7 @@ mod tests {
14871492
MapOffset(size / 2)
14881493
]) {
14891494
Ok(chunk) => chunk,
1490-
Err(msg) => fail!(msg.to_str())
1495+
Err(msg) => fail!("{}", msg)
14911496
};
14921497
assert!(chunk.len > 0);
14931498

src/libstd/rt/env.rs

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,26 +10,33 @@
1010

1111
//! Runtime environment settings
1212
13-
use from_str::FromStr;
13+
use from_str::from_str;
1414
use option::{Some, None};
1515
use os;
1616

1717
// Note that these are all accessed without any synchronization.
1818
// They are expected to be initialized once then left alone.
1919

2020
static mut MIN_STACK: uint = 2 * 1024 * 1024;
21+
/// This default corresponds to 20M of cache per scheduler (at the default size).
22+
static mut MAX_CACHED_STACKS: uint = 10;
2123
static mut DEBUG_BORROW: bool = false;
2224
static mut POISON_ON_FREE: bool = false;
2325

2426
pub fn init() {
2527
unsafe {
2628
match os::getenv("RUST_MIN_STACK") {
27-
Some(s) => match FromStr::from_str(s) {
29+
Some(s) => match from_str(s) {
2830
Some(i) => MIN_STACK = i,
2931
None => ()
3032
},
3133
None => ()
3234
}
35+
match os::getenv("RUST_MAX_CACHED_STACKS") {
36+
Some(max) => MAX_CACHED_STACKS = from_str(max).expect("expected positive integer in \
37+
RUST_MAX_CACHED_STACKS"),
38+
None => ()
39+
}
3340
match os::getenv("RUST_DEBUG_BORROW") {
3441
Some(_) => DEBUG_BORROW = true,
3542
None => ()
@@ -45,6 +52,10 @@ pub fn min_stack() -> uint {
4552
unsafe { MIN_STACK }
4653
}
4754

55+
pub fn max_cached_stacks() -> uint {
56+
unsafe { MAX_CACHED_STACKS }
57+
}
58+
4859
pub fn debug_borrow() -> bool {
4960
unsafe { DEBUG_BORROW }
5061
}

0 commit comments

Comments (0)