
Commit fa4c8b7

Move ReseedingRng into a separate module
1 parent 12bf233 commit fa4c8b7

File tree

thread-rng/src/lib.rs
thread-rng/src/reseeding_rng.rs

2 files changed: 86 additions & 77 deletions

thread-rng/src/lib.rs

Lines changed: 24 additions & 77 deletions
@@ -13,77 +13,35 @@ use std::fmt;
 use std::rc::Rc;
 use std::thread_local;
 
-use rand_chacha::ChaCha12Rng;
-use rand_core::SeedableRng;
-
 pub use rand_core::{self, CryptoRng, RngCore};
 
-// Rationale for using `UnsafeCell` in `ThreadRng`:
-//
-// Previously we used a `RefCell`, with an overhead of ~15%. There will only
-// ever be one mutable reference to the interior of the `UnsafeCell`, because
-// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
-// single thread (which is the definition of `ThreadRng`), there will only ever
-// be one of these methods active at a time.
-//
-// A possible scenario where there could be multiple mutable references is if
-// `ThreadRng` is used inside `next_u32` and co. But the implementation is
-// completely under our control. We just have to ensure none of them use
-// `ThreadRng` internally, which is nonsensical anyway. We should also never run
-// `ThreadRng` in destructors of its implementation, which is also nonsensical.
-
-// Number of generated bytes after which to reseed `ThreadRng`.
-// According to benchmarks, reseeding has a noticeable impact with thresholds
-// of 32 kB and less. We choose 64 kB to avoid significant overhead.
-const THREAD_RNG_RESEED_THRESHOLD: isize = 1024 * 64;
-
-struct ReseedingRng {
-    rng: ChaCha12Rng,
-    bytes_until_reseed: isize,
-}
+mod reseeding_rng;
+use reseeding_rng::ReseedingRng;
 
-impl ReseedingRng {
-    #[inline(always)]
-    fn reseed_check(&mut self, n: isize) {
-        if self.bytes_until_reseed < 0 {
-            // If system RNG has failed for some reason, ignore the error
-            // and continue to work with the old RNG state.
-            let _ = self.reseed();
+thread_local!(
+    // We require Rc<..> to avoid premature freeing when ThreadRng is used
+    // within thread-local destructors. See https://github.com/rust-random/rand/issues/968.
+    //
+    // Rationale for using `UnsafeCell`:
+    //
+    // Previously we used a `RefCell`, with an overhead of ~15%. There will only
+    // ever be one mutable reference to the interior of the `UnsafeCell`, because
+    // we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
+    // single thread (which is the definition of `ThreadRng`), there will only ever
+    // be one of these methods active at a time.
+    //
+    // A possible scenario where there could be multiple mutable references is if
+    // `ThreadRng` is used inside `next_u32` and co. But the implementation is
+    // completely under our control. We just have to ensure none of them use
+    // `ThreadRng` internally, which is nonsensical anyway. We should also never run
+    // `ThreadRng` in destructors of its implementation, which is also nonsensical.
+    static THREAD_RNG_KEY: Rc<UnsafeCell<ReseedingRng>> = {
+        match ReseedingRng::new() {
+            Ok(rng) => Rc::new(UnsafeCell::new(rng)),
+            Err(err) => panic!("could not initialize ThreadRng: {}", err),
         }
-        self.bytes_until_reseed -= n;
-    }
-
-    #[inline(always)]
-    fn reseed(&mut self) -> Result<(), rand_core::getrandom::Error> {
-        self.bytes_until_reseed = THREAD_RNG_RESEED_THRESHOLD;
-        self.rng = ChaCha12Rng::try_from_os_rng()?;
-        Ok(())
-    }
-}
-
-impl RngCore for ReseedingRng {
-    #[inline(always)]
-    fn next_u32(&mut self) -> u32 {
-        self.reseed_check(core::mem::size_of::<u32>() as isize);
-        self.rng.next_u32()
     }
-
-    #[inline(always)]
-    fn next_u64(&mut self) -> u64 {
-        self.reseed_check(core::mem::size_of::<u64>() as isize);
-        self.rng.next_u64()
-    }
-
-    #[inline(always)]
-    fn fill_bytes(&mut self, dest: &mut [u8]) {
-        // Valid allocation can not be bigger than `isize::MAX` bytes,
-        // so we can cast length to `isize` without issues.
-        self.reseed_check(dest.len() as isize);
-        self.rng.fill_bytes(dest)
-    }
-}
-
-impl CryptoRng for ReseedingRng {}
+);
 
 /// A reference to the thread-local generator.
 ///
@@ -165,17 +123,6 @@ impl fmt::Debug for ThreadRng {
     }
 }
 
-thread_local!(
-    // We require Rc<..> to avoid premature freeing when ThreadRng is used
-    // within thread-local destructors. See https://github.com/rust-random/rand/issues/968.
-    static THREAD_RNG_KEY: Rc<UnsafeCell<ReseedingRng>> = {
-        let rng = ChaCha12Rng::try_from_os_rng().unwrap_or_else(|err|
-            panic!("could not initialize ThreadRng: {}", err));
-        let reseeding_rng = ReseedingRng { rng, bytes_until_reseed: THREAD_RNG_RESEED_THRESHOLD };
-        Rc::new(UnsafeCell::new(reseeding_rng))
-    }
-);
-
 /// Access a fast, pre-initialized generator
 ///
 /// This is a handle to the local [`ThreadRng`].
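
Note (not part of the commit): the accessor that consumes THREAD_RNG_KEY lives in the portion of lib.rs this diff does not touch. Below is a minimal sketch of how it could look, assuming `ThreadRng` wraps the `Rc<UnsafeCell<ReseedingRng>>` directly and the public accessor is named `rng()`; both the struct layout and the function name are assumptions for illustration, relying on items already in scope in lib.rs (`Rc`, `UnsafeCell`, `RngCore`, `ReseedingRng`, `THREAD_RNG_KEY`).

// Hypothetical sketch, not taken from this diff.
#[derive(Clone)]
pub struct ThreadRng {
    // Assumed layout: the handle just holds the thread-local Rc.
    rng: Rc<UnsafeCell<ReseedingRng>>,
}

pub fn rng() -> ThreadRng {
    // Cloning the Rc keeps the generator alive even if this handle outlives
    // the thread-local slot (see rust-random/rand#968).
    ThreadRng { rng: THREAD_RNG_KEY.with(|rc| rc.clone()) }
}

impl RngCore for ThreadRng {
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        // SAFETY: `ThreadRng` is `!Send` and `!Sync` (it holds an `Rc`), and
        // `ReseedingRng` never calls back into `ThreadRng`, so this is the only
        // live mutable reference, exactly as argued in the rationale comment above.
        let rng = unsafe { &mut *self.rng.get() };
        rng.next_u32()
    }

    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        let rng = unsafe { &mut *self.rng.get() };
        rng.next_u64()
    }

    #[inline(always)]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let rng = unsafe { &mut *self.rng.get() };
        rng.fill_bytes(dest)
    }
}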

thread-rng/src/reseeding_rng.rs

Lines changed: 62 additions & 0 deletions
@@ -0,0 +1,62 @@
+use rand_chacha::ChaCha12Rng;
+use rand_core::{CryptoRng, RngCore, SeedableRng};
+
+// Number of generated bytes after which to reseed `ThreadRng`.
+// According to benchmarks, reseeding has a noticeable impact with thresholds
+// of 32 kB and less. We choose 64 kB to avoid significant overhead.
+const THREAD_RNG_RESEED_THRESHOLD: isize = 1024 * 64;
+
+pub(crate) struct ReseedingRng {
+    rng: ChaCha12Rng,
+    bytes_until_reseed: isize,
+}
+
+impl ReseedingRng {
+    pub fn new() -> Result<Self, rand_core::getrandom::Error> {
+        ChaCha12Rng::try_from_os_rng().map(|rng| Self {
+            rng,
+            bytes_until_reseed: THREAD_RNG_RESEED_THRESHOLD,
+        })
+    }
+
+    #[inline(always)]
+    pub fn reseed(&mut self) -> Result<(), rand_core::getrandom::Error> {
+        self.bytes_until_reseed = THREAD_RNG_RESEED_THRESHOLD;
+        self.rng = ChaCha12Rng::try_from_os_rng()?;
+        Ok(())
+    }
+
+    #[inline(always)]
+    fn reseed_check(&mut self, n: isize) {
+        if self.bytes_until_reseed < 0 {
+            // If system RNG has failed for some reason, ignore the error
+            // and continue to work with the old RNG state.
+            let _ = self.reseed();
+        }
+        self.bytes_until_reseed -= n;
+    }
+}
+
+impl RngCore for ReseedingRng {
+    #[inline(always)]
+    fn next_u32(&mut self) -> u32 {
+        self.reseed_check(core::mem::size_of::<u32>() as isize);
+        self.rng.next_u32()
+    }
+
+    #[inline(always)]
+    fn next_u64(&mut self) -> u64 {
+        self.reseed_check(core::mem::size_of::<u64>() as isize);
+        self.rng.next_u64()
+    }
+
+    #[inline(always)]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        // Valid allocation can not be bigger than `isize::MAX` bytes,
+        // so we can cast length to `isize` without issues.
+        self.reseed_check(dest.len() as isize);
+        self.rng.fill_bytes(dest)
+    }
+}
+
+impl CryptoRng for ReseedingRng {}
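
Note (not part of the commit): a hypothetical test sketch for reseeding_rng.rs showing the cadence the counter arithmetic produces. `reseed_check` only reseeds lazily, on the first draw after `bytes_until_reseed` has gone negative, and the counter is reset to the threshold even if the OS RNG call fails. The test module and its assertions are assumptions for illustration; a child `tests` module can read the private field.

#[cfg(test)]
mod tests {
    use super::*;
    use rand_core::RngCore;

    #[test]
    fn reseeds_lazily_after_threshold() {
        let mut rng = ReseedingRng::new().expect("OS RNG available");
        let mut buf = [0u8; 1024];

        // Draw 70 KiB, past the 64 KiB threshold. Without a reseed the counter
        // would end at 64 * 1024 - 70 * 1024, i.e. negative; `reseed_check`
        // resets it to the threshold on the first call after it dips below
        // zero, so it ends up positive again.
        for _ in 0..70 {
            rng.fill_bytes(&mut buf);
        }
        assert!(rng.bytes_until_reseed > 0);
        assert!(rng.bytes_until_reseed <= THREAD_RNG_RESEED_THRESHOLD);
    }
}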
