@@ -5,7 +5,7 @@
 //! use byte_pool::BytePool;
 //!
 //! // Create a pool
-//! let pool = BytePool::new();
+//! let pool = BytePool::<Vec<u8>>::new();
 //!
 //! // Allocate a buffer with capacity 1024.
 //! let mut buf = pool.alloc(1024);
@@ -25,221 +25,8 @@
 //! drop(pool);
 //! ```
 
-use std::fmt;
-use std::mem;
-use std::ops::{Deref, DerefMut};
-use std::ptr;
+mod pool;
+mod poolable;
 
-use crossbeam_queue::SegQueue;
-
-/// A pool of byte slices, that reuses memory.
-#[derive(Debug)]
-pub struct BytePool {
-    list_large: SegQueue<Vec<u8>>,
-    list_small: SegQueue<Vec<u8>>,
-}
-
-/// The size at which point values are allocated in the small list, rather than the big.
-const SPLIT_SIZE: usize = 4 * 1024;
-
-/// The value returned by an allocation of the pool.
-/// When it is dropped the memory gets returned into the pool, and is not zeroed.
-/// If that is a concern, you must clear the data yourself.
-pub struct Block<'a> {
-    data: mem::ManuallyDrop<Vec<u8>>,
-    pool: &'a BytePool,
-}
-
-impl fmt::Debug for Block<'_> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        f.debug_struct("Block").field("data", &self.data).finish()
-    }
-}
-
-impl Default for BytePool {
-    fn default() -> Self {
-        BytePool {
-            list_large: SegQueue::new(),
-            list_small: SegQueue::new(),
-        }
-    }
-}
-
-impl BytePool {
-    /// Constructs a new pool.
-    pub fn new() -> Self {
-        BytePool::default()
-    }
-
-    /// Allocates a new `Block`, which represents a fixed sice byte slice.
-    /// If `Block` is dropped, the memory is _not_ freed, but rather it is returned into the pool.
-    /// The returned `Block` contains arbitrary data, and must be zeroed or overwritten,
-    /// in cases this is needed.
-    pub fn alloc(&self, size: usize) -> Block<'_> {
-        assert!(size > 0, "Can not allocate empty blocks");
-
-        // check the last 4 blocks
-        let list = if size < SPLIT_SIZE {
-            &self.list_small
-        } else {
-            &self.list_large
-        };
-        if let Ok(el) = list.pop() {
-            if el.capacity() == size {
-                // found one, reuse it
-                return Block::new(el, self);
-            } else {
-                // put it back
-                list.push(el);
-            }
-        }
-
-        // allocate a new block
-        let data = vec![0u8; size];
-        Block::new(data, self)
-    }
-
-    fn push_raw_block(&self, block: Vec<u8>) {
-        if block.capacity() < SPLIT_SIZE {
-            self.list_small.push(block);
-        } else {
-            self.list_large.push(block);
-        }
-    }
-}
-
-impl<'a> Drop for Block<'a> {
-    fn drop(&mut self) {
-        let data = mem::ManuallyDrop::into_inner(unsafe { ptr::read(&self.data) });
-        self.pool.push_raw_block(data);
-    }
-}
-
-impl<'a> Block<'a> {
-    fn new(data: Vec<u8>, pool: &'a BytePool) -> Self {
-        Block {
-            data: mem::ManuallyDrop::new(data),
-            pool,
-        }
-    }
-
-    /// Resizes a block to a new size.
-    pub fn realloc(&mut self, new_size: usize) {
-        use std::cmp::Ordering::*;
-
-        assert!(new_size > 0);
-        match new_size.cmp(&self.size()) {
-            Greater => self.data.resize(new_size, 0u8),
-            Less => {
-                self.data.truncate(new_size);
-                self.shrink_to_fit();
-            }
-            Equal => {}
-        }
-    }
-
-    /// Returns the amount of bytes this block has.
-    pub fn size(&self) -> usize {
-        self.data.capacity()
-    }
-}
-
-impl<'a> Deref for Block<'a> {
-    type Target = Vec<u8>;
-
-    #[inline]
-    fn deref(&self) -> &Self::Target {
-        self.data.deref()
-    }
-}
-
-impl<'a> DerefMut for Block<'a> {
-    #[inline]
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        self.data.deref_mut()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn basics() {
-        let pool = BytePool::new();
-
-        for i in 0..100 {
-            let mut block_1k = pool.alloc(1 * 1024);
-            let mut block_4k = pool.alloc(4 * 1024);
-
-            for el in block_1k.deref_mut() {
-                *el = i as u8;
-            }
-
-            for el in block_4k.deref_mut() {
-                *el = i as u8;
-            }
-
-            for el in block_1k.deref() {
-                assert_eq!(*el, i as u8);
-            }
-
-            for el in block_4k.deref() {
-                assert_eq!(*el, i as u8);
-            }
-        }
-    }
-
-    #[test]
-    fn realloc() {
-        let pool = BytePool::new();
-
-        let mut buf = pool.alloc(10);
-
-        let _slice: &[u8] = &buf;
-
-        assert_eq!(buf.capacity(), 10);
-        for i in 0..10 {
-            buf[i] = 1;
-        }
-
-        buf.realloc(512);
-        assert_eq!(buf.capacity(), 512);
-        for el in buf.iter().take(10) {
-            assert_eq!(*el, 1);
-        }
-
-        buf.realloc(5);
-        assert_eq!(buf.capacity(), 5);
-        for el in buf.iter() {
-            assert_eq!(*el, 1);
-        }
-    }
-
-    #[test]
-    fn multi_thread() {
-        let pool = std::sync::Arc::new(BytePool::new());
-
-        let pool1 = pool.clone();
-        let h1 = std::thread::spawn(move || {
-            for _ in 0..100 {
-                let mut buf = pool1.alloc(64);
-                buf[10] = 10;
-            }
-        });
-
-        let pool2 = pool.clone();
-        let h2 = std::thread::spawn(move || {
-            for _ in 0..100 {
-                let mut buf = pool2.alloc(64);
-                buf[10] = 10;
-            }
-        });
-
-        h1.join().unwrap();
-        h2.join().unwrap();
-
-        // two threads allocating in parallel will need 2 buffers
-        assert!(pool.list_small.len() <= 2);
-    }
-}
+pub use pool::{Block, BytePool};
+pub use poolable::{Poolable, Realloc};
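Net effect of the commit at the crate root: the concrete `Vec<u8>`-only implementation moves out of `lib.rs` into the `pool` and `poolable` modules, and the root now re-exports `Block`, `BytePool`, `Poolable`, and `Realloc`, with the pool spelled generically as `BytePool::<Vec<u8>>` in the doc example. Below is a minimal usage sketch pieced together from that doc example; the index write and the explicit `drop(buf)` are illustrative assumptions, not lines taken from the elided part of the example.

```rust
use byte_pool::BytePool;

fn main() {
    // Create a pool of Vec<u8> buffers (type annotation as in the updated doc example).
    let pool = BytePool::<Vec<u8>>::new();

    // Allocate a buffer with capacity 1024. Reused blocks are not zeroed,
    // so overwrite the contents before relying on them.
    let mut buf = pool.alloc(1024);
    buf[0] = 42;
    assert_eq!(buf[0], 42);

    // Dropping the block returns its memory to the pool instead of freeing it.
    drop(buf);

    // Dropping the pool releases all retained memory.
    drop(pool);
}
```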