
Commit 2bf0df7

Move rust memchr impl to libcore
1 parent 707d070 commit 2bf0df7

9 files changed: +234 -232 lines changed

src/libcore/slice/memchr.rs (new file, +224)

@@ -0,0 +1,224 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Original implementation taken from rust-memchr
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

use cmp;
use mem;

const LO_U64: u64 = 0x0101010101010101;
const HI_U64: u64 = 0x8080808080808080;

// use truncation
const LO_USIZE: usize = LO_U64 as usize;
const HI_USIZE: usize = HI_U64 as usize;
/// Return `true` if `x` contains any zero byte.
///
/// From *Matters Computational*, J. Arndt
///
/// "The idea is to subtract one from each of the bytes and then look for
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
fn contains_zero_byte(x: usize) -> bool {
    x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
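// Worked example for the test above, on a 64-bit target:
// x = 0xffff_ffff_ffff_00ff contains a zero byte, and
//   x.wrapping_sub(LO_USIZE)  = 0xfefe_fefe_fefd_fffe
//   ... & !x                  = 0x0000_0000_0000_ff00
//   ... & HI_USIZE            = 0x0000_0000_0000_8000  != 0
// A borrow chain can only start at a zero byte, so the test fires if and
// only if the word really contains one.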
#[cfg(target_pointer_width = "32")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    let mut rep = (b as usize) << 8 | b as usize;
    rep = rep << 16 | rep;
    rep
}

#[cfg(target_pointer_width = "64")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    let mut rep = (b as usize) << 8 | b as usize;
    rep = rep << 16 | rep;
    rep = rep << 32 | rep;
    rep
}
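// For example, on a 64-bit target repeat_byte(b'a') == 0x6161_6161_6161_6161;
// the scans below XOR each word of `text` with this value, producing a zero
// byte exactly where the word matches, which contains_zero_byte detects.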
/// Return the first index matching the byte `x` in `text`.
pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts:
    // - unaligned initial part, before the first word-aligned address in text
    // - body, scanned two words at a time
    // - the last remaining part, less than two words in size
    let len = text.len();
    let ptr = text.as_ptr();
    let usize_bytes = mem::size_of::<usize>();

    // search up to an aligned boundary
    let mut offset = ptr.align_offset(usize_bytes);
    if offset > 0 {
        offset = cmp::min(offset, len);
        if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
            return Some(index);
        }
    }

    // search the body of the text
    let repeated_x = repeat_byte(x);

    if len >= 2 * usize_bytes {
        while offset <= len - 2 * usize_bytes {
            unsafe {
                let u = *(ptr.offset(offset as isize) as *const usize);
                let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);

                // break if there is a matching byte
                let zu = contains_zero_byte(u ^ repeated_x);
                let zv = contains_zero_byte(v ^ repeated_x);
                if zu || zv {
                    break;
                }
            }
            offset += usize_bytes * 2;
        }
    }

    // find the byte after the point the body loop stopped
    text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
}
/// Return the last index matching the byte `x` in `text`.
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts:
    // - unaligned tail, after the last word-aligned address in text
    // - body, scanned two words at a time
    // - the first remaining bytes, less than two words in size
    let len = text.len();
    let ptr = text.as_ptr();
    let usize_bytes = mem::size_of::<usize>();

    // search to an aligned boundary
    let end_align = (ptr as usize + len) & (usize_bytes - 1);
    let mut offset;
    if end_align > 0 {
        offset = if end_align >= len { 0 } else { len - end_align };
        if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
            return Some(offset + index);
        }
    } else {
        offset = len;
    }

    // search the body of the text
    let repeated_x = repeat_byte(x);

    while offset >= 2 * usize_bytes {
        unsafe {
            let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize);
            let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize);

            // break if there is a matching byte
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset -= 2 * usize_bytes;
    }

    // find the byte before the point the body loop stopped
    text[..offset].iter().rposition(|elt| *elt == x)
}
// test fallback implementations on all platforms
#[test]
fn matches_one() {
    assert_eq!(Some(0), memchr(b'a', b"a"));
}

#[test]
fn matches_begin() {
    assert_eq!(Some(0), memchr(b'a', b"aaaa"));
}

#[test]
fn matches_end() {
    assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
}

#[test]
fn matches_nul() {
    assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
}

#[test]
fn matches_past_nul() {
    assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
}

#[test]
fn no_match_empty() {
    assert_eq!(None, memchr(b'a', b""));
}

#[test]
fn no_match() {
    assert_eq!(None, memchr(b'a', b"xyz"));
}

#[test]
fn matches_one_reversed() {
    assert_eq!(Some(0), memrchr(b'a', b"a"));
}

#[test]
fn matches_begin_reversed() {
    assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
}

#[test]
fn matches_end_reversed() {
    assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
}

#[test]
fn matches_nul_reversed() {
    assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
}

#[test]
fn matches_past_nul_reversed() {
    assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
}

#[test]
fn no_match_empty_reversed() {
    assert_eq!(None, memrchr(b'a', b""));
}

#[test]
fn no_match_reversed() {
    assert_eq!(None, memrchr(b'a', b"xyz"));
}

#[test]
fn each_alignment_reversed() {
    let mut data = [1u8; 64];
    let needle = 2;
    let pos = 40;
    data[pos] = needle;
    for start in 0..16 {
        assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
    }
}
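
To see the two helpers working together, the body scan boils down to the following self-contained check. This is a minimal editorial sketch for a 64-bit target, not code from the commit; `repeated` and `word` are hypothetical locals standing in for `repeated_x` and the words read in the loop.

fn main() {
    const LO: u64 = 0x0101010101010101;
    const HI: u64 = 0x8080808080808080;

    // repeat_byte(b'a') on a 64-bit target
    let repeated: u64 = 0x6161616161616161;

    // one usize-sized chunk of haystack, read as the body loop would
    let word = u64::from_le_bytes(*b"xyzaxyzx");

    // XOR leaves a zero byte exactly at the matching position ...
    let masked = word ^ repeated;

    // ... which the subtract-and-borrow test from contains_zero_byte finds
    let hit = masked.wrapping_sub(LO) & !masked & HI != 0;
    assert!(hit);
}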

src/libcore/slice/mod.rs (+5)

@@ -50,6 +50,11 @@ use mem;
 use marker::{Copy, Send, Sync, Sized, self};
 use iter_private::TrustedRandomAccess;

+#[unstable(feature = "slice_internals", issue = "0",
+           reason = "exposed from core to be reused in std; use the memchr crate")]
+/// Pure rust memchr implementation, taken from rust-memchr
+pub mod memchr;
+
 mod rotate;
 mod sort;

src/libstd/lib.rs (+1)

@@ -302,6 +302,7 @@
 #![feature(sip_hash_13)]
 #![feature(slice_bytes)]
 #![feature(slice_concat_ext)]
+#![feature(slice_internals)]
 #![feature(slice_patterns)]
 #![feature(staged_api)]
 #![feature(stmt_expr_attributes)]
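
Any nightly crate consuming these internals would need the same gate; a hypothetical sketch, not part of this commit:

#![feature(slice_internals)]
extern crate core;

fn main() {
    // hypothetical call site for the newly public module
    assert_eq!(::core::slice::memchr::memchr(b'c', b"abc"), Some(2));
}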

src/libstd/sys/redox/memchr.rs (+1 -1)

@@ -11,4 +11,4 @@
 // Original implementation taken from rust-memchr
 // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};

src/libstd/sys/unix/memchr.rs (+1 -1)

@@ -50,7 +50,7 @@ pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {

     #[cfg(not(target_os = "linux"))]
     fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
-        ::sys_common::memchr::fallback::memrchr(needle, haystack)
+        ::core::slice::memchr::memrchr(needle, haystack)
     }

     memrchr_specific(needle, haystack)

src/libstd/sys/wasm/memchr.rs (+1 -1)

@@ -8,4 +8,4 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};

src/libstd/sys/windows/memchr.rs (+1 -1)

@@ -12,4 +12,4 @@
 // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

 // Fallback memchr is fastest on windows
-pub use sys_common::memchr::fallback::{memchr, memrchr};
+pub use core::slice::memchr::{memchr, memrchr};
