// core/slice/memchr.rs
1// Original implementation taken from rust-memchr.
2// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
3
4use crate::intrinsics::const_eval_select;
5use crate::mem;
6
/// A word with `0x01` in every byte; used by the SWAR zero-byte test below.
const LO_USIZE: usize = usize::repeat_u8(0x01);
/// A word with `0x80` (the per-byte sign bit) in every byte.
const HI_USIZE: usize = usize::repeat_u8(0x80);
/// Size in bytes of a `usize` word on the target platform.
const USIZE_BYTES: usize = mem::size_of::<usize>();
10
11/// Returns `true` if `x` contains any zero byte.
12///
13/// From *Matters Computational*, J. Arndt:
14///
15/// "The idea is to subtract one from each of the bytes and then look for
16/// bytes where the borrow propagated all the way to the most significant
17/// bit."
18#[inline]
19const fn contains_zero_byte(x: usize) -> bool {
20 x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
21}
22
23/// Returns the first index matching the byte `x` in `text`.
24#[inline]
25#[must_use]
26pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> {
27 // Fast path for small slices.
28 if text.len() < 2 * USIZE_BYTES {
29 return memchr_naive(x, text);
30 }
31
32 memchr_aligned(x, text)
33}
34
/// Byte-by-byte scan for `x`; used for short slices and in const evaluation.
#[inline]
const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
    // FIXME(const-hack): Replace with `text.iter().position(|c| *c == x)`
    // once iterator methods are usable in `const fn`.
    let len = text.len();
    let mut idx = 0;
    loop {
        if idx == len {
            return None;
        }
        if text[idx] == x {
            return Some(idx);
        }
        idx += 1;
    }
}
50
/// Word-at-a-time forward scan for `x`.
///
/// The caller (`memchr`) only invokes this for `text.len() >= 2 * USIZE_BYTES`,
/// which the body loop's bound arithmetic relies on (see below).
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { x: u8, text: &[u8] } -> Option<usize>:
        if const {
            // In const evaluation the word-sized reads below are not usable
            // (raw pointer casts of unknown alignment), so scan byte-by-byte.
            memchr_naive(x, text)
        } else {
            // Scan for a single byte value by reading two `usize` words at a time.
            //
            // Split `text` in three parts
            // - unaligned initial part, before the first word aligned address in text
            // - body, scan by 2 words at a time
            // - the last remaining part, < 2 word size

            // search up to an aligned boundary
            let len = text.len();
            let ptr = text.as_ptr();
            let mut offset = ptr.align_offset(USIZE_BYTES);

            if offset > 0 {
                // `align_offset` may exceed the slice length; clamp so the
                // prefix slice below stays in bounds.
                offset = offset.min(len);
                let slice = &text[..offset];
                if let Some(index) = memchr_naive(x, slice) {
                    return Some(index);
                }
            }

            // search the body of the text
            //
            // XOR-ing a word with `repeated_x` turns every byte equal to `x`
            // into a zero byte, which `contains_zero_byte` then detects.
            // `len >= 2 * USIZE_BYTES` (guaranteed by `memchr`), so the
            // subtraction in the loop bound cannot underflow.
            let repeated_x = usize::repeat_u8(x);
            while offset <= len - 2 * USIZE_BYTES {
                // SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
                // between the offset and the end of the slice.
                unsafe {
                    let u = *(ptr.add(offset) as *const usize);
                    let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);

                    // break if there is a matching byte
                    let zu = contains_zero_byte(u ^ repeated_x);
                    let zv = contains_zero_byte(v ^ repeated_x);
                    if zu || zv {
                        break;
                    }
                }
                offset += USIZE_BYTES * 2;
            }

            // Find the byte after the point the body loop stopped. The byte
            // scan rediscovers the exact match position within the two words
            // (or scans the < 2-word remainder if the loop ran to completion).
            // FIXME(const-hack): Use `?` instead.
            // FIXME(const-hack, fee1-dead): use range slicing
            let slice =
                // SAFETY: offset is within bounds
                unsafe { super::from_raw_parts(text.as_ptr().add(offset), text.len() - offset) };
            if let Some(i) = memchr_naive(x, slice) { Some(offset + i) } else { None }
        }
    )
}
109
/// Returns the last index matching the byte `x` in `text`.
#[must_use]
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts:
    // - unaligned tail, after the last word aligned address in text,
    // - body, scanned by 2 words at a time,
    // - the first remaining bytes, < 2 word size.
    let len = text.len();
    let ptr = text.as_ptr();
    type Chunk = usize;

    let (min_aligned_offset, max_aligned_offset) = {
        // We call this just to obtain the length of the prefix and suffix.
        // In the middle we always process two chunks at once, which is why
        // `align_to` is asked for `(Chunk, Chunk)` pairs: the middle region
        // is then a whole number of two-chunk groups.
        // SAFETY: transmuting `[u8]` to `[usize]` is safe except for size differences
        // which are handled by `align_to`.
        let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() };
        (prefix.len(), len - suffix.len())
    };

    // Check the unaligned tail first; if the match is there, it is the
    // rightmost one overall.
    let mut offset = max_aligned_offset;
    if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
        return Some(offset + index);
    }

    // Search the body of the text, make sure we don't cross min_aligned_offset.
    // offset is always aligned, so just testing `>` is sufficient and avoids possible
    // overflow.
    //
    // XOR-ing a word with `repeated_x` zeroes exactly the bytes equal to `x`,
    // which `contains_zero_byte` then detects.
    let repeated_x = usize::repeat_u8(x);
    let chunk_bytes = mem::size_of::<Chunk>();

    while offset > min_aligned_offset {
        // SAFETY: offset starts at len - suffix.len(), as long as it is greater than
        // min_aligned_offset (prefix.len()) the remaining distance is at least 2 * chunk_bytes.
        unsafe {
            let u = *(ptr.add(offset - 2 * chunk_bytes) as *const Chunk);
            let v = *(ptr.add(offset - chunk_bytes) as *const Chunk);

            // Break if there is a matching byte.
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset -= 2 * chunk_bytes;
    }

    // Find the byte before the point the body loop stopped: either the
    // rightmost match inside the two words that triggered the break, or a
    // match in the unaligned prefix if the loop ran to completion.
    text[..offset].iter().rposition(|elt| *elt == x)
}