core/slice/memchr.rs
// Original implementation taken from rust-memchr.
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch

use crate::intrinsics::const_eval_select;

const LO_USIZE: usize = usize::repeat_u8(0x01);
const HI_USIZE: usize = usize::repeat_u8(0x80);
const USIZE_BYTES: usize = size_of::<usize>();

/// Returns `true` if `x` contains any zero byte.
///
/// From *Matters Computational*, J. Arndt:
///
/// "The idea is to subtract one from each of the bytes and then look for
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
const fn contains_zero_byte(x: usize) -> bool {
    x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
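
// A worked example, added here for illustration (not part of the original file):
// the check computes `(x - LO_USIZE) & !x & HI_USIZE`. In a word with no zero
// bytes the subtraction never borrows, so every byte's high bit in the difference
// is either already clear or cleared by `!x`; a zero byte wraps to 0xFF, so its
// high bit survives both masks. Core is normally tested from a separate crate, so
// treat this inline `cfg(test)` module purely as a sketch.
#[cfg(test)]
mod contains_zero_byte_example {
    use super::contains_zero_byte;

    #[test]
    fn flags_exactly_the_words_with_a_zero_byte() {
        // Every byte is 0x61: each byte becomes 0x60 after the subtraction, so
        // nothing makes it through the high-bit mask.
        assert!(!contains_zero_byte(usize::repeat_u8(0x61)));
        // Clearing the low byte makes it wrap to 0xFF during the subtraction, so
        // its high bit survives `!x & HI_USIZE` and the word is flagged.
        assert!(contains_zero_byte(usize::repeat_u8(0x61) & !0xFF));
    }
}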

/// Returns the first index matching the byte `x` in `text`.
#[inline]
#[must_use]
pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> {
    // Fast path for small slices.
    if text.len() < 2 * USIZE_BYTES {
        return memchr_naive(x, text);
    }

    memchr_aligned(x, text)
}
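
// Usage sketch, added for illustration (not part of the original file): like C's
// `memchr`, this returns the offset of the first matching byte, or `None` if the
// byte does not occur. Core is normally tested from a separate crate, so this
// inline `cfg(test)` module is purely a sketch.
#[cfg(test)]
mod memchr_usage_example {
    #[test]
    fn returns_the_first_match() {
        assert_eq!(super::memchr(b'o', b"foo bar foo"), Some(1));
        assert_eq!(super::memchr(b'z', b"foo bar foo"), None);
    }
}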
33
34#[inline]
35const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
36 let mut i = 0;
37
38 // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`.
39 while i < text.len() {
40 if text[i] == x {
41 return Some(i);
42 }
43
44 i += 1;
45 }
46
47 None
48}

#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
    // The runtime version behaves the same as the compile-time version; it's
    // just more optimized.
    const_eval_select!(
        @capture { x: u8, text: &[u8] } -> Option<usize>:
        if const {
            memchr_naive(x, text)
        } else {
            // Scan for a single byte value by reading two `usize` words at a time.
            //
            // Split `text` into three parts:
            // - the unaligned initial part, before the first word-aligned address in `text`,
            // - the body, scanned two words at a time,
            // - the last remaining part, less than two words long.

            // Search up to an aligned boundary.
            let len = text.len();
            let ptr = text.as_ptr();
            let mut offset = ptr.align_offset(USIZE_BYTES);

            if offset > 0 {
                offset = offset.min(len);
                let slice = &text[..offset];
                if let Some(index) = memchr_naive(x, slice) {
                    return Some(index);
                }
            }

            // Search the body of the text.
            let repeated_x = usize::repeat_u8(x);
            while offset <= len - 2 * USIZE_BYTES {
                // SAFETY: the `while` condition guarantees a distance of at least
                // `2 * USIZE_BYTES` between `offset` and the end of the slice.
                unsafe {
                    let u = *(ptr.add(offset) as *const usize);
                    let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);

                    // Break if there is a matching byte.
                    let zu = contains_zero_byte(u ^ repeated_x);
                    let zv = contains_zero_byte(v ^ repeated_x);
                    if zu || zv {
                        break;
                    }
                }
                offset += USIZE_BYTES * 2;
            }

            // Find the byte after the point where the body loop stopped.
            // FIXME(const-hack): Use `?` instead.
            // FIXME(const-hack, fee1-dead): use range slicing
            let slice =
                // SAFETY: offset is within bounds
                unsafe { super::from_raw_parts(text.as_ptr().add(offset), text.len() - offset) };
            if let Some(i) = memchr_naive(x, slice) { Some(offset + i) } else { None }
        }
    )
}
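
// Illustrative sketch, added here (not part of the original file): the helper
// `split_for_scan` below is hypothetical and simply reproduces the head/body/tail
// arithmetic used by the runtime branch above, assuming the body finds no match,
// so the three regions can be inspected on a concrete buffer.
#[cfg(test)]
mod alignment_split_example {
    use super::USIZE_BYTES;

    /// Returns the `(head, body, tail)` lengths: the head runs up to the first
    /// word-aligned address, the body is the largest run of whole two-word blocks,
    /// and the tail is whatever remains.
    fn split_for_scan(text: &[u8]) -> (usize, usize, usize) {
        let len = text.len();
        let head = text.as_ptr().align_offset(USIZE_BYTES).min(len);
        let body = (len - head) / (2 * USIZE_BYTES) * (2 * USIZE_BYTES);
        (head, body, len - head - body)
    }

    #[test]
    fn regions_match_the_scan_layout() {
        let buf = [0u8; 100];
        for start in 0..USIZE_BYTES {
            let slice = &buf[start..];
            let (head, body, tail) = split_for_scan(slice);
            // After the head, the scan position is word-aligned (at runtime,
            // `align_offset` returns the real offset for byte pointers).
            assert_eq!((slice.as_ptr() as usize + head) % USIZE_BYTES, 0);
            // The body is a whole number of two-word blocks; the tail is shorter
            // than one such block.
            assert_eq!(body % (2 * USIZE_BYTES), 0);
            assert!(tail < 2 * USIZE_BYTES);
        }
    }
}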

/// Returns the last index matching the byte `x` in `text`.
#[must_use]
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` into three parts:
    // - the unaligned tail, after the last word-aligned address in `text`,
    // - the body, scanned two words at a time,
    // - the first remaining bytes, less than two words long.
    let len = text.len();
    let ptr = text.as_ptr();
    type Chunk = usize;

    let (min_aligned_offset, max_aligned_offset) = {
        // We call this just to obtain the length of the prefix and suffix.
        // In the middle we always process two chunks at once.
        // SAFETY: transmuting `[u8]` to `[(usize, usize)]` is safe except for size
        // differences, which are handled by `align_to`.
        let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() };
        (prefix.len(), len - suffix.len())
    };

    let mut offset = max_aligned_offset;
    if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
        return Some(offset + index);
    }

    // Search the body of the text, making sure we don't cross `min_aligned_offset`.
    // `offset` is always aligned, so just testing `>` is sufficient and avoids
    // possible overflow.
    let repeated_x = usize::repeat_u8(x);
    let chunk_bytes = size_of::<Chunk>();

    while offset > min_aligned_offset {
        // SAFETY: `offset` starts at `len - suffix.len()`; as long as it is greater
        // than `min_aligned_offset` (`prefix.len()`), the remaining distance is at
        // least `2 * chunk_bytes`.
        unsafe {
            let u = *(ptr.add(offset - 2 * chunk_bytes) as *const Chunk);
            let v = *(ptr.add(offset - chunk_bytes) as *const Chunk);

            // Break if there is a matching byte.
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset -= 2 * chunk_bytes;
    }

    // Find the byte before the point where the body loop stopped.
    text[..offset].iter().rposition(|elt| *elt == x)
}
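
// Usage sketch, added for illustration (not part of the original file): `memrchr`
// mirrors `memchr` but scans from the end, so it reports the last occurrence.
#[cfg(test)]
mod memrchr_usage_example {
    #[test]
    fn returns_the_last_match() {
        assert_eq!(super::memrchr(b'o', b"foo bar foo"), Some(10));
        assert_eq!(super::memrchr(b'z', b"foo bar foo"), None);
    }
}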