core/slice/ascii.rs
//! Operations on ASCII `[u8]`.

use core::ascii::EscapeDefault;

use crate::fmt::{self, Write};
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
use crate::intrinsics::const_eval_select;
use crate::{ascii, iter, ops};

#[cfg(not(test))]
impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
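    ///
    /// # Examples
    ///
    /// A short illustrative check:
    ///
    /// ```
    /// assert!(b"banana split".is_ascii());
    /// assert!(!b"caf\xc3\xa9".is_ascii());
    /// ```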
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
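    ///
    /// # Examples
    ///
    /// A sketch of the intended usage (nightly-only, behind the `ascii_char` feature):
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// let bytes = b"escape";
    /// let chars = bytes.as_ascii().unwrap();
    /// assert_eq!(chars.len(), bytes.len());
    ///
    /// // Non-ASCII input yields `None`.
    /// assert!(b"caf\xc3\xa9".as_ascii().is_none());
    /// ```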
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
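    ///
    /// # Examples
    ///
    /// A minimal sketch of a correct use, assuming the caller has already
    /// verified the input is ASCII (nightly-only, `ascii_char` feature):
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// let bytes = b"ascii only";
    /// // SAFETY: every byte above is in `0..=127`.
    /// let chars = unsafe { bytes.as_ascii_unchecked() };
    /// assert_eq!(chars.len(), bytes.len());
    /// ```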
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
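    ///
    /// # Examples
    ///
    /// An illustrative comparison:
    ///
    /// ```
    /// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
    /// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRI"));
    /// ```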
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_unstable(feature = "const_eq_ignore_ascii_case", issue = "131719")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
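    ///
    /// # Examples
    ///
    /// A small in-place conversion sketch:
    ///
    /// ```
    /// let mut bytes = *b"hello, world!";
    /// bytes.make_ascii_uppercase();
    /// assert_eq!(&bytes, b"HELLO, WORLD!");
    /// ```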
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
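    ///
    /// # Examples
    ///
    /// A small in-place conversion sketch:
    ///
    /// ```
    /// let mut bytes = *b"HELLO, WORLD!";
    /// bytes.make_ascii_lowercase();
    /// assert_eq!(&bytes, b"hello, world!");
    /// ```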
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b" ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b" ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b" ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}

impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}

/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
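///
/// # Examples
///
/// An illustrative way to collect the escaped output:
///
/// ```
/// let escaped: String = b"ab\0".escape_ascii().map(char::from).collect();
/// assert_eq!(escaped, "ab\\x00");
/// ```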
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}

#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        self.next_back()
    }
}

#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // guaranteed to be non-empty, better to write it as a str
                f.write_str(ascii::escape_default(b).as_str())?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}

/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" way produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}

/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (defined below) returns true, then we know the answer is false.
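///
/// As an illustrative (hypothetical) layout: with an 8-byte `usize` and a
/// 13-byte slice whose start is 5 bytes before the next alignment boundary,
/// the first unaligned load covers bytes `0..8`, the aligned loop starts at
/// byte 5 and terminates immediately (since `5 >= 13 - 8`), and the final
/// unaligned load covers bytes `5..13` -- every byte is inspected, some twice.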
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse2")))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
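            ///
            /// For example (32-bit word shown for brevity): `0x4180_4141 & 0x8080_8080`
            /// is `0x0080_0000`, which is nonzero, so a word containing the byte `0x80`
            /// is reported as containing a non-ASCII byte.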
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, so if `align_offset` is 0 the
            // aligned loop would re-read that same word; skip a full word ahead instead.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified `len >= USIZE_SIZE` above, so reading a whole `usize`
            // from the start of the slice is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // aligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word. The last aligned word
            // is deliberately left for the unaligned tail read below, so the tail is
            // always at most one `usize` and we avoid an extra `byte_pos == len` branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end.
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}

/// ASCII test optimized to use the `pmovmskb` instruction available on `x86-64`
/// platforms.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `pmovmskb`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `pmovmskb` instruction on x86-64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127, so `count` should equal the chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}