// core/slice/ascii.rs

//! Operations on ASCII `[u8]`.

use core::ascii::EscapeDefault;

use crate::fmt::{self, Write};
#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
use crate::intrinsics::const_eval_select;
use crate::{ascii, iter, ops};

impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        // Dispatches to the free function below, which is selected per-target
        // (SWAR, SSE2, or LSX) by `cfg` at the bottom of this file.
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        // NOTE: relies on `ascii::Char` being layout-compatible with `u8`
        // (see its definition in `core::ascii`); the cast only reinterprets
        // the element type, leaving length and address unchanged.
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
        {
            const CHUNK_SIZE: usize = 16;
            // The following function has two invariants:
            // 1. The slice lengths must be equal, which we checked above.
            // 2. The slice lengths must be greater than or equal to N, which
            //    this if-statement is checking.
            if self.len() >= CHUNK_SIZE {
                return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
            }
        }

        self.eq_ignore_ascii_case_simple(other)
    }

    /// ASCII case-insensitive equality check without chunk-at-a-time
    /// optimization.
    #[inline]
    const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        //  self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        // Peel one byte off each slice per iteration; the caller already
        // checked the lengths match, so both patterns fail at the same time.
        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
    ///
    /// Platforms that have SIMD instructions may benefit from this
    /// implementation over `eq_ignore_ascii_case_simple`.
    ///
    /// # Invariants
    ///
    /// The caller must guarantee that the slices are equal in length, and the
    /// slice lengths are greater than or equal to `N` bytes.
    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
    #[inline]
    const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): The while-loops that follow should be replaced by
        // for-loops when available in const.

        let (self_chunks, self_rem) = self.as_chunks::<N>();
        let (other_chunks, _) = other.as_chunks::<N>();

        // Branchless check to encourage auto-vectorization
        #[inline(always)]
        const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
            // `&=` instead of early return keeps the loop body branch-free so
            // the compiler can vectorize the whole fixed-size comparison.
            let mut equal_ascii = true;
            let mut j = 0;
            while j < L {
                equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
                j += 1;
            }

            equal_ascii
        }

        // Process the chunks, returning early if an inequality is found
        let mut i = 0;
        while i < self_chunks.len() && i < other_chunks.len() {
            if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
                return false;
            }
            i += 1;
        }

        // Check the length invariant which is necessary for the tail-handling
        // logic to be correct. This should have been upheld by the caller,
        // otherwise lengths less than N will compare as true without any
        // checking.
        debug_assert!(self.len() >= N);

        // If there are remaining tails, load the last N bytes in the slices to
        // avoid falling back to per-byte checking. (The last N bytes overlap
        // the already-checked chunks, but re-checking a few bytes is cheaper
        // than a scalar tail loop.)
        if !self_rem.is_empty() {
            if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
                if !eq_ignore_ascii_inner(a_rem, b_rem) {
                    return false;
                }
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `\x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim_start`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b"  ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `\x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim_end`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b"  ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `\x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b"  ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}
312
// `EscapeByte` is a named zero-sized function type so that `EscapeAscii` can
// spell out its `FlatMap` adapter type below (a closure's type is unnameable).
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
319
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    /// Each input byte is expanded to its `ascii::escape_default` sequence
    /// via the `EscapeByte` ZST, flattened into a single byte stream.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
330
331#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
332impl<'a> iter::Iterator for EscapeAscii<'a> {
333    type Item = u8;
334    #[inline]
335    fn next(&mut self) -> Option<u8> {
336        self.inner.next()
337    }
338    #[inline]
339    fn size_hint(&self) -> (usize, Option<usize>) {
340        self.inner.size_hint()
341    }
342    #[inline]
343    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
344    where
345        Fold: FnMut(Acc, Self::Item) -> R,
346        R: ops::Try<Output = Acc>,
347    {
348        self.inner.try_fold(init, fold)
349    }
350    #[inline]
351    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
352    where
353        Fold: FnMut(Acc, Self::Item) -> Acc,
354    {
355        self.inner.fold(init, fold)
356    }
357    #[inline]
358    fn last(mut self) -> Option<u8> {
359        self.next_back()
360    }
361}
362
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        // Delegate to the inner flat-map, which walks the escape sequences
        // from the back.
        self.inner.next_back()
    }
}
// `EscapeAscii` forwards every call to its inner iterator, so it is fused
// whenever the inner `FlatMap` over a (fused) slice iterator is.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        // Bytes that `escape_default` would expand beyond themselves:
        // non-printables plus backslash and both quote characters.
        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            // The first remaining byte (if any) needs escaping; emit its
            // escape sequence, then loop back to the fast path.
            if let Some(&b) = bytes.first() {
                // guaranteed to be non-empty, better to write it as a str
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The inner flat-map is an implementation detail; print an opaque
        // `EscapeAscii { .. }` instead of its internals.
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}
421
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`.  If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Strip ASCII bytes off the end one at a time; the input was all-ASCII
    // exactly when nothing is left over.
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}
439
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (below) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // If we're already aligned (`align_offset == 0`), reading the first
            // `align_offset` bytes would read nothing, so start the aligned loop
            // one full word in instead.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We returned early above when `len < USIZE_SIZE`, so here
            // `len >= USIZE_SIZE` and reading one `usize` from the start is
            // in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // aligned (`read`, not `read_unaligned`) loads. In practice this should
            // be impossible barring a bug in `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding the last
            // aligned word by itself to be done in tail check later, to ensure that
            // tail is always one `usize` at most to extra branch `byte_pos == len`.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}
552
/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
///
/// Also used by `is_ascii` below as the minimum input length for which the
/// SSE2 path is taken at all.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
const SSE2_CHUNK_SIZE: usize = 64;
556
557#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
558#[inline]
559fn is_ascii_sse2(bytes: &[u8]) -> bool {
560    use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
561
562    let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
563
564    for chunk in chunks {
565        let ptr = chunk.as_ptr();
566        // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
567        let mask = unsafe {
568            let a1 = _mm_loadu_si128(ptr as *const __m128i);
569            let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
570            let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
571            let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
572            // OR all chunks - if any byte has high bit set, combined will too.
573            let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
574            // Create a mask from the MSBs of each byte.
575            // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
576            _mm_movemask_epi8(combined)
577        };
578        if mask != 0 {
579            return false;
580        }
581    }
582
583    // Handle remaining bytes
584    rest.iter().all(|b| b.is_ascii())
585}
586
587/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
588///
589/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
590/// broken AVX-512 code that extracts mask bits one-by-one.
591#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
592#[inline]
593#[rustc_allow_const_fn_unstable(const_eval_select)]
594const fn is_ascii(bytes: &[u8]) -> bool {
595    const USIZE_SIZE: usize = size_of::<usize>();
596    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;
597
598    const_eval_select!(
599        @capture { bytes: &[u8] } -> bool:
600        if const {
601            is_ascii_simple(bytes)
602        } else {
603            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
604            if bytes.len() < SSE2_CHUNK_SIZE {
605                let chunks = bytes.chunks_exact(USIZE_SIZE);
606                let remainder = chunks.remainder();
607                for chunk in chunks {
608                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
609                    if (word & NONASCII_MASK) != 0 {
610                        return false;
611                    }
612                }
613                return remainder.iter().all(|b| b.is_ascii());
614            }
615
616            is_ascii_sse2(bytes)
617        }
618    )
619}
620
/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset. (Counting with `+=` instead of branching per byte is
        // what makes the loop vectorizable.)
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}