core/slice/cmp.rs

//! Comparison traits for `[T]`.

use super::{from_raw_parts, memchr};
use crate::cmp::{self, BytewiseEq, Ordering};
use crate::intrinsics::compare_bytes;
use crate::num::NonZero;
use crate::{ascii, mem};

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, U> PartialEq<[U]> for [T]
where
    T: PartialEq<U>,
{
    fn eq(&self, other: &[U]) -> bool {
        SlicePartialEq::equal(self, other)
    }

    fn ne(&self, other: &[U]) -> bool {
        SlicePartialEq::not_equal(self, other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for [T] {}

/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
    fn cmp(&self, other: &[T]) -> Ordering {
        SliceOrd::compare(self, other)
    }
}

/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
        SlicePartialOrd::partial_compare(self, other)
    }
}
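
// A brief usage sketch (illustrative, not part of the original source): the
// impls above are what back the ordinary comparison operators on slices,
// comparing element-wise and breaking ties by length.
//
//     let a: &[i32] = &[1, 2];
//     let b: &[i32] = &[1, 2, 3];
//     assert!(a < b); // equal common prefix, so the shorter slice is less
//     assert!(a[..] == b[..2]); // element-wise equality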

#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
trait SlicePartialEq<B> {
    fn equal(&self, other: &[B]) -> bool;

    fn not_equal(&self, other: &[B]) -> bool {
        !self.equal(other)
    }
}

// Generic slice equality
impl<A, B> SlicePartialEq<B> for [A]
where
    A: PartialEq<B>,
{
    default fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // Implemented as explicit indexing rather
        // than zipped iterators for performance reasons.
        // See PR https://github.com/rust-lang/rust/pull/116846
        for idx in 0..self.len() {
            // bounds checks are optimized away
            if self[idx] != other[idx] {
                return false;
            }
        }

        true
    }
}
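
// Illustrative sketch (not part of the original source): the generic impl
// above is what lets slices with *different* element types compare, as long
// as the elements themselves do.
//
//     let owned: &[String] = &["a".to_string(), "b".to_string()];
//     let borrowed: &[&str] = &["a", "b"];
//     assert_eq!(owned, borrowed); // element-by-element indexed loop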

// When each element can be compared byte-wise, we can compare all the bytes
// of both slices in a single call to the `compare_bytes` intrinsic.
impl<A, B> SlicePartialEq<B> for [A]
where
    A: BytewiseEq<B>,
{
    fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
        // The two slices have been checked to have the same size above.
        unsafe {
            let size = mem::size_of_val(self);
            compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
        }
    }
}
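
// Illustrative sketch (not part of the original source): for element types
// such as `u8` that implement `BytewiseEq`, the specialization above should
// reduce the whole comparison to a single `memcmp`-style call rather than a
// per-element loop.
//
//     let a: &[u8] = b"hello";
//     let b: &[u8] = b"hello";
//     assert!(a == b); // one compare_bytes call over all 5 bytes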

#[doc(hidden)]
// intermediate trait for specialization of slice's PartialOrd
trait SlicePartialOrd: Sized {
    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}

impl<A: PartialOrd> SlicePartialOrd for A {
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        let l = cmp::min(left.len(), right.len());

        // Slice to the loop iteration range to enable bounds-check
        // elimination in the compiler
        let lhs = &left[..l];
        let rhs = &right[..l];

        for i in 0..l {
            match lhs[i].partial_cmp(&rhs[i]) {
                Some(Ordering::Equal) => (),
                non_eq => return non_eq,
            }
        }

        left.len().partial_cmp(&right.len())
    }
}
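
// Illustrative sketch (not part of the original source): with a merely
// partial element order, the first incomparable pair makes the whole
// comparison return `None`; otherwise the length acts as the tiebreaker.
//
//     use core::cmp::Ordering;
//     let a = [1.0_f64, f64::NAN];
//     let b = [1.0_f64, 2.0];
//     assert_eq!(a[..].partial_cmp(&b[..]), None); // NaN at index 1
//     assert_eq!([1.0_f64][..].partial_cmp(&[1.0, 2.0][..]), Some(Ordering::Less));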

// This is the impl that we would like to have. Unfortunately it's not sound.
// See `partial_ord_slice.rs`.
/*
impl<A> SlicePartialOrd for A
where
    A: Ord,
{
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}
*/

impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}

#[rustc_specialization_trait]
trait AlwaysApplicableOrd: SliceOrd + Ord {}

macro_rules! always_applicable_ord {
    ($([$($p:tt)*] $t:ty,)*) => {
        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
    }
}

always_applicable_ord! {
    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
    [] bool, [] char,
    [T: ?Sized] *const T, [T: ?Sized] *mut T,
    [T: AlwaysApplicableOrd] &T,
    [T: AlwaysApplicableOrd] &mut T,
    [T: AlwaysApplicableOrd] Option<T>,
}

#[doc(hidden)]
// intermediate trait for specialization of slice's Ord
trait SliceOrd: Sized {
    fn compare(left: &[Self], right: &[Self]) -> Ordering;
}

impl<A: Ord> SliceOrd for A {
    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
        let l = cmp::min(left.len(), right.len());

        // Slice to the loop iteration range to enable bounds-check
        // elimination in the compiler
        let lhs = &left[..l];
        let rhs = &right[..l];

        for i in 0..l {
            match lhs[i].cmp(&rhs[i]) {
                Ordering::Equal => (),
                non_eq => return non_eq,
            }
        }

        left.len().cmp(&right.len())
    }
}
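
// Illustrative sketch (not part of the original source): lexicographic
// ordering, with the length comparison as the final tiebreaker.
//
//     use core::cmp::Ordering;
//     assert_eq!([1, 2, 3][..].cmp(&[1, 2, 4][..]), Ordering::Less); // first difference wins
//     assert_eq!([1, 2][..].cmp(&[1, 2, 3][..]), Ordering::Less); // shared prefix, shorter first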

/// Marks that a type should be treated as an unsigned byte for comparisons.
///
/// # Safety
/// * The type must be readable as a `u8`, meaning it has to have the same
///   layout as `u8` and always be initialized.
/// * For every `x` and `y` of this type, `Ord::cmp(&x, &y)` must return the
///   same value as `Ord::cmp(&transmute::<_, u8>(x), &transmute::<_, u8>(y))`.
#[rustc_specialization_trait]
unsafe trait UnsignedBytewiseOrd {}

unsafe impl UnsignedBytewiseOrd for bool {}
unsafe impl UnsignedBytewiseOrd for u8 {}
unsafe impl UnsignedBytewiseOrd for NonZero<u8> {}
unsafe impl UnsignedBytewiseOrd for Option<NonZero<u8>> {}
unsafe impl UnsignedBytewiseOrd for ascii::Char {}

// `compare_bytes` compares a sequence of unsigned bytes lexicographically, so
// use it if the requirements for `UnsignedBytewiseOrd` are fulfilled.
impl<A: Ord + UnsignedBytewiseOrd> SliceOrd for A {
    #[inline]
    fn compare(left: &[Self], right: &[Self]) -> Ordering {
        // Since the length of a slice is always less than or equal to
        // isize::MAX, this never underflows.
        let diff = left.len() as isize - right.len() as isize;
        // This comparison gets optimized away (on x86_64 and ARM) because the
        // subtraction updates flags.
        let len = if left.len() < right.len() { left.len() } else { right.len() };
        let left = left.as_ptr().cast();
        let right = right.as_ptr().cast();
        // SAFETY: `left` and `right` are references and are thus guaranteed to
        // be valid. `UnsignedBytewiseOrd` is only implemented for types that
        // are valid u8s and can be compared the same way. We use the minimum
        // of both lengths which guarantees that both regions are valid for
        // reads in that interval.
        let mut order = unsafe { compare_bytes(left, right, len) as isize };
        if order == 0 {
            order = diff;
        }
        order.cmp(&0)
    }
}
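
// Illustrative sketch (not part of the original source): for `[u8]` the
// specialization above should lower to a single `compare_bytes` call plus
// the length tiebreak, instead of an element-wise loop.
//
//     use core::cmp::Ordering;
//     assert_eq!(b"abc"[..].cmp(&b"abd"[..]), Ordering::Less);
//     assert_eq!(b"abc"[..].cmp(&b"ab"[..]), Ordering::Greater); // equal bytes, longer is greater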

pub(super) trait SliceContains: Sized {
    fn slice_contains(&self, x: &[Self]) -> bool;
}

impl<T> SliceContains for T
where
    T: PartialEq,
{
    default fn slice_contains(&self, x: &[Self]) -> bool {
        x.iter().any(|y| *y == *self)
    }
}

impl SliceContains for u8 {
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}

impl SliceContains for i8 {
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
        // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
        // than `isize::MAX`. The returned slice is never mutated.
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}
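
// Illustrative sketch (not part of the original source): `slice::contains`
// goes through `SliceContains`, so byte-sized searches should dispatch to
// `memchr` via the two impls above.
//
//     assert!(b"hello".contains(&b'l')); // u8: memchr directly
//     assert!([-1i8, 2, 3].contains(&-1)); // i8: reinterpret as &[u8], then memchr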

macro_rules! impl_slice_contains {
    ($($t:ty),*) => {
        $(
            impl SliceContains for $t {
                #[inline]
                fn slice_contains(&self, arr: &[$t]) -> bool {
                    // Make our LANE_COUNT 4x the normal lane count (aiming for 128-bit vectors).
                    // The compiler will nicely unroll it.
                    const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8));
                    // SIMD
                    let mut chunks = arr.chunks_exact(LANE_COUNT);
                    for chunk in &mut chunks {
                        if chunk.iter().fold(false, |acc, x| acc | (*x == *self)) {
                            return true;
                        }
                    }
                    // Scalar remainder
                    return chunks.remainder().iter().any(|x| *x == *self);
                }
            }
        )*
    };
}

impl_slice_contains!(u16, u32, u64, i16, i32, i64, f32, f64, usize, isize);
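
// Illustrative sketch (not part of the original source): for the wider
// primitives listed above, `contains` runs the chunked loop, whose branch-free
// `fold` over each chunk is intended to let the compiler auto-vectorize; the
// tail shorter than LANE_COUNT is handled by the scalar `any`.
//
//     let haystack: Vec<u32> = (0..1000).collect();
//     assert!(haystack.contains(&999)); // chunks of LANE_COUNT, then scalar tail
//     assert!(!haystack.contains(&1000));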