core/portable-simd/crates/core_simd/src/masks/full_masks.rs

1//! Masks that take up full SIMD vector registers.
2
3use crate::simd::{LaneCount, MaskElement, Simd, SupportedLaneCount};
4
/// A mask where each lane is represented by a full element of the underlying
/// integer vector: all-ones (`T::TRUE`) for set lanes, zero (`T::FALSE`) for
/// clear lanes. `repr(transparent)` guarantees the same layout as `Simd<T, N>`.
#[repr(transparent)]
pub struct Mask<T, const N: usize>(Simd<T, N>)
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount;
10
// A mask is a plain integer vector, so it can be duplicated bitwise.
impl<T, const N: usize> Copy for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
}
17
impl<T, const N: usize> Clone for Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    /// Clone by bitwise copy, delegating to the `Copy` impl.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    fn clone(&self) -> Self {
        *self
    }
}
29
30impl<T, const N: usize> PartialEq for Mask<T, N>
31where
32    T: MaskElement + PartialEq,
33    LaneCount<N>: SupportedLaneCount,
34{
35    #[inline]
36    fn eq(&self, other: &Self) -> bool {
37        self.0.eq(&other.0)
38    }
39}
40
41impl<T, const N: usize> PartialOrd for Mask<T, N>
42where
43    T: MaskElement + PartialOrd,
44    LaneCount<N>: SupportedLaneCount,
45{
46    #[inline]
47    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
48        self.0.partial_cmp(&other.0)
49    }
50}
51
// `PartialEq` above is a total equivalence whenever the element type's is.
impl<T, const N: usize> Eq for Mask<T, N>
where
    T: MaskElement + Eq,
    LaneCount<N>: SupportedLaneCount,
{
}
58
impl<T, const N: usize> Ord for Mask<T, N>
where
    T: MaskElement + Ord,
    LaneCount<N>: SupportedLaneCount,
{
    /// Total order delegated to the underlying integer vectors.
    #[inline]
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.0.cmp(&other.0)
    }
}
69
/// Used for the bitmask bit-order workaround on big-endian targets.
pub(crate) trait ReverseBits {
    /// Reverse the least significant `n` bits of `self`.
    /// (Remaining bits must be 0.)
    fn reverse_bits(self, n: usize) -> Self;
}

macro_rules! impl_reverse_bits {
    { $($int:ty),* } => {
        $(
        impl ReverseBits for $int {
            #[inline(always)]
            fn reverse_bits(self, n: usize) -> Self {
                // Inherent methods are preferred during resolution, so this
                // calls the primitive's `reverse_bits`, not this trait method.
                let rev = <$int>::reverse_bits(self);
                // Idiomatic bit width: `<$int>::BITS` instead of
                // `core::mem::size_of::<$int>() * 8`.
                let bitsize = <$int>::BITS as usize;
                if n < bitsize {
                    // The full reversal moved the low `n` bits to the top;
                    // shift them back down to the low positions.
                    rev >> (bitsize - n)
                } else {
                    rev
                }
            }
        }
        )*
    }
}

impl_reverse_bits! { u8, u16, u32, u64 }
98
impl<T, const N: usize> Mask<T, N>
where
    T: MaskElement,
    LaneCount<N>: SupportedLaneCount,
{
    /// Construct a mask with every lane set to `value`.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn splat(value: bool) -> Self {
        Self(Simd::splat(if value { T::TRUE } else { T::FALSE }))
    }

    /// Read the value of the lane at index `lane`.
    ///
    /// # Safety
    /// `lane` must be less than `N`.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub unsafe fn test_unchecked(&self, lane: usize) -> bool {
        T::eq(self.0[lane], T::TRUE)
    }

    /// Set the value of the lane at index `lane`.
    ///
    /// # Safety
    /// `lane` must be less than `N`.
    #[inline]
    pub unsafe fn set_unchecked(&mut self, lane: usize, value: bool) {
        self.0[lane] = if value { T::TRUE } else { T::FALSE }
    }

    /// Unwrap the mask into its underlying integer vector, where each lane is
    /// `T::TRUE` or `T::FALSE`.
    #[inline]
    #[must_use = "method returns a new vector and does not mutate the original value"]
    pub fn to_int(self) -> Simd<T, N> {
        self.0
    }

    /// Wrap an integer vector as a mask without checking its lanes.
    ///
    /// # Safety
    /// Every lane of `value` must be `T::TRUE` or `T::FALSE`.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
        Self(value)
    }

    /// Convert to a mask with a different element type, preserving each lane.
    #[inline]
    #[must_use = "method returns a new mask and does not mutate the original value"]
    pub fn convert<U>(self) -> Mask<U, N>
    where
        U: MaskElement,
    {
        // Safety: masks are simply integer vectors of 0 and -1, and we can cast the element type.
        unsafe { Mask(core::intrinsics::simd::simd_cast(self.0)) }
    }

    /// Pack the mask into the low `N` bits of `U`, one bit per lane, after
    /// resizing to `M` lanes (padding with `T::FALSE`, so the extra bits are 0).
    ///
    /// # Safety
    /// The bit width of `U` must equal `M` (callers pair `u8`/8, `u16`/16, …).
    #[inline]
    unsafe fn to_bitmask_impl<U: ReverseBits, const M: usize>(self) -> U
    where
        LaneCount<M>: SupportedLaneCount,
    {
        let resized = self.to_int().resize::<M>(T::FALSE);

        // Safety: `resized` is an integer vector with length M, which must match T
        let bitmask: U = unsafe { core::intrinsics::simd::simd_bitmask(resized) };

        // LLVM assumes bit order should match endianness
        if cfg!(target_endian = "big") {
            bitmask.reverse_bits(M)
        } else {
            bitmask
        }
    }

    /// Inverse of `to_bitmask_impl`: expand the low `M` bits of `bitmask` into
    /// an `M`-lane mask, then resize to `N` lanes.
    ///
    /// # Safety
    /// The bit width of `U` must equal `M` (callers pair `u8`/8, `u16`/16, …).
    #[inline]
    unsafe fn from_bitmask_impl<U: ReverseBits, const M: usize>(bitmask: U) -> Self
    where
        LaneCount<M>: SupportedLaneCount,
    {
        // LLVM assumes bit order should match endianness
        let bitmask = if cfg!(target_endian = "big") {
            bitmask.reverse_bits(M)
        } else {
            bitmask
        };

        // SAFETY: `bitmask` has one bit per lane of the M-lane vector, as the
        // caller guarantees U's width matches M.
        let mask: Simd<T, M> = unsafe {
            core::intrinsics::simd::simd_select_bitmask(
                bitmask,
                Simd::<T, M>::splat(T::TRUE),
                Simd::<T, M>::splat(T::FALSE),
            )
        };

        // SAFETY: `mask` only contains `T::TRUE` or `T::FALSE`
        unsafe { Self::from_int_unchecked(mask.resize::<N>(T::FALSE)) }
    }

    /// Pack the mask into the low `N` bits of a `u64` (remaining bits zero),
    /// dispatching to the smallest bitmask width that fits `N`.
    #[inline]
    pub(crate) fn to_bitmask_integer(self) -> u64 {
        // TODO modify simd_bitmask to zero-extend output, making this unnecessary
        if N <= 8 {
            // Safety: bitmask matches length
            unsafe { self.to_bitmask_impl::<u8, 8>() as u64 }
        } else if N <= 16 {
            // Safety: bitmask matches length
            unsafe { self.to_bitmask_impl::<u16, 16>() as u64 }
        } else if N <= 32 {
            // Safety: bitmask matches length
            unsafe { self.to_bitmask_impl::<u32, 32>() as u64 }
        } else {
            // Safety: bitmask matches length
            unsafe { self.to_bitmask_impl::<u64, 64>() }
        }
    }

    /// Inverse of `to_bitmask_integer`: build a mask from the low `N` bits of
    /// `bitmask`, dispatching to the smallest bitmask width that fits `N`.
    #[inline]
    pub(crate) fn from_bitmask_integer(bitmask: u64) -> Self {
        // TODO modify simd_bitmask_select to truncate input, making this unnecessary
        if N <= 8 {
            // Safety: bitmask matches length
            unsafe { Self::from_bitmask_impl::<u8, 8>(bitmask as u8) }
        } else if N <= 16 {
            // Safety: bitmask matches length
            unsafe { Self::from_bitmask_impl::<u16, 16>(bitmask as u16) }
        } else if N <= 32 {
            // Safety: bitmask matches length
            unsafe { Self::from_bitmask_impl::<u32, 32>(bitmask as u32) }
        } else {
            // Safety: bitmask matches length
            unsafe { Self::from_bitmask_impl::<u64, 64>(bitmask) }
        }
    }

    /// Returns true if any lane is set.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn any(self) -> bool {
        // Safety: use `self` as an integer vector
        unsafe { core::intrinsics::simd::simd_reduce_any(self.to_int()) }
    }

    /// Returns true if all lanes are set.
    #[inline]
    #[must_use = "method returns a new bool and does not mutate the original value"]
    pub fn all(self) -> bool {
        // Safety: use `self` as an integer vector
        unsafe { core::intrinsics::simd::simd_reduce_all(self.to_int()) }
    }
}
236
237impl<T, const N: usize> From<Mask<T, N>> for Simd<T, N>
238where
239    T: MaskElement,
240    LaneCount<N>: SupportedLaneCount,
241{
242    #[inline]
243    fn from(value: Mask<T, N>) -> Self {
244        value.0
245    }
246}
247
248impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
249where
250    T: MaskElement,
251    LaneCount<N>: SupportedLaneCount,
252{
253    type Output = Self;
254    #[inline]
255    #[must_use = "method returns a new mask and does not mutate the original value"]
256    fn bitand(self, rhs: Self) -> Self {
257        // Safety: `self` is an integer vector
258        unsafe { Self(core::intrinsics::simd::simd_and(self.0, rhs.0)) }
259    }
260}
261
262impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
263where
264    T: MaskElement,
265    LaneCount<N>: SupportedLaneCount,
266{
267    type Output = Self;
268    #[inline]
269    #[must_use = "method returns a new mask and does not mutate the original value"]
270    fn bitor(self, rhs: Self) -> Self {
271        // Safety: `self` is an integer vector
272        unsafe { Self(core::intrinsics::simd::simd_or(self.0, rhs.0)) }
273    }
274}
275
276impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
277where
278    T: MaskElement,
279    LaneCount<N>: SupportedLaneCount,
280{
281    type Output = Self;
282    #[inline]
283    #[must_use = "method returns a new mask and does not mutate the original value"]
284    fn bitxor(self, rhs: Self) -> Self {
285        // Safety: `self` is an integer vector
286        unsafe { Self(core::intrinsics::simd::simd_xor(self.0, rhs.0)) }
287    }
288}
289
290impl<T, const N: usize> core::ops::Not for Mask<T, N>
291where
292    T: MaskElement,
293    LaneCount<N>: SupportedLaneCount,
294{
295    type Output = Self;
296    #[inline]
297    #[must_use = "method returns a new mask and does not mutate the original value"]
298    fn not(self) -> Self::Output {
299        Self::splat(true) ^ self
300    }
301}