//! Types and traits associated with masking elements of vectors.
#![allow(non_camel_case_types)]
#[cfg_attr(
not(all(target_arch = "x86_64", target_feature = "avx512f")),
path = "masks/full_masks.rs"
)]
#[cfg_attr(
all(target_arch = "x86_64", target_feature = "avx512f"),
path = "masks/bitmask.rs"
)]
mod mask_impl;
use crate::simd::{LaneCount, Simd, SimdCast, SimdElement, SupportedLaneCount};
use core::cmp::Ordering;
use core::{fmt, mem};
mod sealed {
use super::*;
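    /// Not only does this seal the `MaskElement` trait, but these functions prevent other traits
    /// from bleeding into the parent bounds. For example, `eq` could be provided by requiring
    /// `PartialEq` in the parent bounds, but that would prevent us from ever removing that bound,
    /// or from implementing `MaskElement` on non-`PartialEq` types in the future.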
pub trait Sealed {
fn valid<const N: usize>(value: Simd<Self, N>) -> bool
where
LaneCount<N>: SupportedLaneCount,
Self: SimdElement;
fn eq(self, other: Self) -> bool;
fn to_usize(self) -> usize;
fn max_unsigned() -> u64;
type Unsigned: SimdElement;
const TRUE: Self;
const FALSE: Self;
}
}
use sealed::Sealed;
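/// Marker trait for types that may be used as SIMD mask elements.
///
/// # Safety
/// Type must be a signed integer.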
pub unsafe trait MaskElement: SimdElement<Mask = Self> + SimdCast + Sealed {}
macro_rules! impl_element {
{ $ty:ty, $unsigned:ty } => {
impl Sealed for $ty {
#[inline]
fn valid<const N: usize>(value: Simd<Self, N>) -> bool
where
LaneCount<N>: SupportedLaneCount,
{
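                // `Simd`'s own comparison methods can't be used here, because they call this
                // validity check themselves, which would result in an infinite loop.
                // SAFETY: `value` is an integer vector, as the intrinsics require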
unsafe {
use core::intrinsics::simd;
let falses: Simd<Self, N> = simd::simd_eq(value, Simd::splat(0 as _));
let trues: Simd<Self, N> = simd::simd_eq(value, Simd::splat(-1 as _));
let valid: Simd<Self, N> = simd::simd_or(falses, trues);
simd::simd_reduce_all(valid)
}
}
#[inline]
fn eq(self, other: Self) -> bool { self == other }
#[inline]
fn to_usize(self) -> usize {
self as usize
}
#[inline]
fn max_unsigned() -> u64 {
<$unsigned>::MAX as u64
}
type Unsigned = $unsigned;
const TRUE: Self = -1;
const FALSE: Self = 0;
}
unsafe impl MaskElement for $ty {}
}
}
impl_element! { i8, u8 }
impl_element! { i16, u16 }
impl_element! { i32, u32 }
impl_element! { i64, u64 }
impl_element! { isize, usize }
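/// A SIMD vector mask for `N` elements of width specified by `T`.
///
/// Masks represent boolean inclusion/exclusion on a per-element basis.
///
/// The layout of this type is unspecified, and may change between platforms
/// and/or Rust versions, and code should not assume that it is equivalent to
/// `[T; N]`.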
#[repr(transparent)]
pub struct Mask<T, const N: usize>(mask_impl::Mask<T, N>)
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount;
impl<T, const N: usize> Copy for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
}
impl<T, const N: usize> Clone for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<T, const N: usize> Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
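    /// Constructs a mask by setting all elements to the given value.
    ///
    /// A usage sketch, assuming this module is compiled as `core::simd` on a
    /// nightly compiler with the `portable_simd` feature:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Mask;
    /// let mask = Mask::<i32, 8>::splat(true);
    /// assert_eq!(mask.to_array(), [true; 8]);
    /// ```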
#[inline]
pub fn splat(value: bool) -> Self {
Self(mask_impl::Mask::splat(value))
}
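    /// Converts an array of bools to a SIMD mask.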
#[inline]
pub fn from_array(array: [bool; N]) -> Self {
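        // SAFETY: Rust's bool has a layout of 1 byte (u8) with a value of
        // `0b0000_0001` for `true` and `0b0000_0000` for `false`, so an array
        // of bools is also a valid array of bytes `[u8; N]`.
        // This would hypothetically be valid as an "in-place" transmute, but
        // these are "dependently-sized" types, so `transmute_copy` it is.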
unsafe {
let bytes: [u8; N] = mem::transmute_copy(&array);
let bools: Simd<i8, N> =
core::intrinsics::simd::simd_ne(Simd::from_array(bytes), Simd::splat(0u8));
Mask::from_int_unchecked(core::intrinsics::simd::simd_cast(bools))
}
}
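    /// Converts a SIMD mask to an array of bools.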
#[inline]
pub fn to_array(self) -> [bool; N] {
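        // SAFETY: a set mask element is all ones, so casting to `i8` and
        // masking with 1 yields exactly `0b0000_0001` (true) or
        // `0b0000_0000` (false), the valid byte representations of `bool`,
        // making the final `transmute_copy` to `[bool; N]` sound.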
unsafe {
let mut bytes: Simd<i8, N> = core::intrinsics::simd::simd_cast(self.to_int());
bytes &= Simd::splat(1i8);
mem::transmute_copy(&bytes)
}
}
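    /// Converts a vector of integers to a mask, where 0 represents `false` and -1
    /// represents `true`.
    ///
    /// # Safety
    /// All elements must be either 0 or -1.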
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub unsafe fn from_int_unchecked(value: Simd<T, N>) -> Self {
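        // SAFETY: the caller must uphold the invariant that every element is 0 or -1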
unsafe {
core::intrinsics::assume(<T as Sealed>::valid(value));
Self(mask_impl::Mask::from_int_unchecked(value))
}
}
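    /// Converts a vector of integers to a mask, where 0 represents `false` and -1
    /// represents `true`.
    ///
    /// # Panics
    /// Panics if any element is not 0 or -1.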
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
#[track_caller]
pub fn from_int(value: Simd<T, N>) -> Self {
assert!(T::valid(value), "all values must be either 0 or -1");
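        // SAFETY: the validity of every element was just checked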
unsafe { Self::from_int_unchecked(value) }
}
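    /// Converts the mask to a vector of integers, where 0 represents `false` and -1
    /// represents `true`.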
#[inline]
#[must_use = "method returns a new vector and does not mutate the original value"]
pub fn to_int(self) -> Simd<T, N> {
self.0.to_int()
}
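    /// Converts the mask to a mask of any other element size.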
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn cast<U: MaskElement>(self) -> Mask<U, N> {
Mask(self.0.convert())
}
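    /// Tests the value of the specified element.
    ///
    /// # Safety
    /// `index` must be less than `N`.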
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
pub unsafe fn test_unchecked(&self, index: usize) -> bool {
unsafe { self.0.test_unchecked(index) }
}
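    /// Tests the value of the specified element.
    ///
    /// # Panics
    /// Panics if `index` is greater than or equal to the number of elements in the mask.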
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
#[track_caller]
pub fn test(&self, index: usize) -> bool {
assert!(index < N, "element index out of range");
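        // SAFETY: the index was just checked against `N`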
unsafe { self.test_unchecked(index) }
}
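    /// Sets the value of the specified element.
    ///
    /// # Safety
    /// `index` must be less than `N`.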
#[inline]
pub unsafe fn set_unchecked(&mut self, index: usize, value: bool) {
unsafe {
self.0.set_unchecked(index, value);
}
}
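    /// Sets the value of the specified element.
    ///
    /// # Panics
    /// Panics if `index` is greater than or equal to the number of elements in the mask.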
#[inline]
#[track_caller]
pub fn set(&mut self, index: usize, value: bool) {
assert!(index < N, "element index out of range");
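        // SAFETY: the index was just checked against `N`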
unsafe {
self.set_unchecked(index, value);
}
}
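    /// Returns true if any element is set, or false otherwise.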
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
pub fn any(self) -> bool {
self.0.any()
}
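    /// Returns true if all elements are set, or false otherwise.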
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
pub fn all(self) -> bool {
self.0.all()
}
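    /// Creates a bitmask from a mask.
    ///
    /// Each bit is set if the corresponding element in the mask is `true`.
    /// If the mask contains more than 64 elements, the bitmask is truncated to the first 64.
    ///
    /// A usage sketch, assuming this module is compiled as `core::simd` on a
    /// nightly compiler with the `portable_simd` feature:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Mask;
    /// let mask = Mask::<i32, 4>::from_array([true, false, true, false]);
    /// assert_eq!(mask.to_bitmask(), 0b0101);
    /// ```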
#[inline]
#[must_use = "method returns a new integer and does not mutate the original value"]
pub fn to_bitmask(self) -> u64 {
self.0.to_bitmask_integer()
}
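    /// Creates a mask from a bitmask.
    ///
    /// For each bit, if it is set, the corresponding element in the mask is set to `true`.
    /// If the mask contains more than 64 elements, the remainder are set to `false`.
    ///
    /// A usage sketch under the same assumptions as above:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Mask;
    /// let mask = Mask::<i32, 4>::from_bitmask(0b0110);
    /// assert_eq!(mask.to_array(), [false, true, true, false]);
    /// ```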
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn from_bitmask(bitmask: u64) -> Self {
Self(mask_impl::Mask::from_bitmask_integer(bitmask))
}
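    /// Creates a bitmask vector from a mask.
    ///
    /// Each bit is set if the corresponding element in the mask is `true`.
    /// The remaining bits are unset.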
#[inline]
#[must_use = "method returns a new integer and does not mutate the original value"]
pub fn to_bitmask_vector(self) -> Simd<u8, N> {
self.0.to_bitmask_vector()
}
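    /// Creates a mask from a bitmask vector.
    ///
    /// For each bit, if it is set, the corresponding element in the mask is set to `true`.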
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
pub fn from_bitmask_vector(bitmask: Simd<u8, N>) -> Self {
Self(mask_impl::Mask::from_bitmask_vector(bitmask))
}
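    /// Finds the index of the first set element, or returns `None` if no element is set.
    ///
    /// A usage sketch under the same assumptions as above:
    ///
    /// ```
    /// # #![feature(portable_simd)]
    /// # use core::simd::Mask;
    /// assert_eq!(Mask::<i32, 4>::splat(false).first_set(), None);
    /// assert_eq!(Mask::<i32, 4>::from_array([false, true, false, true]).first_set(), Some(1));
    /// ```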
#[inline]
#[must_use = "method returns the index and does not mutate the original value"]
pub fn first_set(self) -> Option<usize> {
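        // If bitmasks are cheap on this target, finding the first set bit of
        // the bitmask is faster than the vector reduction below.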
if cfg!(target_feature = "sse") && N <= 64 {
let tz = self.to_bitmask().trailing_zeros();
return if tz == 64 { None } else { Some(tz as usize) };
}
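        // To find the first set index without a bitmask:
        // * create an index vector 0..N
        // * replace elements at unset positions with -1 (all ones)
        // * perform an _unsigned_ reduce-min, so -1 becomes the maximum value
        // * if the result is -1, no element was set; otherwise it is the index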
let index = Simd::from_array(
const {
let mut index = [0; N];
let mut i = 0;
while i < N {
index[i] = i;
i += 1;
}
index
},
);
        // SAFETY: the input and output are integer vectors
        let index: Simd<T, N> = unsafe { core::intrinsics::simd::simd_cast(index) };
        let masked_index = self.select(index, Self::splat(true).to_int());
        // SAFETY: the input and output are integer vectors
        let masked_index: Simd<T::Unsigned, N> =
            unsafe { core::intrinsics::simd::simd_cast(masked_index) };
        // SAFETY: the input is an integer vector
        let min_index: T::Unsigned =
            unsafe { core::intrinsics::simd::simd_reduce_min(masked_index) };
        // SAFETY: `T` and `T::Unsigned` are integers of the same size and layout
        let min_index: T = unsafe { core::mem::transmute_copy(&min_index) };
if min_index.eq(T::TRUE) {
None
} else {
Some(min_index.to_usize())
}
}
}
impl<T, const N: usize> From<[bool; N]> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn from(array: [bool; N]) -> Self {
Self::from_array(array)
}
}
impl<T, const N: usize> From<Mask<T, N>> for [bool; N]
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn from(vector: Mask<T, N>) -> Self {
vector.to_array()
}
}
impl<T, const N: usize> Default for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a defaulted mask with all elements set to false (0)"]
fn default() -> Self {
Self::splat(false)
}
}
impl<T, const N: usize> PartialEq for Mask<T, N>
where
T: MaskElement + PartialEq,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new bool and does not mutate the original value"]
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl<T, const N: usize> PartialOrd for Mask<T, N>
where
T: MaskElement + PartialOrd,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
#[must_use = "method returns a new Ordering and does not mutate the original value"]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<T, const N: usize> fmt::Debug for Mask<T, N>
where
T: MaskElement + fmt::Debug,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list()
.entries((0..N).map(|i| self.test(i)))
.finish()
}
}
impl<T, const N: usize> core::ops::BitAnd for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
}
impl<T, const N: usize> core::ops::BitAnd<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitand(self, rhs: bool) -> Self {
self & Self::splat(rhs)
}
}
impl<T, const N: usize> core::ops::BitAnd<Mask<T, N>> for bool
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitand(self, rhs: Mask<T, N>) -> Mask<T, N> {
Mask::splat(self) & rhs
}
}
impl<T, const N: usize> core::ops::BitOr for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
impl<T, const N: usize> core::ops::BitOr<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitor(self, rhs: bool) -> Self {
self | Self::splat(rhs)
}
}
impl<T, const N: usize> core::ops::BitOr<Mask<T, N>> for bool
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitor(self, rhs: Mask<T, N>) -> Mask<T, N> {
Mask::splat(self) | rhs
}
}
impl<T, const N: usize> core::ops::BitXor for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitxor(self, rhs: Self) -> Self::Output {
Self(self.0 ^ rhs.0)
}
}
impl<T, const N: usize> core::ops::BitXor<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Self;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitxor(self, rhs: bool) -> Self::Output {
self ^ Self::splat(rhs)
}
}
impl<T, const N: usize> core::ops::BitXor<Mask<T, N>> for bool
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn bitxor(self, rhs: Mask<T, N>) -> Self::Output {
Mask::splat(self) ^ rhs
}
}
impl<T, const N: usize> core::ops::Not for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
type Output = Mask<T, N>;
#[inline]
#[must_use = "method returns a new mask and does not mutate the original value"]
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl<T, const N: usize> core::ops::BitAndAssign for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: Self) {
self.0 = self.0 & rhs.0;
}
}
impl<T, const N: usize> core::ops::BitAndAssign<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitand_assign(&mut self, rhs: bool) {
*self &= Self::splat(rhs);
}
}
impl<T, const N: usize> core::ops::BitOrAssign for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
self.0 = self.0 | rhs.0;
}
}
impl<T, const N: usize> core::ops::BitOrAssign<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitor_assign(&mut self, rhs: bool) {
*self |= Self::splat(rhs);
}
}
impl<T, const N: usize> core::ops::BitXorAssign for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: Self) {
self.0 = self.0 ^ rhs.0;
}
}
impl<T, const N: usize> core::ops::BitXorAssign<bool> for Mask<T, N>
where
T: MaskElement,
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn bitxor_assign(&mut self, rhs: bool) {
*self ^= Self::splat(rhs);
}
}
macro_rules! impl_from {
{ $from:ty => $($to:ty),* } => {
$(
impl<const N: usize> From<Mask<$from, N>> for Mask<$to, N>
where
LaneCount<N>: SupportedLaneCount,
{
#[inline]
fn from(value: Mask<$from, N>) -> Self {
value.cast()
}
}
)*
}
}
impl_from! { i8 => i16, i32, i64, isize }
impl_from! { i16 => i32, i64, isize, i8 }
impl_from! { i32 => i64, isize, i8, i16 }
impl_from! { i64 => isize, i8, i16, i32 }
impl_from! { isize => i8, i16, i32, i64 }