core/stdarch/crates/core_arch/src/x86/
f16c.rs

1//! [F16C intrinsics].
2//!
3//! [F16C intrinsics]: https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=fp16&expand=1769
4
5use crate::core_arch::{simd::*, x86::*};
6use crate::intrinsics::simd::*;
7
8#[cfg(test)]
9use stdarch_test::assert_instr;
10
// Raw LLVM intrinsic bindings for the VCVTPS2PH instruction. The f32 -> f16
// direction needs an immediate rounding-control operand, so it cannot be
// expressed with a plain `simd_cast` and must go through LLVM directly.
// (The f16 -> f32 direction is exact and is done with `simd_cast` below.)
#[allow(improper_ctypes)]
unsafe extern "unadjusted" {
    // VCVTPS2PH xmm: 4 x f32 -> 4 x f16 packed into the low 64 bits of the
    // returned i16x8; the upper four lanes are zeroed by the instruction.
    #[link_name = "llvm.x86.vcvtps2ph.128"]
    fn llvm_vcvtps2ph_128(a: f32x4, rounding: i32) -> i16x8;
    // VCVTPS2PH ymm: 8 x f32 -> 8 x f16 filling the whole i16x8.
    #[link_name = "llvm.x86.vcvtps2ph.256"]
    fn llvm_vcvtps2ph_256(a: f32x8, rounding: i32) -> i16x8;
}
18
19/// Converts the 4 x 16-bit half-precision float values in the lowest 64-bit of
20/// the 128-bit vector `a` into 4 x 32-bit float values stored in a 128-bit wide
21/// vector.
22///
23/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtph_ps)
24#[inline]
25#[target_feature(enable = "f16c")]
26#[cfg_attr(test, assert_instr("vcvtph2ps"))]
27#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
28pub fn _mm_cvtph_ps(a: __m128i) -> __m128 {
29    unsafe {
30        let a: f16x8 = transmute(a);
31        let a: f16x4 = simd_shuffle!(a, a, [0, 1, 2, 3]);
32        simd_cast(a)
33    }
34}
35
36/// Converts the 8 x 16-bit half-precision float values in the 128-bit vector
37/// `a` into 8 x 32-bit float values stored in a 256-bit wide vector.
38///
39/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtph_ps)
40#[inline]
41#[target_feature(enable = "f16c")]
42#[cfg_attr(test, assert_instr("vcvtph2ps"))]
43#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
44pub fn _mm256_cvtph_ps(a: __m128i) -> __m256 {
45    unsafe {
46        let a: f16x8 = transmute(a);
47        simd_cast(a)
48    }
49}
50
51/// Converts the 4 x 32-bit float values in the 128-bit vector `a` into 4 x
52/// 16-bit half-precision float values stored in the lowest 64-bit of a 128-bit
53/// vector.
54///
55/// Rounding is done according to the `imm_rounding` parameter, which can be one of:
56///
57/// * [`_MM_FROUND_TO_NEAREST_INT`] | [`_MM_FROUND_NO_EXC`] : round to nearest and suppress exceptions
58/// * [`_MM_FROUND_TO_NEG_INF`] | [`_MM_FROUND_NO_EXC`] : round down and suppress exceptions
59/// * [`_MM_FROUND_TO_POS_INF`] | [`_MM_FROUND_NO_EXC`] : round up and suppress exceptions
60/// * [`_MM_FROUND_TO_ZERO`] | [`_MM_FROUND_NO_EXC`] : truncate and suppress exceptions
61/// * [`_MM_FROUND_CUR_DIRECTION`] : use `MXCSR.RC` - see [`_MM_SET_ROUNDING_MODE`]
62///
63/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cvtps_ph)
64#[inline]
65#[target_feature(enable = "f16c")]
66#[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))]
67#[rustc_legacy_const_generics(1)]
68#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
69pub fn _mm_cvtps_ph<const IMM_ROUNDING: i32>(a: __m128) -> __m128i {
70    static_assert_uimm_bits!(IMM_ROUNDING, 3);
71    unsafe {
72        let a = a.as_f32x4();
73        let r = llvm_vcvtps2ph_128(a, IMM_ROUNDING);
74        transmute(r)
75    }
76}
77
78/// Converts the 8 x 32-bit float values in the 256-bit vector `a` into 8 x
79/// 16-bit half-precision float values stored in a 128-bit wide vector.
80///
81/// Rounding is done according to the `imm_rounding` parameter, which can be one of:
82///
83/// * [`_MM_FROUND_TO_NEAREST_INT`] | [`_MM_FROUND_NO_EXC`] : round to nearest and suppress exceptions
84/// * [`_MM_FROUND_TO_NEG_INF`] | [`_MM_FROUND_NO_EXC`] : round down and suppress exceptions
85/// * [`_MM_FROUND_TO_POS_INF`] | [`_MM_FROUND_NO_EXC`] : round up and suppress exceptions
86/// * [`_MM_FROUND_TO_ZERO`] | [`_MM_FROUND_NO_EXC`] : truncate and suppress exceptions
87/// * [`_MM_FROUND_CUR_DIRECTION`] : use `MXCSR.RC` - see [`_MM_SET_ROUNDING_MODE`]
88///
89/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_ph)
90#[inline]
91#[target_feature(enable = "f16c")]
92#[cfg_attr(test, assert_instr("vcvtps2ph", IMM_ROUNDING = 0))]
93#[rustc_legacy_const_generics(1)]
94#[stable(feature = "x86_f16c_intrinsics", since = "1.68.0")]
95pub fn _mm256_cvtps_ph<const IMM_ROUNDING: i32>(a: __m256) -> __m128i {
96    static_assert_uimm_bits!(IMM_ROUNDING, 3);
97    unsafe {
98        let a = a.as_f32x8();
99        let r = llvm_vcvtps2ph_256(a, IMM_ROUNDING);
100        transmute(r)
101    }
102}
103
#[cfg(test)]
mod tests {
    use crate::{core_arch::x86::*, mem::transmute};
    use stdarch_test::simd_test;

    // IEEE 754 binary16 encodings of the small integers used as test data
    // (sign 0, 5-bit biased exponent, 10-bit mantissa).
    const F16_ONE: i16 = 0x3c00;
    const F16_TWO: i16 = 0x4000;
    const F16_THREE: i16 = 0x4200;
    const F16_FOUR: i16 = 0x4400;
    const F16_FIVE: i16 = 0x4500;
    const F16_SIX: i16 = 0x4600;
    const F16_SEVEN: i16 = 0x4700;
    const F16_EIGHT: i16 = 0x4800;

    // Vectors are built with the `setr` (low-lane-first) constructors; the
    // element lists are therefore the reverse of the `set` equivalents.

    #[simd_test(enable = "f16c")]
    unsafe fn test_mm_cvtph_ps() {
        let a = _mm_setr_epi16(F16_FOUR, F16_THREE, F16_TWO, F16_ONE, 0, 0, 0, 0);
        let r = _mm_cvtph_ps(a);
        let e = _mm_setr_ps(4.0, 3.0, 2.0, 1.0);
        assert_eq_m128(r, e);
    }

    #[simd_test(enable = "f16c")]
    unsafe fn test_mm256_cvtph_ps() {
        let a = _mm_setr_epi16(
            F16_EIGHT, F16_SEVEN, F16_SIX, F16_FIVE, F16_FOUR, F16_THREE, F16_TWO, F16_ONE,
        );
        let r = _mm256_cvtph_ps(a);
        let e = _mm256_setr_ps(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
        assert_eq_m256(r, e);
    }

    #[simd_test(enable = "f16c")]
    unsafe fn test_mm_cvtps_ph() {
        let a = _mm_setr_ps(4.0, 3.0, 2.0, 1.0);
        let r = _mm_cvtps_ph::<_MM_FROUND_CUR_DIRECTION>(a);
        // The upper four f16 lanes of the result are zeroed by VCVTPS2PH.
        let e = _mm_setr_epi16(F16_FOUR, F16_THREE, F16_TWO, F16_ONE, 0, 0, 0, 0);
        assert_eq_m128i(r, e);
    }

    #[simd_test(enable = "f16c")]
    unsafe fn test_mm256_cvtps_ph() {
        let a = _mm256_setr_ps(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
        let r = _mm256_cvtps_ph::<_MM_FROUND_CUR_DIRECTION>(a);
        let e = _mm_setr_epi16(
            F16_EIGHT, F16_SEVEN, F16_SIX, F16_FIVE, F16_FOUR, F16_THREE, F16_TWO, F16_ONE,
        );
        assert_eq_m128i(r, e);
    }
}