Skip to main content

core/num/
f16.rs

1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
/// Basic mathematical constants.
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_consts_mod"]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.
    //
    // NOTE: each literal below carries far more decimal digits than `f16`
    // (11 significand bits) can represent; the compiler rounds every literal
    // to the nearest `f16`, so each constant is correctly rounded.

    /// Archimedes' constant (π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable(feature = "f16", issue = "116909")]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    #[unstable(feature = "f16", issue = "116909")]
    pub const GOLDEN_RATIO: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    #[unstable(feature = "f16", issue = "116909")]
    pub const EULER_GAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    // Alias exists because 2π is also written as τ.
    #[doc(alias = "FRAC_1_SQRT_TAU")]
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_5: f16 = 2.23606797749978969640917366873127623_f16;

    /// 1/sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_5: f16 = 0.44721359549995793928183473374625524_f16;

    /// Euler's number (e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}
144
145#[doc(test(attr(
146    feature(cfg_target_has_reliable_f16_f128),
147    allow(internal_features, unused_features)
148)))]
149impl f16 {
    /// The radix or base of the internal representation of `f16`.
    // IEEE 754 binary16 is a base-2 format.
    #[unstable(feature = "f16", issue = "116909")]
    pub const RADIX: u32 = 2;

    /// The size of this float type in bits.
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_bits_const", issue = "151073")]
    pub const BITS: u32 = 16;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;2<sup>[`MANTISSA_DIGITS`]&nbsp;&minus;&nbsp;1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    pub const DIGITS: u32 = 3;

    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1&nbsp;&minus;&nbsp;[`MANTISSA_DIGITS`]</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    // 2^(1 - 11) = 2^-10 = 0.0009765625, rounded here to 5 significant digits.
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_diagnostic_item = "f16_epsilon"]
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to &minus;[`MAX`].
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`]&nbsp;&minus;&nbsp;1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
    // 2^(-13 - 1) = 2^-14 = 6.103515625e-5.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value.
    ///
    /// Equal to
    /// (1&nbsp;&minus;&nbsp;2<sup>&minus;[`MANTISSA_DIGITS`]</sup>)&nbsp;2<sup>[`MAX_EXP`]</sup>.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
    // (2 - 2^-10) * 2^15 = 65504, exactly representable.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX: f16 = 6.5504e+4_f16;

    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5&nbsp;×&nbsp;2<sup><i>MIN_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub>&nbsp;[`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;[`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_10_EXP: i32 = 4;
246
    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard makes a difference between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
    // 0/0 is one of the invalid operations that produces a quiet NaN at compile time.
    #[allow(clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan"]
    #[unstable(feature = "f16", issue = "116909")]
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
    // Finite / +0.0 overflows to +infinity under IEEE division semantics.
    #[unstable(feature = "f16", issue = "116909")]
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;

    /// Maximum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let max_exact_int = f16::MAX_EXACT_INTEGER;
    /// assert_eq!(max_exact_int, max_exact_int as f16 as i16);
    /// assert_eq!(max_exact_int + 1, (max_exact_int + 1) as f16 as i16);
    /// assert_ne!(max_exact_int + 2, (max_exact_int + 2) as f16 as i16);
    ///
    /// // Beyond `f16::MAX_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((max_exact_int + 1) as f16, (max_exact_int + 2) as f16);
    /// # }}
    /// ```
    // 2^11 - 1 = 2047: every integer up to the full significand width is exact.
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MAX_EXACT_INTEGER: i16 = (1 << Self::MANTISSA_DIGITS) - 1;

    /// Minimum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// This constant is equivalent to `-MAX_EXACT_INTEGER`.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let min_exact_int = f16::MIN_EXACT_INTEGER;
    /// assert_eq!(min_exact_int, min_exact_int as f16 as i16);
    /// assert_eq!(min_exact_int - 1, (min_exact_int - 1) as f16 as i16);
    /// assert_ne!(min_exact_int - 2, (min_exact_int - 2) as f16 as i16);
    ///
    /// // Below `f16::MIN_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((min_exact_int - 1) as f16, (min_exact_int - 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MIN_EXACT_INTEGER: i16 = -Self::MAX_EXACT_INTEGER;

    // Bit layout of IEEE 754 binary16: [sign:1][exponent:5][mantissa:10].
    // The three masks below partition the 16 bits accordingly.

    /// Sign bit (bit 15)
    pub(crate) const SIGN_MASK: u16 = 0x8000;

    /// Exponent mask (bits 10-14)
    pub(crate) const EXP_MASK: u16 = 0x7c00;

    /// Mantissa mask (bits 0-9)
    pub(crate) const MAN_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal)
    // The all-zero pattern except the lowest mantissa bit.
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal)
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
350
    /// Returns `true` if this value is NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let nan = f16::NAN;
    /// let f = 7.0_f16;
    ///
    /// assert!(nan.is_nan());
    /// assert!(!f.is_nan());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
    pub const fn is_nan(self) -> bool {
        // IEEE 754: NaN is the only value that compares unequal to itself.
        self != self
    }
371
    /// Returns `true` if this value is positive infinity or negative infinity, and
    /// `false` otherwise.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 7.0f16;
    /// let inf = f16::INFINITY;
    /// let neg_inf = f16::NEG_INFINITY;
    /// let nan = f16::NAN;
    ///
    /// assert!(!f.is_infinite());
    /// assert!(!nan.is_infinite());
    ///
    /// assert!(inf.is_infinite());
    /// assert!(neg_inf.is_infinite());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_infinite(self) -> bool {
        // NaN compares unequal to everything, so both arms are `false` for NaN.
        // Non-short-circuiting `|` is used deliberately: both operands are
        // side-effect-free equality tests, so `||`'s conditional evaluation
        // is unnecessary here.
        (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
    }
397
    /// Returns `true` if this number is neither infinite nor NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 7.0f16;
    /// let inf: f16 = f16::INFINITY;
    /// let neg_inf: f16 = f16::NEG_INFINITY;
    /// let nan: f16 = f16::NAN;
    ///
    /// assert!(f.is_finite());
    ///
    /// assert!(!nan.is_finite());
    /// assert!(!inf.is_finite());
    /// assert!(!neg_inf.is_finite());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    pub const fn is_finite(self) -> bool {
        // There's no need to handle NaN separately: if self is NaN,
        // the comparison is not true, exactly as desired.
        // `abs()` folds away the sign, so one strict comparison against
        // +infinity rejects both infinities at once.
        self.abs() < Self::INFINITY
    }
425
426    /// Returns `true` if the number is [subnormal].
427    ///
428    /// ```
429    /// #![feature(f16)]
430    /// # #[cfg(target_has_reliable_f16)] {
431    ///
432    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
433    /// let max = f16::MAX;
434    /// let lower_than_min = 1.0e-7_f16;
435    /// let zero = 0.0_f16;
436    ///
437    /// assert!(!min.is_subnormal());
438    /// assert!(!max.is_subnormal());
439    ///
440    /// assert!(!zero.is_subnormal());
441    /// assert!(!f16::NAN.is_subnormal());
442    /// assert!(!f16::INFINITY.is_subnormal());
443    /// // Values between `0` and `min` are Subnormal.
444    /// assert!(lower_than_min.is_subnormal());
445    /// # }
446    /// ```
447    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
448    #[inline]
449    #[must_use]
450    #[unstable(feature = "f16", issue = "116909")]
451    pub const fn is_subnormal(self) -> bool {
452        matches!(self.classify(), FpCategory::Subnormal)
453    }
454
455    /// Returns `true` if the number is neither zero, infinite, [subnormal], or NaN.
456    ///
457    /// ```
458    /// #![feature(f16)]
459    /// # #[cfg(target_has_reliable_f16)] {
460    ///
461    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
462    /// let max = f16::MAX;
463    /// let lower_than_min = 1.0e-7_f16;
464    /// let zero = 0.0_f16;
465    ///
466    /// assert!(min.is_normal());
467    /// assert!(max.is_normal());
468    ///
469    /// assert!(!zero.is_normal());
470    /// assert!(!f16::NAN.is_normal());
471    /// assert!(!f16::INFINITY.is_normal());
472    /// // Values between `0` and `min` are Subnormal.
473    /// assert!(!lower_than_min.is_normal());
474    /// # }
475    /// ```
476    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
477    #[inline]
478    #[must_use]
479    #[unstable(feature = "f16", issue = "116909")]
480    pub const fn is_normal(self) -> bool {
481        matches!(self.classify(), FpCategory::Normal)
482    }
483
484    /// Returns the floating point category of the number. If only one property
485    /// is going to be tested, it is generally faster to use the specific
486    /// predicate instead.
487    ///
488    /// ```
489    /// #![feature(f16)]
490    /// # #[cfg(target_has_reliable_f16)] {
491    ///
492    /// use std::num::FpCategory;
493    ///
494    /// let num = 12.4_f16;
495    /// let inf = f16::INFINITY;
496    ///
497    /// assert_eq!(num.classify(), FpCategory::Normal);
498    /// assert_eq!(inf.classify(), FpCategory::Infinite);
499    /// # }
500    /// ```
501    #[inline]
502    #[unstable(feature = "f16", issue = "116909")]
503    pub const fn classify(self) -> FpCategory {
504        let b = self.to_bits();
505        match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
506            (0, Self::EXP_MASK) => FpCategory::Infinite,
507            (_, Self::EXP_MASK) => FpCategory::Nan,
508            (0, 0) => FpCategory::Zero,
509            (_, 0) => FpCategory::Subnormal,
510            _ => FpCategory::Normal,
511        }
512    }
513
    /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
    /// positive sign bit and positive infinity.
    ///
    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
    /// conserved over arithmetic operations, the result of `is_sign_positive` on
    /// a NaN might produce an unexpected or non-portable result. See the [specification
    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
    /// if you need fully portable behavior (will return `false` for all NaNs).
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 7.0_f16;
    /// let g = -7.0_f16;
    ///
    /// assert!(f.is_sign_positive());
    /// assert!(!g.is_sign_positive());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_sign_positive(self) -> bool {
        // Defined as the exact complement of `is_sign_negative`: the sign bit
        // is clear. This makes `+0.0` and positive-sign NaNs return `true`.
        !self.is_sign_negative()
    }
541
542    /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
543    /// negative sign bit and negative infinity.
544    ///
545    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
546    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
547    /// conserved over arithmetic operations, the result of `is_sign_negative` on
548    /// a NaN might produce an unexpected or non-portable result. See the [specification
549    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
550    /// if you need fully portable behavior (will return `false` for all NaNs).
551    ///
552    /// ```
553    /// #![feature(f16)]
554    /// # #[cfg(target_has_reliable_f16)] {
555    ///
556    /// let f = 7.0_f16;
557    /// let g = -7.0_f16;
558    ///
559    /// assert!(!f.is_sign_negative());
560    /// assert!(g.is_sign_negative());
561    /// # }
562    /// ```
563    #[inline]
564    #[must_use]
565    #[unstable(feature = "f16", issue = "116909")]
566    pub const fn is_sign_negative(self) -> bool {
567        // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
568        // applies to zeros and NaNs as well.
569        // SAFETY: This is just transmuting to get the sign bit, it's fine.
570        (self.to_bits() & (1 << 15)) != 0
571    }
572
573    /// Returns the least number greater than `self`.
574    ///
575    /// Let `TINY` be the smallest representable positive `f16`. Then,
576    ///  - if `self.is_nan()`, this returns `self`;
577    ///  - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
578    ///  - if `self` is `-TINY`, this returns -0.0;
579    ///  - if `self` is -0.0 or +0.0, this returns `TINY`;
580    ///  - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
581    ///  - otherwise the unique least value greater than `self` is returned.
582    ///
583    /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
584    /// is finite `x == x.next_up().next_down()` also holds.
585    ///
586    /// ```rust
587    /// #![feature(f16)]
588    /// # #[cfg(target_has_reliable_f16)] {
589    ///
590    /// // f16::EPSILON is the difference between 1.0 and the next number up.
591    /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
592    /// // But not for most numbers.
593    /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
594    /// assert_eq!(4356f16.next_up(), 4360.0);
595    /// # }
596    /// ```
597    ///
598    /// This operation corresponds to IEEE-754 `nextUp`.
599    ///
600    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
601    /// [`INFINITY`]: Self::INFINITY
602    /// [`MIN`]: Self::MIN
603    /// [`MAX`]: Self::MAX
604    #[inline]
605    #[doc(alias = "nextUp")]
606    #[unstable(feature = "f16", issue = "116909")]
607    pub const fn next_up(self) -> Self {
608        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
609        // denormals to zero. This is in general unsound and unsupported, but here
610        // we do our best to still produce the correct result on such targets.
611        let bits = self.to_bits();
612        if self.is_nan() || bits == Self::INFINITY.to_bits() {
613            return self;
614        }
615
616        let abs = bits & !Self::SIGN_MASK;
617        let next_bits = if abs == 0 {
618            Self::TINY_BITS
619        } else if bits == abs {
620            bits + 1
621        } else {
622            bits - 1
623        };
624        Self::from_bits(next_bits)
625    }
626
627    /// Returns the greatest number less than `self`.
628    ///
629    /// Let `TINY` be the smallest representable positive `f16`. Then,
630    ///  - if `self.is_nan()`, this returns `self`;
631    ///  - if `self` is [`INFINITY`], this returns [`MAX`];
632    ///  - if `self` is `TINY`, this returns 0.0;
633    ///  - if `self` is -0.0 or +0.0, this returns `-TINY`;
634    ///  - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
635    ///  - otherwise the unique greatest value less than `self` is returned.
636    ///
637    /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
638    /// is finite `x == x.next_down().next_up()` also holds.
639    ///
640    /// ```rust
641    /// #![feature(f16)]
642    /// # #[cfg(target_has_reliable_f16)] {
643    ///
644    /// let x = 1.0f16;
645    /// // Clamp value into range [0, 1).
646    /// let clamped = x.clamp(0.0, 1.0f16.next_down());
647    /// assert!(clamped < 1.0);
648    /// assert_eq!(clamped.next_up(), 1.0);
649    /// # }
650    /// ```
651    ///
652    /// This operation corresponds to IEEE-754 `nextDown`.
653    ///
654    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
655    /// [`INFINITY`]: Self::INFINITY
656    /// [`MIN`]: Self::MIN
657    /// [`MAX`]: Self::MAX
658    #[inline]
659    #[doc(alias = "nextDown")]
660    #[unstable(feature = "f16", issue = "116909")]
661    pub const fn next_down(self) -> Self {
662        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
663        // denormals to zero. This is in general unsound and unsupported, but here
664        // we do our best to still produce the correct result on such targets.
665        let bits = self.to_bits();
666        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
667            return self;
668        }
669
670        let abs = bits & !Self::SIGN_MASK;
671        let next_bits = if abs == 0 {
672            Self::NEG_TINY_BITS
673        } else if bits == abs {
674            bits - 1
675        } else {
676            bits + 1
677        };
678        Self::from_bits(next_bits)
679    }
680
    /// Takes the reciprocal (inverse) of a number, `1/x`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.recip() - (1.0 / x)).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn recip(self) -> Self {
        // Plain IEEE division: 1/±0.0 yields ±infinity (same construction as
        // the `INFINITY` constant above) and 1/NaN yields NaN.
        1.0 / self
    }
699
    /// Converts radians to degrees.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let angle = std::f16::consts::PI;
    ///
    /// let abs_difference = (angle.to_degrees() - 180.0).abs();
    /// assert!(abs_difference <= 0.5);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_degrees(self) -> Self {
        // Use a literal to avoid double rounding, consts::PI is already rounded,
        // and dividing would round again. The literal is 180/π written with full
        // precision, so the compiler rounds it to f16 exactly once.
        const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
        self * PIS_IN_180
    }
728
729    /// Converts degrees to radians.
730    ///
731    /// # Unspecified precision
732    ///
733    /// The precision of this function is non-deterministic. This means it varies by platform,
734    /// Rust version, and can even differ within the same execution from one invocation to the next.
735    ///
736    /// # Examples
737    ///
738    /// ```
739    /// #![feature(f16)]
740    /// # #[cfg(target_has_reliable_f16)] {
741    ///
742    /// let angle = 180.0f16;
743    ///
744    /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
745    ///
746    /// assert!(abs_difference <= 0.01);
747    /// # }
748    /// ```
749    #[inline]
750    #[unstable(feature = "f16", issue = "116909")]
751    #[must_use = "this returns the result of the operation, without modifying the original"]
752    pub const fn to_radians(self) -> f16 {
753        // Use a literal to avoid double rounding, consts::PI is already rounded,
754        // and dividing would round again.
755        const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
756        self * RADS_PER_DEG
757    }
758
    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `maxNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// assert_eq!(x.max(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn max(self, other: f16) -> f16 {
        // Thin wrapper: the NaN/zero semantics documented above are provided
        // by the `maxnumf16` compiler intrinsic.
        intrinsics::maxnumf16(self, other)
    }
789
    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `minNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// assert_eq!(x.min(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn min(self, other: f16) -> f16 {
        // Delegate to the compiler intrinsic, which implements the
        // `minimumNumber` semantics documented above.
        intrinsics::minnumf16(self, other)
    }
820
    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `maximum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn maximum(self, other: f16) -> f16 {
        // Delegate to the compiler intrinsic, which implements the
        // NaN-propagating IEEE 754-2019 `maximum` semantics documented above.
        intrinsics::maximumf16(self, other)
    }
852
    /// Returns the minimum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `minimum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.minimum(y), x);
    /// assert!(x.minimum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn minimum(self, other: f16) -> f16 {
        // Delegate to the compiler intrinsic, which implements the
        // NaN-propagating IEEE 754-2019 `minimum` semantics documented above.
        intrinsics::minimumf16(self, other)
    }
884
885    /// Calculates the midpoint (average) between `self` and `rhs`.
886    ///
887    /// This returns NaN when *either* argument is NaN or if a combination of
888    /// +inf and -inf is provided as arguments.
889    ///
890    /// # Examples
891    ///
892    /// ```
893    /// #![feature(f16)]
894    /// # #[cfg(target_has_reliable_f16)] {
895    ///
896    /// assert_eq!(1f16.midpoint(4.0), 2.5);
897    /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
898    /// # }
899    /// ```
900    #[inline]
901    #[doc(alias = "average")]
902    #[unstable(feature = "f16", issue = "116909")]
903    #[rustc_const_unstable(feature = "f16", issue = "116909")]
904    pub const fn midpoint(self, other: f16) -> f16 {
905        const HI: f16 = f16::MAX / 2.;
906
907        let (a, b) = (self, other);
908        let abs_a = a.abs();
909        let abs_b = b.abs();
910
911        if abs_a <= HI && abs_b <= HI {
912            // Overflow is impossible
913            (a + b) / 2.
914        } else {
915            (a / 2.) + (b / 2.)
916        }
917    }
918
    /// Rounds toward zero and converts to any primitive integer type,
    /// assuming that the value is finite and fits in that type.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = 4.6_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
    /// assert_eq!(rounded, 4);
    ///
    /// let value = -128.9_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
    /// assert_eq!(rounded, i8::MIN);
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// The value must:
    ///
    /// * Not be `NaN`
    /// * Not be infinite
    /// * Be representable in the return type `Int`, after truncating off its fractional part
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub unsafe fn to_int_unchecked<Int>(self) -> Int
    where
        Self: FloatToInt<Int>,
    {
        // Forward to the `FloatToInt` trait, which carries the actual
        // per-integer-type conversion; the safety contract is identical.
        // SAFETY: the caller must uphold the safety contract for
        // `FloatToInt::to_int_unchecked`.
        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
    }
954
    /// Raw transmutation to `u16`.
    ///
    /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
    ///
    /// See [`from_bits`](#method.from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
    /// assert_eq!((12.5f16).to_bits(), 0x4a40);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    #[allow(unnecessary_transmutes)]
    pub const fn to_bits(self) -> u16 {
        // A transmute (not an `as` cast) is deliberate: we want the raw bit
        // pattern, not a numeric conversion.
        // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
        unsafe { mem::transmute(self) }
    }
981
    /// Raw transmutation from `u16`.
    ///
    /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
    /// It turns out this is incredibly portable, for two reasons:
    ///
    /// * Floats and Ints have the same endianness on all supported platforms.
    /// * IEEE 754 very precisely specifies the bit layout of floats.
    ///
    /// However there is one caveat: prior to the 2008 version of IEEE 754, how
    /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
    /// (notably x86 and ARM) picked the interpretation that was ultimately
    /// standardized in 2008, but some didn't (notably MIPS). As a result, all
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
    /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///
    /// If the results of this method are only manipulated by the same
    /// architecture that produced them, then there is no portability concern.
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
    /// If you don't care about signalingness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let v = f16::from_bits(0x4a40);
    /// assert_eq!(v, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(unnecessary_transmutes)]
    pub const fn from_bits(v: u16) -> Self {
        // A transmute (not an `as` cast) is deliberate: the bit pattern is
        // reinterpreted, not numerically converted.
        // It turns out the safety issues with sNaN were overblown! Hooray!
        // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
        unsafe { mem::transmute(v) }
    }
1029
1030    /// Returns the memory representation of this floating point number as a byte array in
1031    /// big-endian (network) byte order.
1032    ///
1033    /// See [`from_bits`](Self::from_bits) for some discussion of the
1034    /// portability of this operation (there are almost no issues).
1035    ///
1036    /// # Examples
1037    ///
1038    /// ```
1039    /// #![feature(f16)]
1040    /// # #[cfg(target_has_reliable_f16)] {
1041    ///
1042    /// let bytes = 12.5f16.to_be_bytes();
1043    /// assert_eq!(bytes, [0x4a, 0x40]);
1044    /// # }
1045    /// ```
1046    #[inline]
1047    #[unstable(feature = "f16", issue = "116909")]
1048    #[must_use = "this returns the result of the operation, without modifying the original"]
1049    pub const fn to_be_bytes(self) -> [u8; 2] {
1050        self.to_bits().to_be_bytes()
1051    }
1052
1053    /// Returns the memory representation of this floating point number as a byte array in
1054    /// little-endian byte order.
1055    ///
1056    /// See [`from_bits`](Self::from_bits) for some discussion of the
1057    /// portability of this operation (there are almost no issues).
1058    ///
1059    /// # Examples
1060    ///
1061    /// ```
1062    /// #![feature(f16)]
1063    /// # #[cfg(target_has_reliable_f16)] {
1064    ///
1065    /// let bytes = 12.5f16.to_le_bytes();
1066    /// assert_eq!(bytes, [0x40, 0x4a]);
1067    /// # }
1068    /// ```
1069    #[inline]
1070    #[unstable(feature = "f16", issue = "116909")]
1071    #[must_use = "this returns the result of the operation, without modifying the original"]
1072    pub const fn to_le_bytes(self) -> [u8; 2] {
1073        self.to_bits().to_le_bytes()
1074    }
1075
1076    /// Returns the memory representation of this floating point number as a byte array in
1077    /// native byte order.
1078    ///
1079    /// As the target platform's native endianness is used, portable code
1080    /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
1081    ///
1082    /// [`to_be_bytes`]: f16::to_be_bytes
1083    /// [`to_le_bytes`]: f16::to_le_bytes
1084    ///
1085    /// See [`from_bits`](Self::from_bits) for some discussion of the
1086    /// portability of this operation (there are almost no issues).
1087    ///
1088    /// # Examples
1089    ///
1090    /// ```
1091    /// #![feature(f16)]
1092    /// # #[cfg(target_has_reliable_f16)] {
1093    ///
1094    /// let bytes = 12.5f16.to_ne_bytes();
1095    /// assert_eq!(
1096    ///     bytes,
1097    ///     if cfg!(target_endian = "big") {
1098    ///         [0x4a, 0x40]
1099    ///     } else {
1100    ///         [0x40, 0x4a]
1101    ///     }
1102    /// );
1103    /// # }
1104    /// ```
1105    #[inline]
1106    #[unstable(feature = "f16", issue = "116909")]
1107    #[must_use = "this returns the result of the operation, without modifying the original"]
1108    pub const fn to_ne_bytes(self) -> [u8; 2] {
1109        self.to_bits().to_ne_bytes()
1110    }
1111
1112    /// Creates a floating point value from its representation as a byte array in big endian.
1113    ///
1114    /// See [`from_bits`](Self::from_bits) for some discussion of the
1115    /// portability of this operation (there are almost no issues).
1116    ///
1117    /// # Examples
1118    ///
1119    /// ```
1120    /// #![feature(f16)]
1121    /// # #[cfg(target_has_reliable_f16)] {
1122    ///
1123    /// let value = f16::from_be_bytes([0x4a, 0x40]);
1124    /// assert_eq!(value, 12.5);
1125    /// # }
1126    /// ```
1127    #[inline]
1128    #[must_use]
1129    #[unstable(feature = "f16", issue = "116909")]
1130    pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
1131        Self::from_bits(u16::from_be_bytes(bytes))
1132    }
1133
1134    /// Creates a floating point value from its representation as a byte array in little endian.
1135    ///
1136    /// See [`from_bits`](Self::from_bits) for some discussion of the
1137    /// portability of this operation (there are almost no issues).
1138    ///
1139    /// # Examples
1140    ///
1141    /// ```
1142    /// #![feature(f16)]
1143    /// # #[cfg(target_has_reliable_f16)] {
1144    ///
1145    /// let value = f16::from_le_bytes([0x40, 0x4a]);
1146    /// assert_eq!(value, 12.5);
1147    /// # }
1148    /// ```
1149    #[inline]
1150    #[must_use]
1151    #[unstable(feature = "f16", issue = "116909")]
1152    pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
1153        Self::from_bits(u16::from_le_bytes(bytes))
1154    }
1155
1156    /// Creates a floating point value from its representation as a byte array in native endian.
1157    ///
1158    /// As the target platform's native endianness is used, portable code
1159    /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1160    /// appropriate instead.
1161    ///
1162    /// [`from_be_bytes`]: f16::from_be_bytes
1163    /// [`from_le_bytes`]: f16::from_le_bytes
1164    ///
1165    /// See [`from_bits`](Self::from_bits) for some discussion of the
1166    /// portability of this operation (there are almost no issues).
1167    ///
1168    /// # Examples
1169    ///
1170    /// ```
1171    /// #![feature(f16)]
1172    /// # #[cfg(target_has_reliable_f16)] {
1173    ///
1174    /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
1175    ///     [0x4a, 0x40]
1176    /// } else {
1177    ///     [0x40, 0x4a]
1178    /// });
1179    /// assert_eq!(value, 12.5);
1180    /// # }
1181    /// ```
1182    #[inline]
1183    #[must_use]
1184    #[unstable(feature = "f16", issue = "116909")]
1185    pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
1186        Self::from_bits(u16::from_ne_bytes(bytes))
1187    }
1188
1189    /// Returns the ordering between `self` and `other`.
1190    ///
1191    /// Unlike the standard partial comparison between floating point numbers,
1192    /// this comparison always produces an ordering in accordance to
1193    /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1194    /// floating point standard. The values are ordered in the following sequence:
1195    ///
1196    /// - negative quiet NaN
1197    /// - negative signaling NaN
1198    /// - negative infinity
1199    /// - negative numbers
1200    /// - negative subnormal numbers
1201    /// - negative zero
1202    /// - positive zero
1203    /// - positive subnormal numbers
1204    /// - positive numbers
1205    /// - positive infinity
1206    /// - positive signaling NaN
1207    /// - positive quiet NaN.
1208    ///
1209    /// The ordering established by this function does not always agree with the
1210    /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1211    /// they consider negative and positive zero equal, while `total_cmp`
1212    /// doesn't.
1213    ///
1214    /// The interpretation of the signaling NaN bit follows the definition in
1215    /// the IEEE 754 standard, which may not match the interpretation by some of
1216    /// the older, non-conformant (e.g. MIPS) hardware implementations.
1217    ///
1218    /// # Example
1219    ///
1220    /// ```
1221    /// #![feature(f16)]
1222    /// # #[cfg(target_has_reliable_f16)] {
1223    ///
1224    /// struct GoodBoy {
1225    ///     name: &'static str,
1226    ///     weight: f16,
1227    /// }
1228    ///
1229    /// let mut bois = vec![
1230    ///     GoodBoy { name: "Pucci", weight: 0.1 },
1231    ///     GoodBoy { name: "Woofer", weight: 99.0 },
1232    ///     GoodBoy { name: "Yapper", weight: 10.0 },
1233    ///     GoodBoy { name: "Chonk", weight: f16::INFINITY },
1234    ///     GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1235    ///     GoodBoy { name: "Floaty", weight: -5.0 },
1236    /// ];
1237    ///
1238    /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1239    ///
1240    /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1241    /// if f16::NAN.is_sign_negative() {
1242    ///     bois.into_iter().map(|b| b.weight)
1243    ///         .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1244    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1245    /// } else {
1246    ///     bois.into_iter().map(|b| b.weight)
1247    ///         .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1248    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1249    /// }
1250    /// # }
1251    /// ```
1252    #[inline]
1253    #[must_use]
1254    #[unstable(feature = "f16", issue = "116909")]
1255    #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
1256    pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1257        let mut left = self.to_bits() as i16;
1258        let mut right = other.to_bits() as i16;
1259
1260        // In case of negatives, flip all the bits except the sign
1261        // to achieve a similar layout as two's complement integers
1262        //
1263        // Why does this work? IEEE 754 floats consist of three fields:
1264        // Sign bit, exponent and mantissa. The set of exponent and mantissa
1265        // fields as a whole have the property that their bitwise order is
1266        // equal to the numeric magnitude where the magnitude is defined.
1267        // The magnitude is not normally defined on NaN values, but
1268        // IEEE 754 totalOrder defines the NaN values also to follow the
1269        // bitwise order. This leads to order explained in the doc comment.
1270        // However, the representation of magnitude is the same for negative
1271        // and positive numbers – only the sign bit is different.
1272        // To easily compare the floats as signed integers, we need to
1273        // flip the exponent and mantissa bits in case of negative numbers.
1274        // We effectively convert the numbers to "two's complement" form.
1275        //
1276        // To do the flipping, we construct a mask and XOR against it.
1277        // We branchlessly calculate an "all-ones except for the sign bit"
1278        // mask from negative-signed values: right shifting sign-extends
1279        // the integer, so we "fill" the mask with sign bits, and then
1280        // convert to unsigned to push one more zero bit.
1281        // On positive values, the mask is all zeros, so it's a no-op.
1282        left ^= (((left >> 15) as u16) >> 1) as i16;
1283        right ^= (((right >> 15) as u16) >> 1) as i16;
1284
1285        left.cmp(&right)
1286    }
1287
1288    /// Restrict a value to a certain interval unless it is NaN.
1289    ///
1290    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1291    /// less than `min`. Otherwise this returns `self`.
1292    ///
1293    /// Note that this function returns NaN if the initial value was NaN as
1294    /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
1295    /// zeros with different sign, either `0.0` or `-0.0` is returned non-deterministically.
1296    ///
1297    /// # Panics
1298    ///
1299    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1300    ///
1301    /// # Examples
1302    ///
1303    /// ```
1304    /// #![feature(f16)]
1305    /// # #[cfg(target_has_reliable_f16)] {
1306    ///
1307    /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1308    /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1309    /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1310    /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1311    ///
1312    /// // These always returns zero, but the sign (which is ignored by `==`) is non-deterministic.
1313    /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
1314    /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
1315    /// // This is definitely a negative zero.
1316    /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
1317    /// # }
1318    /// ```
1319    #[inline]
1320    #[unstable(feature = "f16", issue = "116909")]
1321    #[must_use = "method returns a new number and does not mutate the original value"]
1322    pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1323        const_assert!(
1324            min <= max,
1325            "min > max, or either was NaN",
1326            "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1327            min: f16,
1328            max: f16,
1329        );
1330
1331        if self < min {
1332            self = min;
1333        }
1334        if self > max {
1335            self = max;
1336        }
1337        self
1338    }
1339
1340    /// Clamps this number to a symmetric range centered around zero.
1341    ///
1342    /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
1343    ///
1344    /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
1345    /// explicit about the intent.
1346    ///
1347    /// # Panics
1348    ///
1349    /// Panics if `limit` is negative or NaN, as this indicates a logic error.
1350    ///
1351    /// # Examples
1352    ///
1353    /// ```
1354    /// #![feature(f16)]
1355    /// #![feature(clamp_magnitude)]
1356    /// # #[cfg(target_has_reliable_f16)] {
1357    /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
1358    /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
1359    /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
1360    /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
1361    /// # }
1362    /// ```
1363    #[inline]
1364    #[unstable(feature = "clamp_magnitude", issue = "148519")]
1365    #[must_use = "this returns the clamped value and does not modify the original"]
1366    pub fn clamp_magnitude(self, limit: f16) -> f16 {
1367        assert!(limit >= 0.0, "limit must be non-negative");
1368        let limit = limit.abs(); // Canonicalises -0.0 to 0.0
1369        self.clamp(-limit, limit)
1370    }
1371
    /// Computes the absolute value of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let x = 3.5_f16;
    /// let y = -3.5_f16;
    ///
    /// assert_eq!(x.abs(), x);
    /// assert_eq!(y.abs(), -y);
    ///
    /// assert!(f16::NAN.abs().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn abs(self) -> Self {
        // Delegate to the compiler intrinsic for the absolute value.
        intrinsics::fabsf16(self)
    }
1398
1399    /// Returns a number that represents the sign of `self`.
1400    ///
1401    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1402    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1403    /// - NaN if the number is NaN
1404    ///
1405    /// # Examples
1406    ///
1407    /// ```
1408    /// #![feature(f16)]
1409    /// # #[cfg(target_has_reliable_f16)] {
1410    ///
1411    /// let f = 3.5_f16;
1412    ///
1413    /// assert_eq!(f.signum(), 1.0);
1414    /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1415    ///
1416    /// assert!(f16::NAN.signum().is_nan());
1417    /// # }
1418    /// ```
1419    #[inline]
1420    #[unstable(feature = "f16", issue = "116909")]
1421    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1422    #[must_use = "method returns a new number and does not mutate the original value"]
1423    pub const fn signum(self) -> f16 {
1424        if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1425    }
1426
    /// Returns a number composed of the magnitude of `self` and the sign of
    /// `sign`.
    ///
    /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
    /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
    /// returned.
    ///
    /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
    /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
    /// doesn't guarantee that the bit pattern of NaNs are conserved over arithmetic operations, the
    /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
    /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
    /// info.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.copysign(0.42), 3.5_f16);
    /// assert_eq!(f.copysign(-0.42), -3.5_f16);
    /// assert_eq!((-f).copysign(0.42), 3.5_f16);
    /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
    ///
    /// assert!(f16::NAN.copysign(1.0).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn copysign(self, sign: f16) -> f16 {
        // Delegate to the compiler intrinsic, which combines the magnitude of
        // `self` with the sign bit of `sign`.
        intrinsics::copysignf16(self, sign)
    }
1464
    /// Float addition that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_add(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // addition using algebraic identities (see the doc link above).
        intrinsics::fadd_algebraic(self, rhs)
    }
1475
    /// Float subtraction that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_sub(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // subtraction using algebraic identities (see the doc link above).
        intrinsics::fsub_algebraic(self, rhs)
    }
1486
    /// Float multiplication that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_mul(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // multiplication using algebraic identities (see the doc link above).
        intrinsics::fmul_algebraic(self, rhs)
    }
1497
    /// Float division that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_div(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // division using algebraic identities (see the doc link above).
        intrinsics::fdiv_algebraic(self, rhs)
    }
1508
    /// Float remainder that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_rem(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // remainder using algebraic identities (see the doc link above).
        intrinsics::frem_algebraic(self, rhs)
    }
1519}
1520
1521// Functions in this module fall into `core_float_math`
1522// #[unstable(feature = "core_float_math", issue = "137578")]
1523#[cfg(not(test))]
1524#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))]
1525impl f16 {
    /// Returns the largest integer less than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.floor(), 3.0);
    /// assert_eq!(g.floor(), 3.0);
    /// assert_eq!(h.floor(), -4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn floor(self) -> f16 {
        // Delegate to the compiler intrinsic for round-toward-negative-infinity.
        intrinsics::floorf16(self)
    }
1554
    /// Returns the smallest integer greater than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.01_f16;
    /// let g = 4.0_f16;
    ///
    /// assert_eq!(f.ceil(), 4.0);
    /// assert_eq!(g.ceil(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "ceiling")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn ceil(self) -> f16 {
        // Delegate to the compiler intrinsic for round-toward-positive-infinity.
        intrinsics::ceilf16(self)
    }
1582
1583    /// Returns the nearest integer to `self`. If a value is half-way between two
1584    /// integers, round away from `0.0`.
1585    ///
1586    /// This function always returns the precise result.
1587    ///
1588    /// # Examples
1589    ///
1590    /// ```
1591    /// #![feature(f16)]
1592    /// # #[cfg(not(miri))]
1593    /// # #[cfg(target_has_reliable_f16)] {
1594    ///
1595    /// let f = 3.3_f16;
1596    /// let g = -3.3_f16;
1597    /// let h = -3.7_f16;
1598    /// let i = 3.5_f16;
1599    /// let j = 4.5_f16;
1600    ///
1601    /// assert_eq!(f.round(), 3.0);
1602    /// assert_eq!(g.round(), -3.0);
1603    /// assert_eq!(h.round(), -4.0);
1604    /// assert_eq!(i.round(), 4.0);
1605    /// assert_eq!(j.round(), 5.0);
1606    /// # }
1607    /// ```
1608    #[inline]
1609    #[rustc_allow_incoherent_impl]
1610    #[unstable(feature = "f16", issue = "116909")]
1611    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1612    #[must_use = "method returns a new number and does not mutate the original value"]
1613    pub const fn round(self) -> f16 {
1614        intrinsics::roundf16(self)
1615    }
1616
1617    /// Returns the nearest integer to a number. Rounds half-way cases to the number
1618    /// with an even least significant digit.
1619    ///
1620    /// This function always returns the precise result.
1621    ///
1622    /// # Examples
1623    ///
1624    /// ```
1625    /// #![feature(f16)]
1626    /// # #[cfg(not(miri))]
1627    /// # #[cfg(target_has_reliable_f16)] {
1628    ///
1629    /// let f = 3.3_f16;
1630    /// let g = -3.3_f16;
1631    /// let h = 3.5_f16;
1632    /// let i = 4.5_f16;
1633    ///
1634    /// assert_eq!(f.round_ties_even(), 3.0);
1635    /// assert_eq!(g.round_ties_even(), -3.0);
1636    /// assert_eq!(h.round_ties_even(), 4.0);
1637    /// assert_eq!(i.round_ties_even(), 4.0);
1638    /// # }
1639    /// ```
1640    #[inline]
1641    #[rustc_allow_incoherent_impl]
1642    #[unstable(feature = "f16", issue = "116909")]
1643    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1644    #[must_use = "method returns a new number and does not mutate the original value"]
1645    pub const fn round_ties_even(self) -> f16 {
1646        intrinsics::round_ties_even_f16(self)
1647    }
1648
1649    /// Returns the integer part of `self`.
1650    /// This means that non-integer numbers are always truncated towards zero.
1651    ///
1652    /// This function always returns the precise result.
1653    ///
1654    /// # Examples
1655    ///
1656    /// ```
1657    /// #![feature(f16)]
1658    /// # #[cfg(not(miri))]
1659    /// # #[cfg(target_has_reliable_f16)] {
1660    ///
1661    /// let f = 3.7_f16;
1662    /// let g = 3.0_f16;
1663    /// let h = -3.7_f16;
1664    ///
1665    /// assert_eq!(f.trunc(), 3.0);
1666    /// assert_eq!(g.trunc(), 3.0);
1667    /// assert_eq!(h.trunc(), -3.0);
1668    /// # }
1669    /// ```
1670    #[inline]
1671    #[doc(alias = "truncate")]
1672    #[rustc_allow_incoherent_impl]
1673    #[unstable(feature = "f16", issue = "116909")]
1674    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1675    #[must_use = "method returns a new number and does not mutate the original value"]
1676    pub const fn trunc(self) -> f16 {
1677        intrinsics::truncf16(self)
1678    }
1679
1680    /// Returns the fractional part of `self`.
1681    ///
1682    /// This function always returns the precise result.
1683    ///
1684    /// # Examples
1685    ///
1686    /// ```
1687    /// #![feature(f16)]
1688    /// # #[cfg(not(miri))]
1689    /// # #[cfg(target_has_reliable_f16)] {
1690    ///
1691    /// let x = 3.6_f16;
1692    /// let y = -3.6_f16;
1693    /// let abs_difference_x = (x.fract() - 0.6).abs();
1694    /// let abs_difference_y = (y.fract() - (-0.6)).abs();
1695    ///
1696    /// assert!(abs_difference_x <= f16::EPSILON);
1697    /// assert!(abs_difference_y <= f16::EPSILON);
1698    /// # }
1699    /// ```
1700    #[inline]
1701    #[rustc_allow_incoherent_impl]
1702    #[unstable(feature = "f16", issue = "116909")]
1703    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1704    #[must_use = "method returns a new number and does not mutate the original value"]
1705    pub const fn fract(self) -> f16 {
1706        self - self.trunc()
1707    }
1708
1709    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
1710    /// error, yielding a more accurate result than an unfused multiply-add.
1711    ///
1712    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
1713    /// the target architecture has a dedicated `fma` CPU instruction. However,
1714    /// this is not always true, and will be heavily dependant on designing
1715    /// algorithms with specific target hardware in mind.
1716    ///
1717    /// # Precision
1718    ///
1719    /// The result of this operation is guaranteed to be the rounded
1720    /// infinite-precision result. It is specified by IEEE 754 as
1721    /// `fusedMultiplyAdd` and guaranteed not to change.
1722    ///
1723    /// # Examples
1724    ///
1725    /// ```
1726    /// #![feature(f16)]
1727    /// # #[cfg(not(miri))]
1728    /// # #[cfg(target_has_reliable_f16)] {
1729    ///
1730    /// let m = 10.0_f16;
1731    /// let x = 4.0_f16;
1732    /// let b = 60.0_f16;
1733    ///
1734    /// assert_eq!(m.mul_add(x, b), 100.0);
1735    /// assert_eq!(m * x + b, 100.0);
1736    ///
1737    /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
1738    /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
1739    /// let minus_one = -1.0_f16;
1740    ///
1741    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
1742    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
1743    /// // Different rounding with the non-fused multiply and add.
1744    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
1745    /// # }
1746    /// ```
1747    #[inline]
1748    #[rustc_allow_incoherent_impl]
1749    #[unstable(feature = "f16", issue = "116909")]
1750    #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
1751    #[must_use = "method returns a new number and does not mutate the original value"]
1752    pub const fn mul_add(self, a: f16, b: f16) -> f16 {
1753        intrinsics::fmaf16(self, a, b)
1754    }
1755
1756    /// Calculates Euclidean division, the matching method for `rem_euclid`.
1757    ///
1758    /// This computes the integer `n` such that
1759    /// `self = n * rhs + self.rem_euclid(rhs)`.
1760    /// In other words, the result is `self / rhs` rounded to the integer `n`
1761    /// such that `self >= n * rhs`.
1762    ///
1763    /// # Precision
1764    ///
1765    /// The result of this operation is guaranteed to be the rounded
1766    /// infinite-precision result.
1767    ///
1768    /// # Examples
1769    ///
1770    /// ```
1771    /// #![feature(f16)]
1772    /// # #[cfg(not(miri))]
1773    /// # #[cfg(target_has_reliable_f16)] {
1774    ///
1775    /// let a: f16 = 7.0;
1776    /// let b = 4.0;
1777    /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
1778    /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
1779    /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
1780    /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
1781    /// # }
1782    /// ```
1783    #[inline]
1784    #[rustc_allow_incoherent_impl]
1785    #[unstable(feature = "f16", issue = "116909")]
1786    #[must_use = "method returns a new number and does not mutate the original value"]
1787    pub fn div_euclid(self, rhs: f16) -> f16 {
1788        let q = (self / rhs).trunc();
1789        if self % rhs < 0.0 {
1790            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
1791        }
1792        q
1793    }
1794
1795    /// Calculates the least nonnegative remainder of `self` when
1796    /// divided by `rhs`.
1797    ///
1798    /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
1799    /// most cases. However, due to a floating point round-off error it can
1800    /// result in `r == rhs.abs()`, violating the mathematical definition, if
1801    /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
1802    /// This result is not an element of the function's codomain, but it is the
1803    /// closest floating point number in the real numbers and thus fulfills the
1804    /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
1805    /// approximately.
1806    ///
1807    /// # Precision
1808    ///
1809    /// The result of this operation is guaranteed to be the rounded
1810    /// infinite-precision result.
1811    ///
1812    /// # Examples
1813    ///
1814    /// ```
1815    /// #![feature(f16)]
1816    /// # #[cfg(not(miri))]
1817    /// # #[cfg(target_has_reliable_f16)] {
1818    ///
1819    /// let a: f16 = 7.0;
1820    /// let b = 4.0;
1821    /// assert_eq!(a.rem_euclid(b), 3.0);
1822    /// assert_eq!((-a).rem_euclid(b), 1.0);
1823    /// assert_eq!(a.rem_euclid(-b), 3.0);
1824    /// assert_eq!((-a).rem_euclid(-b), 1.0);
1825    /// // limitation due to round-off error
1826    /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
1827    /// # }
1828    /// ```
1829    #[inline]
1830    #[rustc_allow_incoherent_impl]
1831    #[doc(alias = "modulo", alias = "mod")]
1832    #[unstable(feature = "f16", issue = "116909")]
1833    #[must_use = "method returns a new number and does not mutate the original value"]
1834    pub fn rem_euclid(self, rhs: f16) -> f16 {
1835        let r = self % rhs;
1836        if r < 0.0 { r + rhs.abs() } else { r }
1837    }
1838
1839    /// Raises a number to an integer power.
1840    ///
1841    /// Using this function is generally faster than using `powf`.
1842    /// It might have a different sequence of rounding operations than `powf`,
1843    /// so the results are not guaranteed to agree.
1844    ///
1845    /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
1846    /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
1847    /// NaN, then the result is non-deterministically either a NaN or the result that the
1848    /// corresponding quiet NaN would produce.
1849    ///
1850    /// # Unspecified precision
1851    ///
1852    /// The precision of this function is non-deterministic. This means it varies by platform,
1853    /// Rust version, and can even differ within the same execution from one invocation to the next.
1854    ///
1855    /// # Examples
1856    ///
1857    /// ```
1858    /// #![feature(f16)]
1859    /// # #[cfg(not(miri))]
1860    /// # #[cfg(target_has_reliable_f16)] {
1861    ///
1862    /// let x = 2.0_f16;
1863    /// let abs_difference = (x.powi(2) - (x * x)).abs();
1864    /// assert!(abs_difference <= f16::EPSILON);
1865    ///
1866    /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
1867    /// assert_eq!(f16::powi(0.0, 0), 1.0);
1868    /// # }
1869    /// ```
1870    #[inline]
1871    #[rustc_allow_incoherent_impl]
1872    #[unstable(feature = "f16", issue = "116909")]
1873    #[must_use = "method returns a new number and does not mutate the original value"]
1874    pub fn powi(self, n: i32) -> f16 {
1875        intrinsics::powif16(self, n)
1876    }
1877
1878    /// Returns the square root of a number.
1879    ///
1880    /// Returns NaN if `self` is a negative number other than `-0.0`.
1881    ///
1882    /// # Precision
1883    ///
1884    /// The result of this operation is guaranteed to be the rounded
1885    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
1886    /// and guaranteed not to change.
1887    ///
1888    /// # Examples
1889    ///
1890    /// ```
1891    /// #![feature(f16)]
1892    /// # #[cfg(not(miri))]
1893    /// # #[cfg(target_has_reliable_f16)] {
1894    ///
1895    /// let positive = 4.0_f16;
1896    /// let negative = -4.0_f16;
1897    /// let negative_zero = -0.0_f16;
1898    ///
1899    /// assert_eq!(positive.sqrt(), 2.0);
1900    /// assert!(negative.sqrt().is_nan());
1901    /// assert!(negative_zero.sqrt() == negative_zero);
1902    /// # }
1903    /// ```
1904    #[inline]
1905    #[doc(alias = "squareRoot")]
1906    #[rustc_allow_incoherent_impl]
1907    #[unstable(feature = "f16", issue = "116909")]
1908    #[must_use = "method returns a new number and does not mutate the original value"]
1909    pub fn sqrt(self) -> f16 {
1910        intrinsics::sqrtf16(self)
1911    }
1912
1913    /// Returns the cube root of a number.
1914    ///
1915    /// # Unspecified precision
1916    ///
1917    /// The precision of this function is non-deterministic. This means it varies by platform,
1918    /// Rust version, and can even differ within the same execution from one invocation to the next.
1919    ///
1920    /// This function currently corresponds to the `cbrtf` from libc on Unix
1921    /// and Windows. Note that this might change in the future.
1922    ///
1923    /// # Examples
1924    ///
1925    /// ```
1926    /// #![feature(f16)]
1927    /// # #[cfg(not(miri))]
1928    /// # #[cfg(target_has_reliable_f16)] {
1929    ///
1930    /// let x = 8.0f16;
1931    ///
1932    /// // x^(1/3) - 2 == 0
1933    /// let abs_difference = (x.cbrt() - 2.0).abs();
1934    ///
1935    /// assert!(abs_difference <= f16::EPSILON);
1936    /// # }
1937    /// ```
1938    #[inline]
1939    #[rustc_allow_incoherent_impl]
1940    #[unstable(feature = "f16", issue = "116909")]
1941    #[must_use = "method returns a new number and does not mutate the original value"]
1942    pub fn cbrt(self) -> f16 {
1943        libm::cbrtf(self as f32) as f16
1944    }
1945}