// core/num/f16.rs
1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::imp::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
/// Basic mathematical constants.
///
/// Each literal below carries more decimal digits than `f16` can represent;
/// the constant's value is the literal rounded to the nearest representable
/// `f16` at compile time.
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_consts_mod"]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    /// Archimedes' constant (π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable(feature = "f16", issue = "116909")]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    #[unstable(feature = "f16", issue = "116909")]
    pub const GOLDEN_RATIO: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    #[unstable(feature = "f16", issue = "116909")]
    pub const EULER_GAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    #[doc(alias = "FRAC_1_SQRT_TAU")]
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_5: f16 = 2.23606797749978969640917366873127623_f16;

    /// 1/sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_5: f16 = 0.44721359549995793928183473374625524_f16;

    /// Euler's number (e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}
144
145#[doc(test(attr(
146    feature(cfg_target_has_reliable_f16_f128),
147    allow(internal_features, unused_features)
148)))]
149impl f16 {
    /// The radix or base of the internal representation of `f16`.
    #[unstable(feature = "f16", issue = "116909")]
    pub const RADIX: u32 = 2;

    /// The size of this float type in bits.
    // NOTE: tracked under `float_bits_const` rather than the general `f16` feature;
    // the previous attribute is kept below for reference.
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_bits_const", issue = "151073")]
    pub const BITS: u32 = 16;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;2<sup>[`MANTISSA_DIGITS`]&nbsp;&minus;&nbsp;1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    pub const DIGITS: u32 = 3;
176
    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1&nbsp;&minus;&nbsp;[`MANTISSA_DIGITS`]</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_diagnostic_item = "f16_epsilon"]
    // The literal rounds to 2^-10 (0.0009765625), per the formula above.
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to &minus;[`MAX`].
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`]&nbsp;&minus;&nbsp;1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
    #[unstable(feature = "f16", issue = "116909")]
    // The literal rounds to 2^-14, per the formula above.
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value.
    ///
    /// Equal to
    /// (1&nbsp;&minus;&nbsp;2<sup>&minus;[`MANTISSA_DIGITS`]</sup>)&nbsp;2<sup>[`MAX_EXP`]</sup>.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX: f16 = 6.5504e+4_f16;

    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5&nbsp;×&nbsp;2<sup><i>MIN_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub>&nbsp;[`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;[`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_10_EXP: i32 = 4;
246
    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard makes a difference between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
    #[allow(clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan"]
    #[unstable(feature = "f16", issue = "116909")]
    // `0.0 / 0.0` is an IEEE 754 invalid operation, yielding a quiet NaN (see the
    // guarantee documented above).
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
271
    /// Maximum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// Equal to 2<sup>[`MANTISSA_DIGITS`]</sup>&nbsp;&minus;&nbsp;1.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let max_exact_int = f16::MAX_EXACT_INTEGER;
    /// assert_eq!(max_exact_int, max_exact_int as f16 as i16);
    /// assert_eq!(max_exact_int + 1, (max_exact_int + 1) as f16 as i16);
    /// assert_ne!(max_exact_int + 2, (max_exact_int + 2) as f16 as i16);
    ///
    /// // Beyond `f16::MAX_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((max_exact_int + 1) as f16, (max_exact_int + 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MAX_EXACT_INTEGER: i16 = (1 << Self::MANTISSA_DIGITS) - 1;

    /// Minimum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// Symmetrically to [`MAX_EXACT_INTEGER`], `MIN_EXACT_INTEGER - 1` also
    /// converts losslessly to [`f16`] and back to [`i16`], but
    /// `MIN_EXACT_INTEGER - 2` converts to the same [`f16`] value (and back to
    /// `MIN_EXACT_INTEGER - 1` as an integer) so there is not a "one-to-one"
    /// mapping.
    ///
    /// This constant is equivalent to `-MAX_EXACT_INTEGER`.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let min_exact_int = f16::MIN_EXACT_INTEGER;
    /// assert_eq!(min_exact_int, min_exact_int as f16 as i16);
    /// assert_eq!(min_exact_int - 1, (min_exact_int - 1) as f16 as i16);
    /// assert_ne!(min_exact_int - 2, (min_exact_int - 2) as f16 as i16);
    ///
    /// // Below `f16::MIN_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((min_exact_int - 1) as f16, (min_exact_int - 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MIN_EXACT_INTEGER: i16 = -Self::MAX_EXACT_INTEGER;
335
    // Bit layout of an `f16`: 1 sign bit, 5 exponent bits, 10 mantissa bits.
    // The three masks below are pairwise disjoint and together cover all 16 bits
    // (0x8000 | 0x7c00 | 0x03ff == 0xffff).

    /// The mask of the bit used to encode the sign of an [`f16`].
    ///
    /// This bit is set when the sign is negative and unset when the sign is
    /// positive.
    /// If you only need to check whether a value is positive or negative,
    /// [`is_sign_positive`] or [`is_sign_negative`] can be used.
    ///
    /// [`is_sign_positive`]: f16::is_sign_positive
    /// [`is_sign_negative`]: f16::is_sign_negative
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let sign_mask = f16::SIGN_MASK;
    /// let a = 1.6552f16;
    /// let a_bits = a.to_bits();
    ///
    /// assert_eq!(a_bits & sign_mask, 0x0);
    /// assert_eq!(f16::from_bits(a_bits ^ sign_mask), -a);
    /// assert_eq!(sign_mask, (-0.0f16).to_bits());
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const SIGN_MASK: u16 = 0x8000;

    /// The mask of the bits used to encode the exponent of an [`f16`].
    ///
    /// Note that the exponent is stored as a biased value, with a bias of 15 for `f16`.
    ///
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let exponent_mask = f16::EXPONENT_MASK;
    ///
    /// fn get_exp(a: f16) -> i16 {
    ///     let bias = 15;
    ///     let biased = a.to_bits() & f16::EXPONENT_MASK;
    ///     (biased >> (f16::MANTISSA_DIGITS - 1)).cast_signed() - bias
    /// }
    ///
    /// assert_eq!(get_exp(0.5), -1);
    /// assert_eq!(get_exp(1.0), 0);
    /// assert_eq!(get_exp(2.0), 1);
    /// assert_eq!(get_exp(4.0), 2);
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const EXPONENT_MASK: u16 = 0x7c00;

    /// The mask of the bits used to encode the mantissa of an [`f16`].
    ///
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let mantissa_mask = f16::MANTISSA_MASK;
    ///
    /// assert_eq!(0f16.to_bits() & mantissa_mask, 0x0);
    /// assert_eq!(1f16.to_bits() & mantissa_mask, 0x0);
    ///
    /// // multiplying a finite value by a power of 2 doesn't change its mantissa
    /// // unless the result or initial value is not normal.
    /// let a = 1.6552f16;
    /// let b = 4.0 * a;
    /// assert_eq!(a.to_bits() & mantissa_mask, b.to_bits() & mantissa_mask);
    ///
    /// // The maximum and minimum values have a saturated significand
    /// assert_eq!(f16::MAX.to_bits() & f16::MANTISSA_MASK, f16::MANTISSA_MASK);
    /// assert_eq!(f16::MIN.to_bits() & f16::MANTISSA_MASK, f16::MANTISSA_MASK);
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const MANTISSA_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal)
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal)
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
416
417    /// Returns `true` if this value is NaN.
418    ///
419    /// ```
420    /// #![feature(f16)]
421    /// # #[cfg(target_has_reliable_f16)] {
422    ///
423    /// let nan = f16::NAN;
424    /// let f = 7.0_f16;
425    ///
426    /// assert!(nan.is_nan());
427    /// assert!(!f.is_nan());
428    /// # }
429    /// ```
430    #[inline]
431    #[must_use]
432    #[unstable(feature = "f16", issue = "116909")]
433    #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
434    pub const fn is_nan(self) -> bool {
435        self != self
436    }
437
438    /// Returns `true` if this value is positive infinity or negative infinity, and
439    /// `false` otherwise.
440    ///
441    /// ```
442    /// #![feature(f16)]
443    /// # #[cfg(target_has_reliable_f16)] {
444    ///
445    /// let f = 7.0f16;
446    /// let inf = f16::INFINITY;
447    /// let neg_inf = f16::NEG_INFINITY;
448    /// let nan = f16::NAN;
449    ///
450    /// assert!(!f.is_infinite());
451    /// assert!(!nan.is_infinite());
452    ///
453    /// assert!(inf.is_infinite());
454    /// assert!(neg_inf.is_infinite());
455    /// # }
456    /// ```
457    #[inline]
458    #[must_use]
459    #[unstable(feature = "f16", issue = "116909")]
460    pub const fn is_infinite(self) -> bool {
461        (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
462    }
463
464    /// Returns `true` if this number is neither infinite nor NaN.
465    ///
466    /// ```
467    /// #![feature(f16)]
468    /// # #[cfg(target_has_reliable_f16)] {
469    ///
470    /// let f = 7.0f16;
471    /// let inf: f16 = f16::INFINITY;
472    /// let neg_inf: f16 = f16::NEG_INFINITY;
473    /// let nan: f16 = f16::NAN;
474    ///
475    /// assert!(f.is_finite());
476    ///
477    /// assert!(!nan.is_finite());
478    /// assert!(!inf.is_finite());
479    /// assert!(!neg_inf.is_finite());
480    /// # }
481    /// ```
482    #[inline]
483    #[must_use]
484    #[unstable(feature = "f16", issue = "116909")]
485    #[rustc_const_unstable(feature = "f16", issue = "116909")]
486    pub const fn is_finite(self) -> bool {
487        // There's no need to handle NaN separately: if self is NaN,
488        // the comparison is not true, exactly as desired.
489        self.abs() < Self::INFINITY
490    }
491
492    /// Returns `true` if the number is [subnormal].
493    ///
494    /// ```
495    /// #![feature(f16)]
496    /// # #[cfg(target_has_reliable_f16)] {
497    ///
498    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
499    /// let max = f16::MAX;
500    /// let lower_than_min = 1.0e-7_f16;
501    /// let zero = 0.0_f16;
502    ///
503    /// assert!(!min.is_subnormal());
504    /// assert!(!max.is_subnormal());
505    ///
506    /// assert!(!zero.is_subnormal());
507    /// assert!(!f16::NAN.is_subnormal());
508    /// assert!(!f16::INFINITY.is_subnormal());
509    /// // Values between `0` and `min` are Subnormal.
510    /// assert!(lower_than_min.is_subnormal());
511    /// # }
512    /// ```
513    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
514    #[inline]
515    #[must_use]
516    #[unstable(feature = "f16", issue = "116909")]
517    pub const fn is_subnormal(self) -> bool {
518        matches!(self.classify(), FpCategory::Subnormal)
519    }
520
521    /// Returns `true` if the number is neither zero, infinite, [subnormal], or NaN.
522    ///
523    /// ```
524    /// #![feature(f16)]
525    /// # #[cfg(target_has_reliable_f16)] {
526    ///
527    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
528    /// let max = f16::MAX;
529    /// let lower_than_min = 1.0e-7_f16;
530    /// let zero = 0.0_f16;
531    ///
532    /// assert!(min.is_normal());
533    /// assert!(max.is_normal());
534    ///
535    /// assert!(!zero.is_normal());
536    /// assert!(!f16::NAN.is_normal());
537    /// assert!(!f16::INFINITY.is_normal());
538    /// // Values between `0` and `min` are Subnormal.
539    /// assert!(!lower_than_min.is_normal());
540    /// # }
541    /// ```
542    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
543    #[inline]
544    #[must_use]
545    #[unstable(feature = "f16", issue = "116909")]
546    pub const fn is_normal(self) -> bool {
547        matches!(self.classify(), FpCategory::Normal)
548    }
549
550    /// Returns the floating point category of the number. If only one property
551    /// is going to be tested, it is generally faster to use the specific
552    /// predicate instead.
553    ///
554    /// ```
555    /// #![feature(f16)]
556    /// # #[cfg(target_has_reliable_f16)] {
557    ///
558    /// use std::num::FpCategory;
559    ///
560    /// let num = 12.4_f16;
561    /// let inf = f16::INFINITY;
562    ///
563    /// assert_eq!(num.classify(), FpCategory::Normal);
564    /// assert_eq!(inf.classify(), FpCategory::Infinite);
565    /// # }
566    /// ```
567    #[inline]
568    #[unstable(feature = "f16", issue = "116909")]
569    #[must_use]
570    pub const fn classify(self) -> FpCategory {
571        let b = self.to_bits();
572        match (b & Self::MANTISSA_MASK, b & Self::EXPONENT_MASK) {
573            (0, Self::EXPONENT_MASK) => FpCategory::Infinite,
574            (_, Self::EXPONENT_MASK) => FpCategory::Nan,
575            (0, 0) => FpCategory::Zero,
576            (_, 0) => FpCategory::Subnormal,
577            _ => FpCategory::Normal,
578        }
579    }
580
581    /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
582    /// positive sign bit and positive infinity.
583    ///
584    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
585    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
586    /// conserved over arithmetic operations, the result of `is_sign_positive` on
587    /// a NaN might produce an unexpected or non-portable result. See the [specification
588    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
589    /// if you need fully portable behavior (will return `false` for all NaNs).
590    ///
591    /// ```
592    /// #![feature(f16)]
593    /// # #[cfg(target_has_reliable_f16)] {
594    ///
595    /// let f = 7.0_f16;
596    /// let g = -7.0_f16;
597    ///
598    /// assert!(f.is_sign_positive());
599    /// assert!(!g.is_sign_positive());
600    /// # }
601    /// ```
602    #[inline]
603    #[must_use]
604    #[unstable(feature = "f16", issue = "116909")]
605    pub const fn is_sign_positive(self) -> bool {
606        !self.is_sign_negative()
607    }
608
609    /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
610    /// negative sign bit and negative infinity.
611    ///
612    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
613    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
614    /// conserved over arithmetic operations, the result of `is_sign_negative` on
615    /// a NaN might produce an unexpected or non-portable result. See the [specification
616    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
617    /// if you need fully portable behavior (will return `false` for all NaNs).
618    ///
619    /// ```
620    /// #![feature(f16)]
621    /// # #[cfg(target_has_reliable_f16)] {
622    ///
623    /// let f = 7.0_f16;
624    /// let g = -7.0_f16;
625    ///
626    /// assert!(!f.is_sign_negative());
627    /// assert!(g.is_sign_negative());
628    /// # }
629    /// ```
630    #[inline]
631    #[must_use]
632    #[unstable(feature = "f16", issue = "116909")]
633    pub const fn is_sign_negative(self) -> bool {
634        // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
635        // applies to zeros and NaNs as well.
636        // SAFETY: This is just transmuting to get the sign bit, it's fine.
637        (self.to_bits() & (1 << 15)) != 0
638    }
639
640    /// Returns the least number greater than `self`.
641    ///
642    /// Let `TINY` be the smallest representable positive `f16`. Then,
643    ///  - if `self.is_nan()`, this returns `self`;
644    ///  - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
645    ///  - if `self` is `-TINY`, this returns -0.0;
646    ///  - if `self` is -0.0 or +0.0, this returns `TINY`;
647    ///  - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
648    ///  - otherwise the unique least value greater than `self` is returned.
649    ///
650    /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
651    /// is finite `x == x.next_up().next_down()` also holds.
652    ///
653    /// ```rust
654    /// #![feature(f16)]
655    /// # #[cfg(target_has_reliable_f16)] {
656    ///
657    /// // f16::EPSILON is the difference between 1.0 and the next number up.
658    /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
659    /// // But not for most numbers.
660    /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
661    /// assert_eq!(4356f16.next_up(), 4360.0);
662    /// # }
663    /// ```
664    ///
665    /// This operation corresponds to IEEE-754 `nextUp`.
666    ///
667    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
668    /// [`INFINITY`]: Self::INFINITY
669    /// [`MIN`]: Self::MIN
670    /// [`MAX`]: Self::MAX
671    #[inline]
672    #[doc(alias = "nextUp")]
673    #[unstable(feature = "f16", issue = "116909")]
674    #[must_use = "method returns a new number and does not mutate the original value"]
675    pub const fn next_up(self) -> Self {
676        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
677        // denormals to zero. This is in general unsound and unsupported, but here
678        // we do our best to still produce the correct result on such targets.
679        let bits = self.to_bits();
680        if self.is_nan() || bits == Self::INFINITY.to_bits() {
681            return self;
682        }
683
684        let abs = bits & !Self::SIGN_MASK;
685        let next_bits = if abs == 0 {
686            Self::TINY_BITS
687        } else if bits == abs {
688            bits + 1
689        } else {
690            bits - 1
691        };
692        Self::from_bits(next_bits)
693    }
694
695    /// Returns the greatest number less than `self`.
696    ///
697    /// Let `TINY` be the smallest representable positive `f16`. Then,
698    ///  - if `self.is_nan()`, this returns `self`;
699    ///  - if `self` is [`INFINITY`], this returns [`MAX`];
700    ///  - if `self` is `TINY`, this returns 0.0;
701    ///  - if `self` is -0.0 or +0.0, this returns `-TINY`;
702    ///  - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
703    ///  - otherwise the unique greatest value less than `self` is returned.
704    ///
705    /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
706    /// is finite `x == x.next_down().next_up()` also holds.
707    ///
708    /// ```rust
709    /// #![feature(f16)]
710    /// # #[cfg(target_has_reliable_f16)] {
711    ///
712    /// let x = 1.0f16;
713    /// // Clamp value into range [0, 1).
714    /// let clamped = x.clamp(0.0, 1.0f16.next_down());
715    /// assert!(clamped < 1.0);
716    /// assert_eq!(clamped.next_up(), 1.0);
717    /// # }
718    /// ```
719    ///
720    /// This operation corresponds to IEEE-754 `nextDown`.
721    ///
722    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
723    /// [`INFINITY`]: Self::INFINITY
724    /// [`MIN`]: Self::MIN
725    /// [`MAX`]: Self::MAX
726    #[inline]
727    #[doc(alias = "nextDown")]
728    #[unstable(feature = "f16", issue = "116909")]
729    #[must_use = "method returns a new number and does not mutate the original value"]
730    pub const fn next_down(self) -> Self {
731        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
732        // denormals to zero. This is in general unsound and unsupported, but here
733        // we do our best to still produce the correct result on such targets.
734        let bits = self.to_bits();
735        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
736            return self;
737        }
738
739        let abs = bits & !Self::SIGN_MASK;
740        let next_bits = if abs == 0 {
741            Self::NEG_TINY_BITS
742        } else if bits == abs {
743            bits - 1
744        } else {
745            bits + 1
746        };
747        Self::from_bits(next_bits)
748    }
749
750    /// Takes the reciprocal (inverse) of a number, `1/x`.
751    ///
752    /// ```
753    /// #![feature(f16)]
754    /// # #[cfg(target_has_reliable_f16)] {
755    ///
756    /// let x = 2.0_f16;
757    /// let abs_difference = (x.recip() - (1.0 / x)).abs();
758    ///
759    /// assert!(abs_difference <= f16::EPSILON);
760    /// # }
761    /// ```
762    #[inline]
763    #[unstable(feature = "f16", issue = "116909")]
764    #[must_use = "this returns the result of the operation, without modifying the original"]
765    pub const fn recip(self) -> Self {
766        1.0 / self
767    }
768
769    /// Converts radians to degrees.
770    ///
771    /// # Unspecified precision
772    ///
773    /// The precision of this function is non-deterministic. This means it varies by platform,
774    /// Rust version, and can even differ within the same execution from one invocation to the next.
775    ///
776    /// # Examples
777    ///
778    /// ```
779    /// #![feature(f16)]
780    /// # #[cfg(target_has_reliable_f16)] {
781    ///
782    /// let angle = std::f16::consts::PI;
783    ///
784    /// let abs_difference = (angle.to_degrees() - 180.0).abs();
785    /// assert!(abs_difference <= 0.5);
786    /// # }
787    /// ```
788    #[inline]
789    #[unstable(feature = "f16", issue = "116909")]
790    #[must_use = "this returns the result of the operation, without modifying the original"]
791    pub const fn to_degrees(self) -> Self {
792        // Use a literal to avoid double rounding, consts::PI is already rounded,
793        // and dividing would round again.
794        const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
795        self * PIS_IN_180
796    }
797
798    /// Converts degrees to radians.
799    ///
800    /// # Unspecified precision
801    ///
802    /// The precision of this function is non-deterministic. This means it varies by platform,
803    /// Rust version, and can even differ within the same execution from one invocation to the next.
804    ///
805    /// # Examples
806    ///
807    /// ```
808    /// #![feature(f16)]
809    /// # #[cfg(target_has_reliable_f16)] {
810    ///
811    /// let angle = 180.0f16;
812    ///
813    /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
814    ///
815    /// assert!(abs_difference <= 0.01);
816    /// # }
817    /// ```
818    #[inline]
819    #[unstable(feature = "f16", issue = "116909")]
820    #[must_use = "this returns the result of the operation, without modifying the original"]
821    pub const fn to_radians(self) -> f16 {
822        // Use a literal to avoid double rounding, consts::PI is already rounded,
823        // and dividing would round again.
824        const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
825        self * RADS_PER_DEG
826    }
827
    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `maxNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// assert_eq!(x.max(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn max(self, other: f16) -> f16 {
        // Forwarded directly to the compiler intrinsic implementing IEEE 754-2019
        // `maximumNumber`; the "nsz" suffix marks that no ordering of signed zeros
        // is guaranteed (see the doc comment above).
        intrinsics::maximum_number_nsz_f16(self, other)
    }
858
    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `minNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// assert_eq!(x.min(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn min(self, other: f16) -> f16 {
        // Forwarded directly to the compiler intrinsic implementing IEEE 754-2019
        // `minimumNumber`; the "nsz" suffix marks that no ordering of signed zeros
        // is guaranteed (see the doc comment above).
        intrinsics::minimum_number_nsz_f16(self, other)
    }
889
    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `maximum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn maximum(self, other: f16) -> f16 {
        // Forwarded directly to the compiler intrinsic implementing IEEE 754-2019
        // `maximum` (NaN-propagating, signed-zero-ordering; see doc comment above).
        intrinsics::maximumf16(self, other)
    }
921
    /// Returns the minimum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `minimum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.minimum(y), x);
    /// assert!(x.minimum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn minimum(self, other: f16) -> f16 {
        // Forwarded directly to the compiler intrinsic implementing IEEE 754-2019
        // `minimum` (NaN-propagating, signed-zero-ordering; see doc comment above).
        intrinsics::minimumf16(self, other)
    }
953
954    /// Calculates the midpoint (average) between `self` and `rhs`.
955    ///
956    /// This returns NaN when *either* argument is NaN or if a combination of
957    /// +inf and -inf is provided as arguments.
958    ///
959    /// # Examples
960    ///
961    /// ```
962    /// #![feature(f16)]
963    /// # #[cfg(target_has_reliable_f16)] {
964    ///
965    /// assert_eq!(1f16.midpoint(4.0), 2.5);
966    /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
967    /// # }
968    /// ```
969    #[inline]
970    #[doc(alias = "average")]
971    #[unstable(feature = "f16", issue = "116909")]
972    #[rustc_const_unstable(feature = "f16", issue = "116909")]
973    #[must_use = "this returns the result of the operation, \
974                  without modifying the original"]
975    pub const fn midpoint(self, other: f16) -> f16 {
976        const HI: f16 = f16::MAX / 2.;
977
978        let (a, b) = (self, other);
979        let abs_a = a.abs();
980        let abs_b = b.abs();
981
982        if abs_a <= HI && abs_b <= HI {
983            // Overflow is impossible
984            (a + b) / 2.
985        } else {
986            (a / 2.) + (b / 2.)
987        }
988    }
989
    /// Rounds toward zero and converts to any primitive integer type,
    /// assuming that the value is finite and fits in that type.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = 4.6_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
    /// assert_eq!(rounded, 4);
    ///
    /// let value = -128.9_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
    /// assert_eq!(rounded, i8::MIN);
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// The value must:
    ///
    /// * Not be `NaN`
    /// * Not be infinite
    /// * Be representable in the return type `Int`, after truncating off its fractional part
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub unsafe fn to_int_unchecked<Int>(self) -> Int
    where
        Self: FloatToInt<Int>,
    {
        // Dispatch to the per-`Int` implementation of the `FloatToInt` trait.
        // SAFETY: the caller must uphold the safety contract for
        // `FloatToInt::to_int_unchecked`.
        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
    }
1025
    /// Raw transmutation to `u16`.
    ///
    /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
    ///
    /// See [`from_bits`](#method.from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
    /// assert_eq!((12.5f16).to_bits(), 0x4a40);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    #[allow(unnecessary_transmutes)]
    pub const fn to_bits(self) -> u16 {
        // A bitwise reinterpretation, not a numeric conversion (see doc comment above).
        // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
        unsafe { mem::transmute(self) }
    }
1052
    /// Raw transmutation from `u16`.
    ///
    /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
    /// It turns out this is incredibly portable, for two reasons:
    ///
    /// * Floats and Ints have the same endianness on all supported platforms.
    /// * IEEE 754 very precisely specifies the bit layout of floats.
    ///
    /// However there is one caveat: prior to the 2008 version of IEEE 754, how
    /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
    /// (notably x86 and ARM) picked the interpretation that was ultimately
    /// standardized in 2008, but some didn't (notably MIPS). As a result, all
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
    /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///
    /// If the results of this method are only manipulated by the same
    /// architecture that produced them, then there is no portability concern.
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
    /// If you don't care about signalingness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let v = f16::from_bits(0x4a40);
    /// assert_eq!(v, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(unnecessary_transmutes)]
    pub const fn from_bits(v: u16) -> Self {
        // A bitwise reinterpretation, not a numeric conversion (see doc comment above).
        // It turns out the safety issues with sNaN were overblown! Hooray!
        // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
        unsafe { mem::transmute(v) }
    }
1100
1101    /// Returns the memory representation of this floating point number as a byte array in
1102    /// big-endian (network) byte order.
1103    ///
1104    /// See [`from_bits`](Self::from_bits) for some discussion of the
1105    /// portability of this operation (there are almost no issues).
1106    ///
1107    /// # Examples
1108    ///
1109    /// ```
1110    /// #![feature(f16)]
1111    /// # #[cfg(target_has_reliable_f16)] {
1112    ///
1113    /// let bytes = 12.5f16.to_be_bytes();
1114    /// assert_eq!(bytes, [0x4a, 0x40]);
1115    /// # }
1116    /// ```
1117    #[inline]
1118    #[unstable(feature = "f16", issue = "116909")]
1119    #[must_use = "this returns the result of the operation, without modifying the original"]
1120    pub const fn to_be_bytes(self) -> [u8; 2] {
1121        self.to_bits().to_be_bytes()
1122    }
1123
1124    /// Returns the memory representation of this floating point number as a byte array in
1125    /// little-endian byte order.
1126    ///
1127    /// See [`from_bits`](Self::from_bits) for some discussion of the
1128    /// portability of this operation (there are almost no issues).
1129    ///
1130    /// # Examples
1131    ///
1132    /// ```
1133    /// #![feature(f16)]
1134    /// # #[cfg(target_has_reliable_f16)] {
1135    ///
1136    /// let bytes = 12.5f16.to_le_bytes();
1137    /// assert_eq!(bytes, [0x40, 0x4a]);
1138    /// # }
1139    /// ```
1140    #[inline]
1141    #[unstable(feature = "f16", issue = "116909")]
1142    #[must_use = "this returns the result of the operation, without modifying the original"]
1143    pub const fn to_le_bytes(self) -> [u8; 2] {
1144        self.to_bits().to_le_bytes()
1145    }
1146
1147    /// Returns the memory representation of this floating point number as a byte array in
1148    /// native byte order.
1149    ///
1150    /// As the target platform's native endianness is used, portable code
1151    /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
1152    ///
1153    /// [`to_be_bytes`]: f16::to_be_bytes
1154    /// [`to_le_bytes`]: f16::to_le_bytes
1155    ///
1156    /// See [`from_bits`](Self::from_bits) for some discussion of the
1157    /// portability of this operation (there are almost no issues).
1158    ///
1159    /// # Examples
1160    ///
1161    /// ```
1162    /// #![feature(f16)]
1163    /// # #[cfg(target_has_reliable_f16)] {
1164    ///
1165    /// let bytes = 12.5f16.to_ne_bytes();
1166    /// assert_eq!(
1167    ///     bytes,
1168    ///     if cfg!(target_endian = "big") {
1169    ///         [0x4a, 0x40]
1170    ///     } else {
1171    ///         [0x40, 0x4a]
1172    ///     }
1173    /// );
1174    /// # }
1175    /// ```
1176    #[inline]
1177    #[unstable(feature = "f16", issue = "116909")]
1178    #[must_use = "this returns the result of the operation, without modifying the original"]
1179    pub const fn to_ne_bytes(self) -> [u8; 2] {
1180        self.to_bits().to_ne_bytes()
1181    }
1182
1183    /// Creates a floating point value from its representation as a byte array in big endian.
1184    ///
1185    /// See [`from_bits`](Self::from_bits) for some discussion of the
1186    /// portability of this operation (there are almost no issues).
1187    ///
1188    /// # Examples
1189    ///
1190    /// ```
1191    /// #![feature(f16)]
1192    /// # #[cfg(target_has_reliable_f16)] {
1193    ///
1194    /// let value = f16::from_be_bytes([0x4a, 0x40]);
1195    /// assert_eq!(value, 12.5);
1196    /// # }
1197    /// ```
1198    #[inline]
1199    #[must_use]
1200    #[unstable(feature = "f16", issue = "116909")]
1201    pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
1202        Self::from_bits(u16::from_be_bytes(bytes))
1203    }
1204
1205    /// Creates a floating point value from its representation as a byte array in little endian.
1206    ///
1207    /// See [`from_bits`](Self::from_bits) for some discussion of the
1208    /// portability of this operation (there are almost no issues).
1209    ///
1210    /// # Examples
1211    ///
1212    /// ```
1213    /// #![feature(f16)]
1214    /// # #[cfg(target_has_reliable_f16)] {
1215    ///
1216    /// let value = f16::from_le_bytes([0x40, 0x4a]);
1217    /// assert_eq!(value, 12.5);
1218    /// # }
1219    /// ```
1220    #[inline]
1221    #[must_use]
1222    #[unstable(feature = "f16", issue = "116909")]
1223    pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
1224        Self::from_bits(u16::from_le_bytes(bytes))
1225    }
1226
1227    /// Creates a floating point value from its representation as a byte array in native endian.
1228    ///
1229    /// As the target platform's native endianness is used, portable code
1230    /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1231    /// appropriate instead.
1232    ///
1233    /// [`from_be_bytes`]: f16::from_be_bytes
1234    /// [`from_le_bytes`]: f16::from_le_bytes
1235    ///
1236    /// See [`from_bits`](Self::from_bits) for some discussion of the
1237    /// portability of this operation (there are almost no issues).
1238    ///
1239    /// # Examples
1240    ///
1241    /// ```
1242    /// #![feature(f16)]
1243    /// # #[cfg(target_has_reliable_f16)] {
1244    ///
1245    /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
1246    ///     [0x4a, 0x40]
1247    /// } else {
1248    ///     [0x40, 0x4a]
1249    /// });
1250    /// assert_eq!(value, 12.5);
1251    /// # }
1252    /// ```
1253    #[inline]
1254    #[must_use]
1255    #[unstable(feature = "f16", issue = "116909")]
1256    pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
1257        Self::from_bits(u16::from_ne_bytes(bytes))
1258    }
1259
1260    /// Returns the ordering between `self` and `other`.
1261    ///
1262    /// Unlike the standard partial comparison between floating point numbers,
1263    /// this comparison always produces an ordering in accordance to
1264    /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1265    /// floating point standard. The values are ordered in the following sequence:
1266    ///
1267    /// - negative quiet NaN
1268    /// - negative signaling NaN
1269    /// - negative infinity
1270    /// - negative numbers
1271    /// - negative subnormal numbers
1272    /// - negative zero
1273    /// - positive zero
1274    /// - positive subnormal numbers
1275    /// - positive numbers
1276    /// - positive infinity
1277    /// - positive signaling NaN
1278    /// - positive quiet NaN.
1279    ///
1280    /// The ordering established by this function does not always agree with the
1281    /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1282    /// they consider negative and positive zero equal, while `total_cmp`
1283    /// doesn't.
1284    ///
1285    /// The interpretation of the signaling NaN bit follows the definition in
1286    /// the IEEE 754 standard, which may not match the interpretation by some of
1287    /// the older, non-conformant (e.g. MIPS) hardware implementations.
1288    ///
1289    /// # Example
1290    ///
1291    /// ```
1292    /// #![feature(f16)]
1293    /// # #[cfg(target_has_reliable_f16)] {
1294    ///
1295    /// struct GoodBoy {
1296    ///     name: &'static str,
1297    ///     weight: f16,
1298    /// }
1299    ///
1300    /// let mut bois = vec![
1301    ///     GoodBoy { name: "Pucci", weight: 0.1 },
1302    ///     GoodBoy { name: "Woofer", weight: 99.0 },
1303    ///     GoodBoy { name: "Yapper", weight: 10.0 },
1304    ///     GoodBoy { name: "Chonk", weight: f16::INFINITY },
1305    ///     GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1306    ///     GoodBoy { name: "Floaty", weight: -5.0 },
1307    /// ];
1308    ///
1309    /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1310    ///
1311    /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1312    /// if f16::NAN.is_sign_negative() {
1313    ///     bois.into_iter().map(|b| b.weight)
1314    ///         .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1315    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1316    /// } else {
1317    ///     bois.into_iter().map(|b| b.weight)
1318    ///         .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1319    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1320    /// }
1321    /// # }
1322    /// ```
1323    #[inline]
1324    #[must_use]
1325    #[unstable(feature = "f16", issue = "116909")]
1326    #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
1327    pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1328        let mut left = self.to_bits() as i16;
1329        let mut right = other.to_bits() as i16;
1330
1331        // In case of negatives, flip all the bits except the sign
1332        // to achieve a similar layout as two's complement integers
1333        //
1334        // Why does this work? IEEE 754 floats consist of three fields:
1335        // Sign bit, exponent and mantissa. The set of exponent and mantissa
1336        // fields as a whole have the property that their bitwise order is
1337        // equal to the numeric magnitude where the magnitude is defined.
1338        // The magnitude is not normally defined on NaN values, but
1339        // IEEE 754 totalOrder defines the NaN values also to follow the
1340        // bitwise order. This leads to order explained in the doc comment.
1341        // However, the representation of magnitude is the same for negative
1342        // and positive numbers – only the sign bit is different.
1343        // To easily compare the floats as signed integers, we need to
1344        // flip the exponent and mantissa bits in case of negative numbers.
1345        // We effectively convert the numbers to "two's complement" form.
1346        //
1347        // To do the flipping, we construct a mask and XOR against it.
1348        // We branchlessly calculate an "all-ones except for the sign bit"
1349        // mask from negative-signed values: right shifting sign-extends
1350        // the integer, so we "fill" the mask with sign bits, and then
1351        // convert to unsigned to push one more zero bit.
1352        // On positive values, the mask is all zeros, so it's a no-op.
1353        left ^= (((left >> 15) as u16) >> 1) as i16;
1354        right ^= (((right >> 15) as u16) >> 1) as i16;
1355
1356        left.cmp(&right)
1357    }
1358
1359    /// Restrict a value to a certain interval unless it is NaN.
1360    ///
1361    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1362    /// less than `min`. Otherwise this returns `self`.
1363    ///
1364    /// Note that this function returns NaN if the initial value was NaN as
1365    /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
1366    /// zeros with different sign, either `0.0` or `-0.0` is returned non-deterministically.
1367    ///
1368    /// # Panics
1369    ///
1370    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1371    ///
1372    /// # Examples
1373    ///
1374    /// ```
1375    /// #![feature(f16)]
1376    /// # #[cfg(target_has_reliable_f16)] {
1377    ///
1378    /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1379    /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1380    /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1381    /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1382    ///
1383    /// // These always returns zero, but the sign (which is ignored by `==`) is non-deterministic.
1384    /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
1385    /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
1386    /// // This is definitely a negative zero.
1387    /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
1388    /// # }
1389    /// ```
1390    #[inline]
1391    #[unstable(feature = "f16", issue = "116909")]
1392    #[must_use = "method returns a new number and does not mutate the original value"]
1393    pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1394        const_assert!(
1395            min <= max,
1396            "min > max, or either was NaN",
1397            "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1398            min: f16,
1399            max: f16,
1400        );
1401
1402        if self < min {
1403            self = min;
1404        }
1405        if self > max {
1406            self = max;
1407        }
1408        self
1409    }
1410
1411    /// Clamps this number to a symmetric range centered around zero.
1412    ///
1413    /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
1414    ///
1415    /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
1416    /// explicit about the intent.
1417    ///
1418    /// # Panics
1419    ///
1420    /// Panics if `limit` is negative or NaN, as this indicates a logic error.
1421    ///
1422    /// # Examples
1423    ///
1424    /// ```
1425    /// #![feature(f16)]
1426    /// #![feature(clamp_magnitude)]
1427    /// # #[cfg(target_has_reliable_f16)] {
1428    /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
1429    /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
1430    /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
1431    /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
1432    /// # }
1433    /// ```
1434    #[inline]
1435    #[unstable(feature = "clamp_magnitude", issue = "148519")]
1436    #[must_use = "this returns the clamped value and does not modify the original"]
1437    pub fn clamp_magnitude(self, limit: f16) -> f16 {
1438        assert!(limit >= 0.0, "limit must be non-negative");
1439        let limit = limit.abs(); // Canonicalises -0.0 to 0.0
1440        self.clamp(-limit, limit)
1441    }
1442
    /// Computes the absolute value of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let x = 3.5_f16;
    /// let y = -3.5_f16;
    ///
    /// assert_eq!(x.abs(), x);
    /// assert_eq!(y.abs(), -y);
    ///
    /// assert!(f16::NAN.abs().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn abs(self) -> Self {
        // Forwarded to the `fabs` intrinsic; always exact (see doc comment above).
        intrinsics::fabs(self)
    }
1469
1470    /// Returns a number that represents the sign of `self`.
1471    ///
1472    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1473    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1474    /// - NaN if the number is NaN
1475    ///
1476    /// # Examples
1477    ///
1478    /// ```
1479    /// #![feature(f16)]
1480    /// # #[cfg(target_has_reliable_f16)] {
1481    ///
1482    /// let f = 3.5_f16;
1483    ///
1484    /// assert_eq!(f.signum(), 1.0);
1485    /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1486    ///
1487    /// assert!(f16::NAN.signum().is_nan());
1488    /// # }
1489    /// ```
1490    #[inline]
1491    #[unstable(feature = "f16", issue = "116909")]
1492    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1493    #[must_use = "method returns a new number and does not mutate the original value"]
1494    pub const fn signum(self) -> f16 {
1495        if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1496    }
1497
    /// Returns a number composed of the magnitude of `self` and the sign of
    /// `sign`.
    ///
    /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
    /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
    /// returned.
    ///
    /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
    /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
    /// doesn't guarantee that the bit pattern of NaNs are conserved over arithmetic operations, the
    /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
    /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
    /// info.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.copysign(0.42), 3.5_f16);
    /// assert_eq!(f.copysign(-0.42), -3.5_f16);
    /// assert_eq!((-f).copysign(0.42), 3.5_f16);
    /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
    ///
    /// assert!(f16::NAN.copysign(1.0).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn copysign(self, sign: f16) -> f16 {
        // Forwarded to the `copysignf16` intrinsic: magnitude of `self`,
        // sign bit of `sign` (see doc comment above).
        intrinsics::copysignf16(self, sign)
    }
1535
    /// Float addition that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_add(self, rhs: f16) -> f16 {
        // The `fadd_algebraic` intrinsic permits algebraic rewrites by the
        // optimizer, so results may vary between builds (see linked docs above).
        intrinsics::fadd_algebraic(self, rhs)
    }
1546
    /// Float subtraction that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_sub(self, rhs: f16) -> f16 {
        // The `fsub_algebraic` intrinsic permits algebraic rewrites by the
        // optimizer, so results may vary between builds (see linked docs above).
        intrinsics::fsub_algebraic(self, rhs)
    }
1557
    /// Float multiplication that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_mul(self, rhs: f16) -> f16 {
        // The `fmul_algebraic` intrinsic permits algebraic rewrites by the
        // optimizer, so results may vary between builds (see linked docs above).
        intrinsics::fmul_algebraic(self, rhs)
    }
1568
    /// Float division that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_div(self, rhs: f16) -> f16 {
        // Deliberately not `self / rhs`: the intrinsic allows algebraic
        // rewrites (see the doc link above), unlike the strict operator.
        intrinsics::fdiv_algebraic(self, rhs)
    }
1579
    /// Float remainder that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_rem(self, rhs: f16) -> f16 {
        // Deliberately not `self % rhs`: the intrinsic allows algebraic
        // rewrites (see the doc link above), unlike the strict operator.
        intrinsics::frem_algebraic(self, rhs)
    }
1590}
1591
// Functions in this module fall into `core_float_math`
// #[unstable(feature = "core_float_math", issue = "137578")]
#[cfg(not(test))]
#[doc(test(attr(
    feature(cfg_target_has_reliable_f16_f128),
    expect(internal_features),
    allow(unused_features)
)))]
impl f16 {
    /// Returns the largest integer less than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.floor(), 3.0);
    /// assert_eq!(g.floor(), 3.0);
    /// assert_eq!(h.floor(), -4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn floor(self) -> f16 {
        intrinsics::floorf16(self)
    }

    /// Returns the smallest integer greater than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.01_f16;
    /// let g = 4.0_f16;
    ///
    /// assert_eq!(f.ceil(), 4.0);
    /// assert_eq!(g.ceil(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "ceiling")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn ceil(self) -> f16 {
        intrinsics::ceilf16(self)
    }

    /// Returns the nearest integer to `self`. If a value is half-way between two
    /// integers, round away from `0.0`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = -3.7_f16;
    /// let i = 3.5_f16;
    /// let j = 4.5_f16;
    ///
    /// assert_eq!(f.round(), 3.0);
    /// assert_eq!(g.round(), -3.0);
    /// assert_eq!(h.round(), -4.0);
    /// assert_eq!(i.round(), 4.0);
    /// assert_eq!(j.round(), 5.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round(self) -> f16 {
        intrinsics::roundf16(self)
    }

    /// Returns the nearest integer to a number. Rounds half-way cases to the number
    /// with an even least significant digit.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = 3.5_f16;
    /// let i = 4.5_f16;
    ///
    /// assert_eq!(f.round_ties_even(), 3.0);
    /// assert_eq!(g.round_ties_even(), -3.0);
    /// assert_eq!(h.round_ties_even(), 4.0);
    /// assert_eq!(i.round_ties_even(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round_ties_even(self) -> f16 {
        intrinsics::round_ties_even_f16(self)
    }

    /// Returns the integer part of `self`.
    /// This means that non-integer numbers are always truncated towards zero.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.trunc(), 3.0);
    /// assert_eq!(g.trunc(), 3.0);
    /// assert_eq!(h.trunc(), -3.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "truncate")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn trunc(self) -> f16 {
        intrinsics::truncf16(self)
    }

    /// Returns the fractional part of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 3.6_f16;
    /// let y = -3.6_f16;
    /// let abs_difference_x = (x.fract() - 0.6).abs();
    /// let abs_difference_y = (y.fract() - (-0.6)).abs();
    ///
    /// assert!(abs_difference_x <= f16::EPSILON);
    /// assert!(abs_difference_y <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn fract(self) -> f16 {
        // `trunc` rounds toward zero, so the difference is exactly the
        // fractional part and carries the sign of `self` (see `y` above).
        self - self.trunc()
    }

    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
    /// the target architecture has a dedicated `fma` CPU instruction. However,
    /// this is not always true, and will be heavily dependant on designing
    /// algorithms with specific target hardware in mind.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as
    /// `fusedMultiplyAdd` and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let m = 10.0_f16;
    /// let x = 4.0_f16;
    /// let b = 60.0_f16;
    ///
    /// assert_eq!(m.mul_add(x, b), 100.0);
    /// assert_eq!(m * x + b, 100.0);
    ///
    /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
    /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
    /// let minus_one = -1.0_f16;
    ///
    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
    /// // Different rounding with the non-fused multiply and add.
    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    // NOTE(review): unlike the other `const fn`s in these impls, this one has
    // no `#[rustc_const_unstable]` attribute — confirm that is intended.
    pub const fn mul_add(self, a: f16, b: f16) -> f16 {
        intrinsics::fmaf16(self, a, b)
    }

    /// Calculates Euclidean division, the matching method for `rem_euclid`.
    ///
    /// This computes the integer `n` such that
    /// `self = n * rhs + self.rem_euclid(rhs)`.
    /// In other words, the result is `self / rhs` rounded to the integer `n`
    /// such that `self >= n * rhs`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let a: f16 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
    /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
    /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
    /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn div_euclid(self, rhs: f16) -> f16 {
        // Quotient truncated toward zero; corrected below so that the
        // matching Euclidean remainder (`rem_euclid`) is nonnegative.
        let q = (self / rhs).trunc();
        if self % rhs < 0.0 {
            // A negative remainder means truncation stopped one integer short
            // of the Euclidean quotient; the step direction follows the sign
            // of `rhs`.
            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
        }
        q
    }

    /// Calculates the least nonnegative remainder of `self` when
    /// divided by `rhs`.
    ///
    /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
    /// most cases. However, due to a floating point round-off error it can
    /// result in `r == rhs.abs()`, violating the mathematical definition, if
    /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
    /// This result is not an element of the function's codomain, but it is the
    /// closest floating point number in the real numbers and thus fulfills the
    /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
    /// approximately.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let a: f16 = 7.0;
    /// let b = 4.0;
    /// assert_eq!(a.rem_euclid(b), 3.0);
    /// assert_eq!((-a).rem_euclid(b), 1.0);
    /// assert_eq!(a.rem_euclid(-b), 3.0);
    /// assert_eq!((-a).rem_euclid(-b), 1.0);
    /// // limitation due to round-off error
    /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[doc(alias = "modulo", alias = "mod")]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn rem_euclid(self, rhs: f16) -> f16 {
        // The built-in `%` takes the sign of `self`; a negative remainder is
        // shifted into range by adding `|rhs|` (modulo the round-off edge
        // case documented above).
        let r = self % rhs;
        if r < 0.0 { r + rhs.abs() } else { r }
    }

    /// Raises a number to an integer power.
    ///
    /// Using this function is generally faster than using `powf`.
    /// It might have a different sequence of rounding operations than `powf`,
    /// so the results are not guaranteed to agree.
    ///
    /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
    /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
    /// NaN, then the result is non-deterministically either a NaN or the result that the
    /// corresponding quiet NaN would produce.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.powi(2) - (x * x)).abs();
    /// assert!(abs_difference <= f16::EPSILON);
    ///
    /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
    /// assert_eq!(f16::powi(0.0, 0), 1.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn powi(self, n: i32) -> f16 {
        intrinsics::powif16(self, n)
    }

    /// Returns the square root of a number.
    ///
    /// Returns NaN if `self` is a negative number other than `-0.0`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
    /// and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let positive = 4.0_f16;
    /// let negative = -4.0_f16;
    /// let negative_zero = -0.0_f16;
    ///
    /// assert_eq!(positive.sqrt(), 2.0);
    /// assert!(negative.sqrt().is_nan());
    /// assert!(negative_zero.sqrt() == negative_zero);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "squareRoot")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn sqrt(self) -> f16 {
        intrinsics::sqrtf16(self)
    }

    /// Returns the cube root of a number.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// This function currently corresponds to the `cbrtf` from libc on Unix
    /// and Windows. Note that this might change in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 8.0f16;
    ///
    /// // x^(1/3) - 2 == 0
    /// let abs_difference = (x.cbrt() - 2.0).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn cbrt(self) -> f16 {
        // No native `f16` cube root in libm: widen to `f32`, use `cbrtf`,
        // then round back. The extra rounding step is covered by the
        // unspecified-precision contract documented above.
        libm::cbrtf(self as f32) as f16
    }
}