rustc_middle/ty/consts/
int.rs

use std::fmt;
use std::num::NonZero;

use rustc_abi::Size;
use rustc_apfloat::Float;
use rustc_apfloat::ieee::{Double, Half, Quad, Single};
use rustc_errors::{DiagArgValue, IntoDiagArg};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};

use crate::ty::TyCtxt;

#[derive(Copy, Clone)]
/// A type for representing any integer. Only used for printing.
pub struct ConstInt {
    /// The "untyped" variant of `ConstInt`.
    int: ScalarInt,
    /// Whether the value is of a signed integer type.
    signed: bool,
    /// Whether the value is a `usize` or `isize` type.
    is_ptr_sized_integral: bool,
}

impl ConstInt {
    pub fn new(int: ScalarInt, signed: bool, is_ptr_sized_integral: bool) -> Self {
        Self { int, signed, is_ptr_sized_integral }
    }
}

/// An enum to represent the compiler-side view of `intrinsics::AtomicOrdering`.
/// This lives here because there's a method in this file that needs it and it is entirely unclear
/// where else to put this...
#[derive(Debug, Copy, Clone)]
pub enum AtomicOrdering {
    // These values must match `intrinsics::AtomicOrdering`!
    Relaxed = 0,
    Release = 1,
    Acquire = 2,
    AcqRel = 3,
    SeqCst = 4,
}

/// An enum to represent the compiler-side view of `intrinsics::simd::SimdAlign`.
#[derive(Debug, Copy, Clone)]
pub enum SimdAlign {
    // These values must match `intrinsics::simd::SimdAlign`!
    Unaligned = 0,
    Element = 1,
    Vector = 2,
}

impl std::fmt::Debug for ConstInt {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { int, signed, is_ptr_sized_integral } = *self;
        let size = int.size().bytes();
        let raw = int.data;
        if signed {
            let bit_size = size * 8;
            let min = 1u128 << (bit_size - 1);
            let max = min - 1;
            if raw == min {
                match (size, is_ptr_sized_integral) {
                    (_, true) => write!(fmt, "isize::MIN"),
                    (1, _) => write!(fmt, "i8::MIN"),
                    (2, _) => write!(fmt, "i16::MIN"),
                    (4, _) => write!(fmt, "i32::MIN"),
                    (8, _) => write!(fmt, "i64::MIN"),
                    (16, _) => write!(fmt, "i128::MIN"),
                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
                }
            } else if raw == max {
                match (size, is_ptr_sized_integral) {
                    (_, true) => write!(fmt, "isize::MAX"),
                    (1, _) => write!(fmt, "i8::MAX"),
                    (2, _) => write!(fmt, "i16::MAX"),
                    (4, _) => write!(fmt, "i32::MAX"),
                    (8, _) => write!(fmt, "i64::MAX"),
                    (16, _) => write!(fmt, "i128::MAX"),
                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
                }
            } else {
                match size {
                    1 => write!(fmt, "{}", raw as i8)?,
                    2 => write!(fmt, "{}", raw as i16)?,
                    4 => write!(fmt, "{}", raw as i32)?,
                    8 => write!(fmt, "{}", raw as i64)?,
                    16 => write!(fmt, "{}", raw as i128)?,
                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
                }
                if fmt.alternate() {
                    match (size, is_ptr_sized_integral) {
                        (_, true) => write!(fmt, "_isize")?,
                        (1, _) => write!(fmt, "_i8")?,
                        (2, _) => write!(fmt, "_i16")?,
                        (4, _) => write!(fmt, "_i32")?,
                        (8, _) => write!(fmt, "_i64")?,
                        (16, _) => write!(fmt, "_i128")?,
                        (sz, _) => bug!("unexpected int size i{sz}"),
                    }
                }
                Ok(())
            }
        } else {
            let max = Size::from_bytes(size).truncate(u128::MAX);
            if raw == max {
                match (size, is_ptr_sized_integral) {
                    (_, true) => write!(fmt, "usize::MAX"),
                    (1, _) => write!(fmt, "u8::MAX"),
                    (2, _) => write!(fmt, "u16::MAX"),
                    (4, _) => write!(fmt, "u32::MAX"),
                    (8, _) => write!(fmt, "u64::MAX"),
                    (16, _) => write!(fmt, "u128::MAX"),
                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
                }
            } else {
                match size {
                    1 => write!(fmt, "{}", raw as u8)?,
                    2 => write!(fmt, "{}", raw as u16)?,
                    4 => write!(fmt, "{}", raw as u32)?,
                    8 => write!(fmt, "{}", raw as u64)?,
                    16 => write!(fmt, "{}", raw as u128)?,
                    _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
                }
                if fmt.alternate() {
                    match (size, is_ptr_sized_integral) {
                        (_, true) => write!(fmt, "_usize")?,
                        (1, _) => write!(fmt, "_u8")?,
                        (2, _) => write!(fmt, "_u16")?,
                        (4, _) => write!(fmt, "_u32")?,
                        (8, _) => write!(fmt, "_u64")?,
                        (16, _) => write!(fmt, "_u128")?,
                        (sz, _) => bug!("unexpected unsigned int size u{sz}"),
                    }
                }
                Ok(())
            }
        }
    }
}
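
// A minimal usage sketch of the formatting above (hedged, illustrative only; it relies on the
// `From<u8>` impl for `ScalarInt` defined further down in this file): the plain `{:?}` form
// prints just the value, while the alternate `{:#?}` form appends a type suffix.
//
//     let five = ConstInt::new(ScalarInt::from(5u8), /* signed */ false, /* ptr-sized */ false);
//     assert_eq!(format!("{five:?}"), "5");
//     assert_eq!(format!("{five:#?}"), "5_u8");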

impl IntoDiagArg for ConstInt {
    // FIXME this simply uses the Debug impl, but we could probably do better by converting both
    // to an inherent method that returns `Cow`.
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        DiagArgValue::Str(format!("{self:?}").into())
    }
}

/// The raw bytes of a simple value.
///
/// This is a packed struct in order to allow this type to be optimally embedded in enums
/// (like Scalar).
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
#[repr(packed)]
pub struct ScalarInt {
    /// The first `size` bytes of `data` are the value.
    /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
    data: u128,
    size: NonZero<u8>,
}
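
// For instance (a hedged sketch using `ScalarInt::TRUE` defined below): the boolean `true` is
// stored as `data: 1, size: 1`, and reading it back must use that exact one-byte size.
//
//     let b = ScalarInt::TRUE;
//     assert_eq!(b.to_bits(Size::from_bytes(1)), 1);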

// Cannot derive these, as the derives take references to the fields, and we
// can't take references to fields of packed structs.
impl<CTX> crate::ty::HashStable<CTX> for ScalarInt {
    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut crate::ty::StableHasher) {
        // Using a block `{self.data}` here to force a copy instead of using `self.data`
        // directly, because `hash_stable` takes `&self` and would thus borrow `self.data`.
        // Since `Self` is a packed struct, that would create a possibly unaligned reference,
        // which is UB.
        { self.data }.hash_stable(hcx, hasher);
        self.size.get().hash_stable(hcx, hasher);
    }
}

impl<S: Encoder> Encodable<S> for ScalarInt {
    fn encode(&self, s: &mut S) {
        let size = self.size.get();
        s.emit_u8(size);
        s.emit_raw_bytes(&self.data.to_le_bytes()[..size as usize]);
    }
}

impl<D: Decoder> Decodable<D> for ScalarInt {
    fn decode(d: &mut D) -> ScalarInt {
        let mut data = [0u8; 16];
        let size = d.read_u8();
        data[..size as usize].copy_from_slice(d.read_raw_bytes(size as usize));
        ScalarInt { data: u128::from_le_bytes(data), size: NonZero::new(size).unwrap() }
    }
}
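
// Sketch of the encoded shape (assuming a byte-oriented encoder): one length byte followed by
// exactly `size` little-endian value bytes; `decode` zero-extends those bytes back into `data`.
// For example, `data: 0x1234, size: 2` encodes as:
//
//     [0x02, 0x34, 0x12]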

impl ScalarInt {
    pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZero::new(1).unwrap() };
    pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZero::new(1).unwrap() };

    fn raw(data: u128, size: Size) -> Self {
        Self { data, size: NonZero::new(size.bytes() as u8).unwrap() }
    }

    #[inline]
    pub fn size(self) -> Size {
        Size::from_bytes(self.size.get())
    }

    /// Make sure the `data` fits in `size`.
    /// This is guaranteed by all constructors here, but this check has saved us from
    /// bugs many times in the past, so it is definitely worth keeping around.
    #[inline(always)]
    fn check_data(self) {
        // Using a block `{self.data}` here to force a copy instead of using `self.data`
        // directly, because `debug_assert_eq` takes references to its arguments and formatting
        // arguments and would thus borrow `self.data`. Since `Self`
        // is a packed struct, that would create a possibly unaligned reference, which
        // is UB.
        debug_assert_eq!(
            self.size().truncate(self.data),
            { self.data },
            "Scalar value {:#x} exceeds size of {} bytes",
            { self.data },
            self.size
        );
    }

    #[inline]
    pub fn null(size: Size) -> Self {
        Self::raw(0, size)
    }

    #[inline]
    pub fn is_null(self) -> bool {
        self.data == 0
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
        let (r, overflow) = Self::truncate_from_uint(i, size);
        if overflow { None } else { Some(r) }
    }

    /// Returns the truncated result, and whether truncation changed the value.
    #[inline]
    pub fn truncate_from_uint(i: impl Into<u128>, size: Size) -> (Self, bool) {
        let data = i.into();
        let r = Self::raw(size.truncate(data), size);
        (r, r.data != data)
    }
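
    // A minimal sketch of the truncation contract (hedged; assuming a 1-byte target size):
    // 300 does not fit in one byte, so only the low 8 bits survive (300 % 256 == 44) and the
    // returned flag reports that truncation changed the value.
    //
    //     let (r, overflow) = ScalarInt::truncate_from_uint(300u32, Size::from_bytes(1));
    //     assert_eq!(r.to_u8(), 44);
    //     assert!(overflow);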

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
        let (r, overflow) = Self::truncate_from_int(i, size);
        if overflow { None } else { Some(r) }
    }

    /// Returns the truncated result, and whether truncation changed the value.
    #[inline]
    pub fn truncate_from_int(i: impl Into<i128>, size: Size) -> (Self, bool) {
        let data = i.into();
        // `into` performed sign extension, we have to truncate
        let r = Self::raw(size.truncate(data as u128), size);
        (r, size.sign_extend(r.data) != data)
    }
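
    // The signed variant checks overflow by sign-extending the truncated bits back and comparing
    // (a hedged sketch, again assuming a 1-byte target size): -1 round-trips through 0xff, while
    // 200 sign-extends back to -56 and is therefore reported as truncated.
    //
    //     let (r, overflow) = ScalarInt::truncate_from_int(-1i32, Size::from_bytes(1));
    //     assert_eq!(r.to_i8(), -1);
    //     assert!(!overflow);
    //     let (_, overflow) = ScalarInt::truncate_from_int(200i32, Size::from_bytes(1));
    //     assert!(overflow);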

    #[inline]
    pub fn try_from_target_usize(i: impl Into<u128>, tcx: TyCtxt<'_>) -> Option<Self> {
        Self::try_from_uint(i, tcx.data_layout.pointer_size())
    }

    /// Try to convert this ScalarInt to the raw underlying bits.
    /// Fails if the size is wrong. A wrong size usually indicates a bug and should lead to a
    /// panic, but Miri sometimes needs to be resilient to size mismatches,
    /// so the interpreter generally uses this `try` method.
    #[inline]
    pub fn try_to_bits(self, target_size: Size) -> Result<u128, Size> {
        assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
        if target_size.bytes() == u64::from(self.size.get()) {
            self.check_data();
            Ok(self.data)
        } else {
            Err(self.size())
        }
    }
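
    // A hedged usage sketch (assuming the `From<u8>` impl below): a matching size yields the
    // bits, a mismatched size reports the actual size instead of panicking.
    //
    //     let x = ScalarInt::from(42u8);
    //     assert_eq!(x.try_to_bits(Size::from_bytes(1)), Ok(42));
    //     assert_eq!(x.try_to_bits(Size::from_bytes(4)), Err(Size::from_bytes(1)));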

    #[inline]
    pub fn to_bits(self, target_size: Size) -> u128 {
        self.try_to_bits(target_size).unwrap_or_else(|size| {
            bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
        })
    }

    /// Extracts the bits from the scalar without checking the size.
    #[inline]
    pub fn to_bits_unchecked(self) -> u128 {
        self.check_data();
        self.data
    }

    /// Converts the `ScalarInt` to an unsigned integer of the given size.
    /// Panics if the size of the `ScalarInt` is not equal to `size`.
    #[inline]
    pub fn to_uint(self, size: Size) -> u128 {
        self.to_bits(size)
    }

    /// Converts the `ScalarInt` to `u8`.
    /// Panics if the size of the `ScalarInt` is not equal to 1 byte.
    #[inline]
    pub fn to_u8(self) -> u8 {
        self.to_uint(Size::from_bits(8)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to `u16`.
    /// Panics if the size of the `ScalarInt` is not equal to 2 bytes.
    #[inline]
    pub fn to_u16(self) -> u16 {
        self.to_uint(Size::from_bits(16)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to `u32`.
    /// Panics if the size of the `ScalarInt` is not equal to 4 bytes.
    #[inline]
    pub fn to_u32(self) -> u32 {
        self.to_uint(Size::from_bits(32)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to `u64`.
    /// Panics if the size of the `ScalarInt` is not equal to 8 bytes.
    #[inline]
    pub fn to_u64(self) -> u64 {
        self.to_uint(Size::from_bits(64)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to `u128`.
    /// Panics if the size of the `ScalarInt` is not equal to 16 bytes.
    #[inline]
    pub fn to_u128(self) -> u128 {
        self.to_uint(Size::from_bits(128))
    }

    #[inline]
    pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 {
        self.to_uint(tcx.data_layout.pointer_size()).try_into().unwrap()
    }

    #[inline]
    pub fn to_atomic_ordering(self) -> AtomicOrdering {
        use AtomicOrdering::*;
        let val = self.to_u32();
        if val == Relaxed as u32 {
            Relaxed
        } else if val == Release as u32 {
            Release
        } else if val == Acquire as u32 {
            Acquire
        } else if val == AcqRel as u32 {
            AcqRel
        } else if val == SeqCst as u32 {
            SeqCst
        } else {
            panic!("not a valid atomic ordering")
        }
    }

    #[inline]
    pub fn to_simd_alignment(self) -> SimdAlign {
        use SimdAlign::*;
        let val = self.to_u32();
        if val == Unaligned as u32 {
            Unaligned
        } else if val == Element as u32 {
            Element
        } else if val == Vector as u32 {
            Vector
        } else {
            panic!("not a valid simd alignment")
        }
    }

    /// Converts the `ScalarInt` to `bool`.
    /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
    /// Errors if it is not a valid `bool`.
    #[inline]
    pub fn try_to_bool(self) -> Result<bool, ()> {
        match self.to_u8() {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(()),
        }
    }

    /// Converts the `ScalarInt` to a signed integer of the given size.
    /// Panics if the size of the `ScalarInt` is not equal to `size`.
    #[inline]
    pub fn to_int(self, size: Size) -> i128 {
        let b = self.to_bits(size);
        size.sign_extend(b)
    }
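
    // A small sketch of the signed vs. unsigned view of the same bits (hedged; assuming the
    // `From<u8>` impl below): the raw byte 0xff reads back as 255 unsigned and -1 signed.
    //
    //     let x = ScalarInt::from(0xffu8);
    //     assert_eq!(x.to_uint(Size::from_bytes(1)), 0xff);
    //     assert_eq!(x.to_int(Size::from_bytes(1)), -1);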

    /// Converts the `ScalarInt` to i8.
    /// Panics if the size of the `ScalarInt` is not equal to 1 byte.
    pub fn to_i8(self) -> i8 {
        self.to_int(Size::from_bits(8)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to i16.
    /// Panics if the size of the `ScalarInt` is not equal to 2 bytes.
    pub fn to_i16(self) -> i16 {
        self.to_int(Size::from_bits(16)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to i32.
    /// Panics if the size of the `ScalarInt` is not equal to 4 bytes.
    pub fn to_i32(self) -> i32 {
        self.to_int(Size::from_bits(32)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to i64.
    /// Panics if the size of the `ScalarInt` is not equal to 8 bytes.
    pub fn to_i64(self) -> i64 {
        self.to_int(Size::from_bits(64)).try_into().unwrap()
    }

    /// Converts the `ScalarInt` to i128.
    /// Panics if the size of the `ScalarInt` is not equal to 16 bytes.
    pub fn to_i128(self) -> i128 {
        self.to_int(Size::from_bits(128))
    }

    #[inline]
    pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 {
        self.to_int(tcx.data_layout.pointer_size()).try_into().unwrap()
    }

    #[inline]
    pub fn to_float<F: Float>(self) -> F {
        // Going through `to_bits` to check size and truncation.
        F::from_bits(self.to_bits(Size::from_bits(F::BITS)))
    }

    #[inline]
    pub fn to_f16(self) -> Half {
        self.to_float()
    }

    #[inline]
    pub fn to_f32(self) -> Single {
        self.to_float()
    }

    #[inline]
    pub fn to_f64(self) -> Double {
        self.to_float()
    }

    #[inline]
    pub fn to_f128(self) -> Quad {
        self.to_float()
    }
}

macro_rules! from_x_for_scalar_int {
    ($($ty:ty),*) => {
        $(
            impl From<$ty> for ScalarInt {
                #[inline]
                fn from(u: $ty) -> Self {
                    Self {
                        data: u128::from(u),
                        size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
                    }
                }
            }
        )*
    }
}

macro_rules! from_scalar_int_for_x {
    ($($ty:ty),*) => {
        $(
            impl From<ScalarInt> for $ty {
                #[inline]
                fn from(int: ScalarInt) -> Self {
                    // The `unwrap` cannot fail because to_uint (if it succeeds)
                    // is guaranteed to return a value that fits into the size.
                    int.to_uint(Size::from_bytes(size_of::<$ty>()))
                       .try_into().unwrap()
                }
            }
        )*
    }
}

from_x_for_scalar_int!(u8, u16, u32, u64, u128, bool);
from_scalar_int_for_x!(u8, u16, u32, u64, u128);

impl TryFrom<ScalarInt> for bool {
    type Error = ();
    #[inline]
    fn try_from(int: ScalarInt) -> Result<Self, ()> {
        int.try_to_bool()
    }
}

impl From<char> for ScalarInt {
    #[inline]
    fn from(c: char) -> Self {
        (c as u32).into()
    }
}

macro_rules! from_x_for_scalar_int_signed {
    ($($ty:ty),*) => {
        $(
            impl From<$ty> for ScalarInt {
                #[inline]
                fn from(u: $ty) -> Self {
                    Self {
                        data: u128::from(u.cast_unsigned()), // go via the unsigned type of the same size
                        size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
                    }
                }
            }
        )*
    }
}

macro_rules! from_scalar_int_for_x_signed {
    ($($ty:ty),*) => {
        $(
            impl From<ScalarInt> for $ty {
                #[inline]
                fn from(int: ScalarInt) -> Self {
                    // The `unwrap` cannot fail because to_int (if it succeeds)
                    // is guaranteed to return a value that fits into the size.
                    int.to_int(Size::from_bytes(size_of::<$ty>()))
                       .try_into().unwrap()
                }
            }
        )*
    }
}

from_x_for_scalar_int_signed!(i8, i16, i32, i64, i128);
from_scalar_int_for_x_signed!(i8, i16, i32, i64, i128);

impl From<std::cmp::Ordering> for ScalarInt {
    #[inline]
    fn from(c: std::cmp::Ordering) -> Self {
        // Here we rely on `cmp::Ordering` having the same values in host and target!
        ScalarInt::from(c as i8)
    }
}
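
// A hedged note on the cast above: on the host, `cmp::Ordering` is `#[repr(i8)]` with
// `Less = -1`, `Equal = 0`, `Greater = 1`, so the round-trip looks like:
//
//     assert_eq!(ScalarInt::from(std::cmp::Ordering::Less).to_i8(), -1);
//     assert_eq!(ScalarInt::from(std::cmp::Ordering::Greater).to_i8(), 1);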

/// Error returned when a conversion from ScalarInt to char fails.
#[derive(Debug)]
pub struct CharTryFromScalarInt;

impl TryFrom<ScalarInt> for char {
    type Error = CharTryFromScalarInt;

    #[inline]
    fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
        match char::from_u32(int.to_u32()) {
            Some(c) => Ok(c),
            None => Err(CharTryFromScalarInt),
        }
    }
}

impl From<Half> for ScalarInt {
    #[inline]
    fn from(f: Half) -> Self {
        // We trust apfloat to give us properly truncated data.
        Self { data: f.to_bits(), size: NonZero::new((Half::BITS / 8) as u8).unwrap() }
    }
}

impl From<ScalarInt> for Half {
    #[inline]
    fn from(int: ScalarInt) -> Self {
        Self::from_bits(int.to_bits(Size::from_bytes(2)))
    }
}

impl From<Single> for ScalarInt {
    #[inline]
    fn from(f: Single) -> Self {
        // We trust apfloat to give us properly truncated data.
        Self { data: f.to_bits(), size: NonZero::new((Single::BITS / 8) as u8).unwrap() }
    }
}

impl From<ScalarInt> for Single {
    #[inline]
    fn from(int: ScalarInt) -> Self {
        Self::from_bits(int.to_bits(Size::from_bytes(4)))
    }
}

impl From<Double> for ScalarInt {
    #[inline]
    fn from(f: Double) -> Self {
        // We trust apfloat to give us properly truncated data.
        Self { data: f.to_bits(), size: NonZero::new((Double::BITS / 8) as u8).unwrap() }
    }
}

impl From<ScalarInt> for Double {
    #[inline]
    fn from(int: ScalarInt) -> Self {
        Self::from_bits(int.to_bits(Size::from_bytes(8)))
    }
}

impl From<Quad> for ScalarInt {
    #[inline]
    fn from(f: Quad) -> Self {
        // We trust apfloat to give us properly truncated data.
        Self { data: f.to_bits(), size: NonZero::new((Quad::BITS / 8) as u8).unwrap() }
    }
}

impl From<ScalarInt> for Quad {
    #[inline]
    fn from(int: ScalarInt) -> Self {
        Self::from_bits(int.to_bits(Size::from_bytes(16)))
    }
}

impl fmt::Debug for ScalarInt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Dispatch to LowerHex below.
        write!(f, "0x{self:x}")
    }
}

impl fmt::LowerHex for ScalarInt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.check_data();
        if f.alternate() {
            // Like regular ints, alternate flag adds leading `0x`.
            write!(f, "0x")?;
        }
        // Format as hex number wide enough to fit any value of the given `size`.
        // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
        // Using a block `{self.data}` here to force a copy instead of using `self.data`
        // directly, because `write!` takes references to its formatting arguments and
        // would thus borrow `self.data`. Since `Self`
        // is a packed struct, that would create a possibly unaligned reference, which
        // is UB.
        write!(f, "{:01$x}", { self.data }, self.size.get() as usize * 2)
    }
}

impl fmt::UpperHex for ScalarInt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.check_data();
        // Format as hex number wide enough to fit any value of the given `size`.
        // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
        // Using a block `{self.data}` here to force a copy instead of using `self.data`
        // directly, because `write!` takes references to its formatting arguments and
        // would thus borrow `self.data`. Since `Self`
        // is a packed struct, that would create a possibly unaligned reference, which
        // is UB.
        write!(f, "{:01$X}", { self.data }, self.size.get() as usize * 2)
    }
}

impl fmt::Display for ScalarInt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.check_data();
        write!(f, "{}", { self.data })
    }
}