miri/concurrency/cpu_affinity.rs

use rustc_abi::Endian;
use rustc_middle::ty::layout::LayoutOf;

use crate::*;

/// The maximum number of CPUs supported by miri.
///
/// This value is compatible with the libc `CPU_SETSIZE` constant and corresponds to the number
/// of CPUs that a `cpu_set_t` can contain.
///
/// Real machines can have more CPUs than this number, and there exist APIs to set their affinity,
/// but this is not currently supported by miri.
pub const MAX_CPUS: usize = 1024;
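// Worked arithmetic for the constant above (a sanity-check sketch, not part of the mask logic):
// one bit per CPU means 1024 CPUs occupy 1024 / 8 = 128 bytes, which lines up with the 128-byte
// `cpu_set_t` that glibc defines for `CPU_SETSIZE == 1024`.
const _: () = assert!(MAX_CPUS / 8 == 128);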

/// A thread's CPU affinity mask determines the set of CPUs on which it is eligible to run.
// the actual representation depends on the target's endianness and pointer width.
// See CpuAffinityMask::set for details
#[derive(Clone)]
pub(crate) struct CpuAffinityMask([u8; Self::CPU_MASK_BYTES]);

impl CpuAffinityMask {
    pub(crate) const CPU_MASK_BYTES: usize = MAX_CPUS / 8;

    pub fn new<'tcx>(cx: &impl LayoutOf<'tcx>, cpu_count: u32) -> Self {
        let mut this = Self([0; Self::CPU_MASK_BYTES]);

        // the default affinity mask includes only the available CPUs
        for i in 0..cpu_count as usize {
            this.set(cx, i);
        }

        this
    }

    pub fn chunk_size<'tcx>(cx: &impl LayoutOf<'tcx>) -> u64 {
        // The actual representation of the CpuAffinityMask is [c_ulong; _].
        let ulong = helpers::path_ty_layout(cx, &["core", "ffi", "c_ulong"]);
        ulong.size.bytes()
    }

    fn set<'tcx>(&mut self, cx: &impl LayoutOf<'tcx>, cpu: usize) {
        // we silently ignore CPUs that are out of bounds. This matches the behavior of
        // `sched_setaffinity` with a mask that specifies more than `CPU_SETSIZE` CPUs.
        if cpu >= MAX_CPUS {
            return;
        }

        // The actual representation of the CpuAffinityMask is [c_ulong; _].
        // Within the array elements, we need to use the endianness of the target.
        let target = &cx.tcx().sess.target;
        match Self::chunk_size(cx) {
            4 => {
                let start = cpu / 32 * 4; // first byte of the correct u32
                let chunk = self.0[start..].first_chunk_mut::<4>().unwrap();
                let offset = cpu % 32;
                *chunk = match target.options.endian {
                    Endian::Little => (u32::from_le_bytes(*chunk) | (1 << offset)).to_le_bytes(),
                    Endian::Big => (u32::from_be_bytes(*chunk) | (1 << offset)).to_be_bytes(),
                };
            }
            8 => {
                let start = cpu / 64 * 8; // first byte of the correct u64
                let chunk = self.0[start..].first_chunk_mut::<8>().unwrap();
                let offset = cpu % 64;
                *chunk = match target.options.endian {
                    Endian::Little => (u64::from_le_bytes(*chunk) | (1 << offset)).to_le_bytes(),
                    Endian::Big => (u64::from_be_bytes(*chunk) | (1 << offset)).to_be_bytes(),
                };
            }
            other => bug!("chunk size not supported: {other}"),
        };
    }

    pub fn as_slice(&self) -> &[u8] {
        self.0.as_slice()
    }

    pub fn from_array<'tcx>(
        cx: &impl LayoutOf<'tcx>,
        cpu_count: u32,
        bytes: [u8; Self::CPU_MASK_BYTES],
    ) -> Option<Self> {
        // mask by what CPUs are actually available
        let default = Self::new(cx, cpu_count);
        let masked = std::array::from_fn(|i| bytes[i] & default.0[i]);

        // at least one CPU must remain set for the input to be valid
        masked.iter().any(|b| *b != 0).then_some(Self(masked))
    }
}
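
// A self-contained sketch of the bit placement performed by `CpuAffinityMask::set`, assuming
// 8-byte (`u64`) chunks and a little-endian target. `demo_set` is a hypothetical helper for
// illustration only; the real method also needs a `LayoutOf` context to pick the chunk size
// and endianness, which is elided here.
#[cfg(test)]
mod bit_placement_sketch {
    /// Set the bit for `cpu` in a mask made of little-endian 8-byte chunks.
    fn demo_set(mask: &mut [u8], cpu: usize) {
        let start = cpu / 64 * 8; // first byte of the u64 chunk containing `cpu`
        let chunk: &mut [u8; 8] = (&mut mask[start..start + 8]).try_into().unwrap();
        let offset = cpu % 64;
        *chunk = (u64::from_le_bytes(*chunk) | (1 << offset)).to_le_bytes();
    }

    #[test]
    fn cpu_70_sets_bit_6_of_byte_8() {
        let mut mask = [0u8; 16];
        demo_set(&mut mask, 70);
        // 70 / 64 = 1 selects the second u64 (bytes 8..16); 70 % 64 = 6 selects bit 6,
        // which on a little-endian target lives in the lowest byte of that chunk.
        assert_eq!(mask[8], 1 << 6);
        assert!(mask[..8].iter().chain(&mask[9..]).all(|b| *b == 0));
    }

    #[test]
    fn masking_keeps_only_available_cpus() {
        // Mirrors the validity check in `from_array` (hypothetical values): AND the requested
        // mask with the available-CPU mask, then require at least one bit to survive.
        let available: [u8; 2] = [0b0000_1111, 0]; // CPUs 0..=3 available
        let requested: [u8; 2] = [0b0011_0000, 0]; // only CPUs 4 and 5 requested
        let masked: [u8; 2] = std::array::from_fn(|i| requested[i] & available[i]);
        assert!(masked.iter().all(|b| *b == 0)); // no overlap, so this input would be rejected
    }
}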