// miri/concurrency/cpu_affinity.rs
1use rustc_abi::Endian;
2use rustc_middle::ty::layout::LayoutOf;
3
4use crate::*;
5
/// Maximum number of CPUs a `CpuAffinityMask` can represent (one bit each).
/// CPUs at or above this index are silently ignored by `CpuAffinityMask::set`.
/// NOTE(review): 1024 presumably mirrors libc's `CPU_SETSIZE` for `cpu_set_t` — confirm.
pub const MAX_CPUS: usize = 1024;
14
/// A CPU affinity bitmask: one bit per CPU, `MAX_CPUS` bits total, stored as
/// raw bytes. The byte layout within each chunk depends on the target's
/// `c_ulong` size and endianness — see `CpuAffinityMask::set` for details.
#[derive(Clone)]
pub(crate) struct CpuAffinityMask([u8; Self::CPU_MASK_BYTES]);
20
21impl CpuAffinityMask {
22 pub(crate) const CPU_MASK_BYTES: usize = MAX_CPUS / 8;
23
24 pub fn new<'tcx>(cx: &impl LayoutOf<'tcx>, cpu_count: u32) -> Self {
25 let mut this = Self([0; Self::CPU_MASK_BYTES]);
26
27 for i in 0..cpu_count as usize {
29 this.set(cx, i);
30 }
31
32 this
33 }
34
35 pub fn chunk_size<'tcx>(cx: &impl LayoutOf<'tcx>) -> u64 {
36 let ulong = helpers::path_ty_layout(cx, &["core", "ffi", "c_ulong"]);
38 ulong.size.bytes()
39 }
40
41 fn set<'tcx>(&mut self, cx: &impl LayoutOf<'tcx>, cpu: usize) {
42 if cpu >= MAX_CPUS {
45 return;
46 }
47
48 let target = &cx.tcx().sess.target;
51 match Self::chunk_size(cx) {
52 4 => {
53 let start = cpu / 32 * 4; let chunk = self.0[start..].first_chunk_mut::<4>().unwrap();
55 let offset = cpu % 32;
56 *chunk = match target.options.endian {
57 Endian::Little => (u32::from_le_bytes(*chunk) | (1 << offset)).to_le_bytes(),
58 Endian::Big => (u32::from_be_bytes(*chunk) | (1 << offset)).to_be_bytes(),
59 };
60 }
61 8 => {
62 let start = cpu / 64 * 8; let chunk = self.0[start..].first_chunk_mut::<8>().unwrap();
64 let offset = cpu % 64;
65 *chunk = match target.options.endian {
66 Endian::Little => (u64::from_le_bytes(*chunk) | (1 << offset)).to_le_bytes(),
67 Endian::Big => (u64::from_be_bytes(*chunk) | (1 << offset)).to_be_bytes(),
68 };
69 }
70 other => bug!("chunk size not supported: {other}"),
71 };
72 }
73
74 pub fn as_slice(&self) -> &[u8] {
75 self.0.as_slice()
76 }
77
78 pub fn from_array<'tcx>(
79 cx: &impl LayoutOf<'tcx>,
80 cpu_count: u32,
81 bytes: [u8; Self::CPU_MASK_BYTES],
82 ) -> Option<Self> {
83 let default = Self::new(cx, cpu_count);
85 let masked = std::array::from_fn(|i| bytes[i] & default.0[i]);
86
87 masked.iter().any(|b| *b != 0).then_some(Self(masked))
89 }
90}