rustc_middle/mir/interpret/allocation/provenance_map.rs

//! Store the provenance for each byte in the range, with a more efficient
//! representation for the common case where PTR_SIZE consecutive bytes have the same provenance.

use std::cmp;
use std::ops::Range;

use rustc_abi::{HasDataLayout, Size};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_macros::HashStable;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use tracing::trace;

use super::{AllocError, AllocRange, AllocResult, CtfeProvenance, Provenance, alloc_range};

/// Stores the provenance information of pointers stored in memory.
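///
/// For illustration (not normative, assuming an 8-byte pointer size): storing a pointer at
/// offset 16 adds a single entry `16 -> prov` to `ptrs`, covering bytes `16..24`. If some of
/// those bytes are later overwritten, the provenance of the surviving bytes can only be kept
/// in `bytes`, and only when `Prov::OFFSET_IS_ADDR` is true (see `clear` below).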
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable)]
pub struct ProvenanceMap<Prov = CtfeProvenance> {
    /// `Provenance` in this map applies from the given offset for an entire pointer-size worth of
    /// bytes. Two entries in this map are always at least a pointer size apart.
    ptrs: SortedMap<Size, Prov>,
    /// Provenance in this map only applies to the given single byte.
    /// This map is disjoint from the previous. It will always be empty when
    /// `Prov::OFFSET_IS_ADDR` is false.
    bytes: Option<Box<SortedMap<Size, Prov>>>,
}

// These impls are generic over `Prov` since `CtfeProvenance` is only decodable/encodable
// for some particular `D`/`S`.
impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> {
    fn decode(d: &mut D) -> Self {
        assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized
        Self { ptrs: Decodable::decode(d), bytes: None }
    }
}
impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> {
    fn encode(&self, s: &mut S) {
        let Self { ptrs, bytes } = self;
        assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized
        debug_assert!(bytes.is_none()); // without `OFFSET_IS_ADDR`, this is always empty
        ptrs.encode(s)
    }
}

impl<Prov> ProvenanceMap<Prov> {
    pub fn new() -> Self {
        ProvenanceMap { ptrs: SortedMap::new(), bytes: None }
    }

    /// The caller must guarantee that the given provenance list is already sorted
    /// by address and contains no duplicates.
    pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self {
        ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None }
    }
}

impl ProvenanceMap {
    /// Give access to the ptr-sized provenances (which can also be thought of as relocations, and
    /// indeed that is how codegen treats them).
    ///
    /// Only exposed with `CtfeProvenance` provenance, since it panics if there is bytewise provenance.
    #[inline]
    pub fn ptrs(&self) -> &SortedMap<Size, CtfeProvenance> {
        debug_assert!(self.bytes.is_none()); // `CtfeProvenance::OFFSET_IS_ADDR` is false so this cannot fail
        &self.ptrs
    }
}

impl<Prov: Provenance> ProvenanceMap<Prov> {
    fn adjusted_range_ptrs(range: AllocRange, cx: &impl HasDataLayout) -> Range<Size> {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
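        // For illustration (not normative, assuming an 8-byte pointer size): a query for bytes
        // `10..12` is widened to the key range `3..12`, so a `ptrs` entry at offset 5 (which
        // covers bytes `5..13` and thus overlaps the queried range) is still found.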
        let adjusted_start = Size::from_bytes(
            range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1),
        );
        adjusted_start..range.end()
    }

    /// Returns all ptr-sized provenance in the given range.
    /// If the range has length 0, returns provenance that crosses the edge between `start-1` and
    /// `start`.
    pub(super) fn range_ptrs_get(
        &self,
        range: AllocRange,
        cx: &impl HasDataLayout,
    ) -> &[(Size, Prov)] {
        self.ptrs.range(Self::adjusted_range_ptrs(range, cx))
    }

    /// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster.
    pub(super) fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
        self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx))
    }

    /// Returns all byte-wise provenance in the given range.
    fn range_bytes_get(&self, range: AllocRange) -> &[(Size, Prov)] {
        if let Some(bytes) = self.bytes.as_ref() {
            bytes.range(range.start..range.end())
        } else {
            &[]
        }
    }

    /// Same as `range_bytes_get(range).is_empty()`, but faster.
    fn range_bytes_is_empty(&self, range: AllocRange) -> bool {
        self.bytes.as_ref().is_none_or(|bytes| bytes.range_is_empty(range.start..range.end()))
    }

    /// Get the provenance of a single byte.
    pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> {
        let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx);
        debug_assert!(prov.len() <= 1);
        if let Some(entry) = prov.first() {
            // There is ptr-sized provenance covering this byte; the bytewise map must not
            // have an entry for it as well, since the two maps are disjoint.
            debug_assert!(self.bytes.as_ref().is_none_or(|b| !b.contains_key(&offset)));
            Some(entry.1)
        } else {
            // Look up per-byte provenance.
            self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
        }
    }

    /// Check if there is ptr-sized provenance at the given index.
    /// Does not mean anything for bytewise provenance! But can be useful as an optimization.
    pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
        self.ptrs.get(&offset).copied()
    }

    /// Returns whether this allocation has provenance overlapping with the given range.
    ///
    /// Note: this function exists so that the range-based provenance getters do not need to be
    /// public, in order to somewhat limit access to provenance outside of the `Allocation`
    /// abstraction.
    pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
        self.range_ptrs_is_empty(range, cx) && self.range_bytes_is_empty(range)
    }

    /// Yields all the provenances stored in this map.
    pub fn provenances(&self) -> impl Iterator<Item = Prov> {
        let bytes = self.bytes.iter().flat_map(|b| b.values());
        self.ptrs.values().chain(bytes).copied()
    }

    pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
        debug_assert!(self.range_empty(alloc_range(offset, cx.data_layout().pointer_size), cx));
        self.ptrs.insert(offset, prov);
    }

    /// Removes all provenance inside the given range.
    /// If there is provenance overlapping with the edges, might result in an error.
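    ///
    /// For illustration (not normative, assuming an 8-byte pointer size): clearing bytes
    /// `4..12` while a `ptrs` entry at offset 0 covers bytes `0..8` either fails with
    /// `OverwritePartialPointer` (when `Prov::OFFSET_IS_ADDR` is false) or moves the
    /// provenance of the surviving bytes `0..4` into the bytewise map.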
    pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult {
        let start = range.start;
        let end = range.end();
        // Clear the bytewise part -- this is easy.
        if Prov::OFFSET_IS_ADDR {
            if let Some(bytes) = self.bytes.as_mut() {
                bytes.remove_range(start..end);
            }
        } else {
            debug_assert!(self.bytes.is_none());
        }

        // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
        // provenance that overlaps with the given range.
        let (first, last) = {
            // Find all provenance overlapping the given range.
            if self.range_ptrs_is_empty(range, cx) {
                // No provenance in this range, we are done. This is the common case.
                return Ok(());
            }

            // This redoes some of the work of `range_ptrs_is_empty`, but this path is much
            // colder than the early return above, so it's worth it.
            let provenance = self.range_ptrs_get(range, cx);
            (
                provenance.first().unwrap().0,
                provenance.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };

        // We need to handle clearing the provenance from parts of a pointer.
        if first < start {
            if !Prov::OFFSET_IS_ADDR {
                // We can't split up the provenance into less than a pointer.
                return Err(AllocError::OverwritePartialPointer(first));
            }
            // Insert the remaining part in the bytewise provenance.
            let prov = self.ptrs[&first];
            let bytes = self.bytes.get_or_insert_with(Box::default);
            for offset in first..start {
                bytes.insert(offset, prov);
            }
        }
        if last > end {
            let begin_of_last = last - cx.data_layout().pointer_size;
            if !Prov::OFFSET_IS_ADDR {
                // We can't split up the provenance into less than a pointer.
                return Err(AllocError::OverwritePartialPointer(begin_of_last));
            }
            // Insert the remaining part in the bytewise provenance.
            let prov = self.ptrs[&begin_of_last];
            let bytes = self.bytes.get_or_insert_with(Box::default);
            for offset in end..last {
                bytes.insert(offset, prov);
            }
        }

        // Forget all the provenance.
        // Since provenance entries do not overlap, we know that removing until `last` (exclusive) is fine,
        // i.e., this will not remove any other provenance just after the ones we care about.
        self.ptrs.remove_range(first..last);

        Ok(())
    }

    /// Overwrites all provenance in the allocation with wildcard provenance.
    ///
    /// Provided for usage in Miri; panics if `Prov::OFFSET_IS_ADDR` is false.
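    ///
    /// For illustration: after `write_wildcards(8)`, `ptrs` is empty and `bytes` maps every
    /// offset in `0..8` to `Prov::WILDCARD`.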
    pub fn write_wildcards(&mut self, alloc_size: usize) {
        assert!(
            Prov::OFFSET_IS_ADDR,
            "writing wildcard provenance is not supported when `OFFSET_IS_ADDR` is false"
        );
        let wildcard = Prov::WILDCARD.unwrap();

        // Remove all pointer provenances, then write wildcards into the whole byte range.
        self.ptrs.clear();
        let last = Size::from_bytes(alloc_size);
        let bytes = self.bytes.get_or_insert_with(Box::default);
        for offset in Size::ZERO..last {
            bytes.insert(offset, wildcard);
        }
    }
}

/// A partial, owned list of provenance to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
pub struct ProvenanceCopy<Prov> {
    dest_ptrs: Option<Box<[(Size, Prov)]>>,
    dest_bytes: Option<Box<[(Size, Prov)]>>,
}

impl<Prov: Provenance> ProvenanceMap<Prov> {
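    /// Prepares a copy of the provenance within `src` for insertion into a destination
    /// allocation: the returned lists have their offsets shifted so that the copy starts at
    /// `dest`, repeated `count` times. Errors if the copy would cut a pointer-sized provenance
    /// in half while `Prov::OFFSET_IS_ADDR` is false.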
    pub fn prepare_copy(
        &self,
        src: AllocRange,
        dest: Size,
        count: u64,
        cx: &impl HasDataLayout,
    ) -> AllocResult<ProvenanceCopy<Prov>> {
        let shift_offset = move |idx, offset| {
            // compute offset for current repetition
            let dest_offset = dest + src.size * idx; // `Size` operations
            // shift offsets from source allocation to destination allocation
            (offset - src.start) + dest_offset // `Size` operations
        };
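        // For illustration (not normative): copying `src = 8..16` to `dest = 100` with
        // `count = 2` maps a provenance entry at source offset 10 to destination offset 102
        // for the first repetition (`idx = 0`) and to 110 for the second (`idx = 1`).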
        let ptr_size = cx.data_layout().pointer_size;

        // # Pointer-sized provenances
        // Get the provenances that are entirely within this range.
        // (Different from `range_ptrs_get`, which asks whether they overlap the range.)
        // Only makes sense if we are copying at least one pointer worth of bytes.
        let mut dest_ptrs_box = None;
        if src.size >= ptr_size {
            let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
            let ptrs = self.ptrs.range(src.start..adjusted_end);
            // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
            // is mostly filled with redundant information since it's just N copies of the same `Prov`s
            // at slightly adjusted offsets. The reason we do this is so that in `apply_copy`
            // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
            // the right sequence of provenance for all N copies.
            // Basically, this large array would have to be created anyway in the target allocation.
            let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
            for i in 0..count {
                dest_ptrs
                    .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
            }
            debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity());
            dest_ptrs_box = Some(dest_ptrs.into_boxed_slice());
        };

        // # Byte-sized provenances
        // This includes the existing bytewise provenance in the range, and ptr provenance
        // that overlaps with the begin/end of the range.
        let mut dest_bytes_box = None;
        let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first();
        let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first();
        if !Prov::OFFSET_IS_ADDR {
            // There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
            if let Some(entry) = begin_overlap {
                return Err(AllocError::ReadPartialPointer(entry.0));
            }
            if let Some(entry) = end_overlap {
                return Err(AllocError::ReadPartialPointer(entry.0));
            }
            debug_assert!(self.bytes.is_none());
        } else {
            let mut bytes = Vec::new();
            // First, if there is a part of a pointer at the start, add that.
            if let Some(entry) = begin_overlap {
                trace!("start overlapping entry: {entry:?}");
                // For really small copies, make sure we don't run off the end of the `src` range.
                let entry_end = cmp::min(entry.0 + ptr_size, src.end());
                for offset in src.start..entry_end {
                    bytes.push((offset, entry.1));
                }
            } else {
                trace!("no start overlapping entry");
            }

            // Then the main part, bytewise provenance from `self.bytes`.
            bytes.extend(self.range_bytes_get(src));

            // And finally possibly parts of a pointer at the end.
            if let Some(entry) = end_overlap {
                trace!("end overlapping entry: {entry:?}");
                // For really small copies, make sure we don't start before `src` does.
                let entry_start = cmp::max(entry.0, src.start);
                for offset in entry_start..src.end() {
                    if bytes.last().is_none_or(|bytes_entry| bytes_entry.0 < offset) {
                        // The last entry, if it exists, has a lower offset than us.
                        bytes.push((offset, entry.1));
                    } else {
                        // There already is an entry for this offset in there! This can happen when the
                        // start and end range checks actually end up hitting the same pointer, so we
                        // already added this in the "pointer at the start" part above.
                        assert!(entry.0 <= src.start);
                    }
                }
            } else {
                trace!("no end overlapping entry");
            }
            trace!("byte provenances: {bytes:?}");

            // And again a buffer for the new list on the target side.
            let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
            for i in 0..count {
                dest_bytes
                    .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
            }
            debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity());
            dest_bytes_box = Some(dest_bytes.into_boxed_slice());
        }

        Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
    }

    /// Applies a provenance copy.
    /// The affected range, as defined in the parameters to `prepare_copy`, is expected
    /// to be clear of provenance.
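    ///
    /// Typical flow (an assumption about usage, not something enforced here): call
    /// `prepare_copy` on the source map, ensure the destination range is clear of provenance,
    /// then call `apply_copy` on the destination map.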
    pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
        if let Some(dest_ptrs) = copy.dest_ptrs {
            self.ptrs.insert_presorted(dest_ptrs.into());
        }
        if Prov::OFFSET_IS_ADDR {
            if let Some(dest_bytes) = copy.dest_bytes
                && !dest_bytes.is_empty()
            {
                self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
            }
        } else {
            debug_assert!(copy.dest_bytes.is_none());
        }
    }
}