//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
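//!
//! A minimal usage sketch (both arena types are defined later in this file):
//!
//! ```
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! // `TypedArena` holds values of a single type, including types that need `Drop`.
//! let typed: TypedArena<Vec<u8>> = TypedArena::default();
//! let v = typed.alloc(vec![1, 2, 3]);
//! v.push(4);
//!
//! // `DroplessArena` holds values of many types, as long as they don't need `Drop`.
//! let dropless = DroplessArena::default();
//! let n = dropless.alloc(42_u32);
//! let s = dropless.alloc_str("hello");
//! # let _ = (n, s);
//! ```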

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
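
// For example, with 8-byte elements a `TypedArena` starts with a 512-element
// (4 KiB) chunk and then allocates chunks of 1024, 2048, ... elements, until
// each new chunk holds 2 MiB (262144 elements), assuming no single allocation
// request is larger than the current cap.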

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        Ok(unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
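
// As a concrete example of the helpers above: `align_down(13, 8)` returns 8
// and `align_up(13, 8)` returns 16, while already-aligned values are returned
// unchanged.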

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    /// - Zero-length string
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse their fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
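/// The invocation below is an illustrative sketch only; the type names are
/// made up, and real callers pass the compiler's own types:
///
/// ```text
/// rustc_arena::declare_arena!([
///     [] spans: Vec<MySpan<'tcx>>,
///     [] dropped: MyTypeWithDrop<'tcx>,
/// ]);
/// ```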
#[rustc_macro_transparency = "semiopaque"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;