rustc_arena/
lib.rs

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
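//!
//! A minimal usage sketch (illustrative only; `TypedArena` and `DroplessArena`
//! are the two main entry points):
//!
//! ```ignore (illustrative)
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! let typed: TypedArena<Vec<u32>> = TypedArena::default();
//! let v: &mut Vec<u32> = typed.alloc(vec![1, 2, 3]); // dropped when `typed` is dropped
//!
//! let dropless = DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(42); // `u32` is `Copy`, so no drop is needed
//! ```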

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
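
// As an illustrative example of the schedule above (ignoring the adjustment made
// for unusually large single allocations): a `DroplessArena` would use chunk sizes
// of 4096, 8192, 16384, ... bytes, reaching 2 MiB and then staying at 2 MiB for
// every later chunk.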

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
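    ///
    /// A minimal usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena: TypedArena<String> = TypedArena::default();
    /// let s: &mut String = arena.alloc(String::from("hello"));
    /// s.push_str(", world");
    /// // `s` is dropped when `arena` is dropped.
    /// ```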
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
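    ///
    /// A minimal usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena: TypedArena<String> = TypedArena::default();
    /// let strings: &mut [String] =
    ///     arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(strings.len(), 3);
    /// ```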
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't produce as many elements as we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        Ok(unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
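
// Illustrative examples of the helpers above (assuming an 8-byte alignment):
// `align_down(13, 8) == 8` and `align_up(13, 8) == 16`; already-aligned values are
// returned unchanged, e.g. `align_up(16, 8) == 16`.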

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }
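
    // An illustrative walk-through of `alloc_raw` with hypothetical numbers, assuming a
    // 64-bit target where `DROPLESS_ALIGNMENT` is 8: with `end` at address 0x1040, a
    // request of size 20 and alignment 4 first rounds the size up to 24 bytes (so `end`
    // stays 8-aligned), computes 0x1040 - 24 = 0x1028, rounds that down to the requested
    // 4-byte alignment (no change), and returns 0x1028 as both the allocation and the
    // new `end`.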

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
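    ///
    /// A minimal usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let copied: &mut [u32] = arena.alloc_slice(&[1, 2, 3]);
    /// copied[0] = 10;
    /// ```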
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
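    ///
    /// A minimal usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("hello");
    /// assert_eq!(s, "hello");
    /// ```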
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
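/// A hypothetical invocation sketch (the field names and types are illustrative;
/// each entry carries a leading token tree, which appears unused here):
///
/// ```ignore (illustrative)
/// rustc_arena::declare_arena!([
///     [] my_dropping_type: MyDroppingType,
///     [] my_other_type: Vec<MyOtherType>,
/// ]);
/// ```
///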
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;