// rustc_arena/lib.rs

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
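//!
//! # Example
//!
//! A minimal usage sketch (illustrative; this is a rustc-internal crate, so
//! the example is not compiled as a doc-test):
//!
//! ```ignore (illustrative)
//! use rustc_arena::TypedArena;
//!
//! let arena = TypedArena::default();
//! // Everything allocated here lives until `arena` is dropped.
//! let x: &mut u32 = arena.alloc(41);
//! *x += 1;
//! assert_eq!(*x, 42);
//! ```
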
// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![doc(rust_logo)]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(maybe_uninit_slice)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(rustdoc_internals)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for hundreds of MiB. Note also that the chosen sizes
// match the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

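// As a worked example: for `size_of::<T>() == 16`, chunk capacities go
// 4096 / 16 = 256 elements, then 512, 1024, ..., capped at
// 2 * 1024 * 1024 / 16 = 131072 elements (one 2 MiB chunk) from then on.
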
/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to null so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
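    ///
    /// A minimal usage sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let v = arena.alloc(vec![1, 2, 3]);
    /// assert_eq!(v.len(), 3);
    /// ```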
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
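    ///
    /// For example (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena = TypedArena::default();
    /// let strs = arena.alloc_from_iter((0..4).map(|i| i.to_string()));
    /// assert_eq!(strs.len(), 4);
    /// ```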
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        Ok(unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

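// For a power-of-two `align`, `!(align - 1)` masks off the low bits, so e.g.
// align_down(13, 8) == 8, while align_up(5, 8) == (5 + 7) & !7 == 8: adding
// `align - 1` before masking is what rounds up instead of down.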
#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to null so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

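    /// Allocates a region of `layout.size()` bytes with `layout.align()`
    /// alignment and returns a raw pointer to it. The memory stays valid
    /// until the arena itself is dropped. Panics if `layout.size()` is zero.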
    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

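    /// Allocates an object on the `DroplessArena`, returning a reference to
    /// it. Panics if `T` needs drop or is zero-sized.
    ///
    /// A minimal usage sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let pair: &mut (u32, u32) = arena.alloc((1, 2));
    /// pair.1 = 3;
    /// ```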
    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
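    ///
    /// A usage sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let s = arena.alloc_slice(&[1u32, 2, 3]);
    /// assert_eq!(s, &[1, 2, 3]);
    /// ```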
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
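    ///
    /// A usage sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("hello");
    /// assert_eq!(s, "hello");
    /// ```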
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// the memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators.
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`.
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

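    /// Allocates the elements of an iterator into a contiguous slice in the
    /// `DroplessArena`, returning a mutable reference to the slice. Panics if
    /// `T` is zero-sized or needs drop.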
    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
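/// For example (with a hypothetical droppable type `MyDrop<'tcx>`, shown for
/// illustration):
///
/// ```ignore (illustrative)
/// declare_arena!([
///     [] my_drop: MyDrop<'tcx>,
/// ]);
/// ```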
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;