// rustc_arena/lib.rs

//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.

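//! # Example
//!
//! A minimal usage sketch of the two main arena types defined below
//! (illustrative only):
//!
//! ```ignore (illustrative)
//! let typed: TypedArena<Vec<u32>> = TypedArena::default();
//! // The returned reference lives until the arena itself is dropped.
//! let v: &mut Vec<u32> = typed.alloc(vec![1, 2, 3]);
//!
//! let dropless = DroplessArena::default();
//! // `u32` is `Copy` and needs no drop, so the dropless arena can hold it.
//! let n: &mut u32 = dropless.alloc(42);
//! ```
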
// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, hint, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
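// For illustration (assuming `size_of::<T>() == 8`): successive chunk
// capacities would be 512, 1024, 2048, ... elements, i.e. 4 KiB, 8 KiB,
// 16 KiB, ... of storage, capping at 2 MiB (262144 elements) per chunk.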

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        assert!(size_of::<T>() != 0);

        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            let ptr = self.ptr.get();
            // Advance the pointer.
            self.ptr.set(self.ptr.get().add(1));
            // Write into uninitialized memory.
            ptr::write(ptr, object);
            &mut *ptr
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Allocates storage for `len >= 1` values in this arena, and returns a
    /// raw pointer to the first value's storage.
    ///
    /// # Safety
    ///
    /// Caller must initialize each of the `len` slots to a droppable value
    /// before the arena is dropped.
    ///
    /// In practice, this typically means that the caller must be able to
    /// raw-copy `len` already-initialized values into the slice without any
    /// possibility of panicking.
    ///
    /// FIXME(Zalathar): This is *very* fragile; perhaps we need a different
    /// approach to arena-allocating slices of droppable values.
    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
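    ///
    /// # Example
    ///
    /// A usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena: TypedArena<Vec<u32>> = TypedArena::default();
    /// let slice = arena.alloc_from_iter((0..3u32).map(|i| vec![i]));
    /// assert_eq!(slice.len(), 3);
    /// ```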
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();

        // SAFETY: After allocating raw storage for exactly `len` values, we
        // must fully initialize the storage without panicking, and we must
        // also prevent the stale values in the vec from being dropped.
        Ok(unsafe {
            let start_ptr = self.alloc_raw_slice(len);
            // Initialize the newly-allocated storage without panicking.
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            // Prevent the stale values in the vec from being dropped.
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let chunk = chunks.push_mut(ArenaChunk::<T>::new(new_cap));
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        assert_ne!(size_of::<T>(), 0);
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let diff = (end - start) / size_of::<T>();
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
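
// For illustration: `align_down(13, 8) == 8` and `align_up(13, 8) == 16`;
// values that are already aligned are returned unchanged by both.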

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
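    ///
    /// For illustration (values assumed, on a 64-bit target): with
    /// `end == 0x1000`, a request for 20 bytes at 16-byte alignment first
    /// rounds the size up to 24 bytes (so `end` stays aligned to
    /// `DROPLESS_ALIGNMENT`), subtracts to get 0x0fe8, and aligns down to
    /// 0x0fe0, which becomes both the new `end` and the returned pointer.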
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let chunk = chunks.push_mut(ArenaChunk::new(align_up(new_cap, PAGE)));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { hint::assert_unchecked(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    ///  - Zero-sized types
    ///  - Zero-length slices
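    ///
    /// # Example
    ///
    /// A usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let copied: &mut [u32] = arena.alloc_slice(&[1, 2, 3]);
    /// assert_eq!(copied, &[1, 2, 3]);
    /// ```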
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    ///  - Zero-length string
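    ///
    /// # Example
    ///
    /// A usage sketch (illustrative only):
    ///
    /// ```ignore (illustrative)
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("hello");
    /// assert_eq!(s, "hello");
    /// ```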
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and
    /// that the memory stays allocated and not shared for the lifetime of `self`. This must hold
    /// even if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators.
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`.
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
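///
/// # Example
///
/// A hypothetical invocation (illustrative only; the leading `[]` token tree
/// is accepted by the macro but currently unused):
///
/// ```ignore (illustrative)
/// rustc_arena::declare_arena!([
///     [] strings: Vec<String>,
///     [] boxes: Box<u32>,
/// ]);
/// ```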
#[rustc_macro_transparency = "semiopaque"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;