rustc_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
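//!
//! # Example
//!
//! A minimal usage sketch (illustrative; marked `ignore` because this crate is
//! internal to rustc):
//!
//! ```ignore (illustrative)
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! // A typed arena holds values of a single type, including droppable ones.
//! let typed = TypedArena::<String>::default();
//! let s: &mut String = typed.alloc(String::from("hello"));
//!
//! // A dropless arena holds values of many types, as long as they don't need `Drop`.
//! let dropless = DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(42);
//! let xs: &mut [u32] = dropless.alloc_slice(&[1, 2, 3]);
//! ```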

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, hint, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
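//
// For example, a `TypedArena<u8>` allocates chunks of 4096, 8192, 16384, ...,
// 1048576, 2097152 bytes, and every chunk after that stays at 2097152 bytes
// (2 MiB), assuming no single `grow` request exceeds the current cap.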
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
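    ///
    /// A minimal sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena: TypedArena<u32> = TypedArena::default();
    /// let x = arena.alloc(41);
    /// *x += 1;
    /// assert_eq!(*x, 42);
    /// ```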
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Allocates storage for `len >= 1` values in this arena, and returns a
    /// raw pointer to the first value's storage.
    ///
    /// # Safety
    ///
    /// Caller must initialize each of the `len` slots to a droppable value
    /// before the arena is dropped.
    ///
    /// In practice, this typically means that the caller must be able to
    /// raw-copy `len` already-initialized values into the slice without any
    /// possibility of panicking.
    ///
    /// FIXME(Zalathar): This is *very* fragile; perhaps we need a different
    /// approach to arena-allocating slices of droppable values.
    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
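    ///
    /// A minimal sketch (illustrative):
    ///
    /// ```ignore (illustrative)
    /// let arena: TypedArena<String> = TypedArena::default();
    /// let slice = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(slice, ["0", "1", "2"]);
    /// ```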
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();

        // SAFETY: After allocating raw storage for exactly `len` values, we
        // must fully initialize the storage without panicking, and we must
        // also prevent the stale values in the vec from being dropped.
        Ok(unsafe {
            let start_ptr = self.alloc_raw_slice(len);
            // Initialize the newly-allocated storage without panicking.
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            // Prevent the stale values in the vec from being dropped.
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the
                // previous chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
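
// For example: `align_down(0x1007, 8) == 0x1000` and `align_up(0x1001, 8) == 0x1008`;
// values that are already aligned are returned unchanged by both functions.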

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the
                // previous chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
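        //
        // For example (illustrative arithmetic, on a 64-bit target where
        // DROPLESS_ALIGNMENT is 8): with `end = 0x2000` and a request for 13
        // bytes at alignment 8, the size is first rounded up to 16 so that
        // `self.end` stays aligned, and the new end becomes
        // `align_down(0x2000 - 16, 8) = 0x1FF0`.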
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { hint::assert_unchecked(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT: `end` and
                    // `bytes` are both aligned to it, so `sub` is too, and
                    // `align_down` never decreases a power-of-two alignment.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it.
    ///
    /// Panics:
    ///
    /// - Zero-length strings
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us,
                        // even though it was supposed to give us `len`.
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
#[rustc_macro_transparency = "semiopaque"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }

    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;