rustc_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
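//!
//! A minimal sketch of typical usage (illustrative only, not compiled as a
//! doctest; the exact types are arbitrary):
//!
//! ```ignore
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! // A typed arena holds values of a single type and drops them all at once
//! // when the arena is dropped.
//! let typed: TypedArena<Vec<u32>> = TypedArena::default();
//! let v: &mut Vec<u32> = typed.alloc(vec![1, 2, 3]);
//! v.push(4);
//!
//! // A dropless arena holds values of many types, as long as none of them
//! // needs to be dropped.
//! let dropless = DroplessArena::default();
//! let n: &mut u32 = dropless.alloc(7);
//! let s: &str = dropless.alloc_str("hello");
//! ```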

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
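
// For example, with 8-byte elements a `TypedArena<u64>` starts with a chunk of
// PAGE / 8 = 512 elements (4 KiB), doubles each new chunk (1024, 2048, ...
// elements), and caps out at HUGE_PAGE / 8 = 262144 elements (2 MiB).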

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
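    ///
    /// Illustrative example (not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// let x: &mut u32 = arena.alloc(42);
    /// *x += 1;
    /// assert_eq!(*x, 43);
    /// ```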
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    /// Allocates storage for `len >= 1` values in this arena, and returns a
    /// raw pointer to the first value's storage.
    ///
    /// # Safety
    ///
    /// Caller must initialize each of the `len` slots to a droppable value
    /// before the arena is dropped.
    ///
    /// In practice, this typically means that the caller must be able to
    /// raw-copy `len` already-initialized values into the slice without any
    /// possibility of panicking.
    ///
    /// FIXME(Zalathar): This is *very* fragile; perhaps we need a different
    /// approach to arena-allocating slices of droppable values.
    #[inline]
    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
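    ///
    /// Illustrative example (not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = TypedArena::default();
    /// let strs: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(strs.len(), 3);
    /// ```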
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();

        // SAFETY: After allocating raw storage for exactly `len` values, we
        // must fully initialize the storage without panicking, and we must
        // also prevent the stale values in the vec from being dropped.
        Ok(unsafe {
            let start_ptr = self.alloc_raw_slice(len);
            // Initialize the newly-allocated storage without panicking.
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            // Prevent the stale values in the vec from being dropped.
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
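
// For example (illustrative): align_up(13, 8) == 16 and align_down(13, 8) == 8.
// Both rely on `align` being a power of two, so that `!(align - 1)` is a mask
// that clears the low bits.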

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}
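
// Illustrative arithmetic for the bump-downwards scheme in `alloc_raw` (an
// example, assuming a 64-bit target where DROPLESS_ALIGNMENT is 8): with
// `end = 0x1000` and a request of size 5 with alignment 8, the size is first
// rounded up to align_up(5, 8) = 8 bytes so that `end` stays aligned to
// DROPLESS_ALIGNMENT, giving 0x1000 - 8 = 0xff8; that address is then rounded
// down to the requested alignment (here already aligned), and the allocation
// succeeds as long as the new `end` does not cross `start`.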

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }
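
    /// Allocates a raw, uninitialized byte range for `layout` and returns a
    /// pointer to its start. Illustrative usage sketch (not compiled as a
    /// doctest):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let ptr = arena.alloc_raw(Layout::new::<u64>()) as *mut u64;
    /// unsafe { ptr.write(42) };
    /// ```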
    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
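    ///
    /// Illustrative example (not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let nums: &mut [u32] = arena.alloc_slice(&[1, 2, 3]);
    /// nums[0] = 10;
    /// ```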
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    /// - Zero-length string
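    ///
    /// Illustrative example (not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("hello");
    /// assert_eq!(s, "hello");
    /// ```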
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators.
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`.
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }
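
    /// Allocates the elements of this iterator into a contiguous slice in the
    /// `DroplessArena`. Illustrative example (not compiled as a doctest):
    ///
    /// ```ignore
    /// let arena = DroplessArena::default();
    /// let evens: &mut [u32] = arena.alloc_from_iter((0..5).map(|i| i * 2));
    /// assert_eq!(evens, &[0, 2, 4, 6, 8]);
    /// ```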
    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
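/// A hypothetical invocation (illustrative only; real callers pass their own
/// modifier token and list of types):
///
/// ```ignore
/// rustc_arena::declare_arena!([
///     [] vec_of_u32: Vec<u32>,
///     [] strings: String,
/// ]);
/// ```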
#[rustc_macro_transparency = "semiopaque"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;