rustc_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
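//!
//! # Example
//!
//! An illustrative sketch of basic usage (hypothetical values; `TypedArena`
//! and `DroplessArena` are the types defined below):
//!
//! ```
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! let typed = TypedArena::<Vec<u32>>::default();
//! // The arena owns the value; the `Vec` is dropped when `typed` is dropped.
//! let v: &mut Vec<u32> = typed.alloc(vec![1, 2, 3]);
//! v.push(4);
//!
//! let dropless = DroplessArena::default();
//! // `Copy` data can go in the dropless arena; nothing here needs `Drop`.
//! let xs: &mut [u32] = dropless.alloc_slice(&[1, 2, 3]);
//! assert_eq!(xs, &[1, 2, 3]);
//! ```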

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(test(no_crate_inject, attr(deny(warnings), allow(internal_features))))]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(maybe_uninit_slice)]
#![feature(never_type)]
#![feature(rustc_attrs)]
#![feature(unwrap_infallible)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
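
// For example (an illustrative sketch of the schedule above): with
// `size_of::<T>() == 8`, chunk capacities in elements run 512, 1024, 2048,
// ..., capped at HUGE_PAGE / 8 = 262144 elements (2 MiB), since the first
// chunk holds PAGE / 8 = 512 elements and each later chunk doubles its
// predecessor.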

/// An arena that can hold objects of only one type.
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
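    ///
    /// Illustrative example (a sketch of typical usage):
    ///
    /// ```
    /// use rustc_arena::TypedArena;
    ///
    /// let arena = TypedArena::default();
    /// let s: &mut String = arena.alloc(String::from("hello"));
    /// s.push_str(", world");
    /// ```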
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
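    ///
    /// Illustrative example (a sketch of typical usage):
    ///
    /// ```
    /// use rustc_arena::TypedArena;
    ///
    /// let arena = TypedArena::default();
    /// let strings: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(strings.len(), 3);
    /// ```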
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        self.try_alloc_from_iter(iter.into_iter().map(Ok::<T, !>)).into_ok()
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
    #[inline]
    pub fn try_alloc_from_iter<E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `DroplessArena`, we cannot reuse its fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        Ok(unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
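
// For example (illustrative): with `align == 8`, `align_down(13, 8) == 8` and
// `align_up(13, 8) == 16`, while already-aligned values are unchanged:
// `align_up(16, 8) == 16`. The mask works because for a power-of-two `align`,
// `!(align - 1)` clears the low `log2(align)` bits.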

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

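    /// Allocates an object in the `DroplessArena`, returning a reference to it.
    ///
    /// Illustrative example (a sketch; `T` must not need `Drop` and must not
    /// be zero-sized):
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let n: &mut u64 = arena.alloc(7);
    /// *n += 1;
    /// assert_eq!(*n, 8);
    /// ```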
    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
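    ///
    /// Illustrative example (a sketch of typical usage):
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let xs: &mut [u8] = arena.alloc_slice(&[1, 2, 3]);
    /// xs[0] = 9;
    /// assert_eq!(xs, &[9, 2, 3]);
    /// ```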
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it.
    ///
    /// Panics:
    ///
    /// - Zero-length string
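    ///
    /// Illustrative example (a sketch of typical usage):
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("hello");
    /// assert_eq!(s, "hello");
    /// ```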
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators.
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`.
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

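    /// Allocates the elements of this iterator into a contiguous slice in the `DroplessArena`.
    ///
    /// Illustrative example (a sketch; the element type must be `!needs_drop`
    /// and not zero-sized):
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let squares: &mut [u32] = arena.alloc_from_iter((1..4).map(|i| i * i));
    /// assert_eq!(squares, &[1, 4, 9]);
    /// ```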
    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => outline(move || self.try_alloc_from_iter(iter.map(Ok::<T, !>)).into_ok()),
        }
    }

    #[inline]
    pub fn try_alloc_from_iter<T, E>(
        &self,
        iter: impl IntoIterator<Item = Result<T, E>>,
    ) -> Result<&mut [T], E> {
        // Despite the similarity with `alloc_from_iter`, we cannot reuse its fast case, as we
        // cannot know the minimum length of the iterator in this case.
        assert!(size_of::<T>() != 0);

        // Takes care of reentrancy.
        let vec: Result<SmallVec<[T; 8]>, E> = iter.into_iter().collect();
        let mut vec = vec?;
        if vec.is_empty() {
            return Ok(&mut []);
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        Ok(unsafe {
            let start_ptr = self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        })
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;