rustc_arena/lib.rs
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
//!
//! This crate implements several kinds of arena.
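//!
//! For illustration, a minimal usage sketch (the element types and values here
//! are arbitrary; `TypedArena` takes any sized type, `DroplessArena` takes
//! types that don't need `Drop`):
//!
//! ```
//! use rustc_arena::{DroplessArena, TypedArena};
//!
//! // A typed arena hands out `&mut T` references that live as long as the arena.
//! let typed: TypedArena<Vec<u32>> = TypedArena::default();
//! let v: &mut Vec<u32> = typed.alloc(vec![1, 2, 3]);
//! v.push(4);
//! assert_eq!(v.len(), 4);
//!
//! // A dropless arena holds values of many different types, as long as they
//! // don't need `Drop` to run.
//! let dropless = DroplessArena::default();
//! let n: &mut u64 = dropless.alloc(42);
//! let s: &str = dropless.alloc_str("hello");
//! assert_eq!((*n, s), (42, "hello"));
//! ```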

// tidy-alphabetical-start
#![allow(clippy::mut_from_ref)] // Arena allocators are one place where this pattern is fine.
#![allow(internal_features)]
#![cfg_attr(test, feature(test))]
#![deny(unsafe_op_in_unsafe_fn)]
#![doc(
    html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
    test(no_crate_inject, attr(deny(warnings)))
)]
#![doc(rust_logo)]
#![feature(core_intrinsics)]
#![feature(decl_macro)]
#![feature(dropck_eyepatch)]
#![feature(maybe_uninit_slice)]
#![feature(rustc_attrs)]
#![feature(rustdoc_internals)]
// tidy-alphabetical-end

use std::alloc::Layout;
use std::cell::{Cell, RefCell};
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::{cmp, intrinsics, slice};

use smallvec::SmallVec;

/// This calls the passed function while ensuring it won't be inlined into the caller.
#[inline(never)]
#[cold]
fn outline<F: FnOnce() -> R, R>(f: F) -> R {
    f()
}

struct ArenaChunk<T = u8> {
    /// The raw storage for the arena chunk.
    storage: NonNull<[MaybeUninit<T>]>,
    /// The number of valid entries in the chunk.
    entries: usize,
}

unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
    fn drop(&mut self) {
        unsafe { drop(Box::from_raw(self.storage.as_mut())) }
    }
}

impl<T> ArenaChunk<T> {
    #[inline]
    unsafe fn new(capacity: usize) -> ArenaChunk<T> {
        ArenaChunk {
            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
            entries: 0,
        }
    }

    /// Destroys this arena chunk.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `len` elements of this chunk have been initialized.
    #[inline]
    unsafe fn destroy(&mut self, len: usize) {
        // The branch on needs_drop() is an -O1 performance optimization.
        // Without the branch, dropping TypedArena<T> takes linear time.
        if mem::needs_drop::<T>() {
            // SAFETY: The caller must ensure that `len` elements of this chunk have
            // been initialized.
            unsafe {
                let slice = self.storage.as_mut();
                slice[..len].assume_init_drop();
            }
        }
    }

    // Returns a pointer to the first allocated object.
    #[inline]
    fn start(&mut self) -> *mut T {
        self.storage.as_ptr() as *mut T
    }

    // Returns a pointer to the end of the allocated space.
    #[inline]
    fn end(&mut self) -> *mut T {
        unsafe {
            if size_of::<T>() == 0 {
                // A pointer as large as possible for zero-sized elements.
                ptr::without_provenance_mut(!0)
            } else {
                self.start().add(self.storage.len())
            }
        }
    }
}

// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
// we stop growing. This scales well, from arenas that are barely used up to
// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
// the usual sizes of pages and huge pages on Linux.
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
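
// For example (arithmetic only, assuming a 16-byte element type in a
// `TypedArena`): chunk capacities go 256, 512, 1024, ... elements (4 KiB,
// 8 KiB, 16 KiB, ... of storage) until a chunk holds 131072 elements (2 MiB),
// after which every later chunk stays at that size. `DroplessArena` follows
// the same progression, measured directly in bytes.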

/// An arena that can hold objects of only one type.
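///
/// For illustration, a minimal usage sketch (the element type and values here
/// are arbitrary):
///
/// ```
/// use rustc_arena::TypedArena;
///
/// let arena: TypedArena<String> = TypedArena::default();
/// let a: &mut String = arena.alloc(String::from("hello"));
/// a.push_str(", world");
/// assert_eq!(a.as_str(), "hello, world");
/// // Every allocated `String` is dropped when `arena` itself is dropped.
/// ```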
pub struct TypedArena<T> {
    /// A pointer to the next object to be allocated.
    ptr: Cell<*mut T>,

    /// A pointer to the end of the allocated area. When this pointer is
    /// reached, a new chunk is allocated.
    end: Cell<*mut T>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk<T>>>,

    /// Marker indicating that dropping the arena causes its owned
    /// instances of `T` to be dropped.
    _own: PhantomData<T>,
}

impl<T> Default for TypedArena<T> {
    /// Creates a new `TypedArena`.
    fn default() -> TypedArena<T> {
        TypedArena {
            // We set both `ptr` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
            _own: PhantomData,
        }
    }
}

impl<T> TypedArena<T> {
    /// Allocates an object in the `TypedArena`, returning a reference to it.
    #[inline]
    pub fn alloc(&self, object: T) -> &mut T {
        if self.ptr == self.end {
            self.grow(1)
        }

        unsafe {
            if size_of::<T>() == 0 {
                self.ptr.set(self.ptr.get().wrapping_byte_add(1));
                let ptr = ptr::NonNull::<T>::dangling().as_ptr();
                // Don't drop the object. This `write` is equivalent to `forget`.
                ptr::write(ptr, object);
                &mut *ptr
            } else {
                let ptr = self.ptr.get();
                // Advance the pointer.
                self.ptr.set(self.ptr.get().add(1));
                // Write into uninitialized memory.
                ptr::write(ptr, object);
                &mut *ptr
            }
        }
    }

    #[inline]
    fn can_allocate(&self, additional: usize) -> bool {
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        let available_bytes = self.end.get().addr() - self.ptr.get().addr();
        let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
        available_bytes >= additional_bytes
    }

    #[inline]
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(size_of::<T>() != 0);
        assert!(len != 0);

        // Ensure the current chunk can fit `len` objects.
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }

        let start_ptr = self.ptr.get();
        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
        // `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    /// Allocates the elements of this iterator into a contiguous slice in the `TypedArena`.
    ///
    /// Note: for reasons of reentrancy and panic safety we collect into a `SmallVec<[_; 8]>` before
    /// storing the elements in the arena.
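    ///
    /// For illustration, a minimal sketch (the iterator here is arbitrary):
    ///
    /// ```
    /// use rustc_arena::TypedArena;
    ///
    /// let arena: TypedArena<String> = TypedArena::default();
    /// let slice: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    /// assert_eq!(slice.len(), 3);
    /// assert_eq!(slice[2], "2");
    /// ```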
    #[inline]
    pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason
        // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a
        // reference to `self` and adding elements to the arena during iteration.
        //
        // For this reason, if we pre-allocated any space for the elements of this iterator, we'd
        // have to track that some uninitialized elements are followed by some initialized elements,
        // else we might accidentally drop uninitialized memory if something panics or if the
        // iterator doesn't fill all the length we expected.
        //
        // So we collect all the elements beforehand, which takes care of reentrancy and panic
        // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
        // doesn't need to be hyper-optimized.
        assert!(size_of::<T>() != 0);

        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
        if vec.is_empty() {
            return &mut [];
        }
        // Move the content to the arena by copying and then forgetting it.
        let len = vec.len();
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            vec.set_len(0);
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }

    /// Grows the arena.
    #[inline(never)]
    #[cold]
    fn grow(&self, additional: usize) {
        unsafe {
            // We need the element size to convert chunk sizes (ranging from
            // PAGE to HUGE_PAGE bytes) to element counts.
            let elem_size = cmp::max(1, size_of::<T>());
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // If a type is `!needs_drop`, we don't need to keep track of how many elements
                // the chunk stores - the field will be ignored anyway.
                if mem::needs_drop::<T>() {
                    // FIXME: this should *likely* use `offset_from`, but more
                    // investigation is needed (including running tests in miri).
                    let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
                    last_chunk.entries = used_bytes / size_of::<T>();
                }

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE / elem_size;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::<T>::new(new_cap);
            self.ptr.set(chunk.start());
            self.end.set(chunk.end());
            chunks.push(chunk);
        }
    }

    // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
    // chunks.
    fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
        // Determine how much was filled.
        let start = last_chunk.start().addr();
        // We obtain the value of the pointer to the first uninitialized element.
        let end = self.ptr.get().addr();
        // We then calculate the number of elements to be dropped in the last chunk,
        // which is the filled area's length.
        let diff = if size_of::<T>() == 0 {
            // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
            // the number of zero-sized values in the last and only chunk, just out of caution.
            // Recall that `end` was incremented for each allocated value.
            end - start
        } else {
            // FIXME: this should *likely* use `offset_from`, but more
            // investigation is needed (including running tests in miri).
            (end - start) / size_of::<T>()
        };
        // Pass that to the `destroy` method.
        unsafe {
            last_chunk.destroy(diff);
        }
        // Reset the chunk.
        self.ptr.set(last_chunk.start());
    }
}

unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
    fn drop(&mut self) {
        unsafe {
            // Determine how much was filled.
            let mut chunks_borrow = self.chunks.borrow_mut();
            if let Some(mut last_chunk) = chunks_borrow.pop() {
                // Drop the contents of the last chunk.
                self.clear_last_chunk(&mut last_chunk);
                // The last chunk will be dropped. Destroy all other chunks.
                for chunk in chunks_borrow.iter_mut() {
                    chunk.destroy(chunk.entries);
                }
            }
            // Box handles deallocation of `last_chunk` and `self.chunks`.
        }
    }
}

unsafe impl<T: Send> Send for TypedArena<T> {}

#[inline(always)]
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

#[inline(always)]
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}
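
// For example (arithmetic only): with `align = 8`, `align_down(13, 8) == 8`
// (13 & !7 masks off the low bits) and `align_up(13, 8) == 16`
// ((13 + 7) & !7 rounds up to the next multiple of 8). Values that are already
// aligned are left unchanged by both functions.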

// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();

/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
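///
/// For illustration, a minimal usage sketch (the values and types here are
/// arbitrary `Copy`/`!Drop` data):
///
/// ```
/// use rustc_arena::DroplessArena;
///
/// let arena = DroplessArena::default();
/// let n: &mut u32 = arena.alloc(7);
/// let pair: &mut (u8, u8) = arena.alloc((1, 2));
/// let bytes: &mut [u8] = arena.alloc_slice(&[1, 2, 3]);
/// assert_eq!((*n, pair.1, bytes.len()), (7, 2, 3));
/// ```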
pub struct DroplessArena {
    /// A pointer to the start of the free space.
    start: Cell<*mut u8>,

    /// A pointer to the end of free space.
    ///
    /// The allocation proceeds downwards from the end of the chunk towards the
    /// start. (This is slightly simpler and faster than allocating upwards,
    /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
    /// When this pointer crosses the start pointer, a new chunk is allocated.
    ///
    /// This is kept aligned to DROPLESS_ALIGNMENT.
    end: Cell<*mut u8>,

    /// A vector of arena chunks.
    chunks: RefCell<Vec<ArenaChunk>>,
}

unsafe impl Send for DroplessArena {}

impl Default for DroplessArena {
    #[inline]
    fn default() -> DroplessArena {
        DroplessArena {
            // We set both `start` and `end` to 0 so that the first call to
            // alloc() will trigger a grow().
            start: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: Default::default(),
        }
    }
}

impl DroplessArena {
    #[inline(never)]
    #[cold]
    fn grow(&self, layout: Layout) {
        // Add some padding so we can align `self.end` while
        // still fitting in a `layout` allocation.
        let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;

        unsafe {
            let mut chunks = self.chunks.borrow_mut();
            let mut new_cap;
            if let Some(last_chunk) = chunks.last_mut() {
                // There is no need to update `last_chunk.entries` because that
                // field isn't used by `DroplessArena`.

                // If the previous chunk's len is less than HUGE_PAGE
                // bytes, then this chunk will be at least double the previous
                // chunk's size.
                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                new_cap *= 2;
            } else {
                new_cap = PAGE;
            }
            // Also ensure that this chunk can fit `additional`.
            new_cap = cmp::max(additional, new_cap);

            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
            self.start.set(chunk.start());

            // Align the end to DROPLESS_ALIGNMENT.
            let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);

            // Make sure we don't go past `start`. This should not happen since the allocation
            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
            debug_assert!(chunk.start().addr() <= end);

            self.end.set(chunk.end().with_addr(end));

            chunks.push(chunk);
        }
    }

    #[inline]
    pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
        assert!(layout.size() != 0);

        // This loop executes once or twice: if allocation fails the first
        // time, the `grow` ensures it will succeed the second time.
        loop {
            let start = self.start.get().addr();
            let old_end = self.end.get();
            let end = old_end.addr();

            // Align allocated bytes so that `self.end` stays aligned to
            // DROPLESS_ALIGNMENT.
            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);

            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };

            if let Some(sub) = end.checked_sub(bytes) {
                let new_end = align_down(sub, layout.align());
                if start <= new_end {
                    let new_end = old_end.with_addr(new_end);
                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
                    // preserves alignment as both `end` and `bytes` are already
                    // aligned to DROPLESS_ALIGNMENT.
                    self.end.set(new_end);
                    return new_end;
                }
            }

            // No free space left. Allocate a new chunk to satisfy the request.
            // On failure the grow will panic or abort.
            self.grow(layout);
        }
    }

    #[inline]
    pub fn alloc<T>(&self, object: T) -> &mut T {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);

        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;

        unsafe {
            // Write into uninitialized memory.
            ptr::write(mem, object);
            &mut *mem
        }
    }

    /// Allocates a slice of objects that are copied into the `DroplessArena`, returning a mutable
    /// reference to it. Will panic if passed a zero-sized type.
    ///
    /// Panics:
    ///
    /// - Zero-sized types
    /// - Zero-length slices
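    ///
    /// For illustration, a minimal sketch (the slice contents are arbitrary):
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let copied: &mut [u32] = arena.alloc_slice(&[10, 20, 30]);
    /// copied[0] = 11;
    /// assert_eq!(copied, [11, 20, 30]);
    /// ```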
    #[inline]
    pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
    where
        T: Copy,
    {
        assert!(!mem::needs_drop::<T>());
        assert!(size_of::<T>() != 0);
        assert!(!slice.is_empty());

        let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;

        unsafe {
            mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
            slice::from_raw_parts_mut(mem, slice.len())
        }
    }

    /// Used by `Lift` to check whether this slice is allocated
    /// in this arena.
    #[inline]
    pub fn contains_slice<T>(&self, slice: &[T]) -> bool {
        for chunk in self.chunks.borrow_mut().iter_mut() {
            let ptr = slice.as_ptr().cast::<u8>().cast_mut();
            if chunk.start() <= ptr && chunk.end() >= ptr {
                return true;
            }
        }
        false
    }

    /// Allocates a string slice that is copied into the `DroplessArena`, returning a
    /// reference to it. Will panic if passed an empty string.
    ///
    /// Panics:
    ///
    /// - Zero-length string
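    ///
    /// For illustration, a minimal sketch:
    ///
    /// ```
    /// use rustc_arena::DroplessArena;
    ///
    /// let arena = DroplessArena::default();
    /// let s: &str = arena.alloc_str("borrowed for as long as the arena lives");
    /// assert_eq!(s, "borrowed for as long as the arena lives");
    /// ```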
    #[inline]
    pub fn alloc_str(&self, string: &str) -> &str {
        let slice = self.alloc_slice(string.as_bytes());

        // SAFETY: the result has a copy of the same valid UTF-8 bytes.
        unsafe { std::str::from_utf8_unchecked(slice) }
    }

    /// # Safety
    ///
    /// The caller must ensure that `mem` is valid for writes up to `size_of::<T>() * len`, and that
    /// that memory stays allocated and not shared for the lifetime of `self`. This must hold even
    /// if `iter.next()` allocates onto `self`.
    #[inline]
    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
        &self,
        mut iter: I,
        len: usize,
        mem: *mut T,
    ) -> &mut [T] {
        let mut i = 0;
        // Use a manual loop since LLVM manages to optimize it better for
        // slice iterators
        loop {
            // SAFETY: The caller must ensure that `mem` is valid for writes up to
            // `size_of::<T>() * len`.
            unsafe {
                match iter.next() {
                    Some(value) if i < len => mem.add(i).write(value),
                    Some(_) | None => {
                        // We only return as many items as the iterator gave us, even
                        // though it was supposed to give us `len`
                        return slice::from_raw_parts_mut(mem, i);
                    }
                }
            }
            i += 1;
        }
    }

    #[inline]
    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
        // Warning: this function is reentrant: `iter` could hold a reference to `&self` and
        // allocate additional elements while we're iterating.
        let iter = iter.into_iter();
        assert!(size_of::<T>() != 0);
        assert!(!mem::needs_drop::<T>());

        let size_hint = iter.size_hint();

        match size_hint {
            (min, Some(max)) if min == max => {
                // We know the exact number of elements the iterator expects to produce here.
                let len = min;

                if len == 0 {
                    return &mut [];
                }

                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
                // SAFETY: `write_from_iter` doesn't touch `self`. It only touches the slice we just
                // reserved. If the iterator panics or doesn't output `len` elements, this will
                // leave some unallocated slots in the arena, which is fine because we do not call
                // `drop`.
                unsafe { self.write_from_iter(iter, len, mem) }
            }
            (_, _) => {
                outline(move || -> &mut [T] {
                    // Takes care of reentrancy.
                    let mut vec: SmallVec<[_; 8]> = iter.collect();
                    if vec.is_empty() {
                        return &mut [];
                    }
                    // Move the content to the arena by copying it and then forgetting
                    // the content of the SmallVec
                    unsafe {
                        let len = vec.len();
                        let start_ptr =
                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
                        vec.set_len(0);
                        slice::from_raw_parts_mut(start_ptr, len)
                    }
                })
            }
        }
    }
}

/// Declare an `Arena` containing one dropless arena and many typed arenas (the
/// types of the typed arenas are specified by the arguments).
///
/// There are three cases of interest.
/// - Types that are `Copy`: these need not be specified in the arguments. They
///   will use the `DroplessArena`.
/// - Types that are `!Copy` and `!Drop`: these must be specified in the
///   arguments. An empty `TypedArena` will be created for each one, but the
///   `DroplessArena` will always be used and the `TypedArena` will stay empty.
///   This is odd but harmless, because an empty arena allocates no memory.
/// - Types that are `!Copy` and `Drop`: these must be specified in the
///   arguments. The `TypedArena` will be used for them.
///
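/// For illustration, a hedged sketch of what an invocation might look like (the
/// field name and type below are made up for this example; real callers list
/// the compiler's own types):
///
/// ```ignore (illustrative)
/// rustc_arena::declare_arena!([
///     [] my_droppable: MyDroppableType<'tcx>,
/// ]);
///
/// // This expands to a `pub struct Arena<'tcx>` with a `dropless` field plus one
/// // `TypedArena` field per listed entry, an `ArenaAllocatable` trait, and
/// // `alloc`/`alloc_from_iter`/`alloc_slice`/`alloc_str` methods that route each
/// // type to the dropless arena or to its typed arena based on `needs_drop`.
/// ```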
#[rustc_macro_transparency = "semitransparent"]
pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
    #[derive(Default)]
    pub struct Arena<'tcx> {
        pub dropless: $crate::DroplessArena,
        $($name: $crate::TypedArena<$ty>,)*
    }

    pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self;
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self];
    }

    // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
    impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
            arena.dropless.alloc(self)
        }
        #[inline]
        #[allow(clippy::mut_from_ref)]
        fn allocate_from_iter(
            arena: &'tcx Arena<'tcx>,
            iter: impl ::std::iter::IntoIterator<Item = Self>,
        ) -> &'tcx mut [Self] {
            arena.dropless.alloc_from_iter(iter)
        }
    }
    $(
        impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
            #[inline]
            fn allocate_on(self, arena: &'tcx Arena<'tcx>) -> &'tcx mut Self {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc(self)
                } else {
                    arena.$name.alloc(self)
                }
            }

            #[inline]
            #[allow(clippy::mut_from_ref)]
            fn allocate_from_iter(
                arena: &'tcx Arena<'tcx>,
                iter: impl ::std::iter::IntoIterator<Item = Self>,
            ) -> &'tcx mut [Self] {
                if !::std::mem::needs_drop::<Self>() {
                    arena.dropless.alloc_from_iter(iter)
                } else {
                    arena.$name.alloc_from_iter(iter)
                }
            }
        }
    )*

    impl<'tcx> Arena<'tcx> {
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&'tcx self, value: T) -> &mut T {
            value.allocate_on(self)
        }

        // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
        #[inline]
        #[allow(clippy::mut_from_ref)]
        pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
            if value.is_empty() {
                return &mut [];
            }
            self.dropless.alloc_slice(value)
        }

        #[inline]
        pub fn alloc_str(&self, string: &str) -> &str {
            if string.is_empty() {
                return "";
            }
            self.dropless.alloc_str(string)
        }

        #[allow(clippy::mut_from_ref)]
        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
            &'tcx self,
            iter: impl ::std::iter::IntoIterator<Item = T>,
        ) -> &mut [T] {
            T::allocate_from_iter(self, iter)
        }
    }
}

// Marker types that let us give different behaviour for arenas allocating
// `Copy` types vs `!Copy` types.
pub struct IsCopy;
pub struct IsNotCopy;

#[cfg(test)]
mod tests;