// alloc/collections/vec_deque/drain.rs

1use core::iter::FusedIterator;
2use core::marker::PhantomData;
3use core::mem::{self, SizedTypeProperties};
4use core::ptr::NonNull;
5use core::{fmt, ptr};
6
7use super::VecDeque;
8use crate::alloc::{Allocator, Global};
9
/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: VecDeque::drain
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
    // and we want it to be covariant instead
    pub(super) deque: NonNull<VecDeque<T, A>>,
    // Total number of elements in the drained range.
    // (drain_start is stored in deque.len while the drain is alive; see `Drain::new`.)
    pub(super) drain_len: usize,
    // index into the logical array, not the physical one (always lies in [0..deque.len))
    // Points at the next front element the iterator will yield.
    pub(super) idx: usize,
    // number of elements after the drained range
    pub(super) tail_len: usize,
    // Number of elements in the drained range not yet yielded by the iterator.
    pub(super) remaining: usize,
    // Needed to make Drain covariant over T
    _marker: PhantomData<&'a T>,
}
35
impl<'a, T, A: Allocator> Drain<'a, T, A> {
    /// Creates a drain over the logical range
    /// `drain_start..drain_start + drain_len` of `deque`.
    ///
    /// # Safety
    ///
    /// `drain_start + drain_len` must not exceed the deque's current length,
    /// otherwise the `tail_len` subtraction below underflows.
    /// (NOTE(review): precondition inferred from the arithmetic here — the
    /// caller, `VecDeque::drain`, is expected to uphold it.)
    pub(super) unsafe fn new(
        deque: &'a mut VecDeque<T, A>,
        drain_start: usize,
        drain_len: usize,
    ) -> Self {
        // Stash `drain_start` in `deque.len`: while the drain is alive the
        // deque is logically truncated to just the head elements. The proper
        // (shortened) length is written back when the `Drain` is dropped.
        let orig_len = mem::replace(&mut deque.len, drain_start);
        let tail_len = orig_len - drain_start - drain_len;
        Drain {
            deque: NonNull::from(deque),
            drain_len,
            idx: drain_start,
            tail_len,
            remaining: drain_len,
            _marker: PhantomData,
        }
    }

    // Only returns pointers to the slices, as that's all we need
    // to drop them. May only be called if `self.remaining != 0`.
    pub(super) unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
        unsafe {
            let deque = self.deque.as_ref();

            // We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
            let logical_remaining_range = self.idx..self.idx + self.remaining;

            // SAFETY: `logical_remaining_range` represents the
            // range into the logical buffer of elements that
            // haven't been drained yet, so they're all initialized,
            // and `slice::range(start..end, end) == start..end`,
            // so the preconditions for `slice_ranges` are met.
            let (a_range, b_range) =
                deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
            (deque.buffer_range(a_range), deque.buffer_range(b_range))
        }
    }
}
74
75#[stable(feature = "collection_debug", since = "1.17.0")]
76impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
77    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
78        f.debug_tuple("Drain")
79            .field(&self.drain_len)
80            .field(&self.idx)
81            .field(&self.tail_len)
82            .field(&self.remaining)
83            .finish()
84    }
85}
86
// The raw `NonNull<VecDeque<T, A>>` field suppresses the auto traits, so
// reinstate them here with the same bounds that a `&mut VecDeque<T, A>`
// would impose (the pointer is used like an exclusive borrow of the deque).
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Sync, A: Allocator + Sync> Sync for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Send, A: Allocator + Send> Send for Drain<'_, T, A> {}
91
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
    fn drop(&mut self) {
        // Runs even if one of the element destructors below panics: its own
        // `Drop` impl (further down) drops any still-remaining elements, then
        // moves the tail into place and restores the deque's length.
        struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);

        let guard = DropGuard(self);

        if mem::needs_drop::<T>() && guard.0.remaining != 0 {
            unsafe {
                // SAFETY: We just checked that `self.remaining != 0`.
                let (front, back) = guard.0.as_slices();
                // The bookkeeping is updated *before* each `drop_in_place` so
                // that if a destructor panics, the guard re-drops only the
                // elements not yet handed to `drop_in_place` (the rest of the
                // panicking slice is leaked rather than dropped twice).
                // since idx is a logical index, we don't need to worry about wrapping.
                guard.0.idx += front.len();
                guard.0.remaining -= front.len();
                ptr::drop_in_place(front);
                guard.0.remaining = 0;
                ptr::drop_in_place(back);
            }
        }

        // Dropping `guard` handles moving the remaining elements into place.
        impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            #[inline]
            fn drop(&mut self) {
                // Non-zero `remaining` here means a destructor above panicked;
                // finish dropping whatever is left before repairing the deque.
                if mem::needs_drop::<T>() && self.0.remaining != 0 {
                    unsafe {
                        // SAFETY: We just checked that `self.remaining != 0`.
                        let (front, back) = self.0.as_slices();
                        ptr::drop_in_place(front);
                        ptr::drop_in_place(back);
                    }
                }

                let source_deque = unsafe { self.0.deque.as_mut() };

                let drain_len = self.0.drain_len;
                let head_len = source_deque.len; // #elements in front of the drain
                let tail_len = self.0.tail_len; // #elements behind the drain
                let new_len = head_len + tail_len;

                if T::IS_ZST {
                    // no need to copy around any memory if T is a ZST
                    source_deque.len = new_len;
                    return;
                }

                // Next, we will fill the hole left by the drain with as few writes as possible.
                // The code below handles the following control flow and reduces the amount of
                // branches under the assumption that `head_len == 0 || tail_len == 0`, i.e.
                // draining at the front or at the back of the dequeue is especially common.
                //
                // H = "head index" = `deque.head`
                // h = elements in front of the drain
                // d = elements in the drain
                // t = elements behind the drain
                //
                // Note that the buffer may wrap at any point and the wrapping is handled by
                // `wrap_copy` and `to_physical_idx`.
                //
                // Case 1: if `head_len == 0 && tail_len == 0`
                // Everything was drained, reset the head index back to 0.
                //             H
                // [ . . . . . d d d d . . . . . ]
                //   H
                // [ . . . . . . . . . . . . . . ]
                //
                // Case 2: else if `tail_len == 0`
                // Don't move data or the head index.
                //         H
                // [ . . . h h h h d d d d . . . ]
                //         H
                // [ . . . h h h h . . . . . . . ]
                //
                // Case 3: else if `head_len == 0`
                // Don't move data, but move the head index.
                //         H
                // [ . . . d d d d t t t t . . . ]
                //                 H
                // [ . . . . . . . t t t t . . . ]
                //
                // Case 4: else if `tail_len <= head_len`
                // Move data, but not the head index.
                //       H
                // [ . . h h h h d d d d t t . . ]
                //       H
                // [ . . h h h h t t . . . . . . ]
                //
                // Case 5: else
                // Move data and the head index.
                //       H
                // [ . . h h d d d d t t t t . . ]
                //               H
                // [ . . . . . . h h t t t t . . ]

                // When draining at the front (`.drain(..n)`) or at the back (`.drain(n..)`),
                // we don't need to copy any data. The number of elements copied would be 0.
                if head_len != 0 && tail_len != 0 {
                    join_head_and_tail_wrapping(source_deque, drain_len, head_len, tail_len);
                    // Marking this function as cold helps LLVM to eliminate it entirely if
                    // this branch is never taken.
                    // We use `#[cold]` instead of `#[inline(never)]`, because inlining this
                    // function into the general case (`.drain(n..m)`) is fine.
                    // See `tests/codegen-llvm/vecdeque-drain.rs` for a test.
                    #[cold]
                    fn join_head_and_tail_wrapping<T, A: Allocator>(
                        source_deque: &mut VecDeque<T, A>,
                        drain_len: usize,
                        head_len: usize,
                        tail_len: usize,
                    ) {
                        // Pick whether to move the head or the tail here.
                        // Whichever side is shorter is the one copied.
                        let (src, dst, len);
                        if head_len < tail_len {
                            src = source_deque.head;
                            dst = source_deque.to_physical_idx(drain_len);
                            len = head_len;
                        } else {
                            src = source_deque.to_physical_idx(head_len + drain_len);
                            dst = source_deque.to_physical_idx(head_len);
                            len = tail_len;
                        };

                        unsafe {
                            source_deque.wrap_copy(src, dst, len);
                        }
                    }
                }

                if new_len == 0 {
                    // Special case: If the entire deque was drained, reset the head back to 0,
                    // like `.clear()` does.
                    source_deque.head = 0;
                } else if head_len < tail_len {
                    // If we moved the head above, then we need to adjust the head index here.
                    source_deque.head = source_deque.to_physical_idx(drain_len);
                }
                source_deque.len = new_len;
            }
        }
    }
}
233
234#[stable(feature = "drain", since = "1.6.0")]
235impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
236    type Item = T;
237
238    #[inline]
239    fn next(&mut self) -> Option<T> {
240        if self.remaining == 0 {
241            return None;
242        }
243        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
244        self.idx += 1;
245        self.remaining -= 1;
246        Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
247    }
248
249    #[inline]
250    fn size_hint(&self) -> (usize, Option<usize>) {
251        let len = self.remaining;
252        (len, Some(len))
253    }
254}
255
256#[stable(feature = "drain", since = "1.6.0")]
257impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
258    #[inline]
259    fn next_back(&mut self) -> Option<T> {
260        if self.remaining == 0 {
261            return None;
262        }
263        self.remaining -= 1;
264        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
265        Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
266    }
267}
268
// `size_hint` always returns the exact remaining count, so the default
// `ExactSizeIterator::len` implementation is correct.
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}

// `next` keeps returning `None` once `remaining` reaches 0, which is exactly
// the fused-iterator contract.
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}