// core/slice/sort/stable/mod.rs
1//! This module contains the entry points for `slice::sort`.
2
3#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
4use crate::cmp;
5use crate::intrinsics;
6use crate::mem::{MaybeUninit, SizedTypeProperties};
7#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
8use crate::slice::sort::shared::smallsort::{
9 SMALL_SORT_GENERAL_SCRATCH_LEN, StableSmallSortTypeImpl, insertion_sort_shift_left,
10};
11
12pub(crate) mod merge;
13
14#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
15pub(crate) mod drift;
16#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
17pub(crate) mod quicksort;
18
19#[cfg(any(feature = "optimize_for_size", target_pointer_width = "16"))]
20pub(crate) mod tiny;
21
/// Stable sort called driftsort by Orson Peters and Lukas Bergdoll.
/// Design document:
/// <https://github.com/Voultapher/sort-research-rs/blob/main/writeup/driftsort_introduction/text.md>
///
/// Upholds all safety properties outlined here:
/// <https://github.com/Voultapher/sort-research-rs/blob/main/writeup/sort_safety/text.md>
///
/// `is_less` is the "less than" comparison closure. `BufT` supplies heap
/// scratch memory via [`BufGuard`], abstracted so this code can live in `core`
/// where no allocator is available.
#[inline(always)]
pub fn sort<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], is_less: &mut F) {
    // Arrays of zero-sized types are always all-equal, and thus sorted.
    if T::IS_ZST {
        return;
    }

    // Instrumenting the standard library showed that 90+% of the calls to sort
    // by rustc are either of size 0 or 1.
    let len = v.len();
    if intrinsics::likely(len < 2) {
        return;
    }

    cfg_if! {
        if #[cfg(any(feature = "optimize_for_size", target_pointer_width = "16"))] {
            // Size-optimized builds and 16-bit targets use a plain merge sort
            // (`tiny::mergesort`) instead of driftsort.
            //
            // Unlike driftsort, mergesort only requires len / 2,
            // not len - len / 2.
            let alloc_len = len / 2;

            cfg_if! {
                if #[cfg(target_pointer_width = "16")] {
                    // 16-bit targets always take the heap path; no stack
                    // scratch buffer is attempted.
                    let mut heap_buf = BufT::with_capacity(alloc_len);
                    let scratch = heap_buf.as_uninit_slice_mut();
                } else {
                    // For small inputs 4KiB of stack storage suffices, which allows us to avoid
                    // calling the (de-)allocator. Benchmarks showed this was quite beneficial.
                    let mut stack_buf = AlignedStorage::<T, 4096>::new();
                    let stack_scratch = stack_buf.as_uninit_slice_mut();
                    // Declared (uninitialized) before `scratch` so that, when
                    // the heap path is taken, the allocation outlives the
                    // borrow held by `scratch`.
                    let mut heap_buf;
                    let scratch = if stack_scratch.len() >= alloc_len {
                        stack_scratch
                    } else {
                        heap_buf = BufT::with_capacity(alloc_len);
                        heap_buf.as_uninit_slice_mut()
                    };
                }
            }

            tiny::mergesort(v, scratch, is_less);
        } else {
            // More advanced sorting methods than insertion sort are faster if called in
            // a hot loop for small inputs, but for general-purpose code the small
            // binary size of insertion sort is more important. The instruction cache in
            // modern processors is very valuable, and for a single sort call in general
            // purpose code any gains from an advanced method are cancelled by i-cache
            // misses during the sort, and thrashing the i-cache for surrounding code.
            const MAX_LEN_ALWAYS_INSERTION_SORT: usize = 20;
            if intrinsics::likely(len <= MAX_LEN_ALWAYS_INSERTION_SORT) {
                insertion_sort_shift_left(v, 1, is_less);
                return;
            }

            driftsort_main::<T, F, BufT>(v, is_less);
        }
    }
}
85
86/// See [`sort`]
87///
88/// Deliberately don't inline the main sorting routine entrypoint to ensure the
89/// inlined insertion sort i-cache footprint remains minimal.
90#[cfg(not(any(feature = "optimize_for_size", target_pointer_width = "16")))]
91#[inline(never)]
92fn driftsort_main<T, F: FnMut(&T, &T) -> bool, BufT: BufGuard<T>>(v: &mut [T], is_less: &mut F) {
93 // By allocating n elements of memory we can ensure the entire input can
94 // be sorted using stable quicksort, which allows better performance on
95 // random and low-cardinality distributions. However, we still want to
96 // reduce our memory usage to n - n / 2 for large inputs. We do this by scaling
97 // our allocation as max(n - n / 2, min(n, 8MB)), ensuring we scale like n for
98 // small inputs and n - n / 2 for large inputs, without a sudden drop off. We
99 // also need to ensure our alloc >= SMALL_SORT_GENERAL_SCRATCH_LEN, as the
100 // small-sort always needs this much memory.
101 //
102 // driftsort will produce unsorted runs of up to min_good_run_len, which
103 // is at most len - len / 2.
104 // Unsorted runs need to be processed by quicksort, which requires as much
105 // scratch space as the run length, therefore the scratch space must be at
106 // least len - len / 2.
107 // If min_good_run_len is ever modified, this code must be updated to allocate
108 // the correct scratch size for it.
109 const MAX_FULL_ALLOC_BYTES: usize = 8_000_000; // 8MB
110 let max_full_alloc = MAX_FULL_ALLOC_BYTES / size_of::<T>();
111 let len = v.len();
112 let alloc_len = cmp::max(
113 cmp::max(len - len / 2, cmp::min(len, max_full_alloc)),
114 SMALL_SORT_GENERAL_SCRATCH_LEN,
115 );
116
117 // For small inputs 4KiB of stack storage suffices, which allows us to avoid
118 // calling the (de-)allocator. Benchmarks showed this was quite beneficial.
119 let mut stack_buf = AlignedStorage::<T, 4096>::new();
120 let stack_scratch = stack_buf.as_uninit_slice_mut();
121 let mut heap_buf;
122 let scratch = if stack_scratch.len() >= alloc_len {
123 stack_scratch
124 } else {
125 heap_buf = BufT::with_capacity(alloc_len);
126 heap_buf.as_uninit_slice_mut()
127 };
128
129 // For small inputs using quicksort is not yet beneficial, and a single
130 // small-sort or two small-sorts plus a single merge outperforms it, so use
131 // eager mode.
132 let eager_sort = len <= T::small_sort_threshold() * 2;
133 crate::slice::sort::stable::drift::sort(v, scratch, eager_sort, is_less);
134}
135
#[doc(hidden)]
/// Abstracts owned memory buffer, so that sort code can live in core where no allocation is
/// possible. This trait can then be implemented in a place that has access to allocation.
pub trait BufGuard<T> {
    /// Creates new buffer that holds memory for at least `capacity` elements of `T`.
    fn with_capacity(capacity: usize) -> Self;
    /// Returns mutable access to the uninitialized memory owned by the buffer,
    /// as a slice of at least `capacity` `MaybeUninit<T>` elements.
    fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<T>];
}
145
/// `N` bytes of stack scratch space, aligned suitably for `T`.
///
/// The zero-length `[T; 0]` field occupies no space but, as the first field of
/// a `#[repr(C)]` struct, forces the whole struct (and thus `storage`) to be
/// aligned to at least `align_of::<T>()`.
#[repr(C)]
struct AlignedStorage<T, const N: usize> {
    _align: [T; 0],
    storage: [MaybeUninit<u8>; N],
}

impl<T, const N: usize> AlignedStorage<T, N> {
    /// Creates the storage; all `N` bytes are left uninitialized.
    fn new() -> Self {
        Self { _align: [], storage: [const { MaybeUninit::uninit() }; N] }
    }

    /// Re-views the byte storage as a slice of as many uninitialized `T`s as
    /// fit in `N` bytes.
    ///
    /// NOTE(review): divides by `size_of::<T>()`, so `T` must not be a ZST —
    /// the callers in this file return early for ZSTs before constructing one.
    fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit<T>] {
        let elem_count = N / size_of::<T>();
        let base = self.storage.as_mut_ptr().cast::<MaybeUninit<T>>();

        // SAFETY: `base` points at `N` bytes owned by `self` and exclusively
        // borrowed via `&mut self`; `_align` guarantees alignment for `T`, and
        // `elem_count * size_of::<T>() <= N` keeps the slice in bounds. The
        // elements are `MaybeUninit<T>`, so no initialization is required.
        unsafe { core::slice::from_raw_parts_mut(base, elem_count) }
    }
}
163}