// pdqsort/lib.rs

//! Pattern-defeating quicksort
//!
//! This sort is in most cases significantly faster than the standard sort in Rust. In particular,
//! it sorts random arrays of integers approximately 45% faster. The key drawback is that it is an
//! unstable sort (i.e. may reorder equal elements). However, in most cases stability doesn't
//! matter anyway.
//!
//! The algorithm is based on pattern-defeating quicksort by Orson Peters, published at:
//! https://github.com/orlp/pdqsort
//!
//! # Properties
//!
//! - Best-case running time is `O(n)`.
//! - Worst-case running time is `O(n log n)`.
//! - Unstable, i.e. may reorder equal elements.
//! - Does not allocate additional memory.
//! - Uses `#![no_std]`.
//!
//! # Examples
//!
//! ```
//! extern crate pdqsort;
//!
//! let mut v = [-5i32, 4, 1, -3, 2];
//!
//! pdqsort::sort(&mut v);
//! assert!(v == [-5, -3, 1, 2, 4]);
//!
//! pdqsort::sort_by(&mut v, |a, b| b.cmp(a));
//! assert!(v == [4, 2, 1, -3, -5]);
//!
//! pdqsort::sort_by_key(&mut v, |k| k.abs());
//! assert!(v == [1, 2, -3, 4, -5]);
//! ```

#![no_std]

use core::cmp::{self, Ordering};
use core::mem;
use core::ptr;

/// When dropped, takes the value out of `Option` and writes it into `dest`.
///
/// This allows us to safely read the pivot into a stack-allocated variable for efficiency, and
/// write it back into the slice after partitioning. This way we ensure that the write happens
/// even if `is_less` panics in the meantime.
struct WriteOnDrop<T> {
    // Holds the value until drop; becomes `None` only once the value is written back.
    value: Option<T>,
    // Destination slot in the slice; must be valid for a write when the guard drops.
    dest: *mut T,
}

impl<T> Drop for WriteOnDrop<T> {
    fn drop(&mut self) {
        unsafe {
            // Move the value back into the slice. The `unwrap` panics only if the value was
            // already taken, which would be a logic bug in this file.
            ptr::write(self.dest, self.value.take().unwrap());
        }
    }
}

/// Holds a value, but never drops it.
struct NoDrop<T> {
    value: Option<T>,
}

impl<T> Drop for NoDrop<T> {
    fn drop(&mut self) {
        // Leak the value instead of dropping it: a bitwise copy of it is placed back into the
        // slice by `CopyOnDrop` (see `shift_head`/`shift_tail`), so running its destructor here
        // would cause a double drop.
        mem::forget(self.value.take());
    }
}

/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
    src: *mut T,
    dest: *mut T,
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: callers in this file point `src` at a stack temporary and `dest` into the
        // slice, so the two one-element regions never overlap.
        unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); }
    }
}

/// Shifts the first element to the right until it encounters a greater or equal element.
///
/// Runs in `O(len)` time using a single stack-allocated temporary, and is panic-safe: if
/// `is_less` panics, the temporary is written back into the hole by the `CopyOnDrop` guard.
fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    let len = v.len();
    unsafe {
        // If the first two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
            // Read the first element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice. `NoDrop` ensures the temporary itself is never dropped, which would
            // otherwise double-drop the element.
            let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(0))) };
            let mut hole = CopyOnDrop {
                src: tmp.value.as_mut().unwrap(),
                dest: v.get_unchecked_mut(1),
            };
            // Fill the vacated first slot; the hole is now at index 1.
            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);

            for i in 2..len {
                // Stop once an element not smaller than `tmp` is found.
                if !is_less(v.get_unchecked(i), tmp.value.as_ref().unwrap()) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}

/// Shifts the last element to the left until it encounters a smaller or equal element.
///
/// Mirror image of `shift_head`: `O(len)` time, one stack temporary, and panic-safe via the
/// `CopyOnDrop` guard.
fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    let len = v.len();
    unsafe {
        // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
            // Read the last element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice. `NoDrop` prevents the temporary from being double-dropped.
            let mut tmp = NoDrop { value: Some(ptr::read(v.get_unchecked(len - 1))) };
            let mut hole = CopyOnDrop {
                src: tmp.value.as_mut().unwrap(),
                dest: v.get_unchecked_mut(len - 2),
            };
            // Fill the vacated last slot; the hole is now at index `len - 2`.
            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);

            for i in (0..len-2).rev() {
                // Stop once an element not greater than `tmp` is found.
                if !is_less(&tmp.value.as_ref().unwrap(), v.get_unchecked(i)) {
                    break;
                }

                // Move `i`-th element one place to the right, thus shifting the hole to the left.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}

/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
    where F: FnMut(&T, &T) -> bool
{
    // Maximum number of adjacent out-of-order pairs that will get shifted.
    const MAX_STEPS: usize = 5;
    // If the slice is shorter than this, don't shift any elements.
    const SHORTEST_SHIFTING: usize = 50;

    let len = v.len();
    // Index of the right element of the adjacent pair currently being examined.
    let mut i = 1;

    for _ in 0..MAX_STEPS {
        unsafe {
            // Find the next pair of adjacent out-of-order elements.
            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
                i += 1;
            }
        }

        // Are we done?
        if i == len {
            return true;
        }

        // Don't shift elements on short arrays, that has a performance cost.
        if len < SHORTEST_SHIFTING {
            return false;
        }

        // Swap the found pair of elements. This puts them in correct order.
        v.swap(i - 1, i);

        // Shift the smaller element to the left.
        shift_tail(&mut v[..i], is_less);
        // Shift the greater element to the right.
        shift_head(&mut v[i..], is_less);
    }

    // Didn't manage to sort the slice in the limited number of steps.
    false
}

193/// Sorts a slice using insertion sort, which is `O(n^2)` worst-case.
194fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
195    where F: FnMut(&T, &T) -> bool
196{
197    for i in 1..v.len() {
198        shift_tail(&mut v[..i+1], is_less);
199    }
200}
201
/// Sorts `v` using heapsort, which guarantees `O(n log n)` worst-case.
#[cold]
fn heapsort<T, F>(v: &mut [T], is_less: &mut F)
    where F: FnMut(&T, &T) -> bool
{
    // Restores the max-heap invariant (`parent >= child`) at `parent` by repeatedly
    // swapping it with its larger child until it dominates both children.
    let mut sift_down = |heap: &mut [T], mut parent: usize| {
        loop {
            // Left child of `parent`; the right child is `child + 1`.
            let mut child = 2 * parent + 1;

            // Pick the larger of the two children, if both exist.
            if child + 1 < heap.len() && is_less(&heap[child], &heap[child + 1]) {
                child += 1;
            }

            // The invariant holds at `parent`: either it has no children, or it is
            // not smaller than the larger one.
            if child >= heap.len() || !is_less(&heap[parent], &heap[child]) {
                return;
            }

            // Swap down one level and keep sifting.
            heap.swap(parent, child);
            parent = child;
        }
    };

    // Heapify: sift down every internal node, bottom-up. This runs in linear time.
    for node in (0..v.len() / 2).rev() {
        sift_down(v, node);
    }

    // Repeatedly move the current maximum to the end of the shrinking heap.
    for end in (1..v.len()).rev() {
        v.swap(0, end);
        sift_down(&mut v[..end], 0);
    }
}

/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
    where F: FnMut(&T, &T) -> bool
{
    // Number of elements in a typical block.
    const BLOCK: usize = 128;

    // The partitioning algorithm repeats the following steps until completion:
    //
    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
    // 2. Trace a block from the right side to identify elements smaller than the pivot.
    // 3. Exchange the identified elements between the left and right side.
    //
    // We keep the following variables for a block of elements:
    //
    // 1. `block` - Number of elements in the block.
    // 2. `start` - Start pointer into the `offsets` array.
    // 3. `end` - End pointer into the `offsets` array.
    // 4. `offsets` - Indices of out-of-order elements within the block.

    // The current block on the left side (from `l` to `l.offset(block_l)`).
    let mut l = v.as_mut_ptr();
    let mut block_l = BLOCK;
    let mut start_l = ptr::null_mut();
    let mut end_l = ptr::null_mut();
    // Zero-initialized rather than `mem::uninitialized()`: producing an uninitialized value is
    // undefined behavior, and zeroing 128 bytes is practically free. Only the prefix written by
    // the tracing loops below is ever read.
    let mut offsets_l = [0u8; BLOCK];

    // The current block on the right side (from `r.offset(-block_r)` to `r`).
    let mut r = unsafe { l.offset(v.len() as isize) };
    let mut block_r = BLOCK;
    let mut start_r = ptr::null_mut();
    let mut end_r = ptr::null_mut();
    let mut offsets_r = [0u8; BLOCK];

    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
    fn width<T>(l: *mut T, r: *mut T) -> usize {
        assert!(mem::size_of::<T>() > 0);
        (r as usize - l as usize) / mem::size_of::<T>()
    }

    loop {
        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
        // some patch-up work in order to partition the remaining elements in between.
        let is_done = width(l, r) <= 2 * BLOCK;

        if is_done {
            // Number of remaining elements (still not compared to the pivot).
            let mut rem = width(l, r);
            if start_l < end_l || start_r < end_r {
                rem -= BLOCK;
            }

            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
            // aligned to cover the whole remaining gap.
            if start_l < end_l {
                block_r = rem;
            } else if start_r < end_r {
                block_l = rem;
            } else {
                block_l = rem / 2;
                block_r = rem - block_l;
            }
            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
            debug_assert!(width(l, r) == block_l + block_r);
        }

        if start_l == end_l {
            // Trace `block_l` elements from the left side.
            start_l = offsets_l.as_mut_ptr();
            end_l = offsets_l.as_mut_ptr();
            let mut elem = l;

            for i in 0..block_l {
                unsafe {
                    // Branchless comparison: unconditionally store the index, then advance the
                    // end pointer only if the element is out of order (>= pivot on the left).
                    *end_l = i as u8;
                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
                    elem = elem.offset(1);
                }
            }
        }

        if start_r == end_r {
            // Trace `block_r` elements from the right side.
            start_r = offsets_r.as_mut_ptr();
            end_r = offsets_r.as_mut_ptr();
            let mut elem = r;

            for i in 0..block_r {
                unsafe {
                    // Branchless comparison: an element is out of order on the right if it is
                    // smaller than the pivot.
                    elem = elem.offset(-1);
                    *end_r = i as u8;
                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
                }
            }
        }

        // Number of out-of-order elements to swap between the left and right side.
        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));

        if count > 0 {
            macro_rules! left { () => { l.offset(*start_l as isize) } }
            macro_rules! right { () => { r.offset(-(*start_r as isize) - 1) } }

            // Instead of swapping one pair at the time, it is more efficient to perform a cyclic
            // permutation. This is not strictly equivalent to swapping, but produces a similar
            // result using fewer memory operations.
            unsafe {
                let tmp = ptr::read(left!());
                ptr::copy_nonoverlapping(right!(), left!(), 1);

                for _ in 1..count {
                    start_l = start_l.offset(1);
                    ptr::copy_nonoverlapping(left!(), right!(), 1);
                    start_r = start_r.offset(1);
                    ptr::copy_nonoverlapping(right!(), left!(), 1);
                }

                // Close the cycle, and forget `tmp` because its bits now live in the slice.
                ptr::copy_nonoverlapping(&tmp, right!(), 1);
                mem::forget(tmp);
                start_l = start_l.offset(1);
                start_r = start_r.offset(1);
            }
        }

        if start_l == end_l {
            // All out-of-order elements in the left block were moved. Move to the next block.
            l = unsafe { l.offset(block_l as isize) };
        }

        if start_r == end_r {
            // All out-of-order elements in the right block were moved. Move to the previous block.
            r = unsafe { r.offset(-(block_r as isize)) };
        }

        if is_done {
            break;
        }
    }

    // All that remains now is at most one block (either the left or the right) with out-of-order
    // elements that need to be moved. Such remaining elements can be simply shifted to the end
    // within their block.

    if start_l < end_l {
        // The left block remains.
        // Move its remaining out-of-order elements to the far right.
        debug_assert_eq!(width(l, r), block_l);
        while start_l < end_l {
            unsafe {
                end_l = end_l.offset(-1);
                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
                r = r.offset(-1);
            }
        }
        width(v.as_mut_ptr(), r)
    } else if start_r < end_r {
        // The right block remains.
        // Move its remaining out-of-order elements to the far left.
        debug_assert_eq!(width(l, r), block_r);
        while start_r < end_r {
            unsafe {
                end_r = end_r.offset(-1);
                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
                l = l.offset(1);
            }
        }
        width(v.as_mut_ptr(), l)
    } else {
        // Nothing else to do, we're done.
        width(v.as_mut_ptr(), l)
    }
}

/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
    where F: FnMut(&T, &T) -> bool
{
    let (mid, was_partitioned) = {
        // Place the pivot at the beginning of slice.
        v.swap(0, pivot);
        let (pivot, v) = v.split_at_mut(1);
        let pivot = &mut pivot[0];

        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
        let write_on_drop = WriteOnDrop {
            value: unsafe { Some(ptr::read(pivot)) },
            dest: pivot,
        };
        let pivot = write_on_drop.value.as_ref().unwrap();

        // Find the first pair of out-of-order elements.
        let mut l = 0;
        let mut r = v.len();
        unsafe {
            // Find the first element greater than or equal to the pivot.
            while l < r && is_less(v.get_unchecked(l), pivot) {
                l += 1;
            }

            // Find the last element smaller than the pivot.
            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
                r -= 1;
            }
        }

        // `l >= r` means the scans crossed without finding an out-of-order pair, i.e. the slice
        // was already partitioned; `partition_in_blocks` then receives an empty range.
        (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)

        // `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated
        // variable) back into the slice where it originally was. This step is critical in ensuring
        // safety!
    };

    // Place the pivot between the two partitions.
    v.swap(0, mid);

    (mid, was_partitioned)
}

/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
    where F: FnMut(&T, &T) -> bool
{
    // Place the pivot at the beginning of slice.
    v.swap(0, pivot);
    let (pivot, v) = v.split_at_mut(1);
    let pivot = &mut pivot[0];

    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
    // operation panics, the pivot will be automatically written back into the slice.
    let write_on_drop = WriteOnDrop {
        value: unsafe { Some(ptr::read(pivot)) },
        dest: pivot,
    };
    let pivot = write_on_drop.value.as_ref().unwrap();

    // Now partition the slice. Since no element is smaller than the pivot, `!is_less(pivot, x)`
    // means `x` is equal to the pivot.
    let mut l = 0;
    let mut r = v.len();
    loop {
        unsafe {
            // Find the first element greater than the pivot.
            while l < r && !is_less(pivot, v.get_unchecked(l)) {
                l += 1;
            }

            // Find the last element equal to the pivot.
            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
                r -= 1;
            }

            // Are we done?
            if l >= r {
                break;
            }

            // Swap the found pair of out-of-order elements.
            r -= 1;
            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
            l += 1;
        }
    }

    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
    l + 1

    // `write_on_drop` goes out of scope and writes the pivot (which is a stack-allocated variable)
    // back into the slice where it originally was. This step is critical in ensuring safety!
}

/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
#[cold]
fn break_patterns<T>(v: &mut [T]) {
    // Advances the state and returns the next pseudorandom number, using the generator
    // from the "Xorshift RNGs" paper by George Marsaglia.
    fn xorshift(state: &mut u32) -> u32 {
        *state ^= *state << 13;
        *state ^= *state >> 17;
        *state ^= *state << 5;
        *state
    }

    let len = v.len();
    if len < 8 {
        // Too short to bother scattering.
        return;
    }

    // Seed the generator with the length, so runs on equal-length slices are deterministic.
    let mut state = len as u32;

    // Random numbers are first reduced with this power-of-two mask.
    // `next_power_of_two` fits into `usize` because `len` never exceeds `isize::MAX`.
    let mask = len.next_power_of_two() - 1;

    // Pivot candidates will be picked near this index, so randomize its neighborhood.
    let pos = len / 4 * 2;

    for i in 0..3 {
        // Draw a full-width pseudorandom `usize` (two draws on 64-bit targets).
        let mut other = if mem::size_of::<usize>() <= 4 {
            xorshift(&mut state) as usize
        } else {
            let hi = (xorshift(&mut state) as u64) << 32;
            let lo = xorshift(&mut state) as u64;
            (hi | lo) as usize
        };

        // Cheap reduction into `[0, len - 1]`: mask down to a power of two, then
        // subtract `len` until the value fits.
        other &= mask;
        while other >= len {
            other -= len;
        }

        v.swap(pos - 1 + i, other);
    }
}

/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
    where F: FnMut(&T, &T) -> bool
{
    // Minimum length to choose the median-of-medians method.
    // Shorter slices use the simple median-of-three method.
    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
    // Maximum number of swaps that can be performed in this function.
    const MAX_SWAPS: usize = 4 * 3;

    let len = v.len();

    // Three indices near which we are going to choose a pivot.
    let mut a = len / 4 * 1;
    let mut b = len / 4 * 2;
    let mut c = len / 4 * 3;

    // Counts the total number of swaps we are about to perform while sorting indices.
    let mut swaps = 0;

    if len >= 8 {
        // Swaps indices so that `v[a] <= v[b]`. Note that `ptr::swap` exchanges the *index*
        // values themselves, not the slice elements they point at.
        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                ptr::swap(a, b);
                swaps += 1;
            }
        };

        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
            sort2(a, b);
            sort2(b, c);
            sort2(a, b);
        };

        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
            // The neighbors are in bounds because this branch requires `len >= 50`, which keeps
            // `a`, `b`, `c` well away from both ends of the slice.
            let mut sort_adjacent = |a: &mut usize| {
                let tmp = *a;
                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
            };

            // Find medians in the neighborhoods of `a`, `b`, and `c`.
            sort_adjacent(&mut a);
            sort_adjacent(&mut b);
            sort_adjacent(&mut c);
        }

        // Find the median among `a`, `b`, and `c`.
        sort3(&mut a, &mut b, &mut c);
    }

    if swaps < MAX_SWAPS {
        // Zero swaps means every sampled pair was already in order — likely sorted.
        (b, swaps == 0)
    } else {
        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
        // descending, so reversing will probably help sort it faster.
        v.reverse();
        (len - 1 - b, true)
    }
}

/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
    where F: FnMut(&T, &T) -> bool
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;

    // True if the last partitioning was reasonably balanced.
    let mut was_balanced = true;
    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
    let mut was_partitioned = true;

    loop {
        let len = v.len();

        // Very short slices get sorted using insertion sort.
        if len <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }

        // If too many bad pivot choices were made, simply fall back to heapsort in order to
        // guarantee `O(n log n)` worst-case.
        if limit == 0 {
            heapsort(v, is_less);
            return;
        }

        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
        // some elements around. Hopefully we'll choose a better pivot this time.
        if !was_balanced {
            break_patterns(v);
            limit -= 1;
        }

        // Choose a pivot and try guessing whether the slice is already sorted.
        let (pivot, likely_sorted) = choose_pivot(v, is_less);

        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
        // selection predicts the slice is likely already sorted...
        if was_balanced && was_partitioned && likely_sorted {
            // Try identifying several out-of-order elements and shifting them to correct
            // positions. If the slice ends up being completely sorted, we're done.
            if partial_insertion_sort(v, is_less) {
                return;
            }
        }

        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // Continue sorting elements greater than the pivot. The `{v}` block moves `v` out
                // of the binding so the new subslice borrow carries the full lifetime `'a`.
                v = &mut {v}[mid..];
                continue;
            }
        }

        // Partition the slice.
        let (mid, was_p) = partition(v, pivot, is_less);
        // A partition is "balanced" if the shorter side holds at least an eighth of the slice.
        was_balanced = cmp::min(mid, len - mid) >= len / 8;
        was_partitioned = was_p;

        // Split the slice into `left`, `pivot`, and `right`. Again, `{v}` moves `v` so the
        // resulting borrows live for `'a`.
        let (left, right) = {v}.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];

        // Recurse into the shorter side only in order to minimize the total number of recursive
        // calls and consume less stack space. Then just continue with the longer side (this is
        // akin to tail recursion).
        if left.len() < right.len() {
            recurse(left, is_less, pred, limit);
            v = right;
            pred = Some(pivot);
        } else {
            recurse(right, is_less, Some(pivot), limit);
            v = left;
        }
    }
}

731/// Sorts `v` using pattern-defeating quicksort, which is `O(n log n)` worst-case.
732fn quicksort<T, F>(v: &mut [T], mut is_less: F)
733    where F: FnMut(&T, &T) -> bool
734{
735    // Sorting has no meaningful behavior on zero-sized types.
736    if mem::size_of::<T>() == 0 {
737        return;
738    }
739
740    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
741    let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
742
743    recurse(v, &mut is_less, None, limit);
744}
745
746/// Sorts a slice.
747///
748/// This sort is in-place, unstable, and `O(n log n)` worst-case.
749///
750/// The implementation is based on Orson Peters' pattern-defeating quicksort.
751///
752/// # Examples
753///
754/// ```
755/// extern crate pdqsort;
756///
757/// let mut v = [-5, 4, 1, -3, 2];
758/// pdqsort::sort(&mut v);
759/// assert!(v == [-5, -3, 1, 2, 4]);
760/// ```
761pub fn sort<T>(v: &mut [T])
762    where T: Ord
763{
764    quicksort(v, |a, b| a.lt(b));
765}
766
767/// Sorts a slice using `compare` to compare elements.
768///
769/// This sort is in-place, unstable, and `O(n log n)` worst-case.
770///
771/// The implementation is based on Orson Peters' pattern-defeating quicksort.
772///
773/// # Examples
774///
775/// ```
776/// extern crate pdqsort;
777///
778/// let mut v = [5, 4, 1, 3, 2];
779/// pdqsort::sort_by(&mut v, |a, b| a.cmp(b));
780/// assert!(v == [1, 2, 3, 4, 5]);
781///
782/// // reverse sorting
783/// pdqsort::sort_by(&mut v, |a, b| b.cmp(a));
784/// assert!(v == [5, 4, 3, 2, 1]);
785/// ```
786pub fn sort_by<T, F>(v: &mut [T], mut compare: F)
787    where F: FnMut(&T, &T) -> Ordering
788{
789    quicksort(v, |a, b| compare(a, b) == Ordering::Less);
790}
791
792/// Sorts a slice using `f` to extract a key to compare elements by.
793///
794/// This sort is in-place, unstable, and `O(n log n)` worst-case.
795///
796/// The implementation is based on Orson Peters' pattern-defeating quicksort.
797///
798/// # Examples
799///
800/// ```
801/// extern crate pdqsort;
802///
803/// let mut v = [-5i32, 4, 1, -3, 2];
804/// pdqsort::sort_by_key(&mut v, |k| k.abs());
805/// assert!(v == [1, 2, -3, 4, -5]);
806/// ```
807pub fn sort_by_key<T, B, F>(v: &mut [T], mut f: F)
808    where F: FnMut(&T) -> B,
809          B: Ord
810{
811    quicksort(v, |a, b| f(a).lt(&f(b)));
812}
813
#[cfg(test)]
mod tests {
    extern crate std;
    extern crate rand;

    use self::rand::{Rng, thread_rng};
    use self::std::cmp::Ordering::{Greater, Less};
    use self::std::prelude::v1::*;

    #[test]
    fn test_sort_zero_sized_type() {
        // Bug fix: the original called `[(); 10].sort()`, which resolves to the *standard
        // library's* slice sort and never exercises this crate. Call our own `sort` instead.
        // Should not panic.
        super::sort(&mut [(); 10]);
        super::sort(&mut [(); 100]);
    }

    // Exercises `sort` and `sort_by` across lengths `2^n` with duplicate density
    // controlled by taking values modulo `2^l`.
    #[test]
    fn test_pdqsort() {
        let mut rng = thread_rng();
        for n in 0..16 {
            for l in 0..16 {
                let mut v = rng.gen_iter::<u64>()
                    .map(|x| x % (1 << l))
                    .take((1 << n))
                    .collect::<Vec<_>>();
                let mut v1 = v.clone();

                super::sort(&mut v);
                assert!(v.windows(2).all(|w| w[0] <= w[1]));

                super::sort_by(&mut v1, |a, b| a.cmp(b));
                assert!(v1.windows(2).all(|w| w[0] <= w[1]));

                super::sort_by(&mut v1, |a, b| b.cmp(a));
                assert!(v1.windows(2).all(|w| w[0] >= w[1]));
            }
        }

        // A single-element slice must come out unchanged.
        let mut v = [0xDEADBEEFu64];
        super::sort(&mut v);
        assert!(v == [0xDEADBEEF]);
    }

    // Same coverage as `test_pdqsort`, but for the heapsort fallback directly.
    #[test]
    fn test_heapsort() {
        let mut rng = thread_rng();
        for n in 0..16 {
            for l in 0..16 {
                let mut v = rng.gen_iter::<u64>()
                    .map(|x| x % (1 << l))
                    .take((1 << n))
                    .collect::<Vec<_>>();
                let mut v1 = v.clone();

                super::heapsort(&mut v, &mut |a, b| a.lt(b));
                assert!(v.windows(2).all(|w| w[0] <= w[1]));

                super::heapsort(&mut v1, &mut |a, b| a.cmp(b) == Less);
                assert!(v1.windows(2).all(|w| w[0] <= w[1]));

                super::heapsort(&mut v1, &mut |a, b| b.cmp(a) == Less);
                assert!(v1.windows(2).all(|w| w[0] >= w[1]));
            }
        }

        let mut v = [0xDEADBEEFu64];
        super::heapsort(&mut v, &mut |a, b| a.lt(b));
        assert!(v == [0xDEADBEEF]);
    }

    // A randomized, inconsistent comparator may produce garbage order, but the sort
    // must terminate without panicking or accessing out-of-bounds memory.
    #[test]
    fn test_crazy_compare() {
        let mut rng = thread_rng();

        let mut v = rng.gen_iter::<u64>()
            .map(|x| x % 1000)
            .take(100_000)
            .collect::<Vec<_>>();

        // Even though comparison is non-sensical, sorting must not panic.
        super::sort_by(&mut v, |_, _| *rng.choose(&[Less, Greater]).unwrap());
    }
}