use core::cmp;
use core::mem::{self, MaybeUninit};
use core::ptr;
use core::slice;
use crate::iter::{IndexedParallelIterator, ParallelIterator};
use crate::slice::ParallelSliceMut;
use crate::SendPtr;
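
/// When dropped, copies from `src` into `dest`.
///
/// This serves as a panic-safety guard: while an element is temporarily held only in a stack
/// copy, the hole guarantees that it is written back into the slice even if a comparison panics.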
struct InsertionHole<T> {
src: *const T,
dest: *mut T,
}
impl<T> Drop for InsertionHole<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
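
/// Inserts `v[v.len() - 1]` into the pre-sorted sequence `v[..v.len() - 1]` so that the whole
/// `v[..]` becomes sorted, shifting greater elements one slot to the right.
///
/// # Safety
///
/// The caller must ensure that `v.len() >= 2`.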
unsafe fn insert_tail<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
debug_assert!(v.len() >= 2);
let arr_ptr = v.as_mut_ptr();
let i = v.len() - 1;
unsafe {
let i_ptr = arr_ptr.add(i);
if is_less(&*i_ptr, &*i_ptr.sub(1)) {
let tmp = mem::ManuallyDrop::new(ptr::read(i_ptr));
let mut hole = InsertionHole {
src: &*tmp,
dest: i_ptr.sub(1),
};
ptr::copy_nonoverlapping(hole.dest, i_ptr, 1);
for j in (0..(i - 1)).rev() {
let j_ptr = arr_ptr.add(j);
if !is_less(&*tmp, &*j_ptr) {
break;
}
ptr::copy_nonoverlapping(j_ptr, hole.dest, 1);
hole.dest = j_ptr;
}
}
}
}
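
/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted,
/// shifting smaller elements one slot to the left.
///
/// # Safety
///
/// The caller must ensure that `v.len() >= 2`.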
unsafe fn insert_head<T, F>(v: &mut [T], is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
debug_assert!(v.len() >= 2);
unsafe {
if is_less(v.get_unchecked(1), v.get_unchecked(0)) {
let arr_ptr = v.as_mut_ptr();
let tmp = mem::ManuallyDrop::new(ptr::read(arr_ptr));
let mut hole = InsertionHole {
src: &*tmp,
dest: arr_ptr.add(1),
};
ptr::copy_nonoverlapping(arr_ptr.add(1), arr_ptr.add(0), 1);
for i in 2..v.len() {
if !is_less(v.get_unchecked(i), &*tmp) {
break;
}
ptr::copy_nonoverlapping(arr_ptr.add(i), arr_ptr.add(i - 1), 1);
hole.dest = arr_ptr.add(i);
}
}
}
}
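
/// Sorts `v` with insertion sort, assuming that `v[..offset]` is already sorted.
///
/// Panics if `offset == 0` or `offset > v.len()`.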
#[inline(never)]
fn insertion_sort_shift_left<T, F>(v: &mut [T], offset: usize, is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
assert!(offset != 0 && offset <= len);
for i in offset..len {
unsafe {
insert_tail(&mut v[..=i], is_less);
}
}
}
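
/// Sorts `v` with insertion sort, assuming that the suffix `v[offset..]` is already sorted.
///
/// Panics if `offset == 0`, `offset > v.len()`, or `v.len() < 2`.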
#[inline(never)]
fn insertion_sort_shift_right<T, F>(v: &mut [T], offset: usize, is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
assert!(offset != 0 && offset <= len && len >= 2);
for i in (0..offset).rev() {
unsafe {
insert_head(&mut v[i..len], is_less);
}
}
}
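
/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is fully sorted afterwards. Gives up after `MAX_STEPS` shifts, so
/// the cost stays linear in the worst case.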
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
where
F: Fn(&T, &T) -> bool,
{
const MAX_STEPS: usize = 5;
const SHORTEST_SHIFTING: usize = 50;
let len = v.len();
let mut i = 1;
for _ in 0..MAX_STEPS {
unsafe {
while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
i += 1;
}
}
if i == len {
return true;
}
if len < SHORTEST_SHIFTING {
return false;
}
v.swap(i - 1, i);
if i >= 2 {
insertion_sort_shift_left(&mut v[..i], i - 1, is_less);
insertion_sort_shift_right(&mut v[..i], 1, is_less);
}
}
false
}
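
/// Sorts `v` using heapsort, which guarantees O(n log n) worst-case regardless of input.
///
/// Used as the fallback once the quicksort recursion limit is exhausted.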
#[cold]
fn heapsort<T, F>(v: &mut [T], is_less: F)
where
F: Fn(&T, &T) -> bool,
{
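    // Restores the max-heap property for the subtree rooted at `node`.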
let sift_down = |v: &mut [T], mut node| {
loop {
let mut child = 2 * node + 1;
if child >= v.len() {
break;
}
if child + 1 < v.len() {
child += is_less(&v[child], &v[child + 1]) as usize;
}
if !is_less(&v[node], &v[child]) {
break;
}
v.swap(node, child);
node = child;
}
};
for i in (0..v.len() / 2).rev() {
sift_down(v, i);
}
for i in (1..v.len()).rev() {
v.swap(0, i);
sift_down(&mut v[..i], 0);
}
}
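
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`, and returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block by block in the style of BlockQuicksort: the offsets of
/// out-of-place elements are first collected into `offsets_l`/`offsets_r` with branchless code,
/// and the collected pairs are then swapped, which keeps branch mispredictions cheap.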
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
const BLOCK: usize = 128;
let mut l = v.as_mut_ptr();
let mut block_l = BLOCK;
let mut start_l = ptr::null_mut();
let mut end_l = ptr::null_mut();
let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
let mut r = unsafe { l.add(v.len()) };
let mut block_r = BLOCK;
let mut start_r = ptr::null_mut();
let mut end_r = ptr::null_mut();
let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
fn width<T>(l: *mut T, r: *mut T) -> usize {
assert!(size_of::<T>() > 0);
(r as usize - l as usize) / size_of::<T>()
}
loop {
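        // Once fewer than two full blocks fit between `l` and `r`, this is the final iteration:
        // the last two blocks are shrunk to cover exactly the remaining gap.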
let is_done = width(l, r) <= 2 * BLOCK;
if is_done {
let mut rem = width(l, r);
if start_l < end_l || start_r < end_r {
rem -= BLOCK;
}
if start_l < end_l {
block_r = rem;
} else if start_r < end_r {
block_l = rem;
} else {
block_l = rem / 2;
block_r = rem - block_l;
}
debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
debug_assert!(width(l, r) == block_l + block_r);
}
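        // If the left offset block is exhausted, scan the next `block_l` elements from the left
        // and record the offsets of those that belong on the right side (>= pivot).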
if start_l == end_l {
start_l = offsets_l.as_mut_ptr() as *mut u8;
end_l = start_l;
let mut elem = l;
for i in 0..block_l {
unsafe {
*end_l = i as u8;
end_l = end_l.add(!is_less(&*elem, pivot) as usize);
elem = elem.add(1);
}
}
}
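        // If the right offset block is exhausted, scan the next `block_r` elements from the right
        // and record the offsets of those that belong on the left side (< pivot).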
if start_r == end_r {
start_r = offsets_r.as_mut_ptr() as *mut u8;
end_r = start_r;
let mut elem = r;
for i in 0..block_r {
unsafe {
elem = elem.sub(1);
*end_r = i as u8;
end_r = end_r.add(is_less(&*elem, pivot) as usize);
}
}
}
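        // Swap as many out-of-place pairs as both offset buffers currently provide.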
let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
if count > 0 {
macro_rules! left {
() => {
l.add(usize::from(*start_l))
};
}
macro_rules! right {
() => {
r.sub(usize::from(*start_r) + 1)
};
}
unsafe {
let tmp = ptr::read(left!());
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
start_l = start_l.add(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
start_r = start_r.add(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
start_l = start_l.add(1);
start_r = start_r.add(1);
}
}
if start_l == end_l {
l = unsafe { l.add(block_l) };
}
if start_r == end_r {
r = unsafe { r.sub(block_r) };
}
if is_done {
break;
}
}
if start_l < end_l {
debug_assert_eq!(width(l, r), block_l);
while start_l < end_l {
unsafe {
end_l = end_l.sub(1);
ptr::swap(l.add(usize::from(*end_l)), r.sub(1));
r = r.sub(1);
}
}
width(v.as_mut_ptr(), r)
} else if start_r < end_r {
debug_assert_eq!(width(l, r), block_r);
while start_r < end_r {
unsafe {
end_r = end_r.sub(1);
ptr::swap(l, r.sub(usize::from(*end_r) + 1));
l = l.add(1);
}
}
width(v.as_mut_ptr(), l)
} else {
width(v.as_mut_ptr(), l)
}
}
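
/// Partitions `v` around the element at index `pivot` and puts it into its final sorted position.
///
/// Returns the new index of the pivot and whether the slice was already partitioned, i.e. no
/// elements had to be moved across the pivot.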
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
let (mid, was_partitioned) = {
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
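        // Read the pivot into a stack-allocated temporary for efficiency. `_pivot_guard` writes it
        // back into the slice even if `is_less` panics while the slot is logically empty.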
let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = InsertionHole {
src: &*tmp,
dest: pivot,
};
let pivot = &*tmp;
let mut l = 0;
let mut r = v.len();
unsafe {
while l < r && is_less(v.get_unchecked(l), pivot) {
l += 1;
}
while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
r -= 1;
}
}
(
l + partition_in_blocks(&mut v[l..r], pivot, is_less),
l >= r,
)
};
v.swap(0, mid);
(mid, was_partitioned)
}
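
/// Partitions `v` into elements equal to `v[pivot]`, followed by elements greater than `v[pivot]`,
/// and returns the number of elements equal to the pivot, including the pivot itself.
///
/// The caller only uses this when the pivot equals the slice's predecessor, so no element of `v`
/// can be smaller than the pivot.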
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
v.swap(0, pivot);
let (pivot, v) = v.split_at_mut(1);
let pivot = &mut pivot[0];
let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
let _pivot_guard = InsertionHole {
src: &*tmp,
dest: pivot,
};
let pivot = &*tmp;
let len = v.len();
if len == 0 {
return 0;
}
let mut l = 0;
let mut r = len;
loop {
unsafe {
while l < r && !is_less(pivot, v.get_unchecked(l)) {
l += 1;
}
loop {
r -= 1;
if l >= r || !is_less(pivot, v.get_unchecked(r)) {
break;
}
}
if l >= r {
break;
}
let ptr = v.as_mut_ptr();
ptr::swap(ptr.add(l), ptr.add(r));
l += 1;
}
}
l + 1
}
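
/// Scatters some elements around in an attempt to break up patterns that repeatedly produce
/// imbalanced partitions in quicksort.
///
/// Uses a simple xorshift generator seeded with the length, so the shuffling is deterministic.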
#[cold]
fn break_patterns<T>(v: &mut [T]) {
let len = v.len();
if len >= 8 {
let mut seed = len;
let mut gen_usize = || {
if usize::BITS <= 32 {
let mut r = seed as u32;
r ^= r << 13;
r ^= r >> 17;
r ^= r << 5;
seed = r as usize;
seed
} else {
let mut r = seed as u64;
r ^= r << 13;
r ^= r >> 7;
r ^= r << 17;
seed = r as usize;
seed
}
};
let modulus = len.next_power_of_two();
let pos = len / 4 * 2;
for i in 0..3 {
let mut other = gen_usize() & (modulus - 1);
if other >= len {
other -= len;
}
v.swap(pos - 1 + i, other);
}
}
}
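
/// Chooses a pivot in `v` and returns its index together with a flag saying whether the slice is
/// likely already sorted.
///
/// Short slices use the middle element; longer ones use median-of-3, upgraded to a median of
/// medians ("ninther") from `SHORTEST_MEDIAN_OF_MEDIANS` elements on. If pivot selection needed
/// the maximum number of swaps, the slice is probably descending, so it is reversed first.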
fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
const MAX_SWAPS: usize = 4 * 3;
let len = v.len();
#[allow(clippy::identity_op)]
let mut a = len / 4 * 1;
let mut b = len / 4 * 2;
let mut c = len / 4 * 3;
let mut swaps = 0;
if len >= 8 {
let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
ptr::swap(a, b);
swaps += 1;
}
};
let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
sort2(a, b);
sort2(b, c);
sort2(a, b);
};
if len >= SHORTEST_MEDIAN_OF_MEDIANS {
let mut sort_adjacent = |a: &mut usize| {
let tmp = *a;
sort3(&mut (tmp - 1), a, &mut (tmp + 1));
};
sort_adjacent(&mut a);
sort_adjacent(&mut b);
sort_adjacent(&mut c);
}
sort3(&mut a, &mut b, &mut c);
}
if swaps < MAX_SWAPS {
(b, swaps == 0)
} else {
v.reverse();
(len - 1 - b, true)
}
}
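
/// Sorts `v` recursively with pattern-defeating quicksort, handing large halves to Rayon tasks.
///
/// If the slice had a predecessor in the original array, it is passed as `pred`; every element of
/// `v` is greater than or equal to `*pred`. `limit` is the number of imbalanced partitions that
/// are tolerated before falling back to heapsort.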
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: u32)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
const MAX_INSERTION: usize = 20;
const MAX_SEQUENTIAL: usize = 2000;
let mut was_balanced = true;
let mut was_partitioned = true;
loop {
let len = v.len();
if len <= MAX_INSERTION {
if len >= 2 {
insertion_sort_shift_left(v, 1, is_less);
}
return;
}
if limit == 0 {
heapsort(v, is_less);
return;
}
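        // If the last partitioning was imbalanced, shuffle some elements to break up unlucky
        // patterns and spend one unit of the recursion budget.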
if !was_balanced {
break_patterns(v);
limit -= 1;
}
let (pivot, likely_sorted) = choose_pivot(v, is_less);
if was_balanced && was_partitioned && likely_sorted {
if partial_insertion_sort(v, is_less) {
return;
}
}
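        // If the chosen pivot equals the predecessor, it is the minimum of this slice: split off
        // everything equal to it and keep sorting only the strictly greater rest. This is what
        // makes the sort fast on inputs with many duplicates.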
if let Some(&mut ref p) = pred {
if !is_less(p, &v[pivot]) {
let mid = partition_equal(v, pivot, is_less);
v = &mut v[mid..];
continue;
}
}
let (mid, was_p) = partition(v, pivot, is_less);
was_balanced = cmp::min(mid, len - mid) >= len / 8;
was_partitioned = was_p;
let (left, right) = v.split_at_mut(mid);
let (pivot, right) = right.split_at_mut(1);
let pivot = &mut pivot[0];
if Ord::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
if left.len() < right.len() {
recurse(left, is_less, pred, limit);
v = right;
pred = Some(pivot);
} else {
recurse(right, is_less, Some(pivot), limit);
v = left;
}
} else {
rayon_core::join(
|| recurse(left, is_less, pred, limit),
|| recurse(right, is_less, Some(pivot), limit),
);
break;
}
}
}
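
/// Sorts `v` in parallel using unstable pattern-defeating quicksort.
///
/// Sorting zero-sized types is a no-op, and the recursion budget is `floor(log2(len)) + 1`
/// imbalanced partitions before the heapsort fallback kicks in.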
pub(super) fn par_quicksort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
if size_of::<T>() == 0 {
return;
}
let limit = usize::BITS - v.len().leading_zeros();
recurse(v, &is_less, None, limit);
}
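
/// Merges the sorted runs `v[..mid]` and `v[mid..]` in place, using `buf` as scratch space.
///
/// The shorter run is copied into `buf` first; `MergeHole` copies whatever is still in the buffer
/// back into the slice if a comparison panics.
///
/// # Safety
///
/// Both runs must be sorted, `mid` must be in bounds, and `buf` must be valid for at least
/// `min(mid, v.len() - mid)` elements.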
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &F)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
let v = v.as_mut_ptr();
let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
let mut hole;
if mid <= len - mid {
unsafe {
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole {
start: buf,
end: buf.add(mid),
dest: v,
};
}
let left = &mut hole.start;
let mut right = v_mid;
let out = &mut hole.dest;
while *left < hole.end && right < v_end {
unsafe {
let is_l = is_less(&*right, &**left);
let to_copy = if is_l { right } else { *left };
ptr::copy_nonoverlapping(to_copy, *out, 1);
*out = out.add(1);
right = right.add(is_l as usize);
*left = left.add(!is_l as usize);
}
}
} else {
unsafe {
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole {
start: buf,
end: buf.add(len - mid),
dest: v_mid,
};
}
let left = &mut hole.dest;
let right = &mut hole.end;
let mut out = v_end;
while v < *left && buf < *right {
unsafe {
let is_l = is_less(&*right.sub(1), &*left.sub(1));
*left = left.sub(is_l as usize);
*right = right.sub(!is_l as usize);
let to_copy = if is_l { *left } else { *right };
out = out.sub(1);
ptr::copy_nonoverlapping(to_copy, out, 1);
}
}
}
}
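
/// When dropped, copies the range `start..end` into `dest`.
///
/// This keeps the merges panic-safe: elements still sitting in scratch memory (or in the source
/// half of a ping-pong merge) are copied to their destination before unwinding continues.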
struct MergeHole<T> {
start: *mut T,
end: *mut T,
dest: *mut T,
}
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
unsafe {
let len = self.end.offset_from(self.start) as usize;
ptr::copy_nonoverlapping(self.start, self.dest, len);
}
}
}
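
/// The outcome of sequentially sorting one chunk.
///
/// `NonDescending` and `Descending` mean the chunk was already a single monotone streak and was
/// left completely intact; `Sorted` means it was actually sorted in place.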
#[must_use]
#[derive(Clone, Copy, PartialEq, Eq)]
enum MergeSortResult {
NonDescending,
Descending,
Sorted,
}
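
/// Sorts `v` sequentially with a timsort-style merge sort: natural runs are detected, short runs
/// are extended with insertion sort, and adjacent runs are merged until one remains.
///
/// If the whole slice turns out to be a single non-descending or descending streak, it is left
/// untouched and only the corresponding result is reported.
///
/// # Safety
///
/// `buf_ptr` must point to scratch memory valid for at least `v.len()` elements, and `T` must not
/// be a zero-sized type.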
unsafe fn merge_sort<T, CmpF>(v: &mut [T], buf_ptr: *mut T, is_less: &CmpF) -> MergeSortResult
where
CmpF: Fn(&T, &T) -> bool,
{
debug_assert_ne!(size_of::<T>(), 0);
let len = v.len();
let mut runs = Vec::new();
let mut end = 0;
let mut start = 0;
while end < len {
let (streak_end, was_reversed) = find_streak(&v[start..], is_less);
end += streak_end;
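        // The whole slice is one streak: report that without touching `v`, so the parallel caller
        // can cheaply concatenate or reverse adjacent chunks.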
if start == 0 && end == len {
return if was_reversed {
MergeSortResult::Descending
} else {
MergeSortResult::NonDescending
};
}
if was_reversed {
v[start..end].reverse();
}
end = provide_sorted_batch(v, start, end, is_less);
runs.push(TimSortRun {
start,
len: end - start,
});
start = end;
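        // Merge adjacent runs until the timsort-style stack invariants hold again.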
while let Some(r) = collapse(runs.as_slice(), len) {
let left = runs[r];
let right = runs[r + 1];
let merge_slice = &mut v[left.start..right.start + right.len];
unsafe {
merge(merge_slice, left.len, buf_ptr, is_less);
}
runs[r + 1] = TimSortRun {
start: left.start,
len: left.len + right.len,
};
runs.remove(r);
}
}
debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
return MergeSortResult::Sorted;
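    // Examines the stack of runs and returns the index of the next pair of adjacent runs to
    // merge, if any, so that run lengths keep growing down the stack and the stack stays small.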
#[inline]
fn collapse(runs: &[TimSortRun], stop: usize) -> Option<usize> {
let n = runs.len();
if n >= 2
&& (runs[n - 1].start + runs[n - 1].len == stop
|| runs[n - 2].len <= runs[n - 1].len
|| (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
|| (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
{
if n >= 3 && runs[n - 3].len < runs[n - 1].len {
Some(n - 3)
} else {
Some(n - 2)
}
} else {
None
}
}
}
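
/// A sorted run of elements, described by its start index in the slice and its length.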
#[derive(Clone, Copy, Debug)]
struct TimSortRun {
len: usize,
start: usize,
}
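
/// Extends a short natural run `v[start..end]` to at least `MIN_INSERTION_RUN` elements with
/// insertion sort and returns the new end of the run. Insertion sort beats merging on very short
/// runs, so this reduces the number of merges.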
fn provide_sorted_batch<T, F>(v: &mut [T], start: usize, mut end: usize, is_less: &F) -> usize
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
assert!(end >= start && end <= len);
const MIN_INSERTION_RUN: usize = 10;
let start_end_diff = end - start;
if start_end_diff < MIN_INSERTION_RUN && end < len {
end = cmp::min(start + MIN_INSERTION_RUN, len);
let presorted_start = cmp::max(start_end_diff, 1);
insertion_sort_shift_left(&mut v[start..end], presorted_start, is_less);
}
end
}
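
/// Finds the longest streak at the start of `v` that is either non-descending or strictly
/// descending, returning its length and whether it was descending.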
fn find_streak<T, F>(v: &[T], is_less: &F) -> (usize, bool)
where
F: Fn(&T, &T) -> bool,
{
let len = v.len();
if len < 2 {
return (len, false);
}
let mut end = 2;
unsafe {
let assume_reverse = is_less(v.get_unchecked(1), v.get_unchecked(0));
if assume_reverse {
while end < len && is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) {
end += 1;
}
(end, true)
} else {
while end < len && !is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) {
end += 1;
}
(end, false)
}
}
}
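
/// Splits two sorted slices so that they can be merged in parallel: returns `(l, r)` such that
/// merging `left[..l]` with `right[..r]` and `left[l..]` with `right[r..]` independently yields
/// the same result as merging `left` with `right`. The longer slice is split in half and the
/// matching position in the other slice is found by binary search.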
fn split_for_merge<T, F>(left: &[T], right: &[T], is_less: &F) -> (usize, usize)
where
F: Fn(&T, &T) -> bool,
{
let left_len = left.len();
let right_len = right.len();
if left_len >= right_len {
let left_mid = left_len / 2;
let mut a = 0;
let mut b = right_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[m], &left[left_mid]) {
a = m + 1;
} else {
b = m;
}
}
(left_mid, a)
} else {
let right_mid = right_len / 2;
let mut a = 0;
let mut b = left_len;
while a < b {
let m = a + (b - a) / 2;
if is_less(&right[right_mid], &left[m]) {
b = m;
} else {
a = m + 1;
}
}
(a, right_mid)
}
}
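
/// Merges the sorted slices `left` and `right` into `dest`, splitting the work into parallel
/// Rayon tasks whenever both sides are non-empty and the combined length reaches
/// `MAX_SEQUENTIAL`. The `State` guard makes the sequential path panic-safe by copying every
/// unmerged element into `dest` on unwind.
///
/// # Safety
///
/// `dest` must be valid for writes of `left.len() + right.len()` elements and must not overlap
/// either input.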
unsafe fn par_merge<T, F>(left: &mut [T], right: &mut [T], dest: *mut T, is_less: &F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
const MAX_SEQUENTIAL: usize = 5000;
let left_len = left.len();
let right_len = right.len();
let mut s = State {
left_start: left.as_mut_ptr(),
left_end: left.as_mut_ptr().add(left_len),
right_start: right.as_mut_ptr(),
right_end: right.as_mut_ptr().add(right_len),
dest,
};
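    // Merge sequentially if either side is empty or the total length is small.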
if left_len == 0 || right_len == 0 || left_len + right_len < MAX_SEQUENTIAL {
while s.left_start < s.left_end && s.right_start < s.right_end {
let is_l = is_less(&*s.right_start, &*s.left_start);
let to_copy = if is_l { s.right_start } else { s.left_start };
ptr::copy_nonoverlapping(to_copy, s.dest, 1);
s.dest = s.dest.add(1);
s.right_start = s.right_start.add(is_l as usize);
s.left_start = s.left_start.add(!is_l as usize);
}
} else {
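        // Split both runs at compatible positions and merge the two pairs of halves in parallel.
        // The `s` guard is forgotten because the recursive calls take over every element.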
let (left_mid, right_mid) = split_for_merge(left, right, is_less);
let (left_l, left_r) = left.split_at_mut(left_mid);
let (right_l, right_r) = right.split_at_mut(right_mid);
mem::forget(s);
let dest_l = SendPtr(dest);
let dest_r = SendPtr(dest.add(left_l.len() + right_l.len()));
rayon_core::join(
move || par_merge(left_l, right_l, dest_l.get(), is_less),
move || par_merge(left_r, right_r, dest_r.get(), is_less),
);
}
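
    /// Panic-safety guard for the sequential merge: when dropped, copies the unmerged remainders
    /// of both runs into `dest` so that no element is lost.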
struct State<T> {
left_start: *mut T,
left_end: *mut T,
right_start: *mut T,
right_end: *mut T,
dest: *mut T,
}
impl<T> Drop for State<T> {
fn drop(&mut self) {
unsafe {
let left_len = self.left_end.offset_from(self.left_start) as usize;
ptr::copy_nonoverlapping(self.left_start, self.dest, left_len);
self.dest = self.dest.add(left_len);
let right_len = self.right_end.offset_from(self.right_start) as usize;
ptr::copy_nonoverlapping(self.right_start, self.dest, right_len);
}
}
}
}
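
/// Recursively merges the pre-sorted `chunks` of `v`, using `buf` as scratch space of equal
/// length. Each level flips `into_buf`, so results ping-pong between `v` and `buf`; the top-level
/// call passes `into_buf = false`, which makes the final merge land back in `v`. The `MergeHole`
/// guard copies the whole range into the destination if a comparison panics, so every element
/// still ends up there exactly once.
///
/// # Safety
///
/// `v` and `buf` must be valid for the full range covered by `chunks`, and the chunks must be
/// adjacent, individually sorted sub-ranges.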
unsafe fn merge_recurse<T, F>(
v: *mut T,
buf: *mut T,
chunks: &[(usize, usize)],
into_buf: bool,
is_less: &F,
) where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
let len = chunks.len();
debug_assert!(len > 0);
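    // Base case: a single chunk needs no merging; it only has to be copied into `buf` if this
    // level is supposed to produce its output there.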
if len == 1 {
if into_buf {
let (start, end) = chunks[0];
let src = v.add(start);
let dest = buf.add(start);
ptr::copy_nonoverlapping(src, dest, end - start);
}
return;
}
let (start, _) = chunks[0];
let (mid, _) = chunks[len / 2];
let (_, end) = chunks[len - 1];
let (left, right) = chunks.split_at(len / 2);
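    // This level merges chunks from `src` into `dest`; the recursive calls flip the direction so
    // that their results end up in `src`, ready to be merged here.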
let (src, dest) = if into_buf { (v, buf) } else { (buf, v) };
let guard = MergeHole {
start: src.add(start),
end: src.add(end),
dest: dest.add(start),
};
let v = SendPtr(v);
let buf = SendPtr(buf);
rayon_core::join(
move || merge_recurse(v.get(), buf.get(), left, !into_buf, is_less),
move || merge_recurse(v.get(), buf.get(), right, !into_buf, is_less),
);
mem::forget(guard);
let src_left = slice::from_raw_parts_mut(src.add(start), mid - start);
let src_right = slice::from_raw_parts_mut(src.add(mid), end - mid);
par_merge(src_left, src_right, dest.add(start), is_less);
}
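
/// Sorts `v` in parallel using a stable merge sort.
///
/// Short slices are handled by insertion sort. Otherwise the slice is cut into chunks of
/// `CHUNK_LENGTH` elements which are sorted in parallel, adjacent chunks that were already
/// monotone are concatenated, and everything is finally merged in parallel through a scratch
/// buffer as large as `v`.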
pub(super) fn par_mergesort<T, F>(v: &mut [T], is_less: F)
where
T: Send,
F: Fn(&T, &T) -> bool + Sync,
{
const MAX_INSERTION: usize = 20;
const CHUNK_LENGTH: usize = 2000;
if size_of::<T>() == 0 {
return;
}
let len = v.len();
if len <= MAX_INSERTION {
if len >= 2 {
insertion_sort_shift_left(v, 1, &is_less);
}
return;
}
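    // Allocate scratch memory. The vector keeps length 0 so that it only ever holds shallow
    // copies of `v`'s elements and never runs their destructors.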
let mut buf = Vec::<T>::with_capacity(len);
let buf = buf.as_mut_ptr();
if len <= CHUNK_LENGTH {
let res = unsafe { merge_sort(v, buf, &is_less) };
if res == MergeSortResult::Descending {
v.reverse();
}
return;
}
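    // Sort chunks of CHUNK_LENGTH elements in parallel. Chunks that are already non-descending or
    // descending are left intact and merely reported as such, so they can be concatenated below.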
let mut iter = {
let buf = SendPtr(buf);
let is_less = &is_less;
v.par_chunks_mut(CHUNK_LENGTH)
.with_max_len(1)
.enumerate()
.map(move |(i, chunk)| {
let l = CHUNK_LENGTH * i;
let r = l + chunk.len();
unsafe {
let buf = buf.get().add(l);
(l, r, merge_sort(chunk, buf, is_less))
}
})
.collect::<Vec<_>>()
.into_iter()
.peekable()
};
let mut chunks = Vec::with_capacity(iter.len());
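    // Concatenate adjacent chunks that were left intact and continue the same monotone order,
    // then reverse descending ranges so that every recorded chunk ends up sorted.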
while let Some((a, mut b, res)) = iter.next() {
if res != MergeSortResult::Sorted {
while let Some(&(x, y, r)) = iter.peek() {
if r == res && (r == MergeSortResult::Descending) == is_less(&v[x], &v[x - 1]) {
b = y;
iter.next();
} else {
break;
}
}
}
if res == MergeSortResult::Descending {
v[a..b].reverse();
}
chunks.push((a, b));
}
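    // All chunks are now sorted; merge them together in parallel.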
unsafe {
merge_recurse(v.as_mut_ptr(), buf, &chunks, false, &is_less);
}
}
#[cfg(test)]
mod tests {
use super::heapsort;
use super::split_for_merge;
use rand::distr::Uniform;
use rand::{rng, Rng};
#[test]
fn test_heapsort() {
let rng = &mut rng();
for len in (0..25).chain(500..501) {
for &modulus in &[5, 10, 100] {
let dist = Uniform::new(0, modulus).unwrap();
for _ in 0..100 {
let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
let mut tmp = v.clone();
heapsort(&mut tmp, |a, b| a < b);
assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
let mut tmp = v.clone();
heapsort(&mut tmp, |a, b| a > b);
assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
}
}
}
let mut v: Vec<_> = (0..100).collect();
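        // A self-inconsistent comparator must not cause a crash or an endless loop; the result is
        // just some permutation, which the follow-up proper sort then orders again.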
heapsort(&mut v, |_, _| rand::rng().random());
heapsort(&mut v, |a, b| a < b);
for (i, &entry) in v.iter().enumerate() {
assert_eq!(entry, i);
}
}
#[test]
fn test_split_for_merge() {
fn check(left: &[u32], right: &[u32]) {
let (l, r) = split_for_merge(left, right, &|&a, &b| a < b);
assert!(left[..l]
.iter()
.all(|&x| right[r..].iter().all(|&y| x <= y)));
assert!(right[..r].iter().all(|&x| left[l..].iter().all(|&y| x < y)));
}
check(&[1, 2, 2, 2, 2, 3], &[1, 2, 2, 2, 2, 3]);
check(&[1, 2, 2, 2, 2, 3], &[]);
check(&[], &[1, 2, 2, 2, 2, 3]);
let rng = &mut rng();
for _ in 0..100 {
let limit: u32 = rng.random_range(1..21);
let left_len: usize = rng.random_range(0..20);
let right_len: usize = rng.random_range(0..20);
let mut left = rng
.sample_iter(&Uniform::new(0, limit).unwrap())
.take(left_len)
.collect::<Vec<_>>();
let mut right = rng
.sample_iter(&Uniform::new(0, limit).unwrap())
.take(right_len)
.collect::<Vec<_>>();
left.sort();
right.sort();
check(&left, &right);
}
}
}