use std::fmt::{Debug};
use std::cmp::{self, Ordering};
use std::mem::swap;
use super::indices;
/// Marker trait for element types accepted by the sorts in this module:
/// totally ordered and debug-printable.
pub trait Data: Ord + Debug {}

/// Blanket impl: every `Ord + Debug` type is automatically `Data`.
impl<T> Data for T where T: Ord + Debug {}
// Debug-build logging macro: `puts!(...)` forwards its arguments to
// `println!` when compiled with `debug_assertions`.
#[cfg(debug_assertions)]
macro_rules! puts {
    ($($t:tt)*) => {
        println!($($t)*)
    }
}
// Release-build variant: expands to nothing, so the format arguments
// are discarded without being evaluated.
#[cfg(not(debug_assertions))]
macro_rules! puts {
    ($($t:tt)*) => {
    }
}
/// Sorts `v` in place with a recursive quicksort built on the branded-index
/// API from `super::indices` (indices proven in-bounds by the type system).
///
/// Small slices (len <= 16) are delegated to `insertion_sort_ranges`;
/// the pivot is the median of the first, middle and last elements.
pub fn quicksort<T: Data>(v: &mut [T]) {
    indices(v, |mut v, range| {
        if let Ok(range) = range.nonempty() {
            // Insertion sort beats quicksort on small inputs; at this point
            // `range` covers the whole (sub)slice, so sorting `v[..]` is the
            // same as sorting the range.
            if range.len() <= 16 {
                insertion_sort_ranges(&mut v[..], |x, y| x < y);
                return;
            }
            // Median-of-three pivot selection over first/middle/last.
            let (l, m, r) = (range.first(), range.upper_middle(), range.last());
            // Defensive: single-element range (unreachable after the len > 16
            // check above, but cheap).
            if r == l {
                return;
            }
            let mut pivot = if v[l] <= v[m] && v[m] <= v[r] {
                m
            } else if v[m] <= v[l] && v[l] <= v[r] {
                l
            } else {
                r
            };
            // Hoare-style partition: `scan` shrinks from both ends via
            // advance()/advance_back() (semantics from `super::indices`);
            // elements greater than the pivot value get swapped to the back.
            let mut scan = range;
            'main: loop {
                if v[scan.first()] > v[pivot] {
                    loop {
                        if v[scan.last()] <= v[pivot] {
                            v.swap(scan.first(), scan.last());
                            // The pivot element itself may have moved; keep
                            // `pivot` pointing at its value.
                            if scan.last() == pivot {
                                pivot = scan.first();
                            }
                            break;
                        }
                        if !scan.advance_back() {
                            break 'main;
                        }
                    }
                }
                if !scan.advance() {
                    // Cursors met: place the pivot at the split point.
                    v.swap(pivot, scan.first());
                    break;
                }
            }
            // Split at the partition point and recurse on both halves.
            let (a, b) = v.split_at(scan.first());
            quicksort(&mut v[a]);
            quicksort(&mut v[b]);
        }
    });
}
/// Plain-`usize` variant of `quicksort` (bounds-checked indexing instead of
/// the branded-index API). Sorts `v` in place; same algorithm shape:
/// insertion sort under 17 elements, median-of-three pivot, Hoare-style
/// partition with live pivot tracking.
pub fn quicksort_bounds<T: Data>(v: &mut [T]) {
    // Small slices: insertion sort wins on constant factors.
    if v.len() <= 16 {
        insertion_sort_ranges(&mut v[..], |x, y| x < y);
        return;
    }
    // Median-of-three pivot over first/middle/last elements.
    let (l, m, r) = (0, v.len() / 2, v.len() - 1);
    let mut pivot = if v[l] <= v[m] && v[m] <= v[r] {
        m
    } else if v[m] <= v[l] && v[l] <= v[r] {
        l
    } else {
        r
    };
    // Partition: `i` scans forward past elements <= pivot, `j` scans
    // backward past elements > pivot; out-of-place pairs are swapped.
    let mut i = 0;
    let mut j = v.len() - 1;
    'main: loop {
        if v[i] > v[pivot] {
            loop {
                if v[j] <= v[pivot] {
                    v.swap(i, j);
                    // The pivot element may just have been moved to `i`.
                    if j == pivot {
                        pivot = i;
                    }
                    break;
                }
                j -= 1;
                if i >= j { break 'main; }
            }
        }
        if i >= j {
            // Cursors met: drop the pivot value into its final slot.
            v.swap(pivot, i);
            break;
        }
        i += 1;
    }
    // Recurse on both sides of the split point.
    let (a, b) = v.split_at_mut(i);
    quicksort_bounds(a);
    quicksort_bounds(b);
}
#[test]
fn test_quicksort() {
    // (input, expected) pairs covering two-element swaps, duplicates,
    // reversed, already-sorted and negative-number slices.
    let cases: Vec<(Vec<i32>, Vec<i32>)> = vec![
        (vec![1, 0], vec![0, 1]),
        (vec![1, 2, 2, 1, 3, 3, 2, 3], vec![1, 1, 2, 2, 2, 3, 3, 3]),
        (vec![1, 4, 2, 0, 3], vec![0, 1, 2, 3, 4]),
        (vec![4, 3, 2, 1, 0], vec![0, 1, 2, 3, 4]),
        (vec![0, 1, 2, 3, 4], vec![0, 1, 2, 3, 4]),
        (vec![0, 1, 5, 2, 3, 4], vec![0, 1, 2, 3, 4, 5]),
        (vec![0, 1, 1, -1, 0, -1], vec![-1, -1, 0, 0, 1, 1]),
    ];
    for (mut input, expected) in cases {
        quicksort(&mut input);
        assert_eq!(input, expected);
    }
}
/// Insertion sort expressed with the branded-index API, iterating plain
/// branded indices. `less_than` is the strict "comes before" predicate.
pub fn insertion_sort_indexes<T, F>(v: &mut [T], mut less_than: F) where F: FnMut(&T, &T) -> bool {
    indices(v, move |mut v, r| {
        for i in r {
            // `scan_from_rev` scans backwards from `i` while the predicate
            // holds and `rotate1_up` rotates that tail one step, dropping
            // v[i] into place — semantics from `super::indices` (verify there).
            let jtail = v.scan_from_rev(i, |j_elt| less_than(&v[i], j_elt));
            v.rotate1_up(jtail);
        }
    });
}
/// Insertion sort using nonempty-range stepping from the branded-index API.
/// `less_than` is the strict "comes before" predicate. Used as the small-
/// slice base case by the quicksorts in this module.
pub fn insertion_sort_ranges<T, F>(v: &mut [T], mut less_than: F) where F: FnMut(&T, &T) -> bool {
    indices(v, move |mut v, r| {
        if let Ok(mut i) = r.nonempty() {
            while i.advance() {
                // Scan backwards from the current element for the run it
                // belongs in front of, then rotate that run one slot up —
                // semantics of scan_from_rev/rotate1_up per `super::indices`.
                let jtail = v.scan_from_rev(i.first(), |j_elt| less_than(&v[i.first()], j_elt));
                v.rotate1_up(jtail);
            }
        }
    });
}
/// Insertion sort variant driven by the pointer-index flavor of the
/// branded-index API (`pointer_range` / `scan_tail_` / `rotate1_` —
/// see `super::indices` for their exact contracts).
pub fn insertion_sort_pointerindex<T, F>(v: &mut [T], mut less_than: F) where F: FnMut(&T, &T) -> bool {
    indices(v, move |mut v, _r| {
        for i in v.pointer_range() {
            // Same shape as the other insertion sorts: find the tail that the
            // current element precedes, rotate it one step to insert.
            let jtail = v.scan_tail_(i, |j_elt| less_than(&v[i], j_elt));
            v.rotate1_(jtail);
        }
    });
}
/// Insertion sort in the style of the standard library's historical
/// implementation: for each element, find its slot by scanning backwards,
/// then shift the intervening run up with a single `memmove` and write the
/// element into place.
///
/// `less_than` is the strict "comes before" predicate.
pub fn insertion_sort_rust<T, F>(v: &mut [T], mut less_than: F) where F: FnMut(&T, &T) -> bool {
    use std::mem;
    use std::ptr;
    let base = v.as_mut_ptr();
    for i in 1..v.len() {
        unsafe {
            // The element being inserted stays at index `i` while we search;
            // comparisons read it through `cur`.
            let cur = base.add(i) as *const T;
            let mut dest = i;
            while dest > 0 && less_than(&*cur, &*base.add(dest - 1)) {
                dest -= 1;
            }
            if dest != i {
                // SAFETY: `dest < i < v.len()`, so every offset below is
                // in-bounds. `tmp` is a bitwise copy of v[i]; after the
                // shift overwrites slot `i`, `tmp` is copied into `dest`
                // and forgotten so the value is never dropped twice.
                let tmp = ptr::read(cur);
                ptr::copy(base.add(dest), base.add(dest + 1), i - dest);
                ptr::copy_nonoverlapping(&tmp, base.add(dest), 1);
                mem::forget(tmp);
            }
        }
    }
}
/// Swaps the first `min(a.len(), b.len())` elements of `a` and `b`
/// pairwise; any excess in the longer slice is left untouched.
fn block_swap2<T>(a: &mut [T], b: &mut [T]) {
    let count = cmp::min(a.len(), b.len());
    // swap_with_slice is the standard-library equivalent of the manual
    // element-by-element swap loop, without per-index bounds checks.
    a[..count].swap_with_slice(&mut b[..count]);
}
/// Merges the two adjacent sorted runs `data[..left_end]` and
/// `data[left_end..]` into sorted order, using `buffer` as scratch space.
/// Branded-index variant.
///
/// The left run is swapped into `buffer` first and then merged back, so on
/// return the used part of `buffer` holds its original contents again
/// (all moves are symmetric swaps — see the test's all-zeros check).
///
/// # Panics
/// Panics if `left_end` exceeds `data.len()` or `buffer.len()`.
pub fn merge_internal_indices<T: Ord>(data: &mut [T], left_end: usize, buffer: &mut [T]) {
    debug_assert!(data.len() >= 1);
    if left_end > data.len() || left_end > buffer.len() {
        panic!("merge_internal: data or buffer too short");
    }
    indices(data, move |mut data, r| {
        indices(&mut buffer[..left_end], |mut buffer, rb| {
            let right_end = data.len();
            // Nothing to do when the right run is empty.
            if right_end - left_end == 0 {
                return;
            }
            // Move the left run out of the way into the scratch buffer
            // (block_swap2 swaps min(len) elements, i.e. exactly left_end).
            block_swap2(&mut data[r], &mut buffer[rb]);
            // `i` walks the buffered left run, `j` the right run in place,
            // `out` the output slot; each is vetted into a branded index,
            // bailing out if the corresponding run is empty.
            let mut i = match rb.contains(0) {
                Some(i) => i,
                None => return,
            };
            let mut out = match r.contains(0) {
                Some(i) => i,
                None => return,
            };
            let mut j = match r.contains(left_end) {
                Some(i) => i,
                None => return,
            };
            loop {
                if buffer[i] <= data[j] {
                    // Left element is next: swap it back into the output slot.
                    swap(&mut buffer[i], &mut data[out]);
                    data.forward(&mut out);
                    if !buffer.forward(&mut i) { break; }
                } else {
                    // Right element is next; it already lives in `data`.
                    data.swap(j, out);
                    data.forward(&mut out);
                    if !data.forward(&mut j) {
                        // Right run exhausted: bulk-swap the remaining
                        // buffered left run into the remaining output slots.
                        block_swap2(&mut buffer[i..], &mut data[out..]);
                        break;
                    }
                }
            }
        })
    })
}
/// Range-based twin of `merge_internal_indices`: merges the adjacent sorted
/// runs `data[..left_end]` and `data[left_end..]` using `buffer` as scratch,
/// driving the merge with nonempty branded ranges instead of single indices.
/// On return the used part of `buffer` holds its original contents again.
///
/// # Panics
/// Panics if `left_end` exceeds `data.len()` or `buffer.len()`.
pub fn merge_internal_ranges<T: Ord>(data: &mut [T], left_end: usize, buffer: &mut [T]) {
    debug_assert!(data.len() >= 1);
    if left_end > data.len() || left_end > buffer.len() {
        panic!("merge_internal: data or buffer too short");
    }
    indices(data, |mut data, r| {
        indices(&mut buffer[..left_end], |mut buffer, rb| {
            // `i`: remaining buffered left run; `out`: remaining output
            // slots; `j`: remaining right run. Any empty run means there is
            // nothing to merge, so bail before touching the data.
            let mut i = match rb.nonempty() {
                Ok(r) => r,
                Err(_) => return,
            };
            let mut out = match r.nonempty() {
                Ok(r) => r,
                Err(_) => return,
            };
            let mut j = match r.split_at(left_end).1.nonempty() {
                Ok(r) => r,
                Err(_) => return,
            };
            // Move the left run into the scratch buffer (swaps exactly
            // left_end elements, the min of the two lengths).
            block_swap2(&mut data[r], &mut buffer[rb]);
            loop {
                if buffer[i.first()] <= data[j.first()] {
                    // Left element is next: swap it back into the output slot.
                    swap(&mut buffer[i.first()], &mut data[out.first()]);
                    if !out.advance() { return; }
                    if !i.advance() { return; }
                } else {
                    // Right element is next; it already lives in `data`.
                    data.swap(j.first(), out.first());
                    if !out.advance() { return; }
                    if !j.advance() {
                        // Right run exhausted: bulk-swap the remaining
                        // buffered left run into the remaining output slots.
                        block_swap2(&mut buffer[i], &mut data[out]);
                        break;
                    }
                }
            }
        })
    })
}
#[test]
fn test_merge_internal() {
    let mut buffer = [0; 128];
    // Two sorted runs of different lengths, concatenated.
    let left = (0..15).collect::<Vec<_>>();
    let right = (1..25).filter(|&x| x % 2 == 0).collect::<Vec<_>>();
    let data = [&left[..], &right[..]].concat();
    let mut expected = data.clone();
    expected.sort();
    // Both merge variants must produce the sorted result and restore
    // the scratch buffer to all zeros.
    let merges: [fn(&mut [i32], usize, &mut [i32]); 2] =
        [merge_internal_indices, merge_internal_ranges];
    for merge in &merges {
        let mut workspace = data.clone();
        merge(&mut workspace, left.len(), &mut buffer[..]);
        assert_eq!(workspace, expected);
        assert!(buffer.iter().all(|&x| x == 0));
    }
}
/// Rearranges `v` in place into a binary min-heap (each parent <= both of
/// its children, children of `i` at `2*i + 1` and `2*i + 2`), using
/// bottom-up sift-down over the parent half of the slice.
pub fn heapify<T: Data>(v: &mut [T]) {
    indices(v, |mut v, range| {
        // Only the first half of the slice holds parents; the leaves are
        // trivially valid heaps already.
        let (left, _right) = range.split_in_half();
        for i in left.into_iter().rev() {
            // Sift v[i] down until it is <= both children.
            let mut pos = i;
            // vet() turns the raw child index into a branded index, failing
            // (and ending the loop) once pos has no left child.
            while let Ok(mut child) = v.vet(pos.integer() * 2 + 1) {
                let mut right = child;
                // If a right child exists and is smaller, sift toward it.
                if v.forward(&mut right) && v[child] > v[right] {
                    child = right;
                }
                if v[pos] < v[child] {
                    // Heap property holds at this node; stop sifting.
                    break;
                }
                v.swap(pos, child);
                pos = child;
            }
        }
    });
}
#[test]
fn test_heapify() {
    // Expected layout is a valid min-heap of the same multiset.
    let expected = [3, 7, 8, 11, 15, 9, 26, 14, 12, 22, 22];
    let mut actual = [8, 12, 9, 7, 22, 3, 26, 14, 11, 15, 22];
    heapify(&mut actual);
    assert_eq!(&actual, &expected);
}
/// Binary search over a sorted slice, mirroring `slice::binary_search`:
/// returns `Ok(index)` of an element equal to `elt`, or `Err(insertion)`
/// where `elt` could be inserted to keep `v` sorted. Branded-index variant.
pub fn binary_search<T: Data>(v: &[T], elt: &T) -> Result<usize, usize> {
    indices(v, move |v, mut range| {
        loop {
            // Probe the first element of the upper half of the range.
            let (a, b) = range.split_in_half();
            if let Ok(b_) = b.nonempty() {
                let mid = b_.first();
                match v[mid].cmp(elt) {
                    Ordering::Equal => return Ok(mid.integer()),
                    // Too big: continue in the lower half.
                    Ordering::Greater => range = a,
                    // Too small: continue past the probe in the upper half.
                    Ordering::Less => range = b_.tail(),
                }
            } else {
                // Range exhausted without a match.
                break;
            }
        }
        // start() of the empty range is the insertion point.
        Err(range.start())
    })
}
#[test]
fn test_binary_search() {
    let data = [3, 7, 8, 11, 15, 22, 26];
    // Spot-check an exact hit at the front and a miss before it.
    assert_eq!(binary_search(&data, &3), Ok(0));
    assert_eq!(binary_search(&data, &2), Err(0));
    // Cross-check against the standard library for probes around every
    // boundary of `data` (hits and misses alike).
    for probe in &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 25, 26, 27, 28] {
        assert_eq!(binary_search(&data, probe), data.binary_search(probe));
    }
}