#![allow(clippy::redundant_closure)]
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::mem::ManuallyDrop;
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr;
use core::slice;
#[allow(unused_imports)]
use core::mem;
use crate::alloc::{Allocator, Global, SizedTypeProperties};
use crate::clone::TryClone;
use crate::error::Error;
use crate::iter::{TryExtend, TryFromIteratorIn};
use crate::raw_vec::RawVec;
use crate::slice::range as slice_range;
use crate::vec::Vec;
#[macro_use]
mod macros;

mod drain;
pub use self::drain::Drain;

mod iter_mut;
pub use self::iter_mut::IterMut;

mod into_iter;
pub use self::into_iter::IntoIter;

mod iter;
pub use self::iter::Iter;

mod raw_iter;
pub use self::raw_iter::RawIter;
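/// A double-ended queue implemented with a growable ring buffer.
///
/// This is a fallible-allocation variant of the standard library's
/// `VecDeque`: operations that may allocate return a `Result` and surface
/// allocation failure as an [`Error`] instead of aborting.
///
/// # Examples
///
/// A minimal usage sketch. The import path below is an assumption about how
/// this crate is re-exported; adjust it to the actual public path.
///
/// ```
/// use rune::alloc::VecDeque; // assumed re-export path
///
/// let mut buf: VecDeque<u32> = VecDeque::new();
/// buf.try_push_back(3)?;
/// buf.try_push_back(4)?;
/// buf.try_push_front(2)?;
///
/// assert_eq!(buf.pop_front(), Some(2));
/// assert_eq!(buf, [3, 4]);
/// # Ok::<_, rune::alloc::Error>(())
/// ```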
pub struct VecDeque<T, A: Allocator = Global> {
    /// Physical index of the first element in the ring buffer. Invariant:
    /// `head < capacity()` unless the capacity is zero.
    head: usize,
    /// Number of initialized elements. Invariant: `len <= capacity()`.
    len: usize,
    /// The backing allocation; its capacity is the size of the ring.
    buf: RawVec<T, A>,
}
impl<T: TryClone, A: Allocator + Clone> TryClone for VecDeque<T, A> {
fn try_clone(&self) -> Result<Self, Error> {
let mut deq = Self::try_with_capacity_in(self.len(), self.allocator().clone())?;
for value in self.iter() {
deq.try_push_back(value.try_clone()?)?;
}
Ok(deq)
}
fn try_clone_from(&mut self, other: &Self) -> Result<(), Error> {
self.clear();
for value in other.iter() {
self.try_push_back(value.try_clone()?)?;
}
Ok(())
}
}
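// With `rune_nightly`, use `#[may_dangle]` (the `dropck_eyepatch` feature) so
// the drop checker accepts deques holding borrows that don't strictly outlive
// the deque, mirroring the standard library's `VecDeque` drop impl. The
// stable fallback below is identical apart from the attribute.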
#[cfg(rune_nightly)]
unsafe impl<#[may_dangle] T, A: Allocator> Drop for VecDeque<T, A> {
fn drop(&mut self) {
struct Dropper<'a, T>(&'a mut [T]);
impl<'a, T> Drop for Dropper<'a, T> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(self.0);
}
}
}
let (front, back) = self.as_mut_slices();
unsafe {
let _back_dropper = Dropper(back);
ptr::drop_in_place(front);
}
}
}
#[cfg(not(rune_nightly))]
impl<T, A: Allocator> Drop for VecDeque<T, A> {
fn drop(&mut self) {
struct Dropper<'a, T>(&'a mut [T]);
impl<T> Drop for Dropper<'_, T> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(self.0);
}
}
}
let (front, back) = self.as_mut_slices();
unsafe {
let _back_dropper = Dropper(back);
ptr::drop_in_place(front);
}
}
}
impl<T> Default for VecDeque<T> {
#[inline]
fn default() -> VecDeque<T> {
VecDeque::new()
}
}
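// Internal ring-buffer helpers.
//
// Elements are stored in `buf` starting at the physical slot `head`, wrapping
// around at `capacity()`. A logical index `i` (0 is the front) maps to the
// physical slot `wrap_add(head, i)`, i.e. `(head + i) % capacity()` computed
// with a single conditional subtraction instead of a division.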
impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
fn ptr(&self) -> *mut T {
self.buf.ptr()
}
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
unsafe { ptr::read(self.ptr().add(off)) }
}
#[inline]
unsafe fn buffer_write(&mut self, off: usize, value: T) {
unsafe {
ptr::write(self.ptr().add(off), value);
}
}
#[inline]
unsafe fn buffer_range(&self, range: Range<usize>) -> *mut [T] {
unsafe {
ptr::slice_from_raw_parts_mut(self.ptr().add(range.start), range.end - range.start)
}
}
#[inline]
fn is_full(&self) -> bool {
self.len == self.capacity()
}
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
wrap_index(idx.wrapping_add(addend), self.capacity())
}
#[inline]
fn to_physical_idx(&self, idx: usize) -> usize {
self.wrap_add(self.head, idx)
}
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
wrap_index(
idx.wrapping_sub(subtrahend).wrapping_add(self.capacity()),
self.capacity(),
)
}
#[inline]
unsafe fn copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
dst + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.capacity()
);
debug_assert!(
src + len <= self.capacity(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.capacity()
);
unsafe {
ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
}
}
#[inline]
unsafe fn copy_nonoverlapping(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
dst + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.capacity()
);
debug_assert!(
src + len <= self.capacity(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.capacity()
);
unsafe {
ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
}
}
unsafe fn wrap_copy(&mut self, src: usize, dst: usize, len: usize) {
debug_assert!(
cmp::min(src.abs_diff(dst), self.capacity() - src.abs_diff(dst)) + len
<= self.capacity(),
"wrc dst={} src={} len={} cap={}",
dst,
src,
len,
self.capacity()
);
if T::IS_ZST || src == dst || len == 0 {
return;
}
let dst_after_src = self.wrap_sub(dst, src) < len;
let src_pre_wrap_len = self.capacity() - src;
let dst_pre_wrap_len = self.capacity() - dst;
let src_wraps = src_pre_wrap_len < len;
let dst_wraps = dst_pre_wrap_len < len;
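        // Pick the order of the partial copies based on whether the
        // destination range starts inside the (wrapped) source range, so that
        // no data is clobbered by an earlier partial copy.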
match (dst_after_src, src_wraps, dst_wraps) {
(_, false, false) => {
unsafe {
self.copy(src, dst, len);
}
}
(false, false, true) => {
unsafe {
self.copy(src, dst, dst_pre_wrap_len);
self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
}
}
(true, false, true) => {
unsafe {
self.copy(src + dst_pre_wrap_len, 0, len - dst_pre_wrap_len);
self.copy(src, dst, dst_pre_wrap_len);
}
}
(false, true, false) => {
unsafe {
self.copy(src, dst, src_pre_wrap_len);
self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
}
}
(true, true, false) => {
unsafe {
self.copy(0, dst + src_pre_wrap_len, len - src_pre_wrap_len);
self.copy(src, dst, src_pre_wrap_len);
}
}
(false, true, true) => {
debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
let delta = dst_pre_wrap_len - src_pre_wrap_len;
unsafe {
self.copy(src, dst, src_pre_wrap_len);
self.copy(0, dst + src_pre_wrap_len, delta);
self.copy(delta, 0, len - dst_pre_wrap_len);
}
}
(true, true, true) => {
debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
let delta = src_pre_wrap_len - dst_pre_wrap_len;
unsafe {
self.copy(0, delta, len - src_pre_wrap_len);
self.copy(self.capacity() - delta, 0, delta);
self.copy(src, dst, dst_pre_wrap_len);
}
}
}
}
#[inline]
unsafe fn copy_slice(&mut self, dst: usize, src: &[T]) {
debug_assert!(src.len() <= self.capacity());
let head_room = self.capacity() - dst;
if src.len() <= head_room {
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len());
}
} else {
let (left, right) = src.split_at(head_room);
unsafe {
ptr::copy_nonoverlapping(left.as_ptr(), self.ptr().add(dst), left.len());
ptr::copy_nonoverlapping(right.as_ptr(), self.ptr(), right.len());
}
}
}
#[inline]
unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
let new_capacity = self.capacity();
debug_assert!(new_capacity >= old_capacity);
        // Can't use `is_contiguous` here because the capacity has already
        // been updated to the new, larger value.
        if self.head <= old_capacity - self.len {
            // The contents didn't wrap in the old layout, so they are already
            // in place; nothing to move.
        } else {
let head_len = old_capacity - self.head;
let tail_len = self.len - head_len;
if head_len > tail_len && new_capacity - old_capacity >= tail_len {
unsafe {
self.copy_nonoverlapping(0, old_capacity, tail_len);
}
} else {
let new_head = new_capacity - head_len;
unsafe {
self.copy(self.head, new_head, head_len);
}
self.head = new_head;
}
}
debug_assert!(self.head < self.capacity() || self.capacity() == 0);
}
}
impl<T> VecDeque<T> {
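    /// Creates an empty deque without allocating.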
#[inline]
#[must_use]
pub const fn new() -> Self {
Self::new_in(Global)
}
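    /// Creates an empty deque with space for at least `capacity` elements,
    /// returning an error if the allocation fails.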
pub fn try_with_capacity(capacity: usize) -> Result<Self, Error> {
Self::try_with_capacity_in(capacity, Global)
}
}
impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
pub const fn new_in(alloc: A) -> VecDeque<T, A> {
VecDeque {
head: 0,
len: 0,
buf: RawVec::new_in(alloc),
}
}
pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<VecDeque<T, A>, Error> {
Ok(VecDeque {
head: 0,
len: 0,
buf: RawVec::try_with_capacity_in(capacity, alloc)?,
})
}
pub fn get(&self, index: usize) -> Option<&T> {
if index < self.len {
let idx = self.to_physical_idx(index);
unsafe { Some(&*self.ptr().add(idx)) }
} else {
None
}
}
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
if index < self.len {
let idx = self.to_physical_idx(index);
unsafe { Some(&mut *self.ptr().add(idx)) }
} else {
None
}
}
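    /// Swaps the elements at indices `i` and `j`.
    ///
    /// Panics if either index is out of bounds.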
pub fn swap(&mut self, i: usize, j: usize) {
assert!(i < self.len());
assert!(j < self.len());
let ri = self.to_physical_idx(i);
let rj = self.to_physical_idx(j);
unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
}
#[inline]
pub fn capacity(&self) -> usize {
if T::IS_ZST {
usize::MAX
} else {
self.buf.capacity()
}
}
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), Error> {
let new_cap = self
.len
.checked_add(additional)
.ok_or(Error::CapacityOverflow)?;
let old_cap = self.capacity();
if new_cap > old_cap {
self.buf.try_reserve_exact(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
}
Ok(())
}
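    /// Reserves capacity for at least `additional` more elements, returning an
    /// error on capacity overflow or allocation failure. Unlike
    /// `try_reserve_exact`, this may over-allocate to amortize future growth.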
pub fn try_reserve(&mut self, additional: usize) -> Result<(), Error> {
let new_cap = self
.len
.checked_add(additional)
.ok_or(Error::CapacityOverflow)?;
let old_cap = self.capacity();
if new_cap > old_cap {
self.buf.try_reserve(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
}
Ok(())
}
pub fn try_shrink_to_fit(&mut self) -> Result<(), Error> {
self.try_shrink_to(0)
}
pub fn try_shrink_to(&mut self, min_capacity: usize) -> Result<(), Error> {
let target_cap = min_capacity.max(self.len);
if T::IS_ZST || self.capacity() <= target_cap {
return Ok(());
}
let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
if self.len == 0 {
self.head = 0;
} else if self.head >= target_cap && tail_outside {
unsafe {
self.copy_nonoverlapping(self.head, 0, self.len);
}
self.head = 0;
} else if self.head < target_cap && tail_outside {
let len = self.head + self.len - target_cap;
unsafe {
self.copy_nonoverlapping(target_cap, 0, len);
}
} else if !self.is_contiguous() {
let head_len = self.capacity() - self.head;
let new_head = target_cap - head_len;
unsafe {
self.copy(self.head, new_head, head_len);
}
self.head = new_head;
}
self.buf.try_shrink_to_fit(target_cap)?;
debug_assert!(self.head < self.capacity() || self.capacity() == 0);
debug_assert!(self.len <= self.capacity());
Ok(())
}
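    /// Shortens the deque to at most `len` elements, dropping the excess from
    /// the back. Does nothing if `len` is greater than or equal to the current
    /// length.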
pub fn truncate(&mut self, len: usize) {
struct Dropper<'a, T>(&'a mut [T]);
impl<T> Drop for Dropper<'_, T> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(self.0);
}
}
}
unsafe {
if len >= self.len {
return;
}
let (front, back) = self.as_mut_slices();
if len > front.len() {
let begin = len - front.len();
let drop_back = back.get_unchecked_mut(begin..) as *mut _;
self.len = len;
ptr::drop_in_place(drop_back);
} else {
let drop_back = back as *mut _;
let drop_front = front.get_unchecked_mut(len..) as *mut _;
self.len = len;
let _back_dropper = Dropper(&mut *drop_back);
ptr::drop_in_place(drop_front);
}
}
}
#[inline]
pub fn allocator(&self) -> &A {
self.buf.allocator()
}
pub fn iter(&self) -> Iter<'_, T> {
let (a, b) = self.as_slices();
Iter::new(a.iter(), b.iter())
}
pub unsafe fn raw_iter(&self) -> RawIter<T> {
let (a, b) = self.as_slices();
RawIter::new(crate::slice::RawIter::new(a), crate::slice::RawIter::new(b))
}
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
let (a, b) = self.as_mut_slices();
IterMut::new(a.iter_mut(), b.iter_mut())
}
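    /// Returns a pair of slices that together cover the contents in order.
    /// When the elements are contiguous in the buffer, everything is in the
    /// first slice and the second is empty.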
#[inline]
pub fn as_slices(&self) -> (&[T], &[T]) {
let (a_range, b_range) = self.slice_ranges(.., self.len);
unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
}
#[inline]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
let (a_range, b_range) = self.slice_ranges(.., self.len);
unsafe {
(
&mut *self.buffer_range(a_range),
&mut *self.buffer_range(b_range),
)
}
}
pub fn len(&self) -> usize {
self.len
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
fn slice_ranges<R>(&self, range: R, len: usize) -> (Range<usize>, Range<usize>)
where
R: RangeBounds<usize>,
{
let Range { start, end } = slice_range(range, ..len);
let len = end - start;
if len == 0 {
(0..0, 0..0)
} else {
let wrapped_start = self.to_physical_idx(start);
let head_len = self.capacity() - wrapped_start;
if head_len >= len {
(wrapped_start..wrapped_start + len, 0..0)
} else {
let tail_len = len - head_len;
(wrapped_start..self.capacity(), 0..tail_len)
}
}
}
#[inline]
pub fn range<R>(&self, range: R) -> Iter<'_, T>
where
R: RangeBounds<usize>,
{
let (a_range, b_range) = self.slice_ranges(range, self.len);
let a = unsafe { &*self.buffer_range(a_range) };
let b = unsafe { &*self.buffer_range(b_range) };
Iter::new(a.iter(), b.iter())
}
#[inline]
pub fn range_mut<R>(&mut self, range: R) -> IterMut<'_, T>
where
R: RangeBounds<usize>,
{
let (a_range, b_range) = self.slice_ranges(range, self.len);
let a = unsafe { &mut *self.buffer_range(a_range) };
let b = unsafe { &mut *self.buffer_range(b_range) };
IterMut::new(a.iter_mut(), b.iter_mut())
}
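    /// Removes the elements in `range` and returns a draining iterator over
    /// them.
    ///
    /// Panics if the range is out of bounds.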
#[inline]
pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
where
R: RangeBounds<usize>,
{
let Range { start, end } = slice_range(range, ..self.len);
let drain_start = start;
let drain_len = end - start;
unsafe { Drain::new(self, drain_start, drain_len) }
}
#[inline]
pub fn clear(&mut self) {
self.truncate(0);
self.head = 0;
}
pub fn contains(&self, x: &T) -> bool
where
T: PartialEq<T>,
{
let (a, b) = self.as_slices();
a.contains(x) || b.contains(x)
}
pub fn front(&self) -> Option<&T> {
self.get(0)
}
pub fn front_mut(&mut self) -> Option<&mut T> {
self.get_mut(0)
}
pub fn back(&self) -> Option<&T> {
self.get(self.len.wrapping_sub(1))
}
pub fn back_mut(&mut self) -> Option<&mut T> {
self.get_mut(self.len.wrapping_sub(1))
}
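    /// Removes and returns the first element, or `None` if the deque is empty.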
pub fn pop_front(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
let old_head = self.head;
self.head = self.to_physical_idx(1);
self.len -= 1;
Some(unsafe { self.buffer_read(old_head) })
}
}
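    /// Removes and returns the last element, or `None` if the deque is empty.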
pub fn pop_back(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
self.len -= 1;
Some(unsafe { self.buffer_read(self.to_physical_idx(self.len)) })
}
}
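    /// Prepends `value` to the front, growing the buffer if it is full and
    /// returning an error if that allocation fails.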
pub fn try_push_front(&mut self, value: T) -> Result<(), Error> {
if self.is_full() {
self.try_grow()?;
}
self.head = self.wrap_sub(self.head, 1);
self.len += 1;
unsafe {
self.buffer_write(self.head, value);
}
Ok(())
}
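    /// Appends `value` to the back, growing the buffer if it is full and
    /// returning an error if that allocation fails.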
pub fn try_push_back(&mut self, value: T) -> Result<(), Error> {
if self.is_full() {
self.try_grow()?;
}
unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
self.len += 1;
Ok(())
}
#[inline]
fn is_contiguous(&self) -> bool {
self.head <= self.capacity() - self.len
}
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
let length = self.len;
if index < length && index != 0 {
self.swap(index, 0);
} else if index >= length {
return None;
}
self.pop_front()
}
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
let length = self.len;
if length > 0 && index < length - 1 {
self.swap(index, length - 1);
} else if index >= length {
return None;
}
self.pop_back()
}
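    /// Inserts `value` at `index`, shifting whichever side of the deque is
    /// shorter to make room. Returns an error if growing the buffer fails.
    ///
    /// Panics if `index > len`.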
pub fn try_insert(&mut self, index: usize, value: T) -> Result<(), Error> {
assert!(index <= self.len(), "index out of bounds");
if self.is_full() {
self.try_grow()?;
}
let k = self.len - index;
if k < index {
unsafe {
self.wrap_copy(
self.to_physical_idx(index),
self.to_physical_idx(index + 1),
k,
);
self.buffer_write(self.to_physical_idx(index), value);
self.len += 1;
}
} else {
let old_head = self.head;
self.head = self.wrap_sub(self.head, 1);
unsafe {
self.wrap_copy(old_head, self.head, index);
self.buffer_write(self.to_physical_idx(index), value);
self.len += 1;
}
}
Ok(())
}
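    /// Removes and returns the element at `index`, shifting whichever side of
    /// the deque is shorter to close the gap. Returns `None` if `index` is out
    /// of bounds.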
pub fn remove(&mut self, index: usize) -> Option<T> {
if self.len <= index {
return None;
}
let wrapped_idx = self.to_physical_idx(index);
let elem = unsafe { Some(self.buffer_read(wrapped_idx)) };
let k = self.len - index - 1;
if k < index {
unsafe { self.wrap_copy(self.wrap_add(wrapped_idx, 1), wrapped_idx, k) };
self.len -= 1;
} else {
let old_head = self.head;
self.head = self.to_physical_idx(1);
unsafe { self.wrap_copy(old_head, self.head, index) };
self.len -= 1;
}
elem
}
#[inline]
#[must_use = "use `.truncate()` if you don't need the other half"]
pub fn try_split_off(&mut self, at: usize) -> Result<Self, Error>
where
A: Clone,
{
let len = self.len;
assert!(at <= len, "`at` out of bounds");
let other_len = len - at;
let mut other = VecDeque::try_with_capacity_in(other_len, self.allocator().clone())?;
unsafe {
let (first_half, second_half) = self.as_slices();
let first_len = first_half.len();
let second_len = second_half.len();
if at < first_len {
let amount_in_first = first_len - at;
ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first);
ptr::copy_nonoverlapping(
second_half.as_ptr(),
other.ptr().add(amount_in_first),
second_len,
);
} else {
let offset = at - first_len;
let amount_in_second = second_len - offset;
ptr::copy_nonoverlapping(
second_half.as_ptr().add(offset),
other.ptr(),
amount_in_second,
);
}
}
self.len = at;
other.len = other_len;
Ok(other)
}
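    /// Moves all elements of `other` into `self`, leaving `other` empty.
    /// Returns an error if reserving space in `self` fails.
    ///
    /// # Examples
    ///
    /// A minimal sketch; the import path is assumed:
    ///
    /// ```
    /// use rune::alloc::VecDeque; // assumed re-export path
    ///
    /// let mut a: VecDeque<u32> = VecDeque::new();
    /// let mut b: VecDeque<u32> = VecDeque::new();
    ///
    /// a.try_push_back(1)?;
    /// a.try_push_back(2)?;
    /// b.try_push_back(3)?;
    ///
    /// a.try_append(&mut b)?;
    /// assert_eq!(a, [1, 2, 3]);
    /// assert!(b.is_empty());
    /// # Ok::<_, rune::alloc::Error>(())
    /// ```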
#[inline]
pub fn try_append(&mut self, other: &mut Self) -> Result<(), Error> {
if T::IS_ZST {
self.len = self
.len
.checked_add(other.len)
.ok_or(Error::CapacityOverflow)?;
other.len = 0;
other.head = 0;
return Ok(());
}
self.try_reserve(other.len)?;
unsafe {
let (left, right) = other.as_slices();
self.copy_slice(self.to_physical_idx(self.len), left);
self.copy_slice(self.to_physical_idx(self.len + left.len()), right);
}
self.len += other.len;
other.len = 0;
other.head = 0;
Ok(())
}
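    /// Retains only the elements for which `f` returns `true`, preserving
    /// their relative order.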
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
self.retain_mut(|elem| f(elem));
}
pub fn retain_mut<F>(&mut self, mut f: F)
where
F: FnMut(&mut T) -> bool,
{
let len = self.len;
let mut idx = 0;
let mut cur = 0;
while cur < len {
if !f(&mut self[cur]) {
cur += 1;
break;
}
cur += 1;
idx += 1;
}
while cur < len {
if !f(&mut self[cur]) {
cur += 1;
continue;
}
self.swap(idx, cur);
cur += 1;
idx += 1;
}
if cur != idx {
self.truncate(idx);
}
}
#[inline(never)]
fn try_grow(&mut self) -> Result<(), Error> {
debug_assert!(self.is_full());
let old_cap = self.capacity();
self.buf.try_reserve_for_push(old_cap)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
debug_assert!(!self.is_full());
Ok(())
}
pub fn try_resize_with(
&mut self,
new_len: usize,
mut generator: impl FnMut() -> T,
) -> Result<(), Error> {
let len = self.len;
if new_len > len {
for _ in 0..new_len - len {
self.try_push_back(generator())?;
}
} else {
self.truncate(new_len);
}
Ok(())
}
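    /// Rearranges the contents into one contiguous run in the buffer and
    /// returns that run as a single mutable slice. Afterwards, `as_slices`
    /// returns every element in its first slice.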
pub fn make_contiguous(&mut self) -> &mut [T] {
if T::IS_ZST {
self.head = 0;
}
if self.is_contiguous() {
unsafe { return slice::from_raw_parts_mut(self.ptr().add(self.head), self.len) }
}
let &mut Self { head, len, .. } = self;
let ptr = self.ptr();
let cap = self.capacity();
let free = cap - len;
let head_len = cap - head;
let tail = len - head_len;
let tail_len = tail;
if free >= head_len {
unsafe {
self.copy(0, head_len, tail_len);
self.copy_nonoverlapping(head, 0, head_len);
}
self.head = 0;
} else if free >= tail_len {
unsafe {
self.copy(head, tail, head_len);
self.copy_nonoverlapping(0, tail + head_len, tail_len);
}
self.head = tail;
} else {
if head_len > tail_len {
unsafe {
if free != 0 {
self.copy(0, free, tail_len);
}
let slice = &mut *self.buffer_range(free..self.capacity());
slice.rotate_left(tail_len);
self.head = free;
}
} else {
unsafe {
if free != 0 {
self.copy(self.head, tail_len, head_len);
}
let slice = &mut *self.buffer_range(0..self.len);
slice.rotate_right(head_len);
self.head = 0;
}
}
}
unsafe { slice::from_raw_parts_mut(ptr.add(self.head), self.len) }
}
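    /// Rotates the deque `mid` places to the left, so the element at index
    /// `mid` becomes the new front. Only `min(mid, len - mid)` elements are
    /// physically moved.
    ///
    /// Panics if `mid > len`.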
pub fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
let k = self.len - mid;
if mid <= k {
unsafe { self.rotate_left_inner(mid) }
} else {
unsafe { self.rotate_right_inner(k) }
}
}
pub fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
let mid = self.len - k;
if k <= mid {
unsafe { self.rotate_right_inner(k) }
} else {
unsafe { self.rotate_left_inner(mid) }
}
}
unsafe fn rotate_left_inner(&mut self, mid: usize) {
debug_assert!(mid * 2 <= self.len());
unsafe {
self.wrap_copy(self.head, self.to_physical_idx(self.len), mid);
}
self.head = self.to_physical_idx(mid);
}
unsafe fn rotate_right_inner(&mut self, k: usize) {
debug_assert!(k * 2 <= self.len());
self.head = self.wrap_sub(self.head, k);
unsafe {
self.wrap_copy(self.to_physical_idx(self.len), self.head, k);
}
}
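    /// Binary searches a deque that is assumed to be sorted, returning
    /// `Ok(index)` for a match or `Err(index)` with the position where `x`
    /// could be inserted to keep the order.
    ///
    /// # Examples
    ///
    /// A minimal sketch; the import path is assumed:
    ///
    /// ```
    /// use rune::alloc::VecDeque; // assumed re-export path
    ///
    /// let mut deque: VecDeque<u32> = VecDeque::new();
    ///
    /// for value in [1, 2, 3, 5, 8] {
    ///     deque.try_push_back(value)?;
    /// }
    ///
    /// assert_eq!(deque.binary_search(&5), Ok(3));
    /// assert_eq!(deque.binary_search(&4), Err(3));
    /// # Ok::<_, rune::alloc::Error>(())
    /// ```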
#[inline]
pub fn binary_search(&self, x: &T) -> Result<usize, usize>
where
T: Ord,
{
self.binary_search_by(|e| e.cmp(x))
}
pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
F: FnMut(&'a T) -> Ordering,
{
let (front, back) = self.as_slices();
let cmp_back = back.first().map(|elem| f(elem));
if let Some(Ordering::Equal) = cmp_back {
Ok(front.len())
} else if let Some(Ordering::Less) = cmp_back {
back.binary_search_by(f)
.map(|idx| idx + front.len())
.map_err(|idx| idx + front.len())
} else {
front.binary_search_by(f)
}
}
#[inline]
pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
F: FnMut(&'a T) -> B,
B: Ord,
{
self.binary_search_by(|k| f(k).cmp(b))
}
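    /// Returns the number of leading elements for which `pred` is `true`,
    /// assuming the deque is partitioned so that all such elements come first.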
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
P: FnMut(&T) -> bool,
{
let (front, back) = self.as_slices();
if let Some(true) = back.first().map(|v| pred(v)) {
back.partition_point(pred) + front.len()
} else {
front.partition_point(pred)
}
}
}
impl<T: TryClone, A: Allocator> VecDeque<T, A> {
pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), Error> {
if new_len > self.len() {
let extra = new_len - self.len();
for _ in 0..extra {
self.try_push_back(value.try_clone()?)?;
}
} else {
self.truncate(new_len);
}
Ok(())
}
}
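// Maps a logical index into the physical buffer. Callers guarantee
// `logical_index < 2 * capacity` (or both are zero), so a single conditional
// subtraction is enough to wrap.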
#[inline]
fn wrap_index(logical_index: usize, capacity: usize) -> usize {
debug_assert!(
(logical_index == 0 && capacity == 0)
|| logical_index < capacity
|| (logical_index - capacity) < capacity
);
if logical_index >= capacity {
logical_index - capacity
} else {
logical_index
}
}
impl<T: PartialEq, A: Allocator> PartialEq for VecDeque<T, A> {
fn eq(&self, other: &Self) -> bool {
if self.len != other.len() {
return false;
}
let (sa, sb) = self.as_slices();
let (oa, ob) = other.as_slices();
if sa.len() == oa.len() {
sa == oa && sb == ob
} else if sa.len() < oa.len() {
let front = sa.len();
let mid = oa.len() - front;
let (oa_front, oa_mid) = oa.split_at(front);
let (sb_mid, sb_back) = sb.split_at(mid);
debug_assert_eq!(sa.len(), oa_front.len());
debug_assert_eq!(sb_mid.len(), oa_mid.len());
debug_assert_eq!(sb_back.len(), ob.len());
sa == oa_front && sb_mid == oa_mid && sb_back == ob
} else {
let front = oa.len();
let mid = sa.len() - front;
let (sa_front, sa_mid) = sa.split_at(front);
let (ob_mid, ob_back) = ob.split_at(mid);
debug_assert_eq!(sa_front.len(), oa.len());
debug_assert_eq!(sa_mid.len(), ob_mid.len());
debug_assert_eq!(sb.len(), ob_back.len());
sa_front == oa && sa_mid == ob_mid && sb == ob_back
}
}
}
impl<T: Eq, A: Allocator> Eq for VecDeque<T, A> {}
__impl_slice_eq1! { [] VecDeque<T, A>, Vec<U, A>, }
__impl_slice_eq1! { [] VecDeque<T, A>, &[U], }
__impl_slice_eq1! { [] VecDeque<T, A>, &mut [U], }
__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, [U; N], }
__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &[U; N], }
__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &mut [U; N], }
impl<T: PartialOrd, A: Allocator> PartialOrd for VecDeque<T, A> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
impl<T: Ord, A: Allocator> Ord for VecDeque<T, A> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other.iter())
}
}
impl<T: Hash, A: Allocator> Hash for VecDeque<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
state.write_usize(self.len);
self.iter().for_each(|elem| elem.hash(state));
}
}
impl<T, A: Allocator> Index<usize> for VecDeque<T, A> {
type Output = T;
#[inline]
fn index(&self, index: usize) -> &T {
self.get(index).expect("Out of bounds access")
}
}
impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[inline]
fn index_mut(&mut self, index: usize) -> &mut T {
self.get_mut(index).expect("Out of bounds access")
}
}
impl<T, A: Allocator> IntoIterator for VecDeque<T, A> {
type Item = T;
type IntoIter = IntoIter<T, A>;
fn into_iter(self) -> IntoIter<T, A> {
IntoIter::new(self)
}
}
impl<'a, T, A: Allocator> IntoIterator for &'a VecDeque<T, A> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
impl<'a, T, A: Allocator> IntoIterator for &'a mut VecDeque<T, A> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
impl<T: fmt::Debug, A: Allocator> fmt::Debug for VecDeque<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
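/// Converts a `Vec` into a `VecDeque` in `O(1)` by reusing the allocation.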
impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
#[inline]
fn from(other: Vec<T, A>) -> Self {
let (buf, len) = other.into_raw_vec();
Self { head: 0, len, buf }
}
}
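/// Converts a `VecDeque` into a `Vec`, reusing the allocation. The contents
/// are first moved to the start of the buffer, which is `O(n)` in the worst
/// case and a no-op when the deque is already contiguous with `head == 0`.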
impl<T, A: Allocator> From<VecDeque<T, A>> for Vec<T, A> {
fn from(mut other: VecDeque<T, A>) -> Self {
other.make_contiguous();
unsafe {
let other = ManuallyDrop::new(other);
let buf = other.buf.ptr();
let len = other.len();
let cap = other.capacity();
let alloc = ptr::read(other.allocator());
if other.head != 0 {
ptr::copy(buf.add(other.head), buf, len);
}
Vec::from_raw_parts_in(buf, len, cap, alloc)
}
}
}
impl<T, const N: usize> TryFrom<[T; N]> for VecDeque<T> {
type Error = Error;
fn try_from(arr: [T; N]) -> Result<Self, Self::Error> {
Ok(VecDeque::from(Vec::try_from(arr)?))
}
}
impl<T, A: Allocator> TryFromIteratorIn<T, A> for VecDeque<T, A> {
fn try_from_iter_in<I>(iter: I, alloc: A) -> Result<Self, Error>
where
I: IntoIterator<Item = T>,
{
let mut this = VecDeque::new_in(alloc);
this.try_extend(iter)?;
Ok(this)
}
}
impl<T, A: Allocator> TryExtend<T> for VecDeque<T, A> {
#[inline]
fn try_extend<I: IntoIterator<Item = T>>(&mut self, iter: I) -> Result<(), Error> {
for value in iter {
self.try_push_back(value)?;
}
Ok(())
}
}