#![no_std]
#![warn(clippy::pedantic, missing_docs)]
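//! A space-optimized, `no_std`-friendly re-implementation of `alloc::vec::Vec`.
//! `MiniVec` keeps its length, capacity, and alignment in a header at the
//! front of its single heap allocation, so the handle itself is one pointer
//! wide. Doc-test examples below assume the crate is built as `minivec`.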
extern crate alloc;
mod r#impl;
mod as_mut;
mod as_ref;
mod borrow;
mod clone;
mod debug;
mod default;
mod deref;
mod drop;
mod eq;
mod extend;
mod from;
mod from_iterator;
mod hash;
mod index;
mod into_iterator;
mod ord;
mod partial_eq;
#[cfg(feature = "serde")]
mod serde;
use crate::r#impl::drain::make_drain_iterator;
use crate::r#impl::drain_filter::make_drain_filter_iterator;
use crate::r#impl::helpers::{make_layout, max_align, next_aligned, next_capacity};
use crate::r#impl::splice::make_splice_iterator;
pub use crate::r#impl::{Drain, DrainFilter, IntoIter, Splice};
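/// A `Vec`-like container whose length, capacity, and alignment are stored
/// inline in its heap allocation, making `MiniVec<T>` a single pointer wide.
///
/// A minimal usage sketch:
///
/// ```
/// use minivec::MiniVec;
///
/// let mut v = MiniVec::new();
/// v.push(1);
/// v.push(2);
/// assert_eq!(v.len(), 2);
/// assert_eq!(v.as_slice(), &[1, 2]);
/// ```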
#[repr(transparent)]
pub struct MiniVec<T> {
buf: core::ptr::NonNull<u8>,
phantom: core::marker::PhantomData<T>,
}
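/// Error type returned by [`MiniVec::with_alignment`] when the requested
/// alignment cannot be used.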
#[derive(core::fmt::Debug)]
pub enum LayoutErr {
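/// The alignment was smaller than the minimum required for `T` and the
/// internal header.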
AlignmentTooSmall,
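/// The alignment was not divisible by two.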
AlignmentNotDivisibleByTwo,
}
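// Allocation layout: the `Header` sits at the front of the buffer, followed
// by padding up to the element alignment and then the elements themselves;
// `MiniVec::buf` points at the header.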
#[derive(Clone, Copy)]
struct Header {
len: usize,
cap: usize,
alignment: usize,
}
#[test]
#[allow(clippy::clone_on_copy)]
fn header_clone() {
let header = Header {
len: 0,
cap: 0,
alignment: 0,
};
let header2 = header.clone();
assert_eq!(header2.len, header.len);
assert_eq!(header2.cap, header.cap);
assert_eq!(header2.alignment, header.alignment);
}
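// Sentinel address that empty, never-allocated vectors point at instead of
// the heap; `is_default` recognizes it by address, which lets `MiniVec::new`
// avoid allocating.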
static DEFAULT_U8: u8 = 137;
impl<T> MiniVec<T> {
fn is_default(&self) -> bool {
core::ptr::eq(self.buf.as_ptr(), &DEFAULT_U8)
}
fn header(&self) -> &Header {
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&*self.buf.as_ptr().cast::<Header>()
}
}
fn header_mut(&mut self) -> &mut Header {
#[allow(clippy::cast_ptr_alignment)]
unsafe {
&mut *self.buf.as_ptr().cast::<Header>()
}
}
fn data(&self) -> *mut T {
debug_assert!(!self.is_default());
let count = next_aligned(core::mem::size_of::<Header>(), self.alignment());
unsafe { self.buf.as_ptr().add(count).cast::<T>() }
}
fn alignment(&self) -> usize {
if self.capacity() == 0 {
max_align::<T>()
} else {
self.header().alignment
}
}
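// (Re)allocates the buffer to hold `capacity` elements and writes a refreshed
// `Header` at the front of the new allocation.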
fn grow(&mut self, capacity: usize, alignment: usize) {
debug_assert!(capacity >= self.len());
let old_capacity = self.capacity();
let new_capacity = capacity;
if new_capacity == old_capacity {
return;
}
let new_layout = make_layout::<T>(new_capacity, alignment);
let len = self.len();
let new_buf = if self.is_default() {
unsafe { alloc::alloc::alloc(new_layout) }
} else {
let old_layout = make_layout::<T>(old_capacity, alignment);
unsafe { alloc::alloc::realloc(self.buf.as_ptr(), old_layout, new_layout.size()) }
};
if new_buf.is_null() {
alloc::alloc::handle_alloc_error(new_layout);
}
let header = Header {
len,
cap: new_capacity,
alignment,
};
#[allow(clippy::cast_ptr_alignment)]
unsafe {
core::ptr::write(new_buf.cast::<Header>(), header);
}
self.buf = unsafe { core::ptr::NonNull::<u8>::new_unchecked(new_buf) };
}
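/// Moves every element of `other` into `self`, leaving `other` empty.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut a = mini_vec![1, 2, 3];
/// let mut b = mini_vec![4, 5, 6];
/// a.append(&mut b);
/// assert_eq!(a.as_slice(), &[1, 2, 3, 4, 5, 6]);
/// assert!(b.is_empty());
/// ```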
pub fn append(&mut self, other: &mut MiniVec<T>) {
if other.is_empty() {
return;
}
let other_len = other.len();
self.reserve(other_len);
unsafe {
core::ptr::copy_nonoverlapping(other.as_ptr(), self.as_mut_ptr().add(self.len()), other_len);
};
unsafe {
other.set_len(0);
self.set_len(self.len() + other_len);
};
}
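/// Returns a raw mutable pointer to the first element. Unlike
/// `Vec::as_mut_ptr`, this returns a null pointer if the vector has never
/// allocated.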
pub fn as_mut_ptr(&mut self) -> *mut T {
if self.is_default() {
return core::ptr::null_mut();
}
self.data()
}
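/// Returns a mutable slice over the vector's elements.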
pub fn as_mut_slice(&mut self) -> &mut [T] {
self
}
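/// Returns a raw pointer to the first element. Unlike `Vec::as_ptr`, this
/// returns a null pointer if the vector has never allocated.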
#[must_use]
pub fn as_ptr(&self) -> *const T {
if self.is_default() {
return core::ptr::null();
}
self.data()
}
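/// Returns an immutable slice over the vector's elements.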
#[must_use]
pub fn as_slice(&self) -> &[T] {
self
}
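/// Returns the number of elements the vector can hold without reallocating.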
#[must_use]
pub fn capacity(&self) -> usize {
if self.is_default() {
0
} else {
self.header().cap
}
}
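/// Removes and drops every element, keeping the allocated capacity.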
pub fn clear(&mut self) {
self.truncate(0);
}
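/// Removes all but the first of consecutive equal elements.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 1, 2, 2, 2, 3];
/// v.dedup();
/// assert_eq!(v.as_slice(), &[1, 2, 3]);
/// ```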
pub fn dedup(&mut self)
where
T: PartialEq,
{
self.dedup_by(|x, y| x == y);
}
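/// Removes all but the first of consecutive elements for which `pred` returns
/// `true`; `pred` is passed the current element first and the previously
/// retained element second.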
pub fn dedup_by<F>(&mut self, mut pred: F)
where
F: FnMut(&mut T, &mut T) -> bool,
{
let len = self.len();
if len < 2 {
return;
}
let data = self.as_mut_ptr();
let mut read = unsafe { data.add(1) };
let mut write = read;
let last = unsafe { data.add(len) };
while read < last {
let matches = unsafe { pred(&mut *read, &mut *write.sub(1)) };
if !matches {
if read != write {
unsafe {
core::mem::swap(&mut *read, &mut *write);
}
}
write = unsafe { write.add(1) };
}
read = unsafe { read.add(1) };
}
self.truncate((write as usize - data as usize) / core::mem::size_of::<T>());
}
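/// Removes all but the first of consecutive elements that map to the same key.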
pub fn dedup_by_key<F, K>(&mut self, mut key: F)
where
F: FnMut(&mut T) -> K,
K: PartialEq<K>,
{
self.dedup_by(|a, b| key(a) == key(b));
}
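/// Removes the given range from the vector and returns a draining iterator
/// over the removed elements.
///
/// # Panics
///
/// Panics if the range's start exceeds its end or its end exceeds `len()`.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2, 3, 4];
/// let drained: Vec<i32> = v.drain(1..3).collect();
/// assert_eq!(drained, [2, 3]);
/// assert_eq!(v.as_slice(), &[1, 4]);
/// ```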
pub fn drain<R>(&mut self, range: R) -> Drain<T>
where
R: core::ops::RangeBounds<usize>,
{
let len = self.len();
let start_idx = match range.start_bound() {
core::ops::Bound::Included(&n) => n,
core::ops::Bound::Excluded(&n) => {
n.checked_add(1).expect("Start idx exceeded numeric limits")
}
core::ops::Bound::Unbounded => 0,
};
let end_idx = match range.end_bound() {
core::ops::Bound::Included(&n) => n.checked_add(1).expect("End idx exceeded numeric limits"),
core::ops::Bound::Excluded(&n) => n,
core::ops::Bound::Unbounded => len,
};
if start_idx > end_idx {
panic!(
"start drain index (is {}) should be <= end drain index (is {})",
start_idx, end_idx
);
}
if end_idx > len {
panic!(
"end drain index (is {}) should be <= len (is {})",
end_idx, len
);
}
let data = self.as_mut_ptr();
if !data.is_null() {
unsafe {
self.set_len(start_idx);
}
}
make_drain_iterator(self, data, len - end_idx, start_idx, end_idx)
}
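/// Returns an iterator that removes and yields exactly the elements for which
/// `pred` returns `true`, mirroring the unstable `Vec::drain_filter` API.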
pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, T, F>
where
F: core::ops::FnMut(&mut T) -> bool,
{
make_drain_filter_iterator(self, pred)
}
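/// Removes every element, returning them as a new `MiniVec` and leaving
/// `self` empty, by swapping `self` with a default-constructed vector.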
#[inline]
#[must_use]
pub fn drain_vec(&mut self) -> Self {
let mut result = Self::new();
core::mem::swap(&mut result, self);
result
}
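/// Reconstructs a `MiniVec<T>` from a pointer to its first element.
///
/// # Safety
///
/// `ptr` must be a non-null pointer previously obtained from a `MiniVec<T>`
/// that used the default alignment (i.e. was not created through
/// `with_alignment`) and whose buffer is still live; the header is recovered
/// by stepping backwards from the element data.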
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn from_raw_part(ptr: *mut T) -> MiniVec<T> {
debug_assert!(!ptr.is_null());
let header_size = core::mem::size_of::<Header>();
let aligned = next_aligned(header_size, core::mem::align_of::<T>());
let p = ptr.cast::<u8>();
let buf = p.sub(aligned);
MiniVec {
buf: core::ptr::NonNull::<u8>::new_unchecked(buf),
phantom: core::marker::PhantomData,
}
}
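/// Reconstructs a `MiniVec<T>` from a pointer to its first element, checking
/// in debug builds that `length` and `capacity` match the stored header.
///
/// # Safety
///
/// The same requirements as [`MiniVec::from_raw_part`] apply, and `length`
/// and `capacity` must equal the values recorded in the vector's header.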
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> MiniVec<T> {
debug_assert!(!ptr.is_null());
let header_size = core::mem::size_of::<Header>();
let aligned = next_aligned(header_size, core::mem::align_of::<T>());
let p = ptr.cast::<u8>();
let buf = p.sub(aligned);
debug_assert!((*buf.cast::<Header>()).len == length);
debug_assert!((*buf.cast::<Header>()).cap == capacity);
MiniVec {
buf: core::ptr::NonNull::<u8>::new_unchecked(buf),
phantom: core::marker::PhantomData,
}
}
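/// Inserts `element` at position `index`, shifting everything after it to
/// the right.
///
/// # Panics
///
/// Panics if `index > len()`.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2, 3];
/// v.insert(1, 4);
/// assert_eq!(v.as_slice(), &[1, 4, 2, 3]);
/// ```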
pub fn insert(&mut self, index: usize, element: T) {
let len = self.len();
if index > len {
panic!(
"insertion index (is {}) should be <= len (is {})",
index, len
);
}
if len == self.capacity() {
self.reserve(1);
}
let p = unsafe { self.as_mut_ptr().add(index) };
unsafe {
core::ptr::copy(p, p.add(1), len - index);
core::ptr::write(p, element);
self.set_len(len + 1);
}
}
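/// Decomposes the vector into `(pointer, length, capacity)` without running
/// its destructor; the pointer is null for a never-allocated vector. The
/// parts can be reassembled with [`MiniVec::from_raw_parts`].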
#[must_use]
pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
let mut v = core::mem::ManuallyDrop::new(self);
(v.as_mut_ptr(), v.len(), v.capacity())
}
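/// Returns `true` if the vector contains no elements.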
#[must_use]
pub fn is_empty(&self) -> bool {
self.len() == 0
}
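/// Consumes the vector and leaks its allocation, returning a mutable slice
/// of its contents that lives for `'a`.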
#[must_use]
pub fn leak<'a>(vec: MiniVec<T>) -> &'a mut [T]
where
T: 'a,
{
let len = vec.len();
let mut vec = core::mem::ManuallyDrop::new(vec);
let vec: &mut MiniVec<T> = &mut *vec;
unsafe { core::slice::from_raw_parts_mut(vec.as_mut_ptr(), len) }
}
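/// Returns the number of elements in the vector.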
#[must_use]
pub fn len(&self) -> usize {
if self.is_default() {
0
} else {
self.header().len
}
}
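/// Constructs an empty `MiniVec` without allocating; the handle points at a
/// static sentinel until the first insertion.
///
/// # Panics
///
/// Panics if `T` is a zero-sized type, which is currently unsupported.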
#[must_use]
#[allow(clippy::ptr_as_ptr)]
pub fn new() -> MiniVec<T> {
assert!(
core::mem::size_of::<T>() > 0,
"ZSTs currently not supported"
);
let buf =
unsafe { core::ptr::NonNull::<u8>::new_unchecked(&DEFAULT_U8 as *const u8 as *mut u8) };
MiniVec {
buf,
phantom: core::marker::PhantomData,
}
}
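/// Removes and returns the last element, or `None` if the vector is empty.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2];
/// assert_eq!(v.pop(), Some(2));
/// assert_eq!(v.pop(), Some(1));
/// assert_eq!(v.pop(), None);
/// ```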
pub fn pop(&mut self) -> Option<T> {
let len = self.len();
if len == 0 {
return None;
}
let v = unsafe { core::ptr::read(self.as_ptr().add(len - 1)) };
unsafe {
self.set_len(len - 1);
}
Some(v)
}
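/// Appends `value` to the back of the vector, growing the buffer if needed.
/// Unlike `Vec::push`, it returns a mutable reference to the new element.
///
/// ```
/// use minivec::MiniVec;
///
/// let mut v = MiniVec::new();
/// *v.push(1) += 10;
/// assert_eq!(v.as_slice(), &[11]);
/// ```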
pub fn push(&mut self, value: T) -> &mut T {
let (len, capacity, alignment) = (self.len(), self.capacity(), self.alignment());
if len == capacity {
self.grow(next_capacity::<T>(capacity), alignment);
}
let len = self.len();
let data = self.data();
let dst = unsafe { data.add(len) };
unsafe {
core::ptr::write(dst, value);
};
let header = self.header_mut();
header.len += 1;
unsafe { &mut *dst }
}
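/// Removes and returns the element at `index`, shifting everything after it
/// to the left.
///
/// # Panics
///
/// Panics if `index >= len()`.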
pub fn remove(&mut self, index: usize) -> T {
let len = self.len();
if index >= len {
panic!("removal index (is {}) should be < len (is {})", index, len);
}
unsafe {
let p = self.as_mut_ptr().add(index);
let x = core::ptr::read(p);
let src = p.add(1);
let dst = p;
let count = len - index - 1;
core::ptr::copy(src, dst, count);
self.set_len(len - 1);
x
}
}
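/// Removes and returns the first element equal to `item`, or `None` if no
/// such element exists.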
pub fn remove_item<V>(&mut self, item: &V) -> Option<T>
where
T: PartialEq<V>,
{
let len = self.len();
for i in 0..len {
if self[i] == *item {
return Some(self.remove(i));
}
}
None
}
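/// Ensures capacity for at least `additional` more elements, growing
/// geometrically via `next_capacity` when more space is required.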
pub fn reserve(&mut self, additional: usize) {
let capacity = self.capacity();
let total_required = self.len().checked_add(additional).expect("capacity overflow");
if total_required <= capacity {
return;
}
let mut new_capacity = next_capacity::<T>(capacity);
while new_capacity < total_required {
new_capacity = next_capacity::<T>(new_capacity);
}
self.grow(new_capacity, self.alignment());
}
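/// Ensures capacity for exactly `len() + additional` elements, reallocating
/// only if the current capacity is insufficient.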
pub fn reserve_exact(&mut self, additional: usize) {
let capacity = self.capacity();
let len = self.len();
let total_required = len.checked_add(additional).expect("capacity overflow");
if capacity >= total_required {
return;
}
self.grow(total_required, self.alignment());
}
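/// Resizes the vector to `new_len`, filling new slots with clones of `value`
/// when growing and truncating when shrinking.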
pub fn resize(&mut self, new_len: usize, value: T)
where
T: Clone,
{
let len = self.len();
match new_len.cmp(&len) {
core::cmp::Ordering::Equal => {}
core::cmp::Ordering::Greater => {
let num_elems = new_len - len;
self.reserve(num_elems);
for _i in 0..num_elems {
self.push(value.clone());
}
}
core::cmp::Ordering::Less => {
self.truncate(new_len);
}
}
}
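/// Resizes the vector to `new_len`, filling new slots with the results of
/// calling `f` when growing and truncating when shrinking.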
pub fn resize_with<F>(&mut self, new_len: usize, mut f: F)
where
F: FnMut() -> T,
{
use core::cmp::Ordering::{Equal, Greater, Less};
let len = self.len();
match new_len.cmp(&len) {
Equal => {}
Greater => {
let num_elems = new_len - len;
self.reserve(num_elems);
for _i in 0..num_elems {
self.push(f());
}
}
Less => {
self.truncate(new_len);
}
}
}
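/// Keeps only the elements for which `f` returns `true`, preserving order.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2, 3, 4, 5];
/// v.retain(|&x| x % 2 == 0);
/// assert_eq!(v.as_slice(), &[2, 4]);
/// ```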
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
let len = self.len();
let data = self.as_mut_ptr();
let mut read = data;
let mut write = read;
let last = unsafe { data.add(len) };
while read < last {
let should_retain = unsafe { f(&mut *read) };
if should_retain {
if read != write {
unsafe {
core::mem::swap(&mut *read, &mut *write);
}
}
write = unsafe { write.add(1) };
}
read = unsafe { read.add(1) };
}
self.truncate((write as usize - data as usize) / core::mem::size_of::<T>());
}
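/// Sets the vector's length to `len` without dropping or initializing any
/// elements.
///
/// # Safety
///
/// `len` must not exceed the capacity and every element in `0..len` must be
/// initialized. The vector must also own an allocation: calling this on an
/// empty, default-constructed `MiniVec` writes through the sentinel pointer,
/// which is undefined behavior.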
pub unsafe fn set_len(&mut self, len: usize) {
self.header_mut().len = len;
}
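/// Shrinks the capacity to `min_capacity`, clamped below by the current
/// length.
///
/// # Panics
///
/// Panics if `min_capacity` is greater than the current capacity.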
pub fn shrink_to(&mut self, min_capacity: usize) {
let (len, capacity) = (self.len(), self.capacity());
if min_capacity < len {
self.shrink_to_fit();
return;
}
if capacity == min_capacity {
return;
}
if capacity < min_capacity {
panic!("Tried to shrink to a larger capacity");
}
self.grow(min_capacity, self.alignment());
}
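/// Shrinks the capacity to match the current length.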
pub fn shrink_to_fit(&mut self) {
let len = self.len();
if len == self.capacity() {
return;
}
let capacity = len;
self.grow(capacity, self.alignment());
}
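/// Returns the unused capacity as a slice of `MaybeUninit<T>`.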
pub fn spare_capacity_mut(&mut self) -> &mut [core::mem::MaybeUninit<T>] {
let capacity = self.capacity();
if capacity == 0 {
return &mut [];
}
let len = self.len();
let data = unsafe { self.data().add(len).cast::<core::mem::MaybeUninit<T>>() };
let spare_len = capacity - len;
unsafe { core::slice::from_raw_parts_mut(data, spare_len) }
}
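/// Replaces the given range with the elements of `replace_with`, returning an
/// iterator over the removed range, in the style of `Vec::splice`.
///
/// # Panics
///
/// Panics if the range's start exceeds its end or its end exceeds `len()`.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2, 3, 4];
/// let removed: Vec<i32> = v.splice(1..3, [7, 8, 9]).collect();
/// assert_eq!(removed, [2, 3]);
/// assert_eq!(v.as_slice(), &[1, 7, 8, 9, 4]);
/// ```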
pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<<I as IntoIterator>::IntoIter>
where
I: IntoIterator<Item = T>,
R: core::ops::RangeBounds<usize>,
{
let len = self.len();
let start_idx = match range.start_bound() {
core::ops::Bound::Included(&n) => n,
core::ops::Bound::Excluded(&n) => {
n.checked_add(1).expect("Start idx exceeded numeric limits")
}
core::ops::Bound::Unbounded => 0,
};
let end_idx = match range.end_bound() {
core::ops::Bound::Included(&n) => n.checked_add(1).expect("End idx exceeded numeric limits"),
core::ops::Bound::Excluded(&n) => n,
core::ops::Bound::Unbounded => len,
};
if start_idx > end_idx {
panic!(
"start splice index (is {}) should be <= end splice index (is {})",
start_idx, end_idx
);
}
if end_idx > len {
panic!(
"end splice index (is {}) should be <= len (is {})",
end_idx, len
);
}
let data = self.as_mut_ptr();
if !data.is_null() {
unsafe {
self.set_len(start_idx);
}
}
make_splice_iterator(
self,
data,
len - end_idx,
start_idx,
end_idx,
replace_with.into_iter(),
)
}
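/// Returns the initialized elements and the remaining spare capacity as a
/// pair of slices.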
pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [core::mem::MaybeUninit<T>]) {
let capacity = self.capacity();
if capacity == 0 {
return (&mut [], &mut []);
}
let (p, len) = (self.as_mut_ptr(), self.len());
let init = unsafe { core::slice::from_raw_parts_mut(p, len) };
let uninit = unsafe {
core::slice::from_raw_parts_mut(
p.add(len).cast::<core::mem::MaybeUninit<T>>(),
capacity - len,
)
};
(init, uninit)
}
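/// Splits the vector in two at `at`, returning a newly allocated vector
/// holding the elements `[at, len)` and keeping `[0, at)` (and the original
/// capacity) in `self`.
///
/// # Panics
///
/// Panics if `at > len()`.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2, 3, 4];
/// let tail = v.split_off(2);
/// assert_eq!(v.as_slice(), &[1, 2]);
/// assert_eq!(tail.as_slice(), &[3, 4]);
/// ```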
#[allow(clippy::ptr_as_ptr)]
pub fn split_off(&mut self, at: usize) -> MiniVec<T> {
let len = self.len();
if at > len {
panic!("`at` split index (is {}) should be <= len (is {})", at, len);
}
if len == 0 {
let other = if self.capacity() > 0 {
MiniVec::with_capacity(self.capacity())
} else {
MiniVec::new()
};
return other;
}
if at == 0 {
let orig_cap = self.capacity();
let other = MiniVec {
buf: self.buf,
phantom: core::marker::PhantomData,
};
self.buf =
unsafe { core::ptr::NonNull::<u8>::new_unchecked(&DEFAULT_U8 as *const u8 as *mut u8) };
self.reserve_exact(orig_cap);
return other;
}
let mut other = MiniVec::with_capacity(self.capacity());
unsafe {
self.set_len(at);
other.set_len(len - at);
}
let src = unsafe { self.as_ptr().add(at) };
let dst = other.as_mut_ptr();
let count = len - at;
unsafe {
core::ptr::copy_nonoverlapping(src, dst, count);
}
other
}
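/// Removes and returns the element at `index`, replacing it with the last
/// element instead of shifting; O(1) but does not preserve order.
///
/// # Panics
///
/// Panics if `index >= len()`.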
pub fn swap_remove(&mut self, index: usize) -> T {
let len = self.len();
if index >= len {
panic!(
"swap_remove index (is {}) should be < len (is {})",
index, len
);
}
let src = unsafe { core::ptr::read(self.as_ptr().add(len - 1)) };
self.header_mut().len -= 1;
let dst = unsafe { self.as_mut_ptr().add(index) };
unsafe { core::ptr::replace(dst, src) }
}
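/// Shortens the vector to `len` elements, dropping the excess; does nothing
/// if `len` is not less than the current length.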
pub fn truncate(&mut self, len: usize) {
let self_len = self.len();
if len >= self_len {
return;
}
self.header_mut().len = len;
if !core::mem::needs_drop::<T>() {
return;
}
let s = unsafe { core::slice::from_raw_parts_mut(self.data().add(len), self_len - len) };
unsafe {
core::ptr::drop_in_place(s);
}
}
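/// Constructs a `MiniVec` with the given capacity whose element buffer is
/// aligned to `alignment`.
///
/// # Errors
///
/// Returns `LayoutErr::AlignmentTooSmall` if `alignment` is less than the
/// minimum required for `T` and the header, and
/// `LayoutErr::AlignmentNotDivisibleByTwo` if `alignment` is odd.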
pub fn with_alignment(capacity: usize, alignment: usize) -> Result<MiniVec<T>, LayoutErr> {
if alignment < max_align::<T>() {
return Err(LayoutErr::AlignmentTooSmall);
}
if alignment % 2 > 0 {
return Err(LayoutErr::AlignmentNotDivisibleByTwo);
}
let mut v = MiniVec::new();
v.grow(capacity, alignment);
Ok(v)
}
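/// Constructs an empty `MiniVec` with space reserved for exactly `capacity`
/// elements.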
#[must_use]
pub fn with_capacity(capacity: usize) -> MiniVec<T> {
let mut v = MiniVec::new();
v.reserve_exact(capacity);
v
}
#[doc(hidden)]
pub unsafe fn unsafe_write(&mut self, idx: usize, elem: T) {
self.data().add(idx).write(elem);
}
}
impl<T: Clone> MiniVec<T> {
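/// Appends a clone of every element in `elems` to the vector.
///
/// ```
/// use minivec::mini_vec;
///
/// let mut v = mini_vec![1, 2];
/// v.extend_from_slice(&[3, 4]);
/// assert_eq!(v.as_slice(), &[1, 2, 3, 4]);
/// ```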
pub fn extend_from_slice(&mut self, elems: &[T]) {
self.reserve(elems.len());
for x in elems {
self.push((*x).clone());
}
}
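/// Clones the elements in `range` and appends them to the end of the vector;
/// a drop guard keeps the length consistent if a clone panics.
///
/// # Panics
///
/// Panics if the range's start exceeds its end or its end exceeds `len()`.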
pub fn extend_from_within<Range>(&mut self, range: Range)
where
Range: core::ops::RangeBounds<usize>,
{
struct PanicGuard<'a, T>
where
T: Clone,
{
count: usize,
start_idx: usize,
end_idx: usize,
vec: &'a mut MiniVec<T>,
}
impl<'a, T> Drop for PanicGuard<'a, T>
where
T: Clone,
{
fn drop(&mut self) {
unsafe {
self.vec.set_len(self.vec.len() + self.count);
}
}
}
impl<'a, T> PanicGuard<'a, T>
where
T: Clone,
{
fn extend(&mut self) {
let count = &mut self.count;
let (init, uninit) = self.vec.split_at_spare_mut();
init[self.start_idx..self.end_idx]
.iter()
.cloned()
.zip(uninit.iter_mut())
.for_each(|(val, p)| {
*p = core::mem::MaybeUninit::new(val);
*count += 1;
});
}
}
let len = self.len();
let start_idx = match range.start_bound() {
core::ops::Bound::Included(&n) => n,
core::ops::Bound::Excluded(&n) => {
n.checked_add(1).expect("Start idx exceeded numeric limits")
}
core::ops::Bound::Unbounded => 0,
};
let end_idx = match range.end_bound() {
core::ops::Bound::Included(&n) => n.checked_add(1).expect("End idx exceeded numeric limits"),
core::ops::Bound::Excluded(&n) => n,
core::ops::Bound::Unbounded => len,
};
if start_idx > end_idx {
panic!(
"start extend_from_within index (is {}) should be <= end (is {})",
start_idx, end_idx
);
}
if end_idx > len {
panic!(
"end extend_from_within index (is {}) should be <= len (is {})",
end_idx, len
);
}
if len == 0 {
return;
}
self.reserve(end_idx - start_idx);
let mut guard = PanicGuard {
count: 0,
start_idx,
end_idx,
vec: self,
};
guard.extend();
}
}
impl<T> MiniVec<core::mem::MaybeUninit<T>> {
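/// Converts a `MiniVec<MaybeUninit<T>>` into a `MiniVec<T>`.
///
/// # Safety
///
/// Every element up to the vector's length must have been initialized, and
/// the vector must own an allocation (a never-allocated vector has a null
/// data pointer, which this conversion cannot handle).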
#[must_use]
pub unsafe fn assume_minivec_init(self) -> MiniVec<T> {
let (ptr, len, cap) = self.into_raw_parts();
MiniVec::<T>::from_raw_parts(ptr.cast::<T>(), len, cap)
}
}
unsafe impl<T: core::marker::Send> core::marker::Send for MiniVec<T> {}
unsafe impl<T: core::marker::Sync> core::marker::Sync for MiniVec<T> {}
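/// Constructs a [`MiniVec`] with `vec!`-style syntax: empty, from a list of
/// elements, or from an element and a count.
///
/// ```
/// use minivec::mini_vec;
///
/// let v = mini_vec![1, 2, 3];
/// assert_eq!(v.as_slice(), &[1, 2, 3]);
///
/// let zeros = mini_vec![0; 4];
/// assert_eq!(zeros.as_slice(), &[0, 0, 0, 0]);
/// ```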
#[macro_export]
macro_rules! mini_vec {
() => (
$crate::MiniVec::new()
);
($elem:expr; $n:expr) => {
{
let len = $n;
let mut tmp = $crate::MiniVec::with_capacity(len);
for idx in 0..len {
unsafe { tmp.unsafe_write(idx, $elem.clone()) };
}
if len > 0 {
unsafe { tmp.set_len(len) };
}
tmp
}
};
($($x:expr),+ $(,)?) => {
{
let mut tmp = $crate::MiniVec::new();
$(
tmp.push($x);
)*
tmp
}
};
}