#![allow(unsafe_code)]
#![warn(missing_docs)]
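//! Module for [`SharedVector`], an atomically reference-counted,
//! copy-on-write vector that can be shared across threads and over FFI.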
use core::fmt::Debug;
use core::iter::FromIterator;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr::NonNull;
use atomic_polyfill as atomic;

// Allocation in this module goes through the `alloc` crate so that it also
// works in `no_std` environments.
extern crate alloc;
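/// Header placed in front of the element storage of every `SharedVector`
/// allocation. A negative `refcount` marks the statically allocated
/// `SHARED_NULL`, which must never be modified or freed.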
#[repr(C)]
struct SharedVectorHeader {
    refcount: atomic::AtomicIsize,
    size: usize,
    capacity: usize,
}
#[repr(C)]
struct SharedVectorInner<T> {
    header: SharedVectorHeader,
    data: MaybeUninit<T>,
}
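/// Returns the memory layout of a `SharedVectorInner<T>` holding `capacity`
/// elements: the header immediately followed by a `[T; capacity]` array.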
fn compute_inner_layout<T>(capacity: usize) -> core::alloc::Layout {
    core::alloc::Layout::new::<SharedVectorHeader>()
        .extend(core::alloc::Layout::array::<T>(capacity).unwrap())
        .unwrap()
        .0
}
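/// Drops the initialized elements and deallocates the buffer.
///
/// Safety: the caller must guarantee that the refcount has reached zero and
/// that nothing accesses `inner` afterwards.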
unsafe fn drop_inner<T>(mut inner: NonNull<SharedVectorInner<T>>) {
    debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
    let data_ptr = inner.as_mut().data.as_mut_ptr();
    for x in 0..inner.as_ref().header.size {
        core::ptr::drop_in_place(data_ptr.add(x));
    }
    alloc::alloc::dealloc(
        inner.as_ptr() as *mut u8,
        compute_inner_layout::<T>(inner.as_ref().header.capacity),
    )
}
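/// Allocates a buffer for `capacity` elements, with an initial refcount of 1
/// and a size of 0.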
fn alloc_with_capacity<T>(capacity: usize) -> NonNull<SharedVectorInner<T>> {
    let ptr = unsafe { ::alloc::alloc::alloc(compute_inner_layout::<T>(capacity)) };
    assert!(!ptr.is_null(), "allocation of space for {} elements failed", capacity);
    unsafe {
        core::ptr::write(
            ptr as *mut SharedVectorHeader,
            SharedVectorHeader { refcount: 1.into(), size: 0, capacity },
        );
    }
    NonNull::new(ptr).unwrap().cast()
}
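/// Computes the capacity for a growing vector: at least double the current
/// capacity, clamped to a small minimum so that tiny vectors do not
/// reallocate on every push (mirroring the standard library's heuristic).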
fn capacity_for_grow(current_cap: usize, required_cap: usize, elem_size: usize) -> usize {
    if current_cap >= required_cap {
        return current_cap;
    }
    let cap = core::cmp::max(current_cap * 2, required_cap);
    let min_non_zero_cap = if elem_size == 1 {
        8
    } else if elem_size <= 1024 {
        4
    } else {
        1
    };
    core::cmp::max(min_non_zero_cap, cap)
}
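/// `SharedVector` holds a reference-counted, implicitly shared copy of `[T]`.
///
/// Cloning only increments the reference count. Mutating methods such as
/// [`SharedVector::push`] detach first, copying the elements into a fresh
/// buffer if it is shared, so a clone never observes changes made through
/// another handle (copy-on-write).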
#[repr(C)]
pub struct SharedVector<T> {
    inner: NonNull<SharedVectorInner<T>>,
}
// Safety: the reference count is atomic, so a `SharedVector` can be sent to
// another thread. Since clones on different threads share the same buffer and
// hand out `&T`, `T` must be `Sync` in addition to `Send`.
unsafe impl<T: Send + Sync> Send for SharedVector<T> {}
impl<T> Drop for SharedVector<T> {
    fn drop(&mut self) {
        unsafe {
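            // A negative refcount marks the static SHARED_NULL; never free it.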
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                < 0
            {
                return;
            }
            if self.inner.as_ref().header.refcount.fetch_sub(1, atomic::Ordering::SeqCst) == 1 {
                drop_inner(self.inner)
            }
        }
    }
}
impl<T> Clone for SharedVector<T> {
    fn clone(&self) -> Self {
        unsafe {
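            // Don't bump the refcount of the static SHARED_NULL (it is -1).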
            if self
                .inner
                .cast::<SharedVectorHeader>()
                .as_ref()
                .refcount
                .load(atomic::Ordering::Relaxed)
                > 0
            {
                self.inner.as_ref().header.refcount.fetch_add(1, atomic::Ordering::SeqCst);
            }
            SharedVector { inner: self.inner }
        }
    }
}
impl<T> SharedVector<T> {
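    /// Creates a new, empty vector with storage reserved for `capacity`
    /// elements.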
    pub fn with_capacity(capacity: usize) -> Self {
        Self { inner: alloc_with_capacity(capacity) }
    }
    fn as_ptr(&self) -> *const T {
        unsafe { self.inner.as_ref().data.as_ptr() }
    }
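    /// Returns the number of elements in the vector.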
    pub fn len(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().size }
    }
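    /// Returns `true` if the vector contains no elements.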
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
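    /// Returns a slice over the vector's elements.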
    pub fn as_slice(&self) -> &[T] {
        if self.is_empty() {
            &[]
        } else {
            unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) }
        }
    }
    fn capacity(&self) -> usize {
        unsafe { self.inner.cast::<SharedVectorHeader>().as_ref().capacity }
    }
}
impl<T: Clone> SharedVector<T> {
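    /// Constructs a vector by cloning the elements of `slice`.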
    pub fn from_slice(slice: &[T]) -> SharedVector<T> {
        Self::from(slice)
    }
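    /// Ensures the buffer is uniquely owned and has room for at least
    /// `new_capacity` elements, moving or cloning the existing elements into
    /// a fresh allocation if necessary.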
    fn detach(&mut self, new_capacity: usize) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if !is_shared && new_capacity <= self.capacity() {
            return;
        }
        let mut new_array = SharedVector::with_capacity(new_capacity);
        core::mem::swap(&mut self.inner, &mut new_array.inner);
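        // `self` now owns the fresh buffer. Iterating the old vector moves
        // the elements if it was uniquely owned and clones them otherwise.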
        let mut size = 0;
        for x in new_array.into_iter() {
            assert_ne!(size, new_capacity);
            unsafe {
                core::ptr::write(self.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                self.inner.as_mut().header.size = size;
            }
            if size == new_capacity {
                break;
            }
        }
    }
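    /// Returns a mutable slice, detaching (copying) the buffer first if it is
    /// shared with other handles.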
    pub fn make_mut_slice(&mut self) -> &mut [T] {
        self.detach(self.len());
        unsafe { core::slice::from_raw_parts_mut(self.as_ptr() as *mut T, self.len()) }
    }
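    /// Appends `value` to the end of the vector, growing and detaching the
    /// buffer as needed.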
    pub fn push(&mut self, value: T) {
        self.detach(capacity_for_grow(self.capacity(), self.len() + 1, core::mem::size_of::<T>()));
        unsafe {
            core::ptr::write(
                self.inner.as_mut().data.as_mut_ptr().add(self.inner.as_mut().header.size),
                value,
            );
            self.inner.as_mut().header.size += 1;
        }
    }
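    /// Resizes the vector to `new_len` elements: new slots are filled with
    /// clones of `value`, excess elements are dropped.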
    pub fn resize(&mut self, new_len: usize, value: T) {
        if self.len() == new_len {
            return;
        }
        self.detach(new_len);
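        // Safety: detach() above guaranteed that the buffer is not shared.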
        let inner = unsafe { self.inner.as_mut() };
        if inner.header.size >= new_len {
            self.shrink(new_len);
        } else {
            while inner.header.size < new_len {
                unsafe {
                    core::ptr::write(inner.data.as_mut_ptr().add(inner.header.size), value.clone());
                }
                inner.header.size += 1;
            }
        }
    }
    fn shrink(&mut self, new_len: usize) {
        if self.len() == new_len {
            return;
        }
        assert!(
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } == 1
        );
        let inner = unsafe { self.inner.as_mut() };
        while inner.header.size > new_len {
            inner.header.size -= 1;
            unsafe {
                core::ptr::drop_in_place(inner.data.as_mut_ptr().add(inner.header.size));
            }
        }
    }
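    /// Removes all elements. A shared buffer is simply released; a uniquely
    /// owned one keeps its capacity.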
    pub fn clear(&mut self) {
        let is_shared =
            unsafe { self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) } != 1;
        if is_shared {
            *self = SharedVector::default();
        } else {
            self.shrink(0)
        }
    }
}
impl<T> Deref for SharedVector<T> {
    type Target = [T];
    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}
impl<T: Clone> From<&[T]> for SharedVector<T> {
    fn from(slice: &[T]) -> Self {
        let capacity = slice.len();
        let mut result = Self::with_capacity(capacity);
        for x in slice {
            unsafe {
                core::ptr::write(
                    result.inner.as_mut().data.as_mut_ptr().add(result.inner.as_mut().header.size),
                    x.clone(),
                );
                result.inner.as_mut().header.size += 1;
            }
        }
        result
    }
}
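// Implements `From<[T; N]>` for array lengths 0 through 31.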
macro_rules! from_array {
    ($($n:literal)*) => { $(
        impl<T: Clone> From<[T; $n]> for SharedVector<T> {
            fn from(array: [T; $n]) -> Self {
                array.iter().cloned().collect()
            }
        }
    )* };
}
from_array! {0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31}
impl<T> FromIterator<T> for SharedVector<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let mut iter = iter.into_iter();
        let mut capacity = iter.size_hint().0;
        let mut result = Self::with_capacity(capacity);
        let mut size = 0;
        while let Some(x) = iter.next() {
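            // The buffer is full: allocate a larger one and move the elements
            // written so far into it. The old allocation is wrapped in an
            // `IntoIterInner::UnShared` (refcount forced to 0) whose `Drop`
            // deallocates it; advancing `begin` past the moved elements keeps
            // them from being dropped a second time.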
            if size >= capacity {
                capacity = capacity_for_grow(
                    capacity,
                    size + 1 + iter.size_hint().0,
                    core::mem::size_of::<T>(),
                );
                unsafe {
                    result.inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed)
                };
                let mut old_buffer = IntoIter(IntoIterInner::UnShared(result.inner, 0));
                result.inner = alloc_with_capacity::<T>(capacity);
                match &mut old_buffer.0 {
                    IntoIterInner::UnShared(old_inner, begin) => {
                        while *begin < size {
                            unsafe {
                                core::ptr::write(
                                    result.inner.as_mut().data.as_mut_ptr().add(*begin),
                                    core::ptr::read(old_inner.as_ref().data.as_ptr().add(*begin)),
                                );
                                *begin += 1;
                                result.inner.as_mut().header.size = *begin;
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
            debug_assert_eq!(result.len(), size);
            debug_assert!(result.capacity() > size);
            unsafe {
                core::ptr::write(result.inner.as_mut().data.as_mut_ptr().add(size), x);
                size += 1;
                result.inner.as_mut().header.size = size;
            }
        }
        result
    }
}
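// Reserve room for the iterator's lower size hint up front, then push the
// items one by one.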
impl<T: Clone> Extend<T> for SharedVector<T> {
    fn extend<X: IntoIterator<Item = T>>(&mut self, iter: X) {
        let iter = iter.into_iter();
        let hint = iter.size_hint().0;
        if hint > 0 {
            self.detach(capacity_for_grow(
                self.capacity(),
                self.len() + hint,
                core::mem::size_of::<T>(),
            ));
        }
        for item in iter {
            self.push(item);
        }
    }
}
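/// The buffer that every default-constructed `SharedVector` points to. Its
/// refcount of -1 tells `Clone` and `Drop` to leave it untouched.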
static SHARED_NULL: SharedVectorHeader =
    SharedVectorHeader { refcount: atomic::AtomicIsize::new(-1), size: 0, capacity: 0 };
impl<T> Default for SharedVector<T> {
    fn default() -> Self {
        SharedVector { inner: NonNull::from(&SHARED_NULL).cast() }
    }
}
impl<T: Debug> Debug for SharedVector<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.as_slice().fmt(f)
    }
}
impl<T> AsRef<[T]> for SharedVector<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}
impl<T, U> PartialEq<U> for SharedVector<T>
where
    U: ?Sized + AsRef<[T]>,
    T: PartialEq,
{
    fn eq(&self, other: &U) -> bool {
        self.as_slice() == other.as_ref()
    }
}
impl<T: Eq> Eq for SharedVector<T> {}
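// Consuming iteration: a uniquely owned vector hands its buffer to the
// iterator so the elements can be moved out; a shared vector is kept alive
// and its elements are cloned instead (hence the `T: Clone` bound).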
impl<T: Clone> IntoIterator for SharedVector<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        IntoIter(unsafe {
            if self.inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed) == 1 {
                let inner = self.inner;
                core::mem::forget(self);
                inner.as_ref().header.refcount.store(0, atomic::Ordering::Relaxed);
                IntoIterInner::UnShared(inner, 0)
            } else {
                IntoIterInner::Shared(self, 0)
            }
        })
    }
}
#[cfg(feature = "serde")]
use serde::ser::SerializeSeq;
#[cfg(feature = "serde")]
impl<T> serde::Serialize for SharedVector<T>
where
    T: serde::Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for item in self.iter() {
            seq.serialize_element(item)?;
        }
        seq.end()
    }
}
#[cfg(feature = "serde")]
impl<'de, T> serde::Deserialize<'de> for SharedVector<T>
where
    T: Clone + serde::Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut elements: alloc::vec::Vec<T> = serde::Deserialize::deserialize(deserializer)?;
        let mut shared_vec = SharedVector::with_capacity(elements.len());
        for elem in elements.drain(..) {
            shared_vec.push(elem);
        }
        Ok(shared_vec)
    }
}
enum IntoIterInner<T> {
    Shared(SharedVector<T>, usize),
    UnShared(NonNull<SharedVectorInner<T>>, usize),
}
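// Same cleanup as `drop_inner`, except that elements before `begin` were
// already moved out by the iterator and must not be dropped again.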
impl<T> Drop for IntoIterInner<T> {
    fn drop(&mut self) {
        match self {
            IntoIterInner::Shared(..) => { /* dropping the SharedVector field is enough */ }
            IntoIterInner::UnShared(mut inner, begin) => unsafe {
                debug_assert_eq!(inner.as_ref().header.refcount.load(atomic::Ordering::Relaxed), 0);
                let data_ptr = inner.as_mut().data.as_mut_ptr();
                for x in (*begin)..inner.as_ref().header.size {
                    core::ptr::drop_in_place(data_ptr.add(x));
                }
                ::alloc::alloc::dealloc(
                    inner.as_ptr() as *mut u8,
                    compute_inner_layout::<T>(inner.as_ref().header.capacity),
                )
            },
        }
    }
}
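/// Iterator returned by [`SharedVector::into_iter`].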
pub struct IntoIter<T>(IntoIterInner<T>);
impl<T: Clone> Iterator for IntoIter<T> {
    type Item = T;
    fn next(&mut self) -> Option<Self::Item> {
        match &mut self.0 {
            IntoIterInner::Shared(array, moved) => {
                let result = array.as_slice().get(*moved).cloned();
                *moved += 1;
                result
            }
            IntoIterInner::UnShared(inner, begin) => unsafe {
                if *begin < inner.as_ref().header.size {
                    let r = core::ptr::read(inner.as_ref().data.as_ptr().add(*begin));
                    *begin += 1;
                    Some(r)
                } else {
                    None
                }
            },
        }
    }
}
#[test]
fn simple_test() {
    let x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y: SharedVector<i32> = SharedVector::from([3, 2, 1]);
    assert_eq!(x, x.clone());
    assert_ne!(x, y);
    let z: [i32; 3] = [1, 2, 3];
    assert_eq!(z, x.as_slice());
    let vec: Vec<i32> = vec![1, 2, 3];
    assert_eq!(x, vec);
    let def: SharedVector<i32> = Default::default();
    assert_eq!(def, SharedVector::<i32>::default());
    assert_ne!(def, x);
}
#[test]
fn push_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.push(4);
    x.push(5);
    x.push(6);
    assert_eq!(x.as_slice(), &[1, 2, 3, 4, 5, 6]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}
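// Two more examples of the copy-on-write contract: a mutation made through
// one handle must never be visible through another.
#[test]
fn make_mut_slice_detaches() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.make_mut_slice()[0] = 42;
    assert_eq!(x.as_slice(), &[42, 2, 3]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}
#[test]
fn resize_test() {
    let mut x: SharedVector<i32> = SharedVector::from([1, 2, 3]);
    let y = x.clone();
    x.resize(5, 0);
    assert_eq!(x.as_slice(), &[1, 2, 3, 0, 0]);
    x.resize(2, 0);
    assert_eq!(x.as_slice(), &[1, 2]);
    assert_eq!(y.as_slice(), &[1, 2, 3]);
}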
#[test]
#[should_panic]
fn invalid_capacity_test() {
    let _: SharedVector<u8> = SharedVector::with_capacity(usize::MAX / 2 - 1000);
}
#[test]
fn collect_from_iter_with_no_size_hint() {
    struct NoSizeHintIter<'a> {
        data: &'a [&'a str],
        i: usize,
    }
    impl<'a> Iterator for NoSizeHintIter<'a> {
        type Item = String;
        fn next(&mut self) -> Option<Self::Item> {
            if self.i >= self.data.len() {
                return None;
            }
            let item = self.data[self.i];
            self.i += 1;
            Some(item.to_string())
        }
        fn size_hint(&self) -> (usize, Option<usize>) {
            (0, None)
        }
    }
    let input = NoSizeHintIter { data: &["Hello", "sweet", "world", "of", "iterators"], i: 0 };
    let shared_vec: SharedVector<String> = input.collect();
    assert_eq!(shared_vec.as_slice(), &["Hello", "sweet", "world", "of", "iterators"]);
}
#[test]
fn test_capacity_grows_only_when_needed() {
    let mut vec: SharedVector<u8> = SharedVector::with_capacity(2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.capacity(), 2);
    vec.push(0);
    assert_eq!(vec.len(), 3);
    assert!(vec.capacity() > 2);
}
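// `into_iter` either moves or clones depending on whether the buffer is
// shared; both paths must yield the same sequence.
#[test]
fn into_iter_moves_or_clones() {
    // Uniquely owned: the elements are moved out of the buffer.
    let x: SharedVector<String> = SharedVector::from(["a".to_string(), "b".to_string()]);
    let moved: Vec<String> = x.into_iter().collect();
    assert_eq!(moved, ["a", "b"]);
    // Shared: the elements are cloned and the other handle stays intact.
    let y: SharedVector<String> = SharedVector::from(["c".to_string(), "d".to_string()]);
    let z = y.clone();
    let cloned: Vec<String> = y.into_iter().collect();
    assert_eq!(cloned, ["c", "d"]);
    assert_eq!(z.as_slice(), ["c", "d"]);
}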
#[test]
fn test_vector_clear() {
    let mut vec: SharedVector<String> = Default::default();
    vec.clear();
    vec.push("Hello".into());
    vec.push("World".into());
    vec.push("of".into());
    vec.push("Vectors".into());
    let mut copy = vec.clone();
    assert_eq!(vec.len(), 4);
    let orig_cap = vec.capacity();
    assert!(orig_cap >= vec.len());
    vec.clear();
    assert_eq!(vec.len(), 0);
    // vec was shared, so clear() swapped in the default (null) vector.
    assert_eq!(vec.capacity(), 0);
    vec.push("Welcome back".into());
    assert_eq!(vec.len(), 1);
    assert!(vec.capacity() >= vec.len());
    assert_eq!(copy.len(), 4);
    assert_eq!(copy.capacity(), orig_cap);
    // copy is no longer shared, so clear() keeps its allocation.
    copy.clear();
    assert_eq!(copy.capacity(), orig_cap);
}
#[cfg(feature = "ffi")]
pub(crate) mod ffi {
    use super::*;
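    // Entry points used from the C/C++ side to allocate and free the backing
    // storage of a `SharedVector`, and to obtain the shared empty vector.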
    #[no_mangle]
    pub unsafe extern "C" fn slint_shared_vector_allocate(size: usize, align: usize) -> *mut u8 {
        alloc::alloc::alloc(alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }
    #[no_mangle]
    pub unsafe extern "C" fn slint_shared_vector_free(ptr: *mut u8, size: usize, align: usize) {
        alloc::alloc::dealloc(ptr, alloc::alloc::Layout::from_size_align(size, align).unwrap())
    }
    #[no_mangle]
    pub unsafe extern "C" fn slint_shared_vector_empty() -> *const u8 {
        &SHARED_NULL as *const _ as *const u8
    }
}
#[cfg(feature = "serde")]
#[test]
fn test_serialize_deserialize_sharedvector() {
    let v = SharedVector::from([1, 2, 3]);
    let serialized = serde_json::to_string(&v).unwrap();
    let deserialized: SharedVector<i32> = serde_json::from_str(&serialized).unwrap();
    assert_eq!(v, deserialized);
}