use crate::alloc::Deallocation;
use crate::buffer::Buffer;
use crate::native::ArrowNativeType;
use crate::MutableBuffer;
use std::fmt::Formatter;
use std::marker::PhantomData;
use std::ops::Deref;
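/// A strongly-typed wrapper around a [`Buffer`] that exposes an immutable
/// region of memory as a slice of [`ArrowNativeType`] values `T`.
///
/// The wrapped [`Buffer`] is verified on construction to be aligned to `T`,
/// which makes dereferencing to `&[T]` sound. Cloning and slicing are
/// zero-copy, as they only adjust views into the shared allocation.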
#[derive(Clone)]
pub struct ScalarBuffer<T: ArrowNativeType> {
    buffer: Buffer,
    phantom: PhantomData<T>,
}
impl<T: ArrowNativeType> std::fmt::Debug for ScalarBuffer<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("ScalarBuffer").field(&self.as_ref()).finish()
    }
}
impl<T: ArrowNativeType> ScalarBuffer<T> {
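    /// Create a new [`ScalarBuffer`] from a [`Buffer`], with `offset` and `len`
    /// expressed in units of `T`, not bytes.
    ///
    /// # Panics
    ///
    /// Panics if:
    ///
    /// * `offset` or `len` overflow `usize` when converted to a byte offset or byte length
    /// * the requested region exceeds the bounds of `buffer`
    /// * `buffer` is not aligned to `align_of::<T>()`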
    pub fn new(buffer: Buffer, offset: usize, len: usize) -> Self {
        let size = std::mem::size_of::<T>();
        let byte_offset = offset.checked_mul(size).expect("offset overflow");
        let byte_len = len.checked_mul(size).expect("length overflow");
        buffer.slice_with_length(byte_offset, byte_len).into()
    }
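    /// Returns a zero-copy slice of this buffer, with `offset` and `len` in units of `T`.
    ///
    /// # Panics
    ///
    /// Panics under the same conditions as [`Self::new`].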
    pub fn slice(&self, offset: usize, len: usize) -> Self {
        Self::new(self.buffer.clone(), offset, len)
    }
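    /// Returns a reference to the underlying [`Buffer`].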
    pub fn inner(&self) -> &Buffer {
        &self.buffer
    }
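    /// Consumes `self`, returning the underlying [`Buffer`].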
    pub fn into_inner(self) -> Buffer {
        self.buffer
    }
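    /// Returns `true` if `self` and `other` refer to the same memory region,
    /// using pointer comparison rather than comparing the values themselves.
    /// This is cheaper than `PartialEq::eq`, but may return `false` for
    /// buffers that are logically equal.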
    #[inline]
    pub fn ptr_eq(&self, other: &Self) -> bool {
        self.buffer.ptr_eq(&other.buffer)
    }
}
impl<T: ArrowNativeType> Deref for ScalarBuffer<T> {
    type Target = [T];
    #[inline]
    fn deref(&self) -> &Self::Target {
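        // SAFETY: `From<Buffer>` verifies the buffer is aligned to `T`, and the
        // returned slice borrows `self`, so the allocation outlives it. The
        // element count is the byte length divided by `size_of::<T>()`,
        // truncating any trailing partial element.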
        unsafe {
            std::slice::from_raw_parts(
                self.buffer.as_ptr() as *const T,
                self.buffer.len() / std::mem::size_of::<T>(),
            )
        }
    }
}
impl<T: ArrowNativeType> AsRef<[T]> for ScalarBuffer<T> {
    #[inline]
    fn as_ref(&self) -> &[T] {
        self
    }
}
impl<T: ArrowNativeType> From<MutableBuffer> for ScalarBuffer<T> {
    fn from(value: MutableBuffer) -> Self {
        Buffer::from(value).into()
    }
}
impl<T: ArrowNativeType> From<Buffer> for ScalarBuffer<T> {
    fn from(buffer: Buffer) -> Self {
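        // Verify the pointer is sufficiently aligned for `T` so that `Deref`
        // can soundly reinterpret the underlying bytes as `&[T]`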
        let align = std::mem::align_of::<T>();
        let is_aligned = buffer.as_ptr().align_offset(align) == 0;
        match buffer.deallocation() {
            Deallocation::Standard(_) => assert!(
                is_aligned,
                "Memory pointer is not aligned with the specified scalar type"
            ),
            Deallocation::Custom(_) => assert!(
                is_aligned,
                "Memory pointer from external source (e.g., FFI) is not aligned with the specified scalar type. Before importing buffer through FFI, please make sure the allocation is aligned."
            ),
        }
        Self {
            buffer,
            phantom: Default::default(),
        }
    }
}
impl<T: ArrowNativeType> From<Vec<T>> for ScalarBuffer<T> {
    fn from(value: Vec<T>) -> Self {
        Self {
            buffer: Buffer::from_vec(value),
            phantom: Default::default(),
        }
    }
}
impl<T: ArrowNativeType> FromIterator<T> for ScalarBuffer<T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        iter.into_iter().collect::<Vec<_>>().into()
    }
}
impl<'a, T: ArrowNativeType> IntoIterator for &'a ScalarBuffer<T> {
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        self.as_ref().iter()
    }
}
impl<T: ArrowNativeType, S: AsRef<[T]> + ?Sized> PartialEq<S> for ScalarBuffer<T> {
    fn eq(&self, other: &S) -> bool {
        self.as_ref().eq(other.as_ref())
    }
}
impl<T: ArrowNativeType, const N: usize> PartialEq<ScalarBuffer<T>> for [T; N] {
    fn eq(&self, other: &ScalarBuffer<T>) -> bool {
        self.as_ref().eq(other.as_ref())
    }
}
impl<T: ArrowNativeType> PartialEq<ScalarBuffer<T>> for [T] {
    fn eq(&self, other: &ScalarBuffer<T>) -> bool {
        self.as_ref().eq(other.as_ref())
    }
}
impl<T: ArrowNativeType> PartialEq<ScalarBuffer<T>> for Vec<T> {
    fn eq(&self, other: &ScalarBuffer<T>) -> bool {
        self.as_slice().eq(other.as_ref())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_basic() {
        let expected = [0_i32, 1, 2];
        let buffer = Buffer::from_iter(expected.iter().cloned());
        let typed = ScalarBuffer::<i32>::new(buffer.clone(), 0, 3);
        assert_eq!(*typed, expected);
        let typed = ScalarBuffer::<i32>::new(buffer.clone(), 1, 2);
        assert_eq!(*typed, expected[1..]);
        let typed = ScalarBuffer::<i32>::new(buffer.clone(), 1, 0);
        assert!(typed.is_empty());
        let typed = ScalarBuffer::<i32>::new(buffer, 3, 0);
        assert!(typed.is_empty());
    }
    #[test]
    fn test_debug() {
        let buffer = ScalarBuffer::from(vec![1, 2, 3]);
        assert_eq!(format!("{buffer:?}"), "ScalarBuffer([1, 2, 3])");
    }
    #[test]
    #[should_panic(
        expected = "Memory pointer is not aligned with the specified scalar type"
    )]
    fn test_unaligned() {
        let expected = [0_i32, 1, 2];
        let buffer = Buffer::from_iter(expected.iter().cloned());
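        // slicing off a single byte misaligns the pointer for 4-byte aligned i32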
        let buffer = buffer.slice(1);
        ScalarBuffer::<i32>::new(buffer, 0, 2);
    }
    #[test]
    #[should_panic(
        expected = "the offset of the new Buffer cannot exceed the existing length"
    )]
    fn test_length_out_of_bounds() {
        let buffer = Buffer::from_iter([0_i32, 1, 2]);
        ScalarBuffer::<i32>::new(buffer, 1, 3);
    }
    #[test]
    #[should_panic(
        expected = "the offset of the new Buffer cannot exceed the existing length"
    )]
    fn test_offset_out_of_bounds() {
        let buffer = Buffer::from_iter([0_i32, 1, 2]);
        ScalarBuffer::<i32>::new(buffer, 4, 0);
    }
    #[test]
    #[should_panic(expected = "offset overflow")]
    fn test_length_overflow() {
        let buffer = Buffer::from_iter([0_i32, 1, 2]);
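        // the byte offset (`usize::MAX * 4`) overflows before the length is checked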
        ScalarBuffer::<i32>::new(buffer, usize::MAX, 1);
    }
    #[test]
    #[should_panic(expected = "offset overflow")]
    fn test_start_overflow() {
        let buffer = Buffer::from_iter([0_i32, 1, 2]);
        ScalarBuffer::<i32>::new(buffer, usize::MAX / 4 + 1, 0);
    }
    #[test]
    #[should_panic(expected = "length overflow")]
    fn test_end_overflow() {
        let buffer = Buffer::from_iter([0_i32, 1, 2]);
        ScalarBuffer::<i32>::new(buffer, 0, usize::MAX / 4 + 1);
    }
}