use std::marker::PhantomData;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use sync::AccessError;
/// A subrange of a buffer resource.
///
/// `B` is the underlying buffer object and `T` is the type the slice's
/// content is interpreted as. No `T` value is stored; only byte bounds.
pub struct BufferSlice<T: ?Sized, B> {
    // Marks the content type without owning a `T`.
    marker: PhantomData<T>,
    // The underlying buffer this slice refers to.
    resource: B,
    // Offset of the slice in bytes, relative to the start of `resource`
    // (added to `resource.inner().offset` in the `BufferAccess` impl).
    offset: usize,
    // Length of the slice in bytes.
    size: usize,
}
// Manual `Clone` impl: a derive would put a spurious `T: Clone` bound on
// the slice, but only the buffer handle `B` actually needs to be cloned.
impl<T: ?Sized, B> Clone for BufferSlice<T, B>
    where B: Clone
{
    #[inline]
    fn clone(&self) -> Self {
        let resource = self.resource.clone();
        BufferSlice {
            resource: resource,
            marker: PhantomData,
            offset: self.offset,
            size: self.size,
        }
    }
}
impl<T: ?Sized, B> BufferSlice<T, B> {
    /// Wraps a whole typed buffer into a slice covering its entire content.
    #[inline]
    pub fn from_typed_buffer_access(r: B) -> BufferSlice<T, B>
        where B: TypedBufferAccess<Content = T>
    {
        let size = r.size();
        BufferSlice {
            marker: PhantomData,
            resource: r,
            offset: 0,
            size: size,
        }
    }

    /// Returns the underlying buffer.
    // `#[inline]` added for consistency with the other trivial accessors.
    #[inline]
    pub fn buffer(&self) -> &B {
        &self.resource
    }

    /// Returns the offset of the slice in bytes, relative to the start of
    /// the underlying buffer.
    #[inline]
    pub fn offset(&self) -> usize {
        self.offset
    }

    /// Returns the size of the slice in bytes.
    #[inline]
    pub fn size(&self) -> usize {
        self.size
    }

    /// Builds a sub-slice by projecting to a sub-borrow of `T`
    /// (typically a struct field, e.g. `|s| &s.field`).
    ///
    /// The closure receives a dummy reference whose address encodes the byte
    /// offset of the projection; the address of the returned reference is
    /// interpreted as that offset.
    ///
    /// # Panics
    ///
    /// Panics if the projected range does not fit inside this slice, or if
    /// `offset + size` overflows `usize`.
    ///
    /// # Safety
    ///
    /// The closure must not read through its argument in any way; it may
    /// only return a reference derived from it by field access or indexing.
    ///
    /// NOTE(review): `mem::zeroed::<&T>()` materializes a null reference,
    /// which is undefined behavior under Rust's validity rules. Kept as-is
    /// because the offset-probing scheme relies on a zero base address —
    /// a proper fix needs a different offset-computation design.
    #[inline]
    pub unsafe fn slice_custom<F, R: ?Sized>(self, f: F) -> BufferSlice<R, B>
        where F: for<'r> FnOnce(&'r T) -> &'r R
    {
        let data: &T = mem::zeroed();
        let result = f(data);
        let size = mem::size_of_val(result);
        let result = result as *const R as *const () as usize;

        // Reject projections that escape the slice. `checked_add` guards
        // against wrap-around in release builds, where the original
        // `result + size` could silently overflow.
        let end = result.checked_add(size)
                        .expect("slice_custom: offset + size overflows usize");
        assert!(end <= self.size());

        BufferSlice {
            marker: PhantomData,
            resource: self.resource,
            offset: self.offset + result,
            size: size,
        }
    }

    /// Reinterprets the slice's content as another type `R` without
    /// changing its byte range.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the bytes covered by the slice are a
    /// valid representation of `R`.
    #[inline]
    pub unsafe fn reinterpret<R: ?Sized>(self) -> BufferSlice<R, B> {
        BufferSlice {
            marker: PhantomData,
            resource: self.resource,
            offset: self.offset,
            size: self.size,
        }
    }
}
impl<T, B> BufferSlice<[T], B> {
    /// Returns the number of `T` elements covered by this slice.
    #[inline]
    pub fn len(&self) -> usize {
        let elem = mem::size_of::<T>();
        // The byte size must be an exact multiple of the element size.
        debug_assert_eq!(self.size() % elem, 0);
        self.size() / elem
    }

    /// Narrows the slice down to the single element at `index`,
    /// or `None` if `index` is out of range.
    #[inline]
    pub fn index(self, index: usize) -> Option<BufferSlice<T, B>> {
        if index < self.len() {
            let elem = mem::size_of::<T>();
            Some(BufferSlice {
                marker: PhantomData,
                resource: self.resource,
                offset: self.offset + index * elem,
                size: elem,
            })
        } else {
            None
        }
    }

    /// Narrows the slice to the elements in `range`, or `None` if the
    /// range ends past the end of the slice.
    #[inline]
    pub fn slice(self, range: Range<usize>) -> Option<BufferSlice<[T], B>> {
        if range.end > self.len() {
            return None;
        }
        let elem = mem::size_of::<T>();
        let start_bytes = self.offset + range.start * elem;
        let len_bytes = (range.end - range.start) * elem;
        Some(BufferSlice {
            marker: PhantomData,
            resource: self.resource,
            offset: start_bytes,
            size: len_bytes,
        })
    }
}
// Forwards `BufferAccess` to the underlying buffer, adjusting only the
// reported offset and size to the sliced subrange.
unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B>
    where B: BufferAccess
{
    #[inline]
    fn inner(&self) -> BufferInner {
        // Shift the parent's offset by the slice's own offset.
        let parent = self.resource.inner();
        BufferInner {
            buffer: parent.buffer,
            offset: parent.offset + self.offset,
        }
    }

    #[inline]
    fn size(&self) -> usize {
        self.size
    }

    // Conflict detection is delegated wholesale: the slice conflicts with
    // whatever its parent buffer conflicts with.
    #[inline]
    fn conflicts_buffer(&self, other: &dyn BufferAccess) -> bool {
        self.resource.conflicts_buffer(other)
    }

    #[inline]
    fn conflicts_image(&self, other: &dyn ImageAccess) -> bool {
        self.resource.conflicts_image(other)
    }

    #[inline]
    fn conflict_key(&self) -> (u64, usize) {
        self.resource.conflict_key()
    }

    // GPU lock management is likewise handled entirely by the parent.
    #[inline]
    fn try_gpu_lock(&self, exclusive_access: bool, queue: &Queue) -> Result<(), AccessError> {
        self.resource.try_gpu_lock(exclusive_access, queue)
    }

    #[inline]
    unsafe fn increase_gpu_lock(&self) {
        self.resource.increase_gpu_lock()
    }

    #[inline]
    unsafe fn unlock(&self) {
        self.resource.unlock()
    }
}
/// Exposes the slice's content as type `T` (the slice's type parameter).
// NOTE(review): this claims `Content = T` for any `BufferAccess` parent;
// the typed interpretation is established by the slice's constructors.
unsafe impl<T: ?Sized, B> TypedBufferAccess for BufferSlice<T, B>
    where B: BufferAccess
{
    type Content = T;
}
unsafe impl<T: ?Sized, B> DeviceOwned for BufferSlice<T, B>
    where B: DeviceOwned
{
    /// Returns the device of the underlying buffer.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.resource.device()
    }
}
// A single-element slice can always be viewed as a one-element array
// slice: the byte range is unchanged, only the type marker moves from
// `T` to `[T]`.
impl<T, B> From<BufferSlice<T, B>> for BufferSlice<[T], B> {
    #[inline]
    fn from(r: BufferSlice<T, B>) -> BufferSlice<[T], B> {
        let BufferSlice { resource, offset, size, marker: _ } = r;
        BufferSlice {
            marker: PhantomData,
            resource: resource,
            offset: offset,
            size: size,
        }
    }
}
/// Takes a `BufferSlice` over a struct and narrows it to one of the
/// struct's fields, e.g. `buffer_slice_field!(slice, my_field)`.
///
/// Expands to an `unsafe` call to `slice_custom`; the generated
/// field-projection closure satisfies `slice_custom`'s contract by
/// construction (it only narrows the borrow, never reads through it).
#[macro_export]
macro_rules! buffer_slice_field {
    ($slice:expr, $field:ident) => (
        unsafe { $slice.slice_custom(|s| &s.$field) }
    )
}