use crate::buffer::sys::BufferCreationError;
use crate::buffer::sys::UnsafeBuffer;
use crate::buffer::traits::BufferAccess;
use crate::buffer::traits::BufferInner;
use crate::buffer::traits::TypedBufferAccess;
use crate::buffer::BufferUsage;
use crate::buffer::CpuAccessibleBuffer;
use crate::command_buffer::AutoCommandBufferBuilder;
use crate::command_buffer::CommandBufferExecFuture;
use crate::command_buffer::CommandBufferUsage;
use crate::command_buffer::PrimaryAutoCommandBuffer;
use crate::command_buffer::PrimaryCommandBuffer;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::device::Queue;
use crate::image::ImageAccess;
use crate::instance::QueueFamily;
use crate::memory::pool::AllocFromRequirementsFilter;
use crate::memory::pool::AllocLayout;
use crate::memory::pool::MappingRequirement;
use crate::memory::pool::MemoryPool;
use crate::memory::pool::MemoryPoolAlloc;
use crate::memory::pool::PotentialDedicatedAllocation;
use crate::memory::pool::StdMemoryPoolAlloc;
use crate::memory::DedicatedAlloc;
use crate::memory::DeviceMemoryAllocError;
use crate::sync::AccessError;
use crate::sync::NowFuture;
use crate::sync::Sharing;
use smallvec::SmallVec;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
/// Device-local buffer whose content is written once right after creation and
/// then only ever read.
///
/// `T` is the (possibly unsized) content type; `A` is the memory allocation
/// backing the buffer.
pub struct ImmutableBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Raw Vulkan buffer handle.
inner: UnsafeBuffer,
// Memory allocation bound to `inner`; kept alive for the buffer's lifetime.
memory: A,
// Flipped to `true` once the initial upload completes; reads are refused
// before that (see `try_gpu_lock`).
initialized: AtomicBool,
// Ids of the queue families the buffer may be used with.
queue_families: SmallVec<[u32; 4]>,
// Logically owns a `T` without storing one.
marker: PhantomData<Box<T>>,
}
// Future returned by `from_data`/`from_buffer`/`from_iter`: completion of the
// initial copy command buffer executed after `now()`.
type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, PrimaryAutoCommandBuffer>;
impl<T: ?Sized> ImmutableBuffer<T> {
    /// Builds an `ImmutableBuffer` that holds `data`.
    ///
    /// The data is first written to a temporary CPU-accessible staging buffer,
    /// then copied into the device-local buffer by a command buffer submitted
    /// on `queue`. The returned future represents completion of that copy and
    /// must be honored before the buffer is read.
    pub fn from_data(
        data: T,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        T: 'static + Copy + Send + Sync + Sized,
    {
        let staging = CpuAccessibleBuffer::from_data(
            queue.device().clone(),
            BufferUsage::transfer_source(),
            false,
            data,
        )?;
        ImmutableBuffer::from_buffer(staging, usage, queue)
    }

    /// Builds an `ImmutableBuffer` whose content is copied from `source`.
    ///
    /// Records and submits a copy command on `queue`; the returned future
    /// represents its completion.
    pub fn from_buffer<B>(
        source: B,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
        T: 'static + Send + Sync,
    {
        unsafe {
            // The destination must always be usable as a transfer destination,
            // whatever the caller asked for.
            let actual_usage = BufferUsage {
                transfer_destination: true,
                ..usage
            };

            let device = source.device().clone();
            let (buffer, init) = ImmutableBuffer::raw(
                device.clone(),
                source.size(),
                actual_usage,
                device.active_queue_families(),
            )?;

            // Record the copy from the staging buffer into the freshly
            // allocated immutable buffer and submit it immediately.
            let mut builder = AutoCommandBufferBuilder::primary(
                device,
                queue.family(),
                CommandBufferUsage::MultipleSubmit,
            )?;
            builder.copy_buffer(source, init).unwrap();
            let command_buffer = builder.build().unwrap();

            let future = match command_buffer.execute(queue) {
                Ok(f) => f,
                Err(_) => unreachable!(),
            };

            Ok((buffer, future))
        }
    }
}
impl<T> ImmutableBuffer<T> {
    /// Builds an uninitialized buffer sized for a single `T`.
    ///
    /// # Safety
    ///
    /// The returned buffer's content is undefined. The caller must write it
    /// exactly once, through the returned `ImmutableBufferInitialization`,
    /// before any read.
    #[inline]
    pub unsafe fn uninitialized(
        device: Arc<Device>,
        usage: BufferUsage,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
    {
        let size = mem::size_of::<T>();
        ImmutableBuffer::raw(device.clone(), size, usage, device.active_queue_families())
    }
}
impl<T> ImmutableBuffer<[T]> {
    /// Builds an `ImmutableBuffer` containing the elements produced by `data`.
    ///
    /// The elements are first written to a CPU-accessible staging buffer, then
    /// copied into the device-local buffer on `queue`. The returned future
    /// represents completion of that copy.
    pub fn from_iter<D>(
        data: D,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        D: ExactSizeIterator<Item = T>,
        T: 'static + Send + Sync + Sized,
    {
        let source = CpuAccessibleBuffer::from_iter(
            queue.device().clone(),
            BufferUsage::transfer_source(),
            false,
            data,
        )?;
        ImmutableBuffer::from_buffer(source, usage, queue)
    }

    /// Builds an uninitialized buffer sized for `len` elements of `T`.
    ///
    /// # Safety
    ///
    /// The returned buffer's content is undefined. The caller must initialize
    /// it, through the returned `ImmutableBufferInitialization`, before any
    /// read.
    ///
    /// # Panics
    ///
    /// Panics if `len * size_of::<T>()` overflows `usize`. A plain `*` would
    /// wrap in release builds and silently allocate a buffer far smaller than
    /// requested, which is unsound for a caller-initialized buffer, so the
    /// multiplication is checked.
    #[inline]
    pub unsafe fn uninitialized_array(
        device: Arc<Device>,
        len: usize,
        usage: BufferUsage,
    ) -> Result<
        (
            Arc<ImmutableBuffer<[T]>>,
            ImmutableBufferInitialization<[T]>,
        ),
        DeviceMemoryAllocError,
    > {
        let size = len
            .checked_mul(mem::size_of::<T>())
            .expect("ImmutableBuffer::uninitialized_array: buffer size overflows usize");
        ImmutableBuffer::raw(
            device.clone(),
            size,
            usage,
            device.active_queue_families(),
        )
    }
}
impl<T: ?Sized> ImmutableBuffer<T> {
/// Builds a new uninitialized buffer of `size` bytes usable by the given
/// queue families.
///
/// # Safety
///
/// The returned buffer's content is undefined; the caller must write it
/// (through the returned `ImmutableBufferInitialization`) before any read.
#[inline]
pub unsafe fn raw<'a, I>(
device: Arc<Device>,
size: usize,
usage: BufferUsage,
queue_families: I,
) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
where
I: IntoIterator<Item = QueueFamily<'a>>,
{
// Keep only the family ids so the stored list doesn't borrow the device.
let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
ImmutableBuffer::raw_impl(device, size, usage, queue_families)
}
// Non-generic (over the iterator) inner implementation of `raw`, which keeps
// monomorphization cost down.
unsafe fn raw_impl(
device: Arc<Device>,
size: usize,
usage: BufferUsage,
queue_families: SmallVec<[u32; 4]>,
) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
{
let (buffer, mem_reqs) = {
// Concurrent sharing mode is only needed when more than one queue family
// may touch the buffer; otherwise exclusive is cheaper.
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
} else {
Sharing::Exclusive
};
match UnsafeBuffer::new(device.clone(), size, usage, sharing, None) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err),
// NOTE(review): other creation errors are treated as impossible for
// the parameters used here — confirm against `UnsafeBuffer::new`.
Err(_) => unreachable!(),
}
};
// Allocate backing memory: prefer device-local memory, and request no CPU
// mapping since the buffer is only ever written via transfer commands.
let mem = MemoryPool::alloc_from_requirements(
&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap,
DedicatedAlloc::Buffer(&buffer),
|t| {
if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
}
},
)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
// The buffer starts uninitialized; `initialized` flips to true only after
// the initialization handle is unlocked following the first GPU write.
let final_buf = Arc::new(ImmutableBuffer {
inner: buffer,
memory: mem,
queue_families: queue_families,
initialized: AtomicBool::new(false),
marker: PhantomData,
});
// One-shot write handle sharing a fresh `used` flag.
let initialization = ImmutableBufferInitialization {
buffer: final_buf.clone(),
used: Arc::new(AtomicBool::new(false)),
};
Ok((final_buf, initialization))
}
}
impl<T: ?Sized, A> ImmutableBuffer<T, A> {
    /// Returns the device this buffer was created on.
    #[inline]
    pub fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }

    /// Returns the queue families this buffer can be used with.
    #[inline]
    pub fn queue_families(&self) -> Vec<QueueFamily> {
        let physical = self.device().physical_device();
        let mut families = Vec::with_capacity(self.queue_families.len());
        for &id in self.queue_families.iter() {
            // The stored ids were taken from this very device, so the lookup
            // cannot fail.
            families.push(physical.queue_family_by_id(id).unwrap());
        }
        families
    }
}
unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A> {
    #[inline]
    fn inner(&self) -> BufferInner {
        // The whole underlying buffer, starting at its beginning.
        BufferInner {
            buffer: &self.inner,
            offset: 0,
        }
    }

    #[inline]
    fn size(&self) -> usize {
        self.inner.size()
    }

    #[inline]
    fn conflicts_buffer(&self, other: &dyn BufferAccess) -> bool {
        // Two accesses conflict iff they refer to the same underlying buffer.
        other.conflict_key() == self.conflict_key()
    }

    #[inline]
    fn conflicts_image(&self, _other: &dyn ImageAccess) -> bool {
        // A buffer never aliases an image.
        false
    }

    #[inline]
    fn conflict_key(&self) -> (u64, usize) {
        (self.inner.key(), 0)
    }

    #[inline]
    fn try_gpu_lock(&self, exclusive_access: bool, _: &Queue) -> Result<(), AccessError> {
        // Content is immutable: writes are always refused, and reads are only
        // allowed once initialization has completed.
        if exclusive_access {
            Err(AccessError::ExclusiveDenied)
        } else if !self.initialized.load(Ordering::Relaxed) {
            Err(AccessError::BufferNotInitialized)
        } else {
            Ok(())
        }
    }

    #[inline]
    unsafe fn increase_gpu_lock(&self) {}

    #[inline]
    unsafe fn unlock(&self) {}
}
// The typed content of the buffer is `T` itself.
unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBuffer<T, A> {
type Content = T;
}
// Device ownership is delegated to the inner `UnsafeBuffer`.
unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
impl<T: ?Sized, A> PartialEq for ImmutableBuffer<T, A> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // Buffers are equal when they wrap the same inner buffer span.
        let same_inner = self.inner() == other.inner();
        same_inner && self.size() == other.size()
    }
}
// Full equivalence: `eq` only compares the inner buffer handle and size.
impl<T: ?Sized, A> Eq for ImmutableBuffer<T, A> {}
impl<T: ?Sized, A> Hash for ImmutableBuffer<T, A> {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must stay consistent with `PartialEq`, which compares the inner
        // buffer and the size.
        Hash::hash(&self.inner(), state);
        Hash::hash(&self.size(), state);
    }
}
/// One-shot handle used to perform the initial GPU write of an
/// `ImmutableBuffer`.
///
/// Clones share the same `used` flag, so only one clone can ever acquire the
/// write lock (see its `try_gpu_lock`).
pub struct ImmutableBufferInitialization<
T: ?Sized,
A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>,
> {
// The buffer being initialized.
buffer: Arc<ImmutableBuffer<T, A>>,
// Set once an initialization write has been claimed; shared across clones.
used: Arc<AtomicBool>,
}
unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBufferInitialization<T, A> {
// Accessors all delegate to the wrapped buffer.
#[inline]
fn inner(&self) -> BufferInner {
self.buffer.inner()
}
#[inline]
fn size(&self) -> usize {
self.buffer.size()
}
#[inline]
fn conflicts_buffer(&self, other: &dyn BufferAccess) -> bool {
self.conflict_key() == other.conflict_key()
}
#[inline]
fn conflicts_image(&self, other: &dyn ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> (u64, usize) {
// Same key as the wrapped buffer, so the two conflict with each other.
(self.buffer.inner.key(), 0)
}
#[inline]
fn try_gpu_lock(&self, _: bool, _: &Queue) -> Result<(), AccessError> {
// Refuse the write if the buffer has already been initialized.
if self.buffer.initialized.load(Ordering::Relaxed) {
return Err(AccessError::AlreadyInUse);
}
// Atomically claim the one-time initialization. `unwrap_or_else(|e| e)`
// extracts the previous value from either CAS outcome, so the `Ok` branch
// is taken only by the first caller to flip `used` from false to true.
if !self
.used
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
.unwrap_or_else(|e| e)
{
Ok(())
} else {
Err(AccessError::AlreadyInUse)
}
}
#[inline]
unsafe fn increase_gpu_lock(&self) {
debug_assert!(self.used.load(Ordering::Relaxed));
}
#[inline]
unsafe fn unlock(&self) {
// The GPU write has completed: mark the buffer readable from now on.
self.buffer.initialized.store(true, Ordering::Relaxed);
}
}
// The initialization handle exposes the same typed content as the buffer.
unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBufferInitialization<T, A> {
type Content = T;
}
unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBufferInitialization<T, A> {
    /// Returns the device the wrapped buffer lives on.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.buffer.device()
    }
}
impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn clone(&self) -> ImmutableBufferInitialization<T, A> {
        // Clones share the same `used` flag, so the one-shot initialization
        // claim is global across all clones.
        let buffer = self.buffer.clone();
        let used = self.used.clone();
        ImmutableBufferInitialization { buffer, used }
    }
}
impl<T: ?Sized, A> PartialEq for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // Equal when both handles refer to the same inner buffer span.
        let inner_matches = self.inner() == other.inner();
        inner_matches && self.size() == other.size()
    }
}
// Full equivalence: `eq` only compares the inner buffer handle and size.
impl<T: ?Sized, A> Eq for ImmutableBufferInitialization<T, A> {}
impl<T: ?Sized, A> Hash for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Consistent with `PartialEq`: hash the inner buffer and the size.
        Hash::hash(&self.inner(), state);
        Hash::hash(&self.size(), state);
    }
}
#[cfg(test)]
mod tests {
use crate::buffer::cpu_access::CpuAccessibleBuffer;
use crate::buffer::immutable::ImmutableBuffer;
use crate::buffer::BufferUsage;
use crate::command_buffer::AutoCommandBufferBuilder;
use crate::command_buffer::CommandBufferUsage;
use crate::command_buffer::PrimaryCommandBuffer;
use crate::sync::GpuFuture;
// Round-trip: upload a scalar via `from_data`, copy it back to a
// CPU-accessible buffer and check the value.
#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) =
ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
let destination =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(buffer, destination.clone()).unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
assert_eq!(*destination_content, 12);
}
// Round-trip for `from_iter`: upload 512 values, copy back, verify each.
#[test]
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_iter(
(0..512u32).map(|n| n * 2),
BufferUsage::all(),
queue.clone(),
)
.unwrap();
let destination = CpuAccessibleBuffer::from_iter(
device.clone(),
BufferUsage::all(),
false,
(0..512).map(|_| 0u32),
)
.unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(buffer, destination.clone()).unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
for (n, &v) in destination_content.iter().enumerate() {
assert_eq!(n * 2, v as usize);
}
}
// Writing to an already-initialized immutable buffer must be rejected
// (exclusive GPU access is denied).
#[test]
fn writing_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) =
ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
assert_should_panic!({
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.fill_buffer(buffer, 50).unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
});
}
// Reading a buffer created with `uninitialized` before it has been written
// must be rejected. Note: the initialization handle is dropped here, so the
// `copy_buffer` targets the buffer itself, which is still uninitialized.
#[test]
fn read_uninitialized_forbidden() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
assert_should_panic!({
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(source, buffer).unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
});
}
// Initializing through the init handle and then reading the buffer inside
// the same command buffer must work.
#[test]
fn init_then_read_same_cb() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(source.clone(), init)
.unwrap()
.copy_buffer(buffer, source.clone())
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
// Initialization and read split across two command buffers chained by a
// future. Currently ignored (known-unsupported scenario).
#[test]
#[ignore]
fn init_then_read_same_future() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(source.clone(), init).unwrap();
let cb1 = cbb.build().unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(buffer, source.clone()).unwrap();
let cb2 = cbb.build().unwrap();
let _ = cb1
.execute(queue.clone())
.unwrap()
.then_execute(queue.clone(), cb2)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
// Creating a buffer from zero-sized data must not crash (the result is
// allowed to be an error; only absence of panic is checked).
#[test]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
let _ = ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());
}
}