use crate::backend::MkGpuBackend;
use crate::buffer::MkDeviceBuffer;
use crate::staging::MkStagingBuffer;
use crate::MkGpu;
/// Handle to an in-flight staging → device copy.
///
/// Created when a transfer is submitted; `complete` is flipped by the
/// owning crate (via [`MkGpuTransfer::mark_complete`]) once the backend
/// reports the copy finished.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MkGpuTransfer {
    // Caller-visible identifier for this transfer (assigned at submission).
    id: u64,
    // True once the copy has been observed to finish; starts out false.
    complete: bool,
}
impl MkGpuTransfer {
    /// Creates a handle for a freshly submitted (not yet complete) transfer.
    pub(crate) fn new(id: u64) -> Self {
        Self { id, complete: false }
    }
    /// Returns the identifier assigned at submission time.
    pub fn id(&self) -> u64 {
        self.id
    }
    /// Returns `true` once [`mark_complete`](Self::mark_complete) has been called.
    pub fn is_complete(&self) -> bool {
        self.complete
    }
    /// Marks the transfer as finished. Crate-internal: only the submitting
    /// machinery should flip this flag.
    pub(crate) fn mark_complete(&mut self) {
        self.complete = true;
    }
}
/// Builder for a single staging → device copy on one GPU.
///
/// Construct with [`MkTransferBuilder::new`], optionally adjust the region
/// with `offset`/`size`, then call `submit`.
pub struct MkTransferBuilder<'a, B: MkGpuBackend> {
    // GPU the transfer will be submitted to.
    gpu: &'a MkGpu<B>,
    // Source (host-visible) buffer.
    staging: &'a MkStagingBuffer<B>,
    // Destination (device-local) buffer.
    device: &'a MkDeviceBuffer<B>,
    // Byte offset of the copy region; defaults to 0.
    offset: usize,
    // Byte count to copy; defaults to the smaller of the two buffer sizes.
    size: usize,
}
impl<'a, B: MkGpuBackend> MkTransferBuilder<'a, B> {
    /// Creates a builder for a copy from `staging` into `device` on `gpu`.
    ///
    /// The copy region defaults to offset 0 and to the smaller of the two
    /// buffer sizes, so by default the whole overlap is transferred.
    pub fn new(
        gpu: &'a MkGpu<B>,
        staging: &'a MkStagingBuffer<B>,
        device: &'a MkDeviceBuffer<B>,
    ) -> Self {
        Self {
            gpu,
            staging,
            device,
            offset: 0,
            size: staging.size().min(device.size()),
        }
    }
    /// Sets the byte offset of the copy region (consumes and returns `self`).
    pub fn offset(mut self, offset: usize) -> Self {
        self.offset = offset;
        self
    }
    /// Sets the byte count to copy (consumes and returns `self`).
    pub fn size(mut self, size: usize) -> Self {
        self.size = size;
        self
    }
    /// Submits the transfer, returning its handle.
    ///
    /// NOTE(review): `self.offset` and `self.size` are never passed to
    /// `gpu.transfer`, so the `offset`/`size` setters currently have no
    /// effect on the submitted copy — confirm whether `MkGpu::transfer`
    /// should accept an (offset, size) region, or whether the setters
    /// should be removed.
    pub fn submit(self) -> Result<MkGpuTransfer, B::Error> {
        self.gpu.transfer(self.staging, self.device)
    }
}
/// Accumulates several staging → device copies and submits them together.
///
/// Build with the crate-internal constructor, queue copies with `copy`, then
/// call `submit` or `submit_and_wait`.
pub struct MkBatchTransfer<'a, B: MkGpuBackend> {
    // GPU all queued copies will be issued on.
    gpu: &'a MkGpu<B>,
    // Queued copies as (source handle, destination handle, byte count).
    transfers: Vec<(B::BufferHandle, B::BufferHandle, usize)>,
}
impl<'a, B: MkGpuBackend> MkBatchTransfer<'a, B> {
    /// Creates an empty batch bound to `gpu`.
    pub(crate) fn new(gpu: &'a MkGpu<B>) -> Self {
        Self {
            gpu,
            transfers: Vec::new(),
        }
    }
    /// Queues a copy from `src` to `dst` (consumes and returns `self`).
    ///
    /// The copy size is clamped to the smaller of the two buffers so the
    /// backend is never asked to read or write out of bounds.
    pub fn copy(
        mut self,
        src: &MkStagingBuffer<B>,
        dst: &MkDeviceBuffer<B>,
    ) -> Self {
        let size = src.size().min(dst.size());
        self.transfers.push((src.handle().clone(), dst.handle().clone(), size));
        self
    }
    /// Issues every queued copy, then blocks until the GPU is idle.
    ///
    /// # Errors
    /// Returns the first backend error; copies queued after the failing one
    /// are not issued.
    pub fn submit_and_wait(self) -> Result<(), B::Error> {
        for (src, dst, size) in self.transfers {
            self.gpu.backend().copy_buffer(&src, &dst, size)?;
        }
        self.gpu.wait_idle()
    }
    /// Issues every queued copy without waiting, returning one
    /// [`MkGpuTransfer`] per copy. Each transfer's id is the queue position
    /// (0, 1, 2, …) of the corresponding `copy` call.
    ///
    /// # Errors
    /// Returns the first backend error; handles for copies issued before the
    /// failure are dropped.
    pub fn submit(self) -> Result<Vec<MkGpuTransfer>, B::Error> {
        // Preallocate: exactly one handle per queued copy.
        let mut results = Vec::with_capacity(self.transfers.len());
        // `enumerate` makes the id assignment explicit instead of deriving it
        // from the output vector's length mid-loop.
        for (index, (src, dst, size)) in self.transfers.into_iter().enumerate() {
            self.gpu.backend().copy_buffer(&src, &dst, size)?;
            results.push(MkGpuTransfer::new(index as u64));
        }
        Ok(results)
    }
}