memkit_gpu/
transfer.rs

1//! GPU memory transfer operations.
2
3use crate::backend::MkGpuBackend;
4use crate::buffer::MkDeviceBuffer;
5use crate::staging::MkStagingBuffer;
6use crate::MkGpu;
7
/// A pending GPU memory transfer.
///
/// Handles are created inside the crate when a transfer is issued and are
/// flipped to complete exactly once via `mark_complete`; callers observe
/// progress through `is_complete`.
pub struct MkGpuTransfer {
    // Identifier assigned at creation time (see `MkGpuTransfer::new`).
    // NOTE(review): batch submission assigns batch-local indices as IDs,
    // so IDs are not globally unique — confirm intended.
    id: u64,
    // Completion flag; starts `false`, set by `mark_complete`.
    complete: bool,
}
13
14impl MkGpuTransfer {
15    /// Create a new transfer handle.
16    pub(crate) fn new(id: u64) -> Self {
17        Self { id, complete: false }
18    }
19
20    /// Get the transfer ID.
21    pub fn id(&self) -> u64 {
22        self.id
23    }
24
25    /// Check if the transfer is complete.
26    pub fn is_complete(&self) -> bool {
27        self.complete
28    }
29
30    /// Mark as complete.
31    pub(crate) fn mark_complete(&mut self) {
32        self.complete = true;
33    }
34}
35
/// Builder for GPU transfer operations.
///
/// Configures a single staging → device copy. Defaults: `offset` 0,
/// `size` = min(staging size, device size).
pub struct MkTransferBuilder<'a, B: MkGpuBackend> {
    // GPU context used to issue the transfer.
    gpu: &'a MkGpu<B>,
    // Source (staging) buffer.
    staging: &'a MkStagingBuffer<B>,
    // Destination (device) buffer.
    device: &'a MkDeviceBuffer<B>,
    // Destination offset. NOTE(review): `submit` never forwards this to
    // `MkGpu::transfer` — the setting currently has no effect; confirm.
    offset: usize,
    // Requested copy size. NOTE(review): also not forwarded by `submit`;
    // the copy presumably uses the buffers' own sizes — confirm.
    size: usize,
}
44
45impl<'a, B: MkGpuBackend> MkTransferBuilder<'a, B> {
46    /// Create a new transfer builder.
47    pub fn new(
48        gpu: &'a MkGpu<B>,
49        staging: &'a MkStagingBuffer<B>,
50        device: &'a MkDeviceBuffer<B>,
51    ) -> Self {
52        Self {
53            gpu,
54            staging,
55            device,
56            offset: 0,
57            size: staging.size().min(device.size()),
58        }
59    }
60
61    /// Set the destination offset.
62    pub fn offset(mut self, offset: usize) -> Self {
63        self.offset = offset;
64        self
65    }
66
67    /// Set the transfer size.
68    pub fn size(mut self, size: usize) -> Self {
69        self.size = size;
70        self
71    }
72
73    /// Submit the transfer.
74    pub fn submit(self) -> Result<MkGpuTransfer, B::Error> {
75        self.gpu.transfer(self.staging, self.device)
76    }
77}
78
/// Batch transfer builder for multiple transfers.
///
/// Accumulates copy operations and issues them all on submission.
pub struct MkBatchTransfer<'a, B: MkGpuBackend> {
    // GPU context whose backend performs the queued copies.
    gpu: &'a MkGpu<B>,
    // Queued copies: (source handle, destination handle, copy size).
    // Size is clamped to min(src, dst) when the copy is queued.
    transfers: Vec<(B::BufferHandle, B::BufferHandle, usize)>,
}
84
85impl<'a, B: MkGpuBackend> MkBatchTransfer<'a, B> {
86    /// Create a new batch transfer.
87    pub(crate) fn new(gpu: &'a MkGpu<B>) -> Self {
88        Self {
89            gpu,
90            transfers: Vec::new(),
91        }
92    }
93
94    /// Add a copy operation to the batch.
95    pub fn copy(
96        mut self,
97        src: &MkStagingBuffer<B>,
98        dst: &MkDeviceBuffer<B>,
99    ) -> Self {
100        let size = src.size().min(dst.size());
101        self.transfers.push((src.handle().clone(), dst.handle().clone(), size));
102        self
103    }
104
105    /// Submit all transfers and wait for completion.
106    pub fn submit_and_wait(self) -> Result<(), B::Error> {
107        for (src, dst, size) in self.transfers {
108            self.gpu.backend().copy_buffer(&src, &dst, size)?;
109        }
110        self.gpu.wait_idle()
111    }
112
113    /// Submit all transfers without waiting.
114    pub fn submit(self) -> Result<Vec<MkGpuTransfer>, B::Error> {
115        let mut results = Vec::new();
116        for (src, dst, size) in self.transfers {
117            self.gpu.backend().copy_buffer(&src, &dst, size)?;
118            results.push(MkGpuTransfer::new(results.len() as u64));
119        }
120        Ok(results)
121    }
122}