// scry_gpu/ticket.rs
// SPDX-License-Identifier: MIT OR Apache-2.0
//! Non-blocking GPU submission handles.
//!
//! A [`Ticket`] represents an in-flight GPU submission. It is created by
//! [`Batch::submit_async`](crate::Batch::submit_async) and allows the caller
//! to overlap CPU work with GPU execution.
//!
//! # Example
//!
//! ```ignore
//! let mut batch = gpu.batch()?;
//! batch.run(&kernel, &[&input, &output], n)?;
//! let ticket = batch.submit_async()?;
//!
//! // CPU work while GPU runs...
//!
//! ticket.wait()?;
//! let result: Vec<f32> = output.download()?;
//! ```
//!
//! # Drop guarantee
//!
//! If a `Ticket` is dropped without calling [`wait`](Ticket::wait), the
//! destructor blocks until the GPU work finishes. This prevents leaking
//! in-flight resources but may introduce unexpected stalls. Prefer calling
//! `wait()` explicitly.

use crate::error::Result;
30/// A handle to an in-flight GPU submission.
31///
32/// Created by [`Batch::submit_async`](crate::Batch::submit_async). The GPU
33/// work is already queued; this handle lets you poll for completion or block
34/// until done.
35pub struct Ticket {
36    inner: Option<TicketInner>,
37}
/// Backend-specific in-flight submission handle, one variant per compiled
/// backend. Kept private: callers interact only through [`Ticket`].
///
/// With no backend feature enabled this enum is uninhabited, so match arms
/// over it compile away entirely.
enum TicketInner {
    /// Handle owned by the Vulkan backend.
    #[cfg(feature = "vulkan")]
    Vulkan(crate::backend::vulkan::VulkanTicket),
    /// Handle owned by the CUDA backend.
    #[cfg(feature = "cuda")]
    Cuda(crate::backend::cuda::CudaTicket),
}
46impl Ticket {
47    #[cfg(feature = "vulkan")]
48    pub(crate) const fn new_vulkan(ticket: crate::backend::vulkan::VulkanTicket) -> Self {
49        Self {
50            inner: Some(TicketInner::Vulkan(ticket)),
51        }
52    }
53
54    #[cfg(feature = "cuda")]
55    pub(crate) const fn new_cuda(ticket: crate::backend::cuda::CudaTicket) -> Self {
56        Self {
57            inner: Some(TicketInner::Cuda(ticket)),
58        }
59    }
60
61    /// Block until the GPU work completes.
62    ///
63    /// If the GPU work has already finished (e.g. [`is_ready`](Ticket::is_ready)
64    /// returned `true`), this returns immediately. Consumes the ticket and
65    /// recycles backend resources.
66    pub fn wait(mut self) -> Result<()> {
67        self.wait_inner()
68    }
69
70    /// Poll whether the GPU work has completed without blocking.
71    ///
72    /// Returns `Ok(true)` if all dispatches have finished, `Ok(false)` if
73    /// still in progress.
74    ///
75    /// # Panics
76    ///
77    /// Panics if called after [`wait`](Ticket::wait) (which consumes `self`,
78    /// so this can only happen via internal misuse).
79    pub fn is_ready(&self) -> Result<bool> {
80        match self.inner.as_ref().expect("ticket already consumed") {
81            #[cfg(feature = "vulkan")]
82            TicketInner::Vulkan(t) => t.is_ready(),
83            #[cfg(feature = "cuda")]
84            TicketInner::Cuda(t) => Ok(t.is_ready()),
85        }
86    }
87
88    /// Internal wait callable from both `wait()` and `Drop`.
89    fn wait_inner(&mut self) -> Result<()> {
90        if let Some(inner) = self.inner.take() {
91            match inner {
92                #[cfg(feature = "vulkan")]
93                TicketInner::Vulkan(t) => t.wait()?,
94                #[cfg(feature = "cuda")]
95                TicketInner::Cuda(t) => t.wait()?,
96            }
97        }
98        Ok(())
99    }
100}
102impl Drop for Ticket {
103    fn drop(&mut self) {
104        // Best-effort: block until GPU finishes so resources can be
105        // safely recycled. Errors are discarded because Drop cannot
106        // propagate them.
107        let _ = self.wait_inner();
108    }
109}
111impl std::fmt::Debug for Ticket {
112    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
113        f.debug_struct("Ticket")
114            .field("pending", &self.inner.is_some())
115            .finish_non_exhaustive()
116    }
117}