// rave_core/backend.rs

//! Upscale backend trait — the GPU-only inference contract.

use async_trait::async_trait;

use crate::error::Result;
use crate::types::GpuTexture;

/// Metadata extracted from an ONNX model's input/output tensor descriptors.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ModelMetadata {
    /// Model identifier string from the ONNX graph name field.
    pub name: String,
    /// Spatial upscale factor (e.g. `2` for 2×, `4` for 4×).
    pub scale: u32,
    /// Name of the model's input tensor node.
    pub input_name: String,
    /// Name of the model's output tensor node.
    pub output_name: String,
    /// Number of input channels (typically `3` for RGB planar).
    pub input_channels: u32,
    /// Minimum supported input resolution as `(height, width)`.
    pub min_input_hw: (u32, u32),
    /// Maximum supported input resolution as `(height, width)`.
    pub max_input_hw: (u32, u32),
}
26
27/// GPU-only super-resolution inference backend.
28///
29/// All methods operate entirely on the GPU — no host staging, no implicit
30/// device synchronization outside of shutdown.
31#[async_trait]
32pub trait UpscaleBackend: Send + Sync {
33    /// Warm up the backend: load the model, allocate buffers, build engine plans.
34    async fn initialize(&self) -> Result<()>;
35    /// Run a single upscale pass on the given GPU texture, returning the upscaled output.
36    async fn process(&self, input: GpuTexture) -> Result<GpuTexture>;
37    /// Flush any pending work, synchronize streams, and release GPU resources.
38    async fn shutdown(&self) -> Result<()>;
39    /// Return the model metadata extracted during [`initialize`](Self::initialize).
40    fn metadata(&self) -> Result<&ModelMetadata>;
41}