// trueno/backends/gpu/mod.rs
// NOTE(review): blanket `missing_docs` allow — confirm it is still needed once
// the public re-exports below are documented at their definitions.
#![allow(missing_docs)]
2#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
31mod batch;
32
33#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
34mod device;
35
36#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
37mod pool;
38
39#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
40pub mod shaders;
41
42#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
43pub mod runtime;
44
45mod partition_view;
47mod tensor_view;
48mod tiled_reduction;
49
50pub use partition_view::{PartitionView, TileInfo};
51pub use tensor_view::{MemoryLayout, TensorView};
52pub use tiled_reduction::{
53 tiled_max_2d, tiled_min_2d, tiled_reduce_2d, tiled_reduce_partial, tiled_sum_2d, MaxOp, MinOp,
54 ReduceOp, SumOp, TILE_SIZE,
55};
56
57#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
58pub use batch::{BufferId, GpuCommandBatch, PipelineCache};
59
60#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
62pub use device::GpuDevice;
63
64#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
67pub use wgpu;
68
69#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
70pub use pool::GpuDevicePool;
71
72#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
74pub use device::linalg::cached_matmul::GpuMatmulCache;
75
76#[cfg(any(feature = "gpu", feature = "gpu-wasm"))]
78pub use device::linalg::wgsl_forward::{QkvLoRA, WgslForwardPass};
79
80#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
81mod backend_ops;
82
/// GPU compute backend. Holds an optional [`GpuDevice`] that is created
/// lazily rather than at construction time, so building a `GpuBackend`
/// never touches the GPU.
#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
#[derive(Clone)]
pub struct GpuBackend {
    // Cached device handle; `None` until first populated by `ensure_device`.
    device: Option<GpuDevice>,
}
89
#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
impl GpuBackend {
    /// Builds a backend with no device attached; the device is acquired
    /// lazily on first use.
    pub fn new() -> Self {
        GpuBackend { device: None }
    }

    /// Returns the cached device, creating it on the first call.
    ///
    /// # Errors
    /// Propagates the error string from [`GpuDevice::new`] when device
    /// creation fails (the cache stays empty, so a later call retries).
    fn ensure_device(&mut self) -> Result<&GpuDevice, String> {
        // Take the existing device (if any) out of the slot, creating one
        // when the slot was empty, then put it back and hand out a borrow.
        let device = match self.device.take() {
            Some(existing) => existing,
            None => GpuDevice::new()?,
        };
        Ok(&*self.device.insert(device))
    }

    /// Reports whether a GPU device can be acquired on this machine.
    pub fn is_available() -> bool {
        GpuDevice::is_available()
    }
}
110
#[cfg(all(feature = "gpu", not(target_arch = "wasm32")))]
impl Default for GpuBackend {
    /// Equivalent to [`GpuBackend::new`]: no device is initialized.
    fn default() -> Self {
        GpuBackend::new()
    }
}
117
/// Stateless stub standing in for the GPU backend whenever the native GPU
/// path is compiled out (no `gpu` feature, or a `wasm32` target).
#[cfg(any(not(feature = "gpu"), target_arch = "wasm32"))]
#[derive(Clone)]
pub struct GpuBackend;

#[cfg(any(not(feature = "gpu"), target_arch = "wasm32"))]
impl GpuBackend {
    /// Constructs the stub; it carries no state.
    pub fn new() -> Self {
        GpuBackend
    }

    /// The stub never has a GPU to offer.
    pub fn is_available() -> bool {
        false
    }
}

#[cfg(any(not(feature = "gpu"), target_arch = "wasm32"))]
impl Default for GpuBackend {
    fn default() -> Self {
        GpuBackend::new()
    }
}
140
#[cfg(test)]
#[cfg(not(feature = "gpu"))]
mod stub_tests {
    use super::*;

    #[test]
    fn test_gpu_backend_stub_new() {
        let _backend = GpuBackend::new();
    }

    #[test]
    fn test_gpu_backend_stub_is_available() {
        assert!(!GpuBackend::is_available());
    }

    #[test]
    fn test_gpu_backend_stub_default() {
        // Go through the `Default` trait so its impl is actually exercised;
        // the previous version constructed the unit struct directly and
        // left `default()` uncovered.
        let _backend = GpuBackend::default();
    }

    #[test]
    fn test_gpu_backend_stub_clone() {
        let backend = GpuBackend::new();
        let _cloned = backend.clone();
    }
}
168
169#[cfg(test)]
170#[cfg(feature = "gpu")]
171mod tests_gpu;