// arcane_core/renderer/test_harness.rs

//! GPU test harness for headless rendering tests.
//!
//! Provides utilities for creating a GPU context without a window,
//! rendering to textures, and reading back pixel data for verification.

use anyhow::{Context, Result};

use super::camera::Camera2D;
use super::geometry::GeometryBatch;
use super::postprocess::PostProcessPipeline;
use super::rendertarget::RenderTargetStore;
use super::shader::ShaderStore;
use super::sprite::SpritePipeline;
use super::texture::TextureStore;
/// Headless GPU context for testing (no window/surface required).
pub struct TestGpu {
    /// Logical GPU device used to create textures, buffers, and pipelines.
    pub device: wgpu::Device,
    /// Command queue for submitting encoded work to `device`.
    pub queue: wgpu::Queue,
    /// Texture format used for all test render targets
    /// (set to `Rgba8Unorm` in [`TestGpu::new`]).
    pub format: wgpu::TextureFormat,
}
/// A GPU context for headless testing that provides the same interface
/// as GpuContext but without requiring a window surface.
pub struct TestGpuContext {
    /// Cloned handle to the test device.
    pub device: wgpu::Device,
    /// Cloned handle to the test queue.
    pub queue: wgpu::Queue,
    /// Surface configuration describing size/format; no real surface exists
    /// in headless mode, but consumers read dimensions and format from here.
    pub config: wgpu::SurfaceConfiguration,
}
31impl TestGpuContext {
32    /// Create from TestGpu with default 64x64 dimensions.
33    pub fn from_test_gpu(gpu: &TestGpu) -> Self {
34        Self::from_test_gpu_sized(gpu, 64, 64)
35    }
36
37    /// Create from TestGpu with specified dimensions.
38    pub fn from_test_gpu_sized(gpu: &TestGpu, width: u32, height: u32) -> Self {
39        Self {
40            device: gpu.device.clone(),
41            queue: gpu.queue.clone(),
42            config: wgpu::SurfaceConfiguration {
43                usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
44                format: gpu.format,
45                width,
46                height,
47                present_mode: wgpu::PresentMode::AutoVsync,
48                alpha_mode: wgpu::CompositeAlphaMode::Auto,
49                view_formats: vec![],
50                desired_maximum_frame_latency: 2,
51            },
52        }
53    }
54}
56impl TestGpu {
57    /// Create a headless GPU context for testing.
58    /// Returns None if no suitable GPU adapter is available.
59    pub fn new() -> Option<Self> {
60        let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
61            backends: wgpu::Backends::all(),
62            ..Default::default()
63        });
64
65        let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
66            power_preference: wgpu::PowerPreference::default(),
67            compatible_surface: None, // headless
68            force_fallback_adapter: false,
69        }))?;
70
71        let (device, queue) = pollster::block_on(adapter.request_device(
72            &wgpu::DeviceDescriptor {
73                label: Some("test_device"),
74                required_features: wgpu::Features::empty(),
75                required_limits: wgpu::Limits::default(),
76                ..Default::default()
77            },
78            None,
79        ))
80        .ok()?;
81
82        Some(Self {
83            device,
84            queue,
85            format: wgpu::TextureFormat::Rgba8Unorm,
86        })
87    }
88
89    /// Create a render target texture that can be read back.
90    pub fn create_target(&self, width: u32, height: u32) -> TestRenderTarget {
91        let texture = self.device.create_texture(&wgpu::TextureDescriptor {
92            label: Some("test_target"),
93            size: wgpu::Extent3d {
94                width,
95                height,
96                depth_or_array_layers: 1,
97            },
98            mip_level_count: 1,
99            sample_count: 1,
100            dimension: wgpu::TextureDimension::D2,
101            format: self.format,
102            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
103            view_formats: &[],
104        });
105
106        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
107
108        // Buffer for reading back pixels (4 bytes per pixel, aligned to 256)
109        let bytes_per_row = ((width * 4 + 255) / 256) * 256;
110        let buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
111            label: Some("test_readback"),
112            size: (bytes_per_row * height) as u64,
113            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
114            mapped_at_creation: false,
115        });
116
117        TestRenderTarget {
118            texture,
119            view,
120            buffer,
121            width,
122            height,
123            bytes_per_row,
124        }
125    }
126
127    // ── Pipeline factory methods ──────────────────────────────────────────────
128
129    /// Create a SpritePipeline for headless testing.
130    pub fn create_sprite_pipeline(&self) -> SpritePipeline {
131        SpritePipeline::new_headless(&self.device, &self.queue, self.format)
132    }
133
134    /// Create a TextureStore for headless testing.
135    pub fn create_texture_store(&self) -> TextureStore {
136        TextureStore::new()
137    }
138
139    /// Create a GeometryBatch for headless testing.
140    pub fn create_geometry_batch(&self) -> GeometryBatch {
141        GeometryBatch::new_headless(&self.device, self.format)
142    }
143
144    /// Create a PostProcessPipeline for headless testing.
145    pub fn create_postprocess(&self) -> PostProcessPipeline {
146        PostProcessPipeline::new_headless(&self.device, self.format)
147    }
148
149    /// Create a ShaderStore for headless testing.
150    pub fn create_shader_store(&self) -> ShaderStore {
151        ShaderStore::new_headless(&self.device, self.format)
152    }
153
154    /// Create a RenderTargetStore for headless testing.
155    pub fn create_render_target_store(&self) -> RenderTargetStore {
156        RenderTargetStore::new()
157    }
158
159    /// Create a default Camera2D for testing.
160    pub fn create_camera(&self, width: f32, height: f32) -> Camera2D {
161        Camera2D {
162            x: width / 2.0,
163            y: height / 2.0,
164            zoom: 1.0,
165            viewport_size: [width, height],
166            ..Camera2D::default()
167        }
168    }
169}
/// A render target that can be read back to CPU memory.
pub struct TestRenderTarget {
    /// The GPU texture rendered into (RENDER_ATTACHMENT | COPY_SRC usage).
    pub texture: wgpu::Texture,
    /// Default view of `texture`, used as a color attachment.
    pub view: wgpu::TextureView,
    // Staging buffer (COPY_DST | MAP_READ) the texture is copied into for readback.
    buffer: wgpu::Buffer,
    /// Target width in pixels.
    pub width: u32,
    /// Target height in pixels.
    pub height: u32,
    // Padded row stride of `buffer` in bytes (rows aligned to 256).
    bytes_per_row: u32,
}
impl TestRenderTarget {
    /// Read back the rendered pixels as RGBA bytes.
    /// Call this after submitting render commands.
    ///
    /// Copies the texture into the staging buffer, maps it, and strips the
    /// per-row padding so the result is a tightly packed, row-major
    /// `width * height * 4` byte vector.
    ///
    /// # Errors
    /// Returns an error if the staging buffer fails to map.
    pub fn read_pixels(&self, gpu: &TestGpu) -> Result<Vec<u8>> {
        let mut encoder = gpu.device.create_command_encoder(
            &wgpu::CommandEncoderDescriptor { label: Some("readback_encoder") },
        );

        // Copy the full texture into the staging buffer, respecting the
        // padded `bytes_per_row` stride the buffer was allocated with.
        encoder.copy_texture_to_buffer(
            wgpu::TexelCopyTextureInfo {
                texture: &self.texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::TexelCopyBufferInfo {
                buffer: &self.buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(self.bytes_per_row),
                    rows_per_image: Some(self.height),
                },
            },
            wgpu::Extent3d {
                width: self.width,
                height: self.height,
                depth_or_array_layers: 1,
            },
        );

        gpu.queue.submit(std::iter::once(encoder.finish()));

        // Map the buffer and read pixels.
        // Order matters: `map_async` only queues the request; `poll(Wait)`
        // blocks until the GPU work completes, after which the callback has
        // delivered the map result through the channel.
        let buffer_slice = self.buffer.slice(..);
        let (tx, rx) = std::sync::mpsc::channel();
        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
            tx.send(result).unwrap();
        });
        gpu.device.poll(wgpu::Maintain::Wait);
        rx.recv().unwrap().context("Failed to map buffer")?;

        let data = buffer_slice.get_mapped_range();

        // Copy data row by row, dropping the alignment padding at the end of
        // each padded row so the output is tightly packed.
        let mut pixels = Vec::with_capacity((self.width * self.height * 4) as usize);
        for y in 0..self.height {
            let start = (y * self.bytes_per_row) as usize;
            let end = start + (self.width * 4) as usize;
            pixels.extend_from_slice(&data[start..end]);
        }

        // The mapped range must be dropped before unmapping the buffer.
        drop(data);
        self.buffer.unmap();

        Ok(pixels)
    }

    /// Get a pixel color at (x, y) as [R, G, B, A].
    ///
    /// `pixels` must be the tightly packed output of [`Self::read_pixels`].
    /// Panics if (x, y) is outside the target dimensions (slice index out of
    /// bounds).
    pub fn get_pixel(&self, pixels: &[u8], x: u32, y: u32) -> [u8; 4] {
        let idx = ((y * self.width + x) * 4) as usize;
        [pixels[idx], pixels[idx + 1], pixels[idx + 2], pixels[idx + 3]]
    }

    /// Check if a pixel approximately matches an expected color (within tolerance).
    ///
    /// Each of the four channels must be within `tolerance` of `expected`
    /// (inclusive); the comparison is done in i16 to avoid u8 underflow.
    pub fn pixel_matches(&self, pixels: &[u8], x: u32, y: u32, expected: [u8; 4], tolerance: u8) -> bool {
        let actual = self.get_pixel(pixels, x, y);
        actual.iter().zip(expected.iter()).all(|(a, e)| {
            (*a as i16 - *e as i16).abs() <= tolerance as i16
        })
    }
}
253/// Helper to clear a render target to a solid color.
254pub fn clear_target(gpu: &TestGpu, target: &TestRenderTarget, color: [f32; 4]) {
255    let mut encoder = gpu.device.create_command_encoder(
256        &wgpu::CommandEncoderDescriptor { label: Some("clear_encoder") },
257    );
258
259    {
260        let _pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
261            label: Some("clear_pass"),
262            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
263                view: &target.view,
264                resolve_target: None,
265                ops: wgpu::Operations {
266                    load: wgpu::LoadOp::Clear(wgpu::Color {
267                        r: color[0] as f64,
268                        g: color[1] as f64,
269                        b: color[2] as f64,
270                        a: color[3] as f64,
271                    }),
272                    store: wgpu::StoreOp::Store,
273                },
274            })],
275            depth_stencil_attachment: None,
276            timestamp_writes: None,
277            occlusion_query_set: None,
278        });
279    }
280
281    gpu.queue.submit(std::iter::once(encoder.finish()));
282}
#[cfg(test)]
mod tests {
    use super::*;

    // All tests below are `#[ignore]`d because they need a physical GPU
    // adapter; run them explicitly with `cargo test -- --ignored`.

    #[test]
    #[ignore] // requires GPU
    fn test_gpu_context_creation() {
        let gpu = TestGpu::new().expect("Failed to create GPU context");
        // If we got here, the GPU context was created successfully;
        // also exercise target allocation.
        let _target = gpu.create_target(16, 16);
    }

    #[test]
    #[ignore] // requires GPU
    fn test_clear_and_readback() {
        let gpu = TestGpu::new().expect("Failed to create GPU context");
        let target = gpu.create_target(64, 64);

        // Clear to red
        clear_target(&gpu, &target, [1.0, 0.0, 0.0, 1.0]);

        let pixels = target.read_pixels(&gpu).expect("Failed to read pixels");

        // Check center pixel is red (tolerance 1 allows rounding in the
        // float-to-u8 conversion).
        assert!(target.pixel_matches(&pixels, 32, 32, [255, 0, 0, 255], 1));
    }

    #[test]
    #[ignore] // requires GPU
    fn test_clear_to_green() {
        let gpu = TestGpu::new().expect("Failed to create GPU context");
        let target = gpu.create_target(32, 32);

        clear_target(&gpu, &target, [0.0, 1.0, 0.0, 1.0]);

        // Center pixel of the 32x32 target should be pure green.
        let pixels = target.read_pixels(&gpu).expect("Failed to read pixels");
        assert!(target.pixel_matches(&pixels, 16, 16, [0, 255, 0, 255], 1));
    }
}