// proof_engine/wgpu_backend/abstraction.rs
1//! Unified GPU abstraction layer: `GpuDevice` and `GpuQueue` traits that
2//! provide a single API surface regardless of the underlying graphics backend.
3
4use std::collections::HashMap;
5use std::sync::atomic::{AtomicU64, Ordering};
6
7use super::backend::{
8    BufferHandle, BufferUsage, ComputePipelineHandle, PipelineHandle, PipelineLayout,
9    ShaderHandle, ShaderStage, TextureFormat, TextureHandle,
10};
11
12// ---------------------------------------------------------------------------
13// Blend / colour-target state
14// ---------------------------------------------------------------------------
15
/// Blend factor: the multiplier applied to the source or destination colour
/// during blending.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlendFactor {
    Zero,
    One,
    SrcAlpha,
    OneMinusSrcAlpha,
    DstAlpha,
    OneMinusDstAlpha,
    SrcColor,
    OneMinusSrcColor,
    DstColor,
    OneMinusDstColor,
}
30
/// Blend operation combining the factored source and destination colours.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlendOp {
    Add,
    Subtract,
    ReverseSubtract,
    Min,
    Max,
}
40
/// Blend state for a single colour target: `src * src_factor (op) dst * dst_factor`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct BlendState {
    pub src_factor: BlendFactor,
    pub dst_factor: BlendFactor,
    pub operation: BlendOp,
}
48
impl BlendState {
    /// Classic alpha blending: `src * srcAlpha + dst * (1 - srcAlpha)`.
    pub const ALPHA: BlendState = BlendState {
        src_factor: BlendFactor::SrcAlpha,
        dst_factor: BlendFactor::OneMinusSrcAlpha,
        operation: BlendOp::Add,
    };

    /// Additive blending: `src + dst` (used for glows, particles, etc.).
    pub const ADDITIVE: BlendState = BlendState {
        src_factor: BlendFactor::One,
        dst_factor: BlendFactor::One,
        operation: BlendOp::Add,
    };

    /// No blending: the source fully replaces the destination.
    pub const REPLACE: BlendState = BlendState {
        src_factor: BlendFactor::One,
        dst_factor: BlendFactor::Zero,
        operation: BlendOp::Add,
    };
}
68
/// State for a single colour output attachment.
#[derive(Debug, Clone)]
pub struct ColorTargetState {
    pub format: TextureFormat,
    // `None` means blending disabled for this target.
    pub blend: Option<BlendState>,
}
75
76// ---------------------------------------------------------------------------
77// Depth/stencil
78// ---------------------------------------------------------------------------
79
/// Compare function used for depth/stencil testing
/// (incoming fragment value compared against the stored value).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompareFunction {
    Never,
    Less,
    LessEqual,
    Equal,
    GreaterEqual,
    Greater,
    NotEqual,
    Always,
}
92
/// Depth-stencil state for a render pipeline.
#[derive(Debug, Clone)]
pub struct DepthStencilState {
    pub format: TextureFormat,
    // When false, the depth test still runs but the depth buffer is not updated.
    pub depth_write_enabled: bool,
    pub depth_compare: CompareFunction,
}
100
impl Default for DepthStencilState {
    /// Conventional depth setup: 32-bit float depth buffer, depth writes on,
    /// fragments pass when strictly closer (`Less`).
    fn default() -> Self {
        Self {
            format: TextureFormat::Depth32F,
            depth_write_enabled: true,
            depth_compare: CompareFunction::Less,
        }
    }
}
110
111// ---------------------------------------------------------------------------
112// Pipeline descriptors
113// ---------------------------------------------------------------------------
114
/// Describes a render pipeline: shader pair, vertex input layout, and
/// output attachment state.
#[derive(Debug, Clone)]
pub struct RenderPipelineDesc {
    pub vertex: ShaderHandle,
    pub fragment: ShaderHandle,
    // One entry per bound vertex buffer slot.
    pub vertex_layout: Vec<VertexBufferLayout>,
    pub color_targets: Vec<ColorTargetState>,
    // `None` disables depth/stencil for this pipeline.
    pub depth_stencil: Option<DepthStencilState>,
}
124
/// Layout of one vertex buffer: element stride in bytes, stepping mode,
/// and the attributes read from it.
#[derive(Debug, Clone)]
pub struct VertexBufferLayout {
    pub stride: u32,
    pub step_mode: StepMode,
    pub attributes: Vec<VertexAttr>,
}
132
/// Per-vertex or per-instance stepping for a vertex buffer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StepMode {
    Vertex,
    Instance,
}
139
/// A single vertex attribute descriptor.
#[derive(Debug, Clone)]
pub struct VertexAttr {
    // Shader input location this attribute feeds.
    pub location: u32,
    // Byte offset of the attribute within one stride of the buffer.
    pub offset: u32,
    pub format: super::renderer::AttributeFormat,
}
147
/// Describes a compute pipeline: the compute shader plus its bind groups.
#[derive(Debug, Clone)]
pub struct ComputePipelineDesc {
    pub compute_shader: ShaderHandle,
    pub bind_groups: Vec<BindGroup>,
}
154
155// ---------------------------------------------------------------------------
156// Bind groups
157// ---------------------------------------------------------------------------
158
/// A concrete bind group with bound resources, built via [`BindGroup::push`].
#[derive(Debug, Clone)]
pub struct BindGroup {
    pub entries: Vec<BindGroupEntry>,
}
164
165impl BindGroup {
166    pub fn new() -> Self { Self { entries: Vec::new() } }
167
168    pub fn push(mut self, entry: BindGroupEntry) -> Self {
169        self.entries.push(entry);
170        self
171    }
172}
173
impl Default for BindGroup {
    /// Same as [`BindGroup::new`]: an empty bind group.
    fn default() -> Self { Self::new() }
}
177
/// One entry in a bind group: a resource bound at shader binding `binding`.
#[derive(Debug, Clone)]
pub struct BindGroupEntry {
    pub binding: u32,
    pub resource: BoundResource,
}
184
/// A resource bound at a specific slot.
#[derive(Debug, Clone)]
pub enum BoundResource {
    Buffer(BufferHandle),
    Texture(TextureHandle),
    // Raw sampler id — no typed handle exists for samplers in this module.
    Sampler(u64), // sampler handle
}
192
193// ---------------------------------------------------------------------------
194// GpuDevice trait
195// ---------------------------------------------------------------------------
196
/// The unified GPU device trait.  Every concrete backend implements this.
///
/// Usage pattern: create resources, upload data, then record draw/dispatch
/// work inside matching `begin_*`/`end_*` pass calls.
pub trait GpuDevice: Send + Sync {
    // Resource creation
    /// Allocate a buffer of `size` bytes with the given usage flags.
    fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle;
    /// Allocate a 2-D texture with the given dimensions and format.
    fn create_texture(&mut self, width: u32, height: u32, format: TextureFormat) -> TextureHandle;
    /// Compile/register `source` for the given shader stage.
    fn create_shader(&mut self, source: &str, stage: ShaderStage) -> ShaderHandle;
    /// Build a render pipeline from `desc`.
    fn create_pipeline(&mut self, desc: &RenderPipelineDesc) -> PipelineHandle;
    /// Build a compute pipeline from `desc`.
    fn create_compute_pipeline(&mut self, desc: &ComputePipelineDesc) -> ComputePipelineHandle;

    // Data transfer
    /// Write `data` into `buffer` starting at byte `offset`.
    fn write_buffer(&mut self, buffer: BufferHandle, offset: usize, data: &[u8]);
    /// Read back the entire contents of `buffer`.
    fn read_buffer(&self, buffer: BufferHandle) -> Vec<u8>;
    /// Upload `data` into `texture`.
    fn write_texture(&mut self, texture: TextureHandle, data: &[u8]);

    // Render pass
    /// Begin a render pass targeting `color` attachments and an optional depth attachment.
    fn begin_render_pass(&mut self, color: &[TextureHandle], depth: Option<TextureHandle>);
    fn end_render_pass(&mut self);

    // Compute pass
    fn begin_compute_pass(&mut self);
    fn end_compute_pass(&mut self);

    // Draw commands (only valid between begin/end render pass)
    fn set_pipeline(&mut self, pipeline: PipelineHandle);
    fn set_vertex_buffer(&mut self, slot: u32, buffer: BufferHandle);
    fn set_index_buffer(&mut self, buffer: BufferHandle);
    fn set_bind_group(&mut self, index: u32, group: &BindGroup);
    fn draw(&mut self, vertex_count: u32, instance_count: u32);
    fn draw_indexed(&mut self, index_count: u32, instance_count: u32);
    /// Draw with arguments read from `buffer` at byte `offset`.
    fn draw_indirect(&mut self, buffer: BufferHandle, offset: u64);

    // Compute commands (only valid between begin/end compute pass)
    fn set_compute_pipeline(&mut self, pipeline: ComputePipelineHandle);
    fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32);
    /// Dispatch with workgroup counts read from `buffer` at byte `offset`.
    fn dispatch_indirect(&mut self, buffer: BufferHandle, offset: u64);

    // Destruction
    fn destroy_buffer(&mut self, buffer: BufferHandle);
    fn destroy_texture(&mut self, texture: TextureHandle);

    /// Human-readable backend name (e.g. "Null", "Software", "OpenGL").
    fn name(&self) -> &str;
}
239
240// ---------------------------------------------------------------------------
241// GpuQueue trait
242// ---------------------------------------------------------------------------
243
/// Command queue for submitting work to the GPU.
pub trait GpuQueue: Send + Sync {
    /// Submit a batch of recorded commands for execution.
    fn submit(&mut self, commands: Vec<RecordedCommand>);
    /// Register a callback to run once previously submitted work completes.
    fn on_completion(&mut self, callback: Box<dyn FnOnce() + Send>);
}
249
/// A recorded GPU command (opaque).  Also used by `SoftwareDevice` as its
/// draw-log entry type for inspection in tests.
#[derive(Debug, Clone)]
pub enum RecordedCommand {
    Draw { pipeline: PipelineHandle, vertex_count: u32, instance_count: u32 },
    DrawIndexed { pipeline: PipelineHandle, index_count: u32, instance_count: u32 },
    Dispatch { pipeline: ComputePipelineHandle, x: u32, y: u32, z: u32 },
    WriteBuffer { buffer: BufferHandle, data: Vec<u8> },
    CopyBuffer { src: BufferHandle, dst: BufferHandle, size: usize },
}
259
260// ---------------------------------------------------------------------------
261// NullDevice — no-op implementation for testing
262// ---------------------------------------------------------------------------
263
/// A device that does nothing. Every call is a successful no-op.
/// Only the handle counter is real, so created handles are still unique.
pub struct NullDevice {
    // Atomic so ids stay unique even though `next_id` takes `&self`.
    next_id: AtomicU64,
}
268
impl NullDevice {
    /// Construct a null device; handle ids start at 1 (0 is never issued).
    pub fn new() -> Self {
        Self { next_id: AtomicU64::new(1) }
    }

    /// Hand out the next unique id (relaxed ordering: only uniqueness matters).
    fn next_id(&self) -> u64 {
        self.next_id.fetch_add(1, Ordering::Relaxed)
    }
}
278
impl Default for NullDevice {
    /// Same as [`NullDevice::new`].
    fn default() -> Self { Self::new() }
}
282
// Every method is a successful no-op; creation methods return fresh unique
// handles so callers can still distinguish resources, and `read_buffer`
// returns an empty Vec.
impl GpuDevice for NullDevice {
    fn create_buffer(&mut self, _size: usize, _usage: BufferUsage) -> BufferHandle {
        BufferHandle(self.next_id())
    }
    fn create_texture(&mut self, _w: u32, _h: u32, _fmt: TextureFormat) -> TextureHandle {
        TextureHandle(self.next_id())
    }
    fn create_shader(&mut self, _src: &str, _stage: ShaderStage) -> ShaderHandle {
        ShaderHandle(self.next_id())
    }
    fn create_pipeline(&mut self, _desc: &RenderPipelineDesc) -> PipelineHandle {
        PipelineHandle(self.next_id())
    }
    fn create_compute_pipeline(&mut self, _desc: &ComputePipelineDesc) -> ComputePipelineHandle {
        ComputePipelineHandle(self.next_id())
    }
    fn write_buffer(&mut self, _buf: BufferHandle, _off: usize, _data: &[u8]) {}
    // No storage exists, so reads always come back empty.
    fn read_buffer(&self, _buf: BufferHandle) -> Vec<u8> { Vec::new() }
    fn write_texture(&mut self, _tex: TextureHandle, _data: &[u8]) {}
    fn begin_render_pass(&mut self, _color: &[TextureHandle], _depth: Option<TextureHandle>) {}
    fn end_render_pass(&mut self) {}
    fn begin_compute_pass(&mut self) {}
    fn end_compute_pass(&mut self) {}
    fn set_pipeline(&mut self, _p: PipelineHandle) {}
    fn set_vertex_buffer(&mut self, _slot: u32, _buf: BufferHandle) {}
    fn set_index_buffer(&mut self, _buf: BufferHandle) {}
    fn set_bind_group(&mut self, _idx: u32, _grp: &BindGroup) {}
    fn draw(&mut self, _vc: u32, _ic: u32) {}
    fn draw_indexed(&mut self, _ic: u32, _inst: u32) {}
    fn draw_indirect(&mut self, _buf: BufferHandle, _off: u64) {}
    fn set_compute_pipeline(&mut self, _p: ComputePipelineHandle) {}
    fn dispatch_workgroups(&mut self, _x: u32, _y: u32, _z: u32) {}
    fn dispatch_indirect(&mut self, _buf: BufferHandle, _off: u64) {}
    fn destroy_buffer(&mut self, _buf: BufferHandle) {}
    fn destroy_texture(&mut self, _tex: TextureHandle) {}
    fn name(&self) -> &str { "Null" }
}
320
321// ---------------------------------------------------------------------------
322// SoftwareDevice — CPU fallback
323// ---------------------------------------------------------------------------
324
// CPU-side backing store for a buffer created on the software device.
struct SwBuffer {
    data: Vec<u8>,
    // Stored but not currently consulted anywhere in this module —
    // presumably kept for future validation/debugging. TODO confirm.
    usage: BufferUsage,
}

// CPU-side backing store for a texture created on the software device.
struct SwTexture {
    data: Vec<u8>,
    // Dimensions/format are recorded at creation but not read back in this
    // module; retained for completeness. TODO confirm intended use.
    width: u32,
    height: u32,
    format: TextureFormat,
}
336
/// CPU-backed device.  Resources live in host memory; draw and dispatch
/// calls are recorded into `draw_log` rather than executed.
pub struct SoftwareDevice {
    // Next id handed out by `alloc_id`; shared across all resource kinds.
    next_id: u64,
    buffers: HashMap<u64, SwBuffer>,
    textures: HashMap<u64, SwTexture>,
    // Pass state flags set by the begin/end pass methods.
    in_render_pass: bool,
    in_compute_pass: bool,
    // Currently bound pipeline/buffer state; reset when a pass ends.
    current_pipeline: Option<PipelineHandle>,
    current_compute_pipeline: Option<ComputePipelineHandle>,
    current_vertex_buffers: HashMap<u32, BufferHandle>,
    current_index_buffer: Option<BufferHandle>,
    // Commands recorded by draw/dispatch; retrieved via `drain_log`.
    draw_log: Vec<RecordedCommand>,
}
350
351impl SoftwareDevice {
352    pub fn new() -> Self {
353        Self {
354            next_id: 1,
355            buffers: HashMap::new(),
356            textures: HashMap::new(),
357            in_render_pass: false,
358            in_compute_pass: false,
359            current_pipeline: None,
360            current_compute_pipeline: None,
361            current_vertex_buffers: HashMap::new(),
362            current_index_buffer: None,
363            draw_log: Vec::new(),
364        }
365    }
366
367    fn alloc_id(&mut self) -> u64 {
368        let id = self.next_id;
369        self.next_id += 1;
370        id
371    }
372
373    /// Return all recorded commands since last drain.
374    pub fn drain_log(&mut self) -> Vec<RecordedCommand> {
375        std::mem::take(&mut self.draw_log)
376    }
377}
378
impl Default for SoftwareDevice {
    /// Same as [`SoftwareDevice::new`].
    fn default() -> Self { Self::new() }
}
382
383impl GpuDevice for SoftwareDevice {
384    fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle {
385        let id = self.alloc_id();
386        self.buffers.insert(id, SwBuffer { data: vec![0u8; size], usage });
387        BufferHandle(id)
388    }
389
390    fn create_texture(&mut self, width: u32, height: u32, format: TextureFormat) -> TextureHandle {
391        let id = self.alloc_id();
392        let size = (width as usize) * (height as usize) * format.bytes_per_pixel();
393        self.textures.insert(id, SwTexture { data: vec![0u8; size], width, height, format });
394        TextureHandle(id)
395    }
396
397    fn create_shader(&mut self, _src: &str, _stage: ShaderStage) -> ShaderHandle {
398        ShaderHandle(self.alloc_id())
399    }
400
401    fn create_pipeline(&mut self, _desc: &RenderPipelineDesc) -> PipelineHandle {
402        PipelineHandle(self.alloc_id())
403    }
404
405    fn create_compute_pipeline(&mut self, _desc: &ComputePipelineDesc) -> ComputePipelineHandle {
406        ComputePipelineHandle(self.alloc_id())
407    }
408
409    fn write_buffer(&mut self, buffer: BufferHandle, offset: usize, data: &[u8]) {
410        if let Some(buf) = self.buffers.get_mut(&buffer.0) {
411            let end = (offset + data.len()).min(buf.data.len());
412            let len = end.saturating_sub(offset);
413            if len > 0 {
414                buf.data[offset..offset + len].copy_from_slice(&data[..len]);
415            }
416        }
417    }
418
419    fn read_buffer(&self, buffer: BufferHandle) -> Vec<u8> {
420        self.buffers.get(&buffer.0).map(|b| b.data.clone()).unwrap_or_default()
421    }
422
423    fn write_texture(&mut self, texture: TextureHandle, data: &[u8]) {
424        if let Some(tex) = self.textures.get_mut(&texture.0) {
425            let len = data.len().min(tex.data.len());
426            tex.data[..len].copy_from_slice(&data[..len]);
427        }
428    }
429
430    fn begin_render_pass(&mut self, _color: &[TextureHandle], _depth: Option<TextureHandle>) {
431        self.in_render_pass = true;
432    }
433
434    fn end_render_pass(&mut self) {
435        self.in_render_pass = false;
436        self.current_pipeline = None;
437        self.current_vertex_buffers.clear();
438        self.current_index_buffer = None;
439    }
440
441    fn begin_compute_pass(&mut self) {
442        self.in_compute_pass = true;
443    }
444
445    fn end_compute_pass(&mut self) {
446        self.in_compute_pass = false;
447        self.current_compute_pipeline = None;
448    }
449
450    fn set_pipeline(&mut self, pipeline: PipelineHandle) {
451        self.current_pipeline = Some(pipeline);
452    }
453
454    fn set_vertex_buffer(&mut self, slot: u32, buffer: BufferHandle) {
455        self.current_vertex_buffers.insert(slot, buffer);
456    }
457
458    fn set_index_buffer(&mut self, buffer: BufferHandle) {
459        self.current_index_buffer = Some(buffer);
460    }
461
462    fn set_bind_group(&mut self, _index: u32, _group: &BindGroup) {
463        // In software: bind group state is noted but not used for actual rendering.
464    }
465
466    fn draw(&mut self, vertex_count: u32, instance_count: u32) {
467        if let Some(pipe) = self.current_pipeline {
468            self.draw_log.push(RecordedCommand::Draw {
469                pipeline: pipe,
470                vertex_count,
471                instance_count,
472            });
473        }
474    }
475
476    fn draw_indexed(&mut self, index_count: u32, instance_count: u32) {
477        if let Some(pipe) = self.current_pipeline {
478            self.draw_log.push(RecordedCommand::DrawIndexed {
479                pipeline: pipe,
480                index_count,
481                instance_count,
482            });
483        }
484    }
485
486    fn draw_indirect(&mut self, buffer: BufferHandle, _offset: u64) {
487        // Read indirect args from buffer: vertex_count(u32), instance_count(u32)
488        if let Some(buf) = self.buffers.get(&buffer.0) {
489            if buf.data.len() >= 8 {
490                let vc = u32::from_le_bytes([buf.data[0], buf.data[1], buf.data[2], buf.data[3]]);
491                let ic = u32::from_le_bytes([buf.data[4], buf.data[5], buf.data[6], buf.data[7]]);
492                self.draw(vc, ic);
493            }
494        }
495    }
496
497    fn set_compute_pipeline(&mut self, pipeline: ComputePipelineHandle) {
498        self.current_compute_pipeline = Some(pipeline);
499    }
500
501    fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
502        if let Some(pipe) = self.current_compute_pipeline {
503            self.draw_log.push(RecordedCommand::Dispatch {
504                pipeline: pipe,
505                x, y, z,
506            });
507        }
508    }
509
510    fn dispatch_indirect(&mut self, buffer: BufferHandle, _offset: u64) {
511        if let Some(buf) = self.buffers.get(&buffer.0) {
512            if buf.data.len() >= 12 {
513                let x = u32::from_le_bytes([buf.data[0], buf.data[1], buf.data[2], buf.data[3]]);
514                let y = u32::from_le_bytes([buf.data[4], buf.data[5], buf.data[6], buf.data[7]]);
515                let z = u32::from_le_bytes([buf.data[8], buf.data[9], buf.data[10], buf.data[11]]);
516                self.dispatch_workgroups(x, y, z);
517            }
518        }
519    }
520
521    fn destroy_buffer(&mut self, buffer: BufferHandle) {
522        self.buffers.remove(&buffer.0);
523    }
524
525    fn destroy_texture(&mut self, texture: TextureHandle) {
526        self.textures.remove(&texture.0);
527    }
528
529    fn name(&self) -> &str { "Software" }
530}
531
532// ---------------------------------------------------------------------------
533// OpenGLDevice — wraps glow (delegates to SoftwareDevice in headless)
534// ---------------------------------------------------------------------------
535
/// OpenGL device implementation.  Without a live GL context this delegates
/// entirely to `SoftwareDevice`.
pub struct OpenGLDevice {
    // Headless fallback; all trait methods forward here.
    inner: SoftwareDevice,
}
541
impl OpenGLDevice {
    /// Construct a headless OpenGL device backed by a fresh `SoftwareDevice`.
    pub fn new() -> Self {
        Self { inner: SoftwareDevice::new() }
    }
}
547
impl Default for OpenGLDevice {
    /// Same as [`OpenGLDevice::new`].
    fn default() -> Self { Self::new() }
}
551
// Pure delegation: every method forwards to the inner SoftwareDevice
// unchanged; only `name` differs.
impl GpuDevice for OpenGLDevice {
    fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle { self.inner.create_buffer(size, usage) }
    fn create_texture(&mut self, w: u32, h: u32, f: TextureFormat) -> TextureHandle { self.inner.create_texture(w, h, f) }
    fn create_shader(&mut self, s: &str, st: ShaderStage) -> ShaderHandle { self.inner.create_shader(s, st) }
    fn create_pipeline(&mut self, d: &RenderPipelineDesc) -> PipelineHandle { self.inner.create_pipeline(d) }
    fn create_compute_pipeline(&mut self, d: &ComputePipelineDesc) -> ComputePipelineHandle { self.inner.create_compute_pipeline(d) }
    fn write_buffer(&mut self, b: BufferHandle, o: usize, d: &[u8]) { self.inner.write_buffer(b, o, d) }
    fn read_buffer(&self, b: BufferHandle) -> Vec<u8> { self.inner.read_buffer(b) }
    fn write_texture(&mut self, t: TextureHandle, d: &[u8]) { self.inner.write_texture(t, d) }
    fn begin_render_pass(&mut self, c: &[TextureHandle], d: Option<TextureHandle>) { self.inner.begin_render_pass(c, d) }
    fn end_render_pass(&mut self) { self.inner.end_render_pass() }
    fn begin_compute_pass(&mut self) { self.inner.begin_compute_pass() }
    fn end_compute_pass(&mut self) { self.inner.end_compute_pass() }
    fn set_pipeline(&mut self, p: PipelineHandle) { self.inner.set_pipeline(p) }
    fn set_vertex_buffer(&mut self, s: u32, b: BufferHandle) { self.inner.set_vertex_buffer(s, b) }
    fn set_index_buffer(&mut self, b: BufferHandle) { self.inner.set_index_buffer(b) }
    fn set_bind_group(&mut self, i: u32, g: &BindGroup) { self.inner.set_bind_group(i, g) }
    fn draw(&mut self, v: u32, i: u32) { self.inner.draw(v, i) }
    fn draw_indexed(&mut self, i: u32, inst: u32) { self.inner.draw_indexed(i, inst) }
    fn draw_indirect(&mut self, b: BufferHandle, o: u64) { self.inner.draw_indirect(b, o) }
    fn set_compute_pipeline(&mut self, p: ComputePipelineHandle) { self.inner.set_compute_pipeline(p) }
    fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) { self.inner.dispatch_workgroups(x, y, z) }
    fn dispatch_indirect(&mut self, b: BufferHandle, o: u64) { self.inner.dispatch_indirect(b, o) }
    fn destroy_buffer(&mut self, b: BufferHandle) { self.inner.destroy_buffer(b) }
    fn destroy_texture(&mut self, t: TextureHandle) { self.inner.destroy_texture(t) }
    fn name(&self) -> &str { "OpenGL" }
}
579
580// ---------------------------------------------------------------------------
581// SimpleQueue — basic command queue
582// ---------------------------------------------------------------------------
583
/// A simple in-order queue that executes commands immediately.
pub struct SimpleQueue {
    // Mutex is needed for the `Sync` bound required by `GpuQueue`,
    // since Vec<Box<dyn FnOnce + Send>> is not Sync on its own.
    pending_callbacks: std::sync::Mutex<Vec<Box<dyn FnOnce() + Send>>>,
}
588
589impl SimpleQueue {
590    pub fn new() -> Self {
591        Self { pending_callbacks: std::sync::Mutex::new(Vec::new()) }
592    }
593
594    /// Flush all pending completion callbacks.
595    pub fn flush(&mut self) {
596        let mut cbs = self.pending_callbacks.lock().unwrap();
597        for cb in cbs.drain(..) {
598            cb();
599        }
600    }
601}
602
impl Default for SimpleQueue {
    /// Same as [`SimpleQueue::new`].
    fn default() -> Self { Self::new() }
}
606
impl GpuQueue for SimpleQueue {
    /// Accept and discard the command batch.
    fn submit(&mut self, _commands: Vec<RecordedCommand>) {
        // In a real implementation this would encode and submit to the GPU.
        // Here we just note completion.
    }

    /// Queue `callback` to be invoked on the next `flush`.
    fn on_completion(&mut self, callback: Box<dyn FnOnce() + Send>) {
        self.pending_callbacks.lock().unwrap().push(callback);
    }
}
617
618// ---------------------------------------------------------------------------
619// Tests
620// ---------------------------------------------------------------------------
621
#[cfg(test)]
mod tests {
    use super::*;
    // Removed: unused `use super::super::renderer::AttributeFormat;` and the
    // dead helper `make_shader_handle` — both triggered warnings under
    // `cargo test` and were referenced nowhere.

    #[test]
    fn blend_state_presets() {
        assert_eq!(BlendState::ALPHA.src_factor, BlendFactor::SrcAlpha);
        assert_eq!(BlendState::ADDITIVE.operation, BlendOp::Add);
        assert_eq!(BlendState::REPLACE.dst_factor, BlendFactor::Zero);
    }

    #[test]
    fn null_device_creates_unique_handles() {
        let mut dev = NullDevice::new();
        let a = dev.create_buffer(64, BufferUsage::VERTEX);
        let b = dev.create_buffer(64, BufferUsage::VERTEX);
        assert_ne!(a, b);
    }

    #[test]
    fn null_device_all_operations() {
        let mut dev = NullDevice::new();
        let buf = dev.create_buffer(16, BufferUsage::UNIFORM);
        dev.write_buffer(buf, 0, &[1, 2, 3]);
        assert!(dev.read_buffer(buf).is_empty()); // Null device returns nothing
        let tex = dev.create_texture(4, 4, TextureFormat::RGBA8);
        dev.write_texture(tex, &[0; 64]);

        dev.begin_render_pass(&[tex], None);
        let vs = dev.create_shader("v", ShaderStage::Vertex);
        let fs = dev.create_shader("f", ShaderStage::Fragment);
        let pipe = dev.create_pipeline(&RenderPipelineDesc {
            vertex: vs,
            fragment: fs,
            vertex_layout: Vec::new(),
            color_targets: Vec::new(),
            depth_stencil: None,
        });
        dev.set_pipeline(pipe);
        dev.set_vertex_buffer(0, buf);
        dev.set_index_buffer(buf);
        dev.set_bind_group(0, &BindGroup::new());
        dev.draw(3, 1);
        dev.draw_indexed(6, 1);
        dev.draw_indirect(buf, 0);
        dev.end_render_pass();

        dev.begin_compute_pass();
        let cp = dev.create_compute_pipeline(&ComputePipelineDesc {
            compute_shader: vs,
            bind_groups: Vec::new(),
        });
        dev.set_compute_pipeline(cp);
        dev.dispatch_workgroups(4, 1, 1);
        dev.dispatch_indirect(buf, 0);
        dev.end_compute_pass();

        dev.destroy_buffer(buf);
        dev.destroy_texture(tex);
        assert_eq!(dev.name(), "Null");
    }

    #[test]
    fn software_device_buffer_write_read() {
        let mut dev = SoftwareDevice::new();
        let buf = dev.create_buffer(16, BufferUsage::STORAGE);
        dev.write_buffer(buf, 0, &[10, 20, 30, 40]);
        let data = dev.read_buffer(buf);
        assert_eq!(&data[..4], &[10, 20, 30, 40]);
        assert_eq!(data.len(), 16);
    }

    #[test]
    fn software_device_buffer_write_with_offset() {
        let mut dev = SoftwareDevice::new();
        let buf = dev.create_buffer(8, BufferUsage::STORAGE);
        dev.write_buffer(buf, 4, &[0xAA, 0xBB, 0xCC, 0xDD]);
        let data = dev.read_buffer(buf);
        assert_eq!(&data[4..8], &[0xAA, 0xBB, 0xCC, 0xDD]);
    }

    #[test]
    fn software_device_texture() {
        let mut dev = SoftwareDevice::new();
        let tex = dev.create_texture(2, 2, TextureFormat::RGBA8);
        let pixels = vec![255u8; 16];
        dev.write_texture(tex, &pixels);
    }

    #[test]
    fn software_device_render_pass() {
        let mut dev = SoftwareDevice::new();
        let vs = dev.create_shader("v", ShaderStage::Vertex);
        let fs = dev.create_shader("f", ShaderStage::Fragment);
        let pipe = dev.create_pipeline(&RenderPipelineDesc {
            vertex: vs,
            fragment: fs,
            vertex_layout: Vec::new(),
            color_targets: vec![ColorTargetState {
                format: TextureFormat::RGBA8,
                blend: Some(BlendState::ALPHA),
            }],
            depth_stencil: None,
        });
        let vbuf = dev.create_buffer(48, BufferUsage::VERTEX);
        let tex = dev.create_texture(800, 600, TextureFormat::RGBA8);

        dev.begin_render_pass(&[tex], None);
        dev.set_pipeline(pipe);
        dev.set_vertex_buffer(0, vbuf);
        dev.draw(3, 1);
        dev.end_render_pass();

        let log = dev.drain_log();
        assert_eq!(log.len(), 1);
        assert!(matches!(log[0], RecordedCommand::Draw { vertex_count: 3, instance_count: 1, .. }));
    }

    #[test]
    fn software_device_compute_pass() {
        let mut dev = SoftwareDevice::new();
        let cs = dev.create_shader("c", ShaderStage::Compute);
        let cp = dev.create_compute_pipeline(&ComputePipelineDesc {
            compute_shader: cs,
            bind_groups: Vec::new(),
        });

        dev.begin_compute_pass();
        dev.set_compute_pipeline(cp);
        dev.dispatch_workgroups(4, 2, 1);
        dev.end_compute_pass();

        let log = dev.drain_log();
        assert_eq!(log.len(), 1);
        assert!(matches!(log[0], RecordedCommand::Dispatch { x: 4, y: 2, z: 1, .. }));
    }

    #[test]
    fn software_device_draw_indirect() {
        let mut dev = SoftwareDevice::new();
        let vs = dev.create_shader("v", ShaderStage::Vertex);
        let fs = dev.create_shader("f", ShaderStage::Fragment);
        let pipe = dev.create_pipeline(&RenderPipelineDesc {
            vertex: vs, fragment: fs,
            vertex_layout: Vec::new(), color_targets: Vec::new(), depth_stencil: None,
        });

        // Indirect args: vertex_count=6, instance_count=2
        let mut args = Vec::new();
        args.extend_from_slice(&6u32.to_le_bytes());
        args.extend_from_slice(&2u32.to_le_bytes());
        let buf = dev.create_buffer(8, BufferUsage::INDIRECT);
        dev.write_buffer(buf, 0, &args);

        dev.begin_render_pass(&[], None);
        dev.set_pipeline(pipe);
        dev.draw_indirect(buf, 0);
        dev.end_render_pass();

        let log = dev.drain_log();
        assert_eq!(log.len(), 1);
        assert!(matches!(log[0], RecordedCommand::Draw { vertex_count: 6, instance_count: 2, .. }));
    }

    #[test]
    fn software_device_dispatch_indirect() {
        let mut dev = SoftwareDevice::new();
        let cs = dev.create_shader("c", ShaderStage::Compute);
        let cp = dev.create_compute_pipeline(&ComputePipelineDesc {
            compute_shader: cs, bind_groups: Vec::new(),
        });

        let mut args = Vec::new();
        args.extend_from_slice(&8u32.to_le_bytes());
        args.extend_from_slice(&4u32.to_le_bytes());
        args.extend_from_slice(&1u32.to_le_bytes());
        let buf = dev.create_buffer(12, BufferUsage::INDIRECT);
        dev.write_buffer(buf, 0, &args);

        dev.begin_compute_pass();
        dev.set_compute_pipeline(cp);
        dev.dispatch_indirect(buf, 0);
        dev.end_compute_pass();

        let log = dev.drain_log();
        assert_eq!(log.len(), 1);
        assert!(matches!(log[0], RecordedCommand::Dispatch { x: 8, y: 4, z: 1, .. }));
    }

    #[test]
    fn software_device_destroy() {
        let mut dev = SoftwareDevice::new();
        let buf = dev.create_buffer(8, BufferUsage::VERTEX);
        dev.destroy_buffer(buf);
        assert!(dev.read_buffer(buf).is_empty());
    }

    #[test]
    fn opengl_device_delegates() {
        let mut dev = OpenGLDevice::new();
        assert_eq!(dev.name(), "OpenGL");
        let buf = dev.create_buffer(8, BufferUsage::UNIFORM);
        dev.write_buffer(buf, 0, &[1, 2, 3, 4, 5, 6, 7, 8]);
        assert_eq!(dev.read_buffer(buf), vec![1, 2, 3, 4, 5, 6, 7, 8]);
    }

    #[test]
    fn bind_group_builder() {
        let bg = BindGroup::new()
            .push(BindGroupEntry { binding: 0, resource: BoundResource::Buffer(BufferHandle(1)) })
            .push(BindGroupEntry { binding: 1, resource: BoundResource::Texture(TextureHandle(2)) });
        assert_eq!(bg.entries.len(), 2);
    }

    #[test]
    fn simple_queue_flush() {
        use std::sync::atomic::{AtomicBool, Ordering};
        use std::sync::Arc;

        let mut queue = SimpleQueue::new();
        let called = Arc::new(AtomicBool::new(false));
        let called2 = called.clone();
        queue.submit(Vec::new());
        queue.on_completion(Box::new(move || {
            called2.store(true, Ordering::SeqCst);
        }));
        queue.flush();
        assert!(called.load(Ordering::SeqCst));
    }

    #[test]
    fn depth_stencil_default() {
        let ds = DepthStencilState::default();
        assert!(ds.depth_write_enabled);
        assert_eq!(ds.depth_compare, CompareFunction::Less);
        assert_eq!(ds.format, TextureFormat::Depth32F);
    }

    #[test]
    fn color_target_with_blend() {
        let ct = ColorTargetState {
            format: TextureFormat::RGBA8,
            blend: Some(BlendState::ALPHA),
        };
        assert_eq!(ct.format, TextureFormat::RGBA8);
        assert_eq!(ct.blend.unwrap().src_factor, BlendFactor::SrcAlpha);
    }
}