//! arcane_core/renderer/mod.rs — top-level 2D renderer module.
1mod gpu;
2mod sprite;
3mod texture;
4pub mod camera;
5mod tilemap;
6mod lighting;
7pub mod font;
8pub mod msdf;
9pub mod shader;
10pub mod postprocess;
11pub mod radiance;
12pub mod geometry;
13pub mod rendertarget;
14pub mod sdf;
15// Test harness is always public for integration tests
16pub mod test_harness;
17
18pub use gpu::GpuContext;
19pub use sprite::{SpriteCommand, SpritePipeline};
20pub use texture::{TextureId, TextureStore};
21pub use camera::Camera2D;
22pub use tilemap::{Tilemap, TilemapStore};
23pub use lighting::{LightingState, LightingUniform, PointLight, LightData, MAX_LIGHTS};
24pub use msdf::{MsdfFont, MsdfFontStore, MsdfGlyph};
25pub use shader::ShaderStore;
26pub use postprocess::PostProcessPipeline;
27pub use radiance::{RadiancePipeline, RadianceState, EmissiveSurface, Occluder, DirectionalLight, SpotLight};
28pub use geometry::GeometryBatch;
29pub use rendertarget::RenderTargetStore;
30pub use sdf::{SdfPipelineStore, SdfCommand, SdfFill};
31
32use crate::scripting::geometry_ops::GeoCommand;
33use crate::scripting::sdf_ops::SdfDrawCommand;
34use anyhow::Result;
35
36/// Convert a scripting-layer SdfDrawCommand to a rendering-layer SdfCommand.
37fn convert_sdf_draw_command(c: SdfDrawCommand) -> SdfCommand {
38    let fill = match c.fill_type {
39        0 => SdfFill::Solid { color: c.color },
40        1 => SdfFill::Outline { color: c.color, thickness: c.fill_param },
41        2 => SdfFill::SolidWithOutline { fill: c.color, outline: c.color2, thickness: c.fill_param },
42        3 => SdfFill::Gradient { from: c.color, to: c.color2, angle: c.fill_param, scale: c.gradient_scale },
43        4 => SdfFill::Glow { color: c.color, intensity: c.fill_param },
44        5 => SdfFill::CosinePalette {
45            a: [c.palette_params[0], c.palette_params[1], c.palette_params[2]],
46            b: [c.palette_params[3], c.palette_params[4], c.palette_params[5]],
47            c: [c.palette_params[6], c.palette_params[7], c.palette_params[8]],
48            d: [c.palette_params[9], c.palette_params[10], c.palette_params[11]],
49        },
50        _ => SdfFill::Solid { color: c.color },
51    };
52    SdfCommand {
53        sdf_expr: c.sdf_expr,
54        fill,
55        x: c.x,
56        y: c.y,
57        bounds: c.bounds,
58        layer: c.layer,
59        rotation: c.rotation,
60        scale: c.scale,
61        opacity: c.opacity,
62    }
63}
64
/// A single step in the interleaved render schedule.
/// Sprites, geometry, and SDF commands are merged by layer so that layer ordering
/// is respected across all pipeline types.
///
/// Each variant holds a half-open `start..end` index range into the
/// corresponding layer-sorted command vector (sliced as `&cmds[start..end]`
/// when the schedule is replayed in `render_frame`).
#[derive(Debug, PartialEq)]
enum RenderOp {
    /// Render a contiguous range of sorted sprite commands.
    Sprites { start: usize, end: usize },
    /// Render a contiguous range of sorted geometry commands.
    Geometry { start: usize, end: usize },
    /// Render a contiguous range of sorted SDF commands.
    Sdf { start: usize, end: usize },
}
77
/// Build an interleaved render schedule from sorted sprite, geometry, and SDF commands.
///
/// All input slices must be pre-sorted by layer. The schedule merges them so that
/// lower layers render first. At the same layer, the order is: sprites, then geometry, then SDF.
///
/// This is a three-way merge: each iteration picks the stream whose next command
/// has the lowest layer (ties broken sprites → geometry → SDF) and consumes the
/// longest run that may legally render before the other streams' next commands.
/// Emitted ranges are half-open and index into the input slices.
fn build_render_schedule(
    sprites: &[SpriteCommand],
    geo: &[GeoCommand],
    sdf: &[SdfCommand],
) -> Vec<RenderOp> {
    let mut schedule = Vec::new();
    // Per-stream cursors: index of the next unconsumed command in each slice.
    let mut si = 0;
    let mut gi = 0;
    let mut di = 0;

    // Each iteration consumes at least one command from the stream it picks
    // (the picked stream's head always satisfies its own bound), so the loop
    // terminates with every command scheduled exactly once.
    while si < sprites.len() || gi < geo.len() || di < sdf.len() {
        // Get current layer for each type (MAX if exhausted)
        let sprite_layer = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
        let geo_layer = if gi < geo.len() { geo[gi].layer() } else { i32::MAX };
        let sdf_layer = if di < sdf.len() { sdf[di].layer } else { i32::MAX };

        // Find minimum layer
        let min_layer = sprite_layer.min(geo_layer).min(sdf_layer);

        // At the same layer: sprites first, then geo, then SDF
        if sprite_layer == min_layer {
            let start = si;
            // Consume all sprites at or before the next geo/sdf layer
            // (`<=` because sprites outrank both other types at an equal layer).
            let bound = geo_layer.min(sdf_layer);
            while si < sprites.len() && sprites[si].layer <= bound {
                si += 1;
            }
            schedule.push(RenderOp::Sprites { start, end: si });
        } else if geo_layer == min_layer {
            let start = gi;
            // Consume geo commands at layers < next sprite layer and <= next sdf layer
            // (sprites come before geo at same layer, but geo comes before sdf)
            let sprite_bound = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
            let sdf_bound = if di < sdf.len() { sdf[di].layer } else { i32::MAX };
            while gi < geo.len() && geo[gi].layer() < sprite_bound && geo[gi].layer() <= sdf_bound {
                gi += 1;
            }
            schedule.push(RenderOp::Geometry { start, end: gi });
        } else {
            let start = di;
            // Consume SDF commands at layers < next sprite/geo layer
            // (strict `<` on both bounds: SDF renders last among equal layers).
            let sprite_bound = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
            let geo_bound = if gi < geo.len() { geo[gi].layer() } else { i32::MAX };
            while di < sdf.len() && sdf[di].layer < sprite_bound && sdf[di].layer < geo_bound {
                di += 1;
            }
            schedule.push(RenderOp::Sdf { start, end: di });
        }
    }

    schedule
}
134
/// Top-level renderer that owns the GPU context, sprite pipeline, and textures.
pub struct Renderer {
    /// GPU surface, device, queue, and surface configuration.
    pub gpu: GpuContext,
    /// Batched sprite pipeline; also owns the camera/lighting uniforms and is
    /// used with an empty command slice to clear a target.
    pub sprites: SpritePipeline,
    /// Pipeline for frame geometry commands (triangles, etc.).
    pub geometry: GeometryBatch,
    /// Custom shader store; uniforms are flushed once per frame with built-ins.
    pub shaders: ShaderStore,
    /// Post-processing chain; when it has effects, the frame renders to an
    /// offscreen target and the chain resolves to the surface.
    pub postprocess: PostProcessPipeline,
    /// Texture store (sprite textures plus registered render-target views).
    pub textures: TextureStore,
    /// 2D camera; `viewport_size` is in logical pixels (see `resize`).
    pub camera: Camera2D,
    /// Per-frame lighting state, converted to a uniform each frame.
    pub lighting: LightingState,
    /// Radiance-cascade GI compute pipeline.
    pub radiance: RadiancePipeline,
    /// CPU-side state fed to the radiance pipeline each frame.
    pub radiance_state: RadianceState,
    /// Off-screen render targets (owns the GPU textures; bind groups in TextureStore).
    pub render_targets: RenderTargetStore,
    /// Sprite commands queued for the current frame.
    pub frame_commands: Vec<SpriteCommand>,
    /// Geometry commands queued for the current frame (drained from GeoState).
    pub geo_commands: Vec<GeoCommand>,
    /// SDF commands queued for the current frame (drained from SdfState).
    pub sdf_commands: Vec<SdfCommand>,
    /// SDF pipeline store for rendering signed distance field shapes.
    pub sdf_pipeline: SdfPipelineStore,
    /// Display scale factor (e.g. 2.0 on Retina). Used to convert physical → logical pixels.
    pub scale_factor: f32,
    /// Clear color for the render pass background. Default: dark blue-gray.
    pub clear_color: [f32; 4],
    /// Elapsed time in seconds (accumulated, for shader built-ins).
    pub elapsed_time: f32,
    /// Frame delta time in seconds (for shader built-ins).
    pub delta_time: f32,
    /// Mouse position in screen pixels (for shader built-ins).
    pub mouse_pos: [f32; 2],
    /// When true, the next render_frame() will capture the surface to a PNG.
    pub capture_pending: bool,
    /// Options for the pending frame capture (scale, region, etc.).
    pub capture_options: crate::agent::CaptureFrameOptions,
    /// PNG bytes from the last capture (taken by the frame callback).
    pub capture_result: Option<Vec<u8>>,
}
174
175impl Renderer {
    /// Create a new renderer attached to a winit window.
    ///
    /// Builds the GPU context and every pipeline (sprites, geometry, shaders,
    /// post-process, SDF, radiance), then sizes the camera viewport in logical
    /// pixels so world units are DPI-independent.
    ///
    /// # Errors
    /// Propagates any failure from `GpuContext::new` (surface/device setup).
    pub fn new(window: std::sync::Arc<winit::window::Window>) -> Result<Self> {
        // Read the DPI scale before the window Arc is moved into the GPU context.
        let scale_factor = window.scale_factor() as f32;
        let gpu = GpuContext::new(window)?;
        let sprites = SpritePipeline::new(&gpu);
        let geometry = GeometryBatch::new(&gpu);
        let shaders = ShaderStore::new(&gpu);
        let postprocess = PostProcessPipeline::new(&gpu);
        let sdf_pipeline = SdfPipelineStore::new(&gpu);
        let radiance_pipeline = RadiancePipeline::new(&gpu);
        let textures = TextureStore::new();
        // Set camera viewport to logical pixels so world units are DPI-independent
        let logical_w = gpu.config.width as f32 / scale_factor;
        let logical_h = gpu.config.height as f32 / scale_factor;
        let camera = Camera2D {
            viewport_size: [logical_w, logical_h],
            ..Camera2D::default()
        };
        Ok(Self {
            gpu,
            sprites,
            geometry,
            shaders,
            postprocess,
            radiance: radiance_pipeline,
            radiance_state: RadianceState::new(),
            textures,
            camera,
            lighting: LightingState::default(),
            render_targets: RenderTargetStore::new(),
            frame_commands: Vec::new(),
            geo_commands: Vec::new(),
            sdf_commands: Vec::new(),
            sdf_pipeline,
            scale_factor,
            // Default background: dark blue-gray.
            clear_color: [0.1, 0.1, 0.15, 1.0],
            elapsed_time: 0.0,
            delta_time: 0.0,
            mouse_pos: [0.0, 0.0],
            capture_pending: false,
            capture_options: crate::agent::CaptureFrameOptions::default(),
            capture_result: None,
        })
    }
220
    /// Set geometry commands for the current frame (drained from GeoState in dev.rs).
    ///
    /// Replaces anything queued earlier this frame; the queue is cleared again
    /// at the end of `render_frame()`.
    pub fn set_geo_commands(&mut self, cmds: Vec<GeoCommand>) {
        self.geo_commands = cmds;
    }
225
226    /// Set SDF commands for the current frame.
227    /// Converts SdfDrawCommand (from scripting ops) to SdfCommand (for rendering).
228    pub fn set_sdf_commands(&mut self, cmds: Vec<SdfDrawCommand>) {
229        self.sdf_commands = cmds.into_iter().map(convert_sdf_draw_command).collect();
230    }
231
    /// Render the current frame's sprite, geometry, and SDF commands, interleaved by layer.
    ///
    /// Frame pipeline:
    /// 1. Sort each command queue by layer (sprites also by shader/blend/texture for batching).
    /// 2. Build a layer-merged schedule so draw order is correct across pipeline types.
    /// 3. Flush shader uniforms, write camera/lighting uniforms, run the GI compute pass.
    /// 4. Replay the schedule into either the post-process offscreen target or the surface.
    /// 5. Optionally capture the surface to PNG, present, and clear the queues.
    ///
    /// # Errors
    /// Returns an error if the surface's current texture cannot be acquired.
    pub fn render_frame(&mut self) -> Result<()> {
        let output = self.gpu.surface.get_current_texture()?;
        // View into this frame's swapchain image.
        let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self.gpu.device.create_command_encoder(
            &wgpu::CommandEncoderDescriptor { label: Some("frame_encoder") },
        );

        // Sort sprites by layer → shader_id → blend_mode → texture_id for batching
        self.frame_commands.sort_by(|a, b| {
            a.layer
                .cmp(&b.layer)
                .then(a.shader_id.cmp(&b.shader_id))
                .then(a.blend_mode.cmp(&b.blend_mode))
                .then(a.texture_id.cmp(&b.texture_id))
        });

        // Sort geometry commands by layer
        self.geo_commands.sort_by_key(|c| c.layer());

        // Sort SDF commands by layer
        self.sdf_commands.sort_by_key(|c| c.layer);

        // Build interleaved render schedule (requires the sorts above)
        let schedule = build_render_schedule(&self.frame_commands, &self.geo_commands, &self.sdf_commands);

        // Flush custom shader uniforms with auto-injected built-ins
        self.shaders.flush(
            &self.gpu.queue,
            self.elapsed_time,
            self.delta_time,
            self.camera.viewport_size,
            self.mouse_pos,
        );

        let lighting_uniform = self.lighting.to_uniform();
        // wgpu clear colors are f64 per channel.
        let clear_color = wgpu::Color {
            r: self.clear_color[0] as f64,
            g: self.clear_color[1] as f64,
            b: self.clear_color[2] as f64,
            a: self.clear_color[3] as f64,
        };

        // Write camera + lighting uniforms once for the whole frame
        self.sprites.prepare(&self.gpu.device, &self.gpu.queue, &self.camera, &lighting_uniform);
        self.sdf_pipeline.prepare(&self.gpu.queue, &self.camera, 0.0);

        // Run radiance cascade GI compute pass (if enabled).
        // Returns whether GI ran; if so, its light texture is composed onto the
        // color target after the schedule is replayed.
        let gi_active = self.radiance.compute(
            &self.gpu,
            &mut encoder,
            &self.radiance_state,
            &self.lighting,
            self.camera.x,
            self.camera.y,
            self.camera.viewport_size[0],
            self.camera.viewport_size[1],
        );

        if self.postprocess.has_effects() {
            // Render to offscreen target, then apply effects to surface
            // (inner scope ends the borrow of sprite_target before compose/apply).
            {
                let sprite_target = self.postprocess.sprite_target(&self.gpu);
                let camera_bg = self.sprites.camera_bind_group();

                if schedule.is_empty() {
                    // No commands at all — still need to clear
                    self.sprites.render(
                        &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                        &[], sprite_target, &mut encoder, Some(clear_color),
                    );
                } else {
                    // Only the first pass clears; later passes load the existing
                    // contents so the per-layer passes composite correctly.
                    let mut first = true;
                    for op in &schedule {
                        let cc = if first { Some(clear_color) } else { None };
                        first = false;
                        match op {
                            RenderOp::Sprites { start, end } => {
                                self.sprites.render(
                                    &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                                    &self.frame_commands[*start..*end],
                                    sprite_target, &mut encoder, cc,
                                );
                            }
                            RenderOp::Geometry { start, end } => {
                                self.geometry.flush_commands(
                                    &self.gpu.device, &mut encoder, sprite_target,
                                    camera_bg, &self.geo_commands[*start..*end], cc,
                                );
                            }
                            RenderOp::Sdf { start, end } => {
                                self.sdf_pipeline.render(
                                    &self.gpu.device, &mut encoder, sprite_target,
                                    &self.sdf_commands[*start..*end], cc,
                                );
                            }
                        }
                    }
                }
            }
            // Apply GI light texture to the offscreen target before post-processing
            if gi_active {
                let sprite_target = self.postprocess.sprite_target(&self.gpu);
                self.radiance.compose(&mut encoder, sprite_target);
            }
            self.postprocess.apply(&self.gpu, &mut encoder, &view);
        } else {
            // No effects — render directly to surface
            let camera_bg = self.sprites.camera_bind_group();

            if schedule.is_empty() {
                // No commands at all — still need to clear
                self.sprites.render(
                    &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                    &[], &view, &mut encoder, Some(clear_color),
                );
            } else {
                // Same first-pass-clears convention as the offscreen path above.
                let mut first = true;
                for op in &schedule {
                    let cc = if first { Some(clear_color) } else { None };
                    first = false;
                    match op {
                        RenderOp::Sprites { start, end } => {
                            self.sprites.render(
                                &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                                &self.frame_commands[*start..*end],
                                &view, &mut encoder, cc,
                            );
                        }
                        RenderOp::Geometry { start, end } => {
                            self.geometry.flush_commands(
                                &self.gpu.device, &mut encoder, &view,
                                camera_bg, &self.geo_commands[*start..*end], cc,
                            );
                        }
                        RenderOp::Sdf { start, end } => {
                            self.sdf_pipeline.render(
                                &self.gpu.device, &mut encoder, &view,
                                &self.sdf_commands[*start..*end], cc,
                            );
                        }
                    }
                }
            }
            // Apply GI light texture to the surface
            if gi_active {
                self.radiance.compose(&mut encoder, &view);
            }
        }

        self.gpu.queue.submit(std::iter::once(encoder.finish()));

        // Capture the rendered frame if requested (before present consumes the surface)
        if self.capture_pending {
            self.capture_pending = false;
            let opts = self.capture_options.clone();
            self.capture_result = self.capture_surface(&output.texture, &opts);
        }

        output.present();

        // Reset the per-frame queues for the next frame.
        self.frame_commands.clear();
        self.geo_commands.clear();
        self.sdf_commands.clear();
        Ok(())
    }
399
400    /// Resize the surface when the window size changes.
401    /// GPU surface uses physical pixels; camera viewport uses logical pixels.
402    pub fn resize(&mut self, physical_width: u32, physical_height: u32, scale_factor: f32) {
403        if physical_width > 0 && physical_height > 0 {
404            self.scale_factor = scale_factor;
405            self.gpu.config.width = physical_width;
406            self.gpu.config.height = physical_height;
407            self.gpu.surface.configure(&self.gpu.device, &self.gpu.config);
408            // Camera uses logical pixels so 1 world unit ≈ 1 logical pixel at zoom 1
409            self.camera.viewport_size = [
410                physical_width as f32 / scale_factor,
411                physical_height as f32 / scale_factor,
412            ];
413        }
414    }
415
416    // ── Frame capture ─────────────────────────────────────────────────────
417
418    /// Copy the surface texture to a CPU-side PNG with optional scaling. Returns None on failure.
419    fn capture_surface(&self, texture: &wgpu::Texture, opts: &crate::agent::CaptureFrameOptions) -> Option<Vec<u8>> {
420        let base_width = self.gpu.config.width;
421        let base_height = self.gpu.config.height;
422
423        // Apply region cropping first
424        let (read_x, read_y, read_w, read_h) = if let Some((x, y, w, h)) = opts.region {
425            // Clamp region to texture bounds
426            let x = x.min(base_width);
427            let y = y.min(base_height);
428            let w = w.min(base_width - x);
429            let h = h.min(base_height - y);
430            (x, y, w, h)
431        } else {
432            (0, 0, base_width, base_height)
433        };
434
435        // Auto-downscale if capture would exceed 10MB hard limit
436        let max_size_bytes = 10_000_000;
437        let estimated = opts.estimate_size(read_w, read_h);
438        let final_scale = if estimated > max_size_bytes {
439            // Scale factor to fit within limit: new_size ≈ old_size * scale²
440            let ratio = max_size_bytes as f32 / estimated as f32;
441            let auto_scale = (ratio.sqrt() * opts.scale).clamp(0.1, 1.0);
442            eprintln!("[capture] Frame would be {:.1}MB, auto-downscaling from {:.1}x to {:.1}x",
443                      estimated as f32 / 1_000_000.0, opts.scale, auto_scale);
444            auto_scale
445        } else {
446            opts.scale
447        };
448
449        let width = read_w;
450        let height = read_h;
451        let bytes_per_pixel: u32 = 4;
452        let unpadded_bytes_per_row = width * bytes_per_pixel;
453        let padded_bytes_per_row = ((unpadded_bytes_per_row + 255) / 256) * 256;
454
455        let buffer = self.gpu.device.create_buffer(&wgpu::BufferDescriptor {
456            label: Some("capture_readback"),
457            size: (padded_bytes_per_row * height) as u64,
458            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
459            mapped_at_creation: false,
460        });
461
462        let mut encoder = self.gpu.device.create_command_encoder(
463            &wgpu::CommandEncoderDescriptor { label: Some("capture_encoder") },
464        );
465
466        encoder.copy_texture_to_buffer(
467            wgpu::TexelCopyTextureInfo {
468                texture,
469                mip_level: 0,
470                origin: wgpu::Origin3d { x: read_x, y: read_y, z: 0 },
471                aspect: wgpu::TextureAspect::All,
472            },
473            wgpu::TexelCopyBufferInfo {
474                buffer: &buffer,
475                layout: wgpu::TexelCopyBufferLayout {
476                    offset: 0,
477                    bytes_per_row: Some(padded_bytes_per_row),
478                    rows_per_image: Some(height),
479                },
480            },
481            wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
482        );
483
484        self.gpu.queue.submit(std::iter::once(encoder.finish()));
485
486        // Map the buffer synchronously
487        let buffer_slice = buffer.slice(..);
488        let (tx, rx) = std::sync::mpsc::channel();
489        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
490            let _ = tx.send(result);
491        });
492        self.gpu.device.poll(wgpu::Maintain::Wait);
493
494        if rx.recv().ok()?.ok().is_none() {
495            return None;
496        }
497
498        let data = buffer_slice.get_mapped_range();
499
500        // Strip row padding and handle BGRA→RGBA if needed
501        let is_bgra = format!("{:?}", self.gpu.config.format).contains("Bgra");
502        let mut pixels = Vec::with_capacity((width * height * 4) as usize);
503        for y in 0..height {
504            let start = (y * padded_bytes_per_row) as usize;
505            let end = start + (width * 4) as usize;
506            let row = &data[start..end];
507            if is_bgra {
508                // Swap B↔R for each pixel
509                for chunk in row.chunks_exact(4) {
510                    pixels.extend_from_slice(&[chunk[2], chunk[1], chunk[0], chunk[3]]);
511                }
512            } else {
513                pixels.extend_from_slice(row);
514            }
515        }
516
517        drop(data);
518        buffer.unmap();
519
520        // Apply scaling if requested (or auto-downscaled)
521        let (final_pixels, final_width, final_height) = if (final_scale - 1.0).abs() > 0.01 {
522            // Use the `image` crate to load, scale, and extract pixels
523            let img = image::ImageBuffer::<image::Rgba<u8>, _>::from_raw(width, height, pixels.clone())?;
524            let scaled_w = (width as f32 * final_scale).ceil() as u32;
525            let scaled_h = (height as f32 * final_scale).ceil() as u32;
526            if scaled_w == 0 || scaled_h == 0 {
527                return None;
528            }
529            let scaled = image::imageops::resize(&img, scaled_w, scaled_h, image::imageops::FilterType::Gaussian);
530            let scaled_pixels = scaled.into_raw();
531            (scaled_pixels, scaled_w, scaled_h)
532        } else {
533            (pixels, width, height)
534        };
535
536        // Encode to PNG using the `image` crate
537        use image::ImageEncoder;
538        let mut png_bytes = Vec::new();
539        let encoder = image::codecs::png::PngEncoder::new(&mut png_bytes);
540        if encoder.write_image(&final_pixels, final_width, final_height, image::ExtendedColorType::Rgba8).is_err() {
541            return None;
542        }
543
544        Some(png_bytes)
545    }
546
547    // ── Render target helpers ──────────────────────────────────────────────
548
549    /// Allocate a new off-screen render target and register it as a samplable texture.
550    pub fn create_render_target(&mut self, id: u32, width: u32, height: u32) {
551        let surface_format = self.gpu.config.format;
552        self.render_targets.create(&self.gpu.device, id, width, height, surface_format);
553        if let Some(view) = self.render_targets.get_view(id) {
554            self.textures.register_render_target(
555                &self.gpu.device,
556                &self.sprites.texture_bind_group_layout,
557                id,
558                view,
559                width,
560                height,
561            );
562        }
563    }
564
    /// Free a render target's GPU resources and remove it from the texture store.
    ///
    /// After this call the id is no longer a valid sprite texture reference.
    pub fn destroy_render_target(&mut self, id: u32) {
        self.render_targets.destroy(id);
        self.textures.unregister_render_target(id);
    }
570
571    /// Render sprite commands into each queued render target (off-screen pre-pass).
572    ///
573    /// Call this BEFORE `render_frame()` so targets are ready as sprite inputs.
574    /// Uses a separate command encoder + GPU submit to avoid ordering conflicts.
575    pub fn render_targets_prepass(
576        &mut self,
577        target_queues: std::collections::HashMap<u32, Vec<SpriteCommand>>,
578    ) {
579        if target_queues.is_empty() {
580            return;
581        }
582
583        let mut encoder = self.gpu.device.create_command_encoder(
584            &wgpu::CommandEncoderDescriptor { label: Some("rt_encoder") },
585        );
586        let lighting_uniform = self.lighting.to_uniform();
587
588        for (target_id, mut cmds) in target_queues {
589            let view = self.render_targets.get_view(target_id);
590            let dims = self.render_targets.get_dims(target_id);
591            if let (Some(view), Some((tw, th))) = (view, dims) {
592                // Sort by layer → shader_id → blend_mode → texture_id
593                cmds.sort_by(|a, b| {
594                    a.layer
595                        .cmp(&b.layer)
596                        .then(a.shader_id.cmp(&b.shader_id))
597                        .then(a.blend_mode.cmp(&b.blend_mode))
598                        .then(a.texture_id.cmp(&b.texture_id))
599                });
600                // Orthographic camera: (0,0) = top-left of the render target
601                let target_camera = Camera2D {
602                    x: tw as f32 / 2.0,
603                    y: th as f32 / 2.0,
604                    zoom: 1.0,
605                    viewport_size: [tw as f32, th as f32],
606                    ..Camera2D::default()
607                };
608                self.sprites.prepare(&self.gpu.device, &self.gpu.queue, &target_camera, &lighting_uniform);
609                self.sprites.render(
610                    &self.gpu.device,
611                    &self.gpu.queue,
612                    &self.textures,
613                    &self.shaders,
614                    &cmds,
615                    view,
616                    &mut encoder,
617                    Some(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
618                );
619            }
620        }
621
622        self.gpu.queue.submit(std::iter::once(encoder.finish()));
623    }
624}
625
#[cfg(test)]
mod tests {
    use super::*;

    // ── Test helpers ─────────────────────────────────────────────────────

    /// Minimal sprite command on the given layer (everything else fixed).
    fn sprite(layer: i32) -> SpriteCommand {
        SpriteCommand {
            texture_id: 1, x: 0.0, y: 0.0, w: 16.0, h: 16.0, layer,
            uv_x: 0.0, uv_y: 0.0, uv_w: 1.0, uv_h: 1.0,
            tint_r: 1.0, tint_g: 1.0, tint_b: 1.0, tint_a: 1.0,
            rotation: 0.0, origin_x: 0.5, origin_y: 0.5,
            flip_x: false, flip_y: false, opacity: 1.0,
            blend_mode: 0, shader_id: 0,
        }
    }

    /// Minimal triangle geometry command on the given layer.
    fn geo(layer: i32) -> GeoCommand {
        GeoCommand::Triangle {
            x1: 0.0, y1: 0.0, x2: 16.0, y2: 0.0, x3: 8.0, y3: 16.0,
            r: 1.0, g: 1.0, b: 1.0, a: 1.0, layer,
        }
    }

    /// Minimal SDF command on the given layer.
    fn sdf(layer: i32) -> SdfCommand {
        SdfCommand {
            sdf_expr: "length(p) - 10.0".to_string(),
            fill: SdfFill::Solid { color: [1.0, 0.0, 0.0, 1.0] },
            x: 32.0, y: 32.0, bounds: 15.0, layer,
            rotation: 0.0, scale: 1.0, opacity: 1.0,
        }
    }

    /// Scripting-layer draw command with the given fill type and distinctive
    /// values in every other field (used by the passthrough assertions).
    fn sdf_draw(fill_type: u32) -> SdfDrawCommand {
        SdfDrawCommand {
            sdf_expr: "length(p) - 10.0".to_string(),
            fill_type,
            color: [1.0, 0.0, 0.0, 1.0],
            color2: [0.0, 1.0, 0.0, 1.0],
            fill_param: 2.0,
            palette_params: [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 0.0, 0.33, 0.67],
            gradient_scale: 1.5,
            x: 10.0, y: 20.0, bounds: 30.0, layer: 5,
            rotation: 0.1, scale: 2.0, opacity: 0.8,
        }
    }

    // ── build_render_schedule tests ──────────────────────────────────────

    #[test]
    fn test_schedule_empty_inputs() {
        let schedule = build_render_schedule(&[], &[], &[]);
        assert!(schedule.is_empty());
    }

    #[test]
    fn test_schedule_sprites_only() {
        let sprites = vec![sprite(0), sprite(1)];
        let schedule = build_render_schedule(&sprites, &[], &[]);
        assert_eq!(schedule, vec![RenderOp::Sprites { start: 0, end: 2 }]);
    }

    #[test]
    fn test_schedule_geo_only() {
        let geo_cmds = vec![geo(0), geo(1)];
        let schedule = build_render_schedule(&[], &geo_cmds, &[]);
        assert_eq!(schedule, vec![RenderOp::Geometry { start: 0, end: 2 }]);
    }

    #[test]
    fn test_schedule_sdf_only() {
        let sdf_cmds = vec![sdf(0), sdf(1)];
        let schedule = build_render_schedule(&[], &[], &sdf_cmds);
        assert_eq!(schedule, vec![RenderOp::Sdf { start: 0, end: 2 }]);
    }

    #[test]
    fn test_schedule_same_layer_order() {
        // All at layer 0: sprites first, then geo, then sdf
        let sprites = vec![sprite(0)];
        let geo_cmds = vec![geo(0)];
        let sdf_cmds = vec![sdf(0)];
        let schedule = build_render_schedule(&sprites, &geo_cmds, &sdf_cmds);
        assert_eq!(schedule.len(), 3);
        assert_eq!(schedule[0], RenderOp::Sprites { start: 0, end: 1 });
        assert_eq!(schedule[1], RenderOp::Geometry { start: 0, end: 1 });
        assert_eq!(schedule[2], RenderOp::Sdf { start: 0, end: 1 });
    }

    #[test]
    fn test_schedule_interleaved_layers() {
        // sprites at 0, geo at 1, sdf at 2
        let sprites = vec![sprite(0)];
        let geo_cmds = vec![geo(1)];
        let sdf_cmds = vec![sdf(2)];
        let schedule = build_render_schedule(&sprites, &geo_cmds, &sdf_cmds);
        assert_eq!(schedule.len(), 3);
        assert_eq!(schedule[0], RenderOp::Sprites { start: 0, end: 1 });
        assert_eq!(schedule[1], RenderOp::Geometry { start: 0, end: 1 });
        assert_eq!(schedule[2], RenderOp::Sdf { start: 0, end: 1 });
    }

    #[test]
    fn test_schedule_mixed_layers() {
        // sprites at 0 and 2, geo at 1 — the geo run must split the sprite runs.
        let sprites = vec![sprite(0), sprite(2)];
        let geo_cmds = vec![geo(1)];
        let schedule = build_render_schedule(&sprites, &geo_cmds, &[]);
        // Sprite at layer 0 first, then geo at layer 1, then sprite at layer 2.
        assert_eq!(
            schedule,
            vec![
                RenderOp::Sprites { start: 0, end: 1 },
                RenderOp::Geometry { start: 0, end: 1 },
                RenderOp::Sprites { start: 1, end: 2 },
            ]
        );
    }

    #[test]
    fn test_schedule_all_consumed() {
        // Verify all commands are consumed (no gaps in ranges)
        let sprites = vec![sprite(0), sprite(0), sprite(1)];
        let geo_cmds = vec![geo(0), geo(2)];
        let sdf_cmds = vec![sdf(1)];
        let schedule = build_render_schedule(&sprites, &geo_cmds, &sdf_cmds);

        let mut sprite_count = 0;
        let mut geo_count = 0;
        let mut sdf_count = 0;
        for op in &schedule {
            match op {
                RenderOp::Sprites { start, end } => sprite_count += end - start,
                RenderOp::Geometry { start, end } => geo_count += end - start,
                RenderOp::Sdf { start, end } => sdf_count += end - start,
            }
        }
        assert_eq!(sprite_count, 3, "all sprites consumed");
        assert_eq!(geo_count, 2, "all geo consumed");
        assert_eq!(sdf_count, 1, "all sdf consumed");
    }

    // ── convert_sdf_draw_command tests ───────────────────────────────────

    #[test]
    fn test_convert_sdf_solid() {
        let cmd = convert_sdf_draw_command(sdf_draw(0));
        assert!(matches!(cmd.fill, SdfFill::Solid { color } if color == [1.0, 0.0, 0.0, 1.0]));
    }

    #[test]
    fn test_convert_sdf_outline() {
        let cmd = convert_sdf_draw_command(sdf_draw(1));
        assert!(matches!(cmd.fill, SdfFill::Outline { color, thickness }
            if color == [1.0, 0.0, 0.0, 1.0] && thickness == 2.0));
    }

    #[test]
    fn test_convert_sdf_solid_with_outline() {
        let cmd = convert_sdf_draw_command(sdf_draw(2));
        assert!(matches!(cmd.fill, SdfFill::SolidWithOutline { fill, outline, thickness }
            if fill == [1.0, 0.0, 0.0, 1.0] && outline == [0.0, 1.0, 0.0, 1.0] && thickness == 2.0));
    }

    #[test]
    fn test_convert_sdf_gradient() {
        let cmd = convert_sdf_draw_command(sdf_draw(3));
        assert!(matches!(cmd.fill, SdfFill::Gradient { from, to, angle, scale }
            if from == [1.0, 0.0, 0.0, 1.0] && to == [0.0, 1.0, 0.0, 1.0]
            && angle == 2.0 && scale == 1.5));
    }

    #[test]
    fn test_convert_sdf_glow() {
        let cmd = convert_sdf_draw_command(sdf_draw(4));
        assert!(matches!(cmd.fill, SdfFill::Glow { color, intensity }
            if color == [1.0, 0.0, 0.0, 1.0] && intensity == 2.0));
    }

    #[test]
    fn test_convert_sdf_cosine_palette() {
        let cmd = convert_sdf_draw_command(sdf_draw(5));
        assert!(matches!(cmd.fill, SdfFill::CosinePalette { a, b, c, d }
            if a == [0.5, 0.5, 0.5] && b == [0.5, 0.5, 0.5]
            && c == [1.0, 1.0, 1.0] && d == [0.0, 0.33, 0.67]));
    }

    #[test]
    fn test_convert_sdf_unknown_fallback() {
        let cmd = convert_sdf_draw_command(sdf_draw(99));
        assert!(matches!(cmd.fill, SdfFill::Solid { color } if color == [1.0, 0.0, 0.0, 1.0]));
    }

    #[test]
    fn test_convert_sdf_field_passthrough() {
        let cmd = convert_sdf_draw_command(sdf_draw(0));
        assert_eq!(cmd.sdf_expr, "length(p) - 10.0");
        assert_eq!(cmd.x, 10.0);
        assert_eq!(cmd.y, 20.0);
        assert_eq!(cmd.bounds, 30.0);
        assert_eq!(cmd.layer, 5);
        assert_eq!(cmd.rotation, 0.1);
        assert_eq!(cmd.scale, 2.0);
        assert_eq!(cmd.opacity, 0.8);
    }
}
825}