//! arcane_core/renderer — renderer module root (mod.rs).

1mod gpu;
2mod sprite;
3mod texture;
4pub mod camera;
5mod tilemap;
6mod lighting;
7pub mod font;
8pub mod msdf;
9pub mod shader;
10pub mod postprocess;
11pub mod radiance;
12pub mod geometry;
13pub mod rendertarget;
14pub mod sdf;
15// Test harness is always public for integration tests
16pub mod test_harness;
17
18pub use gpu::GpuContext;
19pub use sprite::{SpriteCommand, SpritePipeline};
20pub use texture::{TextureId, TextureStore};
21pub use camera::Camera2D;
22pub use tilemap::{Tilemap, TilemapStore};
23pub use lighting::{LightingState, LightingUniform, PointLight, LightData, MAX_LIGHTS};
24pub use msdf::{MsdfFont, MsdfFontStore, MsdfGlyph};
25pub use shader::ShaderStore;
26pub use postprocess::PostProcessPipeline;
27pub use radiance::{RadiancePipeline, RadianceState, EmissiveSurface, Occluder, DirectionalLight, SpotLight};
28pub use geometry::GeometryBatch;
29pub use rendertarget::RenderTargetStore;
30pub use sdf::{SdfPipelineStore, SdfCommand, SdfFill};
31
32use crate::scripting::geometry_ops::GeoCommand;
33use crate::scripting::sdf_ops::SdfDrawCommand;
34use anyhow::Result;
35
/// A single step in the interleaved render schedule.
/// Sprites, geometry, and SDF commands are merged by layer so that layer ordering
/// is respected across all pipeline types.
///
/// Each variant holds a half-open index range `[start, end)` into the
/// corresponding pre-sorted command list held by the `Renderer`.
enum RenderOp {
    /// Render a contiguous range of sorted sprite commands.
    Sprites { start: usize, end: usize },
    /// Render a contiguous range of sorted geometry commands.
    Geometry { start: usize, end: usize },
    /// Render a contiguous range of sorted SDF commands.
    Sdf { start: usize, end: usize },
}
47
48/// Build an interleaved render schedule from sorted sprite, geometry, and SDF commands.
49///
50/// All input slices must be pre-sorted by layer. The schedule merges them so that
51/// lower layers render first. At the same layer, the order is: sprites, then geometry, then SDF.
52fn build_render_schedule(
53    sprites: &[SpriteCommand],
54    geo: &[GeoCommand],
55    sdf: &[SdfCommand],
56) -> Vec<RenderOp> {
57    let mut schedule = Vec::new();
58    let mut si = 0;
59    let mut gi = 0;
60    let mut di = 0;
61
62    while si < sprites.len() || gi < geo.len() || di < sdf.len() {
63        // Get current layer for each type (MAX if exhausted)
64        let sprite_layer = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
65        let geo_layer = if gi < geo.len() { geo[gi].layer() } else { i32::MAX };
66        let sdf_layer = if di < sdf.len() { sdf[di].layer } else { i32::MAX };
67
68        // Find minimum layer
69        let min_layer = sprite_layer.min(geo_layer).min(sdf_layer);
70
71        // At the same layer: sprites first, then geo, then SDF
72        if sprite_layer == min_layer {
73            let start = si;
74            // Consume all sprites at or before the next geo/sdf layer
75            let bound = geo_layer.min(sdf_layer);
76            while si < sprites.len() && sprites[si].layer <= bound {
77                si += 1;
78            }
79            schedule.push(RenderOp::Sprites { start, end: si });
80        } else if geo_layer == min_layer {
81            let start = gi;
82            // Consume geo commands at layers < next sprite layer and <= next sdf layer
83            // (sprites come before geo at same layer, but geo comes before sdf)
84            let sprite_bound = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
85            let sdf_bound = if di < sdf.len() { sdf[di].layer } else { i32::MAX };
86            while gi < geo.len() && geo[gi].layer() < sprite_bound && geo[gi].layer() <= sdf_bound {
87                gi += 1;
88            }
89            schedule.push(RenderOp::Geometry { start, end: gi });
90        } else {
91            let start = di;
92            // Consume SDF commands at layers < next sprite/geo layer
93            let sprite_bound = if si < sprites.len() { sprites[si].layer } else { i32::MAX };
94            let geo_bound = if gi < geo.len() { geo[gi].layer() } else { i32::MAX };
95            while di < sdf.len() && sdf[di].layer < sprite_bound && sdf[di].layer < geo_bound {
96                di += 1;
97            }
98            schedule.push(RenderOp::Sdf { start, end: di });
99        }
100    }
101
102    schedule
103}
104
/// Top-level renderer that owns the GPU context, sprite pipeline, and textures.
pub struct Renderer {
    /// GPU device/queue/surface/config wrapper.
    pub gpu: GpuContext,
    /// Batched sprite pipeline; also owns the camera/lighting bind group.
    pub sprites: SpritePipeline,
    /// Immediate-mode geometry renderer for queued `GeoCommand`s.
    pub geometry: GeometryBatch,
    /// Custom shader store; uniforms are flushed once per frame with built-ins.
    pub shaders: ShaderStore,
    /// Full-screen post-processing chain applied after scene rendering.
    pub postprocess: PostProcessPipeline,
    /// Registry of textures and their bind groups.
    pub textures: TextureStore,
    /// Scene camera; `viewport_size` is kept in logical pixels.
    pub camera: Camera2D,
    /// Dynamic lighting state, converted to a GPU uniform each frame.
    pub lighting: LightingState,
    /// Radiance-cascade global-illumination compute pipeline.
    pub radiance: RadiancePipeline,
    /// CPU-side parameters fed to the radiance pipeline each frame.
    pub radiance_state: RadianceState,
    /// Off-screen render targets (owns the GPU textures; bind groups in TextureStore).
    pub render_targets: RenderTargetStore,
    /// Sprite commands queued for the current frame.
    pub frame_commands: Vec<SpriteCommand>,
    /// Geometry commands queued for the current frame (drained from GeoState).
    pub geo_commands: Vec<GeoCommand>,
    /// SDF commands queued for the current frame (drained from SdfState).
    pub sdf_commands: Vec<SdfCommand>,
    /// SDF pipeline store for rendering signed distance field shapes.
    pub sdf_pipeline: SdfPipelineStore,
    /// Display scale factor (e.g. 2.0 on Retina). Used to convert physical → logical pixels.
    pub scale_factor: f32,
    /// Clear color for the render pass background. Default: dark blue-gray.
    pub clear_color: [f32; 4],
    /// Elapsed time in seconds (accumulated, for shader built-ins).
    pub elapsed_time: f32,
    /// Frame delta time in seconds (for shader built-ins).
    pub delta_time: f32,
    /// Mouse position in screen pixels (for shader built-ins).
    pub mouse_pos: [f32; 2],
    /// When true, the next render_frame() will capture the surface to a PNG.
    pub capture_pending: bool,
    /// PNG bytes from the last capture (taken by the frame callback).
    pub capture_result: Option<Vec<u8>>,
}
142
143impl Renderer {
    /// Create a new renderer attached to a winit window.
    ///
    /// # Errors
    /// Returns an error if the GPU context (surface/adapter/device) cannot be
    /// created for the window — the only fallible step here.
    pub fn new(window: std::sync::Arc<winit::window::Window>) -> Result<Self> {
        // Read the DPI scale before the window Arc is handed to the GPU context.
        let scale_factor = window.scale_factor() as f32;
        let gpu = GpuContext::new(window)?;
        let sprites = SpritePipeline::new(&gpu);
        let geometry = GeometryBatch::new(&gpu);
        let shaders = ShaderStore::new(&gpu);
        let postprocess = PostProcessPipeline::new(&gpu);
        let sdf_pipeline = SdfPipelineStore::new(&gpu);
        let radiance_pipeline = RadiancePipeline::new(&gpu);
        let textures = TextureStore::new();
        // Set camera viewport to logical pixels so world units are DPI-independent
        // (same physical→logical conversion `resize` performs later).
        let logical_w = gpu.config.width as f32 / scale_factor;
        let logical_h = gpu.config.height as f32 / scale_factor;
        let camera = Camera2D {
            viewport_size: [logical_w, logical_h],
            ..Camera2D::default()
        };
        Ok(Self {
            gpu,
            sprites,
            geometry,
            shaders,
            postprocess,
            radiance: radiance_pipeline,
            radiance_state: RadianceState::new(),
            textures,
            camera,
            lighting: LightingState::default(),
            render_targets: RenderTargetStore::new(),
            frame_commands: Vec::new(),
            geo_commands: Vec::new(),
            sdf_commands: Vec::new(),
            sdf_pipeline,
            scale_factor,
            // Default background: dark blue-gray.
            clear_color: [0.1, 0.1, 0.15, 1.0],
            elapsed_time: 0.0,
            delta_time: 0.0,
            mouse_pos: [0.0, 0.0],
            capture_pending: false,
            capture_result: None,
        })
    }
187
    /// Set geometry commands for the current frame (drained from GeoState in dev.rs).
    ///
    /// Replaces anything already queued; the queue is cleared again at the end
    /// of `render_frame`.
    pub fn set_geo_commands(&mut self, cmds: Vec<GeoCommand>) {
        self.geo_commands = cmds;
    }
192
193    /// Set SDF commands for the current frame.
194    /// Converts SdfDrawCommand (from scripting ops) to SdfCommand (for rendering).
195    pub fn set_sdf_commands(&mut self, cmds: Vec<SdfDrawCommand>) {
196        self.sdf_commands = cmds.into_iter().map(|c| {
197            let fill = match c.fill_type {
198                0 => SdfFill::Solid { color: c.color },
199                1 => SdfFill::Outline { color: c.color, thickness: c.fill_param },
200                2 => SdfFill::SolidWithOutline { fill: c.color, outline: c.color2, thickness: c.fill_param },
201                3 => SdfFill::Gradient { from: c.color, to: c.color2, angle: c.fill_param, scale: c.gradient_scale },
202                4 => SdfFill::Glow { color: c.color, intensity: c.fill_param },
203                5 => SdfFill::CosinePalette {
204                    a: [c.palette_params[0], c.palette_params[1], c.palette_params[2]],
205                    b: [c.palette_params[3], c.palette_params[4], c.palette_params[5]],
206                    c: [c.palette_params[6], c.palette_params[7], c.palette_params[8]],
207                    d: [c.palette_params[9], c.palette_params[10], c.palette_params[11]],
208                },
209                _ => SdfFill::Solid { color: c.color },
210            };
211            SdfCommand {
212                sdf_expr: c.sdf_expr,
213                fill,
214                x: c.x,
215                y: c.y,
216                bounds: c.bounds,
217                layer: c.layer,
218                rotation: c.rotation,
219                scale: c.scale,
220                opacity: c.opacity,
221            }
222        }).collect();
223    }
224
    /// Render the current frame's sprite, geometry, and SDF commands, interleaved by layer.
    ///
    /// Frame flow: acquire the surface image → sort all three command queues by
    /// layer → build the interleaved schedule → flush shader uniforms →
    /// optional radiance GI compute → execute the schedule into either the
    /// post-process offscreen target or the surface → optional GI compose and
    /// post-process → submit → optional PNG capture → present. All per-frame
    /// command queues are cleared before returning.
    ///
    /// # Errors
    /// Propagates surface-acquisition failures from `get_current_texture`.
    pub fn render_frame(&mut self) -> Result<()> {
        // Acquire the next swapchain image; surface errors propagate to the caller.
        let output = self.gpu.surface.get_current_texture()?;
        let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());

        // One encoder records the entire frame; submitted once at the end.
        let mut encoder = self.gpu.device.create_command_encoder(
            &wgpu::CommandEncoderDescriptor { label: Some("frame_encoder") },
        );

        // Sort sprites by layer → shader_id → blend_mode → texture_id for batching
        self.frame_commands.sort_by(|a, b| {
            a.layer
                .cmp(&b.layer)
                .then(a.shader_id.cmp(&b.shader_id))
                .then(a.blend_mode.cmp(&b.blend_mode))
                .then(a.texture_id.cmp(&b.texture_id))
        });

        // Sort geometry commands by layer
        self.geo_commands.sort_by_key(|c| c.layer());

        // Sort SDF commands by layer
        self.sdf_commands.sort_by_key(|c| c.layer);

        // Build interleaved render schedule (index ranges into the sorted lists)
        let schedule = build_render_schedule(&self.frame_commands, &self.geo_commands, &self.sdf_commands);

        // Flush custom shader uniforms with auto-injected built-ins
        self.shaders.flush(
            &self.gpu.queue,
            self.elapsed_time,
            self.delta_time,
            self.camera.viewport_size,
            self.mouse_pos,
        );

        let lighting_uniform = self.lighting.to_uniform();
        let clear_color = wgpu::Color {
            r: self.clear_color[0] as f64,
            g: self.clear_color[1] as f64,
            b: self.clear_color[2] as f64,
            a: self.clear_color[3] as f64,
        };

        // Write camera + lighting uniforms once for the whole frame
        self.sprites.prepare(&self.gpu.device, &self.gpu.queue, &self.camera, &lighting_uniform);
        // NOTE(review): the third argument is a hard-coded 0.0 even though
        // `self.elapsed_time` is tracked — confirm whether the SDF pipeline
        // expects the frame time here.
        self.sdf_pipeline.prepare(&self.gpu.queue, &self.camera, 0.0);

        // Run radiance cascade GI compute pass (if enabled)
        let gi_active = self.radiance.compute(
            &self.gpu,
            &mut encoder,
            &self.radiance_state,
            &self.lighting,
            self.camera.x,
            self.camera.y,
            self.camera.viewport_size[0],
            self.camera.viewport_size[1],
        );

        if self.postprocess.has_effects() {
            // Render to offscreen target, then apply effects to surface
            {
                let sprite_target = self.postprocess.sprite_target(&self.gpu);
                let camera_bg = self.sprites.camera_bind_group();

                if schedule.is_empty() {
                    // No commands at all — still need to clear
                    self.sprites.render(
                        &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                        &[], sprite_target, &mut encoder, Some(clear_color),
                    );
                } else {
                    // Only the first pass clears the target; later passes load.
                    let mut first = true;
                    for op in &schedule {
                        let cc = if first { Some(clear_color) } else { None };
                        first = false;
                        match op {
                            RenderOp::Sprites { start, end } => {
                                self.sprites.render(
                                    &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                                    &self.frame_commands[*start..*end],
                                    sprite_target, &mut encoder, cc,
                                );
                            }
                            RenderOp::Geometry { start, end } => {
                                self.geometry.flush_commands(
                                    &self.gpu.device, &mut encoder, sprite_target,
                                    camera_bg, &self.geo_commands[*start..*end], cc,
                                );
                            }
                            RenderOp::Sdf { start, end } => {
                                self.sdf_pipeline.render(
                                    &self.gpu.device, &mut encoder, sprite_target,
                                    &self.sdf_commands[*start..*end], cc,
                                );
                            }
                        }
                    }
                }
            }
            // Apply GI light texture to the offscreen target before post-processing
            if gi_active {
                let sprite_target = self.postprocess.sprite_target(&self.gpu);
                self.radiance.compose(&mut encoder, sprite_target);
            }
            self.postprocess.apply(&self.gpu, &mut encoder, &view);
        } else {
            // No effects — render directly to surface
            let camera_bg = self.sprites.camera_bind_group();

            if schedule.is_empty() {
                // No commands at all — still need to clear
                self.sprites.render(
                    &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                    &[], &view, &mut encoder, Some(clear_color),
                );
            } else {
                // Only the first pass clears the surface; later passes load.
                let mut first = true;
                for op in &schedule {
                    let cc = if first { Some(clear_color) } else { None };
                    first = false;
                    match op {
                        RenderOp::Sprites { start, end } => {
                            self.sprites.render(
                                &self.gpu.device, &self.gpu.queue, &self.textures, &self.shaders,
                                &self.frame_commands[*start..*end],
                                &view, &mut encoder, cc,
                            );
                        }
                        RenderOp::Geometry { start, end } => {
                            self.geometry.flush_commands(
                                &self.gpu.device, &mut encoder, &view,
                                camera_bg, &self.geo_commands[*start..*end], cc,
                            );
                        }
                        RenderOp::Sdf { start, end } => {
                            self.sdf_pipeline.render(
                                &self.gpu.device, &mut encoder, &view,
                                &self.sdf_commands[*start..*end], cc,
                            );
                        }
                    }
                }
            }
            // Apply GI light texture to the surface
            if gi_active {
                self.radiance.compose(&mut encoder, &view);
            }
        }

        self.gpu.queue.submit(std::iter::once(encoder.finish()));

        // Capture the rendered frame if requested (before present consumes the surface)
        if self.capture_pending {
            self.capture_pending = false;
            self.capture_result = self.capture_surface(&output.texture);
        }

        output.present();

        // Per-frame queues are consumed; the next frame starts empty.
        self.frame_commands.clear();
        self.geo_commands.clear();
        self.sdf_commands.clear();
        Ok(())
    }
391
392    /// Resize the surface when the window size changes.
393    /// GPU surface uses physical pixels; camera viewport uses logical pixels.
394    pub fn resize(&mut self, physical_width: u32, physical_height: u32, scale_factor: f32) {
395        if physical_width > 0 && physical_height > 0 {
396            self.scale_factor = scale_factor;
397            self.gpu.config.width = physical_width;
398            self.gpu.config.height = physical_height;
399            self.gpu.surface.configure(&self.gpu.device, &self.gpu.config);
400            // Camera uses logical pixels so 1 world unit ≈ 1 logical pixel at zoom 1
401            self.camera.viewport_size = [
402                physical_width as f32 / scale_factor,
403                physical_height as f32 / scale_factor,
404            ];
405        }
406    }
407
408    // ── Frame capture ─────────────────────────────────────────────────────
409
    /// Copy the surface texture to a CPU-side PNG. Returns None on failure.
    ///
    /// Performs a synchronous GPU→CPU readback: copy the texture into a
    /// mappable buffer, submit, map the buffer, block until mapping completes,
    /// strip the per-row padding wgpu requires, then PNG-encode. The call order
    /// (submit → map_async → poll(Wait) → recv) is what makes the blocking map
    /// work.
    fn capture_surface(&self, texture: &wgpu::Texture) -> Option<Vec<u8>> {
        let width = self.gpu.config.width;
        let height = self.gpu.config.height;
        // Assumes a 4-byte-per-pixel surface format (RGBA8/BGRA8 family) —
        // TODO confirm no other surface formats are configured.
        let bytes_per_pixel: u32 = 4;
        let unpadded_bytes_per_row = width * bytes_per_pixel;
        // wgpu requires bytes_per_row to be a multiple of 256 for buffer copies.
        let padded_bytes_per_row = ((unpadded_bytes_per_row + 255) / 256) * 256;

        let buffer = self.gpu.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("capture_readback"),
            size: (padded_bytes_per_row * height) as u64,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        let mut encoder = self.gpu.device.create_command_encoder(
            &wgpu::CommandEncoderDescriptor { label: Some("capture_encoder") },
        );

        encoder.copy_texture_to_buffer(
            wgpu::TexelCopyTextureInfo {
                texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::TexelCopyBufferInfo {
                buffer: &buffer,
                layout: wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(padded_bytes_per_row),
                    rows_per_image: Some(height),
                },
            },
            wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
        );

        self.gpu.queue.submit(std::iter::once(encoder.finish()));

        // Map the buffer synchronously: the callback sends the result over a
        // channel, and poll(Wait) blocks until the GPU finishes and callbacks run.
        let buffer_slice = buffer.slice(..);
        let (tx, rx) = std::sync::mpsc::channel();
        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
            let _ = tx.send(result);
        });
        self.gpu.device.poll(wgpu::Maintain::Wait);

        // Bail out if the channel closed or the mapping itself failed.
        if rx.recv().ok()?.ok().is_none() {
            return None;
        }

        let data = buffer_slice.get_mapped_range();

        // Strip row padding and handle BGRA→RGBA if needed.
        // Format detection via the Debug name covers all Bgra* variants.
        let is_bgra = format!("{:?}", self.gpu.config.format).contains("Bgra");
        let mut pixels = Vec::with_capacity((width * height * 4) as usize);
        for y in 0..height {
            let start = (y * padded_bytes_per_row) as usize;
            let end = start + (width * 4) as usize;
            let row = &data[start..end];
            if is_bgra {
                // Swap B↔R for each pixel
                for chunk in row.chunks_exact(4) {
                    pixels.extend_from_slice(&[chunk[2], chunk[1], chunk[0], chunk[3]]);
                }
            } else {
                pixels.extend_from_slice(row);
            }
        }

        // The mapped range must be dropped before unmapping the buffer.
        drop(data);
        buffer.unmap();

        // Encode to PNG using the `image` crate
        use image::ImageEncoder;
        let mut png_bytes = Vec::new();
        let encoder = image::codecs::png::PngEncoder::new(&mut png_bytes);
        if encoder.write_image(&pixels, width, height, image::ExtendedColorType::Rgba8).is_err() {
            return None;
        }

        Some(png_bytes)
    }
493
494    // ── Render target helpers ──────────────────────────────────────────────
495
496    /// Allocate a new off-screen render target and register it as a samplable texture.
497    pub fn create_render_target(&mut self, id: u32, width: u32, height: u32) {
498        let surface_format = self.gpu.config.format;
499        self.render_targets.create(&self.gpu.device, id, width, height, surface_format);
500        if let Some(view) = self.render_targets.get_view(id) {
501            self.textures.register_render_target(
502                &self.gpu.device,
503                &self.sprites.texture_bind_group_layout,
504                id,
505                view,
506                width,
507                height,
508            );
509        }
510    }
511
    /// Free a render target's GPU resources and remove it from the texture store.
    ///
    /// NOTE(review): presumably both stores treat an unknown `id` as a no-op —
    /// confirm against RenderTargetStore::destroy / TextureStore::unregister_render_target.
    pub fn destroy_render_target(&mut self, id: u32) {
        // Drop the GPU texture first, then the sampling registration that
        // pointed at its view.
        self.render_targets.destroy(id);
        self.textures.unregister_render_target(id);
    }
517
518    /// Render sprite commands into each queued render target (off-screen pre-pass).
519    ///
520    /// Call this BEFORE `render_frame()` so targets are ready as sprite inputs.
521    /// Uses a separate command encoder + GPU submit to avoid ordering conflicts.
522    pub fn render_targets_prepass(
523        &mut self,
524        target_queues: std::collections::HashMap<u32, Vec<SpriteCommand>>,
525    ) {
526        if target_queues.is_empty() {
527            return;
528        }
529
530        let mut encoder = self.gpu.device.create_command_encoder(
531            &wgpu::CommandEncoderDescriptor { label: Some("rt_encoder") },
532        );
533        let lighting_uniform = self.lighting.to_uniform();
534
535        for (target_id, mut cmds) in target_queues {
536            let view = self.render_targets.get_view(target_id);
537            let dims = self.render_targets.get_dims(target_id);
538            if let (Some(view), Some((tw, th))) = (view, dims) {
539                // Sort by layer → shader_id → blend_mode → texture_id
540                cmds.sort_by(|a, b| {
541                    a.layer
542                        .cmp(&b.layer)
543                        .then(a.shader_id.cmp(&b.shader_id))
544                        .then(a.blend_mode.cmp(&b.blend_mode))
545                        .then(a.texture_id.cmp(&b.texture_id))
546                });
547                // Orthographic camera: (0,0) = top-left of the render target
548                let target_camera = Camera2D {
549                    x: tw as f32 / 2.0,
550                    y: th as f32 / 2.0,
551                    zoom: 1.0,
552                    viewport_size: [tw as f32, th as f32],
553                    ..Camera2D::default()
554                };
555                self.sprites.prepare(&self.gpu.device, &self.gpu.queue, &target_camera, &lighting_uniform);
556                self.sprites.render(
557                    &self.gpu.device,
558                    &self.gpu.queue,
559                    &self.textures,
560                    &self.shaders,
561                    &cmds,
562                    view,
563                    &mut encoder,
564                    Some(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
565                );
566            }
567        }
568
569        self.gpu.queue.submit(std::iter::once(encoder.finish()));
570    }
571}