//! arcane_core/renderer — top-level 2D renderer: GPU context, sprite and
//! geometry pipelines, textures, lighting, radiance-cascade GI, and
//! post-processing.

1mod gpu;
2mod sprite;
3mod texture;
4pub mod camera;
5mod tilemap;
6mod lighting;
7pub mod font;
8pub mod msdf;
9pub mod shader;
10pub mod postprocess;
11pub mod radiance;
12pub mod geometry;
13pub mod rendertarget;
14
15pub use gpu::GpuContext;
16pub use sprite::{SpriteCommand, SpritePipeline};
17pub use texture::{TextureId, TextureStore};
18pub use camera::Camera2D;
19pub use tilemap::{Tilemap, TilemapStore};
20pub use lighting::{LightingState, LightingUniform, PointLight, LightData, MAX_LIGHTS};
21pub use msdf::{MsdfFont, MsdfFontStore, MsdfGlyph};
22pub use shader::ShaderStore;
23pub use postprocess::PostProcessPipeline;
24pub use radiance::{RadiancePipeline, RadianceState, EmissiveSurface, Occluder, DirectionalLight, SpotLight};
25pub use geometry::GeometryBatch;
26pub use rendertarget::RenderTargetStore;
27
28use anyhow::Result;
29
/// Top-level renderer that owns the GPU context, sprite pipeline, and textures.
pub struct Renderer {
    /// GPU device/queue/surface and the current surface configuration.
    pub gpu: GpuContext,
    /// Batched sprite pipeline; also owns the camera/texture bind group layouts.
    pub sprites: SpritePipeline,
    /// Immediate-mode geometry (lines/shapes) drawn on top of sprites each frame.
    pub geometry: GeometryBatch,
    /// Custom shader store; dirty uniforms are flushed once per frame.
    pub shaders: ShaderStore,
    /// Post-processing chain; when it has effects, sprites render to an
    /// offscreen target first and the chain resolves to the surface.
    pub postprocess: PostProcessPipeline,
    /// Loaded textures and their bind groups (including registered render targets).
    pub textures: TextureStore,
    /// 2D camera; `viewport_size` is kept in logical pixels (see `resize`).
    pub camera: Camera2D,
    /// Point-light state, converted to a GPU uniform each frame.
    pub lighting: LightingState,
    /// Radiance-cascade GI compute/compose pipeline.
    pub radiance: RadiancePipeline,
    /// CPU-side inputs (emissive surfaces, occluders, lights) for the GI pass.
    pub radiance_state: RadianceState,
    /// Off-screen render targets (owns the GPU textures; bind groups in TextureStore).
    pub render_targets: RenderTargetStore,
    /// Sprite commands queued for the current frame.
    pub frame_commands: Vec<SpriteCommand>,
    /// Display scale factor (e.g. 2.0 on Retina). Used to convert physical → logical pixels.
    pub scale_factor: f32,
    /// Clear color for the render pass background. Default: dark blue-gray.
    pub clear_color: [f32; 4],
}
51
52impl Renderer {
53    /// Create a new renderer attached to a winit window.
54    pub fn new(window: std::sync::Arc<winit::window::Window>) -> Result<Self> {
55        let scale_factor = window.scale_factor() as f32;
56        let gpu = GpuContext::new(window)?;
57        let sprites = SpritePipeline::new(&gpu);
58        let geometry = GeometryBatch::new(&gpu);
59        let shaders = ShaderStore::new(&gpu);
60        let postprocess = PostProcessPipeline::new(&gpu);
61        let radiance_pipeline = RadiancePipeline::new(&gpu);
62        let textures = TextureStore::new();
63        // Set camera viewport to logical pixels so world units are DPI-independent
64        let logical_w = gpu.config.width as f32 / scale_factor;
65        let logical_h = gpu.config.height as f32 / scale_factor;
66        let camera = Camera2D {
67            viewport_size: [logical_w, logical_h],
68            ..Camera2D::default()
69        };
70        Ok(Self {
71            gpu,
72            sprites,
73            geometry,
74            shaders,
75            postprocess,
76            radiance: radiance_pipeline,
77            radiance_state: RadianceState::new(),
78            textures,
79            camera,
80            lighting: LightingState::default(),
81            render_targets: RenderTargetStore::new(),
82            frame_commands: Vec::new(),
83            scale_factor,
84            clear_color: [0.1, 0.1, 0.15, 1.0],
85        })
86    }
87
88    /// Render the current frame's sprite commands and present.
89    pub fn render_frame(&mut self) -> Result<()> {
90        let output = self.gpu.surface.get_current_texture()?;
91        let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
92
93        let mut encoder = self.gpu.device.create_command_encoder(
94            &wgpu::CommandEncoderDescriptor { label: Some("frame_encoder") },
95        );
96
97        // Sort by layer → shader_id → blend_mode → texture_id for batching
98        self.frame_commands.sort_by(|a, b| {
99            a.layer
100                .cmp(&b.layer)
101                .then(a.shader_id.cmp(&b.shader_id))
102                .then(a.blend_mode.cmp(&b.blend_mode))
103                .then(a.texture_id.cmp(&b.texture_id))
104        });
105
106        // Flush dirty custom shader uniforms
107        self.shaders.flush(&self.gpu);
108
109        let lighting_uniform = self.lighting.to_uniform();
110        let clear_color = wgpu::Color {
111            r: self.clear_color[0] as f64,
112            g: self.clear_color[1] as f64,
113            b: self.clear_color[2] as f64,
114            a: self.clear_color[3] as f64,
115        };
116
117        // Run radiance cascade GI compute pass (if enabled)
118        let gi_active = self.radiance.compute(
119            &self.gpu,
120            &mut encoder,
121            &self.radiance_state,
122            &self.lighting,
123            self.camera.x,
124            self.camera.y,
125            self.camera.viewport_size[0],
126            self.camera.viewport_size[1],
127        );
128
129        if self.postprocess.has_effects() {
130            // Render sprites to offscreen target, then apply effects to surface
131            {
132                let sprite_target = self.postprocess.sprite_target(&self.gpu);
133                self.sprites.render(
134                    &self.gpu,
135                    &self.textures,
136                    &self.shaders,
137                    &self.camera,
138                    &lighting_uniform,
139                    &self.frame_commands,
140                    sprite_target,
141                    &mut encoder,
142                    clear_color,
143                );
144                // Geometry overlays on sprites before post-processing
145                let camera_bg = self.sprites.camera_bind_group();
146                self.geometry.flush(&self.gpu, &mut encoder, sprite_target, camera_bg);
147            }
148            // Apply GI light texture to the offscreen target before post-processing
149            if gi_active {
150                let sprite_target = self.postprocess.sprite_target(&self.gpu);
151                self.radiance.compose(&mut encoder, sprite_target);
152            }
153            self.postprocess.apply(&self.gpu, &mut encoder, &view);
154        } else {
155            // No effects — render directly to surface
156            self.sprites.render(
157                &self.gpu,
158                &self.textures,
159                &self.shaders,
160                &self.camera,
161                &lighting_uniform,
162                &self.frame_commands,
163                &view,
164                &mut encoder,
165                clear_color,
166            );
167            // Geometry overlays on sprites
168            let camera_bg = self.sprites.camera_bind_group();
169            self.geometry.flush(&self.gpu, &mut encoder, &view, camera_bg);
170            // Apply GI light texture to the surface
171            if gi_active {
172                self.radiance.compose(&mut encoder, &view);
173            }
174        }
175
176        self.gpu.queue.submit(std::iter::once(encoder.finish()));
177        output.present();
178
179        self.frame_commands.clear();
180        Ok(())
181    }
182
183    /// Resize the surface when the window size changes.
184    /// GPU surface uses physical pixels; camera viewport uses logical pixels.
185    pub fn resize(&mut self, physical_width: u32, physical_height: u32, scale_factor: f32) {
186        if physical_width > 0 && physical_height > 0 {
187            self.scale_factor = scale_factor;
188            self.gpu.config.width = physical_width;
189            self.gpu.config.height = physical_height;
190            self.gpu.surface.configure(&self.gpu.device, &self.gpu.config);
191            // Camera uses logical pixels so 1 world unit ≈ 1 logical pixel at zoom 1
192            self.camera.viewport_size = [
193                physical_width as f32 / scale_factor,
194                physical_height as f32 / scale_factor,
195            ];
196        }
197    }
198
199    // ── Render target helpers ──────────────────────────────────────────────
200
201    /// Allocate a new off-screen render target and register it as a samplable texture.
202    pub fn create_render_target(&mut self, id: u32, width: u32, height: u32) {
203        let surface_format = self.gpu.config.format;
204        self.render_targets.create(&self.gpu, id, width, height, surface_format);
205        if let Some(view) = self.render_targets.get_view(id) {
206            self.textures.register_render_target(
207                &self.gpu,
208                &self.sprites.texture_bind_group_layout,
209                id,
210                view,
211                width,
212                height,
213            );
214        }
215    }
216
217    /// Free a render target's GPU resources and remove it from the texture store.
218    pub fn destroy_render_target(&mut self, id: u32) {
219        self.render_targets.destroy(id);
220        self.textures.unregister_render_target(id);
221    }
222
223    /// Render sprite commands into each queued render target (off-screen pre-pass).
224    ///
225    /// Call this BEFORE `render_frame()` so targets are ready as sprite inputs.
226    /// Uses a separate command encoder + GPU submit to avoid ordering conflicts.
227    pub fn render_targets_prepass(
228        &mut self,
229        target_queues: std::collections::HashMap<u32, Vec<SpriteCommand>>,
230    ) {
231        if target_queues.is_empty() {
232            return;
233        }
234
235        let mut encoder = self.gpu.device.create_command_encoder(
236            &wgpu::CommandEncoderDescriptor { label: Some("rt_encoder") },
237        );
238        let lighting_uniform = self.lighting.to_uniform();
239
240        for (target_id, mut cmds) in target_queues {
241            let view = self.render_targets.get_view(target_id);
242            let dims = self.render_targets.get_dims(target_id);
243            if let (Some(view), Some((tw, th))) = (view, dims) {
244                // Sort by layer → shader_id → blend_mode → texture_id
245                cmds.sort_by(|a, b| {
246                    a.layer
247                        .cmp(&b.layer)
248                        .then(a.shader_id.cmp(&b.shader_id))
249                        .then(a.blend_mode.cmp(&b.blend_mode))
250                        .then(a.texture_id.cmp(&b.texture_id))
251                });
252                // Orthographic camera: (0,0) = top-left of the render target
253                let target_camera = Camera2D {
254                    x: tw as f32 / 2.0,
255                    y: th as f32 / 2.0,
256                    zoom: 1.0,
257                    viewport_size: [tw as f32, th as f32],
258                    ..Camera2D::default()
259                };
260                self.sprites.render(
261                    &self.gpu,
262                    &self.textures,
263                    &self.shaders,
264                    &target_camera,
265                    &lighting_uniform,
266                    &cmds,
267                    view,
268                    &mut encoder,
269                    wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
270                );
271            }
272        }
273
274        self.gpu.queue.submit(std::iter::once(encoder.finish()));
275    }
276}