1#![cfg_attr(docsrs, feature(doc_cfg))]
4#![allow(rustdoc::private_intra_doc_links)]
5#![doc(
6 html_logo_url = "https://raw.githubusercontent.com/AzurIce/ranim/refs/heads/main/assets/ranim.svg",
7 html_favicon_url = "https://raw.githubusercontent.com/AzurIce/ranim/refs/heads/main/assets/ranim.svg"
8)]
9pub mod pipelines;
11pub mod primitives;
13pub mod utils;
15
16use glam::{Mat4, Vec2};
17use image::{ImageBuffer, Rgba};
18use pipelines::{Map3dTo2dPipeline, VItemPipeline};
19use primitives::RenderCommand;
20
21use crate::primitives::{RenderPool, vitem::VItemRenderInstance};
22use ranim_core::{
23 primitives::{camera_frame::CameraFrame, vitem::VItemPrimitive},
24 store::CoreItemStore,
25};
26use utils::{PipelinesStorage, WgpuBuffer, WgpuContext};
27
/// Texture format of the renderer's output target (8-bit RGBA, sRGB).
pub(crate) const OUTPUT_TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
/// Row alignment in bytes used when copying the render texture into the
/// staging buffer; wgpu requires `bytes_per_row` to be a multiple of 256
/// (`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`).
const ALIGNMENT: usize = 256;
30
/// Global puffin profiler that GPU timing results are forwarded into
/// (see the `profiling` feature path in `Renderer::render`).
#[cfg(feature = "profiling")]
pub static PUFFIN_GPU_PROFILER: std::sync::LazyLock<std::sync::Mutex<puffin::GlobalProfiler>> =
    std::sync::LazyLock::new(|| std::sync::Mutex::new(puffin::GlobalProfiler::default()));
37
#[allow(unused)]
#[cfg(feature = "profiling")]
mod profiling_utils {
    use wgpu_profiler::GpuTimerQueryResult;

    /// Recursively prints GPU timer query results to the console, indenting
    /// nested scopes by 4 columns per nesting level.
    pub fn scopes_to_console_recursive(results: &[GpuTimerQueryResult], indentation: u32) {
        for scope in results {
            if indentation > 0 {
                // Scale the padding with the nesting depth so nested scopes
                // are visually offset (a fixed width of 4 would render every
                // depth identically).
                print!("{:<width$}", "|", width = (indentation * 4) as usize);
            }

            if let Some(time) = &scope.time {
                // Time range is reported in seconds; convert to microseconds.
                println!(
                    "{:.3}μs - {}",
                    (time.end - time.start) * 1000.0 * 1000.0,
                    scope.label
                );
            } else {
                println!("n/a - {}", scope.label);
            }

            if !scope.nested_queries.is_empty() {
                scopes_to_console_recursive(&scope.nested_queries, indentation + 1);
            }
        }
    }

    /// Clears the terminal and prints the latest profiling results, or a
    /// placeholder message when none are available yet.
    pub fn console_output(
        results: &Option<Vec<GpuTimerQueryResult>>,
        enabled_features: wgpu::Features,
    ) {
        puffin::profile_scope!("console_output");
        // ANSI escape: clear screen, then move the cursor to the top-left.
        print!("\x1B[2J\x1B[1;1H");
        println!("Welcome to wgpu_profiler demo!");
        println!();
        println!(
            "Press space to write out a trace file that can be viewed in chrome's chrome://tracing"
        );
        println!();
        match results {
            Some(results) => {
                scopes_to_console_recursive(results, 0);
            }
            None => println!("No profiling results available yet!"),
        }
    }
}
85
/// Camera data uploaded to the GPU as a uniform buffer.
///
/// `#[repr(C, align(16))]` plus the bytemuck derives let the struct be
/// written into a wgpu buffer as raw bytes with a layout matching the shader's
/// uniform block.
#[repr(C, align(16))]
#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniforms {
    // Projection matrix, from `CameraFrame::projection_matrix`.
    proj_mat: Mat4,
    // View matrix, from `CameraFrame::view_matrix`.
    view_mat: Mat4,
    // Half the frame size in scene units: (width / 2, height / 2).
    half_frame_size: Vec2,
    // Pads the struct to a 16-byte multiple for uniform-buffer layout rules.
    _padding: [f32; 2],
}
171
172impl CameraUniforms {
173 pub fn from_camera_frame(camera_frame: &CameraFrame, frame_height: f64, ratio: f64) -> Self {
174 Self {
175 proj_mat: camera_frame
176 .projection_matrix(frame_height, ratio)
177 .as_mat4(),
178 view_mat: camera_frame.view_matrix().as_mat4(),
179 half_frame_size: Vec2::new(
180 (frame_height * ratio) as f32 / 2.0,
181 frame_height as f32 / 2.0,
182 ),
183 _padding: [0.0; 2],
184 }
185 }
186 pub(crate) fn as_bind_group_layout_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
187 wgpu::BindGroupLayoutEntry {
188 binding,
189 visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::VERTEX_FRAGMENT,
190 ty: wgpu::BindingType::Buffer {
191 ty: wgpu::BufferBindingType::Uniform,
192 has_dynamic_offset: false,
193 min_binding_size: None,
194 },
195 count: None,
196 }
197 }
198}
199
/// Bind group wrapping the camera uniforms buffer (bound at group 0 by the
/// render and compute passes).
pub(crate) struct CameraUniformsBindGroup {
    pub(crate) bind_group: wgpu::BindGroup,
}
203
/// Allows borrowing the inner [`wgpu::BindGroup`] directly.
impl AsRef<wgpu::BindGroup> for CameraUniformsBindGroup {
    fn as_ref(&self) -> &wgpu::BindGroup {
        &self.bind_group
    }
}
209
210impl CameraUniformsBindGroup {
211 pub(crate) fn bind_group_layout(ctx: &WgpuContext) -> wgpu::BindGroupLayout {
212 ctx.device
213 .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
214 label: Some("Simple Pipeline Uniforms"),
215 entries: &[CameraUniforms::as_bind_group_layout_entry(0)],
216 })
217 }
218
219 pub(crate) fn new(ctx: &WgpuContext, uniforms_buffer: &WgpuBuffer<CameraUniforms>) -> Self {
220 let bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
221 label: Some("Camera Uniforms"),
222 layout: &Self::bind_group_layout(ctx),
223 entries: &[wgpu::BindGroupEntry {
224 binding: 0,
225 resource: wgpu::BindingResource::Buffer(
226 uniforms_buffer.as_ref().as_entire_buffer_binding(),
227 ),
228 }],
229 });
230 Self { bind_group }
231 }
232}
233
/// Offscreen renderer: draws items into a render texture and supports
/// reading the result back to the CPU.
pub struct Renderer {
    // Output size in pixels: (width, height).
    size: (usize, usize),
    // Render/compute pipelines, created lazily via `get_or_init`.
    pub(crate) pipelines: PipelinesStorage,

    pub render_textures: RenderTextures,
    pub camera_state: Camera,

    // Buffer the render texture is copied into for CPU mapping; rows are
    // padded to `ALIGNMENT` bytes.
    output_staging_buffer: wgpu::Buffer,
    // Cached tightly-packed pixel data from the last read-back.
    output_texture_data: Option<Vec<u8>>,
    // True while `output_texture_data` matches the current texture contents;
    // cleared whenever something is rendered.
    pub(crate) output_texture_updated: bool,

    #[cfg(feature = "profiling")]
    pub(crate) profiler: wgpu_profiler::GpuProfiler,
}
250
/// GPU-side camera state: the uniform buffer and its bind group, plus the
/// frame parameters used to rebuild the uniforms on update.
pub struct Camera {
    // Height of the camera frame in scene units.
    frame_height: f64,
    // Aspect ratio (width / height).
    ratio: f64,
    uniforms_buffer: WgpuBuffer<CameraUniforms>,
    uniforms_bind_group: CameraUniformsBindGroup,
}
257
258impl Camera {
259 pub fn new(ctx: &WgpuContext, camera: &CameraFrame, frame_height: f64, ratio: f64) -> Self {
260 let uniforms = CameraUniforms::from_camera_frame(camera, frame_height, ratio);
261 let uniforms_buffer = WgpuBuffer::new_init(
262 ctx,
263 Some("Uniforms Buffer"),
264 wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
265 uniforms,
266 );
267 let uniforms_bind_group = CameraUniformsBindGroup::new(ctx, &uniforms_buffer);
268 Self {
269 frame_height,
270 ratio,
271 uniforms_buffer,
272 uniforms_bind_group,
273 }
274 }
275 pub fn update_uniforms(&mut self, wgpu_ctx: &WgpuContext, camera_frame: &CameraFrame) {
276 self.uniforms_buffer.set(
277 wgpu_ctx,
278 CameraUniforms::from_camera_frame(camera_frame, self.frame_height, self.ratio),
279 );
280 }
281}
282
283impl Renderer {
284 pub fn new(ctx: &WgpuContext, frame_height: f64, width: usize, height: usize) -> Self {
285 let camera = CameraFrame::new();
286
287 let render_textures = RenderTextures::new(ctx, width, height);
288 let camera_state = Camera::new(ctx, &camera, frame_height, width as f64 / height as f64);
289 let bytes_per_row = ((width * 4) as f32 / ALIGNMENT as f32).ceil() as usize * ALIGNMENT;
290 let output_staging_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
291 label: None,
292 size: (bytes_per_row * height) as u64,
293 usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
294 mapped_at_creation: false,
295 });
296
297 #[cfg(feature = "profiling")]
304 let profiler = wgpu_profiler::GpuProfiler::new(
305 &ctx.device,
306 wgpu_profiler::GpuProfilerSettings::default(),
307 )
308 .unwrap();
309
310 Self {
311 size: (width, height),
312 pipelines: PipelinesStorage::default(),
313 render_textures,
314 output_staging_buffer,
316 output_texture_data: None,
317 output_texture_updated: false,
318 camera_state,
320 #[cfg(feature = "profiling")]
322 profiler,
323 }
324 }
325
326 pub fn clear_screen(&mut self, ctx: &WgpuContext, clear_color: wgpu::Color) {
328 #[cfg(feature = "profiling")]
329 profiling::scope!("clear_screen");
330 let mut encoder = ctx
332 .device
333 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
334 label: Some("Encoder"),
335 });
336
337 {
339 let RenderTextures {
340 render_view,
341 ..
344 } = &self.render_textures;
345 let _ = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
346 label: Some("VMobject Clear Pass"),
347 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
348 depth_slice: None,
351 view: render_view,
352 resolve_target: None,
353 ops: wgpu::Operations {
354 load: wgpu::LoadOp::Clear(clear_color),
355 store: wgpu::StoreOp::Store,
356 },
357 })],
358 depth_stencil_attachment: None,
370 occlusion_query_set: None,
371 timestamp_writes: None,
372 });
373 }
374 ctx.queue.submit(Some(encoder.finish()));
375 self.output_texture_updated = false;
376 }
377
378 pub fn render_store_with_pool(
379 &mut self,
380 ctx: &WgpuContext,
381 clear_color: wgpu::Color,
382 store: &CoreItemStore,
383 pool: &mut RenderPool,
384 ) {
385 let camera_frame = &store.camera_frames[0];
387 let visual_items = store
388 .vitems
389 .iter()
390 .map(|x| pool.alloc::<VItemRenderInstance, VItemPrimitive>(ctx, x))
391 .collect::<Vec<_>>();
392 let render_primitives = visual_items
393 .into_iter()
394 .filter_map(|k| pool.get(*k).map(|x| x as &dyn RenderCommand))
395 .collect::<Vec<_>>();
396
397 self.camera_state.update_uniforms(ctx, camera_frame);
398
399 {
400 #[cfg(feature = "profiling")]
401 profiling::scope!("render");
402
403 self.render(ctx, clear_color, &render_primitives);
404 }
405
406 drop(render_primitives);
407 }
408
409 pub fn render(
410 &mut self,
411 ctx: &WgpuContext,
412 clear_color: wgpu::Color,
413 renderable: &impl RenderCommand,
414 ) {
415 self.clear_screen(ctx, clear_color);
416 let mut encoder = ctx
417 .device
418 .create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
419
420 {
422 #[cfg(feature = "profiling")]
423 let mut scope = self.profiler.scope("compute pass", &mut encoder);
424 #[cfg(feature = "profiling")]
425 let mut cpass = scope.scoped_compute_pass("VItem Map Points Compute Pass");
426 #[cfg(not(feature = "profiling"))]
427 let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
428 label: Some("VItem Map Points Compute Pass"),
429 timestamp_writes: None,
430 });
431 cpass.set_pipeline(self.pipelines.get_or_init::<Map3dTo2dPipeline>(ctx));
432 cpass.set_bind_group(0, &self.camera_state.uniforms_bind_group.bind_group, &[]);
433
434 renderable.encode_compute_pass_command(&mut cpass);
435 }
436 {
437 #[cfg(feature = "profiling")]
438 let mut scope = self.profiler.scope("render pass", &mut encoder);
439 let RenderTextures {
440 render_view,
442 ..
443 } = &mut self.render_textures;
444 let rpass_desc = wgpu::RenderPassDescriptor {
445 label: Some("VItem Render Pass"),
446 color_attachments: &[Some(wgpu::RenderPassColorAttachment {
447 depth_slice: None,
450 view: render_view,
451 resolve_target: None,
452 ops: wgpu::Operations {
453 load: wgpu::LoadOp::Load,
454 store: wgpu::StoreOp::Store,
455 },
456 })],
457 depth_stencil_attachment: None,
458 timestamp_writes: None,
459 occlusion_query_set: None,
460 };
461 #[cfg(feature = "profiling")]
462 let mut rpass = scope.scoped_render_pass("VItem Render Pass", rpass_desc);
463 #[cfg(not(feature = "profiling"))]
464 let mut rpass = encoder.begin_render_pass(&rpass_desc);
465 rpass.set_pipeline(self.pipelines.get_or_init::<VItemPipeline>(ctx));
466 rpass.set_bind_group(0, &self.camera_state.uniforms_bind_group.bind_group, &[]);
467
468 renderable.encode_render_pass_command(&mut rpass);
469 }
470 #[cfg(not(feature = "profiling"))]
481 ctx.queue.submit(Some(encoder.finish()));
482
483 #[cfg(feature = "profiling")]
484 {
485 self.profiler.resolve_queries(&mut encoder);
486 {
487 profiling::scope!("submit");
488 ctx.queue.submit(Some(encoder.finish()));
489 }
490
491 renderable.debug(ctx);
492
493 self.profiler.end_frame().unwrap();
495
496 ctx.device
498 .poll(wgpu::PollType::wait_indefinitely())
499 .unwrap();
500 let latest_profiler_results = self
501 .profiler
502 .process_finished_frame(ctx.queue.get_timestamp_period());
503 let mut gpu_profiler = PUFFIN_GPU_PROFILER.lock().unwrap();
505 wgpu_profiler::puffin::output_frame_to_puffin(
506 &mut gpu_profiler,
507 &latest_profiler_results.unwrap(),
508 );
509 gpu_profiler.new_frame();
510 }
511
512 self.output_texture_updated = false;
513 }
514
515 fn update_rendered_texture_data(&mut self, ctx: &WgpuContext) {
516 let bytes_per_row =
517 ((self.size.0 * 4) as f64 / ALIGNMENT as f64).ceil() as usize * ALIGNMENT;
518 let mut texture_data =
519 self.output_texture_data
520 .take()
521 .unwrap_or(vec![0; self.size.0 * self.size.1 * 4]);
522
523 let mut encoder = ctx
524 .device
525 .create_command_encoder(&wgpu::CommandEncoderDescriptor {
526 label: Some("Render Encoder"),
527 });
528
529 let RenderTextures { render_texture, .. } = &self.render_textures;
530 encoder.copy_texture_to_buffer(
531 wgpu::TexelCopyTextureInfo {
532 aspect: wgpu::TextureAspect::All,
533 texture: render_texture,
534 mip_level: 0,
535 origin: wgpu::Origin3d::ZERO,
536 },
537 wgpu::TexelCopyBufferInfo {
538 buffer: &self.output_staging_buffer,
539 layout: wgpu::TexelCopyBufferLayout {
540 offset: 0,
541 bytes_per_row: Some(bytes_per_row as u32),
542 rows_per_image: Some(self.size.1 as u32),
543 },
544 },
545 render_texture.size(),
546 );
547 ctx.queue.submit(Some(encoder.finish()));
548
549 {
550 let buffer_slice = self.output_staging_buffer.slice(..);
551
552 let (tx, rx) = async_channel::bounded(1);
555 buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
556 pollster::block_on(tx.send(result)).unwrap()
557 });
558 ctx.device
559 .poll(wgpu::PollType::wait_indefinitely())
560 .unwrap();
561 pollster::block_on(rx.recv()).unwrap().unwrap();
562
563 {
564 let view = buffer_slice.get_mapped_range();
565 for y in 0..self.size.1 {
567 let src_row_start = y * bytes_per_row;
568 let dst_row_start = y * self.size.0 * 4;
569
570 texture_data[dst_row_start..dst_row_start + self.size.0 * 4]
571 .copy_from_slice(&view[src_row_start..src_row_start + self.size.0 * 4]);
572 }
573 }
574 };
575 self.output_staging_buffer.unmap();
576
577 self.output_texture_data = Some(texture_data);
578 self.output_texture_updated = true;
579 }
580
581 pub fn get_rendered_texture_data(&mut self, ctx: &WgpuContext) -> &[u8] {
586 if !self.output_texture_updated {
587 self.update_rendered_texture_data(ctx);
589 }
590 self.output_texture_data.as_ref().unwrap()
591 }
592 pub fn get_rendered_texture_img_buffer(
593 &mut self,
594 ctx: &WgpuContext,
595 ) -> ImageBuffer<Rgba<u8>, &[u8]> {
596 let size = self.size;
597 let data = self.get_rendered_texture_data(ctx);
598 ImageBuffer::from_raw(size.0 as u32, size.1 as u32, data).unwrap()
599 }
600}
601
/// The offscreen render target texture and the views used to draw into and
/// read from it.
#[allow(unused)]
pub struct RenderTextures {
    // Backing texture in `OUTPUT_TEXTURE_FORMAT` (sRGB).
    pub render_texture: wgpu::Texture,
    // sRGB view, used as the color attachment.
    pub render_view: wgpu::TextureView,
    // Linear (`Rgba8Unorm`) view over the same texture.
    pub linear_render_view: wgpu::TextureView,
}
614
615impl RenderTextures {
616 pub(crate) fn new(ctx: &WgpuContext, width: usize, height: usize) -> Self {
617 let format = OUTPUT_TEXTURE_FORMAT;
618 let render_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
619 label: Some("Target Texture"),
620 size: wgpu::Extent3d {
621 width: width as u32,
622 height: height as u32,
623 depth_or_array_layers: 1,
624 },
625 mip_level_count: 1,
626 sample_count: 1,
627 dimension: wgpu::TextureDimension::D2,
628 format,
629 usage: wgpu::TextureUsages::RENDER_ATTACHMENT
630 | wgpu::TextureUsages::COPY_SRC
631 | wgpu::TextureUsages::COPY_DST
632 | wgpu::TextureUsages::TEXTURE_BINDING,
633 view_formats: &[
634 wgpu::TextureFormat::Rgba8UnormSrgb,
635 wgpu::TextureFormat::Rgba8Unorm,
636 ],
637 });
638 let render_view = render_texture.create_view(&wgpu::TextureViewDescriptor {
670 format: Some(format),
671 ..Default::default()
672 });
673 let linear_render_view = render_texture.create_view(&wgpu::TextureViewDescriptor {
674 format: Some(wgpu::TextureFormat::Rgba8Unorm),
675 ..Default::default()
676 });
677 Self {
685 render_texture,
686 render_view,
689 linear_render_view,
690 }
693 }
694}
695
/// A GPU resource that can be constructed from a [`WgpuContext`] alone.
pub(crate) trait RenderResource {
    /// Creates the resource using the given wgpu context.
    fn new(ctx: &WgpuContext) -> Self
    where
        Self: Sized;
}