// ranim_render/lib.rs

1//! Rendering stuff in ranim
2// #![warn(missing_docs)]
3#![cfg_attr(docsrs, feature(doc_cfg))]
4#![allow(rustdoc::private_intra_doc_links)]
5#![doc(
6    html_logo_url = "https://raw.githubusercontent.com/AzurIce/ranim/refs/heads/main/assets/ranim.svg",
7    html_favicon_url = "https://raw.githubusercontent.com/AzurIce/ranim/refs/heads/main/assets/ranim.svg"
8)]
9/// The pipelines
10pub mod pipelines;
11/// The basic renderable structs
12pub mod primitives;
13/// Rendering related utils
14pub mod utils;
15
16use glam::{Mat4, Vec2};
17use image::{ImageBuffer, Rgba};
18use pipelines::{Map3dTo2dPipeline, VItemPipeline};
19use primitives::RenderCommand;
20
21use crate::primitives::{RenderPool, vitem::VItemRenderInstance};
22use ranim_core::{
23    primitives::{camera_frame::CameraFrame, vitem::VItemPrimitive},
24    store::CoreItemStore,
25};
26use utils::{PipelinesStorage, WgpuBuffer, WgpuContext};
27
28pub(crate) const OUTPUT_TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
29const ALIGNMENT: usize = 256;
30
#[cfg(feature = "profiling")]
// Since the timing information we get from WGPU may be several frames behind the CPU, we can't report these frames to
// the singleton returned by `puffin::GlobalProfiler::lock`. Instead, we need our own `puffin::GlobalProfiler` that can
// be several frames behind puffin's main global profiler singleton.
/// Dedicated global profiler for GPU timing data, which may lag several frames
/// behind the CPU-side puffin profiler.
pub static PUFFIN_GPU_PROFILER: std::sync::LazyLock<std::sync::Mutex<puffin::GlobalProfiler>> =
    std::sync::LazyLock::new(|| std::sync::Mutex::new(puffin::GlobalProfiler::default()));
37
#[allow(unused)]
#[cfg(feature = "profiling")]
/// Helpers for dumping GPU profiler results to the console.
mod profiling_utils {
    use wgpu_profiler::GpuTimerQueryResult;

    /// Recursively prints a tree of GPU timer scopes, indenting nested scopes
    /// by 4 columns per nesting level.
    pub fn scopes_to_console_recursive(results: &[GpuTimerQueryResult], indentation: u32) {
        for scope in results {
            if indentation > 0 {
                // Indent proportionally to the nesting depth (4 columns per
                // level); a fixed width of 4 would flatten all nested levels.
                print!("{:<width$}", "|", width = (indentation * 4) as usize);
            }

            if let Some(time) = &scope.time {
                // `time` is in seconds; display in microseconds.
                println!(
                    "{:.3}μs - {}",
                    (time.end - time.start) * 1000.0 * 1000.0,
                    scope.label
                );
            } else {
                println!("n/a - {}", scope.label);
            }

            if !scope.nested_queries.is_empty() {
                scopes_to_console_recursive(&scope.nested_queries, indentation + 1);
            }
        }
    }

    /// Clears the terminal and prints the latest finished frame's profiling
    /// results (if any).
    ///
    /// NOTE(review): `enabled_features` is currently unused — kept for parity
    /// with the wgpu_profiler demo this was adapted from; confirm before
    /// removing it from the signature.
    pub fn console_output(
        results: &Option<Vec<GpuTimerQueryResult>>,
        enabled_features: wgpu::Features,
    ) {
        puffin::profile_scope!("console_output");
        print!("\x1B[2J\x1B[1;1H"); // Clear terminal and put cursor to first row first column
        println!("Welcome to wgpu_profiler demo!");
        println!();
        println!(
            "Press space to write out a trace file that can be viewed in chrome's chrome://tracing"
        );
        println!();
        match results {
            Some(results) => {
                scopes_to_console_recursive(results, 0);
            }
            None => println!("No profiling results available yet!"),
        }
    }
}
85
86// MARK: TimelineEvalResult
87
88// /// Ext for [`SealedRanimScene`] to eval to [`TimelineEvalResult`]
89// pub trait RenderEval {
90//     /// Get the total seconds of the
91//     fn total_secs(&self) -> f64;
92//     /// Evaluate the state of timelines at `target_sec`
93//     fn eval_sec(&self, target_sec: f64) -> TimelineEvalResult;
94//     /// Evaluate the state of timelines at `alpha`
95//     fn eval_alpha(&self, alpha: f64) -> TimelineEvalResult {
96//         self.eval_sec(alpha * self.total_secs())
97//     }
98// }
99
100// impl RenderEval for SealedRanimScene {
101//     fn total_secs(&self) -> f64 {
102//         self.total_secs()
103//     }
104//     // MARK: eval_sec
105//     /// Evaluate the state of timelines at `target_sec`
106//     fn eval_sec(&self, target_sec: f64) -> TimelineEvalResult {
107//         let primitives = self.eval_primitives_at_sec(target_sec).collect::<Vec<_>>();
108
109//         let mut visual_items = Vec::with_capacity(self.timelines_cnt());
110//         let mut camera_frames = Vec::new();
111
112//         for (primitive, id_hash) in primitives {
113//             match primitive {
114//                 Primitives::CameraFrame(x) => {
115//                     if let Some(x) = x.into_iter().next() {
116//                         camera_frames.push((x, id_hash));
117//                     }
118//                 }
119//                 Primitives::VItemPrimitive(x) => {
120//                     visual_items.push((Box::new(x) as Box::<dyn Renderable>, id_hash));
121//                 }
122//             }
123//         }
124
125//         if camera_frames.is_empty() {
126//             warn!("No camera frame found at sec {target_sec}");
127//         }
128
129//         TimelineEvalResult {
130//             camera_frames,
131//             visual_items,
132//         }
133//     }
134// }
135
136// trait RenderableResult {
137//     fn convert(self) -> EvalResult<dyn Renderable>;
138// }
139
140// impl<T: Renderable + 'static> RenderableResult for EvalResult<T> {
141//     fn convert(self) -> EvalResult<dyn Renderable> {
142//         match self {
143//             Self::Dynamic(t) => EvalResult::Dynamic(t as Box<dyn Renderable>),
144//             Self::Static(rc) => EvalResult::Static(rc as Arc<dyn Renderable>),
145//         }
146//     }
147// }
148
149// /// The evaluation result
150// ///
151// /// This is produced from [`SealedRanimScene::eval_alpha`] or [`SealedRanimScene::eval_sec`]
152// #[allow(clippy::type_complexity)]
153// pub struct TimelineEvalResult {
154//     /// (`EvalResult<CameraFrame>`, id hash)
155//     pub camera_frames: Vec<(CameraFrame, u64)>,
156//     /// (`id`, `EvalResult<Box<dyn RenderableItem>>`, id hash)
157//     pub visual_items: Vec<(Box<dyn Renderable>, u64)>,
158// }
159
160// MARK: CameraUniforms
161
#[repr(C, align(16))]
#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
/// Uniforms for the camera
///
/// Laid out with `#[repr(C, align(16))]` so it can be uploaded to a GPU
/// uniform buffer via `bytemuck` without manual repacking.
pub struct CameraUniforms {
    /// Projection matrix built from frame height and aspect ratio.
    proj_mat: Mat4,
    /// View matrix of the camera frame.
    view_mat: Mat4,
    /// Half of the frame size: (half_width, half_height) — presumably in
    /// world/frame units (see `CameraFrame`); confirm against the shaders.
    half_frame_size: Vec2,
    /// Explicit padding to keep the struct a multiple of 16 bytes.
    _padding: [f32; 2],
}
171
172impl CameraUniforms {
173    pub fn from_camera_frame(camera_frame: &CameraFrame, frame_height: f64, ratio: f64) -> Self {
174        Self {
175            proj_mat: camera_frame
176                .projection_matrix(frame_height, ratio)
177                .as_mat4(),
178            view_mat: camera_frame.view_matrix().as_mat4(),
179            half_frame_size: Vec2::new(
180                (frame_height * ratio) as f32 / 2.0,
181                frame_height as f32 / 2.0,
182            ),
183            _padding: [0.0; 2],
184        }
185    }
186    pub(crate) fn as_bind_group_layout_entry(binding: u32) -> wgpu::BindGroupLayoutEntry {
187        wgpu::BindGroupLayoutEntry {
188            binding,
189            visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::VERTEX_FRAGMENT,
190            ty: wgpu::BindingType::Buffer {
191                ty: wgpu::BufferBindingType::Uniform,
192                has_dynamic_offset: false,
193                min_binding_size: None,
194            },
195            count: None,
196        }
197    }
198}
199
/// Bind group wrapping the camera uniforms buffer (bound at binding 0).
pub(crate) struct CameraUniformsBindGroup {
    pub(crate) bind_group: wgpu::BindGroup,
}
203
// Allows passing a `CameraUniformsBindGroup` wherever a `&wgpu::BindGroup`
// is accepted.
impl AsRef<wgpu::BindGroup> for CameraUniformsBindGroup {
    fn as_ref(&self) -> &wgpu::BindGroup {
        &self.bind_group
    }
}
209
210impl CameraUniformsBindGroup {
211    pub(crate) fn bind_group_layout(ctx: &WgpuContext) -> wgpu::BindGroupLayout {
212        ctx.device
213            .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
214                label: Some("Simple Pipeline Uniforms"),
215                entries: &[CameraUniforms::as_bind_group_layout_entry(0)],
216            })
217    }
218
219    pub(crate) fn new(ctx: &WgpuContext, uniforms_buffer: &WgpuBuffer<CameraUniforms>) -> Self {
220        let bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor {
221            label: Some("Camera Uniforms"),
222            layout: &Self::bind_group_layout(ctx),
223            entries: &[wgpu::BindGroupEntry {
224                binding: 0,
225                resource: wgpu::BindingResource::Buffer(
226                    uniforms_buffer.as_ref().as_entire_buffer_binding(),
227                ),
228            }],
229        });
230        Self { bind_group }
231    }
232}
233
234// MARK: Renderer
235
/// The renderer: owns the pipeline cache, the output render textures, the
/// camera GPU state, and the staging buffer used to read frames back to CPU.
pub struct Renderer {
    /// Output size in pixels: (width, height).
    size: (usize, usize),
    /// Lazily-initialized cache of render/compute pipelines.
    pub(crate) pipelines: PipelinesStorage,

    pub render_textures: RenderTextures,
    pub camera_state: Camera,

    /// CPU-mappable buffer the render texture is copied into for readback.
    /// Rows are padded to `ALIGNMENT` (256) bytes.
    output_staging_buffer: wgpu::Buffer,
    /// Cached tightly-packed RGBA8 pixels from the last readback, if any.
    output_texture_data: Option<Vec<u8>>,
    /// Set to `false` whenever the render texture changes, invalidating the
    /// cached readback data.
    pub(crate) output_texture_updated: bool,

    #[cfg(feature = "profiling")]
    pub(crate) profiler: wgpu_profiler::GpuProfiler,
}
250
/// GPU-side camera state: the uniform buffer, its bind group, and the frame
/// parameters used to rebuild the uniforms.
pub struct Camera {
    /// Height of the camera frame — presumably in world/frame units (see
    /// `CameraFrame`); confirm against the core crate.
    frame_height: f64,
    /// Width / height aspect ratio.
    ratio: f64,
    uniforms_buffer: WgpuBuffer<CameraUniforms>,
    uniforms_bind_group: CameraUniformsBindGroup,
}
257
258impl Camera {
259    pub fn new(ctx: &WgpuContext, camera: &CameraFrame, frame_height: f64, ratio: f64) -> Self {
260        let uniforms = CameraUniforms::from_camera_frame(camera, frame_height, ratio);
261        let uniforms_buffer = WgpuBuffer::new_init(
262            ctx,
263            Some("Uniforms Buffer"),
264            wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
265            uniforms,
266        );
267        let uniforms_bind_group = CameraUniformsBindGroup::new(ctx, &uniforms_buffer);
268        Self {
269            frame_height,
270            ratio,
271            uniforms_buffer,
272            uniforms_bind_group,
273        }
274    }
275    pub fn update_uniforms(&mut self, wgpu_ctx: &WgpuContext, camera_frame: &CameraFrame) {
276        self.uniforms_buffer.set(
277            wgpu_ctx,
278            CameraUniforms::from_camera_frame(camera_frame, self.frame_height, self.ratio),
279        );
280    }
281}
282
283impl Renderer {
284    pub fn new(ctx: &WgpuContext, frame_height: f64, width: usize, height: usize) -> Self {
285        let camera = CameraFrame::new();
286
287        let render_textures = RenderTextures::new(ctx, width, height);
288        let camera_state = Camera::new(ctx, &camera, frame_height, width as f64 / height as f64);
289        let bytes_per_row = ((width * 4) as f32 / ALIGNMENT as f32).ceil() as usize * ALIGNMENT;
290        let output_staging_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
291            label: None,
292            size: (bytes_per_row * height) as u64,
293            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
294            mapped_at_creation: false,
295        });
296
297        // trace!("init renderer uniform: {:?}", uniforms);
298
299        // let bg = rgba8(0x33, 0x33, 0x33, 0xff).convert::<LinearSrgb>();
300        // let [r, g, b, a] = bg.components.map(|x| x as f64);
301        // let clear_color = wgpu::Color { r, g, b, a };
302
303        #[cfg(feature = "profiling")]
304        let profiler = wgpu_profiler::GpuProfiler::new(
305            &ctx.device,
306            wgpu_profiler::GpuProfilerSettings::default(),
307        )
308        .unwrap();
309
310        Self {
311            size: (width, height),
312            pipelines: PipelinesStorage::default(),
313            render_textures,
314            // Outputs
315            output_staging_buffer,
316            output_texture_data: None,
317            output_texture_updated: false,
318            // Camera State
319            camera_state,
320            // Profiler
321            #[cfg(feature = "profiling")]
322            profiler,
323        }
324    }
325
326    /// Clears the screen with `Renderer::clear_color`
327    pub fn clear_screen(&mut self, ctx: &WgpuContext, clear_color: wgpu::Color) {
328        #[cfg(feature = "profiling")]
329        profiling::scope!("clear_screen");
330        // trace!("clear screen {:?}", self.clear_color);
331        let mut encoder = ctx
332            .device
333            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
334                label: Some("Encoder"),
335            });
336
337        // Clear
338        {
339            let RenderTextures {
340                render_view,
341                // multisample_view,
342                // depth_stencil_view,
343                ..
344            } = &self.render_textures;
345            let _ = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
346                label: Some("VMobject Clear Pass"),
347                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
348                    // view: multisample_view,
349                    // resolve_target: Some(render_view),
350                    depth_slice: None,
351                    view: render_view,
352                    resolve_target: None,
353                    ops: wgpu::Operations {
354                        load: wgpu::LoadOp::Clear(clear_color),
355                        store: wgpu::StoreOp::Store,
356                    },
357                })],
358                // depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
359                //     view: depth_stencil_view,
360                //     depth_ops: Some(wgpu::Operations {
361                //         load: wgpu::LoadOp::Clear(1.0),
362                //         store: wgpu::StoreOp::Store,
363                //     }),
364                //     stencil_ops: Some(wgpu::Operations {
365                //         load: wgpu::LoadOp::Clear(0),
366                //         store: wgpu::StoreOp::Store,
367                //     }),
368                // }),
369                depth_stencil_attachment: None,
370                occlusion_query_set: None,
371                timestamp_writes: None,
372            });
373        }
374        ctx.queue.submit(Some(encoder.finish()));
375        self.output_texture_updated = false;
376    }
377
378    pub fn render_store_with_pool(
379        &mut self,
380        ctx: &WgpuContext,
381        clear_color: wgpu::Color,
382        store: &CoreItemStore,
383        pool: &mut RenderPool,
384    ) {
385        // println!("camera: {}, vitems: {}", store.camera_frames.len(), store.vitems.len());
386        let camera_frame = &store.camera_frames[0];
387        let visual_items = store
388            .vitems
389            .iter()
390            .map(|x| pool.alloc::<VItemRenderInstance, VItemPrimitive>(ctx, x))
391            .collect::<Vec<_>>();
392        let render_primitives = visual_items
393            .into_iter()
394            .filter_map(|k| pool.get(*k).map(|x| x as &dyn RenderCommand))
395            .collect::<Vec<_>>();
396
397        self.camera_state.update_uniforms(ctx, camera_frame);
398
399        {
400            #[cfg(feature = "profiling")]
401            profiling::scope!("render");
402
403            self.render(ctx, clear_color, &render_primitives);
404        }
405
406        drop(render_primitives);
407    }
408
409    pub fn render(
410        &mut self,
411        ctx: &WgpuContext,
412        clear_color: wgpu::Color,
413        renderable: &impl RenderCommand,
414    ) {
415        self.clear_screen(ctx, clear_color);
416        let mut encoder = ctx
417            .device
418            .create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
419
420        // renderable.update_clip_info(&ctx.wgpu_ctx, &self.camera);
421        {
422            #[cfg(feature = "profiling")]
423            let mut scope = self.profiler.scope("compute pass", &mut encoder);
424            #[cfg(feature = "profiling")]
425            let mut cpass = scope.scoped_compute_pass("VItem Map Points Compute Pass");
426            #[cfg(not(feature = "profiling"))]
427            let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
428                label: Some("VItem Map Points Compute Pass"),
429                timestamp_writes: None,
430            });
431            cpass.set_pipeline(self.pipelines.get_or_init::<Map3dTo2dPipeline>(ctx));
432            cpass.set_bind_group(0, &self.camera_state.uniforms_bind_group.bind_group, &[]);
433
434            renderable.encode_compute_pass_command(&mut cpass);
435        }
436        {
437            #[cfg(feature = "profiling")]
438            let mut scope = self.profiler.scope("render pass", &mut encoder);
439            let RenderTextures {
440                // multisample_view,
441                render_view,
442                ..
443            } = &mut self.render_textures;
444            let rpass_desc = wgpu::RenderPassDescriptor {
445                label: Some("VItem Render Pass"),
446                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
447                    // view: multisample_view,
448                    // resolve_target: Some(render_view),
449                    depth_slice: None,
450                    view: render_view,
451                    resolve_target: None,
452                    ops: wgpu::Operations {
453                        load: wgpu::LoadOp::Load,
454                        store: wgpu::StoreOp::Store,
455                    },
456                })],
457                depth_stencil_attachment: None,
458                timestamp_writes: None,
459                occlusion_query_set: None,
460            };
461            #[cfg(feature = "profiling")]
462            let mut rpass = scope.scoped_render_pass("VItem Render Pass", rpass_desc);
463            #[cfg(not(feature = "profiling"))]
464            let mut rpass = encoder.begin_render_pass(&rpass_desc);
465            rpass.set_pipeline(self.pipelines.get_or_init::<VItemPipeline>(ctx));
466            rpass.set_bind_group(0, &self.camera_state.uniforms_bind_group.bind_group, &[]);
467
468            renderable.encode_render_pass_command(&mut rpass);
469        }
470        // renderable.encode_render_command(
471        //     &ctx.wgpu_ctx,
472        //     &mut ctx.pipelines,
473        //     &mut encoder,
474        //     &self.uniforms_bind_group.bind_group,
475        //     &self.render_textures,
476        //     #[cfg(feature = "profiling")]
477        //     &mut self.profiler,
478        // );
479
480        #[cfg(not(feature = "profiling"))]
481        ctx.queue.submit(Some(encoder.finish()));
482
483        #[cfg(feature = "profiling")]
484        {
485            self.profiler.resolve_queries(&mut encoder);
486            {
487                profiling::scope!("submit");
488                ctx.queue.submit(Some(encoder.finish()));
489            }
490
491            renderable.debug(ctx);
492
493            // Signal to the profiler that the frame is finished.
494            self.profiler.end_frame().unwrap();
495
496            // Query for oldest finished frame (this is almost certainly not the one we just submitted!) and display results in the command line.
497            ctx.device
498                .poll(wgpu::PollType::wait_indefinitely())
499                .unwrap();
500            let latest_profiler_results = self
501                .profiler
502                .process_finished_frame(ctx.queue.get_timestamp_period());
503            // profiling_utils::console_output(&latest_profiler_results, ctx.wgpu_ctx.device.features());
504            let mut gpu_profiler = PUFFIN_GPU_PROFILER.lock().unwrap();
505            wgpu_profiler::puffin::output_frame_to_puffin(
506                &mut gpu_profiler,
507                &latest_profiler_results.unwrap(),
508            );
509            gpu_profiler.new_frame();
510        }
511
512        self.output_texture_updated = false;
513    }
514
515    fn update_rendered_texture_data(&mut self, ctx: &WgpuContext) {
516        let bytes_per_row =
517            ((self.size.0 * 4) as f64 / ALIGNMENT as f64).ceil() as usize * ALIGNMENT;
518        let mut texture_data =
519            self.output_texture_data
520                .take()
521                .unwrap_or(vec![0; self.size.0 * self.size.1 * 4]);
522
523        let mut encoder = ctx
524            .device
525            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
526                label: Some("Render Encoder"),
527            });
528
529        let RenderTextures { render_texture, .. } = &self.render_textures;
530        encoder.copy_texture_to_buffer(
531            wgpu::TexelCopyTextureInfo {
532                aspect: wgpu::TextureAspect::All,
533                texture: render_texture,
534                mip_level: 0,
535                origin: wgpu::Origin3d::ZERO,
536            },
537            wgpu::TexelCopyBufferInfo {
538                buffer: &self.output_staging_buffer,
539                layout: wgpu::TexelCopyBufferLayout {
540                    offset: 0,
541                    bytes_per_row: Some(bytes_per_row as u32),
542                    rows_per_image: Some(self.size.1 as u32),
543                },
544            },
545            render_texture.size(),
546        );
547        ctx.queue.submit(Some(encoder.finish()));
548
549        {
550            let buffer_slice = self.output_staging_buffer.slice(..);
551
552            // NOTE: We have to create the mapping THEN device.poll() before await
553            // the future. Otherwise the application will freeze.
554            let (tx, rx) = async_channel::bounded(1);
555            buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
556                pollster::block_on(tx.send(result)).unwrap()
557            });
558            ctx.device
559                .poll(wgpu::PollType::wait_indefinitely())
560                .unwrap();
561            pollster::block_on(rx.recv()).unwrap().unwrap();
562
563            {
564                let view = buffer_slice.get_mapped_range();
565                // texture_data.copy_from_slice(&view);
566                for y in 0..self.size.1 {
567                    let src_row_start = y * bytes_per_row;
568                    let dst_row_start = y * self.size.0 * 4;
569
570                    texture_data[dst_row_start..dst_row_start + self.size.0 * 4]
571                        .copy_from_slice(&view[src_row_start..src_row_start + self.size.0 * 4]);
572                }
573            }
574        };
575        self.output_staging_buffer.unmap();
576
577        self.output_texture_data = Some(texture_data);
578        self.output_texture_updated = true;
579    }
580
581    // pub(crate) fn get_render_texture(&self) -> &wgpu::Texture {
582    //     &self.render_texture
583    // }
584
585    pub fn get_rendered_texture_data(&mut self, ctx: &WgpuContext) -> &[u8] {
586        if !self.output_texture_updated {
587            // trace!("[Camera] Updating rendered texture data...");
588            self.update_rendered_texture_data(ctx);
589        }
590        self.output_texture_data.as_ref().unwrap()
591    }
592    pub fn get_rendered_texture_img_buffer(
593        &mut self,
594        ctx: &WgpuContext,
595    ) -> ImageBuffer<Rgba<u8>, &[u8]> {
596        let size = self.size;
597        let data = self.get_rendered_texture_data(ctx);
598        ImageBuffer::from_raw(size.0 as u32, size.1 as u32, data).unwrap()
599    }
600}
601
// MARK: RenderTextures
/// Texture resources used for rendering
#[allow(unused)]
pub struct RenderTextures {
    /// The render target; also the source for CPU readbacks (`COPY_SRC`).
    pub render_texture: wgpu::Texture,
    // multisample_texture: wgpu::Texture,
    // depth_stencil_texture: wgpu::Texture,
    /// sRGB (`Rgba8UnormSrgb`) view of `render_texture`.
    pub render_view: wgpu::TextureView,
    /// Linear (`Rgba8Unorm`) view of the same texture.
    pub linear_render_view: wgpu::TextureView,
    // pub(crate) multisample_view: wgpu::TextureView,
    // pub(crate) depth_stencil_view: wgpu::TextureView,
}
614
615impl RenderTextures {
616    pub(crate) fn new(ctx: &WgpuContext, width: usize, height: usize) -> Self {
617        let format = OUTPUT_TEXTURE_FORMAT;
618        let render_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
619            label: Some("Target Texture"),
620            size: wgpu::Extent3d {
621                width: width as u32,
622                height: height as u32,
623                depth_or_array_layers: 1,
624            },
625            mip_level_count: 1,
626            sample_count: 1,
627            dimension: wgpu::TextureDimension::D2,
628            format,
629            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
630                | wgpu::TextureUsages::COPY_SRC
631                | wgpu::TextureUsages::COPY_DST
632                | wgpu::TextureUsages::TEXTURE_BINDING,
633            view_formats: &[
634                wgpu::TextureFormat::Rgba8UnormSrgb,
635                wgpu::TextureFormat::Rgba8Unorm,
636            ],
637        });
638        // let multisample_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
639        //     label: Some("Multisample Texture"),
640        //     size: wgpu::Extent3d {
641        //         width: width as u32,
642        //         height: height as u32,
643        //         depth_or_array_layers: 1,
644        //     },
645        //     mip_level_count: 1,
646        //     sample_count: 4,
647        //     dimension: wgpu::TextureDimension::D2,
648        //     format,
649        //     usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
650        //     view_formats: &[
651        //         wgpu::TextureFormat::Rgba8UnormSrgb,
652        //         wgpu::TextureFormat::Rgba8Unorm,
653        //     ],
654        // });
655        // let depth_stencil_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
656        //     label: Some("Depth Stencil Texture"),
657        //     size: wgpu::Extent3d {
658        //         width: width as u32,
659        //         height: height as u32,
660        //         depth_or_array_layers: 1,
661        //     },
662        //     mip_level_count: 1,
663        //     sample_count: 1,
664        //     dimension: wgpu::TextureDimension::D2,
665        //     format: wgpu::TextureFormat::Depth24PlusStencil8,
666        //     usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
667        //     view_formats: &[],
668        // });
669        let render_view = render_texture.create_view(&wgpu::TextureViewDescriptor {
670            format: Some(format),
671            ..Default::default()
672        });
673        let linear_render_view = render_texture.create_view(&wgpu::TextureViewDescriptor {
674            format: Some(wgpu::TextureFormat::Rgba8Unorm),
675            ..Default::default()
676        });
677        // let multisample_view = multisample_texture.create_view(&wgpu::TextureViewDescriptor {
678        //     format: Some(format),
679        //     ..Default::default()
680        // });
681        // let depth_stencil_view =
682        //     depth_stencil_texture.create_view(&wgpu::TextureViewDescriptor::default());
683
684        Self {
685            render_texture,
686            // multisample_texture,
687            // depth_stencil_texture,
688            render_view,
689            linear_render_view,
690            // multisample_view,
691            // depth_stencil_view,
692        }
693    }
694}
695
/// A render resource.
///
/// Implemented by GPU resources that can be constructed from a
/// [`WgpuContext`] alone — presumably the pipelines cached in
/// `PipelinesStorage`; confirm against the `pipelines`/`utils` modules.
pub(crate) trait RenderResource {
    /// Creates the resource from the given wgpu context.
    fn new(ctx: &WgpuContext) -> Self
    where
        Self: Sized;
}