ranim_render/utils.rs

1use std::{
2    any::{Any, TypeId},
3    collections::HashMap,
4    fmt::Debug,
5    marker::PhantomData,
6};
7
8use tracing::info;
9use wgpu::util::DeviceExt;
10
11use crate::RenderResource;
12
/// Wgpu context
///
/// Bundles the instance/adapter/device/queue quadruple that all GPU
/// operations in this module are performed against.
pub struct WgpuContext {
    /// The wgpu instance
    pub instance: wgpu::Instance,
    /// The wgpu adapter
    pub adapter: wgpu::Adapter,
    /// The wgpu device
    pub device: wgpu::Device,
    /// The wgpu queue
    pub queue: wgpu::Queue,
}
24
impl WgpuContext {
    /// Create a new wgpu context
    ///
    /// Requests a high-performance adapter from the default instance, then a
    /// device/queue pair. With the `profiling` feature enabled, the device is
    /// requested with the adapter's timer-query features (restricted to those
    /// `wgpu_profiler` can use); otherwise a default device is requested.
    ///
    /// # Panics
    /// Panics if no suitable adapter or device can be obtained.
    pub async fn new() -> Self {
        let instance = wgpu::Instance::default();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::HighPerformance,
                ..Default::default()
            })
            .await
            .unwrap();
        info!("wgpu adapter info: {:?}", adapter.get_info());

        // Intersect with the adapter's features so we never request a timer
        // feature the hardware does not support.
        #[cfg(feature = "profiling")]
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor {
                label: None,
                required_features: adapter.features()
                    & wgpu_profiler::GpuProfiler::ALL_WGPU_TIMER_FEATURES,
                required_limits: wgpu::Limits::default(),
                memory_hints: wgpu::MemoryHints::default(),
                trace: wgpu::Trace::Off,
                experimental_features: wgpu::ExperimentalFeatures::disabled(),
            })
            .await
            .unwrap();
        #[cfg(not(feature = "profiling"))]
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor::default())
            .await
            .unwrap();

        Self {
            instance,
            adapter,
            device,
            queue,
        }
    }
}
65
/// A GPU buffer holding a single `T`, with a CPU-side copy kept alongside.
#[allow(unused)]
pub(crate) struct WgpuBuffer<T: bytemuck::Pod + bytemuck::Zeroable + Debug> {
    /// Optional debug label forwarded to wgpu.
    label: Option<&'static str>,
    /// The underlying GPU buffer.
    buffer: wgpu::Buffer,
    /// Usage flags the buffer was created with (must include `COPY_DST`,
    /// asserted in `new_init`).
    usage: wgpu::BufferUsages,
    /// CPU-side copy of the buffer contents, kept in sync by `set`.
    inner: T,
}
73
74impl<T: bytemuck::Pod + bytemuck::Zeroable + Debug> AsRef<wgpu::Buffer> for WgpuBuffer<T> {
75    fn as_ref(&self) -> &wgpu::Buffer {
76        &self.buffer
77    }
78}
79
#[allow(unused)]
impl<T: bytemuck::Pod + bytemuck::Zeroable + Debug> WgpuBuffer<T> {
    /// Create a buffer initialized with `data`.
    ///
    /// # Panics
    /// Panics if `usage` does not include `COPY_DST`, which `set` relies on.
    pub(crate) fn new_init(
        ctx: &WgpuContext,
        label: Option<&'static str>,
        usage: wgpu::BufferUsages,
        data: T,
    ) -> Self {
        assert!(
            usage.contains(wgpu::BufferUsages::COPY_DST),
            "Buffer {label:?} does not contains COPY_DST"
        );
        Self {
            label,
            buffer: ctx
                .device
                .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label,
                    contents: bytemuck::bytes_of(&data),
                    usage,
                }),
            usage,
            inner: data,
        }
    }

    /// Borrow the CPU-side copy of the buffer contents.
    pub(crate) fn get(&self) -> &T {
        &self.inner
    }

    /// Upload `data` to the GPU buffer and update the CPU-side copy.
    pub(crate) fn set(&mut self, ctx: &WgpuContext, data: T) {
        {
            // Staged write through the queue; the view is submitted with the
            // next queue submission once dropped at the end of this scope.
            let mut view = ctx
                .queue
                .write_buffer_with(
                    &self.buffer,
                    0,
                    wgpu::BufferSize::new(std::mem::size_of_val(&data) as u64).unwrap(),
                )
                .unwrap();
            view.copy_from_slice(bytemuck::bytes_of(&data));
        }
        // ctx.queue.submit([]);
        self.inner = data;
    }

    /// Read the buffer contents back from the GPU, blocking until the copy
    /// completes. Debug helper; not meant for per-frame use.
    #[allow(unused)]
    pub(crate) fn read_buffer(&self, ctx: &WgpuContext) -> Vec<u8> {
        let size = std::mem::size_of::<T>();
        // MAP_READ staging buffer so the source buffer itself does not need
        // to be host-mappable.
        let staging_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Debug Staging Buffer"),
            size: size as u64,
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let mut encoder = ctx
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Debug Read Encoder"),
            });

        encoder.copy_buffer_to_buffer(&self.buffer, 0, &staging_buffer, 0, size as u64);
        ctx.queue.submit(Some(encoder.finish()));

        let buffer_slice = staging_buffer.slice(..);
        // Request the mapping first, then poll the device so the map callback
        // can fire before we block on the channel.
        let (tx, rx) = async_channel::bounded(1);
        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
            pollster::block_on(tx.send(result)).unwrap()
        });
        ctx.device
            .poll(wgpu::PollType::wait_indefinitely())
            .unwrap();
        pollster::block_on(rx.recv()).unwrap().unwrap();

        buffer_slice.get_mapped_range().to_vec()
    }
}
159
/// A growable GPU buffer for a slice of `T`.
///
/// GPU memory is allocated lazily and reallocated whenever the required byte
/// size changes; only the element count is tracked on the CPU side.
pub(crate) struct WgpuVecBuffer<T: Default + bytemuck::Pod + bytemuck::Zeroable + Debug> {
    /// Optional debug label forwarded to wgpu.
    label: Option<&'static str>,
    /// Lazily created GPU buffer; `None` until first `resize`/`set`.
    pub(crate) buffer: Option<wgpu::Buffer>,
    /// Usage flags (must include `COPY_DST`, asserted in `new`).
    usage: wgpu::BufferUsages,
    /// Keep match to the buffer size
    len: usize,
    _phantom: PhantomData<T>,
    // inner: Vec<T>,
}
169
170impl<T: Default + bytemuck::Pod + bytemuck::Zeroable + Debug> WgpuVecBuffer<T> {
171    pub(crate) fn new(label: Option<&'static str>, usage: wgpu::BufferUsages) -> Self {
172        assert!(
173            usage.contains(wgpu::BufferUsages::COPY_DST),
174            "Buffer {label:?} does not contains COPY_DST"
175        );
176        Self {
177            label,
178            buffer: None,
179            usage,
180            len: 0,
181            _phantom: PhantomData,
182            // inner: vec![],
183        }
184    }
185
186    pub(crate) fn new_init(
187        ctx: &WgpuContext,
188        label: Option<&'static str>,
189        usage: wgpu::BufferUsages,
190        data: &[T],
191    ) -> Self {
192        let mut buffer = Self::new(label, usage);
193        buffer.set(ctx, data);
194        buffer
195    }
196
197    pub(crate) fn len(&self) -> usize {
198        self.len
199    }
200    // pub(crate) fn get(&self) -> &[T] {
201    //     self.inner.as_ref()
202    // }
203
204    pub(crate) fn resize(&mut self, ctx: &WgpuContext, len: usize) -> bool {
205        let size = (std::mem::size_of::<T>() * len) as u64;
206        let realloc = self
207            .buffer
208            .as_ref()
209            .map(|b| b.size() != size)
210            .unwrap_or(true);
211        if realloc {
212            self.len = len;
213            // self.inner.resize(len, T::default());
214            self.buffer = Some(ctx.device.create_buffer(&wgpu::BufferDescriptor {
215                label: self.label,
216                size,
217                usage: self.usage,
218                mapped_at_creation: false,
219            }))
220        }
221        realloc
222    }
223
224    pub(crate) fn set(&mut self, ctx: &WgpuContext, data: &[T]) -> bool {
225        // trace!("{} {}", self.inner.len(), data.len());
226        // self.inner.resize(data.len(), T::default());
227        // self.inner.copy_from_slice(data);
228        self.len = data.len();
229        let realloc = self
230            .buffer
231            .as_ref()
232            .map(|b| b.size() != (std::mem::size_of_val(data)) as u64)
233            .unwrap_or(true);
234
235        if realloc {
236            // info!("realloc");
237            // NOTE: create_buffer_init sometimes causes freezing in wasm
238            let buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
239                label: self.label,
240                size: (std::mem::size_of_val(data)) as u64,
241                usage: self.usage,
242                mapped_at_creation: false,
243            });
244            ctx.queue
245                .write_buffer(&buffer, 0, bytemuck::cast_slice(data));
246            // info!("new");
247            self.buffer = Some(buffer);
248        } else {
249            // info!("queue copy");
250            {
251                let mut view = ctx
252                    .queue
253                    .write_buffer_with(
254                        self.buffer.as_ref().unwrap(),
255                        0,
256                        wgpu::BufferSize::new((std::mem::size_of_val(data)) as u64).unwrap(),
257                    )
258                    .unwrap();
259                view.copy_from_slice(bytemuck::cast_slice(data));
260            }
261            // ctx.queue.submit([]);
262        }
263        // info!("done");
264        realloc
265    }
266
267    #[allow(unused)]
268    pub(crate) fn read_buffer(&self, ctx: &WgpuContext) -> Option<Vec<u8>> {
269        let buffer = self.buffer.as_ref()?;
270        let size = std::mem::size_of::<T>() * self.len;
271        let staging_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
272            label: Some("Debug Staging Buffer"),
273            size: size as u64,
274            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
275            mapped_at_creation: false,
276        });
277
278        let mut encoder = ctx
279            .device
280            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
281                label: Some("Debug Read Encoder"),
282            });
283
284        encoder.copy_buffer_to_buffer(buffer, 0, &staging_buffer, 0, size as u64);
285        ctx.queue.submit(Some(encoder.finish()));
286
287        let buffer_slice = staging_buffer.slice(..);
288        let (tx, rx) = async_channel::bounded(1);
289        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
290            pollster::block_on(tx.send(result)).unwrap()
291        });
292        ctx.device
293            .poll(wgpu::PollType::wait_indefinitely())
294            .unwrap();
295        pollster::block_on(rx.recv()).unwrap().unwrap();
296
297        let x = buffer_slice.get_mapped_range().to_vec();
298        Some(x)
299    }
300}
301
/// A storage for pipelines
///
/// Caches one instance per concrete pipeline type, keyed by `TypeId`.
#[derive(Default)]
pub struct PipelinesStorage {
    /// Type-erased pipeline instances, one per concrete type.
    inner: HashMap<TypeId, Box<dyn Any + Send + Sync>>,
}
307
308impl PipelinesStorage {
309    pub(crate) fn get_or_init<P: RenderResource + Send + Sync + 'static>(
310        &mut self,
311        ctx: &WgpuContext,
312    ) -> &P {
313        let id = std::any::TypeId::of::<P>();
314        self.inner
315            .entry(id)
316            .or_insert_with(|| {
317                let pipeline = P::new(ctx);
318                Box::new(pipeline)
319            })
320            .downcast_ref::<P>()
321            .unwrap()
322    }
323    // pub(crate) fn get_or_init_mut<P: RenderResource + 'static>(
324    //     &mut self,
325    //     ctx: &WgpuContext,
326    // ) -> &mut P {
327    //     let id = std::any::TypeId::of::<P>();
328    //     self.inner
329    //         .entry(id)
330    //         .or_insert_with(|| {
331    //             let pipeline = P::new(ctx);
332    //             Box::new(pipeline)
333    //         })
334    //         .downcast_mut::<P>()
335    //         .unwrap()
336    // }
337}
338
339// Should not be called frequently
340/// Get texture data from a wgpu texture
341#[allow(unused)]
342pub(crate) fn get_texture_data(ctx: &WgpuContext, texture: &::wgpu::Texture) -> Vec<u8> {
343    const ALIGNMENT: usize = 256;
344    use ::wgpu;
345    let bytes_per_row =
346        ((texture.size().width * 4) as f32 / ALIGNMENT as f32).ceil() as usize * ALIGNMENT;
347    let mut texture_data = vec![0u8; bytes_per_row * texture.size().height as usize];
348
349    let output_staging_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
350        label: Some("Output Staging Buffer"),
351        size: (bytes_per_row * texture.size().height as usize) as u64,
352        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
353        mapped_at_creation: false,
354    });
355
356    let mut encoder = ctx
357        .device
358        .create_command_encoder(&wgpu::CommandEncoderDescriptor {
359            label: Some("Get Texture Data"),
360        });
361    encoder.copy_texture_to_buffer(
362        wgpu::TexelCopyTextureInfo {
363            aspect: wgpu::TextureAspect::All,
364            texture,
365            mip_level: 0,
366            origin: wgpu::Origin3d::ZERO,
367        },
368        wgpu::TexelCopyBufferInfo {
369            buffer: &output_staging_buffer,
370            layout: wgpu::TexelCopyBufferLayout {
371                offset: 0,
372                bytes_per_row: Some(bytes_per_row as u32),
373                rows_per_image: Some(texture.size().height),
374            },
375        },
376        texture.size(),
377    );
378    ctx.queue.submit(Some(encoder.finish()));
379    pollster::block_on(async {
380        let buffer_slice = output_staging_buffer.slice(..);
381
382        // NOTE: We have to create the mapping THEN device.poll() before await
383        // the future. Otherwise the application will freeze.
384        let (tx, rx) = async_channel::bounded(1);
385        buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
386            pollster::block_on(tx.send(result)).unwrap()
387        });
388        ctx.device
389            .poll(wgpu::PollType::wait_indefinitely())
390            .unwrap();
391        rx.recv().await.unwrap().unwrap();
392
393        {
394            let view = buffer_slice.get_mapped_range();
395            // texture_data.copy_from_slice(&view);
396            for y in 0..texture.size().height as usize {
397                let src_row_start = y * bytes_per_row;
398                let dst_row_start = y * texture.size().width as usize * 4;
399
400                texture_data[dst_row_start..dst_row_start + texture.size().width as usize * 4]
401                    .copy_from_slice(
402                        &view[src_row_start..src_row_start + texture.size().width as usize * 4],
403                    );
404            }
405        }
406    });
407    output_staging_buffer.unmap();
408    texture_data
409}
410
#[cfg(test)]
mod test {
    /// Placeholder test; the bytemuck byte-equality experiment below is kept
    /// for reference but currently disabled.
    #[test]
    fn test() {
        // let x = vec![0, 1, 2, 3];
        // assert_eq!(
        //     bytemuck::bytes_of(&[x.as_slice()]),
        //     bytemuck::bytes_of(&x)
        // )
    }
}
421}