limnus_wgpu_window/
lib.rs

/*
 * Copyright (c) Peter Bjorklund. All rights reserved. https://github.com/swamp/limnus
 * Licensed under the MIT License. See LICENSE in the project root for license information.
 */
use limnus_app::prelude::{App, Plugin};
use limnus_default_stages::RenderFirst;
use limnus_local_resource::prelude::LocalResource;
use limnus_screen::WindowMessage;
use limnus_system_params::{LoReM, Msg};
use std::default::Default;
use std::sync::Arc;
use tracing::{debug, info, trace, warn};
use wgpu::{
    Adapter, BackendOptions, Backends, Device, DeviceDescriptor, Features, Instance,
    InstanceDescriptor, InstanceFlags, Limits, MemoryBudgetThresholds, MemoryHints,
    PowerPreference, Queue, RequestAdapterOptions, Surface, SurfaceConfiguration, SurfaceError,
    Trace,
};
use winit::dpi::PhysicalSize;
use winit::window::Window;

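/// Per-window GPU state: the render surface, device, queue, and the current
/// surface configuration, stored as a limnus local resource.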
#[derive(Debug, LocalResource)]
pub struct WgpuWindow {
    surface: Arc<Surface<'static>>,
    device: Arc<Device>,
    queue: Arc<Queue>,

    config: SurfaceConfiguration,
}

impl WgpuWindow {
    #[must_use]
    pub const fn queue(&self) -> &Arc<Queue> {
        &self.queue
    }
}

pub struct ReceiveAnnoyingAsync {
    pub device_info: Option<BasicDeviceInfo>,
}

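/// GPU handles produced by [`annoying_async_device_creation`] and later consumed
/// by [`WgpuWindow::new`] during plugin post-initialization.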
#[derive(Debug, LocalResource)]
pub struct BasicDeviceInfo {
    pub adapter: Adapter,
    pub device: Arc<Device>,
    pub queue: Arc<Queue>,
    pub surface: Arc<Surface<'static>>,
    pub physical_surface_size: PhysicalSize<u32>,
}

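/// Requests an adapter for a single power preference, returning `None` (after
/// logging a warning) so the caller can fall back to the next preference.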
async fn try_pref(
    instance: &Instance,
    surface: Option<&Surface<'_>>,
    pref: PowerPreference,
) -> Option<wgpu::Adapter> {
    debug!(?pref, "trying to find adapter with power preference");
    let res = instance
        .request_adapter(&RequestAdapterOptions {
            power_preference: pref,
            compatible_surface: surface,
            force_fallback_adapter: false,
        })
        .await;

    match res {
        Ok(adapter) => Some(adapter),
        Err(err) => {
            warn!("request_adapter({pref:?}) failed: {err}");
            None
        }
    }
}

/// Picks an adapter, preferring `HighPerformance`, then `LowPower`, then `None`.
///
/// # Errors
///
/// Returns [`InitError::NoAdapter`] if no adapter is available for any power preference.
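///
/// # Example
///
/// A minimal sketch (not compiled as a doctest); it assumes a default `wgpu::Instance`
/// and skips the surface compatibility check by passing `None`:
///
/// ```ignore
/// let instance = wgpu::Instance::default();
/// let adapter = pick_best_adapter(&instance, None).await?;
/// println!("using adapter: {:?}", adapter.get_info());
/// ```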
pub async fn pick_best_adapter(
    instance: &Instance,
    surface: Option<&Surface<'_>>,
) -> Result<Adapter, InitError> {
    for &pref in &[
        PowerPreference::HighPerformance,
        PowerPreference::LowPower,
        PowerPreference::None,
    ] {
        if let Some(a) = try_pref(instance, surface, pref).await {
            return Ok(a);
        }
    }
    Err(InitError::NoAdapter)
}

#[derive(Debug)]
pub enum WgpuInitError {
    NoAdapter,
    NoDevice,
}

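/// Limit tiers tried in order by [`request_device_smart`], from most to least capable.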
#[derive(Debug, Clone, Copy)]
pub enum LimitsTier {
    ModernDefault,
    Downlevel,
    WebGL2,
}

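/// A created device and queue together with the limits tier that succeeded and the
/// exact limits that were requested for it.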
#[derive(Debug)]
pub struct DeviceSelection {
    pub device: Device,
    pub queue: Queue,
    pub tier: LimitsTier,
    pub applied_limits: Limits,
}

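/// Tries to create a device with `base` limits after adopting the adapter's reported
/// texture-resolution limits; returns `None` if the adapter rejects the request.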
async fn try_with(adapter: &Adapter, base: Limits, tier: LimitsTier) -> Option<DeviceSelection> {
    // Adopt the adapter's reported texture-resolution limits (max_texture_dimension_*)
    // before request_device, so the requested baseline never exceeds what the GPU
    // supports for those fields. This avoids "requested limits exceed adapter
    // capabilities" errors on older or downlevel GPUs; all other limits come from `base`.
    let applied = base.using_resolution(adapter.limits());

    info!(?tier, "trying to find device with tier");

    adapter
        .request_device(&DeviceDescriptor {
            label: Some("wgpu device"),
            required_features: Features::empty(), // require nothing
            required_limits: applied.clone(),
            memory_hints: MemoryHints::default(),
            trace: Trace::default(),
        })
        .await
        .ok()
        .map(|(device, queue)| DeviceSelection {
            device,
            queue,
            tier,
            applied_limits: applied,
        })
}

/*
    let device_descriptor = DeviceDescriptor {
        label: None,
        required_features: Features::empty(), // Specify features as needed
        required_limits: if cfg!(target_arch = "wasm32") {
            Limits::downlevel_webgl2_defaults() // TODO: Not sure if this is needed?
        } else {
            Limits::default()
        },
        memory_hints: MemoryHints::default(), // Use default memory hints
        trace: Trace::default(),
    };

    debug!(?device_descriptor, "device descriptor");

    let (device, queue) = adapter
        .request_device(&device_descriptor)
        .await
        .expect("Failed to request device");
    debug!(?device, "device");
*/

/// Requests a device, falling back through progressively weaker limit tiers:
/// modern defaults, then downlevel defaults, then WebGL2 defaults.
///
/// # Errors
///
/// Returns [`InitError::NoDevice`] if the adapter rejects every limits tier.
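///
/// # Example
///
/// A minimal sketch (not compiled as a doctest), assuming an adapter already selected,
/// e.g. via [`pick_best_adapter`]:
///
/// ```ignore
/// let selection = request_device_smart(&adapter).await?;
/// println!("device created at tier {:?}", selection.tier);
/// ```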
pub async fn request_device_smart(adapter: &Adapter) -> Result<DeviceSelection, InitError> {
    if let Some(sel) = try_with(adapter, Limits::default(), LimitsTier::ModernDefault).await {
        return Ok(sel);
    }
    if let Some(sel) = try_with(adapter, Limits::downlevel_defaults(), LimitsTier::Downlevel).await
    {
        return Ok(sel);
    }
    if let Some(sel) = try_with(
        adapter,
        Limits::downlevel_webgl2_defaults(),
        LimitsTier::WebGL2,
    )
    .await
    {
        return Ok(sel);
    }

    Err(InitError::NoDevice)
}

#[derive(Debug)]
pub enum InitError {
    CreateSurface(wgpu::CreateSurfaceError),
    NoAdapter,
    NoDevice,
    RequestDevice(wgpu::RequestDeviceError),
}

/// Creates the wgpu instance, surface, adapter, and device for the given window.
///
/// # Errors
///
/// Returns [`InitError::CreateSurface`] if surface creation fails,
/// [`InitError::NoAdapter`] if no suitable adapter is found, or
/// [`InitError::NoDevice`] if no device can be created at any limits tier.
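///
/// # Example
///
/// A minimal sketch (not compiled as a doctest), assuming an `Arc<winit::window::Window>`
/// already created by the windowing layer:
///
/// ```ignore
/// let info = annoying_async_device_creation(window.clone()).await?;
/// let wgpu_window = WgpuWindow::new(&info);
/// ```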
pub async fn annoying_async_device_creation(
    window: Arc<Window>,
) -> Result<BasicDeviceInfo, InitError> {
    let instance = Instance::new(&InstanceDescriptor {
        backends: Backends::default(),
        flags: InstanceFlags::default(),
        memory_budget_thresholds: MemoryBudgetThresholds::default(),
        backend_options: BackendOptions::default(),
    });
    debug!(?instance, "created instance");

    let surface = instance
        .create_surface(Arc::clone(&window))
        .map_err(InitError::CreateSurface)?;
    debug!(?surface, "created surface");

    let adapter = pick_best_adapter(&instance, Some(&surface)).await?;
    debug!(?adapter, "found adapter");

    let device_selection = request_device_smart(&adapter).await?;
    debug!(?device_selection, "selected device");

    let inner_size = window.inner_size();
    debug!(?inner_size, "window size detected");

    let device_info = BasicDeviceInfo {
        adapter,
        device: device_selection.device.into(),
        queue: device_selection.queue.into(),
        surface: surface.into(),
        physical_surface_size: inner_size,
    };

    debug!(?device_info, "assembled device info");

    Ok(device_info)
}

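/// Forwards `Resized` window messages from the previous frame to the wgpu surface.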
fn tick(mut wgpu_window: LoReM<WgpuWindow>, window_messages: Msg<WindowMessage>) {
    for msg in window_messages.iter_previous() {
        if let WindowMessage::Resized(size) = msg {
            debug!("resized to {:?}", size);
            wgpu_window.resize((size.x, size.y));
        }
    }
}

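/// Plugin that wraps the already created [`BasicDeviceInfo`] in a [`WgpuWindow`]
/// local resource and registers the resize handler in the [`RenderFirst`] stage.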
pub struct WgpuWindowPlugin;
impl Plugin for WgpuWindowPlugin {
    fn build(&self, _app: &mut App) {}

    fn post_initialization(&self, app: &mut App) {
        app.insert_local_resource(WgpuWindow::new(
            app.local_resources().fetch::<BasicDeviceInfo>(),
        ));
        app.add_system(RenderFirst, tick);
        info!("wgpu window plugin initialized");
    }
}

impl WgpuWindow {
    #[must_use]
    pub fn new(info: &BasicDeviceInfo) -> Self {
        let config = Self::configure_render_surface(info);

        Self {
            device: Arc::clone(&info.device),
            config,
            queue: Arc::clone(&info.queue),
            surface: Arc::clone(&info.surface),
        }
    }

    #[must_use]
    pub const fn device(&self) -> &Arc<Device> {
        &self.device
    }

    fn configure_render_surface(info: &BasicDeviceInfo) -> SurfaceConfiguration {
        let surface_caps = info.surface.get_capabilities(&info.adapter);
        // Prefer an sRGB surface format; otherwise fall back to the first supported one.
        let surface_format = surface_caps
            .formats
            .iter()
            .copied()
            .find(wgpu::TextureFormat::is_srgb)
            .unwrap_or(surface_caps.formats[0]);

        let present_mode = wgpu::PresentMode::Fifo; // Fifo (vsync) is supported on all platforms
        let config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: surface_format,
            width: info.physical_surface_size.width,
            height: info.physical_surface_size.height,
            present_mode,
            alpha_mode: surface_caps.alpha_modes[0],
            desired_maximum_frame_latency: 2,
            view_formats: vec![],
        };

        info.surface.configure(&info.device, &config);

        let alpha_mode = surface_caps.alpha_modes[0];
        trace!(
            "found surface format {:?} {:?} {:?}",
            surface_format, present_mode, alpha_mode
        );

        config
    }

    #[must_use]
    pub const fn texture_format(&self) -> wgpu::TextureFormat {
        self.config.format
    }

    pub fn configure_surface(&mut self) {
        debug!("configuring surface {:?}", self.config);
        self.surface.configure(&self.device, &self.config);
    }

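    /// Applies a new surface size and reconfigures the surface; zero-sized requests
    /// (e.g. a minimized window) are ignored.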
    pub fn resize(&mut self, new_size: (u16, u16)) {
        let width = u32::from(new_size.0);
        let height = u32::from(new_size.1);

        if width == 0 || height == 0 {
            return;
        }

        self.config.width = width;
        self.config.height = height;
        self.configure_surface();
    }

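    /// Acquires the next surface texture, hands a command encoder and a view of that
    /// texture to `render_fn`, then submits the encoder and presents the frame.
    ///
    /// A minimal sketch (not compiled as a doctest); `draw_scene` stands in for any
    /// caller-provided encoding function:
    ///
    /// ```ignore
    /// wgpu_window.render(|encoder, view| {
    ///     draw_scene(encoder, view);
    /// })?;
    /// ```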
    pub fn render(
        &self,
        mut render_fn: impl FnMut(&mut wgpu::CommandEncoder, &wgpu::TextureView),
    ) -> Result<(), SurfaceError> {
        let surface_texture = match self.surface.get_current_texture() {
            Ok(frame) => frame,
            Err(wgpu::SurfaceError::Outdated) => {
                // Surface is out of date: skip this frame and let a later
                // reconfigure (e.g. via resize) catch up.
                // TODO: Get window size somehow
                // self.surface.configure(&self.device, &self.config);
                return Ok(()); // hopefully next frame will work
            }
            Err(wgpu::SurfaceError::Lost) => {
                // Lost is treated the same as Outdated in wgpu 0.13+
                return Ok(()); // hopefully next frame will work
            }
            Err(wgpu::SurfaceError::OutOfMemory) => {
                // fatal: bail out of the app
                eprintln!("GPU out of memory, exiting");
                std::process::exit(1);
            }
            Err(e) => {
                // other errors (Timeout, etc.): log, skip this frame, and pass the error on
                eprintln!("wgpu error getting next swap chain texture: {e:?}");
                return Err(e);
            }
        };
        let texture_view = surface_texture
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());

        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        render_fn(&mut encoder, &texture_view);

        self.queue.submit(std::iter::once(encoder.finish()));

        surface_texture.present();

        Ok(())
    }
}