// daedalus_gpu/lib.rs

1//! GPU facade: backend selection, capability traits, opaque handles, and feature-gated backends.
2//! Feature matrix:
3//! - `gpu-noop` (default): deterministic noop backend, always available; zero-GPU environments still compile.
4//! - `gpu-mock`: deterministic mock backend for tests/CI.
5//! - `gpu-wgpu`: real wgpu backend (types remain internal), placeholder for now.
6//!   Concurrency: backends are `Send + Sync`; clone the handle (cheap `Arc`) to share across tasks.
7//!   Planner/runtime expectation: call `select_backend` once, inspect skipped reasons (for “why not GPU?”),
8//!   then use the returned handle to allocate buffers/images without depending on any concrete GPU type.
9
10#[cfg(feature = "gpu-async")]
11mod async_api;
12mod buffer;
13mod convert;
14mod handles;
15#[cfg(feature = "gpu-mock")]
16mod mock;
17mod noop;
18#[cfg(feature = "gpu-wgpu")]
19pub mod shader;
20mod traits;
21#[cfg(feature = "gpu-wgpu")]
22mod wgpu_backend;
23
24#[cfg(feature = "gpu-async")]
25pub use async_api::GpuAsyncBackend;
26pub use buffer::{BufferPool, SimpleBufferPool, TransferStats};
27pub use convert::{ErasedPayload, GpuSendable, Payload};
28pub use handles::{GpuBufferHandle, GpuBufferId, GpuImageHandle, GpuImageId};
29#[cfg(feature = "gpu-mock")]
30pub use mock::MockBackend;
31pub use noop::NoopBackend;
32pub use traits::{GpuBackend, GpuContext};
33#[cfg(feature = "gpu-wgpu")]
34pub use wgpu_backend::WgpuBackend;
35
36use bitflags::bitflags;
37use serde::{Deserialize, Serialize};
38use std::{fmt, sync::Arc};
39
/// Backend kind identifiers.
///
/// ```
/// use daedalus_gpu::GpuBackendKind;
/// let kind = GpuBackendKind::Noop;
/// assert_eq!(kind.as_str(), "noop");
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuBackendKind {
    /// Deterministic no-GPU backend; always compiled in (`gpu-noop`, default).
    Noop,
    /// Deterministic mock backend for tests/CI (`gpu-mock` feature).
    Mock,
    /// Real wgpu backend (`gpu-wgpu` feature).
    Wgpu,
}
53
54impl GpuBackendKind {
55    /// Return a stable string representation.
56    pub fn as_str(self) -> &'static str {
57        match self {
58            GpuBackendKind::Noop => "noop",
59            GpuBackendKind::Mock => "mock",
60            GpuBackendKind::Wgpu => "wgpu",
61        }
62    }
63}
64
/// Memory location hint for GPU resources.
///
/// ```
/// use daedalus_gpu::GpuMemoryLocation;
/// let loc = GpuMemoryLocation::Gpu;
/// assert_eq!(loc, GpuMemoryLocation::Gpu);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMemoryLocation {
    /// Host (CPU) memory.
    Cpu,
    /// Device (GPU) memory.
    Gpu,
    /// Memory accessible from both host and device.
    Shared,
}
78
/// Common GPU formats (minimal set for planner decisions).
///
/// ```
/// use daedalus_gpu::GpuFormat;
/// let fmt = GpuFormat::Rgba8Unorm;
/// assert_eq!(fmt, GpuFormat::Rgba8Unorm);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuFormat {
    /// Single 8-bit normalized channel (1 byte/pixel; see `format_bytes_per_pixel`).
    R8Unorm,
    /// Four 8-bit normalized channels (4 bytes/pixel).
    Rgba8Unorm,
    /// Four 16-bit float channels (8 bytes/pixel).
    Rgba16Float,
    /// Combined 24-bit depth + 8-bit stencil (4 bytes/pixel).
    Depth24Stencil8,
}
93
/// Per-format feature flags for planner/runtime decisions.
///
/// ```
/// use daedalus_gpu::{GpuFormat, GpuFormatFeatures};
/// let feats = GpuFormatFeatures {
///     format: GpuFormat::R8Unorm,
///     sampleable: true,
///     renderable: false,
///     storage: true,
///     max_samples: 1,
/// };
/// assert!(feats.sampleable);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuFormatFeatures {
    /// Format these flags describe.
    pub format: GpuFormat,
    /// Whether the format can be sampled in shaders.
    pub sampleable: bool,
    /// Whether the format can be used as a render target.
    pub renderable: bool,
    /// Whether the format can back storage bindings.
    pub storage: bool,
    /// Maximum supported sample count for this format.
    pub max_samples: u32,
}
115
/// Block/stride info for formats (useful if compressed formats are added later).
///
/// ```
/// use daedalus_gpu::{GpuBlockInfo, GpuFormat};
/// let info = GpuBlockInfo { format: GpuFormat::R8Unorm, block_width: 1, block_height: 1, bytes_per_block: 1 };
/// assert_eq!(info.bytes_per_block, 1);
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuBlockInfo {
    /// Format this block layout describes.
    pub format: GpuFormat,
    /// Block width in texels (1 for uncompressed formats).
    pub block_width: u32,
    /// Block height in texels (1 for uncompressed formats).
    pub block_height: u32,
    /// Bytes occupied by one block.
    pub bytes_per_block: u32,
}
130
bitflags! {
    /// Usage flags for buffers/images; combinations are allowed.
    ///
    /// ```
    /// use daedalus_gpu::GpuUsage;
    /// let usage = GpuUsage::UPLOAD | GpuUsage::STORAGE;
    /// assert!(usage.contains(GpuUsage::UPLOAD));
    /// ```
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
    pub struct GpuUsage: u32 {
        /// Resource is a source/target of CPU→GPU transfers.
        const UPLOAD = 0b0001;
        /// Resource is a source/target of GPU→CPU transfers.
        const DOWNLOAD = 0b0010;
        /// Resource can back storage bindings.
        const STORAGE = 0b0100;
        /// Resource can be used as a render target.
        const RENDER_TARGET = 0b1000;
    }
}
147
/// Adapter information exposed to planner/runtime.
///
/// ```
/// use daedalus_gpu::{GpuAdapterInfo, GpuBackendKind};
/// let info = GpuAdapterInfo {
///     name: "noop".into(),
///     backend: GpuBackendKind::Noop,
///     device_id: None,
///     vendor_id: None,
/// };
/// assert_eq!(info.backend, GpuBackendKind::Noop);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuAdapterInfo {
    /// Human-readable adapter name.
    pub name: String,
    /// Backend kind this adapter belongs to.
    pub backend: GpuBackendKind,
    /// Backend-specific device identifier, when available.
    pub device_id: Option<String>,
    /// Backend-specific vendor identifier, when available.
    pub vendor_id: Option<String>,
}
167
/// Adapter selection options.
///
/// ```
/// use daedalus_gpu::{GpuOptions, GpuBackendKind};
/// let opts = GpuOptions { preferred_backend: Some(GpuBackendKind::Noop), adapter_label: None, allow_software: true };
/// assert!(opts.allow_software);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct GpuOptions {
    /// Backend to try first during `select_backend`; falls back to the default order.
    pub preferred_backend: Option<GpuBackendKind>,
    /// Optional label used by backends when picking an adapter.
    pub adapter_label: Option<String>,
    /// Whether software adapters are acceptable.
    pub allow_software: bool,
}
181
/// Request shape for resource creation.
///
/// ```
/// use daedalus_gpu::{GpuRequest, GpuUsage};
/// let req = GpuRequest { usage: GpuUsage::UPLOAD, format: None, size_bytes: 1024 };
/// assert_eq!(req.size_bytes, 1024);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuRequest {
    /// Intended usage flags for the buffer.
    pub usage: GpuUsage,
    /// Optional format hint (`None` for raw byte buffers).
    pub format: Option<GpuFormat>,
    /// Requested size in bytes.
    pub size_bytes: u64,
}
195
/// Request shape for image/texture creation.
///
/// ```
/// use daedalus_gpu::{GpuImageRequest, GpuFormat, GpuUsage};
/// let req = GpuImageRequest { format: GpuFormat::Rgba8Unorm, width: 16, height: 16, samples: 1, usage: GpuUsage::STORAGE };
/// assert_eq!(req.width, 16);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuImageRequest {
    /// Texel format of the image.
    pub format: GpuFormat,
    /// Width in texels.
    pub width: u32,
    /// Height in texels.
    pub height: u32,
    /// Sample count (1 = no multisampling).
    pub samples: u32,
    /// Intended usage flags for the image.
    pub usage: GpuUsage,
}
211
/// Capability query result.
///
/// ```
/// use daedalus_gpu::{GpuCapabilities, GpuFormat, GpuFormatFeatures, GpuBlockInfo};
/// let caps = GpuCapabilities {
///     supported_formats: vec![GpuFormat::R8Unorm],
///     format_features: vec![GpuFormatFeatures { format: GpuFormat::R8Unorm, sampleable: true, renderable: false, storage: true, max_samples: 1 }],
///     format_blocks: vec![GpuBlockInfo { format: GpuFormat::R8Unorm, block_width: 1, block_height: 1, bytes_per_block: 1 }],
///     max_buffer_size: 1,
///     max_texture_dimension: 1,
///     max_texture_samples: 1,
///     staging_alignment: 1,
///     max_inflight_copies: 1,
///     queue_count: 1,
///     min_buffer_copy_offset_alignment: 1,
///     bytes_per_row_alignment: 1,
///     rows_per_image_alignment: 1,
///     has_transfer_queue: false,
/// };
/// assert_eq!(caps.supported_formats.len(), 1);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct GpuCapabilities {
    /// Formats the backend supports at all.
    pub supported_formats: Vec<GpuFormat>,
    /// Per-format feature flags.
    pub format_features: Vec<GpuFormatFeatures>,
    /// Per-format block layout info; consulted by `validate_texture_bytes`.
    pub format_blocks: Vec<GpuBlockInfo>,
    /// Maximum buffer size in bytes.
    pub max_buffer_size: u64,
    /// Maximum texture width/height in texels.
    pub max_texture_dimension: u32,
    /// Maximum texture sample count.
    pub max_texture_samples: u32,
    /// Required alignment for staging allocations, in bytes.
    pub staging_alignment: u64,
    /// Maximum number of copies in flight at once.
    pub max_inflight_copies: u32,
    /// Number of queues exposed by the adapter.
    pub queue_count: u32,
    /// Required alignment for buffer copy offsets, in bytes.
    pub min_buffer_copy_offset_alignment: u64,
    /// Required row-pitch alignment for texture copies, in bytes.
    pub bytes_per_row_alignment: u32,
    /// Required alignment for rows-per-image in texture copies.
    pub rows_per_image_alignment: u32,
    /// Whether a dedicated transfer queue exists.
    pub has_transfer_queue: bool,
}
249
/// GPU error codes for diagnostics.
///
/// ```
/// use daedalus_gpu::GpuError;
/// let err = GpuError::Unsupported;
/// assert_eq!(format!("{err}"), "unsupported");
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuError {
    /// Requested format/operation is not supported by the backend.
    Unsupported,
    /// Resource allocation (or size validation) failed.
    AllocationFailed,
    /// No usable adapter could be found.
    AdapterUnavailable,
    /// Backend-specific failure with a free-form message.
    Internal(String),
}
264
265impl fmt::Display for GpuError {
266    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
267        match self {
268            GpuError::Unsupported => write!(f, "unsupported"),
269            GpuError::AllocationFailed => write!(f, "allocation failed"),
270            GpuError::AdapterUnavailable => write!(f, "adapter unavailable"),
271            GpuError::Internal(msg) => write!(f, "internal: {msg}"),
272        }
273    }
274}
275
276impl std::error::Error for GpuError {}
277
/// Reason a backend was skipped during selection.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum BackendSkipReason {
    /// The backend's cargo feature was not enabled at build time.
    FeatureNotEnabled,
    /// The backend compiled in but could not find a usable adapter.
    AdapterUnavailable,
    /// Skipped because a different preferred backend was requested.
    PreferredMismatch,
    /// Backend construction failed with the given message.
    Error(String),
}
286
/// Explanation for a backend that was skipped during selection.
///
/// ```
/// use daedalus_gpu::{BackendSkip, BackendSkipReason, GpuBackendKind};
/// let skip = BackendSkip { backend: GpuBackendKind::Wgpu, reason: BackendSkipReason::FeatureNotEnabled };
/// assert!(skip.describe().contains("not built"));
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct BackendSkip {
    /// The backend that was skipped.
    pub backend: GpuBackendKind,
    /// Why it was skipped.
    pub reason: BackendSkipReason,
}
299
300impl BackendSkip {
301    pub fn describe(&self) -> String {
302        match &self.reason {
303            BackendSkipReason::FeatureNotEnabled => {
304                format!("{:?} not built (feature disabled)", self.backend)
305            }
306            BackendSkipReason::AdapterUnavailable => {
307                format!("{:?} adapter unavailable", self.backend)
308            }
309            BackendSkipReason::PreferredMismatch => {
310                format!("{:?} not selected due to preference", self.backend)
311            }
312            BackendSkipReason::Error(e) => format!("{:?} failed: {}", self.backend, e),
313        }
314    }
315}
316
/// Shared handle wrapping a selected backend and diagnostics for why other backends were skipped.
#[derive(Clone)]
pub struct GpuContextHandle {
    // Cheap Arc clone lets the handle be shared across tasks (see module docs).
    backend: Arc<dyn GpuBackend>,
    // Kind of the backend that won selection.
    chosen: GpuBackendKind,
    // Adapter metadata captured at selection time.
    adapter: GpuAdapterInfo,
    // Backends tried before this one, with skip reasons.
    skipped: Vec<BackendSkip>,
}
325
impl GpuContextHandle {
    /// Kind of the backend selected by `select_backend`.
    pub fn backend_kind(&self) -> GpuBackendKind {
        self.chosen
    }

    /// Adapter metadata captured at selection time.
    pub fn adapter_info(&self) -> &GpuAdapterInfo {
        &self.adapter
    }

    /// Backends that were considered but skipped, with reasons.
    pub fn skipped(&self) -> &[BackendSkip] {
        &self.skipped
    }

    /// Human-readable descriptions of all skipped backends ("why not GPU?").
    pub fn skipped_summary(&self) -> Vec<String> {
        self.skipped.iter().map(|s| s.describe()).collect()
    }

    /// Borrow the underlying backend as a trait object.
    pub fn backend_ref(&self) -> &dyn GpuBackend {
        self.backend.as_ref()
    }

    /// Capability report from the selected backend.
    pub fn capabilities(&self) -> GpuCapabilities {
        self.backend.capabilities()
    }

    /// Current transfer statistics (non-destructive read).
    pub fn stats(&self) -> TransferStats {
        self.backend.stats()
    }

    /// Take the backend's transfer statistics (presumably resetting them —
    /// see the backend's `take_stats` contract).
    pub fn take_stats(&self) -> TransferStats {
        self.backend.take_stats()
    }

    /// Reset the transfer statistics, returning the values prior to the reset.
    // NOTE(review): body is identical to `take_stats`; confirm whether this is
    // a deliberate alias or a reset-without-return variant was intended.
    pub fn reset_stats(&self) -> TransferStats {
        self.backend.take_stats()
    }

    /// Record `bytes` as downloaded in the backend's transfer statistics.
    pub fn record_download(&self, bytes: u64) {
        self.backend.record_download(bytes)
    }

    /// Upload texture data, validating the copy layout against capabilities first.
    ///
    /// # Errors
    /// Propagates `validate_texture_bytes` errors (e.g. `Unsupported` for an
    /// unknown format or degenerate size) before touching the backend.
    pub fn upload_texture(
        &self,
        req: &GpuImageRequest,
        data: &[u8],
    ) -> Result<GpuImageHandle, GpuError> {
        validate_texture_bytes(req, &self.capabilities())?;
        self.backend.upload_texture(req, data)
    }

    /// Read texture contents back from the backend.
    pub fn read_texture(&self, handle: &GpuImageHandle) -> Result<Vec<u8>, GpuError> {
        self.backend.read_texture(handle)
    }

    /// Create a buffer described by `req` on the selected backend.
    pub fn create_buffer(&self, req: &GpuRequest) -> Result<GpuBufferHandle, GpuError> {
        self.backend.create_buffer(req)
    }

    /// Allocate a buffer with `UPLOAD` usage and no format hint.
    pub fn alloc_upload_buffer(&self, size_bytes: u64) -> Result<GpuBufferHandle, GpuError> {
        self.create_buffer(&GpuRequest {
            usage: GpuUsage::UPLOAD,
            format: None,
            size_bytes,
        })
    }

    /// Allocate a buffer with `DOWNLOAD` usage and no format hint.
    pub fn alloc_download_buffer(&self, size_bytes: u64) -> Result<GpuBufferHandle, GpuError> {
        self.create_buffer(&GpuRequest {
            usage: GpuUsage::DOWNLOAD,
            format: None,
            size_bytes,
        })
    }

    /// Create an image/texture described by `req` (no data upload).
    pub fn create_image(&self, req: &GpuImageRequest) -> Result<GpuImageHandle, GpuError> {
        self.backend.create_image(req)
    }
}
404
405pub fn format_bytes_per_pixel(format: GpuFormat) -> Option<u32> {
406    match format {
407        GpuFormat::R8Unorm => Some(1),
408        GpuFormat::Rgba8Unorm => Some(4),
409        GpuFormat::Rgba16Float => Some(8),
410        GpuFormat::Depth24Stencil8 => Some(4),
411    }
412}
413
414/// Convenience helper for uploading an R8 (single-channel) texture with basic size/length validation.
415pub fn upload_r8_texture(
416    ctx: &GpuContextHandle,
417    width: u32,
418    height: u32,
419    data: &[u8],
420) -> Result<GpuImageHandle, GpuError> {
421    let expected = width as usize * height as usize;
422    if data.len() != expected {
423        return Err(GpuError::AllocationFailed);
424    }
425    let req = GpuImageRequest {
426        format: GpuFormat::R8Unorm,
427        width,
428        height,
429        samples: 1,
430        usage: GpuUsage::RENDER_TARGET | GpuUsage::UPLOAD,
431    };
432    validate_texture_bytes(&req, &ctx.capabilities())?;
433    ctx.upload_texture(&req, data)
434}
435
/// Simple upload helper: allocates an upload buffer and returns it with bytes staged length.
///
/// # Errors
/// Propagates allocation failures from `alloc_upload_buffer`.
pub fn upload_bytes(ctx: &GpuContextHandle, bytes: &[u8]) -> Result<GpuBufferHandle, GpuError> {
    let buf = ctx.alloc_upload_buffer(bytes.len() as u64)?;
    // Real implementations would map/stage here; we just track stats.
    // NOTE(review): this records a *download* of 0 bytes on an upload path —
    // looks like a stats placeholder; confirm an upload counter was not intended.
    ctx.record_download(0);
    Ok(buf)
}
443
444/// Convenience helper for uploading an RGBA8 texture with basic size/length validation.
445pub fn upload_rgba8_texture(
446    ctx: &GpuContextHandle,
447    width: u32,
448    height: u32,
449    data: &[u8],
450) -> Result<GpuImageHandle, GpuError> {
451    let expected = width as usize * height as usize * 4;
452    if data.len() != expected {
453        return Err(GpuError::AllocationFailed);
454    }
455    let req = GpuImageRequest {
456        format: GpuFormat::Rgba8Unorm,
457        width,
458        height,
459        samples: 1,
460        usage: GpuUsage::RENDER_TARGET | GpuUsage::UPLOAD,
461    };
462    validate_texture_bytes(&req, &ctx.capabilities())?;
463    ctx.upload_texture(&req, data)
464}
465
466/// Validate texture copy layout against capabilities.
467pub fn validate_texture_bytes(
468    req: &GpuImageRequest,
469    caps: &GpuCapabilities,
470) -> Result<(), GpuError> {
471    let block = caps
472        .format_blocks
473        .iter()
474        .find(|b| b.format == req.format)
475        .cloned()
476        .ok_or(GpuError::Unsupported)?;
477    let blocks_x = (req.width as u64).div_ceil(block.block_width as u64);
478    let blocks_y = (req.height as u64).div_ceil(block.block_height as u64);
479    let bytes_per_row = blocks_x * block.bytes_per_block as u64;
480    if bytes_per_row == 0 {
481        return Err(GpuError::Unsupported);
482    }
483    if blocks_y == 0 {
484        return Err(GpuError::Unsupported);
485    }
486    Ok(())
487}
488
489/// Select the best available backend given build-time features and runtime options.
490/// Order: preferred backend (if set), then wgpu, mock, noop.
491pub fn select_backend(opts: &GpuOptions) -> Result<GpuContextHandle, GpuError> {
492    let mut skipped = Vec::new();
493    let mut order = Vec::new();
494    if let Some(pref) = opts.preferred_backend
495        && !order.contains(&pref)
496    {
497        order.push(pref);
498    }
499    for fallback in [
500        GpuBackendKind::Wgpu,
501        GpuBackendKind::Mock,
502        GpuBackendKind::Noop,
503    ] {
504        if !order.contains(&fallback) {
505            order.push(fallback);
506        }
507    }
508
509    for kind in order {
510        match try_build_backend(kind, opts) {
511            Ok((backend, adapter)) => {
512                return Ok(GpuContextHandle {
513                    chosen: kind,
514                    adapter,
515                    skipped,
516                    backend,
517                });
518            }
519            Err(reason) => skipped.push(BackendSkip {
520                backend: kind,
521                reason,
522            }),
523        }
524    }
525
526    Err(GpuError::AdapterUnavailable)
527}
528
/// Construct one backend kind, returning the backend plus its adapter info,
/// or a `BackendSkipReason` explaining why it is unavailable.
///
/// Each feature-gated kind has two cfg arms: the enabled arm builds the
/// backend and selects an adapter; the disabled arm reports
/// `FeatureNotEnabled`. Noop is always compiled in and has no gate.
fn try_build_backend(
    kind: GpuBackendKind,
    opts: &GpuOptions,
) -> Result<(Arc<dyn GpuBackend>, GpuAdapterInfo), BackendSkipReason> {
    match kind {
        GpuBackendKind::Wgpu => {
            #[cfg(feature = "gpu-wgpu")]
            {
                // Construction errors are preserved verbatim for diagnostics;
                // adapter-selection errors collapse to AdapterUnavailable.
                let backend =
                    WgpuBackend::new().map_err(|e| BackendSkipReason::Error(e.to_string()))?;
                let adapter = backend
                    .select_adapter(opts)
                    .map_err(|_| BackendSkipReason::AdapterUnavailable)?;
                Ok((Arc::new(backend), adapter))
            }
            #[cfg(not(feature = "gpu-wgpu"))]
            {
                Err(BackendSkipReason::FeatureNotEnabled)
            }
        }
        GpuBackendKind::Mock => {
            #[cfg(feature = "gpu-mock")]
            {
                let backend = MockBackend::default();
                let adapter = backend
                    .select_adapter(opts)
                    .map_err(|_| BackendSkipReason::AdapterUnavailable)?;
                Ok((Arc::new(backend), adapter))
            }
            #[cfg(not(feature = "gpu-mock"))]
            {
                Err(BackendSkipReason::FeatureNotEnabled)
            }
        }
        GpuBackendKind::Noop => {
            // Always available: the deterministic zero-GPU fallback.
            let backend = NoopBackend::default();
            let adapter = backend
                .select_adapter(opts)
                .map_err(|_| BackendSkipReason::AdapterUnavailable)?;
            Ok((Arc::new(backend), adapter))
        }
    }
}
572
573impl GpuContext for GpuContextHandle {
574    fn backend(&self) -> GpuBackendKind {
575        self.backend_kind()
576    }
577
578    fn adapter_info(&self) -> GpuAdapterInfo {
579        self.adapter.clone()
580    }
581
582    fn capabilities(&self) -> GpuCapabilities {
583        self.capabilities()
584    }
585
586    fn stats(&self) -> TransferStats {
587        self.stats()
588    }
589
590    fn take_stats(&self) -> TransferStats {
591        self.backend.take_stats()
592    }
593
594    fn record_download(&self, bytes: u64) {
595        self.backend.record_download(bytes)
596    }
597}
598
599/// Convenience for callers that just need to know the active backend kind.
600pub fn active_backend() -> GpuBackendKind {
601    select_backend(&GpuOptions::default())
602        .map(|ctx| ctx.backend_kind())
603        .unwrap_or(GpuBackendKind::Noop)
604}
605
#[cfg(test)]
mod tests {
    use super::*;
    use std::{collections::HashSet, sync::Arc, thread};

    /// With only the default `gpu-noop` feature, selection must land on Noop
    /// and report at least one skipped backend; with extra features built,
    /// any of the three kinds is acceptable.
    #[test]
    fn falls_back_to_noop_when_only_noop_is_built() {
        let ctx = select_backend(&GpuOptions::default()).unwrap();
        let kind = ctx.backend_kind();
        if cfg!(not(any(feature = "gpu-mock", feature = "gpu-wgpu"))) {
            assert_eq!(kind, GpuBackendKind::Noop);
            assert!(!ctx.skipped().is_empty());
        } else {
            assert!(matches!(
                kind,
                GpuBackendKind::Mock | GpuBackendKind::Wgpu | GpuBackendKind::Noop
            ));
        }
    }

    /// Mock outranks Noop in the default fallback order when built.
    #[cfg(feature = "gpu-mock")]
    #[test]
    fn prefers_mock_when_available() {
        let ctx = select_backend(&GpuOptions::default()).unwrap();
        assert_eq!(ctx.backend_kind(), GpuBackendKind::Mock);
        assert_eq!(ctx.adapter_info().backend, GpuBackendKind::Mock);
    }

    /// Preferring wgpu is honored when possible; fallback kinds are allowed
    /// because a wgpu adapter may be unavailable in CI.
    #[cfg(feature = "gpu-wgpu")]
    #[test]
    fn can_select_wgpu_when_built() {
        let opts = GpuOptions {
            preferred_backend: Some(GpuBackendKind::Wgpu),
            ..Default::default()
        };
        let ctx = select_backend(&opts).unwrap();
        assert!(matches!(
            ctx.backend_kind(),
            GpuBackendKind::Wgpu | GpuBackendKind::Mock | GpuBackendKind::Noop
        ));
    }

    /// Buffer IDs must be unique across concurrent creators (backends are
    /// Send + Sync and the handle is cloned across threads).
    #[test]
    fn parallel_buffer_creates_are_unique() {
        let ctx = Arc::new(select_backend(&GpuOptions::default()).unwrap());
        let mut handles = Vec::new();
        let mut threads = Vec::new();
        for _ in 0..4 {
            let ctx = ctx.clone();
            threads.push(thread::spawn(move || {
                let mut local = Vec::new();
                for _ in 0..32 {
                    let buf = ctx
                        .create_buffer(&GpuRequest {
                            usage: GpuUsage::UPLOAD,
                            format: None,
                            size_bytes: 256,
                        })
                        .unwrap();
                    local.push(buf.id);
                }
                local
            }));
        }
        for t in threads {
            handles.extend(t.join().expect("thread join"));
        }
        // 4 threads x 32 creates: all 128 ids must be distinct.
        let unique: HashSet<_> = handles.iter().copied().collect();
        assert_eq!(unique.len(), handles.len());
    }

    /// Same uniqueness property for image IDs.
    #[test]
    fn parallel_image_creates_are_unique() {
        let ctx = Arc::new(select_backend(&GpuOptions::default()).unwrap());
        let mut handles = Vec::new();
        let mut threads = Vec::new();
        for _ in 0..4 {
            let ctx = ctx.clone();
            threads.push(thread::spawn(move || {
                let mut local = Vec::new();
                for _ in 0..32 {
                    let img = ctx
                        .create_image(&GpuImageRequest {
                            format: GpuFormat::Rgba8Unorm,
                            width: 64,
                            height: 16,
                            samples: 1,
                            usage: GpuUsage::RENDER_TARGET,
                        })
                        .unwrap();
                    local.push(img.id);
                }
                local
            }));
        }
        for t in threads {
            handles.extend(t.join().expect("thread join"));
        }
        // 4 threads x 32 creates: all 128 ids must be distinct.
        let unique: HashSet<_> = handles.iter().copied().collect();
        assert_eq!(unique.len(), handles.len());
    }
}