gpu_descriptor_ash/
lib.rs

1use {
2    ash::{vk, Device},
3    gpu_descriptor_types::{
4        CreatePoolError, DescriptorDevice, DescriptorPoolCreateFlags, DescriptorTotalCount,
5        DeviceAllocationError,
6    },
7};
8
/// Adapter that lets an [`ash::Device`] be used as the
/// [`DescriptorDevice`] backend of the `gpu-descriptor` allocator.
///
/// `#[repr(transparent)]` guarantees this struct has exactly the same
/// layout as `Device`, which is what makes the reference cast in
/// [`AshDescriptorDevice::wrap`] sound.
#[repr(transparent)]
pub struct AshDescriptorDevice {
    device: Device,
}
13
impl AshDescriptorDevice {
    /// Borrows an [`ash::Device`] as an `AshDescriptorDevice`.
    ///
    /// This is a zero-cost reference cast — no clone of the device
    /// handle is made, and the returned reference shares the input's
    /// lifetime.
    pub fn wrap(device: &Device) -> &Self {
        unsafe {
            // SAFETY: `Self` is `repr(transparent)` with `Device` as its
            // only field, so `&Device` and `&Self` have identical layout
            // and validity requirements.
            &*(device as *const Device as *const Self)
        }
    }
}
23
24impl DescriptorDevice<vk::DescriptorSetLayout, vk::DescriptorPool, vk::DescriptorSet>
25    for AshDescriptorDevice
26{
27    unsafe fn create_descriptor_pool(
28        &self,
29        descriptor_count: &DescriptorTotalCount,
30        max_sets: u32,
31        flags: DescriptorPoolCreateFlags,
32    ) -> Result<vk::DescriptorPool, CreatePoolError> {
33        let mut array = [vk::DescriptorPoolSize::default(); 13];
34        let mut len = 0;
35
36        if descriptor_count.sampler != 0 {
37            array[len].ty = vk::DescriptorType::SAMPLER;
38            array[len].descriptor_count = descriptor_count.sampler;
39            len += 1;
40        }
41
42        if descriptor_count.combined_image_sampler != 0 {
43            array[len].ty = vk::DescriptorType::COMBINED_IMAGE_SAMPLER;
44            array[len].descriptor_count = descriptor_count.combined_image_sampler;
45            len += 1;
46        }
47
48        if descriptor_count.sampled_image != 0 {
49            array[len].ty = vk::DescriptorType::SAMPLED_IMAGE;
50            array[len].descriptor_count = descriptor_count.sampled_image;
51            len += 1;
52        }
53
54        if descriptor_count.storage_image != 0 {
55            array[len].ty = vk::DescriptorType::STORAGE_IMAGE;
56            array[len].descriptor_count = descriptor_count.storage_image;
57            len += 1;
58        }
59
60        if descriptor_count.uniform_texel_buffer != 0 {
61            array[len].ty = vk::DescriptorType::UNIFORM_TEXEL_BUFFER;
62            array[len].descriptor_count = descriptor_count.uniform_texel_buffer;
63            len += 1;
64        }
65
66        if descriptor_count.storage_texel_buffer != 0 {
67            array[len].ty = vk::DescriptorType::STORAGE_TEXEL_BUFFER;
68            array[len].descriptor_count = descriptor_count.storage_texel_buffer;
69            len += 1;
70        }
71
72        if descriptor_count.uniform_buffer != 0 {
73            array[len].ty = vk::DescriptorType::UNIFORM_BUFFER;
74            array[len].descriptor_count = descriptor_count.uniform_buffer;
75            len += 1;
76        }
77
78        if descriptor_count.storage_buffer != 0 {
79            array[len].ty = vk::DescriptorType::STORAGE_BUFFER;
80            array[len].descriptor_count = descriptor_count.storage_buffer;
81            len += 1;
82        }
83
84        if descriptor_count.uniform_buffer_dynamic != 0 {
85            array[len].ty = vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC;
86            array[len].descriptor_count = descriptor_count.uniform_buffer_dynamic;
87            len += 1;
88        }
89
90        if descriptor_count.storage_buffer_dynamic != 0 {
91            array[len].ty = vk::DescriptorType::STORAGE_BUFFER_DYNAMIC;
92            array[len].descriptor_count = descriptor_count.storage_buffer_dynamic;
93            len += 1;
94        }
95
96        if descriptor_count.input_attachment != 0 {
97            array[len].ty = vk::DescriptorType::INPUT_ATTACHMENT;
98            array[len].descriptor_count = descriptor_count.input_attachment;
99            len += 1;
100        }
101
102        if descriptor_count.acceleration_structure != 0 {
103            array[len].ty = vk::DescriptorType::ACCELERATION_STRUCTURE_KHR;
104            array[len].descriptor_count = descriptor_count.acceleration_structure;
105            len += 1;
106        }
107
108        if descriptor_count.inline_uniform_block_bytes != 0 {
109            panic!("Inline uniform blocks are not supported");
110        }
111
112        if descriptor_count.inline_uniform_block_bindings != 0 {
113            panic!("Inline uniform blocks are not supported");
114        }
115
116        let mut ash_flags = vk::DescriptorPoolCreateFlags::empty();
117
118        if flags.contains(DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET) {
119            ash_flags |= vk::DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET;
120        }
121
122        if flags.contains(DescriptorPoolCreateFlags::UPDATE_AFTER_BIND) {
123            ash_flags |= vk::DescriptorPoolCreateFlags::UPDATE_AFTER_BIND;
124        }
125
126        let result = self.device.create_descriptor_pool(
127            &vk::DescriptorPoolCreateInfo::default()
128                .max_sets(max_sets)
129                .pool_sizes(&array[..len])
130                .flags(ash_flags),
131            None,
132        );
133
134        match result {
135            Ok(pool) => Ok(pool),
136            Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(CreatePoolError::OutOfDeviceMemory),
137            Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(CreatePoolError::OutOfHostMemory),
138            Err(vk::Result::ERROR_FRAGMENTATION) => Err(CreatePoolError::Fragmentation),
139            Err(err) => panic!("Unexpected return code '{}'", err),
140        }
141    }
142
143    unsafe fn destroy_descriptor_pool(&self, pool: vk::DescriptorPool) {
144        self.device.destroy_descriptor_pool(pool, None)
145    }
146
147    unsafe fn alloc_descriptor_sets<'a>(
148        &self,
149        pool: &mut vk::DescriptorPool,
150        layouts: impl ExactSizeIterator<Item = &'a vk::DescriptorSetLayout>,
151        sets: &mut impl Extend<vk::DescriptorSet>,
152    ) -> Result<(), DeviceAllocationError> {
153        let set_layouts: smallvec::SmallVec<[_; 16]> = layouts.copied().collect();
154
155        match self.device.allocate_descriptor_sets(
156            &vk::DescriptorSetAllocateInfo::default()
157                .set_layouts(&set_layouts)
158                .descriptor_pool(*pool),
159        ) {
160            Ok(allocated) => {
161                sets.extend(allocated);
162                Ok(())
163            }
164            Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
165                Err(DeviceAllocationError::OutOfHostMemory)
166            }
167            Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
168                Err(DeviceAllocationError::OutOfDeviceMemory)
169            }
170            Err(vk::Result::ERROR_FRAGMENTED_POOL) => Err(DeviceAllocationError::OutOfPoolMemory),
171            Err(vk::Result::ERROR_OUT_OF_POOL_MEMORY) => Err(DeviceAllocationError::FragmentedPool),
172            Err(err) => panic!("Unexpected return code '{}'", err),
173        }
174    }
175
176    unsafe fn dealloc_descriptor_sets<'a>(
177        &self,
178        pool: &mut vk::DescriptorPool,
179        sets: impl Iterator<Item = vk::DescriptorSet>,
180    ) {
181        let sets: smallvec::SmallVec<[_; 16]> = sets.collect();
182        match self.device.free_descriptor_sets(*pool, &sets) {
183            Ok(()) => {}
184            Err(err) => panic!("Unexpected return code '{}'", err),
185        }
186    }
187}