vulkano/descriptor_set/allocator.rs

//! In the Vulkan API, descriptor sets must be allocated from *descriptor pools*.
//!
//! A descriptor pool holds and manages the memory of one or more descriptor sets. If you destroy a
//! descriptor pool, all of its descriptor sets are automatically destroyed.
//!
//! In vulkano, creating a descriptor set requires passing an implementation of the
//! [`DescriptorSetAllocator`] trait, which you can implement yourself or use the vulkano-provided
//! [`StandardDescriptorSetAllocator`].
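//!
//! # Examples
//!
//! A minimal sketch of creating the vulkano-provided allocator, assuming `device` is an already
//! created [`Device`](crate::device::Device):
//!
//! ```no_run
//! # use std::sync::Arc;
//! # use vulkano::descriptor_set::allocator::{
//! #     StandardDescriptorSetAllocator, StandardDescriptorSetAllocatorCreateInfo,
//! # };
//! # fn example(device: Arc<vulkano::device::Device>) {
//! // Create the allocator once and reuse it for all descriptor set allocations.
//! let allocator = Arc::new(StandardDescriptorSetAllocator::new(
//!     device,
//!     StandardDescriptorSetAllocatorCreateInfo::default(),
//! ));
//! # }
//! ```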

use self::sorted_map::SortedMap;
use super::{
    layout::DescriptorSetLayout,
    pool::{
        DescriptorPool, DescriptorPoolAlloc, DescriptorPoolCreateFlags, DescriptorPoolCreateInfo,
        DescriptorSetAllocateInfo,
    },
};
use crate::{
    descriptor_set::layout::DescriptorType,
    device::{Device, DeviceOwned},
    instance::InstanceOwnedDebugWrapper,
    Validated, VulkanError,
};
use crossbeam_queue::ArrayQueue;
use std::{
    cell::UnsafeCell,
    fmt::{Debug, Error as FmtError, Formatter},
    mem,
    num::NonZeroU64,
    ptr,
    sync::Arc,
};
use thread_local::ThreadLocal;

const MAX_POOLS: usize = 32;

/// Types that manage the memory of descriptor sets.
///
/// # Safety
///
/// A Vulkan descriptor pool must be externally synchronized as if it owned the descriptor sets
/// that were allocated from it. This includes allocating from the pool, freeing from the pool,
/// and resetting the pool. The implementation of `DescriptorSetAllocator` is expected to manage
/// this.
///
/// The implementation of `allocate` must return a valid allocation that stays allocated until
/// either `deallocate` is called on it or the allocator is dropped. Cloning the allocator must
/// produce the same allocator, and an allocation must stay allocated until either `deallocate`
/// is called on any of the clones or all clones have been dropped.
///
/// The implementation of `deallocate` is expected to free the descriptor set, reset its
/// descriptor pool, or add it to a pool so that it gets reused. If the implementation frees the
/// descriptor set or resets the descriptor pool, it must ensure that this operation is
/// externally synchronized. The implementation should not panic, as it is used while dropping
/// descriptor sets.
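///
/// # Examples
///
/// A sketch of a helper that is generic over the allocator; `allocate_and_free` and `layout` are
/// illustrative names, not part of this API:
///
/// ```
/// # use std::sync::Arc;
/// # use vulkano::{
/// #     descriptor_set::{allocator::DescriptorSetAllocator, layout::DescriptorSetLayout},
/// #     Validated, VulkanError,
/// # };
/// # #[allow(dead_code)]
/// fn allocate_and_free(
///     allocator: &impl DescriptorSetAllocator,
///     layout: &Arc<DescriptorSetLayout>,
/// ) -> Result<(), Validated<VulkanError>> {
///     // Allocate a set without a variable descriptor count.
///     let allocation = allocator.allocate(layout, 0)?;
///
///     // SAFETY: `allocation` was just allocated from `allocator` and is not used afterwards.
///     unsafe { allocator.deallocate(allocation) };
///
///     Ok(())
/// }
/// ```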
pub unsafe trait DescriptorSetAllocator: DeviceOwned + Send + Sync + 'static {
    /// Allocates a descriptor set.
    fn allocate(
        &self,
        layout: &Arc<DescriptorSetLayout>,
        variable_descriptor_count: u32,
    ) -> Result<DescriptorSetAlloc, Validated<VulkanError>>;

    /// Deallocates the given `allocation`.
    ///
    /// # Safety
    ///
    /// - `allocation` must refer to a **currently allocated** allocation of `self`.
    unsafe fn deallocate(&self, allocation: DescriptorSetAlloc);
}

impl Debug for dyn DescriptorSetAllocator {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
        f.debug_struct("DescriptorSetAllocator")
            .finish_non_exhaustive()
    }
}

/// An allocation made using a [descriptor set allocator].
///
/// [descriptor set allocator]: DescriptorSetAllocator
#[derive(Debug)]
pub struct DescriptorSetAlloc {
    /// The internal object that contains the descriptor set.
    pub inner: DescriptorPoolAlloc,

    /// The descriptor pool that the descriptor set was allocated from.
    ///
    /// Using this for anything other than looking at the pool's metadata will lead to a Bad
    /// Time<sup>TM</sup>. That includes making additional references.
    pub pool: Arc<DescriptorPool>,

    /// An opaque handle identifying the allocation inside the allocator.
    pub handle: AllocationHandle,
}

unsafe impl Send for DescriptorSetAlloc {}
unsafe impl Sync for DescriptorSetAlloc {}

/// An opaque handle identifying an allocation inside an allocator.
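///
/// For example, an allocator can use the handle to remember an index or a pointer to its own
/// bookkeeping data. A sketch of both round-trips:
///
/// ```
/// use vulkano::descriptor_set::allocator::AllocationHandle;
///
/// // Store an index inside the handle and retrieve it later.
/// let handle = AllocationHandle::from_index(7);
/// assert_eq!(handle.as_index(), 7);
///
/// // Store a pointer to a (host) heap allocation inside the handle...
/// let handle = AllocationHandle::from_ptr(Box::into_raw(Box::new(42u32)).cast());
///
/// // ...and recover it later, e.g. inside `deallocate`.
/// // SAFETY: the pointer was created by `Box::into_raw` above.
/// let value = unsafe { Box::from_raw(handle.as_ptr().cast::<u32>()) };
/// assert_eq!(*value, 42);
/// ```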
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(not(doc), repr(transparent))]
pub struct AllocationHandle(*mut ());

unsafe impl Send for AllocationHandle {}
unsafe impl Sync for AllocationHandle {}

impl AllocationHandle {
    /// Creates a null `AllocationHandle`.
    ///
    /// Use this if you don't have anything that you need to associate with the allocation.
    #[inline]
    pub const fn null() -> Self {
        AllocationHandle(ptr::null_mut())
    }

    /// Stores a pointer in an `AllocationHandle`.
    ///
    /// Use this if you want to associate an allocation with some (host) heap allocation.
    #[inline]
    pub const fn from_ptr(ptr: *mut ()) -> Self {
        AllocationHandle(ptr)
    }

    /// Stores an index inside an `AllocationHandle`.
    ///
    /// Use this if you want to associate an allocation with some index.
    #[allow(clippy::useless_transmute)]
    #[inline]
    pub const fn from_index(index: usize) -> Self {
        // SAFETY: `usize` and `*mut ()` have the same layout.
        AllocationHandle(unsafe { mem::transmute::<usize, *mut ()>(index) })
    }

    /// Retrieves a previously-stored pointer from the `AllocationHandle`.
    ///
    /// If this handle hasn't been created using [`from_ptr`], then this will return an invalid
    /// pointer, and dereferencing that pointer is undefined behavior.
    ///
    /// [`from_ptr`]: Self::from_ptr
    #[inline]
    pub const fn as_ptr(self) -> *mut () {
        self.0
    }

    /// Retrieves a previously-stored index from the `AllocationHandle`.
    ///
    /// If this handle hasn't been created using [`from_index`], then this will return a bogus
    /// result.
    ///
    /// [`from_index`]: Self::from_index
    #[allow(clippy::transmutes_expressible_as_ptr_casts)]
    #[inline]
    pub fn as_index(self) -> usize {
        // SAFETY: `usize` and `*mut ()` have the same layout.
        unsafe { mem::transmute::<*mut (), usize>(self.0) }
    }
}

/// Standard implementation of a descriptor set allocator.
///
/// The intended way to use this allocator is to have one that is used globally for the duration
/// of the program, in order to avoid creating and destroying [`DescriptorPool`]s, as that is
/// expensive. Alternatively, you can have one locally on a thread for the duration of the thread.
///
/// Internally, this allocator uses one or more `DescriptorPool`s per descriptor set layout per
/// thread, using Thread-Local Storage. When a thread first allocates, an entry is reserved for
/// that thread and descriptor set layout combination. If a thread exits while the allocator is
/// still alive, its entries are freed, but the pools they used are not dropped. The next time a
/// new thread allocates for the first time, the entries are reused along with the pools. If all
/// threads drop their reference to the allocator, all entries are dropped along with the
/// allocator, even if some threads haven't exited yet. You should therefore keep the allocator
/// alive for as long as you need to allocate, so that the pools can keep being reused.
///
/// This allocator only needs to lock when a thread first allocates or when a thread that
/// previously allocated exits. In all other cases, allocation is lock-free.
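///
/// # Examples
///
/// A minimal sketch of the intended global usage, assuming `device` and `layout` were created
/// elsewhere:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use vulkano::{
/// #     descriptor_set::{
/// #         allocator::{
/// #             DescriptorSetAllocator, StandardDescriptorSetAllocator,
/// #             StandardDescriptorSetAllocatorCreateInfo,
/// #         },
/// #         layout::DescriptorSetLayout,
/// #     },
/// #     device::Device,
/// # };
/// # fn example(device: Arc<Device>, layout: Arc<DescriptorSetLayout>) {
/// // Create the allocator once, e.g. at startup, and keep it alive for as long as you allocate.
/// let allocator = Arc::new(StandardDescriptorSetAllocator::new(
///     device,
///     StandardDescriptorSetAllocatorCreateInfo::default(),
/// ));
///
/// // Allocate a descriptor set for a layout without a variable descriptor count.
/// let allocation = allocator.allocate(&layout, 0).unwrap();
/// # }
/// ```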
#[derive(Debug)]
pub struct StandardDescriptorSetAllocator {
    device: InstanceOwnedDebugWrapper<Arc<Device>>,
    pools: ThreadLocal<UnsafeCell<SortedMap<NonZeroU64, Entry>>>,
    create_info: StandardDescriptorSetAllocatorCreateInfo,
}

#[derive(Debug)]
enum Entry {
    Fixed(FixedEntry),
    Variable(VariableEntry),
}

// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
// `Send + Sync`. `FixedEntry` and `VariableEntry` are `Send + !Sync` because `DescriptorPool` is
// `!Sync`. That's fine however because we never access the `DescriptorPool` concurrently.
unsafe impl Send for Entry {}

impl StandardDescriptorSetAllocator {
    /// Creates a new `StandardDescriptorSetAllocator`.
    #[inline]
    pub fn new(
        device: Arc<Device>,
        create_info: StandardDescriptorSetAllocatorCreateInfo,
    ) -> StandardDescriptorSetAllocator {
        StandardDescriptorSetAllocator {
            device: InstanceOwnedDebugWrapper(device),
            pools: ThreadLocal::new(),
            create_info,
        }
    }

    /// Clears the entry for the given descriptor set layout and the current thread. This does not
    /// mean that the pools are dropped immediately. A pool is kept alive for as long as descriptor
    /// sets allocated from it exist.
    ///
    /// This has no effect if the entry was not initialized yet.
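    ///
    /// A sketch, assuming an `allocator` and a `layout` already exist:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::descriptor_set::{
    /// #     allocator::StandardDescriptorSetAllocator, layout::DescriptorSetLayout,
    /// # };
    /// # fn example(allocator: &StandardDescriptorSetAllocator, layout: &Arc<DescriptorSetLayout>) {
    /// // Release this thread's entry for `layout`; pools with live descriptor sets stay alive.
    /// allocator.clear(layout);
    /// # }
    /// ```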
    #[inline]
    pub fn clear(&self, layout: &Arc<DescriptorSetLayout>) {
        let entry_ptr = self.pools.get_or(Default::default).get();
        unsafe { &mut *entry_ptr }.remove(layout.id())
    }

    /// Clears all entries for the current thread. This does not mean that the pools are dropped
    /// immediately. A pool is kept alive for as long as descriptor sets allocated from it exist.
    ///
    /// This has no effect if no entries were initialized yet.
    #[inline]
    pub fn clear_all(&self) {
        let entry_ptr = self.pools.get_or(Default::default).get();
        unsafe { *entry_ptr = SortedMap::default() };
    }
}

unsafe impl DescriptorSetAllocator for StandardDescriptorSetAllocator {
    #[inline]
    fn allocate(
        &self,
        layout: &Arc<DescriptorSetLayout>,
        variable_descriptor_count: u32,
    ) -> Result<DescriptorSetAlloc, Validated<VulkanError>> {
        let is_fixed = layout.variable_descriptor_count() == 0;
        let pools = self.pools.get_or_default();

        let entry_ptr = pools.get();
        let entry = unsafe { &mut *entry_ptr }.get_or_try_insert(layout.id(), || {
            if is_fixed {
                FixedEntry::new(layout, &self.create_info).map(Entry::Fixed)
            } else {
                VariableEntry::new(
                    layout,
                    &self.create_info,
                    Arc::new(ArrayQueue::new(MAX_POOLS)),
                )
                .map(Entry::Variable)
            }
        })?;

        match entry {
            Entry::Fixed(entry) => entry.allocate(layout, &self.create_info),
            Entry::Variable(entry) => {
                entry.allocate(layout, variable_descriptor_count, &self.create_info)
            }
        }
    }

    #[inline]
    unsafe fn deallocate(&self, allocation: DescriptorSetAlloc) {
        let is_fixed = allocation.inner.variable_descriptor_count() == 0;
        let ptr = allocation.handle.as_ptr();

        if is_fixed {
            // SAFETY: The caller must guarantee that `allocation` refers to one allocated by
            // `self`, therefore `ptr` must be the same one we gave out on allocation. We also know
            // that the pointer must be valid, because the caller must guarantee that the same
            // allocation isn't deallocated more than once. That means that since we cloned the
            // `Arc` on allocation, at least that strong reference must still keep it alive, and we
            // can safely drop this clone at the end of the scope here.
            let reserve = unsafe { Arc::from_raw(ptr.cast::<ArrayQueue<DescriptorPoolAlloc>>()) };

            // Every allocation is (supposed to be) returned to the pool whence it came, so there
            // must be enough room for it and the push cannot fail.
            let pushed = reserve.push(allocation.inner);
            debug_assert!(pushed.is_ok());
        } else {
            // SAFETY: Same as the `Arc::from_raw` above.
            let reserve = unsafe { Arc::from_raw(ptr.cast::<ArrayQueue<Arc<DescriptorPool>>>()) };

            let pool = allocation.pool;

            // We have to make sure that we only reset the pool under this condition, because there
            // could be other references in other allocations. If the user cloned the pool
            // themselves, that will most certainly cause a leak.
            if Arc::strong_count(&pool) == 1 {
                // If there is not enough space in the reserve, we destroy the pool. The only way
                // this can happen is if something is resource hogging, forcing new pools to be
                // created such that the number exceeds `MAX_POOLS`, and then drops them all at
                // once.
                let _ = reserve.push(pool);
            }
        }
    }
}

unsafe impl<T: DescriptorSetAllocator> DescriptorSetAllocator for Arc<T> {
    #[inline]
    fn allocate(
        &self,
        layout: &Arc<DescriptorSetLayout>,
        variable_descriptor_count: u32,
    ) -> Result<DescriptorSetAlloc, Validated<VulkanError>> {
        (**self).allocate(layout, variable_descriptor_count)
    }

    #[inline]
    unsafe fn deallocate(&self, allocation: DescriptorSetAlloc) {
        unsafe { (**self).deallocate(allocation) }
    }
}

unsafe impl DeviceOwned for StandardDescriptorSetAllocator {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

#[derive(Debug)]
struct FixedEntry {
    pool: Arc<DescriptorPool>,
    reserve: Arc<ArrayQueue<DescriptorPoolAlloc>>,
}

impl FixedEntry {
    fn new(
        layout: &Arc<DescriptorSetLayout>,
        create_info: &StandardDescriptorSetAllocatorCreateInfo,
    ) -> Result<Self, Validated<VulkanError>> {
        let pool = DescriptorPool::new(
            layout.device().clone(),
            DescriptorPoolCreateInfo {
                flags: create_info
                    .update_after_bind
                    .then_some(DescriptorPoolCreateFlags::UPDATE_AFTER_BIND)
                    .unwrap_or_default(),
                max_sets: create_info.set_count as u32,
                pool_sizes: layout
                    .descriptor_counts()
                    .iter()
                    .map(|(&ty, &count)| {
                        assert_ne!(ty, DescriptorType::InlineUniformBlock);
                        (ty, count * create_info.set_count as u32)
                    })
                    .collect(),
                ..Default::default()
            },
        )
        .map_err(Validated::unwrap)?;

        let allocate_infos =
            (0..create_info.set_count).map(|_| DescriptorSetAllocateInfo::new(layout.clone()));

        let allocs =
            unsafe { pool.allocate_descriptor_sets(allocate_infos) }.map_err(|err| match err {
                Validated::ValidationError(_) => err,
                Validated::Error(vk_err) => match vk_err {
                    VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
                    // This can't happen as we don't free individual sets.
                    VulkanError::FragmentedPool => unreachable!(),
                    // We created the pool with an exact size.
                    VulkanError::OutOfPoolMemory => unreachable!(),
                    // Shouldn't ever be returned.
                    _ => unreachable!(),
                },
            })?;

        let reserve = ArrayQueue::new(create_info.set_count);

        for alloc in allocs {
            let _ = reserve.push(alloc);
        }

        Ok(FixedEntry {
            pool: Arc::new(pool),
            reserve: Arc::new(reserve),
        })
    }

    fn allocate(
        &mut self,
        layout: &Arc<DescriptorSetLayout>,
        create_info: &StandardDescriptorSetAllocatorCreateInfo,
    ) -> Result<DescriptorSetAlloc, Validated<VulkanError>> {
        let inner = if let Some(inner) = self.reserve.pop() {
            inner
        } else {
            *self = FixedEntry::new(layout, create_info)?;

            self.reserve.pop().unwrap()
        };

        Ok(DescriptorSetAlloc {
            inner,
            pool: self.pool.clone(),
            handle: AllocationHandle::from_ptr(Arc::into_raw(self.reserve.clone()) as _),
        })
    }
}

#[derive(Debug)]
struct VariableEntry {
    pool: Arc<DescriptorPool>,
    reserve: Arc<ArrayQueue<Arc<DescriptorPool>>>,
    // The number of sets currently allocated from the Vulkan pool.
    allocations: usize,
}

impl VariableEntry {
    fn new(
        layout: &DescriptorSetLayout,
        create_info: &StandardDescriptorSetAllocatorCreateInfo,
        reserve: Arc<ArrayQueue<Arc<DescriptorPool>>>,
    ) -> Result<Self, Validated<VulkanError>> {
        let pool = DescriptorPool::new(
            layout.device().clone(),
            DescriptorPoolCreateInfo {
                flags: create_info
                    .update_after_bind
                    .then_some(DescriptorPoolCreateFlags::UPDATE_AFTER_BIND)
                    .unwrap_or_default(),
                max_sets: create_info.set_count as u32,
                pool_sizes: layout
                    .descriptor_counts()
                    .iter()
                    .map(|(&ty, &count)| {
                        assert_ne!(ty, DescriptorType::InlineUniformBlock);
                        (ty, count * create_info.set_count as u32)
                    })
                    .collect(),
                ..Default::default()
            },
        )
        .map_err(Validated::unwrap)?;

        Ok(VariableEntry {
            pool: Arc::new(pool),
            reserve,
            allocations: 0,
        })
    }

    fn allocate(
        &mut self,
        layout: &Arc<DescriptorSetLayout>,
        variable_descriptor_count: u32,
        create_info: &StandardDescriptorSetAllocatorCreateInfo,
    ) -> Result<DescriptorSetAlloc, Validated<VulkanError>> {
        if self.allocations >= create_info.set_count {
            // The strong count can be 1 here if there's only ever one allocation alive at any
            // point in time. In that case, when deallocating the last set before reaching
            // `set_count`, there will be 2 references to the pool (one here and one in the
            // allocation) and so the pool won't be returned to the reserve when deallocating.
            // However, since there are no other allocations alive, nothing else could return it
            // to the reserve either. To avoid dropping the pool unnecessarily, we simply continue
            // using it. In the case where there are other references, we drop ours, at which
            // point an allocation still holding a reference will be able to put the pool into the
            // reserve when deallocated. If the user created a reference themselves, that will
            // most certainly lead to a memory leak.
            //
            // TODO: This can still run into the A/B/A problem causing the pool to be dropped.
            if Arc::strong_count(&self.pool) == 1 {
                // SAFETY: We checked that the pool has a single strong reference above, meaning
                // that all the allocations we gave out must have been deallocated.
                unsafe { self.pool.reset() }?;

                self.allocations = 0;
            } else {
                if let Some(pool) = self.reserve.pop() {
                    // SAFETY: We checked that the pool has a single strong reference when
                    // deallocating, meaning that all the allocations we gave out must have been
                    // deallocated.
                    unsafe { pool.reset() }?;

                    self.pool = pool;
                    self.allocations = 0;
                } else {
                    *self = VariableEntry::new(layout, create_info, self.reserve.clone())?;
                }
            }
        }

        let allocate_info = DescriptorSetAllocateInfo {
            variable_descriptor_count,
            ..DescriptorSetAllocateInfo::new(layout.clone())
        };

        let mut sets = unsafe { self.pool.allocate_descriptor_sets([allocate_info]) }.map_err(
            |err| match err {
                Validated::ValidationError(_) => err,
                Validated::Error(vk_err) => match vk_err {
                    VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => err,
                    // This can't happen as we don't free individual sets.
                    VulkanError::FragmentedPool => unreachable!(),
                    // We created the pool to fit the maximum variable descriptor count.
                    VulkanError::OutOfPoolMemory => unreachable!(),
                    // Shouldn't ever be returned.
                    _ => unreachable!(),
                },
            },
        )?;

        self.allocations += 1;

        Ok(DescriptorSetAlloc {
            inner: sets.next().unwrap(),
            pool: self.pool.clone(),
            handle: AllocationHandle::from_ptr(Arc::into_raw(self.reserve.clone()) as _),
        })
    }
}

/// Parameters to create a new `StandardDescriptorSetAllocator`.
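///
/// A sketch of overriding the default `set_count`:
///
/// ```
/// use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocatorCreateInfo;
///
/// let create_info = StandardDescriptorSetAllocatorCreateInfo {
///     set_count: 64,
///     ..Default::default()
/// };
/// ```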
#[derive(Clone, Debug)]
pub struct StandardDescriptorSetAllocatorCreateInfo {
    /// How many descriptor sets should be allocated per pool.
    ///
    /// Each time a thread allocates using some descriptor set layout, and either no pools were
    /// initialized yet or all pools are full, a new pool is allocated for that thread and
    /// descriptor set layout combination. This option tells the allocator how many descriptor
    /// sets should be allocated for that pool. For fixed-size descriptor set layouts, it always
    /// allocates exactly this many descriptor sets at once for the pool, as that is more
    /// performant than allocating them one-by-one. For descriptor set layouts with a variable
    /// descriptor count, it allocates a pool capable of holding exactly this many descriptor
    /// sets, but doesn't allocate any descriptor sets since the variable count isn't known.
    ///
    /// Don't make this too large, or you will waste memory on descriptor sets that never get
    /// used. Don't make it too small either, or the pools will have to be reset more often, or
    /// more pools will have to be created, depending on the lifetime of the descriptor sets.
    ///
    /// The default value is `32`.
    pub set_count: usize,

    /// Whether to allocate descriptor pools with the
    /// [`DescriptorPoolCreateFlags::UPDATE_AFTER_BIND`] flag set.
    ///
    /// The default value is `false`.
    pub update_after_bind: bool,

    pub _ne: crate::NonExhaustive,
}

impl Default for StandardDescriptorSetAllocatorCreateInfo {
    #[inline]
    fn default() -> Self {
        StandardDescriptorSetAllocatorCreateInfo {
            set_count: 32,
            update_after_bind: false,
            _ne: crate::NonExhaustive(()),
        }
    }
}

mod sorted_map {
    use smallvec::SmallVec;

    /// Minimal implementation of a `SortedMap`. This outperforms both a [`BTreeMap`] and a
    /// [`HashMap`] for small numbers of elements. In Vulkan, having too many descriptor set
    /// layouts is highly discouraged, which is why this optimization makes sense.
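    ///
    /// A usage sketch (this is an internal helper; the snippet is illustrative only and not
    /// compiled):
    ///
    /// ```ignore
    /// let mut map = SortedMap::<u64, &str>::default();
    /// let value = map.get_or_try_insert(7, || Ok::<_, ()>("seven")).unwrap();
    /// map.remove(7);
    /// ```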
    #[derive(Debug)]
    pub(super) struct SortedMap<K, V> {
        inner: SmallVec<[(K, V); 8]>,
    }

    impl<K, V> Default for SortedMap<K, V> {
        fn default() -> Self {
            Self {
                inner: SmallVec::default(),
            }
        }
    }

    impl<K: Ord + Copy, V> SortedMap<K, V> {
        pub fn get_or_try_insert<E>(
            &mut self,
            key: K,
            f: impl FnOnce() -> Result<V, E>,
        ) -> Result<&mut V, E> {
            match self.inner.binary_search_by_key(&key, |&(k, _)| k) {
                Ok(index) => Ok(&mut self.inner[index].1),
                Err(index) => {
                    self.inner.insert(index, (key, f()?));
                    Ok(&mut self.inner[index].1)
                }
            }
        }

        pub fn remove(&mut self, key: K) {
            if let Ok(index) = self.inner.binary_search_by_key(&key, |&(k, _)| k) {
                self.inner.remove(index);
            }
        }
    }
}