1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
use super::DescriptorSetAllocator;
use crossbeam_channel::{Receiver, Sender};
use rafx_api::{RafxDeviceContext, RafxResult};
use std::collections::VecDeque;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};

// This holds the allocator and the frame on which it was "borrowed" from the allocator manager.
// It travels through the drop channel back to the pool when the borrowing ref is dropped.
struct DescriptorSetAllocatorRefInner {
    // Boxed so returning it to the pool moves a pointer, not the whole allocator
    allocator: Box<DescriptorSetAllocator>,
    // Value of the manager's frame_index at checkout time; used by drain_drop_rx to
    // decide whether the allocator missed an on_frame_complete tick while borrowed
    checkout_frame: u64,
}

// A borrowed allocator that returns itself when it is dropped. It is expected that these borrows
// are short (i.e. within a single frame). Holding an allocator over multiple frames can delay
// releasing descriptor sets that have been dropped.
//
// Derefs to DescriptorSetAllocator, so callers use it like the allocator itself.
pub struct DescriptorSetAllocatorRef {
    // This should never be None. We always allocate this to a non-none value and we don't clear
    // it until the drop handler (Option is only needed so Drop can move the value out)
    allocator: Option<DescriptorSetAllocatorRefInner>,
    // Clone of the manager's channel sender; Drop sends the allocator back through it
    drop_tx: Sender<DescriptorSetAllocatorRefInner>,
}

impl DescriptorSetAllocatorRef {
    fn new(
        allocator: DescriptorSetAllocatorRefInner,
        drop_tx: Sender<DescriptorSetAllocatorRefInner>,
    ) -> Self {
        DescriptorSetAllocatorRef {
            allocator: Some(allocator),
            drop_tx,
        }
    }
}

impl Deref for DescriptorSetAllocatorRef {
    type Target = DescriptorSetAllocator;

    /// Borrows the wrapped allocator. The inner Option is always Some until the
    /// drop handler runs, so the unwrap cannot fire from safe code.
    fn deref(&self) -> &Self::Target {
        let inner = self.allocator.as_ref().unwrap();
        &inner.allocator
    }
}

impl DerefMut for DescriptorSetAllocatorRef {
    /// Mutably borrows the wrapped allocator. The inner Option is always Some
    /// until the drop handler runs, so the unwrap cannot fire from safe code.
    fn deref_mut(&mut self) -> &mut Self::Target {
        let inner = self.allocator.as_mut().unwrap();
        &mut inner.allocator
    }
}

impl Drop for DescriptorSetAllocatorRef {
    fn drop(&mut self) {
        let mut allocator = self.allocator.take().unwrap();
        allocator.allocator.flush_changes().unwrap();
        self.drop_tx.send(allocator).unwrap();
    }
}

// A pool of descriptor set allocators. The allocators themselves contain pools for descriptor set
// layouts.
pub struct DescriptorSetAllocatorManagerInner {
    // Needed to create new allocators on demand when the pool is empty
    device_context: RafxDeviceContext,
    // Idle allocators awaiting checkout; mutex-guarded so multiple threads can
    // borrow/return concurrently
    allocators: Mutex<VecDeque<Box<DescriptorSetAllocator>>>,
    // Cloned into every DescriptorSetAllocatorRef so dropped refs can send their
    // allocator back; drop_rx is drained back into `allocators` opportunistically
    drop_tx: Sender<DescriptorSetAllocatorRefInner>,
    drop_rx: Receiver<DescriptorSetAllocatorRefInner>,
    // Incremented once per on_frame_complete; stamps checkouts so refs held
    // across frames can be detected
    frame_index: AtomicU64,
}

impl DescriptorSetAllocatorManagerInner {
    /// Creates an empty manager. Allocators are created lazily the first time
    /// `get_allocator` finds the pool empty.
    fn new(device_context: RafxDeviceContext) -> Self {
        let (drop_tx, drop_rx) = crossbeam_channel::unbounded();

        DescriptorSetAllocatorManagerInner {
            device_context,
            allocators: Default::default(),
            drop_tx,
            drop_rx,
            frame_index: AtomicU64::new(0),
        }
    }

    // Internally used to pull any dropped allocators back into the pool. If on_frame_complete
    // was called on the manager since it was borrowed, call it on the allocator. This lets us
    // drain any drops and schedule them for deletion after MAX_FRAMES_IN_FLIGHT passes. We only
    // call it once, even if several frames have passed, because we have no way of knowing if
    // descriptors were dropped recently or multiple frames ago.
    //
    // All call sites hold the `allocators` mutex while calling this.
    fn drain_drop_rx(
        drop_rx: &Receiver<DescriptorSetAllocatorRefInner>,
        allocators: &mut VecDeque<Box<DescriptorSetAllocator>>,
        frame_index: u64,
    ) {
        for mut allocator in drop_rx.try_iter() {
            // A frame ended while this allocator was checked out, so it missed the
            // per-frame tick applied to pooled allocators; run it once now
            if allocator.checkout_frame < frame_index {
                allocator.allocator.on_frame_complete();
            }

            if frame_index - allocator.checkout_frame > 1 {
                // Holding DescriptorSetAllocatorRefs for more than a frame will delay releasing
                // unused descriptors
                log::warn!("A DescriptorSetAllocatorRef was held for more than one frame.");
            }

            allocators.push_back(allocator.allocator);
        }
    }

    /// Checks an allocator out of the pool, creating a fresh one if none are idle.
    /// The returned ref sends the allocator back through `drop_tx` when dropped.
    pub fn get_allocator(&self) -> DescriptorSetAllocatorRef {
        // Relaxed: the mutex below orders pool access between threads; frame_index
        // is only a checkout stamp, not a synchronization point
        let frame_index = self.frame_index.load(Ordering::Relaxed);
        let allocator = {
            let mut allocators = self.allocators.lock().unwrap();
            // Reclaim anything returned since the last call so it can be reused
            Self::drain_drop_rx(&self.drop_rx, &mut *allocators, frame_index);

            allocators
                .pop_front()
                .map(|allocator| DescriptorSetAllocatorRefInner {
                    allocator,
                    checkout_frame: frame_index,
                })
        };

        // Pool was empty: create a new allocator for this device (outside the lock)
        let allocator = allocator.unwrap_or_else(|| {
            let allocator = Box::new(DescriptorSetAllocator::new(&self.device_context));

            DescriptorSetAllocatorRefInner {
                allocator,
                checkout_frame: frame_index,
            }
        });

        DescriptorSetAllocatorRef::new(allocator, self.drop_tx.clone())
    }

    /// Advances the frame counter, reclaims returned allocators, and ticks every
    /// pooled allocator so their deferred cleanup can make progress.
    pub fn on_frame_complete(&self) {
        // fetch_add returns the PREVIOUS value, i.e. the index of the frame that is
        // completing now. Allocators checked out during this frame thus do not get
        // an extra tick in drain_drop_rx; the loop below ticks them instead.
        let frame_index = self.frame_index.fetch_add(1, Ordering::Relaxed);
        let mut allocators = self.allocators.lock().unwrap();

        Self::drain_drop_rx(&self.drop_rx, &mut *allocators, frame_index);

        for allocator in allocators.iter_mut() {
            allocator.on_frame_complete();
        }
    }

    /// Destroys every pooled allocator, including any still sitting in the drop
    /// channel. Allocators held by outstanding refs are NOT destroyed here; callers
    /// are expected to have dropped all refs first.
    fn destroy(&self) -> RafxResult<()> {
        let frame_index = self.frame_index.load(Ordering::Relaxed);
        let mut allocators = self.allocators.lock().unwrap();

        Self::drain_drop_rx(&self.drop_rx, &mut *allocators, frame_index);

        // drain(..) already yields owned boxes; the previous .into_iter() was redundant
        for mut allocator in allocators.drain(..) {
            allocator.destroy()?;
        }

        Ok(())
    }
}

// A cloneable-by-construction handle (one per create_allocator_provider call) that
// hands out allocators from the manager's shared pool
pub struct DescriptorSetAllocatorProvider {
    // Shared with the owning DescriptorSetAllocatorManager
    inner: Arc<DescriptorSetAllocatorManagerInner>,
}

impl DescriptorSetAllocatorProvider {
    pub fn get_allocator(&self) -> DescriptorSetAllocatorRef {
        self.inner.get_allocator()
    }
}

// Public owner of the allocator pool. Hands out allocators directly or via
// DescriptorSetAllocatorProvider handles that share the same pool
pub struct DescriptorSetAllocatorManager {
    // Arc so providers and the manager can share one pool
    inner: Arc<DescriptorSetAllocatorManagerInner>,
}

impl DescriptorSetAllocatorManager {
    /// Creates a manager with a fresh, empty pool for the given device.
    pub fn new(device_context: &RafxDeviceContext) -> Self {
        let inner = DescriptorSetAllocatorManagerInner::new(device_context.clone());
        DescriptorSetAllocatorManager {
            inner: Arc::new(inner),
        }
    }

    /// Creates a lightweight handle that draws allocators from the same pool.
    pub fn create_allocator_provider(&self) -> DescriptorSetAllocatorProvider {
        DescriptorSetAllocatorProvider {
            inner: Arc::clone(&self.inner),
        }
    }

    /// Checks an allocator out of the shared pool.
    pub fn get_allocator(&self) -> DescriptorSetAllocatorRef {
        DescriptorSetAllocatorManagerInner::get_allocator(&self.inner)
    }

    /// Advances the frame counter and ticks all pooled allocators.
    #[profiling::function]
    pub fn on_frame_complete(&self) {
        self.inner.on_frame_complete();
    }

    /// Destroys all pooled allocators; outstanding refs should be dropped first.
    pub fn destroy(&mut self) -> RafxResult<()> {
        self.inner.destroy()
    }
}