use super::internal::VkQueue;
use crate::vulkan::{
    RafxCommandBufferVulkan, RafxCommandPoolVulkan, RafxDeviceContextVulkan, RafxFenceVulkan,
    RafxSemaphoreVulkan, RafxSwapchainVulkan,
};
use crate::{RafxCommandPoolDef, RafxError, RafxPresentSuccessResult, RafxQueueType, RafxResult};
use ash::version::DeviceV1_0;
use ash::vk;

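/// A Vulkan queue of a particular `RafxQueueType`. The underlying `vk::Queue`
/// is guarded by a mutex, so a `RafxQueueVulkan` and its clones can be used
/// from multiple threads.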
#[derive(Clone, Debug)]
pub struct RafxQueueVulkan {
    device_context: RafxDeviceContextVulkan,
    queue: VkQueue,
    queue_type: RafxQueueType,
}

impl RafxQueueVulkan {
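    /// Packs the queue family index (upper 16 bits) and the queue index within
    /// that family (lower 16 bits) into a single id.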
    pub fn queue_id(&self) -> u32 {
        (self.queue.queue_family_index() << 16) | (self.queue.queue_index())
    }

    pub fn queue(&self) -> &VkQueue {
        &self.queue
    }

    pub fn queue_type(&self) -> RafxQueueType {
        self.queue_type
    }

    pub fn device_context(&self) -> &RafxDeviceContextVulkan {
        &self.device_context
    }

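    /// Creates a command pool for recording command buffers to submit to this
    /// queue.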
    pub fn create_command_pool(
        &self,
        command_pool_def: &RafxCommandPoolDef,
    ) -> RafxResult<RafxCommandPoolVulkan> {
        RafxCommandPoolVulkan::new(self, command_pool_def)
    }

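    /// Allocates a queue of the given type from the device context's queue
    /// allocator. Returns an error if all queues of that type have already been
    /// allocated.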
    pub fn new(
        device_context: &RafxDeviceContextVulkan,
        queue_type: RafxQueueType,
    ) -> RafxResult<RafxQueueVulkan> {
        let queue = match queue_type {
            RafxQueueType::Graphics => device_context
                .queue_allocator()
                .allocate_graphics_queue(&device_context),
            RafxQueueType::Compute => device_context
                .queue_allocator()
                .allocate_compute_queue(&device_context),
            RafxQueueType::Transfer => device_context
                .queue_allocator()
                .allocate_transfer_queue(&device_context),
        }
        .ok_or_else(|| format!("All queues of type {:?} already allocated", queue_type))?;

        Ok(RafxQueueVulkan {
            device_context: device_context.clone(),
            queue,
            queue_type,
        })
    }

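    /// Locks the queue and blocks via `vkQueueWaitIdle` until all work
    /// previously submitted to it has completed.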
    pub fn wait_for_queue_idle(&self) -> RafxResult<()> {
        let queue = self.queue.queue().lock().unwrap();
        unsafe {
            self.queue
                .device_context()
                .device()
                .queue_wait_idle(*queue)?;
        }

        Ok(())
    }

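    /// Submits the given command buffers to this queue.
    ///
    /// Wait semaphores that are not pending a signal are skipped (waiting on
    /// them would block forever), and signal semaphores that already have a
    /// pending signal are skipped. All waits use `ALL_COMMANDS` as the
    /// destination stage mask. If `signal_fence` is provided, it is passed to
    /// `vkQueueSubmit` (signaled when the batch completes) and marked as
    /// submitted.
    ///
    /// A minimal usage sketch, assuming a `device_context` and a recorded
    /// `command_buffer` created elsewhere (both hypothetical names):
    ///
    /// ```ignore
    /// let queue = RafxQueueVulkan::new(&device_context, RafxQueueType::Graphics)?;
    /// queue.submit(&[&command_buffer], &[], &[], None)?;
    /// queue.wait_for_queue_idle()?;
    /// ```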
    pub fn submit(
        &self,
        command_buffers: &[&RafxCommandBufferVulkan],
        wait_semaphores: &[&RafxSemaphoreVulkan],
        signal_semaphores: &[&RafxSemaphoreVulkan],
        signal_fence: Option<&RafxFenceVulkan>,
    ) -> RafxResult<()> {
        let mut command_buffer_list = Vec::with_capacity(command_buffers.len());
        for command_buffer in command_buffers {
            command_buffer_list.push(command_buffer.vk_command_buffer());
        }

        let mut wait_semaphore_list = Vec::with_capacity(wait_semaphores.len());
        let mut wait_dst_stage_mask = Vec::with_capacity(wait_semaphores.len());
        for wait_semaphore in wait_semaphores {
            // Don't wait on a semaphore that will never signal
            //TODO: Assert or fail here?
            if wait_semaphore.signal_available() {
                wait_semaphore_list.push(wait_semaphore.vk_semaphore());
                wait_dst_stage_mask.push(vk::PipelineStageFlags::ALL_COMMANDS);

                wait_semaphore.set_signal_available(false);
            }
        }

        let mut signal_semaphore_list = Vec::with_capacity(signal_semaphores.len());
        for signal_semaphore in signal_semaphores {
            // Don't signal a semaphore if something is already going to signal it
            //TODO: Assert or fail here?
            if !signal_semaphore.signal_available() {
                signal_semaphore_list.push(signal_semaphore.vk_semaphore());
                signal_semaphore.set_signal_available(true);
            }
        }

        let submit_info = vk::SubmitInfo::builder()
            .wait_semaphores(&wait_semaphore_list)
            .wait_dst_stage_mask(&wait_dst_stage_mask)
            .signal_semaphores(&signal_semaphore_list)
            .command_buffers(&command_buffer_list);

        let fence = signal_fence
            .map(|x| x.vk_fence())
            .unwrap_or(vk::Fence::null());
        unsafe {
            let queue = self.queue.queue().lock().unwrap();
            log::trace!(
                "submit {} command buffers to queue {:?}",
                command_buffer_list.len(),
                *queue
            );
            self.queue
                .device_context()
                .device()
                .queue_submit(*queue, &[*submit_info], fence)?;
        }

        if let Some(signal_fence) = signal_fence {
            signal_fence.set_submitted(true);
        }

        Ok(())
    }

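    /// Presents `image_index` of the given swapchain, waiting on any of the
    /// given semaphores that are pending a signal. Presents on the swapchain's
    /// dedicated present queue when one exists, otherwise on this queue.
    ///
    /// Returns `SuccessSuboptimal` when Vulkan reports the swapchain no longer
    /// matches the surface exactly, and maps `VK_ERROR_OUT_OF_DATE_KHR` to
    /// `DeviceReset` so the caller can rebuild the swapchain.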
    pub fn present(
        &self,
        swapchain: &RafxSwapchainVulkan,
        wait_semaphores: &[&RafxSemaphoreVulkan],
        image_index: u32,
    ) -> RafxResult<RafxPresentSuccessResult> {
        let mut wait_semaphore_list = Vec::with_capacity(wait_semaphores.len());
        for wait_semaphore in wait_semaphores {
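            // As in submit(), don't wait on a semaphore that will never signal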
            if wait_semaphore.signal_available() {
                wait_semaphore_list.push(wait_semaphore.vk_semaphore());
                wait_semaphore.set_signal_available(false);
            }
        }

        let swapchains = [swapchain.vk_swapchain()];
        let image_indices = [image_index];
        let present_info = vk::PresentInfoKHR::builder()
            .wait_semaphores(&wait_semaphore_list)
            .swapchains(&swapchains)
            .image_indices(&image_indices);

        //TODO: PresentInfoKHRBuilder::results() is only useful for presenting multiple swapchains -
        // presumably that's for multiwindow cases.

        let result = self.present_to_given_or_dedicated_queue(swapchain, &*present_info);

        match result {
            Ok(is_suboptimal) => {
                if is_suboptimal {
                    Ok(RafxPresentSuccessResult::SuccessSuboptimal)
                } else {
                    Ok(RafxPresentSuccessResult::Success)
                }
            }
            Err(e) => match e {
                RafxError::VkError(vk::Result::ERROR_OUT_OF_DATE_KHR) => {
                    Ok(RafxPresentSuccessResult::DeviceReset)
                }
                e => Err(e),
            },
        }
    }

    // Make sure we always use the dedicated queue if it exists
    fn present_to_given_or_dedicated_queue(
        &self,
        swapchain: &RafxSwapchainVulkan,
        present_info: &vk::PresentInfoKHR,
    ) -> RafxResult<bool> {
        let is_suboptimal =
            if let Some(dedicated_present_queue) = swapchain.dedicated_present_queue() {
                // Because of the way we search for present-compatible queues, we don't necessarily have
                // the same underlying mutex in all instances of a dedicated present queue. So fall back
                // to a single global lock
                let _dedicated_present_lock = self
                    .device_context
                    .dedicated_present_queue_lock()
                    .lock()
                    .unwrap();
                unsafe {
                    log::trace!(
                        "present to dedicated present queue {:?}",
                        dedicated_present_queue
                    );
                    swapchain
                        .vk_swapchain_loader()
                        .queue_present(dedicated_present_queue, present_info)?
                }
            } else {
                let queue = self.queue.queue().lock().unwrap();
                log::trace!("present to dedicated present queue {:?}", *queue);
                unsafe {
                    swapchain
                        .vk_swapchain_loader()
                        .queue_present(*queue, present_info)?
                }
            };

        Ok(is_suboptimal)
    }
}