use core::allocator_helper::AllocatorHelper;
use core::{self, Device};
use libc::c_void;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::mem;
use std::ptr;
use std::sync::Arc;
use vks;
use {TryDestroyError, TryDestroyErrorKind, VulkanObject};
/// Reference-counted wrapper around a Vulkan query pool (`VkQueryPool`).
///
/// Cloning is cheap (an `Arc` bump); the underlying pool is destroyed when the
/// last clone is dropped (see `Inner`'s `Drop` impl). Equality, ordering and
/// hashing are all delegated to the raw handle.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct QueryPool(Arc<Inner>);
impl VulkanObject for QueryPool {
    type NativeVulkanObject = vks::VkQueryPool;

    /// Exposes the raw `VkQueryPool` handle.
    #[inline]
    fn as_native_vulkan_object(&self) -> Self::NativeVulkanObject {
        self.handle()
    }

    /// Consumes this wrapper and destroys the pool, but only if this is the
    /// sole remaining reference; otherwise hands the wrapper back inside an
    /// `InUse` error carrying the observed reference count.
    fn try_destroy(self) -> Result<(), TryDestroyError<Self>> {
        match Arc::strong_count(&self.0) {
            // Last owner: dropping `self` here runs `Inner::drop`, which
            // destroys the Vulkan object.
            1 => Ok(()),
            count => Err(TryDestroyError::new(self, TryDestroyErrorKind::InUse(Some(count)))),
        }
    }
}
impl QueryPool {
    /// Wraps an existing `VkQueryPool` handle together with the `Device` that
    /// owns it and the (optional) allocation callbacks it was created with.
    pub(crate) fn new(handle: vks::VkQueryPool, device: Device, allocator: Option<AllocatorHelper>) -> Self {
        QueryPool(Arc::new(Inner {
            handle,
            device,
            allocator,
        }))
    }

    /// Raw Vulkan handle of this query pool.
    #[inline]
    pub(crate) fn handle(&self) -> vks::VkQueryPool {
        self.0.handle
    }

    /// Device-level function-pointer loader of the owning device.
    #[inline]
    pub(crate) fn loader(&self) -> &vks::DeviceProcAddrLoader {
        self.0.device.loader()
    }

    /// Raw handle of the owning device.
    #[inline]
    pub(crate) fn device_handle(&self) -> vks::VkDevice {
        self.0.device.handle()
    }

    /// Fetches results for `query_count` queries starting at `first_query` via
    /// `vkGetQueryPoolResults`, writing one value per element of `results`.
    ///
    /// `stride` is measured in result elements (u32 or u64, depending on
    /// whether `flags` contains `QUERY_RESULT_64_BIT`); it is converted to a
    /// byte stride before the FFI call.
    ///
    /// Returns `Ok(true)` on `VK_SUCCESS`, `Ok(false)` on `VK_NOT_READY`
    /// (results not all available yet), or the translated error otherwise.
    ///
    /// NOTE(review): the copy-back below zips the scratch buffer contiguously
    /// into `results`, which is only correct when the effective stride is one
    /// element; likewise `data_size` assumes tight packing — confirm callers
    /// never pass a larger `stride`.
    pub fn get_results(&self, first_query: u32, query_count: u32, stride: usize, flags: core::QueryResultFlags, results: &mut [core::QueryResult]) -> Result<bool, core::Error> {
        if flags.contains(core::QUERY_RESULT_64_BIT) {
            // Zero-initialized scratch buffer the driver overwrites. The
            // previous `Vec::with_capacity` + `set_len` handed uninitialized
            // memory across the FFI boundary, which is UB in Rust even for
            // integer types; zero-filling is behaviorally identical because
            // Vulkan writes every byte it reports as available.
            let mut data: Vec<u64> = vec![0; results.len()];
            let data_size = results.len() * mem::size_of::<u64>();
            let stride_u64 = (stride * mem::size_of::<u64>()) as u64;
            // SAFETY: `data` is a live, writable allocation of exactly
            // `data_size` bytes for the duration of the call.
            let res = unsafe {
                (self.loader().core.vkGetQueryPoolResults)(self.device_handle(), self.handle(), first_query, query_count, data_size, data.as_mut_ptr() as *mut c_void, stride_u64, flags)
            };
            match res {
                vks::VK_SUCCESS => {
                    for (&src, dst) in data.iter().zip(results.iter_mut()) {
                        *dst = core::QueryResult::U64(src);
                    }
                    Ok(true)
                }
                vks::VK_NOT_READY => Ok(false),
                _ => Err(res.into()),
            }
        }
        else {
            // 32-bit variant; mirrors the 64-bit branch above.
            let mut data: Vec<u32> = vec![0; results.len()];
            let data_size = results.len() * mem::size_of::<u32>();
            let stride_u32 = (stride * mem::size_of::<u32>()) as u64;
            // SAFETY: `data` is a live, writable allocation of exactly
            // `data_size` bytes for the duration of the call.
            let res = unsafe {
                (self.loader().core.vkGetQueryPoolResults)(self.device_handle(), self.handle(), first_query, query_count, data_size, data.as_mut_ptr() as *mut c_void, stride_u32, flags)
            };
            match res {
                vks::VK_SUCCESS => {
                    for (&src, dst) in data.iter().zip(results.iter_mut()) {
                        *dst = core::QueryResult::U32(src);
                    }
                    Ok(true)
                }
                vks::VK_NOT_READY => Ok(false),
                _ => Err(res.into()),
            }
        }
    }
}
/// Shared state behind `QueryPool`'s `Arc`.
#[derive(Debug)]
struct Inner {
// Raw Vulkan query pool handle; destroyed in `Drop`.
handle: vks::VkQueryPool,
// Owning device, kept alive so the handle stays valid and so `Drop` can
// reach the loader and device handle.
device: Device,
// Allocation callbacks used at creation time; must also be passed to
// `vkDestroyQueryPool` (None means the default allocator was used).
allocator: Option<AllocatorHelper>,
}
impl Drop for Inner {
fn drop(&mut self) {
let allocator = match self.allocator {
Some(ref allocator) => allocator.callbacks(),
None => ptr::null(),
};
unsafe {
(self.device.loader().core.vkDestroyQueryPool)(self.device.handle(), self.handle, allocator);
}
}
}
// NOTE(review): these impls assert that `Inner` — a raw Vulkan handle, a
// `Device`, and optional allocator callbacks — may be sent/shared across
// threads. Presumably sound since the handle is just an identifier, but
// confirm `AllocatorHelper`'s callbacks are thread-safe.
unsafe impl Send for Inner { }
unsafe impl Sync for Inner { }
impl PartialEq for Inner {
    /// Two `Inner`s are equal exactly when they wrap the same Vulkan handle;
    /// the device and allocator fields do not participate.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        other.handle == self.handle
    }
}
// Handle equality is reflexive, so the full equivalence relation holds.
impl Eq for Inner { }
impl PartialOrd for Inner {
    /// Delegates to the total order defined by `Ord` below, so the two
    /// orderings can never disagree (clippy: `non_canonical_partial_cmp_impl`).
    /// Behaviorally identical to comparing the handles directly, since integer
    /// handle comparison never yields `None`.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
// Total order on the raw handle; consistent with `PartialEq`/`Hash`, which
// also look only at `handle`.
impl Ord for Inner {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.handle.cmp(&other.handle)
}
}
// Hash only the raw handle, matching the `PartialEq` impl above so that
// equal values hash identically (required by the `Hash`/`Eq` contract).
impl Hash for Inner {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.handle.hash(state);
}
}