use core::allocator_helper::AllocatorHelper;
use core::{self, Device};
use libc::c_void;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::ptr;
use std::sync::Arc;
use vks;
use {TryDestroyError, TryDestroyErrorKind, VulkanObject};
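
/// A `VkDeviceMemory` allocation owned by a `Device`.
///
/// Cloning is cheap: all clones share one reference-counted inner value, and
/// the underlying memory is freed with `vkFreeMemory` once the last clone is
/// dropped.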
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DeviceMemory(Arc<Inner>);
impl VulkanObject for DeviceMemory {
    type NativeVulkanObject = vks::VkDeviceMemory;

    #[inline]
    fn as_native_vulkan_object(&self) -> Self::NativeVulkanObject {
        self.handle()
    }

    fn try_destroy(self) -> Result<(), TryDestroyError<Self>> {
        let strong_count = Arc::strong_count(&self.0);
        if strong_count == 1 {
            // This is the last strong reference; dropping `self` releases it
            // and frees the memory via `Inner::drop`.
            Ok(())
        }
        else {
            Err(TryDestroyError::new(self, TryDestroyErrorKind::InUse(Some(strong_count))))
        }
    }
}

impl DeviceMemory {
pub(crate) fn new(handle: vks::VkDeviceMemory, device: Device, allocator: Option<AllocatorHelper>, size: u64) -> Self {
DeviceMemory(Arc::new(Inner {
handle: handle,
device: device,
allocator: allocator,
size: size,
}))
}
    #[inline]
    pub(crate) fn handle(&self) -> vks::VkDeviceMemory {
        self.0.handle
    }

    #[inline]
    pub(crate) fn loader(&self) -> &vks::DeviceProcAddrLoader {
        self.0.device.loader()
    }

    #[inline]
    pub(crate) fn device_handle(&self) -> vks::VkDevice {
        self.0.device.handle()
    }

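    /// Returns the size of this allocation in bytes, as requested when the
    /// memory was allocated.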
    pub fn size(&self) -> u64 {
        self.0.size
    }

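    /// Queries the number of bytes currently committed to this allocation via
    /// `vkGetDeviceMemoryCommitment`. This is primarily useful for memory
    /// allocated from a heap with the lazily-allocated property.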
    pub fn get_commitment(&self) -> u64 {
        let mut commitment = 0;
        unsafe {
            (self.loader().core.vkGetDeviceMemoryCommitment)(self.device_handle(), self.handle(), &mut commitment)
        };

        commitment
    }

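    /// Maps a region of this memory into host address space via `vkMapMemory`.
    ///
    /// With `OptionalDeviceSize::WholeSize`, the mapping extends from `offset`
    /// to the end of the allocation. The returned `MappedMemory` unmaps the
    /// region when it is dropped.
    ///
    /// A minimal sketch (assuming `memory` is host-visible and that
    /// `core::MemoryMapFlags` is a bitflags type providing `empty()`):
    ///
    /// ```ignore
    /// let mapped = memory.map(0, core::OptionalDeviceSize::WholeSize, core::MemoryMapFlags::empty())?;
    /// unsafe { ptr::write_bytes(mapped.as_ptr() as *mut u8, 0, mapped.size() as usize) };
    /// mapped.flush(&None)?; // not needed if the memory is host-coherent
    /// ```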
    pub fn map(&self, offset: u64, size: core::OptionalDeviceSize, flags: core::MemoryMapFlags) -> Result<MappedMemory, core::Error> {
        let mut mapped = ptr::null_mut();
        let res = unsafe {
            (self.loader().core.vkMapMemory)(self.device_handle(), self.handle(), offset, size.into(), flags, &mut mapped)
        };

        if res == vks::VK_SUCCESS {
            // Resolve `WholeSize` to a concrete byte count, so that
            // `MappedMemory::size` always reports the actual extent.
            let size = match size {
                core::OptionalDeviceSize::Size(size) => size,
                core::OptionalDeviceSize::WholeSize => self.0.size - offset,
            };

            Ok(MappedMemory {
                memory: self.clone(),
                mapped,
                offset,
                size,
            })
        }
        else {
            Err(res.into())
        }
    }

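    /// Flushes the given mapped memory ranges via `vkFlushMappedMemoryRanges`,
    /// making host writes visible to the device.
    ///
    /// All ranges must belong to the same `Device`, and `ranges` must not be
    /// empty: the loader and device handle are taken from the first range.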
    pub fn flush(ranges: &[core::MappedMemoryRange]) -> Result<(), core::Error> {
        let loader = ranges[0].memory.loader();
        let device_handle = ranges[0].memory.device_handle();

        // Keep the wrappers alive until after the call; the raw structs may
        // borrow memory (e.g. extension chains) owned by them.
        let ranges_wrappers: Vec<_> = ranges.iter().map(|r| core::VkMappedMemoryRangeWrapper::new(r, true)).collect();
        let ranges: Vec<_> = ranges_wrappers.iter().map(|r| r.vks_struct).collect();

        let res = unsafe {
            (loader.core.vkFlushMappedMemoryRanges)(device_handle, ranges.len() as u32, ranges.as_ptr())
        };

        if res == vks::VK_SUCCESS {
            Ok(())
        }
        else {
            Err(res.into())
        }
    }

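    /// Invalidates the given mapped memory ranges via
    /// `vkInvalidateMappedMemoryRanges`, making device writes visible to the
    /// host. The same restrictions as for `flush` apply: all ranges must
    /// belong to the same `Device`, and `ranges` must not be empty.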
    pub fn invalidate(ranges: &[core::MappedMemoryRange]) -> Result<(), core::Error> {
        let loader = ranges[0].memory.loader();
        let device_handle = ranges[0].memory.device_handle();

        let ranges_wrappers: Vec<_> = ranges.iter().map(|r| core::VkMappedMemoryRangeWrapper::new(r, true)).collect();
        let ranges: Vec<_> = ranges_wrappers.iter().map(|r| r.vks_struct).collect();

        let res = unsafe {
            (loader.core.vkInvalidateMappedMemoryRanges)(device_handle, ranges.len() as u32, ranges.as_ptr())
        };

        if res == vks::VK_SUCCESS {
            Ok(())
        }
        else {
            Err(res.into())
        }
    }
}
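
/// A host-visible view of a mapped `DeviceMemory` region.
///
/// Vulkan allows at most one active mapping per `VkDeviceMemory` at a time;
/// the region is unmapped automatically when this value is dropped.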
#[derive(Debug)]
pub struct MappedMemory {
    memory: DeviceMemory,
    mapped: *mut c_void,
    offset: u64,
    size: u64,
}

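// Dropping the view unmaps the allocation's mapping; keep it alive for as
// long as the host pointer is in use.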
impl Drop for MappedMemory {
    fn drop(&mut self) {
        unsafe {
            (self.memory.loader().core.vkUnmapMemory)(self.memory.device_handle(), self.memory.handle());
        }
    }
}

impl MappedMemory {
    /// Returns the host pointer to the start of the mapped region.
    pub fn as_ptr(&self) -> *mut c_void {
        self.mapped
    }

    /// Returns the offset of the mapped region within the allocation, in bytes.
    pub fn offset(&self) -> u64 {
        self.offset
    }

    /// Returns the size of the mapped region in bytes.
    pub fn size(&self) -> u64 {
        self.size
    }

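    /// Flushes this mapped region via `vkFlushMappedMemoryRanges`, making
    /// host writes visible to the device. `VK_WHOLE_SIZE` is passed, so the
    /// range covers everything from the mapping's offset to its end.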
    pub fn flush(&self, chain: &Option<core::MappedMemoryRangeChain>) -> Result<(), core::Error> {
        let (pnext, _) = core::MappedMemoryRangeChainWrapper::new_optional(chain, true);

        let range = vks::VkMappedMemoryRange {
            sType: vks::VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            pNext: pnext,
            memory: self.memory.handle(),
            offset: self.offset,
            size: vks::VK_WHOLE_SIZE,
        };

        let res = unsafe {
            (self.memory.loader().core.vkFlushMappedMemoryRanges)(self.memory.device_handle(), 1, &range)
        };

        if res == vks::VK_SUCCESS {
            Ok(())
        }
        else {
            Err(res.into())
        }
    }

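    /// Invalidates this mapped region via `vkInvalidateMappedMemoryRanges`,
    /// making device writes visible to the host.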
    pub fn invalidate(&self, chain: &Option<core::MappedMemoryRangeChain>) -> Result<(), core::Error> {
        let (pnext, _) = core::MappedMemoryRangeChainWrapper::new_optional(chain, true);

        let range = vks::VkMappedMemoryRange {
            sType: vks::VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            pNext: pnext,
            memory: self.memory.handle(),
            offset: self.offset,
            size: vks::VK_WHOLE_SIZE,
        };

        let res = unsafe {
            (self.memory.loader().core.vkInvalidateMappedMemoryRanges)(self.memory.device_handle(), 1, &range)
        };

        if res == vks::VK_SUCCESS {
            Ok(())
        }
        else {
            Err(res.into())
        }
    }
}
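
/// Shared state behind `DeviceMemory` clones; the `VkDeviceMemory` handle is
/// freed when the last clone drops this value.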
#[derive(Debug)]
struct Inner {
    handle: vks::VkDeviceMemory,
    device: Device,
    allocator: Option<AllocatorHelper>,
    size: u64,
}

impl Drop for Inner {
    fn drop(&mut self) {
        // Free with the same allocation callbacks that were used when the
        // memory was allocated, if any.
        let allocator = match self.allocator {
            Some(ref allocator) => allocator.callbacks(),
            None => ptr::null(),
        };

        unsafe {
            (self.device.loader().core.vkFreeMemory)(self.device.handle(), self.handle, allocator);
        }
    }
}

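// `Inner` is not automatically `Send`/`Sync` because `VkDeviceMemory` may be
// a raw pointer type on some targets; the handle is only an opaque identifier,
// so moving and sharing it across threads is sound.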
unsafe impl Send for Inner { }
unsafe impl Sync for Inner { }
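
// Equality, ordering, and hashing are based solely on the raw handle, so two
// `DeviceMemory` values compare equal iff they wrap the same `VkDeviceMemory`.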
impl PartialEq for Inner {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.handle == other.handle
    }
}

impl Eq for Inner { }

impl PartialOrd for Inner {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.handle.partial_cmp(&other.handle)
    }
}

impl Ord for Inner {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        self.handle.cmp(&other.handle)
    }
}

impl Hash for Inner {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.handle.hash(state);
    }
}