//    ash-tray 0.19.0
//
//    A Tray to host Ash with Winit
//    Copyright 2019 Michael Mestnik

//    Licensed under the Apache License, Version 2.0 (the "License");
//    you may not use this file except in compliance with the License.
//    You may obtain a copy of the License at

//        http://www.apache.org/licenses/LICENSE-2.0

//    Unless required by applicable law or agreed to in writing, software
//    distributed under the License is distributed on an "AS IS" BASIS,
//    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//    See the License for the specific language governing permissions and
//    limitations under the License.

//! # Helpers for calling destroy() on ash objects
//!
//! In alphabetical order
//!

use std::cell::{Cell, RefCell, RefMut};
use std::collections::HashMap;
use std::convert::TryInto;
use std::mem::ManuallyDrop;
use std::ops::Deref;
use std::rc::Rc;
use std::slice;

use crate::vk_mem::{
    Allocation, AllocationCreateInfo, AllocationInfo, Allocator as VkMemAllocator,
    AllocatorCreateInfo,
};
use crate::winit::window::Window;
use ash::extensions::khr::{AccelerationStructure, RayTracingPipeline, Surface, Swapchain};
use ash::{vk, Device as VkDevice, Entry, Instance as VkInstance, LoadingError};
use derivative::*;
use snafu::{ResultExt, Snafu};

/// In the order used
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("vk::create_acceleration_structure() error: {}", source))]
    VkCreateAccelerationStructure { source: vk::Result },
    #[snafu(display(
        "vk_mem::create_acceleration_structure_scratch_buffer() error: {}",
        source
    ))]
    VkMemCreateAccelerationStructureScratchBuffer { source: vk::Result },
    #[snafu(display("vk_mem::Allocator creation error: {}", source))]
    VkMemAllocatorNew { source: vk::Result },
    #[snafu(display("vk_mem::create_buffer() error: {}", source))]
    VkMemCreateBuffer { source: vk::Result },
    #[snafu(display("vk_mem::get_allocation_info() error: {}", source))]
    VkMemGetAllocationInfo { source: vk::Result },
    #[snafu(display("bad size and offset"))]
    GoodSizeAndOffset,
    #[snafu(display("vk_mem::flush_allocation() error: {}", source))]
    VkMemFlushAllocation { source: vk::Result },
    #[snafu(display("vk::allocate_command_buffers() error: {}", source))]
    VkAllocateCommandBuffers { source: vk::Result },
    #[snafu(display("Current pipeline layout as ref none."))]
    CurrentPipelineLayoutAsRef,
    #[snafu(display("Scratch data.acceleration structure as ref none."))]
    ScratchDataAccelerationStructure,
    #[snafu(display("Semaphore type Timeline used when Binary expected."))]
    WrongSemaphoreType,
    #[snafu(display("vk::queue_submit() error: {}", source))]
    VkQueueSubmit { source: vk::Result },
    #[snafu(display("vk::wait_for_fences() error: {}", source))]
    VkWaitForFences { source: vk::Result },
    #[snafu(display("vk::wait_semaphores() error: {}", source))]
    VkWaitSemaphores { source: vk::Result },
    #[snafu(display("vk::reset_fences() error: {}", source))]
    VkResetFences { source: vk::Result },
    #[snafu(display("vk::create_command_pool() error: {}", source))]
    VkCreateCommandPool { source: vk::Result },
    #[snafu(display("vk::create_descriptor_pool() error: {}", source))]
    VkCreateDescriptorPool { source: vk::Result },
    #[snafu(display("vk::allocate_descriptor_sets() error: {}", source))]
    VkAllocateDescriptorSets { source: vk::Result },
    #[snafu(display("Descriptor sets empty."))]
    DescriptorSetsEmpty,
    #[snafu(display("Descriptor count not found."))]
    DescriptorCountNotFound,
    #[snafu(display("vk::create_descriptor_set_layout() error: {}", source))]
    VkCreateDescriptorSetLayout { source: vk::Result },
    #[snafu(display("vk::create_device() error: {}", source))]
    VkCreateDevice { source: vk::Result },
    #[snafu(display("vk::create_fence() error: {}", source))]
    VkCreateFence { source: vk::Result },
    #[snafu(display("vk::create_framebuffer() error: {}", source))]
    VkCreateFramebuffer { source: vk::Result },
    #[snafu(display("vk_mem::create_image() error: {}", source))]
    VkMemCreateImage { source: vk::Result },
    #[snafu(display("vk::create_image_view() error: {}", source))]
    VkCreateImageView { source: vk::Result },
    #[snafu(display("ash::Entry::new() error: {}", source))]
    AshLibLoading { source: LoadingError },
    #[snafu(display("vk::create_instance() error: {}", source))]
    VkCreateInstance { source: vk::Result },
    #[snafu(display("Shader didn't have stage."))]
    HasShaderStage,
    #[snafu(display("vk::create_graphics_pipelines() error: {}", source))]
    VkCreateGraphicsPipelines { source: vk::Result },
    #[snafu(display("vk::create_pipeline_layout() error: {}", source))]
    VkCreatePipelineLayout { source: vk::Result },
    #[snafu(display("vk::create_ray_tracing_pipelines() error: {}", source))]
    VkCreateRayTracingPipelines { source: vk::Result },
    #[snafu(display("vk::create_render_pass() error: {}", source))]
    VkCreateRenderPass { source: vk::Result },
    #[snafu(display("vk::create_sampler() error: {}", source))]
    VkCreateSampler { source: vk::Result },
    #[snafu(display("vk::create_semaphore() error: {}", source))]
    VkCreateSemaphore { source: vk::Result },
    #[snafu(display("vk::create_shader_module() error: {}", source))]
    VkCreateShaderModule { source: vk::Result },
    #[snafu(display("vk::create_surface() error: {}", source))]
    VkCreateSurface { source: vk::Result },
    #[snafu(display("vk::enumerate_physical_devices() error: {}", source))]
    VkEnumeratePhysicalDevices { source: vk::Result },
    #[snafu(display("vk::enumerate_physical_devices() none sutable"))]
    VkEnumeratePhysicalDevicesNoneSuitable,
    #[snafu(display("vk::get_physical_device_surface_formats() error: {}", source))]
    VkGetPhysicalDeviceSurfaceFormats { source: vk::Result },
    #[snafu(display("vk::get_physical_device_surface_formats() none sutable"))]
    VkGetPhysicalDeviceSurfaceFormatsNoneSuitable,
    #[snafu(display("vk::create_swapchain() error: {}", source))]
    VkCreateSwapchain { source: vk::Result },
    #[snafu(display("vk::get_swapchain_images() error: {}", source))]
    VkGetSwapchainImages { source: vk::Result },
    #[snafu(display("Ran out of sync objects"))]
    RanOutSyncObjects,
    #[snafu(display("vk::acquire_next_images() error: {}", source))]
    VkAcquireNextImages { source: vk::Result },
    #[snafu(display("vk::queue_present() error: {}", source))]
    VkQueuePresent { source: vk::Result },
}

/// Convenience alias: every fallible call in this crate returns [`Error`].
pub type Result<T> = std::result::Result<T, Error>;

impl<T> AccelerationStructureKHR<T> {
    pub fn new(
        create_info: vk::AccelerationStructureCreateInfoKHRBuilder,
        buffer: Buffer<T>,
        user: T,
    ) -> Result<Self> {
        let allocation_info = unsafe {
            buffer
                .allocator
                .borrow()
                .get_allocation_info(&buffer.allocation)
        }
        .unwrap();
        let create_info = create_info
            .buffer(**buffer)
            .offset(allocation_info.offset.try_into().unwrap())
            .size(allocation_info.size.try_into().unwrap());
        Ok(Self(Rc::new(RcAccelerationStructureKHR {
            inner: unsafe {
                buffer
                    .allocator
                    .device
                    .acceleration_structure_fn
                    .create_acceleration_structure(&create_info, None)
            }
            .context(VkCreateAccelerationStructureSnafu {})?,
            buffer,
            device_address: Default::default(),
            user,
        })))
    }

    pub fn get_device_address(&self) -> vk::DeviceAddress {
        let as_ = ***self;
        *self.device_address.borrow_mut().get_or_insert_with(|| {
            let dev_addr_info = &vk::AccelerationStructureDeviceAddressInfoKHR::builder()
                .acceleration_structure(as_);
            unsafe {
                self.buffer
                    .allocator
                    .device
                    .acceleration_structure_fn
                    .get_acceleration_structure_device_address(dev_addr_info)
            }
        })
    }
}

/// Reference-counted payload behind `AccelerationStructureKHR`; keeps the
/// backing buffer alive and destroys the raw handle on drop.
pub struct RcAccelerationStructureKHR<T> {
    /// Raw Vulkan handle.
    pub inner: vk::AccelerationStructureKHR,
    /// Buffer providing the backing storage for `inner`.
    pub buffer: Buffer<T>,
    /// Lazily-populated device-address cache (see `get_device_address`).
    pub device_address: RefCell<Option<vk::DeviceAddress>>,
    /// Caller-supplied user data.
    pub user: T,
}

impl<T> Drop for RcAccelerationStructureKHR<T> {
    fn drop(&mut self) {
        // SAFETY: `inner` was created through this device's acceleration
        // structure loader, and Drop runs only once no other reference to
        // the handle remains.
        unsafe {
            self.buffer
                .allocator
                .device
                .acceleration_structure_fn
                .destroy_acceleration_structure(**self, None)
        };
    }
}

impl<T> Allocator<T> {
    /// Builds a vk-mem allocator for `physical_device`, keeping `device`
    /// alive for as long as the allocator exists.
    ///
    /// The `instance`, `physical_device` and `device` fields of
    /// `create_info` are overwritten from the handles passed in.
    ///
    /// # Errors
    /// [`Error::VkMemAllocatorNew`] if vk-mem fails to create the allocator.
    pub fn new(
        instance: Instance<T>,
        physical_device: vk::PhysicalDevice,
        device: Device<T>,
        mut create_info: AllocatorCreateInfo<RcInstance<T>, RcDevice<T>>,
        user: T,
    ) -> Result<Self> {
        // Point the caller-supplied create info at the handles we manage.
        create_info.physical_device = physical_device;
        create_info.instance = instance.inner.clone();
        create_info.device = device.0.clone();
        let raw = VkMemAllocator::new(create_info).context(VkMemAllocatorNewSnafu {})?;
        let rc = RcAllocator {
            inner: Rc::new(RefCell::new(raw)),
            physical_device,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted payload behind `Allocator`; ties the vk-mem allocator's
/// lifetime to the device it allocates from.
pub struct RcAllocator<T> {
    /// The vk-mem allocator, interior-mutable because its API needs `&mut`.
    pub inner: Rc<RefCell<VkMemAllocator>>,
    /// Physical device the allocator was created for.
    pub physical_device: vk::PhysicalDevice,
    /// Logical device; kept alive for the allocator's lifetime.
    pub device: Device<T>,
    /// Caller-supplied user data.
    pub user: T,
}

/// Dereferences to the shared `RefCell` so callers can `borrow()` /
/// `borrow_mut()` the vk-mem allocator directly.
impl<T> Deref for RcAllocator<T> {
    type Target = RefCell<VkMemAllocator>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> Buffer<T> {
    pub fn new(
        allocator: Allocator<T>,
        create_info: &vk::BufferCreateInfo,
        allocation_create_info: &AllocationCreateInfo,
        user: T,
    ) -> Result<Self> {
        use crate::vk_mem::Alloc;
        let (inner, allocation) = {
            let allocator_mut: RefMut<VkMemAllocator> = allocator.borrow_mut();
            unsafe { allocator_mut.create_buffer(create_info, allocation_create_info) }
                .context(VkMemCreateBufferSnafu {})?
        };
        Ok(Self(Rc::new(RcBuffer {
            inner,
            allocator,
            allocation: ManuallyDrop::new(allocation),
            acceleration_structure: None,
            device_address: Default::default(),
            user,
        })))
    }

    pub fn get_device_address(&self) -> vk::DeviceOrHostAddressConstKHR {
        let buffer = ***self;
        let device = &self.allocator.device;
        *self.device_address.borrow_mut().get_or_insert_with(|| {
            let mut ret = vk::DeviceOrHostAddressConstKHR::default();
            let buffer_address_info = &vk::BufferDeviceAddressInfoKHR::builder().buffer(buffer);
            let buffer_handle = unsafe { device.get_buffer_device_address(buffer_address_info) };
            ret.device_address = buffer_handle;
            ret
        })
    }

    pub fn flush(&self, offset: usize, size: usize) -> Result<()> {
        let allocatior_mut = self.allocator.borrow_mut();
        let max_size = unsafe { allocatior_mut.get_allocation_info(&self.allocation) }
            .context(VkMemGetAllocationInfoSnafu {})?
            .size as usize
            - offset;
        allocatior_mut
            .flush_allocation(
                &self.allocation,
                offset,
                std::num::NonZeroUsize::new(size)
                    .or_else(|| std::num::NonZeroUsize::new(max_size))
                    .ok_or(Error::GoodSizeAndOffset)?
                    .get(),
            )
            .context(VkMemFlushAllocationSnafu)
    }
}

/// Reference-counted payload behind `Buffer`; owns the vk-mem allocation and
/// destroys both buffer and allocation on drop.
pub struct RcBuffer<T> {
    /// Raw Vulkan buffer handle.
    pub inner: vk::Buffer,
    /// Allocator the buffer was created from; kept alive for cleanup.
    pub allocator: Allocator<T>,
    /// ManuallyDrop so Drop can move the allocation into `destroy_buffer`.
    pub allocation: std::mem::ManuallyDrop<Allocation>,
    /// Acceleration structure (and its device address) built on this buffer,
    /// if any.
    pub acceleration_structure: Option<(AccelerationStructureKHR<T>, vk::DeviceAddress)>,
    /// Lazily-populated device-address cache (see `get_device_address`).
    pub device_address: RefCell<Option<vk::DeviceOrHostAddressConstKHR>>,
    /// Caller-supplied user data.
    pub user: T,
}

impl<T> Drop for RcBuffer<T> {
    fn drop(&mut self) {
        let allocator_mut: RefMut<VkMemAllocator> = self.allocator.borrow_mut();
        // SAFETY: `allocation` is taken exactly once, here in Drop, so it is
        // still live; the `ManuallyDrop` wrapper prevents a double free.
        unsafe { allocator_mut.destroy_buffer(**self, ManuallyDrop::take(&mut self.allocation)) };
    }
}

/// A resource referenced by recorded commands; `CommandBuffer` clones one of
/// these per use so the resource outlives the submission.
///
/// Derivative's `Clone(bound = "")` makes the enum cloneable without
/// requiring `T: Clone` (the variants are all Rc-backed wrappers).
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub enum CommandBufferContents<T> {
    AccelerationStructure(AccelerationStructureKHR<T>),
    Buffer(Buffer<T>),
    DescriptorSet(DescriptorSet<T>),
    Image(Image<T>),
    Framebuffer(Framebuffer<T>),
    Pipeline(Pipeline<T>),
}

/// How a command buffer's completion is signalled: a classic fence, or a
/// timeline semaphore plus the next value to signal it with.
pub enum CommandBufferFence<T> {
    Fence(Fence<T>),
    TimelineSemaphore(TimelineSemaphore<T>, Cell<u64>),
}

impl<T> CommandBuffer<T> {
    /// Allocates a single primary command buffer from `command_pool`.
    ///
    /// `create_info` is overridden to target `command_pool` and to allocate
    /// exactly one buffer, so indexing `[0]` below cannot go out of bounds.
    pub fn new(
        command_pool: CommandPool<T>,
        fence: CommandBufferFence<T>,
        create_info: vk::CommandBufferAllocateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        let create_info = create_info
            .command_pool(**command_pool)
            .command_buffer_count(1);
        Ok(Self(Rc::new(RcCommandBuffer {
            inner: unsafe { command_pool.device.allocate_command_buffers(&create_info) }
                .context(VkAllocateCommandBuffersSnafu {})?[0],
            command_pool,
            contents: Default::default(),
            current_pipeline_layout: Default::default(),
            fence,
            queue: Default::default(),
            wait_timeline_semaphores: Default::default(),
            wait_binary_semaphores: Default::default(),
            signal_timeline_semaphores: Default::default(),
            signal_binary_semaphores: Default::default(),
            user,
        })))
    }

    /// Records a pipeline barrier, retaining every buffer and image named in
    /// the barriers in `contents` until the next `wait_and_reset`.
    ///
    /// The buffer/image handles in each builder are overwritten from the
    /// paired wrapper before recording.
    pub fn pipeline_barrier(
        &self,
        src_stage_mask: vk::PipelineStageFlags,
        dst_stage_mask: vk::PipelineStageFlags,
        dependency_flags: vk::DependencyFlags,
        memory_barriers: &[vk::MemoryBarrier],
        buffer_memory_barriers: Vec<(Buffer<T>, vk::BufferMemoryBarrierBuilder)>,
        image_memory_barriers: Vec<(Image<T>, vk::ImageMemoryBarrierBuilder)>,
    ) {
        // Split each (wrapper, builder) pair into a retained clone and the
        // finalized raw barrier.
        let (mut buffers, buffer_memory_barriers): (Vec<_>, Vec<_>) = buffer_memory_barriers
            .into_iter()
            .map(|(buffer, barrier)| {
                (
                    CommandBufferContents::Buffer(buffer.clone()),
                    *barrier.buffer(**buffer),
                )
            })
            .unzip();
        let (mut images, image_memory_barriers): (Vec<_>, Vec<_>) = image_memory_barriers
            .into_iter()
            .map(|(image, barrier)| {
                (
                    CommandBufferContents::Image(image.clone()),
                    *barrier.image(**image),
                )
            })
            .unzip();
        unsafe {
            self.command_pool.device.cmd_pipeline_barrier(
                ***self,
                src_stage_mask,
                dst_stage_mask,
                dependency_flags,
                memory_barriers,
                &buffer_memory_barriers[..],
                &image_memory_barriers[..],
            )
        };
        self.contents.borrow_mut().append(&mut buffers);
        self.contents.borrow_mut().append(&mut images);
    }

    /// Records a buffer-to-image copy and retains both resources.
    pub fn copy_buffer_to_image(
        &self,
        src_buffer: Buffer<T>,
        dst_image: Image<T>,
        dst_image_layout: vk::ImageLayout,
        regions: &[vk::BufferImageCopy],
    ) {
        unsafe {
            self.command_pool.device.cmd_copy_buffer_to_image(
                ***self,
                **src_buffer,
                **dst_image,
                dst_image_layout,
                regions,
            )
        };
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Buffer(src_buffer));
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Image(dst_image));
    }

    /// Records an image-to-image copy and retains both images.
    pub fn copy_image(
        &self,
        src_image: Image<T>,
        src_image_layout: vk::ImageLayout,
        dst_image: Image<T>,
        dst_image_layout: vk::ImageLayout,
        regions: &[vk::ImageCopy],
    ) {
        unsafe {
            self.command_pool.device.cmd_copy_image(
                ***self,
                **src_image,
                src_image_layout,
                **dst_image,
                dst_image_layout,
                regions,
            )
        };
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Image(src_image));
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Image(dst_image));
    }

    /// Begins a render pass with inline subpass contents, retaining the
    /// framebuffer (which in turn keeps its render pass alive).
    ///
    /// The builder's framebuffer and render-pass handles are filled in from
    /// `framebuffer`.
    pub fn begin_render_pass(
        &self,
        framebuffer: Framebuffer<T>,
        begin_info: vk::RenderPassBeginInfoBuilder,
    ) {
        let begin_info = begin_info
            .framebuffer(**framebuffer)
            .render_pass(**framebuffer.render_pass);
        unsafe {
            self.command_pool.device.cmd_begin_render_pass(
                ***self,
                &begin_info,
                vk::SubpassContents::INLINE,
            );
        }
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Framebuffer(framebuffer));
    }

    /// Binds vertex buffers starting at `first_binding` and retains them all.
    pub fn bind_vertex_buffers(
        &self,
        first_binding: u32,
        buffers: Vec<Buffer<T>>,
        offsets: &[vk::DeviceSize],
    ) {
        unsafe {
            self.command_pool.device.cmd_bind_vertex_buffers(
                ***self,
                first_binding,
                &buffers.iter().map(|b| ***b).collect::<Vec<_>>(),
                offsets,
            );
            self.contents.borrow_mut().append(
                &mut buffers
                    .into_iter()
                    .map(CommandBufferContents::Buffer)
                    .collect::<Vec<_>>(),
            );
        }
    }

    /// Binds an index buffer and retains it.
    pub fn bind_index_buffer(
        &self,
        buffer: Buffer<T>,
        offset: vk::DeviceSize,
        index_type: vk::IndexType,
    ) {
        unsafe {
            self.command_pool
                .device
                .cmd_bind_index_buffer(***self, **buffer, offset, index_type);
        }
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Buffer(buffer));
    }

    /// Binds `pipeline` at its own bind point, remembers its layout for later
    /// `bind_descriptor_sets` calls, and retains the pipeline.
    pub fn bind_pipeline(&self, pipeline: Pipeline<T>) {
        unsafe {
            self.command_pool.device.cmd_bind_pipeline(
                ***self,
                pipeline.pipeline_bind_point,
                **pipeline,
            );
        }
        self.current_pipeline_layout
            .replace(Some(pipeline.pipeline_layout.clone()));
        self.contents
            .borrow_mut()
            .push(CommandBufferContents::Pipeline(pipeline));
    }

    /// Binds descriptor sets against the most recently bound pipeline's
    /// layout and retains the sets.
    ///
    /// NOTE(review): the bind point is hard-coded to GRAPHICS even though
    /// `bind_pipeline` accepts any bind point — confirm compute/ray-tracing
    /// descriptor binding is handled elsewhere.
    ///
    /// # Errors
    /// [`Error::CurrentPipelineLayoutAsRef`] if no pipeline was bound first.
    pub fn bind_descriptor_sets(
        &self,
        first_set: u32,
        descriptor_sets: Vec<DescriptorSet<T>>,
        dynamic_offsets: &[u32],
    ) -> Result<()> {
        unsafe {
            self.command_pool.device.cmd_bind_descriptor_sets(
                ***self,
                vk::PipelineBindPoint::GRAPHICS,
                ***self
                    .current_pipeline_layout
                    .borrow()
                    .as_ref()
                    .ok_or(Error::CurrentPipelineLayoutAsRef)?,
                first_set,
                &descriptor_sets.iter().map(|b| ***b).collect::<Vec<_>>(),
                dynamic_offsets,
            );
        }
        self.contents.borrow_mut().append(
            &mut descriptor_sets
                .into_iter()
                .map(CommandBufferContents::DescriptorSet)
                .collect::<Vec<_>>(),
        );
        Ok(())
    }

    /// Records a ray-tracing dispatch; each shader-binding-table region is a
    /// (backing buffer, address region) pair and the buffers are retained.
    #[allow(clippy::too_many_arguments)]
    pub fn trace_rays(
        &self,
        raygen: (Buffer<T>, vk::StridedDeviceAddressRegionKHRBuilder),
        miss: (Buffer<T>, vk::StridedDeviceAddressRegionKHRBuilder),
        hit: (Buffer<T>, vk::StridedDeviceAddressRegionKHRBuilder),
        callable: (Option<Buffer<T>>, vk::StridedDeviceAddressRegionKHRBuilder),
        width: u32,
        height: u32,
        depth: u32,
    ) {
        let mut contents = self.contents.borrow_mut();
        contents.push(CommandBufferContents::Buffer(raygen.0));
        contents.push(CommandBufferContents::Buffer(miss.0));
        contents.push(CommandBufferContents::Buffer(hit.0));
        if let Some(buffer) = callable.0 {
            contents.push(CommandBufferContents::Buffer(buffer));
        }
        unsafe {
            self.command_pool.device.ray_tracing_fn.cmd_trace_rays(
                ***self,
                &raygen.1,
                &miss.1,
                &hit.1,
                &callable.1,
                width,
                height,
                depth,
            );
        }
    }
}

/// Geometry payloads for an acceleration-structure build, pairing each
/// geometry kind with the buffer(s) it reads from.
pub enum AccelerationStructureGeometryContents<T> {
    /// Axis-aligned bounding boxes sourced from a buffer.
    Aabbs(Buffer<T>),
    /// Instance data plus the acceleration structures it references.
    Instance(Buffer<T>, Vec<AccelerationStructureKHR<T>>),
    /// Triangle mesh; index and transform buffers are optional.
    Triangle {
        vertex_data: Buffer<T>,
        index_data: Option<Buffer<T>>,
        transform_data: Option<Buffer<T>>,
    },
}

impl<T> CommandBuffer<T> {
    /// Records an acceleration-structure build command.
    ///
    /// For each geometry the builder's type and data addresses are filled in
    /// from the paired [`AccelerationStructureGeometryContents`]; every
    /// buffer and acceleration structure touched is retained in `contents`.
    /// The scratch address comes from `scratch_data`'s stored acceleration-
    /// structure pairing.
    ///
    /// # Errors
    /// [`Error::ScratchDataAccelerationStructure`] if `scratch_data` has no
    /// acceleration-structure pairing; [`Error::VkMemGetAllocationInfo`] if
    /// an instance buffer's allocation cannot be queried.
    pub fn build_acceleration_structure(
        &self,
        src_acceleration_structure: Option<AccelerationStructureKHR<T>>,
        dst_acceleration_structure: AccelerationStructureKHR<T>,
        geometries: &mut [(
            vk::AccelerationStructureGeometryKHRBuilder,
            AccelerationStructureGeometryContents<T>,
        )],
        scratch_data: Buffer<T>,
        info: vk::AccelerationStructureBuildGeometryInfoKHRBuilder,
        offset_infos: &[vk::AccelerationStructureBuildRangeInfoKHR],
    ) -> Result<()> {
        let mut contents = self.contents.borrow_mut();
        let mut scratch_handler = vk::DeviceOrHostAddressKHR::default();
        scratch_handler.device_address = scratch_data
            .acceleration_structure
            .as_ref()
            .ok_or(Error::ScratchDataAccelerationStructure)?
            .1;
        let geometries = geometries
            .iter_mut()
            .map(
                |(geometry, c)| -> Result<vk::AccelerationStructureGeometryKHR> {
                    use vk::GeometryTypeKHR;
                    use AccelerationStructureGeometryContents::*;
                    let geometry = &mut *geometry;
                    match c {
                        Aabbs(data) => {
                            geometry.geometry_type = GeometryTypeKHR::AABBS;
                            contents.push(CommandBufferContents::Buffer(data.clone()));
                            geometry.geometry.aabbs.data = data.get_device_address();
                        }
                        Instance(data, handels) => {
                            geometry.geometry_type = GeometryTypeKHR::INSTANCES;
                            contents.push(CommandBufferContents::Buffer(data.clone()));
                            geometry.geometry.instances.data = data.get_device_address();
                            // Write each referenced structure's handle into
                            // the (persistently mapped) instance buffer.
                            // NOTE(review): assumes the allocation is mapped
                            // (`mapped_data` non-null) — confirm the buffer
                            // is created host-visible and mapped.
                            let allocation_info = unsafe {
                                data.allocator
                                    .clone()
                                    .borrow_mut()
                                    .get_allocation_info(&data.allocation)
                            }
                            .context(VkMemGetAllocationInfoSnafu {})?;
                            let (mapped_data, mapped_size) =
                                (allocation_info.mapped_data, allocation_info.size);
                            let mut vertex_align = unsafe {
                                ash::util::Align::<vk::AccelerationStructureInstanceKHR>::new(
                                    std::ptr::NonNull::new_unchecked(mapped_data)
                                        .cast()
                                        .as_mut(),
                                    std::mem::align_of::<vk::AccelerationStructureInstanceKHR>()
                                        as u64,
                                    mapped_size as _,
                                )
                            };
                            handels
                                .iter()
                                .zip(vertex_align.iter_mut())
                                .for_each(|(as_, asi)| {
                                    contents.push(CommandBufferContents::AccelerationStructure(
                                        as_.clone(),
                                    ));
                                    asi.acceleration_structure_reference =
                                        vk::AccelerationStructureReferenceKHR {
                                            host_handle: ***as_,
                                        };
                                });
                        }
                        Triangle {
                            vertex_data,
                            index_data,
                            transform_data,
                        } => {
                            geometry.geometry_type = GeometryTypeKHR::TRIANGLES;
                            contents.push(CommandBufferContents::Buffer(vertex_data.clone()));
                            geometry.geometry.triangles.vertex_data =
                                vertex_data.get_device_address();
                            geometry.geometry.triangles.index_data = match index_data {
                                Some(ref mut b) => {
                                    contents.push(CommandBufferContents::Buffer(b.clone()));
                                    b.get_device_address()
                                }
                                None => Default::default(),
                            };
                            geometry.geometry.triangles.transform_data = match transform_data {
                                Some(ref mut b) => {
                                    contents.push(CommandBufferContents::Buffer(b.clone()));
                                    b.get_device_address()
                                }
                                None => Default::default(),
                            };
                        }
                    };
                    Ok(**geometry)
                },
            )
            .collect::<Result<Vec<_>>>()?;
        let info = (match src_acceleration_structure {
            Some(as_) => {
                contents.push(CommandBufferContents::AccelerationStructure(as_.clone()));
                info.src_acceleration_structure(**as_)
            }
            None => info,
        })
        .dst_acceleration_structure(**dst_acceleration_structure)
        .geometries(&geometries)
        .scratch_data(scratch_handler);
        contents.push(CommandBufferContents::Buffer(scratch_data));
        contents.push(CommandBufferContents::AccelerationStructure(
            dst_acceleration_structure,
        ));
        unsafe {
            self.command_pool
                .device
                .acceleration_structure_fn
                .cmd_build_acceleration_structures(
                    ***self,
                    slice::from_ref(&info),
                    slice::from_ref(&offset_infos),
                );
        };
        Ok(())
    }

    /// Submits this command buffer to `queue`, waiting on and signalling the
    /// given semaphores, and retains everything until the next reset.
    ///
    /// Timeline semaphores always come first in the raw semaphore arrays so
    /// their values line up with `TimelineSemaphoreSubmitInfo`. When the
    /// completion fence is itself a timeline semaphore, it is inserted at
    /// index 0 of the signal list with its stored value and no vk::Fence is
    /// used.
    ///
    /// # Errors
    /// [`Error::VkQueueSubmit`] if the underlying submit fails.
    pub fn queue_submit(
        &self,
        queue: Queue<T>,
        wait_timeline_semaphores: Vec<(TimelineSemaphore<T>, u64)>,
        wait_binary_semaphores: Vec<BinarySemaphore<T>>,
        signal_timeline_semaphores: Vec<(TimelineSemaphore<T>, u64)>,
        signal_binary_semaphores: Vec<BinarySemaphore<T>>,
        submit_info: vk::SubmitInfoBuilder,
    ) -> Result<()> {
        // Timeline semaphores first, binaries appended after, on both the
        // wait and signal sides.
        let (wait_timeline_semaphores, wait_timeline_values): (Vec<_>, Vec<_>) =
            wait_timeline_semaphores.into_iter().unzip();
        let wait_semaphores_raw = wait_timeline_semaphores
            .iter()
            .map(|s| ***s)
            .chain(wait_binary_semaphores.iter().map(|s| ***s))
            .collect::<Vec<_>>();
        let (signal_timeline_semaphores, signal_timeline_values): (Vec<_>, Vec<_>) =
            signal_timeline_semaphores.into_iter().unzip();
        let signal_semaphores_raw = signal_timeline_semaphores
            .iter()
            .map(|s| ***s)
            .chain(signal_binary_semaphores.iter().map(|s| ***s))
            .collect::<Vec<_>>();
        let submit_info = submit_info
            .command_buffers(slice::from_ref(&***self))
            .wait_semaphores(&wait_semaphores_raw);
        // Retain the queue and every semaphore until wait_and_reset.
        self.queue.replace(Some(queue.clone()));
        self.wait_timeline_semaphores
            .replace(wait_timeline_semaphores);
        self.wait_binary_semaphores.replace(wait_binary_semaphores);
        self.signal_timeline_semaphores
            .replace(signal_timeline_semaphores);
        self.signal_binary_semaphores
            .replace(signal_binary_semaphores);
        if wait_timeline_values.is_empty() && signal_timeline_values.is_empty() {
            // No caller-supplied timeline values.
            match &self.fence {
                CommandBufferFence::Fence(fence) => {
                    // Plain binary submit signalled through the fence.
                    let submit_info =
                        submit_info.signal_semaphores(signal_semaphores_raw.as_slice());
                    unsafe {
                        self.command_pool.device.queue_submit(
                            **queue,
                            slice::from_ref(&*submit_info),
                            ***fence,
                        )
                    }
                }
                CommandBufferFence::TimelineSemaphore(semaphore, signal_semaphore_value) => {
                    // Completion tracked by the fence-replacement timeline
                    // semaphore: prepend it (and its value) to the signals.
                    let mut signal_semaphores_raw = signal_semaphores_raw.to_vec();
                    signal_semaphores_raw.insert(0, ***semaphore);
                    let mut signal_semaphore_values =
                        Vec::with_capacity(signal_semaphores_raw.len());
                    signal_semaphore_values.insert(0, signal_semaphore_value.get());
                    // Pad with defaults so values and semaphores line up.
                    signal_semaphore_values
                        .resize_with(signal_semaphores_raw.len(), Default::default);
                    let mut timeline_semaphore_submit_info =
                        vk::TimelineSemaphoreSubmitInfo::builder()
                            .signal_semaphore_values(signal_semaphore_values.as_slice());
                    let submit_info = submit_info
                        .signal_semaphores(&signal_semaphores_raw)
                        .push_next(&mut timeline_semaphore_submit_info);
                    unsafe {
                        self.command_pool.device.queue_submit(
                            **queue,
                            slice::from_ref(&*submit_info),
                            vk::Fence::null(),
                        )
                    }
                }
            }
        } else {
            // Caller supplied timeline values: pad both value arrays to match
            // the full semaphore arrays (binary entries get default values,
            // which Vulkan ignores).
            let mut wait_timeline_values = wait_timeline_values;
            let mut signal_timeline_values = signal_timeline_values;
            wait_timeline_values.resize_with(wait_semaphores_raw.len(), Default::default);
            signal_timeline_values.resize_with(signal_semaphores_raw.len(), Default::default);
            match &self.fence {
                CommandBufferFence::Fence(fence) => {
                    let mut timeline_semaphore_submit_info =
                        vk::TimelineSemaphoreSubmitInfo::builder()
                            .wait_semaphore_values(&wait_timeline_values)
                            .signal_semaphore_values(&signal_timeline_values);
                    let submit_info = submit_info
                        .signal_semaphores(signal_semaphores_raw.as_slice())
                        .push_next(&mut timeline_semaphore_submit_info);
                    unsafe {
                        self.command_pool.device.queue_submit(
                            **queue,
                            slice::from_ref(&*submit_info),
                            ***fence,
                        )
                    }
                }
                CommandBufferFence::TimelineSemaphore(semaphore, signal_semaphore_value) => {
                    // As above, the completion timeline semaphore goes first.
                    let mut signal_semaphores_raw = signal_semaphores_raw;
                    signal_semaphores_raw.insert(0, ***semaphore);
                    signal_timeline_values.insert(0, signal_semaphore_value.get());
                    let mut timeline_semaphore_submit_info =
                        vk::TimelineSemaphoreSubmitInfo::builder()
                            .wait_semaphore_values(&wait_timeline_values)
                            .signal_semaphore_values(&signal_timeline_values);
                    let submit_info = submit_info
                        .signal_semaphores(&signal_semaphores_raw)
                        .push_next(&mut timeline_semaphore_submit_info);
                    unsafe {
                        self.command_pool.device.queue_submit(
                            **queue,
                            slice::from_ref(&*submit_info),
                            vk::Fence::null(),
                        )
                    }
                }
            }
        }
        .context(VkQueueSubmitSnafu {})
    }

    /// Waits for the last submission using this buffer to finish, then
    /// clears all recorded state so the buffer can be re-recorded.
    ///
    /// Dropping `contents`, the pipeline layout, the queue and the wait
    /// semaphores releases the strong references that were kept alive for
    /// the GPU's benefit.  A plain fence is reset for the next submit; a
    /// timeline semaphore needs no reset.
    pub fn wait_and_reset(&self, timeout: u64) -> Result<()> {
        self.fence_wait(timeout)?;
        self.contents.replace(vec![]);
        self.current_pipeline_layout.replace(None);
        self.queue.replace(None);
        self.wait_timeline_semaphores.replace(vec![]);
        self.wait_binary_semaphores.replace(vec![]);
        match &self.fence {
            CommandBufferFence::Fence(fence) => unsafe {
                self.command_pool
                    .device
                    .reset_fences(slice::from_ref(&**fence))
            }
            .context(VkResetFencesSnafu {}),
            // Timeline semaphores are monotone counters; there is nothing
            // to reset.
            CommandBufferFence::TimelineSemaphore(_, _) => Ok(()),
        }
    }

    /// Waits for the last submission to finish, releases all recorded
    /// state, then consumes `self` so the buffer is dropped (and the
    /// underlying Vulkan command buffer freed by `Drop`).
    pub fn wait_and_destroy(self, timeout: u64) -> Result<()> {
        self.fence_wait(timeout)?;
        self.contents.replace(vec![]);
        self.current_pipeline_layout.replace(None);
        self.queue.replace(None);
        self.wait_timeline_semaphores.replace(vec![]);
        self.wait_binary_semaphores.replace(vec![]);
        Ok(())
    }

    /// Records the timeline value the next submission will signal.
    ///
    /// Only valid for a timeline-semaphore backed buffer; calling this on a
    /// fence-backed buffer is a programming error and panics.
    pub fn set_signal_value(&self, v: u64) {
        if let CommandBufferFence::TimelineSemaphore(_, ref value_cell) = self.fence {
            value_cell.set(v);
        } else {
            unimplemented!("Shouldn't have signal value");
        }
    }
}

/// Reference-counted payload behind a command buffer: the raw handle plus
/// strong references to everything the recorded commands use, so those
/// objects stay alive until the GPU has finished with them.
pub struct RcCommandBuffer<T> {
    // Raw Vulkan handle; freed in Drop after waiting on `fence`.
    pub inner: vk::CommandBuffer,
    pub command_pool: CommandPool<T>,
    // Resources referenced by recorded commands, held until reset/destroy.
    pub contents: RefCell<Vec<CommandBufferContents<T>>>,
    pub current_pipeline_layout: RefCell<Option<PipelineLayout<T>>>,
    // Completion guard: either a plain fence or a timeline semaphore.
    pub fence: CommandBufferFence<T>,
    pub queue: RefCell<Option<Queue<T>>>,
    pub wait_timeline_semaphores: RefCell<Vec<TimelineSemaphore<T>>>,
    pub wait_binary_semaphores: RefCell<Vec<BinarySemaphore<T>>>,
    pub signal_timeline_semaphores: RefCell<Vec<TimelineSemaphore<T>>>,
    pub signal_binary_semaphores: RefCell<Vec<BinarySemaphore<T>>>,
    // Caller-supplied payload.
    pub user: T,
}

impl<T> RcCommandBuffer<T> {
    /// Blocks until the last submission guarding this buffer completes, or
    /// `timeout` expires (forwarded to the Vulkan wait calls, which take
    /// nanoseconds).
    pub fn fence_wait(&self, timeout: u64) -> Result<()> {
        match &self.fence {
            // Plain fence: wait on the raw handle directly.
            CommandBufferFence::Fence(fence) => unsafe {
                self.command_pool
                    .device
                    .wait_for_fences(slice::from_ref(&***fence), false, timeout)
            }
            .context(VkWaitForFencesSnafu {}),
            // Timeline semaphore: wait for the counter to reach the value
            // recorded at submit time.
            CommandBufferFence::TimelineSemaphore(semaphore, wait_semaphore_value) => {
                let wait_semaphore_value = wait_semaphore_value.get();
                let wait_info = vk::SemaphoreWaitInfo::builder()
                    .semaphores(slice::from_ref(&**semaphore))
                    .values(slice::from_ref(&wait_semaphore_value));
                unsafe {
                    self.command_pool
                        .device
                        .wait_semaphores(&wait_info, timeout)
                }
                .context(VkWaitSemaphoresSnafu {})
            }
        }
    }
}

impl<T> Drop for RcCommandBuffer<T> {
    fn drop(&mut self) {
        // Ensure the GPU is done with this buffer before freeing it.
        // NOTE(review): a failed wait panics here; panicking inside drop
        // aborts the process when already unwinding — consider logging
        // instead.
        self.fence_wait(std::u64::MAX).unwrap();
        unsafe {
            self.command_pool
                .device
                .free_command_buffers(**self.command_pool, slice::from_ref(&**self));
        };
    }
}

impl<T> CommandPool<T> {
    /// Creates a Vulkan command pool on `device`, wrapped so it is
    /// destroyed when the last reference is dropped.
    pub fn new(
        device: Device<T>,
        create_info: &vk::CommandPoolCreateInfo,
        user: T,
    ) -> Result<Self> {
        let raw_pool = unsafe { device.create_command_pool(create_info, None) }
            .context(VkCreateCommandPoolSnafu {})?;
        let rc = RcCommandPool {
            inner: raw_pool,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted command pool: the raw handle plus the device it was
/// created on (needed again at destroy time).
pub struct RcCommandPool<T> {
    pub inner: vk::CommandPool,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcCommandPool<T> {
    /// Destroys the Vulkan command pool when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_command_pool(**self, None);
        };
    }
}

impl<T> DescriptorPool<T> {
    /// Creates a Vulkan descriptor pool on `device`, wrapped so it is
    /// destroyed when the last reference is dropped.
    pub fn new(
        device: Device<T>,
        create_info: &vk::DescriptorPoolCreateInfo,
        user: T,
    ) -> Result<Self> {
        let raw_pool = unsafe { device.create_descriptor_pool(create_info, None) }
            .context(VkCreateDescriptorPoolSnafu {})?;
        let rc = RcDescriptorPool {
            inner: raw_pool,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted descriptor pool: raw handle plus creating device.
pub struct RcDescriptorPool<T> {
    pub inner: vk::DescriptorPool,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcDescriptorPool<T> {
    /// Destroys the Vulkan descriptor pool when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_descriptor_pool(**self, None);
        };
    }
}

/// Clonable handle to a descriptor set plus the `(binding, cumulative
/// descriptor count)` table of its layout, used to map a binding to a flat
/// slot index in `RcDescriptorSet::contents`.
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub struct DescriptorSet<T> {
    pub inner: Rc<RcDescriptorSet<T>>,
    // Sorted by binding; `.1` is the running total of descriptor slots.
    pub descriptor_counts: Vec<(u32, u32)>,
}

/// What a descriptor-set slot currently references.  Keeping the wrapped
/// objects here pins them alive for as long as the set points at them.
#[derive(Debug, Eq, PartialEq, Clone, Hash)]
pub enum DescriptorSetContents<T> {
    AccelerationStructures(Vec<AccelerationStructureKHR<T>>),
    Sampler(Sampler<T>),
    CombinedImageSampler(Sampler<T>, ImageView<T>),
    CombinedImageSamplers(Vec<(Sampler<T>, ImageView<T>)>),
    SampledImages(Vec<ImageView<T>>),
    StorageImage(ImageView<T>),
    UniformTexelBuffer(Buffer<T>),
    StorageTexelBuffer(Buffer<T>),
    UniformBuffer(Buffer<T>),
    StorageBuffer(Buffer<T>),
    UniformBufferDynamic(Buffer<T>),
    StorageBufferDynamic(Buffer<T>),
    InputAttachment,
}

impl<T: Clone> DescriptorSet<T> {
    /// Allocates one descriptor set per entry of `descriptor_set_layouts`.
    ///
    /// `users` supplies per-set payloads; when it runs out, each remaining
    /// set gets a clone of `users_default`.
    /// NOTE(review): payloads are taken with `pop()`, i.e. from the END of
    /// `users`, while sets are produced in layout order — so the first set
    /// receives the last payload.  Confirm callers pass `users` reversed on
    /// purpose.
    pub fn new(
        descriptor_pool: &DescriptorPool<T>,
        descriptor_set_layouts: Vec<DescriptorSetLayout<T>>,
        create_info: vk::DescriptorSetAllocateInfoBuilder,
        users_default: T,
        users: Vec<T>,
    ) -> Result<Vec<Self>> {
        // Flatten the wrappers into raw handles for the allocate info.
        let layouts: Vec<_> = descriptor_set_layouts.iter().map(|l| ***l).collect();
        let create_info = create_info
            .descriptor_pool(***descriptor_pool)
            .set_layouts(&layouts[..]);
        let descriptor_sets = unsafe {
            descriptor_pool
                .device
                .allocate_descriptor_sets(&create_info)
        }
        .context(VkAllocateDescriptorSetsSnafu {})?;

        let mut users = users;
        descriptor_sets
            .into_iter()
            .zip(
                descriptor_set_layouts
                    .iter()
                    .map(|l| l.descriptor_counts.clone()),
            )
            .map(|ret| {
                // One `contents` slot per descriptor; the layout's last
                // cumulative count is the total.  A layout with no bindings
                // is an error.
                let contents: RefCell<Vec<Option<DescriptorSetContents<T>>>> = RefCell::default();
                contents.borrow_mut().resize_with(
                    ret.1.last().ok_or(Error::DescriptorSetsEmpty)?.1 as _,
                    Default::default,
                );
                Ok(Self {
                    inner: Rc::new(RcDescriptorSet {
                        inner: ret.0,
                        descriptor_pool: descriptor_pool.clone(),
                        descriptor_set_layouts: descriptor_set_layouts.clone(),
                        contents,
                        user: users.pop().unwrap_or_else(|| users_default.clone()),
                    }),
                    descriptor_counts: ret.1,
                })
            })
            .collect()
    }
}

impl<T> DescriptorSet<T> {
    pub fn write_acceleration_structures(
        &self,
        acceleration_structures: Vec<AccelerationStructureKHR<T>>,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let data = &acceleration_structures
            .iter()
            .map(|as_| ***as_)
            .collect::<Vec<_>>();
        let mut write_descriptor_set = *write_descriptor_set
            .push_next(
                &mut vk::WriteDescriptorSetAccelerationStructureKHR::builder()
                    .acceleration_structures(data),
            )
            .descriptor_type(vk::DescriptorType::ACCELERATION_STRUCTURE_KHR)
            .dst_set(***self);

        write_descriptor_set.descriptor_count = acceleration_structures.len() as _;

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::AccelerationStructures(
            acceleration_structures,
        ));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&write_descriptor_set), &[])
        };
        Ok(())
    }

    pub fn write_buffer(
        &self,
        buffer: Buffer<T>,
        buffer_info: vk::DescriptorBufferInfoBuilder,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let buffer_info = buffer_info.buffer(**buffer);

        let write_descriptor_set = write_descriptor_set
            .descriptor_type(vk::DescriptorType::UNIFORM_BUFFER)
            .buffer_info(slice::from_ref(&*buffer_info))
            .dst_set(***self);

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::UniformBuffer(buffer));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&*write_descriptor_set), &[])
        };
        Ok(())
    }

    pub fn write_combined_images(
        &self,
        images: Vec<(Sampler<T>, ImageView<T>, vk::DescriptorImageInfoBuilder)>,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let (combined_images, image_infos): (Vec<_>, Vec<_>) = images
            .into_iter()
            .map(|(sampler, image, image_info)| {
                let image_info = *image_info.sampler(**sampler).image_view(**image);
                ((sampler, image), image_info)
            })
            .unzip();

        let write_descriptor_set = write_descriptor_set
            .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
            .image_info(&image_infos)
            .dst_set(***self);

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::CombinedImageSamplers(
            combined_images,
        ));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&*write_descriptor_set), &[])
        };
        Ok(())
    }

    pub fn write_image(
        &self,
        sampler: Sampler<T>,
        image_view: ImageView<T>,
        image_info: vk::DescriptorImageInfoBuilder,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let image_info = image_info.sampler(**sampler).image_view(**image_view);

        let write_descriptor_set = write_descriptor_set
            .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
            .image_info(slice::from_ref(&*image_info))
            .dst_set(***self);

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::CombinedImageSampler(
            sampler, image_view,
        ));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&*write_descriptor_set), &[])
        };
        Ok(())
    }

    pub fn write_images(
        &self,
        images: Vec<(ImageView<T>, vk::DescriptorImageInfoBuilder)>,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let (image_views, image_infos): (Vec<_>, Vec<_>) = images
            .into_iter()
            .map(|(image, image_info)| {
                let image_info = *image_info.image_view(**image);
                (image, image_info)
            })
            .unzip();

        let write_descriptor_set = write_descriptor_set
            .descriptor_type(vk::DescriptorType::SAMPLED_IMAGE)
            .image_info(image_infos.as_slice())
            .dst_set(***self);

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::SampledImages(image_views));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&*write_descriptor_set), &[])
        };
        Ok(())
    }

    pub fn write_sampler(
        &self,
        sampler: Sampler<T>,
        image_info: vk::DescriptorImageInfoBuilder,
        write_descriptor_set: vk::WriteDescriptorSetBuilder,
    ) -> Result<()> {
        let image_info = image_info.sampler(**sampler);

        let write_descriptor_set = write_descriptor_set
            .descriptor_type(vk::DescriptorType::SAMPLER)
            .image_info(slice::from_ref(&*image_info))
            .dst_set(***self);

        let mut index: usize = 0;
        self.descriptor_counts
            .iter()
            .find(|t| {
                let ret = t.0 >= write_descriptor_set.dst_binding;
                if !ret {
                    index = t.1 as _;
                };
                ret
            })
            .ok_or(Error::DescriptorCountNotFound)?;

        self.contents.borrow_mut()[index] = Some(DescriptorSetContents::Sampler(sampler));

        unsafe {
            self.descriptor_pool
                .device
                .update_descriptor_sets(slice::from_ref(&*write_descriptor_set), &[])
        };
        Ok(())
    }
}

/// Reference-counted descriptor set: the raw handle plus everything it
/// must keep alive — its pool, its layouts, and whatever its slots point
/// at (`contents`, indexed by flat descriptor offset).
pub struct RcDescriptorSet<T> {
    pub inner: vk::DescriptorSet,
    pub descriptor_pool: DescriptorPool<T>,
    pub descriptor_set_layouts: Vec<DescriptorSetLayout<T>>,
    pub contents: RefCell<Vec<Option<DescriptorSetContents<T>>>>,
    pub user: T,
}

impl<T> Drop for RcDescriptorSet<T> {
    /// Frees the descriptor set back to its pool.
    /// NOTE(review): `free_descriptor_sets` is only valid when the pool was
    /// created with FREE_DESCRIPTOR_SET — confirm; the `unwrap()` also
    /// panics inside drop on failure.
    fn drop(&mut self) {
        unsafe {
            self.descriptor_pool
                .device
                .free_descriptor_sets(**self.descriptor_pool, slice::from_ref(&*self))
        }
        .unwrap();
    }
}

/// Clonable handle to a descriptor-set layout plus its `(binding,
/// cumulative descriptor count)` table, copied into each set allocated
/// from it.
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub struct DescriptorSetLayout<T> {
    pub inner: Rc<RcDescriptorSetLayout<T>>,
    // Sorted by binding; `.1` is the running total of descriptor slots.
    pub descriptor_counts: Vec<(u32, u32)>,
}

impl<T> DescriptorSetLayout<T> {
    /// Builds a descriptor-set layout from `bindings`, also recording, per
    /// binding, the running total of descriptor slots up to and including
    /// it (used later to map a binding to a flat slot offset).
    pub fn new(
        device: Device<T>,
        mut bindings: Vec<vk::DescriptorSetLayoutBinding>,
        create_info: vk::DescriptorSetLayoutCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        // Order by binding index so the cumulative counts are monotone.
        bindings.sort_by_key(|binding| binding.binding);
        let create_info = create_info.bindings(&bindings);
        let raw_layout = unsafe { device.create_descriptor_set_layout(&create_info, None) }
            .context(VkCreateDescriptorSetLayoutSnafu {})?;
        let descriptor_counts = bindings
            .iter()
            .scan(0u32, |running, binding| {
                *running += binding.descriptor_count;
                Some((binding.binding, *running))
            })
            .collect();
        Ok(Self {
            inner: Rc::new(RcDescriptorSetLayout {
                inner: raw_layout,
                device,
                user,
            }),
            descriptor_counts,
        })
    }
}

/// Reference-counted descriptor-set layout: raw handle plus creating device.
pub struct RcDescriptorSetLayout<T> {
    pub inner: vk::DescriptorSetLayout,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcDescriptorSetLayout<T> {
    /// Destroys the layout when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_descriptor_set_layout(**self, None);
        };
    }
}

impl<T> Device<T> {
    /// Creates a logical device on `physical_device`, eagerly loading the
    /// acceleration-structure and ray-tracing extension function tables.
    pub fn new(
        instance: Instance<T>,
        physical_device: vk::PhysicalDevice,
        create_info: &vk::DeviceCreateInfo,
        user: T,
    ) -> Result<Self> {
        let raw_device = unsafe { instance.create_device(physical_device, create_info, None) }
            .context(VkCreateDeviceSnafu {})?;
        Ok(Self(Rc::new(RcDevice {
            // Extension tables are built first, while `raw_device` is still
            // borrowable; the struct literal evaluates fields in order.
            acceleration_structure_fn: AccelerationStructure::new(&**instance, &raw_device),
            ray_tracing_fn: RayTracingPipeline::new(&**instance, &raw_device),
            inner: raw_device,
            instance,
            user,
        })))
    }
}

/// Reference-counted logical device plus the instance it came from and the
/// extension function tables loaded at creation.
pub struct RcDevice<T> {
    pub inner: VkDevice,
    pub instance: Instance<T>,
    pub acceleration_structure_fn: AccelerationStructure,
    pub ray_tracing_fn: RayTracingPipeline,
    pub user: T,
}

// Expose the raw ash device so callers can invoke device-level functions
// directly on the wrapper.
impl<T> Deref for RcDevice<T> {
    type Target = VkDevice;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> Drop for RcDevice<T> {
    /// Destroys the logical device when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.destroy_device(None);
        };
    }
}

impl<T> Fence<T> {
    /// Creates a Vulkan fence, destroyed when the last reference is dropped.
    pub fn new(device: Device<T>, create_info: &vk::FenceCreateInfo, user: T) -> Result<Self> {
        let raw_fence =
            unsafe { device.create_fence(create_info, None) }.context(VkCreateFenceSnafu {})?;
        let rc = RcFence {
            inner: raw_fence,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted fence: raw handle plus creating device.
pub struct RcFence<T> {
    pub inner: vk::Fence,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcFence<T> {
    /// Destroys the fence when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_fence(**self, None);
        };
    }
}

impl<T> Framebuffer<T> {
    /// Creates a framebuffer for `render_pass` over `attachments`, keeping
    /// strong references to both so they outlive the Vulkan handle.
    pub fn new(
        render_pass: RenderPass<T>,
        attachments: Vec<ImageView<T>>,
        create_info: vk::FramebufferCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        // Flatten the wrappers into raw vk::ImageView handles.
        let raw_attachments: Vec<_> = attachments.iter().map(|view| ***view).collect();
        let create_info = create_info
            .render_pass(**render_pass)
            .attachments(&raw_attachments);
        let raw_framebuffer = unsafe { render_pass.device.create_framebuffer(&create_info, None) }
            .context(VkCreateFramebufferSnafu {})?;
        Ok(Self(Rc::new(RcFramebuffer {
            inner: raw_framebuffer,
            render_pass,
            attachments,
            user,
        })))
    }
}

/// Reference-counted framebuffer, pinning its render pass and attachments.
pub struct RcFramebuffer<T> {
    pub inner: vk::Framebuffer,
    pub render_pass: RenderPass<T>,
    pub attachments: Vec<ImageView<T>>,
    pub user: T,
}

impl<T> Drop for RcFramebuffer<T> {
    /// Destroys the framebuffer when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.render_pass.device.destroy_framebuffer(**self, None);
        };
    }
}

/// Where an image came from, which decides who destroys it: vk-mem images
/// carry their allocation and are freed by the allocator; swapchain images
/// are owned (and destroyed) by the swapchain itself.
pub enum RcImageDepends<T> {
    Allocator {
        allocator: Allocator<T>,
        // ManuallyDrop: the allocation is consumed by `destroy_image` in
        // RcImage::drop rather than dropped on its own.
        allocation: ManuallyDrop<Allocation>,
    },
    SwapchainKHR(Rc<RcSwapchainKHR<T>>),
}

impl<T> Image<T> {
    /// Creates an image backed by a vk-mem allocation; both the image and
    /// its allocation are released together in `RcImage::drop`.
    pub fn new(
        allocator: Allocator<T>,
        create_info: &vk::ImageCreateInfo,
        allocation_create_info: &AllocationCreateInfo,
        user: T,
    ) -> Result<Self> {
        use crate::vk_mem::Alloc;
        // Scope the RefCell borrow so `allocator` can be moved below.
        let (inner, allocation) = {
            let allocator_mut: RefMut<VkMemAllocator> = allocator.borrow_mut();
            unsafe { allocator_mut.create_image(create_info, allocation_create_info) }
                .context(VkMemCreateImageSnafu {})?
        };
        Ok(Self(Rc::new(RcImage {
            inner,
            depends: RcImageDepends::Allocator {
                allocator,
                // ManuallyDrop: consumed by destroy_image at drop time.
                allocation: ManuallyDrop::new(allocation),
            },
            user,
        })))
    }
}

/// Reference-counted image plus whatever owns its memory (see
/// `RcImageDepends`).
pub struct RcImage<T> {
    pub inner: vk::Image,
    pub depends: RcImageDepends<T>,
    pub user: T,
}

impl<T> RcImage<T> {
    /// Returns the logical device this image belongs to, regardless of
    /// whether it was allocated through vk-mem or handed out by a swapchain.
    fn get_device(&self) -> Device<T> {
        let device = match &self.depends {
            RcImageDepends::Allocator { allocator, .. } => &allocator.device,
            RcImageDepends::SwapchainKHR(swapchain) => &swapchain.device,
        };
        device.clone()
    }
}

impl<T> Drop for RcImage<T> {
    fn drop(&mut self) {
        match &mut self.depends {
            RcImageDepends::Allocator {
                allocator,
                allocation,
                ..
            } => {
                // SAFETY: the allocation is wrapped in ManuallyDrop and only
                // taken here, exactly once, to be consumed by destroy_image.
                unsafe {
                    allocator
                        .borrow_mut()
                        .destroy_image(self.inner, ManuallyDrop::take(allocation))
                };
            }
            RcImageDepends::SwapchainKHR(_) => {
                /* The Vulkan SwapchainKHR is responsible for destroying these, we only need to make sure it is destroyed. */
            }
        }
    }
}

impl<T> ImageView<T> {
    /// Creates a view over `image`; the wrapper keeps the image alive for
    /// as long as the view exists.
    pub fn new(
        image: Image<T>,
        create_info: vk::ImageViewCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        let create_info = create_info.image(**image);
        let raw_view = unsafe { image.get_device().create_image_view(&create_info, None) }
            .context(VkCreateImageViewSnafu {})?;
        Ok(Self(Rc::new(RcImageView {
            inner: raw_view,
            image,
            user,
        })))
    }
}

/// Reference-counted image view, pinning its image.
pub struct RcImageView<T> {
    pub inner: vk::ImageView,
    pub image: Image<T>,
    pub user: T,
}

impl<T> Drop for RcImageView<T> {
    /// Destroys the view on its image's device when the last reference
    /// goes away.
    fn drop(&mut self) {
        let device = self.image.get_device();
        unsafe {
            device.destroy_image_view(**self, None);
        };
    }
}

/// Clonable Vulkan instance handle; also carries the loaded `Entry` so the
/// loader outlives every use of the instance.
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub struct Instance<T> {
    pub inner: Rc<RcInstance<T>>,
    pub entry: Entry,
}

impl<T> Instance<T> {
    /// Loads the Vulkan library and creates an instance in one step.
    pub fn new_simple(create_info: &vk::InstanceCreateInfo, user: T) -> Result<Self> {
        let entry = unsafe { Entry::load() }.context(AshLibLoadingSnafu {})?;
        Self::new(entry, create_info, user)
    }

    /// Creates a Vulkan instance from an already-loaded `entry`.
    pub fn new(entry: Entry, create_info: &vk::InstanceCreateInfo, user: T) -> Result<Self> {
        let raw_instance = unsafe { entry.create_instance(create_info, None) }
            .context(VkCreateInstanceSnafu {})?;
        let inner = Rc::new(RcInstance {
            inner: raw_instance,
            user,
        });
        Ok(Self { inner, entry })
    }
}

/// Reference-counted raw Vulkan instance.
pub struct RcInstance<T> {
    pub inner: VkInstance,
    pub user: T,
}

// Expose the raw ash instance so instance-level functions can be called
// directly on the wrapper.
impl<T> Deref for RcInstance<T> {
    type Target = VkInstance;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> Drop for RcInstance<T> {
    /// Destroys the instance when the last reference goes away.
    fn drop(&mut self) {
        unsafe { self.destroy_instance(None) };
    }
}

impl<T> Pipeline<T> {
    /// Builds a graphics pipeline.
    ///
    /// Each entry of `shader_stages_create_info` is completed with the
    /// module from `shaders` matching its `stage`; a missing stage is an
    /// error.  The wrapper retains the layout, render pass and shader
    /// modules so they outlive the pipeline.
    pub fn new(
        pipeline_layout: PipelineLayout<T>,
        render_pass: RenderPass<T>,
        shaders: HashMap<vk::ShaderStageFlags, ShaderModule<T>>,
        shader_stages_create_info: Vec<vk::PipelineShaderStageCreateInfoBuilder>,
        create_info: vk::GraphicsPipelineCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        // Fill each stage's module from the map, keyed by its stage flag.
        let shader_stages_create_info = shader_stages_create_info
            .into_iter()
            .map(|create_info| {
                let stage = create_info.stage;
                Ok(*create_info.module(***shaders.get(&stage).ok_or(Error::HasShaderStage)?))
            })
            .collect::<Result<Vec<_>>>()?;

        let create_info = create_info
            .stages(&shader_stages_create_info)
            .layout(**pipeline_layout)
            .render_pass(**render_pass);

        Ok(Self(Rc::new(RcPipeline {
            inner: unsafe {
                pipeline_layout.device.create_graphics_pipelines(
                    vk::PipelineCache::null(),
                    slice::from_ref(&*create_info),
                    None,
                )
            }
            // On failure ash returns the (possibly partially created)
            // pipelines alongside the error; destroy them before
            // propagating so nothing leaks.
            .map_err(|(p, e)| {
                for it in p {
                    unsafe {
                        pipeline_layout.device.destroy_pipeline(it, None);
                    };
                }
                e
            })
            // Exactly one create info was passed, so take the one result.
            .context(VkCreateGraphicsPipelinesSnafu {})?[0],
            pipeline_layout,
            render_pass: Some(render_pass),
            shaders,
            pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS,
            user,
        })))
    }

    /// Builds a ray-tracing pipeline (KHR): same shader-stage completion
    /// as `new`, but with no render pass.
    pub fn new_ray_tracing(
        pipeline_layout: PipelineLayout<T>,
        shaders: HashMap<vk::ShaderStageFlags, ShaderModule<T>>,
        shader_stages_create_info: Vec<vk::PipelineShaderStageCreateInfoBuilder>,
        create_info: vk::RayTracingPipelineCreateInfoKHRBuilder,
        user: T,
    ) -> Result<Self> {
        let shader_stages_create_info = shader_stages_create_info
            .into_iter()
            .map(|create_info| {
                let stage = create_info.stage;
                Ok(*create_info.module(***shaders.get(&stage).ok_or(Error::HasShaderStage)?))
            })
            .collect::<Result<Vec<_>>>()?;

        let create_info = create_info
            .stages(&shader_stages_create_info)
            .layout(**pipeline_layout);

        Ok(Self(Rc::new(RcPipeline {
            inner: unsafe {
                pipeline_layout
                    .device
                    .ray_tracing_fn
                    .create_ray_tracing_pipelines(
                        vk::DeferredOperationKHR::null(),
                        vk::PipelineCache::null(),
                        slice::from_ref(&*create_info),
                        None,
                    )
            }
            .context(VkCreateRayTracingPipelinesSnafu {})?[0],
            pipeline_layout,
            render_pass: None,
            shaders,
            pipeline_bind_point: vk::PipelineBindPoint::RAY_TRACING_KHR,
            user,
        })))
    }
}

/// Reference-counted pipeline, pinning its layout, optional render pass
/// (graphics only) and shader modules.
pub struct RcPipeline<T> {
    pub inner: vk::Pipeline,
    pub pipeline_layout: PipelineLayout<T>,
    // None for ray-tracing pipelines.
    pub render_pass: Option<RenderPass<T>>,
    pub shaders: HashMap<vk::ShaderStageFlags, ShaderModule<T>>,
    // GRAPHICS or RAY_TRACING_KHR, set by the constructor used.
    pub pipeline_bind_point: vk::PipelineBindPoint,
    pub user: T,
}

impl<T> Drop for RcPipeline<T> {
    /// Destroys the pipeline when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.pipeline_layout.device.destroy_pipeline(**self, None);
        };
    }
}

impl<T> PipelineLayout<T> {
    /// Creates a pipeline layout from `descriptor_set_layouts`, retaining
    /// them so they outlive the Vulkan handle.
    pub fn new(
        device: Device<T>,
        descriptor_set_layouts: Vec<DescriptorSetLayout<T>>,
        create_info: vk::PipelineLayoutCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        // Flatten the wrappers into raw handles for the create info.
        let raw_layouts: Vec<_> = descriptor_set_layouts.iter().map(|layout| ***layout).collect();
        let create_info = create_info.set_layouts(raw_layouts.as_slice());
        let raw_pipeline_layout = unsafe { device.create_pipeline_layout(&create_info, None) }
            .context(VkCreatePipelineLayoutSnafu {})?;
        Ok(Self(Rc::new(RcPipelineLayout {
            inner: raw_pipeline_layout,
            device,
            descriptor_set_layouts,
            user,
        })))
    }
}

/// Reference-counted pipeline layout, pinning its descriptor-set layouts.
pub struct RcPipelineLayout<T> {
    pub inner: vk::PipelineLayout,
    pub device: Device<T>,
    pub descriptor_set_layouts: Vec<DescriptorSetLayout<T>>,
    pub user: T,
}

impl<T> Drop for RcPipelineLayout<T> {
    /// Destroys the pipeline layout when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_pipeline_layout(**self, None);
        };
    }
}

impl<T> Queue<T> {
    /// Looks up a device queue by family and index.  Queues are owned by
    /// the device, so no destroy call is needed (hence no `Drop` impl).
    pub fn new(device: Device<T>, queue_family_index: u32, queue_index: u32, user: T) -> Self {
        let raw_queue = unsafe { device.get_device_queue(queue_family_index, queue_index) };
        let rc = RcQueue {
            inner: raw_queue,
            device,
            user,
        };
        Self(Rc::new(rc))
    }
}

/// Reference-counted queue handle; queues belong to the device, so there
/// is deliberately no `Drop` impl.
pub struct RcQueue<T> {
    pub inner: vk::Queue,
    pub device: Device<T>,
    pub user: T,
}

impl<T> RenderPass<T> {
    /// Creates a render pass, destroyed when the last reference is dropped.
    pub fn new(device: Device<T>, create_info: &vk::RenderPassCreateInfo, user: T) -> Result<Self> {
        let raw_render_pass = unsafe { device.create_render_pass(create_info, None) }
            .context(VkCreateRenderPassSnafu {})?;
        let rc = RcRenderPass {
            inner: raw_render_pass,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted render pass: raw handle plus creating device.
pub struct RcRenderPass<T> {
    pub inner: vk::RenderPass,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcRenderPass<T> {
    /// Destroys the render pass when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_render_pass(**self, None);
        };
    }
}

impl<T> Sampler<T> {
    /// Creates a sampler, destroyed when the last reference is dropped.
    pub fn new(device: Device<T>, create_info: &vk::SamplerCreateInfo, user: T) -> Result<Self> {
        let raw_sampler =
            unsafe { device.create_sampler(create_info, None) }.context(VkCreateSamplerSnafu {})?;
        let rc = RcSampler {
            inner: raw_sampler,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted sampler: raw handle plus creating device.
pub struct RcSampler<T> {
    pub inner: vk::Sampler,
    pub device: Device<T>,
    pub user: T,
}

impl<T> Drop for RcSampler<T> {
    /// Destroys the sampler when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_sampler(**self, None);
        };
    }
}

impl<T> BinarySemaphore<T> {
    /// Creates a binary semaphore, destroyed when the last reference is
    /// dropped.
    pub fn new(device: Device<T>, create_info: &vk::SemaphoreCreateInfo, user: T) -> Result<Self> {
        let raw_semaphore = unsafe { device.create_semaphore(create_info, None) }
            .context(VkCreateSemaphoreSnafu {})?;
        let rc = RcBinarySemaphore {
            inner: raw_semaphore,
            device,
            user,
        };
        Ok(Self(Rc::new(rc)))
    }
}

/// Reference-counted binary semaphore: raw handle plus creating device.
pub struct RcBinarySemaphore<T> {
    pub inner: vk::Semaphore,
    pub device: Device<T>,
    pub user: T,
}

// Expose the raw vk::Semaphore handle directly.
impl<T> Deref for RcBinarySemaphore<T> {
    type Target = vk::Semaphore;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> Drop for RcBinarySemaphore<T> {
    /// Destroys the semaphore when the last reference goes away.
    fn drop(&mut self) {
        unsafe {
            self.device.destroy_semaphore(**self, None);
        };
    }
}

impl<T> TimelineSemaphore<T> {
    /// Creates a timeline semaphore with counter starting at
    /// `initial_value`, by chaining a `SemaphoreTypeCreateInfo` onto the
    /// caller's create info.
    pub fn new(
        device: Device<T>,
        create_info: vk::SemaphoreCreateInfoBuilder,
        initial_value: u64,
        user: T,
    ) -> Result<Self> {
        Ok(Self(Rc::new(RcTimelineSemaphore {
            // The pNext struct is a temporary, but it lives until the end
            // of this statement — i.e. past the create_semaphore call that
            // reads it — so the pointer stays valid.
            inner: unsafe {
                device.create_semaphore(
                    &create_info.push_next(
                        &mut vk::SemaphoreTypeCreateInfo::builder()
                            .semaphore_type(vk::SemaphoreType::TIMELINE)
                            .initial_value(initial_value),
                    ),
                    None,
                )
            }
            .context(VkCreateSemaphoreSnafu {})?,
            device,
            user,
        })))
    }

    /// Creates a timeline semaphore whose counter starts at 0.
    pub fn new_simple(
        device: Device<T>,
        create_info: vk::SemaphoreCreateInfoBuilder,
        user: T,
    ) -> Result<Self> {
        Self::new(device, create_info, 0, user)
    }
}

/// Reference-counted payload behind [`TimelineSemaphore`]: owns the raw
/// semaphore handle and destroys it when the last clone is dropped.
pub struct RcTimelineSemaphore<T> {
    /// Raw Vulkan semaphore handle (timeline type); destroyed in `Drop`.
    pub inner: vk::Semaphore,
    /// Device that created the semaphore, kept alive for the destroy call.
    pub device: Device<T>,
    /// Caller-supplied user data.
    pub user: T,
}

// Written by hand because `vk_subinner_types!` derives `Target = vk::$t`,
// and there is no `vk::TimelineSemaphore` type — the raw handle is a plain
// `vk::Semaphore`.
impl<T> Deref for RcTimelineSemaphore<T> {
    type Target = vk::Semaphore;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> Drop for RcTimelineSemaphore<T> {
    /// Destroys the wrapped `vk::Semaphore` when the last reference is dropped.
    fn drop(&mut self) {
        // SAFETY: `inner` was created from `device` and is destroyed exactly
        // once, here.
        unsafe { self.device.destroy_semaphore(self.inner, None) }
    }
}

impl<T> ShaderModule<T> {
    pub fn new_simple(device: Device<T>, bytes: &[u8], user: T) -> Result<Self> {
        let code: Vec<u32> = bytes
            .chunks(4)
            .map(|array| {
                (u32::from(array[3]) << 24)
                    + (u32::from(array[2]) << 16)
                    + (u32::from(array[1]) << 8)
                    + u32::from(array[0])
            })
            .collect();
        let create_info = vk::ShaderModuleCreateInfo::builder().code(&code[..]);
        Self::new(device, &create_info, user)
    }

    pub fn new(
        device: Device<T>,
        create_info: &vk::ShaderModuleCreateInfo,
        user: T,
    ) -> Result<Self> {
        Ok(Self(Rc::new(RcShaderModule {
            inner: unsafe { device.create_shader_module(create_info, None) }
                .context(VkCreateShaderModuleSnafu {})?,
            device,
            user,
        })))
    }
}

/// Reference-counted payload behind [`ShaderModule`]: owns the raw shader
/// module handle and destroys it when the last clone is dropped.
pub struct RcShaderModule<T> {
    /// Raw Vulkan shader module handle; destroyed in `Drop`.
    pub inner: vk::ShaderModule,
    /// Device that created the module, kept alive for the destroy call.
    pub device: Device<T>,
    /// Caller-supplied user data.
    pub user: T,
}

impl<T> Drop for RcShaderModule<T> {
    /// Destroys the wrapped `vk::ShaderModule` when the last reference is dropped.
    fn drop(&mut self) {
        // SAFETY: `inner` was created from `device` and is destroyed exactly
        // once, here.
        unsafe { self.device.destroy_shader_module(self.inner, None) }
    }
}

/// Cloneable handle to a window surface, together with the physical device,
/// queue family, and surface format selected for it in [`SurfaceKHR::new`].
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub struct SurfaceKHR<T> {
    /// Shared ownership of the raw surface; destroyed when the last clone drops.
    pub inner: Rc<RcSurfaceKHR<T>>,
    /// Physical device selected as able to render graphics and present here.
    pub physical_device: vk::PhysicalDevice,
    /// Queue family index on `physical_device` chosen for graphics + present.
    pub queue_family_index: u32,
    /// Surface format picked from the device's reported formats.
    pub surface_format: vk::SurfaceFormatKHR,
}

impl<T> SurfaceKHR<T> {
    /// Creates a window surface and selects a (physical device, queue family)
    /// pair that supports both graphics and presenting to it, plus a surface
    /// format.
    ///
    /// The queue-family search takes the first family flagged `GRAPHICS` for
    /// which `get_physical_device_surface_support` reports true; the format is
    /// the first one reported, with `UNDEFINED` replaced by `B8G8R8_UNORM`.
    ///
    /// # Errors
    ///
    /// Returns the corresponding `Error` variant if surface creation, device
    /// enumeration, or format query fails, or if no suitable device/format is
    /// found. The raw `vk::SurfaceKHR` is destroyed on every error path after
    /// its creation (the previous version leaked it on those paths).
    pub fn new(instance: Instance<T>, window: &Window, flags: vk::Flags, user: T) -> Result<Self> {
        use crate::WindowExt;
        let surface = window
            .create_surface(&instance.entry, &instance, flags, None)
            .context(VkCreateSurfaceSnafu {})?;
        // Create the loader up front so it is available for cleanup below.
        let surface_loader = Surface::new(&instance.entry, &**instance);

        // All fallible selection work happens inside this closure so a
        // failure can destroy `surface` before the error propagates.
        let selection: Result<(vk::PhysicalDevice, u32, vk::SurfaceFormatKHR)> = (|| {
            let physical_devices = unsafe { instance.enumerate_physical_devices() }
                .context(VkEnumeratePhysicalDevicesSnafu {})?;
            let (physical_device, queue_family_index) = physical_devices
                .iter()
                .flat_map(|pdevice| {
                    unsafe { instance.get_physical_device_queue_family_properties(*pdevice) }
                        .iter()
                        .enumerate()
                        .filter_map(|(index, info)| {
                            // Best-effort: a failed support query counts as
                            // "not supported" rather than an error.
                            if info.queue_flags.contains(ash::vk::QueueFlags::GRAPHICS)
                                && unsafe {
                                    surface_loader.get_physical_device_surface_support(
                                        *pdevice,
                                        index as u32,
                                        surface,
                                    )
                                }
                                .unwrap_or(false)
                            {
                                Some((*pdevice, index))
                            } else {
                                None
                            }
                        })
                        .next()
                })
                .next()
                .ok_or(Error::VkEnumeratePhysicalDevicesNoneSuitable)?;
            let queue_family_index = queue_family_index as u32;

            let surface_formats = unsafe {
                surface_loader.get_physical_device_surface_formats(physical_device, surface)
            }
            .context(VkGetPhysicalDeviceSurfaceFormatsSnafu {})?;
            let surface_format = surface_formats
                .first()
                .map(|sfmt| match sfmt.format {
                    // UNDEFINED means the device accepts any format here.
                    ash::vk::Format::UNDEFINED => ash::vk::SurfaceFormatKHR {
                        format: ash::vk::Format::B8G8R8_UNORM,
                        color_space: sfmt.color_space,
                    },
                    _ => *sfmt,
                })
                .ok_or(Error::VkGetPhysicalDeviceSurfaceFormatsNoneSuitable)?;
            Ok((physical_device, queue_family_index, surface_format))
        })();

        let (physical_device, queue_family_index, surface_format) = match selection {
            Ok(sel) => sel,
            Err(err) => {
                // SAFETY: `surface` was created above and no owner that would
                // destroy it has been constructed yet.
                unsafe { surface_loader.destroy_surface(surface, None) };
                return Err(err);
            }
        };

        Ok(Self {
            inner: Rc::new(RcSurfaceKHR {
                inner: surface,
                instance,
                surface_loader,
                user,
            }),
            physical_device,
            queue_family_index,
            surface_format,
        })
    }
}

/// Reference-counted payload behind [`SurfaceKHR`]: owns the raw surface
/// handle and destroys it when the last clone is dropped.
pub struct RcSurfaceKHR<T> {
    /// Raw Vulkan surface handle; destroyed in `Drop`.
    pub inner: vk::SurfaceKHR,
    /// Instance the surface was created from, kept alive here.
    pub instance: Instance<T>,
    /// Extension loader used for the destroy call.
    pub surface_loader: ash::extensions::khr::Surface,
    /// Caller-supplied user data.
    pub user: T,
}

impl<T> Drop for RcSurfaceKHR<T> {
    /// Destroys the wrapped `vk::SurfaceKHR` when the last reference is dropped.
    fn drop(&mut self) {
        // SAFETY: `inner` was created via `surface_loader` and is destroyed
        // exactly once, here.
        unsafe { self.surface_loader.destroy_surface(self.inner, None) }
    }
}

/// Cloneable handle to a swapchain plus wrappers for its presentable images.
#[derive(Derivative)]
#[derivative(Clone(bound = ""))]
pub struct SwapchainKHR<T> {
    /// Shared ownership of the raw swapchain and its sync bookkeeping.
    pub inner: Rc<RcSwapchainKHR<T>>,
    /// One [`Image`] per swapchain image; each holds an Rc to `inner`.
    pub present_images: Vec<Image<T>>,
}

impl<T: Clone> SwapchainKHR<T> {
    /// Creates a swapchain over `surface` and wraps its presentable images.
    ///
    /// `sync_objects` supplies the (acquire-semaphore, fence) pairs that
    /// `acquire_next_image` hands out. `image_users` provides per-image user
    /// data; when it runs out, `image_users_default` is cloned instead.
    pub fn new(
        device: Device<T>,
        surface: SurfaceKHR<T>,
        create_info: vk::SwapchainCreateInfoKHRBuilder,
        sync_objects: Vec<(BinarySemaphore<T>, Fence<T>)>,
        user: T,
        image_users_default: T,
        image_users: Vec<T>,
    ) -> Result<Self> {
        let swapchain_loader = Swapchain::new(&**device.instance, &**device);
        // The surface handle is filled into the caller's builder here so the
        // two cannot disagree.
        let create_info = create_info.surface(**surface);
        let swapchain = unsafe { swapchain_loader.create_swapchain(&create_info, None) }
            .context(VkCreateSwapchainSnafu {})?;
        // Tag each sync pair with `None` = "not bound to an in-flight image".
        let acquire_objects = sync_objects
            .into_iter()
            .map(|(a, b)| (None, a, b))
            .collect();
        let inner = Rc::new(RcSwapchainKHR {
            inner: swapchain,
            device,
            surface,
            swapchain_loader: swapchain_loader.clone(),
            queue: Default::default(),
            wait_semaphores: Default::default(),
            acquire_objects: RefCell::new(acquire_objects),
            user,
        });
        let mut image_users = image_users;
        // If this query fails, dropping `inner` destroys the swapchain, so
        // nothing leaks on the error path.
        let present_images = unsafe { swapchain_loader.get_swapchain_images(swapchain) }
            .context(VkGetSwapchainImagesSnafu {})?
            .iter()
            .map(|image| {
                Image(Rc::new(RcImage {
                    inner: *image,
                    // Each image keeps the swapchain alive via this Rc.
                    depends: RcImageDepends::SwapchainKHR(inner.clone()),
                    // NOTE(review): `pop()` consumes `image_users` from the
                    // END, so image 0 receives the LAST entry — confirm this
                    // reversed assignment order is intended.
                    user: image_users
                        .pop()
                        .unwrap_or_else(|| image_users_default.clone()),
                }))
            })
            .collect();
        Ok(Self {
            inner,
            present_images,
        })
    }
}

impl<T> SwapchainKHR<T> {
    /// Acquires the next swapchain image using a free (semaphore, fence)
    /// pair from the pool supplied to [`SwapchainKHR::new`].
    ///
    /// Returns `(image_index, suboptimal, acquire_semaphore)`. The chosen
    /// pair is tagged with the image index until [`Self::wait_and_reset`] is
    /// called for that index.
    ///
    /// # Errors
    ///
    /// `Error::RanOutSyncObjects` when every pair is still in flight, or the
    /// acquire error from Vulkan.
    pub fn acquire_next_image(&self, timeout: u64) -> Result<(u32, bool, BinarySemaphore<T>)> {
        let mut objects = self.acquire_objects.borrow_mut();
        // A `None` tag marks a pair as free.
        let object = objects
            .iter_mut()
            .find(|(x, _, _)| x.is_none())
            .ok_or(Error::RanOutSyncObjects)?;
        let ret = unsafe {
            self.swapchain_loader
                .acquire_next_image(***self, timeout, **object.1, **object.2)
        }
        .context(VkAcquireNextImagesSnafu {})?;
        // Release the queue kept alive by the previous present.
        self.queue.replace(None);
        object.0 = Some(ret.0);
        Ok((ret.0, ret.1, object.1.clone()))
    }

    /// Presents on `queue`, waiting on `wait_semaphores`.
    ///
    /// The queue and semaphores are stored on the swapchain so they stay
    /// alive at least until the next `acquire_next_image`/`queue_present`.
    /// Returns Vulkan's "suboptimal" flag on success.
    pub fn queue_present(
        &self,
        queue: Queue<T>,
        wait_semaphores: Vec<BinarySemaphore<T>>,
        present_info: vk::PresentInfoKHRBuilder,
    ) -> Result<bool> {
        let wait_semaphores_raw = wait_semaphores.iter().map(|s| ***s).collect::<Vec<_>>();
        let create_info = present_info
            .swapchains(slice::from_ref(&***self))
            .wait_semaphores(&wait_semaphores_raw);
        let vk_queue = **queue;
        // Keep the handles alive past this call; released on next acquire.
        self.queue.replace(Some(queue));
        self.wait_semaphores.replace(wait_semaphores);
        unsafe { self.swapchain_loader.queue_present(vk_queue, &create_info) }
            .context(VkQueuePresentSnafu {})
    }

    /// Waits for the fence paired with acquired image `id`, resets it, and
    /// frees the sync pair for reuse.
    ///
    /// # Errors
    ///
    /// `Error::RanOutSyncObjects` when no pair is tagged with `id`
    /// (previously this panicked via `.expect`, inconsistent with
    /// `acquire_next_image` and with this function returning `Result`),
    /// or the wait/reset error from Vulkan.
    pub fn wait_and_reset(&self, id: u32) -> Result<()> {
        let mut objects = self.acquire_objects.borrow_mut();
        let object = objects
            .iter_mut()
            .find(|(x, _, _)| *x == Some(id))
            .ok_or(Error::RanOutSyncObjects)?;
        unsafe {
            self.device
                .wait_for_fences(slice::from_ref(&**object.2), true, u64::MAX)
        }
        .context(VkWaitForFencesSnafu)?;
        unsafe { self.device.reset_fences(slice::from_ref(&**object.2)) }
            .context(VkResetFencesSnafu)?;
        object.0 = None;
        Ok(())
    }
}

/// Reference-counted payload behind [`SwapchainKHR`]: owns the raw swapchain
/// handle plus the synchronization bookkeeping for acquire/present.
pub struct RcSwapchainKHR<T> {
    /// Raw Vulkan swapchain handle; destroyed in `Drop`.
    pub inner: vk::SwapchainKHR,
    /// Device the swapchain was created on, kept alive here.
    pub device: Device<T>,
    /// Extension loader used for acquire/present/destroy calls.
    pub swapchain_loader: Swapchain,
    /// Surface the swapchain presents to, kept alive here.
    pub surface: SurfaceKHR<T>,
    /// Queue from the most recent `queue_present`, held until the next acquire.
    pub queue: RefCell<Option<Queue<T>>>,
    /// Wait semaphores from the most recent `queue_present`, held likewise.
    pub wait_semaphores: RefCell<Vec<BinarySemaphore<T>>>,
    /// (in-flight image index, acquire semaphore, fence) pool; a `None`
    /// index marks the pair as free.
    #[allow(clippy::type_complexity)]
    pub acquire_objects: RefCell<Vec<(Option<u32>, BinarySemaphore<T>, Fence<T>)>>,
    /// Caller-supplied user data.
    pub user: T,
}

impl<T> Drop for RcSwapchainKHR<T> {
    /// Destroys the wrapped `vk::SwapchainKHR` when the last reference is dropped.
    fn drop(&mut self) {
        // SAFETY: `inner` was created via `swapchain_loader` and is destroyed
        // exactly once, here.
        unsafe { self.swapchain_loader.destroy_swapchain(self.inner, None) }
    }
}

/// For each listed name `Foo`, generates a cloneable tuple wrapper
/// `Foo<T>(pub Rc<RcFoo<T>>)` plus a `Deref` to the shared `RcFoo<T>`
/// payload. `concat_idents` builds the `RcFoo` identifier from `Rc` + `Foo`;
/// `Derivative` gives `Clone` without requiring `T: Clone`.
macro_rules! vk_tuple_types {
    ($($t:ident)*) => ($(
        concat_idents::concat_idents! (RcName = Rc, $t {
            #[derive(Derivative)]
            #[derivative(Clone(bound=""))]
            pub struct $t<T> (pub Rc<RcName<T>>);
            impl<T> Deref for $t<T> {
                type Target = RcName<T>;

                fn deref(&self) -> &Self::Target {
                    &self.0
                }
            }
        });
    )*)
}

// Generate the ref-counted tuple wrappers (`Foo<T>(Rc<RcFoo<T>>)`) for every
// Vulkan object type listed below.
vk_tuple_types! {
    AccelerationStructureKHR Allocator Buffer CommandBuffer CommandPool DescriptorPool Device Fence Framebuffer Image ImageView Pipeline PipelineLayout Queue RenderPass Sampler BinarySemaphore TimelineSemaphore ShaderModule
}

mod impls {
    use super::*;
    use std::cmp::Ordering::{self, Equal, Greater, Less};
    use std::fmt;
    use std::hash::{Hash, Hasher};

    /// Comparison, hashing, and `Debug` impls that all delegate to the raw
    /// Vulkan handle reached through two derefs (`***self`), so wrappers are
    /// equal iff they wrap the same handle.
    macro_rules! vk_types {
        ($($t:ident)*) => ($(
            impl<T> Eq for $t<T> {}
            impl<T> PartialEq for $t<T> {
                #[inline]
                fn eq(&self, other: &Self) -> bool { (***self) == (***other) }
            }
            impl<T> Ord for $t<T> {
                #[inline]
                fn cmp(&self, other: &Self) -> Ordering {
                    // The order here is important to generate more optimal assembly.
                    // See <https://github.com/rust-lang/rust/issues/63758> for more info.
                    if ***self < ***other { Less }
                    else if ***self == ***other { Equal }
                    else { Greater }
                }
            }
            impl<T> PartialOrd for $t<T> {
                #[inline]
                fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                    match (self <= other, self >= other) {
                        (false, false) => None,
                        (false, true) => Some(Greater),
                        (true, false) => Some(Less),
                        (true, true) => Some(Equal),
                    }
                }
                #[inline]
                fn lt(&self, other: &Self) -> bool { (***self) < (***other) }
                #[inline]
                fn le(&self, other: &Self) -> bool { (***self) <= (***other) }
                #[inline]
                fn ge(&self, other: &Self) -> bool { (***self) >= (***other) }
                #[inline]
                fn gt(&self, other: &Self) -> bool { (***self) > (***other) }
            }
            impl<T> Hash for $t<T> {
                fn hash<H: Hasher>(&self, state: &mut H) {
                    (***self).hash(state)
                }
                fn hash_slice<H: Hasher>(data: &[Self], state: &mut H) {
                    <<<$t::<T> as Deref>::Target as Deref>::Target as Hash>::hash_slice(data.iter().map(|x|***x).collect::<Vec<_>>().as_slice(), state)
                }
            }
            impl<T> fmt::Debug for $t<T> {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    // BUGFIX: formatting `*self` here re-entered this very
                    // impl and recursed until stack overflow. Format the
                    // underlying Vulkan handle instead.
                    write!(f, "{:?}", ***self)
                }
            }
        )*)
    }

    /// `Deref` from each `RcFoo<T>` payload to its raw `vk::Foo` handle
    /// stored in the `inner` field.
    macro_rules! vk_subinner_types {
        ($($t:ident)*) => ($(
            concat_idents::concat_idents! (RcName = Rc, $t {
                impl<T> Deref for RcName<T> {
                    type Target = vk::$t;

                    fn deref(&self) -> &Self::Target {
                        &self.inner
                    }
                }
            });
        )*)
    }

    /// `Deref` for wrapper types that store their payload in a named `inner`
    /// field (rather than the tuple field `0` used by `vk_tuple_types!`).
    macro_rules! vk_inner_types {
        ($($t:ident)*) => ($(
            concat_idents::concat_idents! (RcName = Rc, $t {
                impl<T> Deref for $t<T> {
                    type Target = RcName<T>;

                    fn deref(&self) -> &Self::Target {
                        &self.inner
                    }
                }
            });
        )*)
    }

    vk_inner_types! {
        DescriptorSet DescriptorSetLayout Instance SurfaceKHR SwapchainKHR
    }

    // Note: BinarySemaphore/TimelineSemaphore are absent here because their
    // raw handle type is `vk::Semaphore`, not `vk::BinarySemaphore` — they
    // carry hand-written Deref impls instead.
    vk_subinner_types! {
        AccelerationStructureKHR Buffer CommandBuffer CommandPool DescriptorPool DescriptorSet DescriptorSetLayout Fence Framebuffer Image ImageView Pipeline PipelineLayout Queue RenderPass Sampler ShaderModule SurfaceKHR SwapchainKHR
    }

    vk_types! {
        AccelerationStructureKHR Buffer CommandBuffer CommandPool DescriptorPool Fence Framebuffer Image ImageView Pipeline PipelineLayout Queue RenderPass Sampler BinarySemaphore TimelineSemaphore ShaderModule
    }
}