use winapi::shared::dxgiformat::DXGI_FORMAT;
use winapi::shared::minwindef::UINT;
use winapi::um::d3d12;
use hal::{buffer, format, image, memory, pass, pso, DescriptorPool as HalDescriptorPool};
use native::{self, query};
use range_alloc::RangeAllocator;
use root_constants::RootConstant;
use {Backend, MAX_VERTEX_BUFFERS};
use std::collections::BTreeMap;
use std::ops::Range;
/// A shader module: either already-compiled blobs keyed by entry-point name,
/// or raw SPIR-V bytes kept for later processing.
#[derive(Debug, Hash)]
pub enum ShaderModule {
    /// Compiled shader blobs, one per entry-point name.
    Compiled(BTreeMap<String, native::Blob>),
    /// Raw SPIR-V code.
    Spirv(Vec<u8>),
}
// NOTE(review): Send/Sync assumes `native::Blob` COM pointers are only used
// with external synchronization — confirm against the native wrapper.
unsafe impl Send for ShaderModule {}
unsafe impl Sync for ShaderModule {}
/// Description of a resource barrier tied to a render-pass attachment.
#[derive(Clone, Debug, Hash)]
pub struct BarrierDesc {
    /// Attachment this barrier applies to.
    pub(crate) attachment_id: pass::AttachmentId,
    /// Resource state transition: `start` is the before-state, `end` the after-state.
    pub(crate) states: Range<d3d12::D3D12_RESOURCE_STATES>,
    /// Barrier flags (none, begin-only, or end-only for split barriers).
    pub(crate) flags: d3d12::D3D12_RESOURCE_BARRIER_FLAGS,
}
impl BarrierDesc {
    /// Creates a whole (non-split) barrier description with no flags set.
    pub(crate) fn new(
        attachment_id: pass::AttachmentId,
        states: Range<d3d12::D3D12_RESOURCE_STATES>,
    ) -> Self {
        BarrierDesc {
            attachment_id,
            states,
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE,
        }
    }

    /// Splits this barrier into a BEGIN_ONLY/END_ONLY pair, returned as a
    /// range so the two halves can be recorded at different points.
    pub(crate) fn split(self) -> Range<Self> {
        let begin = BarrierDesc {
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_BEGIN_ONLY,
            ..self.clone()
        };
        let end = BarrierDesc {
            flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_END_ONLY,
            ..self
        };
        begin..end
    }
}
/// A single subpass within a [`RenderPass`], with the barriers that must be
/// recorded around it.
#[derive(Clone, Debug, Hash)]
pub struct SubpassDesc {
    pub(crate) color_attachments: Vec<pass::AttachmentRef>,
    pub(crate) depth_stencil_attachment: Option<pass::AttachmentRef>,
    pub(crate) input_attachments: Vec<pass::AttachmentRef>,
    pub(crate) resolve_attachments: Vec<pass::AttachmentRef>,
    /// Barriers to record before the subpass begins.
    pub(crate) pre_barriers: Vec<BarrierDesc>,
    /// Barriers to record after the subpass ends.
    pub(crate) post_barriers: Vec<BarrierDesc>,
}
impl SubpassDesc {
    /// Returns `true` if the given attachment is referenced by any of this
    /// subpass' color, depth-stencil, input, or resolve attachment lists.
    pub(crate) fn is_using(&self, at_id: pass::AttachmentId) -> bool {
        let references = self
            .color_attachments
            .iter()
            .chain(self.depth_stencil_attachment.iter())
            .chain(self.input_attachments.iter())
            .chain(self.resolve_attachments.iter());
        for &(id, _) in references {
            if id == at_id {
                return true;
            }
        }
        false
    }
}
/// A render pass: its attachments, the subpasses that use them, and the
/// barriers recorded after the final subpass.
#[derive(Clone, Debug, Hash)]
pub struct RenderPass {
    pub(crate) attachments: Vec<pass::Attachment>,
    pub(crate) subpasses: Vec<SubpassDesc>,
    /// Barriers recorded once all subpasses have finished.
    pub(crate) post_barriers: Vec<BarrierDesc>,
}
/// Mapping of an API vertex-buffer binding onto a D3D12 input slot.
#[derive(Copy, Clone, Debug)]
pub struct VertexBinding {
    /// Remapped D3D12 input slot index.
    pub mapped_binding: usize,
    /// Vertex stride in bytes. (`UINT` is `u32` on winapi.)
    pub stride: UINT,
    /// Byte offset into the bound buffer.
    pub offset: u32,
}
/// A compiled graphics pipeline state object together with the data needed
/// to bind it on a command list.
///
/// Was previously declared with several fields jammed on one line; reformatted
/// to one field per line to match the rest of the file.
#[derive(Debug)]
pub struct GraphicsPipeline {
    /// Underlying D3D12 pipeline state object.
    pub(crate) raw: native::PipelineState,
    /// Root signature this pipeline was created against.
    pub(crate) signature: native::RootSignature,
    /// Number of root-signature parameter slots.
    pub(crate) num_parameter_slots: usize,
    /// Primitive topology to set when binding this pipeline.
    pub(crate) topology: d3d12::D3D12_PRIMITIVE_TOPOLOGY,
    /// Root constants of the pipeline layout.
    pub(crate) constants: Vec<RootConstant>,
    /// Per-slot vertex buffer remapping; `None` for unused slots.
    pub(crate) vertex_bindings: [Option<VertexBinding>; MAX_VERTEX_BUFFERS],
    /// Pipeline states baked in at creation time.
    pub(crate) baked_states: pso::BakedStates,
}
// NOTE(review): Send/Sync assumes the raw COM pointers are externally
// synchronized — confirm against the command-list usage.
unsafe impl Send for GraphicsPipeline {}
unsafe impl Sync for GraphicsPipeline {}
/// A compiled compute pipeline state object.
///
/// Was previously declared with several fields jammed on one line; reformatted
/// to one field per line to match the rest of the file.
#[derive(Debug)]
pub struct ComputePipeline {
    /// Underlying D3D12 pipeline state object.
    pub(crate) raw: native::PipelineState,
    /// Root signature this pipeline was created against.
    pub(crate) signature: native::RootSignature,
    /// Number of root-signature parameter slots.
    pub(crate) num_parameter_slots: usize,
    /// Root constants of the pipeline layout.
    pub(crate) constants: Vec<RootConstant>,
}
// NOTE(review): Send/Sync assumes the raw COM pointers are externally
// synchronized — confirm against the command-list usage.
unsafe impl Send for ComputePipeline {}
unsafe impl Sync for ComputePipeline {}
bitflags! {
    /// Which descriptor table kinds a descriptor set uses.
    pub struct SetTableTypes: u8 {
        /// View descriptors (shader resource / constant buffer / unordered access).
        const SRV_CBV_UAV = 0x1;
        /// Sampler descriptors.
        const SAMPLERS = 0x2;
    }
}
/// Shorthand for the view (SRV/CBV/UAV) table type.
pub const SRV_CBV_UAV: SetTableTypes = SetTableTypes::SRV_CBV_UAV;
/// Shorthand for the sampler table type.
pub const SAMPLERS: SetTableTypes = SetTableTypes::SAMPLERS;
/// A pipeline layout: the root signature plus bookkeeping about its tables
/// and root constants.
#[derive(Debug, Hash)]
pub struct PipelineLayout {
    pub(crate) raw: native::RootSignature,
    /// Table kinds per descriptor set, in set order.
    pub(crate) tables: Vec<SetTableTypes>,
    pub(crate) root_constants: Vec<RootConstant>,
    pub(crate) num_parameter_slots: usize,
}
// NOTE(review): Send/Sync assumes the raw root signature pointer is
// externally synchronized.
unsafe impl Send for PipelineLayout {}
unsafe impl Sync for PipelineLayout {}
/// A framebuffer: the image views attached to a render pass instance.
#[derive(Debug, Clone)]
pub struct Framebuffer {
    pub(crate) attachments: Vec<ImageView>,
    /// Number of layers rendered to.
    pub(crate) layers: image::Layer,
}
/// A buffer that has been created but not yet bound to memory.
#[derive(Copy, Clone, Debug)]
pub struct BufferUnbound {
    /// Memory requirements reported for this buffer.
    pub(crate) requirements: memory::Requirements,
    pub(crate) usage: buffer::Usage,
}
/// A buffer bound to memory and backed by a native resource.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct BufferBound {
    pub(crate) resource: native::Resource,
    pub(crate) requirements: memory::Requirements,
    /// Optional descriptor used for clear operations, when one was created.
    #[derivative(Debug = "ignore")]
    pub(crate) clear_uav: Option<native::CpuDescriptor>,
}
// NOTE(review): Send/Sync assumes the raw resource/descriptor pointers are
// externally synchronized.
unsafe impl Send for BufferBound {}
unsafe impl Sync for BufferBound {}
/// A buffer resource in either of its two lifecycle states.
#[derive(Derivative)]
#[derivative(Debug)]
pub enum Buffer {
    /// Created, memory not yet bound.
    Unbound(BufferUnbound),
    /// Bound to memory and usable.
    Bound(BufferBound),
}
impl Buffer {
    /// Returns the unbound state; panics if the buffer is already bound.
    pub(crate) fn expect_unbound(&self) -> &BufferUnbound {
        if let Buffer::Unbound(ref unbound) = *self {
            unbound
        } else {
            panic!("Expected unbound buffer")
        }
    }

    /// Returns the bound state; panics if the buffer is still unbound.
    pub(crate) fn expect_bound(&self) -> &BufferBound {
        if let Buffer::Bound(ref bound) = *self {
            bound
        } else {
            panic!("Expected bound buffer")
        }
    }
}
/// A buffer view holding its SRV and UAV descriptor handles.
#[derive(Copy, Clone, Derivative)]
#[derivative(Debug)]
pub struct BufferView {
    #[derivative(Debug = "ignore")]
    pub(crate) handle_srv: native::CpuDescriptor,
    #[derivative(Debug = "ignore")]
    pub(crate) handle_uav: native::CpuDescriptor,
}
// NOTE(review): Send/Sync assumes descriptor handles are externally
// synchronized.
unsafe impl Send for BufferView {}
unsafe impl Sync for BufferView {}
/// Where an image's storage lives.
#[derive(Clone)]
pub enum Place {
    /// Owned by the swapchain rather than an application heap.
    SwapChain,
    /// Placed in a heap at the given byte offset.
    Heap { raw: native::Heap, offset: u64 },
}
/// An image bound to memory, with the cached data needed to build views,
/// clears, and copies.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct ImageBound {
    pub(crate) resource: native::Resource,
    /// Where the backing storage lives (swapchain or heap placement).
    #[derivative(Debug = "ignore")]
    pub(crate) place: Place,
    pub(crate) surface_type: format::SurfaceType,
    pub(crate) kind: image::Kind,
    pub(crate) usage: image::Usage,
    /// Format used for auto-created views, if any.
    pub(crate) default_view_format: Option<DXGI_FORMAT>,
    pub(crate) view_caps: image::ViewCapabilities,
    /// Raw D3D12 resource descriptor (mips, dimensions, ...).
    #[derivative(Debug = "ignore")]
    pub(crate) descriptor: d3d12::D3D12_RESOURCE_DESC,
    pub(crate) bytes_per_block: u8,
    /// Compressed block dimensions (width, height) in texels.
    pub(crate) block_dim: (u8, u8),
    /// Pre-created descriptors for color clears.
    #[derivative(Debug = "ignore")]
    pub(crate) clear_cv: Vec<native::CpuDescriptor>,
    /// Pre-created descriptors for depth clears.
    #[derivative(Debug = "ignore")]
    pub(crate) clear_dv: Vec<native::CpuDescriptor>,
    /// Pre-created descriptors for stencil clears.
    #[derivative(Debug = "ignore")]
    pub(crate) clear_sv: Vec<native::CpuDescriptor>,
    pub(crate) requirements: memory::Requirements,
}
// NOTE(review): Send/Sync assumes the raw resource/descriptor pointers are
// externally synchronized.
unsafe impl Send for ImageBound {}
unsafe impl Sync for ImageBound {}
impl ImageBound {
    /// Builds a subresource range covering every mip level and array layer
    /// of this image for the given aspects.
    pub fn to_subresource_range(&self, aspects: format::Aspects) -> image::SubresourceRange {
        let level_count = self.descriptor.MipLevels as _;
        let layer_count = self.kind.num_layers();
        image::SubresourceRange {
            aspects,
            levels: 0..level_count,
            layers: 0..layer_count,
        }
    }

    /// Computes the D3D12 subresource index: mips are innermost, then array
    /// layers, then planes.
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        let mip_count = self.descriptor.MipLevels as UINT;
        let layer_count = self.kind.num_layers() as UINT;
        mip_level + layer * mip_count + plane * mip_count * layer_count
    }
}
/// An image that has been created but not yet bound to memory.
#[derive(Copy, Clone, Derivative)]
#[derivative(Debug)]
pub struct ImageUnbound {
    /// Raw D3D12 resource descriptor to use when the image gets placed.
    #[derivative(Debug = "ignore")]
    pub(crate) desc: d3d12::D3D12_RESOURCE_DESC,
    pub(crate) view_format: Option<DXGI_FORMAT>,
    /// Format for depth-stencil views, if applicable.
    pub(crate) dsv_format: Option<DXGI_FORMAT>,
    pub(crate) requirements: memory::Requirements,
    pub(crate) format: format::Format,
    pub(crate) kind: image::Kind,
    pub(crate) usage: image::Usage,
    pub(crate) tiling: image::Tiling,
    pub(crate) view_caps: image::ViewCapabilities,
    pub(crate) bytes_per_block: u8,
    /// Compressed block dimensions (width, height) in texels.
    pub(crate) block_dim: (u8, u8),
    pub(crate) num_levels: image::Level,
}
impl ImageUnbound {
    /// Computes the D3D12 subresource index: mips are innermost, then array
    /// layers, then planes.
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        let mip_count = self.desc.MipLevels as UINT;
        let layer_count = self.kind.num_layers() as UINT;
        mip_level + layer * mip_count + plane * mip_count * layer_count
    }
}
/// An image resource in either of its two lifecycle states.
/// (Fixed a stray space before the comma on the `Bound` variant.)
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub enum Image {
    /// Created, memory not yet bound.
    Unbound(ImageUnbound),
    /// Bound to memory and backed by a native resource.
    Bound(ImageBound),
}
impl Image {
    /// Returns the unbound state; panics if the image is already bound.
    pub(crate) fn expect_unbound(&self) -> &ImageUnbound {
        if let Image::Unbound(ref unbound) = *self {
            unbound
        } else {
            panic!("Expected unbound image")
        }
    }

    /// Returns the bound state; panics if the image is still unbound.
    pub(crate) fn expect_bound(&self) -> &ImageBound {
        if let Image::Bound(ref bound) = *self {
            bound
        } else {
            panic!("Expected bound image")
        }
    }

    /// The D3D12 resource descriptor, regardless of binding state.
    pub fn get_desc(&self) -> &d3d12::D3D12_RESOURCE_DESC {
        match *self {
            Image::Unbound(ref image) => &image.desc,
            Image::Bound(ref image) => &image.descriptor,
        }
    }

    /// Computes the subresource index, delegating to the current state.
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT, plane: UINT) -> UINT {
        match *self {
            Image::Unbound(ref image) => image.calc_subresource(mip_level, layer, plane),
            Image::Bound(ref image) => image.calc_subresource(mip_level, layer, plane),
        }
    }
}
/// A view into an image, holding whichever descriptor handles were created
/// for its usage (SRV/RTV/DSV/UAV).
///
/// Was previously declared with two fields jammed on one line; reformatted
/// to one field per line to match the rest of the file.
#[derive(Copy, Derivative, Clone)]
#[derivative(Debug)]
pub struct ImageView {
    #[derivative(Debug = "ignore")]
    pub(crate) resource: native::Resource,
    #[derivative(Debug = "ignore")]
    pub(crate) handle_srv: Option<native::CpuDescriptor>,
    #[derivative(Debug = "ignore")]
    pub(crate) handle_rtv: Option<native::CpuDescriptor>,
    #[derivative(Debug = "ignore")]
    pub(crate) handle_dsv: Option<native::CpuDescriptor>,
    #[derivative(Debug = "ignore")]
    pub(crate) handle_uav: Option<native::CpuDescriptor>,
    pub(crate) dxgi_format: DXGI_FORMAT,
    /// Total mip levels of the underlying image (used for subresource math).
    pub(crate) num_levels: image::Level,
    /// Mip range covered by this view as (first, last).
    pub(crate) mip_levels: (image::Level, image::Level),
    /// Layer range covered by this view as (first, last).
    pub(crate) layers: (image::Layer, image::Layer),
    pub(crate) kind: image::Kind,
}
// NOTE(review): Send/Sync assumes the raw resource/descriptor pointers are
// externally synchronized.
unsafe impl Send for ImageView {}
unsafe impl Sync for ImageView {}
impl ImageView {
    /// Computes the subresource index of a (mip, layer) pair, using the
    /// underlying image's total mip count as the layer stride.
    pub fn calc_subresource(&self, mip_level: UINT, layer: UINT) -> UINT {
        let mip_count = self.num_levels as UINT;
        layer * mip_count + mip_level
    }
}
/// A sampler, represented by its CPU descriptor handle.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Sampler {
    #[derivative(Debug = "ignore")]
    pub(crate) handle: native::CpuDescriptor,
}
/// A descriptor set layout: the bindings sets allocated from it will contain.
#[derive(Debug)]
pub struct DescriptorSetLayout {
    pub(crate) bindings: Vec<pso::DescriptorSetLayoutBinding>,
}
/// A fence, backed by a native fence object.
#[derive(Debug)]
pub struct Fence {
    pub(crate) raw: native::Fence,
}
// NOTE(review): Send/Sync assumes the raw fence pointer is externally
// synchronized.
unsafe impl Send for Fence {}
unsafe impl Sync for Fence {}
/// A semaphore, implemented on top of a native fence object.
#[derive(Debug)]
pub struct Semaphore {
    pub(crate) raw: native::Fence,
}
// NOTE(review): Send/Sync assumes the raw fence pointer is externally
// synchronized.
unsafe impl Send for Semaphore {}
unsafe impl Sync for Semaphore {}
/// A memory allocation: a heap plus bookkeeping for its type and size.
#[derive(Debug)]
pub struct Memory {
    pub(crate) heap: native::Heap,
    /// Index of the memory type this allocation was made from.
    pub(crate) type_id: usize,
    /// Size of the allocation in bytes.
    pub(crate) size: u64,
    /// Optional companion resource — presumably for mappable memory;
    /// TODO(review): confirm against the allocation path.
    pub(crate) resource: Option<native::Resource>,
}
// NOTE(review): Send/Sync assumes the raw heap/resource pointers are
// externally synchronized.
unsafe impl Send for Memory {}
unsafe impl Sync for Memory {}
/// A contiguous range of descriptors allocated for one binding.
#[derive(Debug)]
pub struct DescriptorRange {
    /// CPU/GPU handle pair at the start of the range.
    pub(crate) handle: DualHandle,
    pub(crate) ty: pso::DescriptorType,
    /// Size of a single descriptor increment in bytes.
    pub(crate) handle_size: u64,
    /// Number of descriptors in the range.
    pub(crate) count: u64,
}
impl DescriptorRange {
    /// CPU descriptor handle at `index` within this range.
    ///
    /// Panics when `index` is outside the range.
    pub(crate) fn at(&self, index: u64) -> native::CpuDescriptor {
        assert!(index < self.count);
        let offset = (self.handle_size * index) as usize;
        native::CpuDescriptor {
            ptr: self.handle.cpu.ptr + offset,
        }
    }
}
/// Per-binding allocation info inside a [`DescriptorSet`].
#[derive(Debug, Default)]
pub struct DescriptorBindingInfo {
    /// Number of descriptors in this binding.
    pub(crate) count: u64,
    /// Range in the view (SRV/CBV/UAV) heap, when the binding has views.
    pub(crate) view_range: Option<DescriptorRange>,
    /// Range in the sampler heap, when the binding has samplers.
    pub(crate) sampler_range: Option<DescriptorRange>,
    /// Whether the binding is an unordered-access view.
    pub(crate) is_uav: bool,
}
/// An allocated descriptor set: references into the pool's shared heaps plus
/// per-binding allocation info.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct DescriptorSet {
    // Required for binding at command buffer time: the heaps the set's
    // descriptors live in.
    #[derivative(Debug = "ignore")]
    pub(crate) heap_srv_cbv_uav: native::DescriptorHeap,
    #[derivative(Debug = "ignore")]
    pub(crate) heap_samplers: native::DescriptorHeap,
    /// Indexed by binding number; unused slots hold default entries.
    pub(crate) binding_infos: Vec<DescriptorBindingInfo>,
    /// GPU handle of the first sampler allocated for this set, if any.
    #[derivative(Debug = "ignore")]
    pub(crate) first_gpu_sampler: Option<native::GpuDescriptor>,
    /// GPU handle of the first view allocated for this set, if any.
    #[derivative(Debug = "ignore")]
    pub(crate) first_gpu_view: Option<native::GpuDescriptor>,
}
// NOTE(review): Send/Sync assumes the raw heap pointers are externally
// synchronized.
unsafe impl Send for DescriptorSet {}
unsafe impl Sync for DescriptorSet {}
impl DescriptorSet {
    /// GPU descriptor at the start of the view (SRV/CBV/UAV) heap.
    pub fn srv_cbv_uav_gpu_start(&self) -> native::GpuDescriptor {
        let heap = &self.heap_srv_cbv_uav;
        heap.start_gpu_descriptor()
    }

    /// GPU descriptor at the start of the sampler heap.
    pub fn sampler_gpu_start(&self) -> native::GpuDescriptor {
        let heap = &self.heap_samplers;
        heap.start_gpu_descriptor()
    }
}
/// A paired CPU and GPU descriptor handle pointing at the same descriptor.
#[derive(Copy, Clone, Derivative)]
#[derivative(Debug)]
pub struct DualHandle {
    #[derivative(Debug = "ignore")]
    pub(crate) cpu: native::CpuDescriptor,
    #[derivative(Debug = "ignore")]
    pub(crate) gpu: native::GpuDescriptor,
    /// Number of descriptors this handle stands for (see
    /// `DescriptorHeapSlice::free_handles`, which reads it back as a count).
    pub(crate) size: u64,
}
/// A descriptor heap with a range allocator handing out handle indices.
#[derive(Derivative)]
#[derivative(Debug)]
pub struct DescriptorHeap {
    #[derivative(Debug = "ignore")]
    pub(crate) raw: native::DescriptorHeap,
    /// Size of a single descriptor increment in bytes.
    pub(crate) handle_size: u64,
    /// Capacity of the heap in descriptors.
    pub(crate) total_handles: u64,
    /// CPU/GPU handles of the first descriptor.
    pub(crate) start: DualHandle,
    pub(crate) range_allocator: RangeAllocator<u64>,
}
impl DescriptorHeap {
    /// Returns the CPU/GPU handle pair `index` descriptors past the heap
    /// start; `size` is stored in the result unchanged.
    ///
    /// Panics when `index` is outside the heap.
    pub(crate) fn at(&self, index: u64, size: u64) -> DualHandle {
        assert!(index < self.total_handles);
        let offset = self.handle_size * index;
        let cpu = native::CpuDescriptor {
            ptr: self.start.cpu.ptr + offset as usize,
        };
        let gpu = native::GpuDescriptor {
            ptr: self.start.gpu.ptr + offset,
        };
        DualHandle { cpu, gpu, size }
    }

    /// Destroys the underlying native heap.
    ///
    /// # Safety
    /// NOTE(review): presumably the heap must no longer be referenced by any
    /// in-flight GPU work — confirm at call sites.
    pub(crate) unsafe fn destroy(&self) {
        self.raw.destroy();
    }
}
/// A sub-range of a shared descriptor heap with its own allocator.
///
/// Was previously declared with two fields jammed on one line; reformatted
/// to one field per line to match the rest of the file.
#[derive(Debug)]
pub struct DescriptorHeapSlice {
    /// The shared heap this slice carves descriptors out of.
    pub(crate) heap: native::DescriptorHeap,
    /// CPU/GPU handles of the first descriptor of the slice.
    pub(crate) start: DualHandle,
    /// Size of a single descriptor increment in bytes.
    pub(crate) handle_size: u64,
    pub(crate) range_allocator: RangeAllocator<u64>,
}
impl DescriptorHeapSlice {
    /// Allocates `count` contiguous descriptors and returns the handle pair
    /// at the start of the allocated range, or `None` when the slice is
    /// exhausted.
    pub(crate) fn alloc_handles(&mut self, count: u64) -> Option<DualHandle> {
        self.range_allocator
            .allocate_range(count)
            .ok()
            .map(|range| {
                let offset = self.handle_size * range.start;
                DualHandle {
                    cpu: native::CpuDescriptor {
                        ptr: self.start.cpu.ptr + offset as usize,
                    },
                    // Both operands are already u64; the old `as u64` casts
                    // were no-ops and have been removed.
                    gpu: native::GpuDescriptor {
                        ptr: self.start.gpu.ptr + offset,
                    },
                    // `size` carries the descriptor count so free_handles can
                    // reconstruct the range.
                    size: count,
                }
            })
    }

    /// Returns a range previously handed out by `alloc_handles`, identified
    /// by its GPU pointer and stored count.
    pub(crate) fn free_handles(&mut self, handle: DualHandle) {
        let start = (handle.gpu.ptr - self.start.gpu.ptr) / self.handle_size;
        // `handle.size` is already u64; no cast needed.
        self.range_allocator.free_range(start..start + handle.size);
    }

    /// Frees every range at once by resetting the allocator.
    pub(crate) fn clear(&mut self) {
        self.range_allocator.reset();
    }
}
/// A descriptor pool carving sets out of two heap slices: one for views
/// (SRV/CBV/UAV) and one for samplers.
#[derive(Debug)]
pub struct DescriptorPool {
    pub(crate) heap_srv_cbv_uav: DescriptorHeapSlice,
    pub(crate) heap_sampler: DescriptorHeapSlice,
    // NOTE(review): presumably the range descriptors the pool was created
    // with — confirm against the pool-creation path.
    pub(crate) pools: Vec<pso::DescriptorRangeDesc>,
    pub(crate) max_size: u64,
}
// NOTE(review): Send/Sync assumes the contained raw heap pointers are
// externally synchronized.
unsafe impl Send for DescriptorPool {}
unsafe impl Sync for DescriptorPool {}
impl HalDescriptorPool<Backend> for DescriptorPool {
    /// Allocates a descriptor set matching `layout` out of the pool's heaps.
    ///
    /// Fix: the previous version early-returned on an exhausted heap without
    /// returning already-allocated ranges to the allocators, permanently
    /// leaking pool capacity on every failed allocation. All partial
    /// allocations are now rolled back before reporting `OutOfPoolMemory`.
    unsafe fn allocate_set(
        &mut self,
        layout: &DescriptorSetLayout,
    ) -> Result<DescriptorSet, pso::AllocationError> {
        let mut binding_infos = Vec::new();
        let mut first_gpu_sampler = None;
        let mut first_gpu_view = None;

        for binding in &layout.bindings {
            let HeapProperties {
                has_view,
                has_sampler,
                is_uav,
            } = HeapProperties::from(binding.ty);

            // Bindings may arrive out of order; grow the vector with default
            // (empty) entries so it can be indexed by binding number.
            while binding_infos.len() <= binding.binding as usize {
                binding_infos.push(DescriptorBindingInfo::default());
            }

            let view_range = if has_view {
                match self
                    .heap_srv_cbv_uav
                    .alloc_handles(binding.count as u64)
                {
                    Some(handle) => {
                        if first_gpu_view.is_none() {
                            first_gpu_view = Some(handle.gpu);
                        }
                        Some(DescriptorRange {
                            handle,
                            ty: binding.ty,
                            count: binding.count as _,
                            handle_size: self.heap_srv_cbv_uav.handle_size,
                        })
                    }
                    None => {
                        // Roll back everything allocated for earlier bindings
                        // so the failed set does not leak pool space.
                        for info in &binding_infos {
                            if let Some(ref range) = info.view_range {
                                self.heap_srv_cbv_uav.free_handles(range.handle);
                            }
                            if let Some(ref range) = info.sampler_range {
                                self.heap_sampler.free_handles(range.handle);
                            }
                        }
                        return Err(pso::AllocationError::OutOfPoolMemory);
                    }
                }
            } else {
                None
            };

            let sampler_range = if has_sampler {
                match self.heap_sampler.alloc_handles(binding.count as u64) {
                    Some(handle) => {
                        if first_gpu_sampler.is_none() {
                            first_gpu_sampler = Some(handle.gpu);
                        }
                        Some(DescriptorRange {
                            handle,
                            ty: binding.ty,
                            count: binding.count as _,
                            handle_size: self.heap_sampler.handle_size,
                        })
                    }
                    None => {
                        // Also free the view range just allocated for this
                        // binding before rolling back the earlier ones.
                        if let Some(ref range) = view_range {
                            self.heap_srv_cbv_uav.free_handles(range.handle);
                        }
                        for info in &binding_infos {
                            if let Some(ref range) = info.view_range {
                                self.heap_srv_cbv_uav.free_handles(range.handle);
                            }
                            if let Some(ref range) = info.sampler_range {
                                self.heap_sampler.free_handles(range.handle);
                            }
                        }
                        return Err(pso::AllocationError::OutOfPoolMemory);
                    }
                }
            } else {
                None
            };

            binding_infos[binding.binding as usize] = DescriptorBindingInfo {
                count: binding.count as _,
                view_range,
                sampler_range,
                is_uav,
            };
        }

        Ok(DescriptorSet {
            heap_srv_cbv_uav: self.heap_srv_cbv_uav.heap.clone(),
            heap_samplers: self.heap_sampler.heap.clone(),
            binding_infos,
            first_gpu_sampler,
            first_gpu_view,
        })
    }

    /// Returns every range of each set back to the pool's allocators.
    unsafe fn free_sets<I>(&mut self, descriptor_sets: I)
    where
        I: IntoIterator<Item = DescriptorSet>,
    {
        for descriptor_set in descriptor_sets {
            for binding_info in &descriptor_set.binding_infos {
                if let Some(ref view_range) = binding_info.view_range {
                    if HeapProperties::from(view_range.ty).has_view {
                        self.heap_srv_cbv_uav.free_handles(view_range.handle);
                    }
                }
                if let Some(ref sampler_range) = binding_info.sampler_range {
                    if HeapProperties::from(sampler_range.ty).has_sampler {
                        self.heap_sampler.free_handles(sampler_range.handle);
                    }
                }
            }
        }
    }

    /// Frees all sets at once by resetting both heap allocators.
    unsafe fn reset(&mut self) {
        self.heap_srv_cbv_uav.clear();
        self.heap_sampler.clear();
    }
}
// Which descriptor heaps a descriptor type occupies, and whether it is an
// unordered-access view.
struct HeapProperties {
    // True when the type occupies the SRV/CBV/UAV heap.
    has_view: bool,
    // True when the type occupies the sampler heap.
    has_sampler: bool,
    // True for storage (UAV) descriptor types.
    is_uav: bool,
}
impl HeapProperties {
    pub fn new(has_view: bool, has_sampler: bool, is_uav: bool) -> Self {
        HeapProperties {
            has_view,
            has_sampler,
            is_uav,
        }
    }

    /// Classifies a descriptor type as (has_view, has_sampler, is_uav).
    fn from(ty: pso::DescriptorType) -> HeapProperties {
        let (has_view, has_sampler, is_uav) = match ty {
            pso::DescriptorType::Sampler => (false, true, false),
            pso::DescriptorType::CombinedImageSampler => (true, true, false),
            pso::DescriptorType::InputAttachment
            | pso::DescriptorType::SampledImage
            | pso::DescriptorType::UniformTexelBuffer
            | pso::DescriptorType::UniformBufferDynamic
            | pso::DescriptorType::UniformBuffer => (true, false, false),
            pso::DescriptorType::StorageImage
            | pso::DescriptorType::StorageTexelBuffer
            | pso::DescriptorType::StorageBufferDynamic
            | pso::DescriptorType::StorageBuffer => (true, false, true),
        };
        HeapProperties::new(has_view, has_sampler, is_uav)
    }
}
/// A query pool, backed by a native query heap.
#[derive(Debug)]
pub struct QueryPool {
    pub(crate) raw: native::QueryHeap,
    /// Heap type the queries were created with.
    pub(crate) ty: query::HeapType,
}
// NOTE(review): Send/Sync assumes the raw query-heap pointer is externally
// synchronized.
unsafe impl Send for QueryPool {}
unsafe impl Sync for QueryPool {}