#![allow(
non_camel_case_types,
non_snake_case,
clippy::bad_bit_mask,
clippy::let_unit_value,
clippy::missing_safety_doc,
clippy::missing_transmute_annotations,
clippy::needless_lifetimes,
clippy::too_many_arguments,
clippy::type_complexity,
clippy::unnecessary_cast,
clippy::upper_case_acronyms,
clippy::useless_transmute
)]
use alloc::vec::Vec;
use core::ffi::{c_int, c_void, CStr};
use core::mem::MaybeUninit;
use core::ptr;
use super::*;
/// Device-level command wrappers for the provisional `VK_AMDX_shader_enqueue` extension.
///
/// Each method forwards to the function pointer loaded into `self.commands()`;
/// callers must uphold the corresponding Vulkan valid-usage rules.
pub trait AmdxShaderEnqueueExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_dispatch_graph_amdx` command (no return value).
    #[inline]
    unsafe fn cmd_dispatch_graph_amdx(
        &self,
        command_buffer: CommandBuffer,
        scratch: DeviceAddress,
        scratch_size: DeviceSize,
        count_info: &DispatchGraphCountInfoAMDX,
    ) {
        let __result = (self.commands().cmd_dispatch_graph_amdx)(
            command_buffer,
            scratch,
            scratch_size,
            count_info,
        );
    }
    /// Invokes the loaded `cmd_dispatch_graph_indirect_amdx` command (no return value).
    #[inline]
    unsafe fn cmd_dispatch_graph_indirect_amdx(
        &self,
        command_buffer: CommandBuffer,
        scratch: DeviceAddress,
        scratch_size: DeviceSize,
        count_info: &DispatchGraphCountInfoAMDX,
    ) {
        let __result = (self.commands().cmd_dispatch_graph_indirect_amdx)(
            command_buffer,
            scratch,
            scratch_size,
            count_info,
        );
    }
    /// Invokes the loaded `cmd_dispatch_graph_indirect_count_amdx` command.
    ///
    /// Here `count_info` is a `DeviceAddress` (device-side count data) rather
    /// than a host reference.
    #[inline]
    unsafe fn cmd_dispatch_graph_indirect_count_amdx(
        &self,
        command_buffer: CommandBuffer,
        scratch: DeviceAddress,
        scratch_size: DeviceSize,
        count_info: DeviceAddress,
    ) {
        let __result = (self.commands().cmd_dispatch_graph_indirect_count_amdx)(
            command_buffer,
            scratch,
            scratch_size,
            count_info,
        );
    }
    /// Invokes the loaded `cmd_initialize_graph_scratch_memory_amdx` command (no return value).
    #[inline]
    unsafe fn cmd_initialize_graph_scratch_memory_amdx(
        &self,
        command_buffer: CommandBuffer,
        execution_graph: Pipeline,
        scratch: DeviceAddress,
        scratch_size: DeviceSize,
    ) {
        let __result = (self.commands().cmd_initialize_graph_scratch_memory_amdx)(
            command_buffer,
            scratch,
            scratch_size,
        );
    }
    /// Invokes the loaded `create_execution_graph_pipelines_amdx` command,
    /// producing one `Pipeline` per element of `create_infos`.
    ///
    /// Any non-negative result is treated as success and returned alongside
    /// the pipelines; negative codes become `Err`.
    #[inline]
    unsafe fn create_execution_graph_pipelines_amdx(
        &self,
        pipeline_cache: PipelineCache,
        create_infos: &[impl Cast<Target = ExecutionGraphPipelineCreateInfoAMDX>],
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkSuccessResult<Vec<Pipeline>> {
        let mut pipelines = Vec::with_capacity(create_infos.len() as usize);
        let __result = (self.commands().create_execution_graph_pipelines_amdx)(
            self.handle(),
            pipeline_cache,
            create_infos.len() as u32,
            create_infos.as_ptr().cast(),
            allocator.map_or(ptr::null(), |v| v),
            pipelines.as_mut_ptr(),
        );
        // NOTE(review): the length is set even on failure; presumably the
        // driver writes an entry for every create info regardless of the
        // result code — confirm against the extension spec.
        pipelines.set_len(create_infos.len() as usize);
        if __result >= Result::SUCCESS {
            Ok((pipelines, __result.into()))
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_execution_graph_pipeline_node_index_amdx`
    /// command and returns the queried node index on `SUCCESS`.
    #[inline]
    unsafe fn get_execution_graph_pipeline_node_index_amdx(
        &self,
        execution_graph: Pipeline,
        node_info: &PipelineShaderStageNodeCreateInfoAMDX,
    ) -> crate::VkResult<u32> {
        let mut node_index = MaybeUninit::<u32>::uninit();
        let __result = (self.commands().get_execution_graph_pipeline_node_index_amdx)(
            self.handle(),
            execution_graph,
            node_info,
            node_index.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(node_index.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_execution_graph_pipeline_scratch_size_amdx`
    /// command, writing into the caller-provided `size_info`.
    #[inline]
    unsafe fn get_execution_graph_pipeline_scratch_size_amdx(
        &self,
        execution_graph: Pipeline,
        size_info: &mut ExecutionGraphPipelineScratchSizeAMDX,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_execution_graph_pipeline_scratch_size_amdx)(
            self.handle(),
            execution_graph,
            size_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
// Blanket impl is gated: the extension is only exposed with the
// `provisional` Cargo feature enabled.
#[cfg(feature = "provisional")]
impl<C: DeviceV1_0 + ?Sized> AmdxShaderEnqueueExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_AMD_anti_lag` extension.
pub trait AmdAntiLagExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `anti_lag_update_amd` command with `data` (no return value).
    #[inline]
    unsafe fn anti_lag_update_amd(&self, data: &AntiLagDataAMD) {
        let __result = (self.commands().anti_lag_update_amd)(self.handle(), data);
    }
}
impl<C: DeviceV1_0 + ?Sized> AmdAntiLagExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_AMD_buffer_marker` extension.
pub trait AmdBufferMarkerExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_write_buffer_marker2_amd` command
    /// (synchronization2 variant taking `PipelineStageFlags2`).
    #[inline]
    unsafe fn cmd_write_buffer_marker2_amd(
        &self,
        command_buffer: CommandBuffer,
        stage: PipelineStageFlags2,
        dst_buffer: Buffer,
        dst_offset: DeviceSize,
        marker: u32,
    ) {
        let __result = (self.commands().cmd_write_buffer_marker2_amd)(
            command_buffer,
            stage,
            dst_buffer,
            dst_offset,
            marker,
        );
    }
    /// Invokes the loaded `cmd_write_buffer_marker_amd` command
    /// (original variant taking `PipelineStageFlags`).
    #[inline]
    unsafe fn cmd_write_buffer_marker_amd(
        &self,
        command_buffer: CommandBuffer,
        pipeline_stage: PipelineStageFlags,
        dst_buffer: Buffer,
        dst_offset: DeviceSize,
        marker: u32,
    ) {
        let __result = (self.commands().cmd_write_buffer_marker_amd)(
            command_buffer,
            pipeline_stage,
            dst_buffer,
            dst_offset,
            marker,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> AmdBufferMarkerExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_AMD_display_native_hdr` extension.
pub trait AmdDisplayNativeHdrExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `set_local_dimming_amd` command, converting the Rust
    /// `bool` to a Vulkan `Bool32` at the FFI boundary.
    #[inline]
    unsafe fn set_local_dimming_amd(&self, swap_chain: SwapchainKHR, local_dimming_enable: bool) {
        let __result = (self.commands().set_local_dimming_amd)(
            self.handle(),
            swap_chain,
            local_dimming_enable as Bool32,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> AmdDisplayNativeHdrExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_AMD_draw_indirect_count` extension.
pub trait AmdDrawIndirectCountExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_draw_indexed_indirect_count_amd` command (no return value).
    #[inline]
    unsafe fn cmd_draw_indexed_indirect_count_amd(
        &self,
        command_buffer: CommandBuffer,
        buffer: Buffer,
        offset: DeviceSize,
        count_buffer: Buffer,
        count_buffer_offset: DeviceSize,
        max_draw_count: u32,
        stride: u32,
    ) {
        let __result = (self.commands().cmd_draw_indexed_indirect_count_amd)(
            command_buffer,
            buffer,
            offset,
            count_buffer,
            count_buffer_offset,
            max_draw_count,
            stride,
        );
    }
    /// Invokes the loaded `cmd_draw_indirect_count_amd` command (no return value).
    #[inline]
    unsafe fn cmd_draw_indirect_count_amd(
        &self,
        command_buffer: CommandBuffer,
        buffer: Buffer,
        offset: DeviceSize,
        count_buffer: Buffer,
        count_buffer_offset: DeviceSize,
        max_draw_count: u32,
        stride: u32,
    ) {
        let __result = (self.commands().cmd_draw_indirect_count_amd)(
            command_buffer,
            buffer,
            offset,
            count_buffer,
            count_buffer_offset,
            max_draw_count,
            stride,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> AmdDrawIndirectCountExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_AMD_shader_info` extension.
pub trait AmdShaderInfoExtensionDeviceCommands: DeviceV1_0 {
    /// Fetches shader info bytes via the Vulkan two-call idiom:
    /// first call queries the size, second call fills the buffer.
    ///
    /// The first call's result code is deliberately ignored; only the
    /// fill call decides `Ok`/`Err`.
    #[inline]
    unsafe fn get_shader_info_amd(
        &self,
        pipeline: Pipeline,
        shader_stage: ShaderStageFlags,
        info_type: ShaderInfoTypeAMD,
    ) -> crate::VkResult<Vec<u8>> {
        let mut info_size = 0;
        // Size query: null data pointer asks the driver for the byte count.
        (self.commands().get_shader_info_amd)(
            self.handle(),
            pipeline,
            shader_stage,
            info_type,
            &mut info_size,
            ptr::null_mut(),
        );
        let mut info = Vec::with_capacity(info_size as usize);
        let __result = (self.commands().get_shader_info_amd)(
            self.handle(),
            pipeline,
            shader_stage,
            info_type,
            &mut info_size,
            info.as_mut_ptr() as *mut c_void,
        );
        // The driver may shrink `info_size`; it must never exceed capacity.
        debug_assert!(info.capacity() >= info_size as usize);
        info.set_len(info_size as usize);
        if __result == Result::SUCCESS {
            Ok(info)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> AmdShaderInfoExtensionDeviceCommands for C {}
/// Device-level command wrappers for the
/// `VK_ANDROID_external_memory_android_hardware_buffer` extension.
pub trait AndroidExternalMemoryAndroidHardwareBufferExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `get_android_hardware_buffer_properties_android`
    /// command, writing into the caller-provided `properties`.
    #[inline]
    unsafe fn get_android_hardware_buffer_properties_android(
        &self,
        buffer: *const AHardwareBuffer,
        properties: &mut AndroidHardwareBufferPropertiesANDROID,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_android_hardware_buffer_properties_android)(
            self.handle(), buffer, properties
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_memory_android_hardware_buffer_android` command
    /// and returns the exported `AHardwareBuffer` pointer on `SUCCESS`.
    #[inline]
    unsafe fn get_memory_android_hardware_buffer_android(
        &self,
        info: &MemoryGetAndroidHardwareBufferInfoANDROID,
    ) -> crate::VkResult<*mut AHardwareBuffer> {
        let mut buffer = MaybeUninit::<*mut AHardwareBuffer>::uninit();
        let __result = (self.commands().get_memory_android_hardware_buffer_android)(
            self.handle(),
            info,
            buffer.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(buffer.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> AndroidExternalMemoryAndroidHardwareBufferExtensionDeviceCommands
    for C
{
}
/// Device-level command wrappers for the `VK_ARM_data_graph` extension.
pub trait ArmDataGraphExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `bind_data_graph_pipeline_session_memory_arm`
    /// command over the whole `bind_infos` slice.
    #[inline]
    unsafe fn bind_data_graph_pipeline_session_memory_arm(
        &self,
        bind_infos: &[impl Cast<Target = BindDataGraphPipelineSessionMemoryInfoARM>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().bind_data_graph_pipeline_session_memory_arm)(
            self.handle(),
            bind_infos.len() as u32,
            bind_infos.as_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `cmd_dispatch_data_graph_arm` command;
    /// `None` for `info` is passed through as a null pointer.
    #[inline]
    unsafe fn cmd_dispatch_data_graph_arm(
        &self,
        command_buffer: CommandBuffer,
        session: DataGraphPipelineSessionARM,
        info: Option<&DataGraphPipelineDispatchInfoARM>,
    ) {
        let __result = (self.commands().cmd_dispatch_data_graph_arm)(
            command_buffer,
            session,
            info.map_or(ptr::null(), |v| v),
        );
    }
    /// Invokes the loaded `create_data_graph_pipeline_session_arm` command
    /// and returns the created session handle on `SUCCESS`.
    #[inline]
    unsafe fn create_data_graph_pipeline_session_arm(
        &self,
        create_info: &DataGraphPipelineSessionCreateInfoARM,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<DataGraphPipelineSessionARM> {
        let mut session = MaybeUninit::<DataGraphPipelineSessionARM>::uninit();
        let __result = (self.commands().create_data_graph_pipeline_session_arm)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            session.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(session.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `create_data_graph_pipelines_arm` command,
    /// producing one `Pipeline` per element of `create_infos`.
    ///
    /// Any non-negative result is treated as success and returned alongside
    /// the pipelines; negative codes become `Err`.
    #[inline]
    unsafe fn create_data_graph_pipelines_arm(
        &self,
        deferred_operation: DeferredOperationKHR,
        pipeline_cache: PipelineCache,
        create_infos: &[impl Cast<Target = DataGraphPipelineCreateInfoARM>],
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkSuccessResult<Vec<Pipeline>> {
        let mut pipelines = Vec::with_capacity(create_infos.len() as usize);
        let __result = (self.commands().create_data_graph_pipelines_arm)(
            self.handle(),
            deferred_operation,
            pipeline_cache,
            create_infos.len() as u32,
            create_infos.as_ptr().cast(),
            allocator.map_or(ptr::null(), |v| v),
            pipelines.as_mut_ptr(),
        );
        // NOTE(review): the length is set even on failure; presumably the
        // driver writes an entry for every create info regardless of the
        // result code — confirm against the extension spec.
        pipelines.set_len(create_infos.len() as usize);
        if __result >= Result::SUCCESS {
            Ok((pipelines, __result.into()))
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `destroy_data_graph_pipeline_session_arm` command (no return value).
    #[inline]
    unsafe fn destroy_data_graph_pipeline_session_arm(
        &self,
        session: DataGraphPipelineSessionARM,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_data_graph_pipeline_session_arm)(
            self.handle(),
            session,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Fetches available pipeline properties via the Vulkan two-call idiom:
    /// first call queries the count, second call fills the vector.
    #[inline]
    unsafe fn get_data_graph_pipeline_available_properties_arm(
        &self,
        pipeline_info: &DataGraphPipelineInfoARM,
    ) -> crate::VkResult<Vec<DataGraphPipelinePropertyARM>> {
        let mut properties_count = 0;
        // Count query: null data pointer asks the driver for the element count.
        (self
            .commands()
            .get_data_graph_pipeline_available_properties_arm)(
            self.handle(),
            pipeline_info,
            &mut properties_count,
            ptr::null_mut(),
        );
        let mut properties = Vec::with_capacity(properties_count as usize);
        let __result = (self
            .commands()
            .get_data_graph_pipeline_available_properties_arm)(
            self.handle(),
            pipeline_info,
            &mut properties_count,
            properties.as_mut_ptr(),
        );
        // The driver may shrink the count; it must never exceed capacity.
        debug_assert!(properties.capacity() >= properties_count as usize);
        properties.set_len(properties_count as usize);
        if __result == Result::SUCCESS {
            Ok(properties)
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_data_graph_pipeline_properties_arm` command,
    /// filling the caller-provided `properties` slice in place.
    #[inline]
    unsafe fn get_data_graph_pipeline_properties_arm(
        &self,
        pipeline_info: &DataGraphPipelineInfoARM,
        properties: &mut [impl Cast<Target = DataGraphPipelinePropertyQueryResultARM>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_data_graph_pipeline_properties_arm)(
            self.handle(),
            pipeline_info,
            properties.len() as u32,
            properties.as_mut_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Fetches session bind-point requirements via the Vulkan two-call idiom:
    /// first call queries the count, second call fills the vector.
    #[inline]
    unsafe fn get_data_graph_pipeline_session_bind_point_requirements_arm(
        &self,
        info: &DataGraphPipelineSessionBindPointRequirementsInfoARM,
    ) -> crate::VkResult<Vec<DataGraphPipelineSessionBindPointRequirementARM>> {
        let mut bind_point_requirement_count = 0;
        // Count query: null data pointer asks the driver for the element count.
        (self
            .commands()
            .get_data_graph_pipeline_session_bind_point_requirements_arm)(
            self.handle(),
            info,
            &mut bind_point_requirement_count,
            ptr::null_mut(),
        );
        let mut bind_point_requirements = Vec::with_capacity(bind_point_requirement_count as usize);
        let __result = (self
            .commands()
            .get_data_graph_pipeline_session_bind_point_requirements_arm)(
            self.handle(),
            info,
            &mut bind_point_requirement_count,
            bind_point_requirements.as_mut_ptr(),
        );
        // The driver may shrink the count; it must never exceed capacity.
        debug_assert!(bind_point_requirements.capacity() >= bind_point_requirement_count as usize);
        bind_point_requirements.set_len(bind_point_requirement_count as usize);
        if __result == Result::SUCCESS {
            Ok(bind_point_requirements)
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_data_graph_pipeline_session_memory_requirements_arm`
    /// command, writing into the caller-provided `memory_requirements`.
    #[inline]
    unsafe fn get_data_graph_pipeline_session_memory_requirements_arm(
        &self,
        info: &DataGraphPipelineSessionMemoryRequirementsInfoARM,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self
            .commands()
            .get_data_graph_pipeline_session_memory_requirements_arm)(
            self.handle(),
            info,
            memory_requirements,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ArmDataGraphExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_ARM_data_graph` extension.
pub trait ArmDataGraphExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded
    /// `get_physical_device_queue_family_data_graph_processing_engine_properties_arm`
    /// command, writing into the caller-provided properties struct.
    #[inline]
    unsafe fn get_physical_device_queue_family_data_graph_processing_engine_properties_arm(
        &self,
        physical_device: PhysicalDevice,
        queue_family_data_graph_processing_engine_info: &PhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM,
        queue_family_data_graph_processing_engine_properties: &mut QueueFamilyDataGraphProcessingEnginePropertiesARM,
    ) {
        let __result = (self
            .commands()
            .get_physical_device_queue_family_data_graph_processing_engine_properties_arm)(
            physical_device,
            queue_family_data_graph_processing_engine_info,
            queue_family_data_graph_processing_engine_properties,
        );
    }
    /// Fetches per-queue-family data-graph properties via the Vulkan two-call
    /// idiom: first call queries the count, second call fills the vector.
    #[inline]
    unsafe fn get_physical_device_queue_family_data_graph_properties_arm(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
    ) -> crate::VkResult<Vec<QueueFamilyDataGraphPropertiesARM>> {
        let mut queue_family_data_graph_property_count = 0;
        // Count query: null data pointer asks the driver for the element count.
        (self
            .commands()
            .get_physical_device_queue_family_data_graph_properties_arm)(
            physical_device,
            queue_family_index,
            &mut queue_family_data_graph_property_count,
            ptr::null_mut(),
        );
        let mut queue_family_data_graph_properties =
            Vec::with_capacity(queue_family_data_graph_property_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_queue_family_data_graph_properties_arm)(
            physical_device,
            queue_family_index,
            &mut queue_family_data_graph_property_count,
            queue_family_data_graph_properties.as_mut_ptr(),
        );
        // The driver may shrink the count; it must never exceed capacity.
        debug_assert!(
            queue_family_data_graph_properties.capacity()
                >= queue_family_data_graph_property_count as usize
        );
        queue_family_data_graph_properties.set_len(queue_family_data_graph_property_count as usize);
        if __result == Result::SUCCESS {
            Ok(queue_family_data_graph_properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ArmDataGraphExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the
/// `VK_ARM_performance_counters_by_region` extension.
pub trait ArmPerformanceCountersByRegionExtensionInstanceCommands: InstanceV1_0 {
    /// Enumerates performance counters and their descriptions via the Vulkan
    /// two-call idiom; both output vectors share the single counter count.
    #[inline]
    unsafe fn enumerate_physical_device_queue_family_performance_counters_by_region_arm(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
    ) -> crate::VkResult<(
        Vec<PerformanceCounterARM>,
        Vec<PerformanceCounterDescriptionARM>,
    )> {
        let mut counter_count = 0;
        // Count query: null data pointers ask the driver for the element count.
        (self
            .commands()
            .enumerate_physical_device_queue_family_performance_counters_by_region_arm)(
            physical_device,
            queue_family_index,
            &mut counter_count,
            ptr::null_mut(),
            ptr::null_mut(),
        );
        let mut counters = Vec::with_capacity(counter_count as usize);
        let mut counter_descriptions = Vec::with_capacity(counter_count as usize);
        let __result = (self
            .commands()
            .enumerate_physical_device_queue_family_performance_counters_by_region_arm)(
            physical_device,
            queue_family_index,
            &mut counter_count,
            counters.as_mut_ptr(),
            counter_descriptions.as_mut_ptr(),
        );
        // The driver may shrink the count; it must never exceed either capacity.
        debug_assert!(counters.capacity() >= counter_count as usize);
        counters.set_len(counter_count as usize);
        debug_assert!(counter_descriptions.capacity() >= counter_count as usize);
        counter_descriptions.set_len(counter_count as usize);
        if __result == Result::SUCCESS {
            Ok((counters, counter_descriptions))
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ArmPerformanceCountersByRegionExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_ARM_tensors` extension.
pub trait ArmTensorsExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `bind_tensor_memory_arm` command over the whole
    /// `bind_infos` slice.
    #[inline]
    unsafe fn bind_tensor_memory_arm(
        &self,
        bind_infos: &[impl Cast<Target = BindTensorMemoryInfoARM>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().bind_tensor_memory_arm)(
            self.handle(),
            bind_infos.len() as u32,
            bind_infos.as_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `cmd_copy_tensor_arm` command (no return value).
    #[inline]
    unsafe fn cmd_copy_tensor_arm(
        &self,
        command_buffer: CommandBuffer,
        copy_tensor_info: &CopyTensorInfoARM,
    ) {
        let __result = (self.commands().cmd_copy_tensor_arm)(command_buffer, copy_tensor_info);
    }
    /// Invokes the loaded `create_tensor_arm` command and returns the created
    /// tensor handle on `SUCCESS`.
    #[inline]
    unsafe fn create_tensor_arm(
        &self,
        create_info: &TensorCreateInfoARM,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<TensorARM> {
        let mut tensor = MaybeUninit::<TensorARM>::uninit();
        let __result = (self.commands().create_tensor_arm)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            tensor.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(tensor.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `create_tensor_view_arm` command and returns the
    /// created view handle on `SUCCESS`.
    #[inline]
    unsafe fn create_tensor_view_arm(
        &self,
        create_info: &TensorViewCreateInfoARM,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<TensorViewARM> {
        let mut view = MaybeUninit::<TensorViewARM>::uninit();
        let __result = (self.commands().create_tensor_view_arm)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            view.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(view.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `destroy_tensor_arm` command (no return value).
    #[inline]
    unsafe fn destroy_tensor_arm(
        &self,
        tensor: TensorARM,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_tensor_arm)(
            self.handle(),
            tensor,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Invokes the loaded `destroy_tensor_view_arm` command (no return value).
    #[inline]
    unsafe fn destroy_tensor_view_arm(
        &self,
        tensor_view: TensorViewARM,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_tensor_view_arm)(
            self.handle(),
            tensor_view,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Invokes the loaded `get_device_tensor_memory_requirements_arm` command,
    /// writing into the caller-provided `memory_requirements`.
    #[inline]
    unsafe fn get_device_tensor_memory_requirements_arm(
        &self,
        info: &DeviceTensorMemoryRequirementsARM,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self.commands().get_device_tensor_memory_requirements_arm)(
            self.handle(),
            info,
            memory_requirements,
        );
    }
    /// Invokes the loaded `get_tensor_memory_requirements_arm` command,
    /// writing into the caller-provided `memory_requirements`.
    #[inline]
    unsafe fn get_tensor_memory_requirements_arm(
        &self,
        info: &TensorMemoryRequirementsInfoARM,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self.commands().get_tensor_memory_requirements_arm)(
            self.handle(),
            info,
            memory_requirements,
        );
    }
    /// Invokes the loaded `get_tensor_opaque_capture_descriptor_data_arm`
    /// command; `data` must point to a buffer the driver can fill.
    #[inline]
    unsafe fn get_tensor_opaque_capture_descriptor_data_arm(
        &self,
        info: &TensorCaptureDescriptorDataInfoARM,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_tensor_opaque_capture_descriptor_data_arm)(
            self.handle(), info, data
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_tensor_view_opaque_capture_descriptor_data_arm`
    /// command; `data` must point to a buffer the driver can fill.
    #[inline]
    unsafe fn get_tensor_view_opaque_capture_descriptor_data_arm(
        &self,
        info: &TensorViewCaptureDescriptorDataInfoARM,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_tensor_view_opaque_capture_descriptor_data_arm)(
            self.handle(), info, data
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ArmTensorsExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_ARM_tensors` extension.
pub trait ArmTensorsExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded `get_physical_device_external_tensor_properties_arm`
    /// command, writing into the caller-provided `external_tensor_properties`.
    #[inline]
    unsafe fn get_physical_device_external_tensor_properties_arm(
        &self,
        physical_device: PhysicalDevice,
        external_tensor_info: &PhysicalDeviceExternalTensorInfoARM,
        external_tensor_properties: &mut ExternalTensorPropertiesARM,
    ) {
        let __result = (self
            .commands()
            .get_physical_device_external_tensor_properties_arm)(
            physical_device,
            external_tensor_info,
            external_tensor_properties,
        );
    }
}
impl<C: InstanceV1_0 + ?Sized> ArmTensorsExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_acquire_drm_display` extension.
pub trait ExtAcquireDrmDisplayExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded `acquire_drm_display_ext` command for the display
    /// associated with the DRM file descriptor `drm_fd`.
    #[inline]
    unsafe fn acquire_drm_display_ext(
        &self,
        physical_device: PhysicalDevice,
        drm_fd: i32,
        display: DisplayKHR,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().acquire_drm_display_ext)(physical_device, drm_fd, display);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_drm_display_ext` command and returns the
    /// `DisplayKHR` handle for the given DRM connector on `SUCCESS`.
    #[inline]
    unsafe fn get_drm_display_ext(
        &self,
        physical_device: PhysicalDevice,
        drm_fd: i32,
        connector_id: u32,
    ) -> crate::VkResult<DisplayKHR> {
        let mut display = MaybeUninit::<DisplayKHR>::uninit();
        let __result = (self.commands().get_drm_display_ext)(
            physical_device,
            drm_fd,
            connector_id,
            display.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(display.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtAcquireDrmDisplayExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_acquire_xlib_display` extension.
pub trait ExtAcquireXlibDisplayExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded `acquire_xlib_display_ext` command and returns the
    /// Xlib `Display` connection written by the driver on `SUCCESS`.
    #[inline]
    unsafe fn acquire_xlib_display_ext(
        &self,
        physical_device: PhysicalDevice,
        display: DisplayKHR,
    ) -> crate::VkResult<Display> {
        let mut dpy = MaybeUninit::<Display>::uninit();
        let __result =
            (self.commands().acquire_xlib_display_ext)(physical_device, dpy.as_mut_ptr(), display);
        if __result == Result::SUCCESS {
            Ok(dpy.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `get_rand_r_output_display_ext` command and returns
    /// the `(Display, DisplayKHR)` pair for the RandR output on `SUCCESS`.
    #[inline]
    unsafe fn get_rand_r_output_display_ext(
        &self,
        physical_device: PhysicalDevice,
        rr_output: RROutput,
    ) -> crate::VkResult<(Display, DisplayKHR)> {
        let mut dpy = MaybeUninit::<Display>::uninit();
        let mut display = MaybeUninit::<DisplayKHR>::uninit();
        let __result = (self.commands().get_rand_r_output_display_ext)(
            physical_device,
            dpy.as_mut_ptr(),
            rr_output,
            display.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok((dpy.assume_init(), display.assume_init()))
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtAcquireXlibDisplayExtensionInstanceCommands for C {}
/// Device-level command wrappers for the
/// `VK_EXT_attachment_feedback_loop_dynamic_state` extension.
pub trait ExtAttachmentFeedbackLoopDynamicStateExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_attachment_feedback_loop_enable_ext` command (no return value).
    #[inline]
    unsafe fn cmd_set_attachment_feedback_loop_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        aspect_mask: ImageAspectFlags,
    ) {
        let __result = (self.commands().cmd_set_attachment_feedback_loop_enable_ext)(
            command_buffer,
            aspect_mask,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtAttachmentFeedbackLoopDynamicStateExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_buffer_device_address` extension.
pub trait ExtBufferDeviceAddressExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `get_buffer_device_address_ext` command and returns
    /// the resulting `DeviceAddress` directly (no result code to check).
    #[inline]
    unsafe fn get_buffer_device_address_ext(
        &self,
        info: &BufferDeviceAddressInfo,
    ) -> DeviceAddress {
        let __result = (self.commands().get_buffer_device_address_ext)(self.handle(), info);
        __result
    }
}
// NOTE(review): the `#[allow(deprecated)]` suggests the wrapped command is
// marked deprecated elsewhere in the crate (superseded by the core/KHR
// buffer-device-address path) — confirm in the command declarations.
#[allow(deprecated)]
impl<C: DeviceV1_0 + ?Sized> ExtBufferDeviceAddressExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_calibrated_timestamps` extension.
pub trait ExtCalibratedTimestampsExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `get_calibrated_timestamps_ext` command, returning
    /// one timestamp per entry in `timestamp_infos` plus the maximum deviation.
    #[inline]
    unsafe fn get_calibrated_timestamps_ext(
        &self,
        timestamp_infos: &[impl Cast<Target = CalibratedTimestampInfoKHR>],
    ) -> crate::VkResult<(Vec<u64>, u64)> {
        let mut timestamps = Vec::with_capacity(timestamp_infos.len() as usize);
        let mut max_deviation = MaybeUninit::<u64>::uninit();
        let __result = (self.commands().get_calibrated_timestamps_ext)(
            self.handle(),
            timestamp_infos.len() as u32,
            timestamp_infos.as_ptr().cast(),
            timestamps.as_mut_ptr(),
            max_deviation.as_mut_ptr(),
        );
        // One timestamp is written per input info; capacity matches by construction.
        debug_assert!(timestamps.capacity() >= timestamp_infos.len() as usize);
        timestamps.set_len(timestamp_infos.len() as usize);
        if __result == Result::SUCCESS {
            Ok((timestamps, max_deviation.assume_init()))
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtCalibratedTimestampsExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_calibrated_timestamps` extension.
pub trait ExtCalibratedTimestampsExtensionInstanceCommands: InstanceV1_0 {
    /// Fetches calibrateable time domains via the Vulkan two-call idiom:
    /// first call queries the count, second call fills the vector.
    #[inline]
    unsafe fn get_physical_device_calibrateable_time_domains_ext(
        &self,
        physical_device: PhysicalDevice,
    ) -> crate::VkResult<Vec<TimeDomainKHR>> {
        let mut time_domain_count = 0;
        // Count query: null data pointer asks the driver for the element count.
        (self
            .commands()
            .get_physical_device_calibrateable_time_domains_ext)(
            physical_device,
            &mut time_domain_count,
            ptr::null_mut(),
        );
        let mut time_domains = Vec::with_capacity(time_domain_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_calibrateable_time_domains_ext)(
            physical_device,
            &mut time_domain_count,
            time_domains.as_mut_ptr(),
        );
        // The driver may shrink the count; it must never exceed capacity.
        debug_assert!(time_domains.capacity() >= time_domain_count as usize);
        time_domains.set_len(time_domain_count as usize);
        if __result == Result::SUCCESS {
            Ok(time_domains)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtCalibratedTimestampsExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_color_write_enable` extension.
pub trait ExtColorWriteEnableExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_color_write_enable_ext` command over the
    /// whole `color_write_enables` slice (one flag per attachment).
    #[inline]
    unsafe fn cmd_set_color_write_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        color_write_enables: &[Bool32],
    ) {
        let __result = (self.commands().cmd_set_color_write_enable_ext)(
            command_buffer,
            color_write_enables.len() as u32,
            color_write_enables.as_ptr(),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtColorWriteEnableExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_conditional_rendering` extension.
pub trait ExtConditionalRenderingExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_begin_conditional_rendering_ext` command (no return value).
    #[inline]
    unsafe fn cmd_begin_conditional_rendering_ext(
        &self,
        command_buffer: CommandBuffer,
        conditional_rendering_begin: &ConditionalRenderingBeginInfoEXT,
    ) {
        let __result = (self.commands().cmd_begin_conditional_rendering_ext)(
            command_buffer,
            conditional_rendering_begin,
        );
    }
    /// Invokes the loaded `cmd_end_conditional_rendering_ext` command (no return value).
    #[inline]
    unsafe fn cmd_end_conditional_rendering_ext(&self, command_buffer: CommandBuffer) {
        let __result = (self.commands().cmd_end_conditional_rendering_ext)(command_buffer);
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtConditionalRenderingExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_custom_resolve` extension.
pub trait ExtCustomResolveExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_begin_custom_resolve_ext` command;
    /// `None` for the info struct is passed through as a null pointer.
    #[inline]
    unsafe fn cmd_begin_custom_resolve_ext(
        &self,
        command_buffer: CommandBuffer,
        begin_custom_resolve_info: Option<&BeginCustomResolveInfoEXT>,
    ) {
        let __result = (self.commands().cmd_begin_custom_resolve_ext)(
            command_buffer,
            begin_custom_resolve_info.map_or(ptr::null(), |v| v),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtCustomResolveExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_debug_marker` extension.
pub trait ExtDebugMarkerExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_debug_marker_begin_ext` command (no return value).
    #[inline]
    unsafe fn cmd_debug_marker_begin_ext(
        &self,
        command_buffer: CommandBuffer,
        marker_info: &DebugMarkerMarkerInfoEXT,
    ) {
        let __result = (self.commands().cmd_debug_marker_begin_ext)(command_buffer, marker_info);
    }
    /// Invokes the loaded `cmd_debug_marker_end_ext` command (no return value).
    #[inline]
    unsafe fn cmd_debug_marker_end_ext(&self, command_buffer: CommandBuffer) {
        let __result = (self.commands().cmd_debug_marker_end_ext)(command_buffer);
    }
    /// Invokes the loaded `cmd_debug_marker_insert_ext` command (no return value).
    #[inline]
    unsafe fn cmd_debug_marker_insert_ext(
        &self,
        command_buffer: CommandBuffer,
        marker_info: &DebugMarkerMarkerInfoEXT,
    ) {
        let __result = (self.commands().cmd_debug_marker_insert_ext)(command_buffer, marker_info);
    }
    /// Invokes the loaded `debug_marker_set_object_name_ext` command.
    #[inline]
    unsafe fn debug_marker_set_object_name_ext(
        &self,
        name_info: &DebugMarkerObjectNameInfoEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().debug_marker_set_object_name_ext)(self.handle(), name_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `debug_marker_set_object_tag_ext` command.
    #[inline]
    unsafe fn debug_marker_set_object_tag_ext(
        &self,
        tag_info: &DebugMarkerObjectTagInfoEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().debug_marker_set_object_tag_ext)(self.handle(), tag_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDebugMarkerExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_debug_report` extension.
pub trait ExtDebugReportExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded `create_debug_report_callback_ext` command and
    /// returns the created callback handle on `SUCCESS`.
    #[inline]
    unsafe fn create_debug_report_callback_ext(
        &self,
        create_info: &DebugReportCallbackCreateInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<DebugReportCallbackEXT> {
        let mut callback = MaybeUninit::<DebugReportCallbackEXT>::uninit();
        let __result = (self.commands().create_debug_report_callback_ext)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            callback.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(callback.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `debug_report_message_ext` command; the `CStr`
    /// arguments are passed through as raw C string pointers.
    #[inline]
    unsafe fn debug_report_message_ext(
        &self,
        flags: DebugReportFlagsEXT,
        object_type: DebugReportObjectTypeEXT,
        object: u64,
        location: usize,
        message_code: i32,
        layer_prefix: &CStr,
        message: &CStr,
    ) {
        let __result = (self.commands().debug_report_message_ext)(
            self.handle(),
            flags,
            object_type,
            object,
            location,
            message_code,
            layer_prefix.as_ptr().cast(),
            message.as_ptr().cast(),
        );
    }
    /// Invokes the loaded `destroy_debug_report_callback_ext` command (no return value).
    #[inline]
    unsafe fn destroy_debug_report_callback_ext(
        &self,
        callback: DebugReportCallbackEXT,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_debug_report_callback_ext)(
            self.handle(),
            callback,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
}
// NOTE(review): `#[allow(deprecated)]` suggests this extension's commands are
// marked deprecated elsewhere in the crate (superseded by VK_EXT_debug_utils)
// — confirm in the command declarations.
#[allow(deprecated)]
impl<C: InstanceV1_0 + ?Sized> ExtDebugReportExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_debug_utils` extension.
///
/// Note that the object-name/tag setters take an explicit `Device` argument
/// rather than using `self.handle()`, since this trait sits on the instance.
pub trait ExtDebugUtilsExtensionInstanceCommands: InstanceV1_0 {
    /// Invokes the loaded `cmd_begin_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn cmd_begin_debug_utils_label_ext(
        &self,
        command_buffer: CommandBuffer,
        label_info: &DebugUtilsLabelEXT,
    ) {
        let __result =
            (self.commands().cmd_begin_debug_utils_label_ext)(command_buffer, label_info);
    }
    /// Invokes the loaded `cmd_end_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn cmd_end_debug_utils_label_ext(&self, command_buffer: CommandBuffer) {
        let __result = (self.commands().cmd_end_debug_utils_label_ext)(command_buffer);
    }
    /// Invokes the loaded `cmd_insert_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn cmd_insert_debug_utils_label_ext(
        &self,
        command_buffer: CommandBuffer,
        label_info: &DebugUtilsLabelEXT,
    ) {
        let __result =
            (self.commands().cmd_insert_debug_utils_label_ext)(command_buffer, label_info);
    }
    /// Invokes the loaded `create_debug_utils_messenger_ext` command and
    /// returns the created messenger handle on `SUCCESS`.
    #[inline]
    unsafe fn create_debug_utils_messenger_ext(
        &self,
        create_info: &DebugUtilsMessengerCreateInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<DebugUtilsMessengerEXT> {
        let mut messenger = MaybeUninit::<DebugUtilsMessengerEXT>::uninit();
        let __result = (self.commands().create_debug_utils_messenger_ext)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            messenger.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(messenger.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `destroy_debug_utils_messenger_ext` command (no return value).
    #[inline]
    unsafe fn destroy_debug_utils_messenger_ext(
        &self,
        messenger: DebugUtilsMessengerEXT,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_debug_utils_messenger_ext)(
            self.handle(),
            messenger,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Invokes the loaded `queue_begin_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn queue_begin_debug_utils_label_ext(
        &self,
        queue: Queue,
        label_info: &DebugUtilsLabelEXT,
    ) {
        let __result = (self.commands().queue_begin_debug_utils_label_ext)(queue, label_info);
    }
    /// Invokes the loaded `queue_end_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn queue_end_debug_utils_label_ext(&self, queue: Queue) {
        let __result = (self.commands().queue_end_debug_utils_label_ext)(queue);
    }
    /// Invokes the loaded `queue_insert_debug_utils_label_ext` command (no return value).
    #[inline]
    unsafe fn queue_insert_debug_utils_label_ext(
        &self,
        queue: Queue,
        label_info: &DebugUtilsLabelEXT,
    ) {
        let __result = (self.commands().queue_insert_debug_utils_label_ext)(queue, label_info);
    }
    /// Invokes the loaded `set_debug_utils_object_name_ext` command on the
    /// given `device`.
    #[inline]
    unsafe fn set_debug_utils_object_name_ext(
        &self,
        device: Device,
        name_info: &DebugUtilsObjectNameInfoEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().set_debug_utils_object_name_ext)(device, name_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `set_debug_utils_object_tag_ext` command on the
    /// given `device`.
    #[inline]
    unsafe fn set_debug_utils_object_tag_ext(
        &self,
        device: Device,
        tag_info: &DebugUtilsObjectTagInfoEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().set_debug_utils_object_tag_ext)(device, tag_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `submit_debug_utils_message_ext` command (no return value).
    #[inline]
    unsafe fn submit_debug_utils_message_ext(
        &self,
        message_severity: DebugUtilsMessageSeverityFlagsEXT,
        message_types: DebugUtilsMessageTypeFlagsEXT,
        callback_data: &DebugUtilsMessengerCallbackDataEXT,
    ) {
        let __result = (self.commands().submit_debug_utils_message_ext)(
            self.handle(),
            message_severity,
            message_types,
            callback_data,
        );
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtDebugUtilsExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_depth_bias_control` extension.
pub trait ExtDepthBiasControlExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_depth_bias2_ext` command (no return value).
    #[inline]
    unsafe fn cmd_set_depth_bias2_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_bias_info: &DepthBiasInfoEXT,
    ) {
        let __result = (self.commands().cmd_set_depth_bias2_ext)(command_buffer, depth_bias_info);
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDepthBiasControlExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_depth_clamp_control` extension.
pub trait ExtDepthClampControlExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_depth_clamp_range_ext` command;
    /// `None` for the range is passed through as a null pointer.
    #[inline]
    unsafe fn cmd_set_depth_clamp_range_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_clamp_mode: DepthClampModeEXT,
        depth_clamp_range: Option<&DepthClampRangeEXT>,
    ) {
        let __result = (self.commands().cmd_set_depth_clamp_range_ext)(
            command_buffer,
            depth_clamp_mode,
            depth_clamp_range.map_or(ptr::null(), |v| v),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDepthClampControlExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_descriptor_buffer` extension.
///
/// Slice arguments are forwarded as `(len as u32, ptr)` pairs; `Option`
/// references become null pointers; fallible commands map any non-`SUCCESS`
/// result code into `Err`.
pub trait ExtDescriptorBufferExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_bind_descriptor_buffer_embedded_samplers_ext` command.
    #[inline]
    unsafe fn cmd_bind_descriptor_buffer_embedded_samplers_ext(
        &self,
        command_buffer: CommandBuffer,
        pipeline_bind_point: PipelineBindPoint,
        layout: PipelineLayout,
        set: u32,
    ) {
        let __result = (self
            .commands()
            .cmd_bind_descriptor_buffer_embedded_samplers_ext)(
            command_buffer,
            pipeline_bind_point,
            layout,
            set,
        );
    }
    /// Wraps the loaded `cmd_bind_descriptor_buffers_ext` command; `binding_infos`
    /// is passed as a count plus raw pointer (via `Cast`, so wrapper types with the
    /// same layout are accepted).
    #[inline]
    unsafe fn cmd_bind_descriptor_buffers_ext(
        &self,
        command_buffer: CommandBuffer,
        binding_infos: &[impl Cast<Target = DescriptorBufferBindingInfoEXT>],
    ) {
        let __result = (self.commands().cmd_bind_descriptor_buffers_ext)(
            command_buffer,
            binding_infos.len() as u32,
            binding_infos.as_ptr().cast(),
        );
    }
    /// Wraps the loaded `cmd_set_descriptor_buffer_offsets_ext` command.
    /// NOTE(review): only `buffer_indices.len()` is forwarded as the count —
    /// callers are expected to pass `offsets` of the same length (not checked here).
    #[inline]
    unsafe fn cmd_set_descriptor_buffer_offsets_ext(
        &self,
        command_buffer: CommandBuffer,
        pipeline_bind_point: PipelineBindPoint,
        layout: PipelineLayout,
        first_set: u32,
        buffer_indices: &[u32],
        offsets: &[DeviceSize],
    ) {
        let __result = (self.commands().cmd_set_descriptor_buffer_offsets_ext)(
            command_buffer,
            pipeline_bind_point,
            layout,
            first_set,
            buffer_indices.len() as u32,
            buffer_indices.as_ptr(),
            offsets.as_ptr(),
        );
    }
    /// Wraps the loaded `get_acceleration_structure_opaque_capture_descriptor_data_ext`
    /// command. The caller supplies `data` as a raw output pointer and is responsible
    /// for it pointing to sufficient writable memory.
    #[inline]
    unsafe fn get_acceleration_structure_opaque_capture_descriptor_data_ext(
        &self,
        info: &AccelerationStructureCaptureDescriptorDataInfoEXT,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_acceleration_structure_opaque_capture_descriptor_data_ext)(
            self.handle(),
            info,
            data,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_buffer_opaque_capture_descriptor_data_ext` command.
    #[inline]
    unsafe fn get_buffer_opaque_capture_descriptor_data_ext(
        &self,
        info: &BufferCaptureDescriptorDataInfoEXT,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_buffer_opaque_capture_descriptor_data_ext)(
            self.handle(), info, data
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_descriptor_ext` command; the descriptor bytes are
    /// written into the caller-provided `descriptor` slice.
    #[inline]
    unsafe fn get_descriptor_ext(
        &self,
        descriptor_info: &DescriptorGetInfoEXT,
        descriptor: &mut [u8],
    ) {
        let __result = (self.commands().get_descriptor_ext)(
            self.handle(),
            descriptor_info,
            descriptor.len() as usize,
            descriptor.as_mut_ptr() as *mut c_void,
        );
    }
    /// Wraps the loaded `get_descriptor_set_layout_binding_offset_ext` command,
    /// returning the offset written through the `MaybeUninit` out-parameter.
    /// The command returns no status code, so the out value is assumed initialized.
    #[inline]
    unsafe fn get_descriptor_set_layout_binding_offset_ext(
        &self,
        layout: DescriptorSetLayout,
        binding: u32,
    ) -> DeviceSize {
        let mut offset = MaybeUninit::<DeviceSize>::uninit();
        let __result = (self.commands().get_descriptor_set_layout_binding_offset_ext)(
            self.handle(),
            layout,
            binding,
            offset.as_mut_ptr(),
        );
        offset.assume_init()
    }
    /// Wraps the loaded `get_descriptor_set_layout_size_ext` command, returning
    /// the size written through the out-parameter.
    #[inline]
    unsafe fn get_descriptor_set_layout_size_ext(&self, layout: DescriptorSetLayout) -> DeviceSize {
        let mut layout_size_in_bytes = MaybeUninit::<DeviceSize>::uninit();
        let __result = (self.commands().get_descriptor_set_layout_size_ext)(
            self.handle(),
            layout,
            layout_size_in_bytes.as_mut_ptr(),
        );
        layout_size_in_bytes.assume_init()
    }
    /// Wraps the loaded `get_image_opaque_capture_descriptor_data_ext` command.
    #[inline]
    unsafe fn get_image_opaque_capture_descriptor_data_ext(
        &self,
        info: &ImageCaptureDescriptorDataInfoEXT,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_image_opaque_capture_descriptor_data_ext)(
            self.handle(),
            info,
            data,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_image_view_opaque_capture_descriptor_data_ext` command.
    #[inline]
    unsafe fn get_image_view_opaque_capture_descriptor_data_ext(
        &self,
        info: &ImageViewCaptureDescriptorDataInfoEXT,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_image_view_opaque_capture_descriptor_data_ext)(
            self.handle(), info, data
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_sampler_opaque_capture_descriptor_data_ext` command.
    #[inline]
    unsafe fn get_sampler_opaque_capture_descriptor_data_ext(
        &self,
        info: &SamplerCaptureDescriptorDataInfoEXT,
        data: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_sampler_opaque_capture_descriptor_data_ext)(
            self.handle(), info, data
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
#[allow(deprecated)]
impl<C: DeviceV1_0 + ?Sized> ExtDescriptorBufferExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_descriptor_heap` extension.
///
/// Slice arguments are forwarded as `(len as u32, ptr)` pairs; fallible
/// commands translate any non-`SUCCESS` result code into `Err`.
pub trait ExtDescriptorHeapExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_bind_resource_heap_ext` command.
    #[inline]
    unsafe fn cmd_bind_resource_heap_ext(
        &self,
        command_buffer: CommandBuffer,
        bind_info: &BindHeapInfoEXT,
    ) {
        let __result = (self.commands().cmd_bind_resource_heap_ext)(command_buffer, bind_info);
    }
    /// Wraps the loaded `cmd_bind_sampler_heap_ext` command.
    #[inline]
    unsafe fn cmd_bind_sampler_heap_ext(
        &self,
        command_buffer: CommandBuffer,
        bind_info: &BindHeapInfoEXT,
    ) {
        let __result = (self.commands().cmd_bind_sampler_heap_ext)(command_buffer, bind_info);
    }
    /// Wraps the loaded `cmd_push_data_ext` command.
    #[inline]
    unsafe fn cmd_push_data_ext(
        &self,
        command_buffer: CommandBuffer,
        push_data_info: &PushDataInfoEXT,
    ) {
        let __result = (self.commands().cmd_push_data_ext)(command_buffer, push_data_info);
    }
    /// Wraps the loaded `get_image_opaque_capture_data_ext` command, returning
    /// one `HostAddressRangeEXT` per input image on success.
    ///
    /// The output `Vec`'s length is claimed only after the driver reports
    /// `SUCCESS`; on failure the buffer may be (partially) uninitialized and
    /// exposing its elements via `set_len` would hand out uninitialized values.
    #[inline]
    unsafe fn get_image_opaque_capture_data_ext(
        &self,
        images: &[Image],
    ) -> crate::VkResult<Vec<HostAddressRangeEXT>> {
        let mut datas = Vec::with_capacity(images.len() as usize);
        let __result = (self.commands().get_image_opaque_capture_data_ext)(
            self.handle(),
            images.len() as u32,
            images.as_ptr(),
            datas.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            debug_assert!(datas.capacity() >= images.len() as usize);
            datas.set_len(images.len() as usize);
            Ok(datas)
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_tensor_opaque_capture_data_arm` command, returning
    /// one `HostAddressRangeEXT` per input tensor on success.
    ///
    /// As above, the `Vec` length is claimed only on `SUCCESS` so that a failed
    /// call never exposes uninitialized elements.
    #[inline]
    unsafe fn get_tensor_opaque_capture_data_arm(
        &self,
        tensors: &[TensorARM],
    ) -> crate::VkResult<Vec<HostAddressRangeEXT>> {
        let mut datas = Vec::with_capacity(tensors.len() as usize);
        let __result = (self.commands().get_tensor_opaque_capture_data_arm)(
            self.handle(),
            tensors.len() as u32,
            tensors.as_ptr(),
            datas.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            debug_assert!(datas.capacity() >= tensors.len() as usize);
            datas.set_len(tensors.len() as usize);
            Ok(datas)
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `register_custom_border_color_ext` command, returning
    /// the index written through the out-parameter on success.
    #[inline]
    unsafe fn register_custom_border_color_ext(
        &self,
        border_color: &SamplerCustomBorderColorCreateInfoEXT,
        request_index: bool,
    ) -> crate::VkResult<u32> {
        let mut index = MaybeUninit::<u32>::uninit();
        let __result = (self.commands().register_custom_border_color_ext)(
            self.handle(),
            border_color,
            request_index as Bool32,
            index.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(index.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `unregister_custom_border_color_ext` command.
    #[inline]
    unsafe fn unregister_custom_border_color_ext(&self, index: u32) {
        let __result = (self.commands().unregister_custom_border_color_ext)(self.handle(), index);
    }
    /// Wraps the loaded `write_resource_descriptors_ext` command.
    /// NOTE(review): only `resources.len()` is forwarded as the count — callers
    /// are expected to pass `descriptors` of matching length (not checked here).
    #[inline]
    unsafe fn write_resource_descriptors_ext(
        &self,
        resources: &[impl Cast<Target = ResourceDescriptorInfoEXT>],
        descriptors: &[impl Cast<Target = HostAddressRangeEXT>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().write_resource_descriptors_ext)(
            self.handle(),
            resources.len() as u32,
            resources.as_ptr().cast(),
            descriptors.as_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `write_sampler_descriptors_ext` command.
    /// NOTE(review): only `samplers.len()` is forwarded as the count — callers
    /// are expected to pass `descriptors` of matching length (not checked here).
    #[inline]
    unsafe fn write_sampler_descriptors_ext(
        &self,
        samplers: &[impl Cast<Target = SamplerCreateInfo>],
        descriptors: &[impl Cast<Target = HostAddressRangeEXT>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().write_sampler_descriptors_ext)(
            self.handle(),
            samplers.len() as u32,
            samplers.as_ptr().cast(),
            descriptors.as_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDescriptorHeapExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_descriptor_heap` extension.
pub trait ExtDescriptorHeapExtensionInstanceCommands: InstanceV1_0 {
    /// Wraps the loaded `get_physical_device_descriptor_size_ext` command and
    /// returns its value directly (the command reports no status code).
    #[inline]
    unsafe fn get_physical_device_descriptor_size_ext(
        &self,
        physical_device: PhysicalDevice,
        descriptor_type: DescriptorType,
    ) -> DeviceSize {
        let __result = (self.commands().get_physical_device_descriptor_size_ext)(
            physical_device,
            descriptor_type,
        );
        __result
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtDescriptorHeapExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_device_fault` extension.
pub trait ExtDeviceFaultExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `get_device_fault_info_ext` command. An absent
    /// `fault_info` is forwarded as a null pointer (count-query pattern).
    #[inline]
    unsafe fn get_device_fault_info_ext(
        &self,
        fault_counts: &mut DeviceFaultCountsEXT,
        fault_info: Option<&mut DeviceFaultInfoEXT>,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_device_fault_info_ext)(
            self.handle(),
            fault_counts,
            fault_info.map_or(ptr::null_mut(), |v| v),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDeviceFaultExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_device_generated_commands`
/// extension. `create_*` methods return the handle written through a
/// `MaybeUninit` out-parameter on `SUCCESS`; `Option<&AllocationCallbacks>`
/// becomes a null pointer when absent.
pub trait ExtDeviceGeneratedCommandsExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_execute_generated_commands_ext` command.
    #[inline]
    unsafe fn cmd_execute_generated_commands_ext(
        &self,
        command_buffer: CommandBuffer,
        is_preprocessed: bool,
        generated_commands_info: &GeneratedCommandsInfoEXT,
    ) {
        let __result = (self.commands().cmd_execute_generated_commands_ext)(
            command_buffer,
            is_preprocessed as Bool32,
            generated_commands_info,
        );
    }
    /// Wraps the loaded `cmd_preprocess_generated_commands_ext` command.
    #[inline]
    unsafe fn cmd_preprocess_generated_commands_ext(
        &self,
        command_buffer: CommandBuffer,
        generated_commands_info: &GeneratedCommandsInfoEXT,
        state_command_buffer: CommandBuffer,
    ) {
        let __result = (self.commands().cmd_preprocess_generated_commands_ext)(
            command_buffer,
            generated_commands_info,
            state_command_buffer,
        );
    }
    /// Wraps the loaded `create_indirect_commands_layout_ext` command.
    #[inline]
    unsafe fn create_indirect_commands_layout_ext(
        &self,
        create_info: &IndirectCommandsLayoutCreateInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<IndirectCommandsLayoutEXT> {
        let mut indirect_commands_layout = MaybeUninit::<IndirectCommandsLayoutEXT>::uninit();
        let __result = (self.commands().create_indirect_commands_layout_ext)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            indirect_commands_layout.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(indirect_commands_layout.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `create_indirect_execution_set_ext` command.
    #[inline]
    unsafe fn create_indirect_execution_set_ext(
        &self,
        create_info: &IndirectExecutionSetCreateInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<IndirectExecutionSetEXT> {
        let mut indirect_execution_set = MaybeUninit::<IndirectExecutionSetEXT>::uninit();
        let __result = (self.commands().create_indirect_execution_set_ext)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            indirect_execution_set.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(indirect_execution_set.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `destroy_indirect_commands_layout_ext` command.
    #[inline]
    unsafe fn destroy_indirect_commands_layout_ext(
        &self,
        indirect_commands_layout: IndirectCommandsLayoutEXT,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_indirect_commands_layout_ext)(
            self.handle(),
            indirect_commands_layout,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Wraps the loaded `destroy_indirect_execution_set_ext` command.
    #[inline]
    unsafe fn destroy_indirect_execution_set_ext(
        &self,
        indirect_execution_set: IndirectExecutionSetEXT,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_indirect_execution_set_ext)(
            self.handle(),
            indirect_execution_set,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Wraps the loaded `get_generated_commands_memory_requirements_ext` command;
    /// the requirements are written into the caller-provided struct.
    #[inline]
    unsafe fn get_generated_commands_memory_requirements_ext(
        &self,
        info: &GeneratedCommandsMemoryRequirementsInfoEXT,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self
            .commands()
            .get_generated_commands_memory_requirements_ext)(
            self.handle(),
            info,
            memory_requirements,
        );
    }
    /// Wraps the loaded `update_indirect_execution_set_pipeline_ext` command.
    #[inline]
    unsafe fn update_indirect_execution_set_pipeline_ext(
        &self,
        indirect_execution_set: IndirectExecutionSetEXT,
        execution_set_writes: &[impl Cast<Target = WriteIndirectExecutionSetPipelineEXT>],
    ) {
        let __result = (self.commands().update_indirect_execution_set_pipeline_ext)(
            self.handle(),
            indirect_execution_set,
            execution_set_writes.len() as u32,
            execution_set_writes.as_ptr().cast(),
        );
    }
    /// Wraps the loaded `update_indirect_execution_set_shader_ext` command.
    #[inline]
    unsafe fn update_indirect_execution_set_shader_ext(
        &self,
        indirect_execution_set: IndirectExecutionSetEXT,
        execution_set_writes: &[impl Cast<Target = WriteIndirectExecutionSetShaderEXT>],
    ) {
        let __result = (self.commands().update_indirect_execution_set_shader_ext)(
            self.handle(),
            indirect_execution_set,
            execution_set_writes.len() as u32,
            execution_set_writes.as_ptr().cast(),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDeviceGeneratedCommandsExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_direct_mode_display` extension.
pub trait ExtDirectModeDisplayExtensionInstanceCommands: InstanceV1_0 {
    /// Wraps the loaded `release_display_ext` command, translating any
    /// non-`SUCCESS` result code into the crate's error type.
    #[inline]
    unsafe fn release_display_ext(
        &self,
        physical_device: PhysicalDevice,
        display: DisplayKHR,
    ) -> crate::VkResult<()> {
        let code = (self.commands().release_display_ext)(physical_device, display);
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        Ok(())
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtDirectModeDisplayExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_directfb_surface` extension.
pub trait ExtDirectfbSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Wraps the loaded `create_direct_fb_surface_ext` command, returning the
    /// surface handle written through the out-parameter on `SUCCESS`.
    #[inline]
    unsafe fn create_direct_fb_surface_ext(
        &self,
        create_info: &DirectFBSurfaceCreateInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_direct_fb_surface_ext)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_physical_device_direct_fb_presentation_support_ext`
    /// command; the raw `Bool32` is returned unmodified.
    #[inline]
    unsafe fn get_physical_device_direct_fb_presentation_support_ext(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
        dfb: *mut IDirectFB,
    ) -> Bool32 {
        let __result = (self
            .commands()
            .get_physical_device_direct_fb_presentation_support_ext)(
            physical_device,
            queue_family_index,
            dfb,
        );
        __result
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtDirectfbSurfaceExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_discard_rectangles` extension.
pub trait ExtDiscardRectanglesExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_set_discard_rectangle_ext` command; the rectangle
    /// slice is forwarded as a count plus raw pointer.
    #[inline]
    unsafe fn cmd_set_discard_rectangle_ext(
        &self,
        command_buffer: CommandBuffer,
        first_discard_rectangle: u32,
        discard_rectangles: &[impl Cast<Target = Rect2D>],
    ) {
        let __result = (self.commands().cmd_set_discard_rectangle_ext)(
            command_buffer,
            first_discard_rectangle,
            discard_rectangles.len() as u32,
            discard_rectangles.as_ptr().cast(),
        );
    }
    /// Wraps the loaded `cmd_set_discard_rectangle_enable_ext` command; the
    /// `bool` is widened to a `Bool32`.
    #[inline]
    unsafe fn cmd_set_discard_rectangle_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        discard_rectangle_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_discard_rectangle_enable_ext)(
            command_buffer,
            discard_rectangle_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_discard_rectangle_mode_ext` command.
    #[inline]
    unsafe fn cmd_set_discard_rectangle_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        discard_rectangle_mode: DiscardRectangleModeEXT,
    ) {
        let __result = (self.commands().cmd_set_discard_rectangle_mode_ext)(
            command_buffer,
            discard_rectangle_mode,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDiscardRectanglesExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_display_control` extension.
pub trait ExtDisplayControlExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `display_power_control_ext` command.
    #[inline]
    unsafe fn display_power_control_ext(
        &self,
        display: DisplayKHR,
        display_power_info: &DisplayPowerInfoEXT,
    ) -> crate::VkResult<()> {
        let __result =
            (self.commands().display_power_control_ext)(self.handle(), display, display_power_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_swapchain_counter_ext` command, returning the
    /// counter value written through the out-parameter on `SUCCESS`.
    #[inline]
    unsafe fn get_swapchain_counter_ext(
        &self,
        swapchain: SwapchainKHR,
        counter: SurfaceCounterFlagsEXT,
    ) -> crate::VkResult<u64> {
        let mut counter_value = MaybeUninit::<u64>::uninit();
        let __result = (self.commands().get_swapchain_counter_ext)(
            self.handle(),
            swapchain,
            counter,
            counter_value.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(counter_value.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `register_device_event_ext` command, returning the
    /// fence handle written through the out-parameter on `SUCCESS`.
    #[inline]
    unsafe fn register_device_event_ext(
        &self,
        device_event_info: &DeviceEventInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<Fence> {
        let mut fence = MaybeUninit::<Fence>::uninit();
        let __result = (self.commands().register_device_event_ext)(
            self.handle(),
            device_event_info,
            allocator.map_or(ptr::null(), |v| v),
            fence.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(fence.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `register_display_event_ext` command, returning the
    /// fence handle written through the out-parameter on `SUCCESS`.
    #[inline]
    unsafe fn register_display_event_ext(
        &self,
        display: DisplayKHR,
        display_event_info: &DisplayEventInfoEXT,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<Fence> {
        let mut fence = MaybeUninit::<Fence>::uninit();
        let __result = (self.commands().register_display_event_ext)(
            self.handle(),
            display,
            display_event_info,
            allocator.map_or(ptr::null(), |v| v),
            fence.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(fence.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtDisplayControlExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_EXT_display_surface_counter`
/// extension.
pub trait ExtDisplaySurfaceCounterExtensionInstanceCommands: InstanceV1_0 {
    /// Wraps the loaded `get_physical_device_surface_capabilities2_ext` command;
    /// capabilities are written into the caller-provided struct.
    #[inline]
    unsafe fn get_physical_device_surface_capabilities2_ext(
        &self,
        physical_device: PhysicalDevice,
        surface: SurfaceKHR,
        surface_capabilities: &mut SurfaceCapabilities2EXT,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_physical_device_surface_capabilities2_ext)(
            physical_device,
            surface,
            surface_capabilities,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> ExtDisplaySurfaceCounterExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_extended_dynamic_state`
/// extension. All methods record into the given command buffer; `bool` flags
/// are widened to `Bool32`, and slices become `(len as u32, ptr)` pairs.
pub trait ExtExtendedDynamicStateExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_bind_vertex_buffers2_ext` command.
    /// NOTE(review): only `buffers.len()` is forwarded as the binding count —
    /// callers must pass `offsets`/`sizes`/`strides` of matching length.
    #[inline]
    unsafe fn cmd_bind_vertex_buffers2_ext(
        &self,
        command_buffer: CommandBuffer,
        first_binding: u32,
        buffers: &[Buffer],
        offsets: &[DeviceSize],
        sizes: &[DeviceSize],
        strides: &[DeviceSize],
    ) {
        let __result = (self.commands().cmd_bind_vertex_buffers2_ext)(
            command_buffer,
            first_binding,
            buffers.len() as u32,
            buffers.as_ptr(),
            offsets.as_ptr(),
            sizes.as_ptr(),
            strides.as_ptr(),
        );
    }
    /// Wraps the loaded `cmd_set_cull_mode_ext` command.
    #[inline]
    unsafe fn cmd_set_cull_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        cull_mode: CullModeFlags,
    ) {
        let __result = (self.commands().cmd_set_cull_mode_ext)(command_buffer, cull_mode);
    }
    /// Wraps the loaded `cmd_set_depth_bounds_test_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_depth_bounds_test_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_bounds_test_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_bounds_test_enable_ext)(
            command_buffer,
            depth_bounds_test_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_depth_compare_op_ext` command.
    #[inline]
    unsafe fn cmd_set_depth_compare_op_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_compare_op: CompareOp,
    ) {
        let __result =
            (self.commands().cmd_set_depth_compare_op_ext)(command_buffer, depth_compare_op);
    }
    /// Wraps the loaded `cmd_set_depth_test_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_depth_test_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_test_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_test_enable_ext)(
            command_buffer,
            depth_test_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_depth_write_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_depth_write_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_write_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_write_enable_ext)(
            command_buffer,
            depth_write_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_front_face_ext` command.
    #[inline]
    unsafe fn cmd_set_front_face_ext(&self, command_buffer: CommandBuffer, front_face: FrontFace) {
        let __result = (self.commands().cmd_set_front_face_ext)(command_buffer, front_face);
    }
    /// Wraps the loaded `cmd_set_primitive_topology_ext` command.
    #[inline]
    unsafe fn cmd_set_primitive_topology_ext(
        &self,
        command_buffer: CommandBuffer,
        primitive_topology: PrimitiveTopology,
    ) {
        let __result =
            (self.commands().cmd_set_primitive_topology_ext)(command_buffer, primitive_topology);
    }
    /// Wraps the loaded `cmd_set_scissor_with_count_ext` command.
    #[inline]
    unsafe fn cmd_set_scissor_with_count_ext(
        &self,
        command_buffer: CommandBuffer,
        scissors: &[impl Cast<Target = Rect2D>],
    ) {
        let __result = (self.commands().cmd_set_scissor_with_count_ext)(
            command_buffer,
            scissors.len() as u32,
            scissors.as_ptr().cast(),
        );
    }
    /// Wraps the loaded `cmd_set_stencil_op_ext` command.
    #[inline]
    unsafe fn cmd_set_stencil_op_ext(
        &self,
        command_buffer: CommandBuffer,
        face_mask: StencilFaceFlags,
        fail_op: StencilOp,
        pass_op: StencilOp,
        depth_fail_op: StencilOp,
        compare_op: CompareOp,
    ) {
        let __result = (self.commands().cmd_set_stencil_op_ext)(
            command_buffer,
            face_mask,
            fail_op,
            pass_op,
            depth_fail_op,
            compare_op,
        );
    }
    /// Wraps the loaded `cmd_set_stencil_test_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_stencil_test_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        stencil_test_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_stencil_test_enable_ext)(
            command_buffer,
            stencil_test_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_viewport_with_count_ext` command.
    #[inline]
    unsafe fn cmd_set_viewport_with_count_ext(
        &self,
        command_buffer: CommandBuffer,
        viewports: &[impl Cast<Target = Viewport>],
    ) {
        let __result = (self.commands().cmd_set_viewport_with_count_ext)(
            command_buffer,
            viewports.len() as u32,
            viewports.as_ptr().cast(),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtExtendedDynamicStateExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_extended_dynamic_state2`
/// extension. `bool` flags are widened to `Bool32`.
pub trait ExtExtendedDynamicState2ExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_set_depth_bias_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_depth_bias_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_bias_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_bias_enable_ext)(
            command_buffer,
            depth_bias_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_logic_op_ext` command.
    #[inline]
    unsafe fn cmd_set_logic_op_ext(&self, command_buffer: CommandBuffer, logic_op: LogicOp) {
        let __result = (self.commands().cmd_set_logic_op_ext)(command_buffer, logic_op);
    }
    /// Wraps the loaded `cmd_set_patch_control_points_ext` command.
    #[inline]
    unsafe fn cmd_set_patch_control_points_ext(
        &self,
        command_buffer: CommandBuffer,
        patch_control_points: u32,
    ) {
        let __result = (self.commands().cmd_set_patch_control_points_ext)(
            command_buffer,
            patch_control_points,
        );
    }
    /// Wraps the loaded `cmd_set_primitive_restart_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_primitive_restart_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        primitive_restart_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_primitive_restart_enable_ext)(
            command_buffer,
            primitive_restart_enable as Bool32,
        );
    }
    /// Wraps the loaded `cmd_set_rasterizer_discard_enable_ext` command.
    #[inline]
    unsafe fn cmd_set_rasterizer_discard_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        rasterizer_discard_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_rasterizer_discard_enable_ext)(
            command_buffer,
            rasterizer_discard_enable as Bool32,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtExtendedDynamicState2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_extended_dynamic_state3`
/// extension (including the NV dynamic-state commands it promotes).
///
/// All methods record into the given command buffer and return no value;
/// `bool` flags are widened to `Bool32`, slices become `(len as u32, ptr)`
/// pairs, and `Option` references become null pointers.
pub trait ExtExtendedDynamicState3ExtensionDeviceCommands: DeviceV1_0 {
    #[inline]
    unsafe fn cmd_set_alpha_to_coverage_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        alpha_to_coverage_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_alpha_to_coverage_enable_ext)(
            command_buffer,
            alpha_to_coverage_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_alpha_to_one_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        alpha_to_one_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_alpha_to_one_enable_ext)(
            command_buffer,
            alpha_to_one_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_color_blend_advanced_ext(
        &self,
        command_buffer: CommandBuffer,
        first_attachment: u32,
        color_blend_advanced: &[impl Cast<Target = ColorBlendAdvancedEXT>],
    ) {
        let __result = (self.commands().cmd_set_color_blend_advanced_ext)(
            command_buffer,
            first_attachment,
            color_blend_advanced.len() as u32,
            color_blend_advanced.as_ptr().cast(),
        );
    }
    #[inline]
    unsafe fn cmd_set_color_blend_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        first_attachment: u32,
        color_blend_enables: &[Bool32],
    ) {
        let __result = (self.commands().cmd_set_color_blend_enable_ext)(
            command_buffer,
            first_attachment,
            color_blend_enables.len() as u32,
            color_blend_enables.as_ptr(),
        );
    }
    #[inline]
    unsafe fn cmd_set_color_blend_equation_ext(
        &self,
        command_buffer: CommandBuffer,
        first_attachment: u32,
        color_blend_equations: &[impl Cast<Target = ColorBlendEquationEXT>],
    ) {
        let __result = (self.commands().cmd_set_color_blend_equation_ext)(
            command_buffer,
            first_attachment,
            color_blend_equations.len() as u32,
            color_blend_equations.as_ptr().cast(),
        );
    }
    #[inline]
    unsafe fn cmd_set_color_write_mask_ext(
        &self,
        command_buffer: CommandBuffer,
        first_attachment: u32,
        color_write_masks: &[ColorComponentFlags],
    ) {
        let __result = (self.commands().cmd_set_color_write_mask_ext)(
            command_buffer,
            first_attachment,
            color_write_masks.len() as u32,
            color_write_masks.as_ptr(),
        );
    }
    #[inline]
    unsafe fn cmd_set_conservative_rasterization_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        conservative_rasterization_mode: ConservativeRasterizationModeEXT,
    ) {
        let __result = (self.commands().cmd_set_conservative_rasterization_mode_ext)(
            command_buffer,
            conservative_rasterization_mode,
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_modulation_mode_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_modulation_mode: CoverageModulationModeNV,
    ) {
        let __result = (self.commands().cmd_set_coverage_modulation_mode_nv)(
            command_buffer,
            coverage_modulation_mode,
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_modulation_table_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_modulation_table_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_coverage_modulation_table_enable_nv)(
            command_buffer,
            coverage_modulation_table_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_modulation_table_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_modulation_table: &[f32],
    ) {
        let __result = (self.commands().cmd_set_coverage_modulation_table_nv)(
            command_buffer,
            coverage_modulation_table.len() as u32,
            coverage_modulation_table.as_ptr(),
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_reduction_mode_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_reduction_mode: CoverageReductionModeNV,
    ) {
        let __result = (self.commands().cmd_set_coverage_reduction_mode_nv)(
            command_buffer,
            coverage_reduction_mode,
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_to_color_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_to_color_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_coverage_to_color_enable_nv)(
            command_buffer,
            coverage_to_color_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_coverage_to_color_location_nv(
        &self,
        command_buffer: CommandBuffer,
        coverage_to_color_location: u32,
    ) {
        let __result = (self.commands().cmd_set_coverage_to_color_location_nv)(
            command_buffer,
            coverage_to_color_location,
        );
    }
    #[inline]
    unsafe fn cmd_set_depth_clamp_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_clamp_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_clamp_enable_ext)(
            command_buffer,
            depth_clamp_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_depth_clip_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        depth_clip_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_clip_enable_ext)(
            command_buffer,
            depth_clip_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_depth_clip_negative_one_to_one_ext(
        &self,
        command_buffer: CommandBuffer,
        negative_one_to_one: bool,
    ) {
        let __result = (self.commands().cmd_set_depth_clip_negative_one_to_one_ext)(
            command_buffer,
            negative_one_to_one as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_extra_primitive_overestimation_size_ext(
        &self,
        command_buffer: CommandBuffer,
        extra_primitive_overestimation_size: f32,
    ) {
        let __result = (self
            .commands()
            .cmd_set_extra_primitive_overestimation_size_ext)(
            command_buffer,
            extra_primitive_overestimation_size,
        );
    }
    #[inline]
    unsafe fn cmd_set_line_rasterization_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        line_rasterization_mode: LineRasterizationModeEXT,
    ) {
        let __result = (self.commands().cmd_set_line_rasterization_mode_ext)(
            command_buffer,
            line_rasterization_mode,
        );
    }
    #[inline]
    unsafe fn cmd_set_line_stipple_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        stippled_line_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_line_stipple_enable_ext)(
            command_buffer,
            stippled_line_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_logic_op_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        logic_op_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_logic_op_enable_ext)(
            command_buffer,
            logic_op_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_polygon_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        polygon_mode: PolygonMode,
    ) {
        let __result = (self.commands().cmd_set_polygon_mode_ext)(command_buffer, polygon_mode);
    }
    #[inline]
    unsafe fn cmd_set_provoking_vertex_mode_ext(
        &self,
        command_buffer: CommandBuffer,
        provoking_vertex_mode: ProvokingVertexModeEXT,
    ) {
        let __result = (self.commands().cmd_set_provoking_vertex_mode_ext)(
            command_buffer,
            provoking_vertex_mode,
        );
    }
    #[inline]
    unsafe fn cmd_set_rasterization_samples_ext(
        &self,
        command_buffer: CommandBuffer,
        rasterization_samples: SampleCountFlags,
    ) {
        let __result = (self.commands().cmd_set_rasterization_samples_ext)(
            command_buffer,
            rasterization_samples,
        );
    }
    #[inline]
    unsafe fn cmd_set_rasterization_stream_ext(
        &self,
        command_buffer: CommandBuffer,
        rasterization_stream: u32,
    ) {
        let __result = (self.commands().cmd_set_rasterization_stream_ext)(
            command_buffer,
            rasterization_stream,
        );
    }
    #[inline]
    unsafe fn cmd_set_representative_fragment_test_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        representative_fragment_test_enable: bool,
    ) {
        let __result = (self
            .commands()
            .cmd_set_representative_fragment_test_enable_nv)(
            command_buffer,
            representative_fragment_test_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_sample_locations_enable_ext(
        &self,
        command_buffer: CommandBuffer,
        sample_locations_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_sample_locations_enable_ext)(
            command_buffer,
            sample_locations_enable as Bool32,
        );
    }
    /// An absent `sample_mask` is forwarded as a null pointer.
    #[inline]
    unsafe fn cmd_set_sample_mask_ext(
        &self,
        command_buffer: CommandBuffer,
        samples: SampleCountFlags,
        sample_mask: Option<&SampleMask>,
    ) {
        let __result = (self.commands().cmd_set_sample_mask_ext)(
            command_buffer,
            samples,
            sample_mask.map_or(ptr::null(), |v| v),
        );
    }
    #[inline]
    unsafe fn cmd_set_shading_rate_image_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        shading_rate_image_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_shading_rate_image_enable_nv)(
            command_buffer,
            shading_rate_image_enable as Bool32,
        );
    }
    #[inline]
    unsafe fn cmd_set_tessellation_domain_origin_ext(
        &self,
        command_buffer: CommandBuffer,
        domain_origin: TessellationDomainOrigin,
    ) {
        let __result =
            (self.commands().cmd_set_tessellation_domain_origin_ext)(command_buffer, domain_origin);
    }
    #[inline]
    unsafe fn cmd_set_viewport_swizzle_nv(
        &self,
        command_buffer: CommandBuffer,
        first_viewport: u32,
        viewport_swizzles: &[impl Cast<Target = ViewportSwizzleNV>],
    ) {
        let __result = (self.commands().cmd_set_viewport_swizzle_nv)(
            command_buffer,
            first_viewport,
            viewport_swizzles.len() as u32,
            viewport_swizzles.as_ptr().cast(),
        );
    }
    #[inline]
    unsafe fn cmd_set_viewport_w_scaling_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        viewport_w_scaling_enable: bool,
    ) {
        let __result = (self.commands().cmd_set_viewport_w_scaling_enable_nv)(
            command_buffer,
            viewport_w_scaling_enable as Bool32,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtExtendedDynamicState3ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_external_memory_host` extension.
pub trait ExtExternalMemoryHostExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `get_memory_host_pointer_properties_ext` command;
    /// properties are written into the caller-provided struct.
    #[inline]
    unsafe fn get_memory_host_pointer_properties_ext(
        &self,
        handle_type: ExternalMemoryHandleTypeFlags,
        host_pointer: *const c_void,
        memory_host_pointer_properties: &mut MemoryHostPointerPropertiesEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_memory_host_pointer_properties_ext)(
            self.handle(),
            handle_type,
            host_pointer,
            memory_host_pointer_properties,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtExternalMemoryHostExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_external_memory_metal` extension.
pub trait ExtExternalMemoryMetalExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `get_memory_metal_handle_ext` command, returning the
    /// raw handle pointer written through the out-parameter on `SUCCESS`.
    #[inline]
    unsafe fn get_memory_metal_handle_ext(
        &self,
        get_metal_handle_info: &MemoryGetMetalHandleInfoEXT,
    ) -> crate::VkResult<*mut c_void> {
        let mut handle = MaybeUninit::<*mut c_void>::uninit();
        let __result = (self.commands().get_memory_metal_handle_ext)(
            self.handle(),
            get_metal_handle_info,
            handle.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(handle.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Wraps the loaded `get_memory_metal_handle_properties_ext` command;
    /// properties are written into the caller-provided struct.
    #[inline]
    unsafe fn get_memory_metal_handle_properties_ext(
        &self,
        handle_type: ExternalMemoryHandleTypeFlags,
        handle: *const c_void,
        memory_metal_handle_properties: &mut MemoryMetalHandlePropertiesEXT,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_memory_metal_handle_properties_ext)(
            self.handle(),
            handle_type,
            handle,
            memory_metal_handle_properties,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtExternalMemoryMetalExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_EXT_fragment_density_map_offset`
/// extension.
pub trait ExtFragmentDensityMapOffsetExtensionDeviceCommands: DeviceV1_0 {
    /// Wraps the loaded `cmd_end_rendering2_ext` command. An absent
    /// `rendering_end_info` is forwarded as a null pointer.
    #[inline]
    unsafe fn cmd_end_rendering2_ext(
        &self,
        command_buffer: CommandBuffer,
        rendering_end_info: Option<&RenderingEndInfoKHR>,
    ) {
        let __result = (self.commands().cmd_end_rendering2_ext)(
            command_buffer,
            rendering_end_info.map_or(ptr::null(), |v| v),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> ExtFragmentDensityMapOffsetExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_full_screen_exclusive` extension.
pub trait ExtFullScreenExclusiveExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkAcquireFullScreenExclusiveModeEXT`: requests exclusive full-screen
/// access for the given swapchain.
#[inline]
unsafe fn acquire_full_screen_exclusive_mode_ext(
&self,
swapchain: SwapchainKHR,
) -> crate::VkResult<()> {
let __result =
(self.commands().acquire_full_screen_exclusive_mode_ext)(self.handle(), swapchain);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetDeviceGroupSurfacePresentModes2EXT`: queries the supported
/// device-group present modes for a surface. The flags value is driver-written;
/// `assume_init` is only reached on `VK_SUCCESS`.
#[inline]
unsafe fn get_device_group_surface_present_modes2_ext(
&self,
surface_info: &PhysicalDeviceSurfaceInfo2KHR,
) -> crate::VkResult<DeviceGroupPresentModeFlagsKHR> {
let mut modes = MaybeUninit::<DeviceGroupPresentModeFlagsKHR>::uninit();
let __result = (self.commands().get_device_group_surface_present_modes2_ext)(
self.handle(),
surface_info,
modes.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(modes.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkReleaseFullScreenExclusiveModeEXT`: releases previously acquired
/// exclusive full-screen access for the given swapchain.
#[inline]
unsafe fn release_full_screen_exclusive_mode_ext(
&self,
swapchain: SwapchainKHR,
) -> crate::VkResult<()> {
let __result =
(self.commands().release_full_screen_exclusive_mode_ext)(self.handle(), swapchain);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtFullScreenExclusiveExtensionDeviceCommands for C {}
/// Instance-level commands for the `VK_EXT_full_screen_exclusive` extension.
pub trait ExtFullScreenExclusiveExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkGetPhysicalDeviceSurfacePresentModes2EXT` using the standard
/// two-call enumeration pattern: first call with a null array to obtain the
/// count, then a second call to fill a `Vec` of exactly that capacity.
#[inline]
unsafe fn get_physical_device_surface_present_modes2_ext(
&self,
physical_device: PhysicalDevice,
surface_info: &PhysicalDeviceSurfaceInfo2KHR,
) -> crate::VkResult<Vec<PresentModeKHR>> {
let mut present_mode_count = 0;
// Count-query call; its result code is intentionally ignored (on failure the
// count stays 0 and the fill call below reports the error).
(self
.commands()
.get_physical_device_surface_present_modes2_ext)(
physical_device,
surface_info,
&mut present_mode_count,
ptr::null_mut(),
);
let mut present_modes = Vec::with_capacity(present_mode_count as usize);
let __result = (self
.commands()
.get_physical_device_surface_present_modes2_ext)(
physical_device,
surface_info,
&mut present_mode_count,
present_modes.as_mut_ptr(),
);
// NOTE(review): `set_len` runs before the result check, per the generator's
// convention. `present_mode_count` can only shrink between the two calls, so
// the capacity bound holds; on error the (dropped) Vec may hold uninitialized
// `PresentModeKHR` values — confirm this matches the generator's soundness
// assumptions for plain-enum element types.
debug_assert!(present_modes.capacity() >= present_mode_count as usize);
present_modes.set_len(present_mode_count as usize);
if __result == Result::SUCCESS {
Ok(present_modes)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> ExtFullScreenExclusiveExtensionInstanceCommands for C {}
/// Device-level commands for the `VK_EXT_hdr_metadata` extension.
pub trait ExtHdrMetadataExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkSetHdrMetadataEXT`: associates HDR metadata with each swapchain.
/// Only `swapchains.len()` is passed as the element count, so the caller must
/// supply a `metadata` slice of at least the same length — the wrapper does
/// not check this.
#[inline]
unsafe fn set_hdr_metadata_ext(
&self,
swapchains: &[SwapchainKHR],
metadata: &[impl Cast<Target = HdrMetadataEXT>],
) {
let __result = (self.commands().set_hdr_metadata_ext)(
self.handle(),
swapchains.len() as u32,
swapchains.as_ptr(),
// `Cast` wrappers are layout-compatible with the raw struct, so a pointer
// cast suffices.
metadata.as_ptr().cast(),
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtHdrMetadataExtensionDeviceCommands for C {}
/// Instance-level commands for the `VK_EXT_headless_surface` extension.
pub trait ExtHeadlessSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkCreateHeadlessSurfaceEXT`: creates a surface not backed by any
/// window system. A `None` allocator is forwarded as a null pointer; the
/// surface handle is driver-written and only read on `VK_SUCCESS`.
#[inline]
unsafe fn create_headless_surface_ext(
&self,
create_info: &HeadlessSurfaceCreateInfoEXT,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_headless_surface_ext)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> ExtHeadlessSurfaceExtensionInstanceCommands for C {}
/// Device-level commands for the `VK_EXT_host_image_copy` extension.
pub trait ExtHostImageCopyExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCopyImageToImageEXT`: performs a host-side image-to-image copy.
#[inline]
unsafe fn copy_image_to_image_ext(
&self,
copy_image_to_image_info: &CopyImageToImageInfo,
) -> crate::VkResult<()> {
let __result =
(self.commands().copy_image_to_image_ext)(self.handle(), copy_image_to_image_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkCopyImageToMemoryEXT`: copies image contents to host memory.
#[inline]
unsafe fn copy_image_to_memory_ext(
&self,
copy_image_to_memory_info: &CopyImageToMemoryInfo,
) -> crate::VkResult<()> {
let __result =
(self.commands().copy_image_to_memory_ext)(self.handle(), copy_image_to_memory_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkCopyMemoryToImageEXT`: copies host memory into an image.
#[inline]
unsafe fn copy_memory_to_image_ext(
&self,
copy_memory_to_image_info: &CopyMemoryToImageInfo,
) -> crate::VkResult<()> {
let __result =
(self.commands().copy_memory_to_image_ext)(self.handle(), copy_memory_to_image_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetImageSubresourceLayout2EXT`: queries the layout of an image
/// subresource, writing it into `layout`. No result code is produced.
#[inline]
unsafe fn get_image_subresource_layout2_ext(
&self,
image: Image,
subresource: &ImageSubresource2,
layout: &mut SubresourceLayout2,
) {
let __result = (self.commands().get_image_subresource_layout2_ext)(
self.handle(),
image,
subresource,
layout,
);
}
/// Wraps `vkTransitionImageLayoutEXT`: performs host-side image layout
/// transitions for every entry in `transitions`.
#[inline]
unsafe fn transition_image_layout_ext(
&self,
transitions: &[impl Cast<Target = HostImageLayoutTransitionInfo>],
) -> crate::VkResult<()> {
let __result = (self.commands().transition_image_layout_ext)(
self.handle(),
transitions.len() as u32,
transitions.as_ptr().cast(),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtHostImageCopyExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_host_query_reset` extension.
pub trait ExtHostQueryResetExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkResetQueryPoolEXT`: resets `query_count` queries in `query_pool`
/// starting at `first_query`, from the host. No result code is produced.
#[inline]
unsafe fn reset_query_pool_ext(
&self,
query_pool: QueryPool,
first_query: u32,
query_count: u32,
) {
let __result = (self.commands().reset_query_pool_ext)(
self.handle(),
query_pool,
first_query,
query_count,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtHostQueryResetExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_image_compression_control` extension.
pub trait ExtImageCompressionControlExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetImageSubresourceLayout2EXT` (the same command pointer also
/// exposed by `ExtHostImageCopyExtensionDeviceCommands` — both extensions
/// promote this entry point, so the generator emits it on each trait; use
/// fully-qualified syntax if both traits are in scope).
#[inline]
unsafe fn get_image_subresource_layout2_ext(
&self,
image: Image,
subresource: &ImageSubresource2,
layout: &mut SubresourceLayout2,
) {
let __result = (self.commands().get_image_subresource_layout2_ext)(
self.handle(),
image,
subresource,
layout,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtImageCompressionControlExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_image_drm_format_modifier` extension.
pub trait ExtImageDrmFormatModifierExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetImageDrmFormatModifierPropertiesEXT`: queries the DRM format
/// modifier of `image`, writing it into `properties`.
#[inline]
unsafe fn get_image_drm_format_modifier_properties_ext(
&self,
image: Image,
properties: &mut ImageDrmFormatModifierPropertiesEXT,
) -> crate::VkResult<()> {
let __result = (self.commands().get_image_drm_format_modifier_properties_ext)(
self.handle(),
image,
properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtImageDrmFormatModifierExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_line_rasterization` extension.
pub trait ExtLineRasterizationExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdSetLineStippleEXT`: records the dynamic line-stipple factor and
/// 16-bit pattern into `command_buffer`.
#[inline]
unsafe fn cmd_set_line_stipple_ext(
&self,
command_buffer: CommandBuffer,
line_stipple_factor: u32,
line_stipple_pattern: u16,
) {
let __result = (self.commands().cmd_set_line_stipple_ext)(
command_buffer,
line_stipple_factor,
line_stipple_pattern,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtLineRasterizationExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_memory_decompression` extension.
pub trait ExtMemoryDecompressionExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdDecompressMemoryEXT`: records a memory-decompression operation
/// described by `decompress_memory_info_ext` into `command_buffer`.
#[inline]
unsafe fn cmd_decompress_memory_ext(
&self,
command_buffer: CommandBuffer,
decompress_memory_info_ext: &DecompressMemoryInfoEXT,
) {
let __result =
(self.commands().cmd_decompress_memory_ext)(command_buffer, decompress_memory_info_ext);
}
/// Wraps `vkCmdDecompressMemoryIndirectCountEXT`: records indirect
/// decompression, with the region array and its count both read from device
/// addresses at execution time.
#[inline]
unsafe fn cmd_decompress_memory_indirect_count_ext(
&self,
command_buffer: CommandBuffer,
decompression_method: MemoryDecompressionMethodFlagsEXT,
indirect_commands_address: DeviceAddress,
indirect_commands_count_address: DeviceAddress,
max_decompression_count: u32,
stride: u32,
) {
let __result = (self.commands().cmd_decompress_memory_indirect_count_ext)(
command_buffer,
decompression_method,
indirect_commands_address,
indirect_commands_count_address,
max_decompression_count,
stride,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtMemoryDecompressionExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_mesh_shader` extension.
pub trait ExtMeshShaderExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdDrawMeshTasksEXT`: records a direct mesh-task dispatch with the
/// given workgroup counts.
#[inline]
unsafe fn cmd_draw_mesh_tasks_ext(
&self,
command_buffer: CommandBuffer,
group_count_x: u32,
group_count_y: u32,
group_count_z: u32,
) {
let __result = (self.commands().cmd_draw_mesh_tasks_ext)(
command_buffer,
group_count_x,
group_count_y,
group_count_z,
);
}
/// Wraps `vkCmdDrawMeshTasksIndirectCountEXT`: indirect mesh-task draws with
/// the draw count itself read from `count_buffer` at execution time, capped by
/// `max_draw_count`.
#[inline]
unsafe fn cmd_draw_mesh_tasks_indirect_count_ext(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
count_buffer: Buffer,
count_buffer_offset: DeviceSize,
max_draw_count: u32,
stride: u32,
) {
let __result = (self.commands().cmd_draw_mesh_tasks_indirect_count_ext)(
command_buffer,
buffer,
offset,
count_buffer,
count_buffer_offset,
max_draw_count,
stride,
);
}
/// Wraps `vkCmdDrawMeshTasksIndirectEXT`: indirect mesh-task draws with a
/// host-specified `draw_count` and per-draw `stride`.
#[inline]
unsafe fn cmd_draw_mesh_tasks_indirect_ext(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
draw_count: u32,
stride: u32,
) {
let __result = (self.commands().cmd_draw_mesh_tasks_indirect_ext)(
command_buffer,
buffer,
offset,
draw_count,
stride,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtMeshShaderExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_metal_objects` extension.
pub trait ExtMetalObjectsExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkExportMetalObjectsEXT`: fills `metal_objects_info` with the
/// requested underlying Metal object handles. No result code is produced.
#[inline]
unsafe fn export_metal_objects_ext(&self, metal_objects_info: &mut ExportMetalObjectsInfoEXT) {
let __result =
(self.commands().export_metal_objects_ext)(self.handle(), metal_objects_info);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtMetalObjectsExtensionDeviceCommands for C {}
/// Instance-level commands for the `VK_EXT_metal_surface` extension.
pub trait ExtMetalSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkCreateMetalSurfaceEXT`: creates a surface backed by a CAMetalLayer
/// described in `create_info`. A `None` allocator is forwarded as null; the
/// surface handle is driver-written and only read on `VK_SUCCESS`.
#[inline]
unsafe fn create_metal_surface_ext(
&self,
create_info: &MetalSurfaceCreateInfoEXT,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_metal_surface_ext)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> ExtMetalSurfaceExtensionInstanceCommands for C {}
/// Device-level commands for the `VK_EXT_multi_draw` extension.
pub trait ExtMultiDrawExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdDrawMultiEXT`: records one non-indexed draw per entry in
/// `vertex_info`, each instanced `instance_count` times.
#[inline]
unsafe fn cmd_draw_multi_ext(
&self,
command_buffer: CommandBuffer,
vertex_info: &[impl Cast<Target = MultiDrawInfoEXT>],
instance_count: u32,
first_instance: u32,
stride: u32,
) {
let __result = (self.commands().cmd_draw_multi_ext)(
command_buffer,
vertex_info.len() as u32,
vertex_info.as_ptr().cast(),
instance_count,
first_instance,
stride,
);
}
/// Wraps `vkCmdDrawMultiIndexedEXT`: records one indexed draw per entry in
/// `index_info`. A `None` vertex offset is forwarded as a null pointer
/// (per-draw offsets from the info structs are used instead).
#[inline]
unsafe fn cmd_draw_multi_indexed_ext(
&self,
command_buffer: CommandBuffer,
index_info: &[impl Cast<Target = MultiDrawIndexedInfoEXT>],
instance_count: u32,
first_instance: u32,
stride: u32,
vertex_offset: Option<&i32>,
) {
let __result = (self.commands().cmd_draw_multi_indexed_ext)(
command_buffer,
index_info.len() as u32,
index_info.as_ptr().cast(),
instance_count,
first_instance,
stride,
vertex_offset.map_or(ptr::null(), |v| v),
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtMultiDrawExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_opacity_micromap` extension.
pub trait ExtOpacityMicromapExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkBuildMicromapsEXT`: host-side micromap builds, optionally deferred.
/// Any non-negative `VkResult` (e.g. deferred-operation statuses) is returned
/// as a `SuccessCode` rather than an error — hence the `>=` comparison.
#[inline]
unsafe fn build_micromaps_ext(
&self,
deferred_operation: DeferredOperationKHR,
infos: &[impl Cast<Target = MicromapBuildInfoEXT>],
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().build_micromaps_ext)(
self.handle(),
deferred_operation,
infos.len() as u32,
infos.as_ptr().cast(),
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Wraps `vkCmdBuildMicromapsEXT`: records device-side micromap builds into
/// `command_buffer`.
#[inline]
unsafe fn cmd_build_micromaps_ext(
&self,
command_buffer: CommandBuffer,
infos: &[impl Cast<Target = MicromapBuildInfoEXT>],
) {
let __result = (self.commands().cmd_build_micromaps_ext)(
command_buffer,
infos.len() as u32,
infos.as_ptr().cast(),
);
}
/// Wraps `vkCmdCopyMemoryToMicromapEXT`: records a device-side copy from
/// memory into a micromap.
#[inline]
unsafe fn cmd_copy_memory_to_micromap_ext(
&self,
command_buffer: CommandBuffer,
info: &CopyMemoryToMicromapInfoEXT,
) {
let __result = (self.commands().cmd_copy_memory_to_micromap_ext)(command_buffer, info);
}
/// Wraps `vkCmdCopyMicromapEXT`: records a device-side micromap-to-micromap
/// copy.
#[inline]
unsafe fn cmd_copy_micromap_ext(
&self,
command_buffer: CommandBuffer,
info: &CopyMicromapInfoEXT,
) {
let __result = (self.commands().cmd_copy_micromap_ext)(command_buffer, info);
}
/// Wraps `vkCmdCopyMicromapToMemoryEXT`: records a device-side copy from a
/// micromap into memory.
#[inline]
unsafe fn cmd_copy_micromap_to_memory_ext(
&self,
command_buffer: CommandBuffer,
info: &CopyMicromapToMemoryInfoEXT,
) {
let __result = (self.commands().cmd_copy_micromap_to_memory_ext)(command_buffer, info);
}
/// Wraps `vkCmdWriteMicromapsPropertiesEXT`: records property queries for each
/// micromap into consecutive queries of `query_pool` starting at `first_query`.
#[inline]
unsafe fn cmd_write_micromaps_properties_ext(
&self,
command_buffer: CommandBuffer,
micromaps: &[MicromapEXT],
query_type: QueryType,
query_pool: QueryPool,
first_query: u32,
) {
let __result = (self.commands().cmd_write_micromaps_properties_ext)(
command_buffer,
micromaps.len() as u32,
micromaps.as_ptr(),
query_type,
query_pool,
first_query,
);
}
/// Wraps `vkCopyMemoryToMicromapEXT` (host-side, optionally deferred).
/// Non-negative results become `SuccessCode`s.
#[inline]
unsafe fn copy_memory_to_micromap_ext(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyMemoryToMicromapInfoEXT,
) -> crate::VkResult<SuccessCode> {
let __result =
(self.commands().copy_memory_to_micromap_ext)(self.handle(), deferred_operation, info);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Wraps `vkCopyMicromapEXT` (host-side, optionally deferred).
#[inline]
unsafe fn copy_micromap_ext(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyMicromapInfoEXT,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().copy_micromap_ext)(self.handle(), deferred_operation, info);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Wraps `vkCopyMicromapToMemoryEXT` (host-side, optionally deferred).
#[inline]
unsafe fn copy_micromap_to_memory_ext(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyMicromapToMemoryInfoEXT,
) -> crate::VkResult<SuccessCode> {
let __result =
(self.commands().copy_micromap_to_memory_ext)(self.handle(), deferred_operation, info);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Wraps `vkCreateMicromapEXT`: creates a micromap object. The handle is
/// driver-written and only read on `VK_SUCCESS`.
#[inline]
unsafe fn create_micromap_ext(
&self,
create_info: &MicromapCreateInfoEXT,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<MicromapEXT> {
let mut micromap = MaybeUninit::<MicromapEXT>::uninit();
let __result = (self.commands().create_micromap_ext)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
micromap.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(micromap.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkDestroyMicromapEXT`: destroys a micromap object. No result code is
/// produced.
#[inline]
unsafe fn destroy_micromap_ext(
&self,
micromap: MicromapEXT,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_micromap_ext)(
self.handle(),
micromap,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Wraps `vkGetDeviceMicromapCompatibilityEXT`: checks whether a serialized
/// micromap is compatible with this device. The output enum is unconditionally
/// driver-written, so `assume_init` without a result check is the generated
/// convention here.
#[inline]
unsafe fn get_device_micromap_compatibility_ext(
&self,
version_info: &MicromapVersionInfoEXT,
) -> AccelerationStructureCompatibilityKHR {
let mut compatibility = MaybeUninit::<AccelerationStructureCompatibilityKHR>::uninit();
let __result = (self.commands().get_device_micromap_compatibility_ext)(
self.handle(),
version_info,
compatibility.as_mut_ptr(),
);
compatibility.assume_init()
}
/// Wraps `vkGetMicromapBuildSizesEXT`: queries the scratch/storage sizes
/// required for a build, writing them into `size_info`.
#[inline]
unsafe fn get_micromap_build_sizes_ext(
&self,
build_type: AccelerationStructureBuildTypeKHR,
build_info: &MicromapBuildInfoEXT,
size_info: &mut MicromapBuildSizesInfoEXT,
) {
let __result = (self.commands().get_micromap_build_sizes_ext)(
self.handle(),
build_type,
build_info,
size_info,
);
}
/// Wraps `vkWriteMicromapsPropertiesEXT`: host-side property queries written
/// into the caller-provided `data` buffer with the given `stride`.
#[inline]
unsafe fn write_micromaps_properties_ext(
&self,
micromaps: &[MicromapEXT],
query_type: QueryType,
data: &mut [u8],
stride: usize,
) -> crate::VkResult<()> {
let __result = (self.commands().write_micromaps_properties_ext)(
self.handle(),
micromaps.len() as u32,
micromaps.as_ptr(),
query_type,
data.len() as usize,
data.as_mut_ptr() as *mut c_void,
stride,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtOpacityMicromapExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_pageable_device_local_memory` extension.
pub trait ExtPageableDeviceLocalMemoryExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkSetDeviceMemoryPriorityEXT`: sets the paging priority of a device
/// memory allocation. No result code is produced.
#[inline]
unsafe fn set_device_memory_priority_ext(&self, memory: DeviceMemory, priority: f32) {
let __result =
(self.commands().set_device_memory_priority_ext)(self.handle(), memory, priority);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtPageableDeviceLocalMemoryExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_pipeline_properties` extension.
pub trait ExtPipelinePropertiesExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetPipelinePropertiesEXT`: queries pipeline properties into a
/// caller-opaque `BaseOutStructure` (the concrete structure is selected by its
/// `sType`/`pNext` chain). Only read on `VK_SUCCESS`.
#[inline]
unsafe fn get_pipeline_properties_ext(
&self,
pipeline_info: &PipelineInfoEXT,
) -> crate::VkResult<BaseOutStructure> {
let mut pipeline_properties = MaybeUninit::<BaseOutStructure>::uninit();
let __result = (self.commands().get_pipeline_properties_ext)(
self.handle(),
pipeline_info,
pipeline_properties.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(pipeline_properties.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtPipelinePropertiesExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_present_timing` extension.
pub trait ExtPresentTimingExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetPastPresentationTimingEXT`: retrieves timing data for past
/// presents into the caller-provided properties structure.
#[inline]
unsafe fn get_past_presentation_timing_ext(
&self,
past_presentation_timing_info: &PastPresentationTimingInfoEXT,
past_presentation_timing_properties: &mut PastPresentationTimingPropertiesEXT,
) -> crate::VkResult<()> {
let __result = (self.commands().get_past_presentation_timing_ext)(
self.handle(),
past_presentation_timing_info,
past_presentation_timing_properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetSwapchainTimeDomainPropertiesEXT`: fills the time-domain
/// properties structure and returns the driver-written time-domain counter.
#[inline]
unsafe fn get_swapchain_time_domain_properties_ext(
&self,
swapchain: SwapchainKHR,
swapchain_time_domain_properties: &mut SwapchainTimeDomainPropertiesEXT,
) -> crate::VkResult<u64> {
let mut time_domains_counter = MaybeUninit::<u64>::uninit();
let __result = (self.commands().get_swapchain_time_domain_properties_ext)(
self.handle(),
swapchain,
swapchain_time_domain_properties,
time_domains_counter.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(time_domains_counter.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetSwapchainTimingPropertiesEXT`. Returns the driver-written
/// counter paired with the non-negative result code (`VkSuccessResult`
/// preserves success codes other than `VK_SUCCESS` — hence the `>=` check).
#[inline]
unsafe fn get_swapchain_timing_properties_ext(
&self,
swapchain: SwapchainKHR,
swapchain_timing_properties: &mut SwapchainTimingPropertiesEXT,
) -> crate::VkSuccessResult<u64> {
let mut swapchain_timing_properties_counter = MaybeUninit::<u64>::uninit();
let __result = (self.commands().get_swapchain_timing_properties_ext)(
self.handle(),
swapchain,
swapchain_timing_properties,
swapchain_timing_properties_counter.as_mut_ptr(),
);
if __result >= Result::SUCCESS {
Ok((
swapchain_timing_properties_counter.assume_init(),
__result.into(),
))
} else {
Err(__result.into())
}
}
/// Wraps `vkSetSwapchainPresentTimingQueueSizeEXT`: resizes the present-timing
/// queue for `swapchain`. Non-negative results become `SuccessCode`s.
#[inline]
unsafe fn set_swapchain_present_timing_queue_size_ext(
&self,
swapchain: SwapchainKHR,
size: u32,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().set_swapchain_present_timing_queue_size_ext)(
self.handle(),
swapchain,
size,
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtPresentTimingExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_private_data` extension.
pub trait ExtPrivateDataExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCreatePrivateDataSlotEXT`: creates a private-data slot. The handle
/// is driver-written and only read on `VK_SUCCESS`.
#[inline]
unsafe fn create_private_data_slot_ext(
&self,
create_info: &PrivateDataSlotCreateInfo,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<PrivateDataSlot> {
let mut private_data_slot = MaybeUninit::<PrivateDataSlot>::uninit();
let __result = (self.commands().create_private_data_slot_ext)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
private_data_slot.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(private_data_slot.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkDestroyPrivateDataSlotEXT`: destroys a private-data slot. No
/// result code is produced.
#[inline]
unsafe fn destroy_private_data_slot_ext(
&self,
private_data_slot: PrivateDataSlot,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_private_data_slot_ext)(
self.handle(),
private_data_slot,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Wraps `vkGetPrivateDataEXT`: reads the u64 previously stored for
/// (`object_type`, `object_handle`) in `private_data_slot`. The output is
/// unconditionally driver-written, so no result check precedes `assume_init`.
#[inline]
unsafe fn get_private_data_ext(
&self,
object_type: ObjectType,
object_handle: u64,
private_data_slot: PrivateDataSlot,
) -> u64 {
let mut data = MaybeUninit::<u64>::uninit();
let __result = (self.commands().get_private_data_ext)(
self.handle(),
object_type,
object_handle,
private_data_slot,
data.as_mut_ptr(),
);
data.assume_init()
}
/// Wraps `vkSetPrivateDataEXT`: associates `data` with the given object in
/// `private_data_slot`.
#[inline]
unsafe fn set_private_data_ext(
&self,
object_type: ObjectType,
object_handle: u64,
private_data_slot: PrivateDataSlot,
data: u64,
) -> crate::VkResult<()> {
let __result = (self.commands().set_private_data_ext)(
self.handle(),
object_type,
object_handle,
private_data_slot,
data,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtPrivateDataExtensionDeviceCommands for C {}
/// Device-level commands for the `VK_EXT_sample_locations` extension.
pub trait ExtSampleLocationsExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdSetSampleLocationsEXT`: records the dynamic sample-locations
/// state into `command_buffer`.
#[inline]
unsafe fn cmd_set_sample_locations_ext(
&self,
command_buffer: CommandBuffer,
sample_locations_info: &SampleLocationsInfoEXT,
) {
let __result =
(self.commands().cmd_set_sample_locations_ext)(command_buffer, sample_locations_info);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtSampleLocationsExtensionDeviceCommands for C {}
/// Instance-level commands for the `VK_EXT_sample_locations` extension.
pub trait ExtSampleLocationsExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkGetPhysicalDeviceMultisamplePropertiesEXT`: queries the
/// multisample properties supported for the given sample count, writing them
/// into `multisample_properties`. No result code is produced.
#[inline]
unsafe fn get_physical_device_multisample_properties_ext(
&self,
physical_device: PhysicalDevice,
samples: SampleCountFlags,
multisample_properties: &mut MultisamplePropertiesEXT,
) {
let __result = (self
.commands()
.get_physical_device_multisample_properties_ext)(
physical_device,
samples,
multisample_properties,
);
}
}
impl<C: InstanceV1_0 + ?Sized> ExtSampleLocationsExtensionInstanceCommands for C {}
/// Device-level commands for the `VK_EXT_shader_module_identifier` extension.
pub trait ExtShaderModuleIdentifierExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetShaderModuleCreateInfoIdentifierEXT`: derives the identifier a
/// module built from `create_info` would have, without creating the module.
#[inline]
unsafe fn get_shader_module_create_info_identifier_ext(
&self,
create_info: &ShaderModuleCreateInfo,
identifier: &mut ShaderModuleIdentifierEXT,
) {
let __result = (self.commands().get_shader_module_create_info_identifier_ext)(
self.handle(),
create_info,
identifier,
);
}
/// Wraps `vkGetShaderModuleIdentifierEXT`: writes the identifier of an
/// existing shader module into `identifier`. No result code is produced.
#[inline]
unsafe fn get_shader_module_identifier_ext(
&self,
shader_module: ShaderModule,
identifier: &mut ShaderModuleIdentifierEXT,
) {
let __result = (self.commands().get_shader_module_identifier_ext)(
self.handle(),
shader_module,
identifier,
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtShaderModuleIdentifierExtensionDeviceCommands for C {}
pub trait ExtShaderObjectExtensionDeviceCommands: DeviceV1_0 {
#[inline]
unsafe fn cmd_bind_shaders_ext(
&self,
command_buffer: CommandBuffer,
stages: &[ShaderStageFlags],
shaders: &[ShaderEXT],
) {
let __result = (self.commands().cmd_bind_shaders_ext)(
command_buffer,
stages.len() as u32,
stages.as_ptr(),
shaders.as_ptr(),
);
}
#[inline]
unsafe fn cmd_bind_vertex_buffers2_ext(
&self,
command_buffer: CommandBuffer,
first_binding: u32,
buffers: &[Buffer],
offsets: &[DeviceSize],
sizes: &[DeviceSize],
strides: &[DeviceSize],
) {
let __result = (self.commands().cmd_bind_vertex_buffers2_ext)(
command_buffer,
first_binding,
buffers.len() as u32,
buffers.as_ptr(),
offsets.as_ptr(),
sizes.as_ptr(),
strides.as_ptr(),
);
}
#[inline]
unsafe fn cmd_set_alpha_to_coverage_enable_ext(
&self,
command_buffer: CommandBuffer,
alpha_to_coverage_enable: bool,
) {
let __result = (self.commands().cmd_set_alpha_to_coverage_enable_ext)(
command_buffer,
alpha_to_coverage_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_alpha_to_one_enable_ext(
&self,
command_buffer: CommandBuffer,
alpha_to_one_enable: bool,
) {
let __result = (self.commands().cmd_set_alpha_to_one_enable_ext)(
command_buffer,
alpha_to_one_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_color_blend_advanced_ext(
&self,
command_buffer: CommandBuffer,
first_attachment: u32,
color_blend_advanced: &[impl Cast<Target = ColorBlendAdvancedEXT>],
) {
let __result = (self.commands().cmd_set_color_blend_advanced_ext)(
command_buffer,
first_attachment,
color_blend_advanced.len() as u32,
color_blend_advanced.as_ptr().cast(),
);
}
#[inline]
unsafe fn cmd_set_color_blend_enable_ext(
&self,
command_buffer: CommandBuffer,
first_attachment: u32,
color_blend_enables: &[Bool32],
) {
let __result = (self.commands().cmd_set_color_blend_enable_ext)(
command_buffer,
first_attachment,
color_blend_enables.len() as u32,
color_blend_enables.as_ptr(),
);
}
#[inline]
unsafe fn cmd_set_color_blend_equation_ext(
&self,
command_buffer: CommandBuffer,
first_attachment: u32,
color_blend_equations: &[impl Cast<Target = ColorBlendEquationEXT>],
) {
let __result = (self.commands().cmd_set_color_blend_equation_ext)(
command_buffer,
first_attachment,
color_blend_equations.len() as u32,
color_blend_equations.as_ptr().cast(),
);
}
#[inline]
unsafe fn cmd_set_color_write_mask_ext(
&self,
command_buffer: CommandBuffer,
first_attachment: u32,
color_write_masks: &[ColorComponentFlags],
) {
let __result = (self.commands().cmd_set_color_write_mask_ext)(
command_buffer,
first_attachment,
color_write_masks.len() as u32,
color_write_masks.as_ptr(),
);
}
#[inline]
unsafe fn cmd_set_conservative_rasterization_mode_ext(
&self,
command_buffer: CommandBuffer,
conservative_rasterization_mode: ConservativeRasterizationModeEXT,
) {
let __result = (self.commands().cmd_set_conservative_rasterization_mode_ext)(
command_buffer,
conservative_rasterization_mode,
);
}
#[inline]
unsafe fn cmd_set_coverage_modulation_mode_nv(
&self,
command_buffer: CommandBuffer,
coverage_modulation_mode: CoverageModulationModeNV,
) {
let __result = (self.commands().cmd_set_coverage_modulation_mode_nv)(
command_buffer,
coverage_modulation_mode,
);
}
#[inline]
unsafe fn cmd_set_coverage_modulation_table_enable_nv(
&self,
command_buffer: CommandBuffer,
coverage_modulation_table_enable: bool,
) {
let __result = (self.commands().cmd_set_coverage_modulation_table_enable_nv)(
command_buffer,
coverage_modulation_table_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_coverage_modulation_table_nv(
&self,
command_buffer: CommandBuffer,
coverage_modulation_table: &[f32],
) {
let __result = (self.commands().cmd_set_coverage_modulation_table_nv)(
command_buffer,
coverage_modulation_table.len() as u32,
coverage_modulation_table.as_ptr(),
);
}
#[inline]
unsafe fn cmd_set_coverage_reduction_mode_nv(
&self,
command_buffer: CommandBuffer,
coverage_reduction_mode: CoverageReductionModeNV,
) {
let __result = (self.commands().cmd_set_coverage_reduction_mode_nv)(
command_buffer,
coverage_reduction_mode,
);
}
#[inline]
unsafe fn cmd_set_coverage_to_color_enable_nv(
&self,
command_buffer: CommandBuffer,
coverage_to_color_enable: bool,
) {
let __result = (self.commands().cmd_set_coverage_to_color_enable_nv)(
command_buffer,
coverage_to_color_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_coverage_to_color_location_nv(
&self,
command_buffer: CommandBuffer,
coverage_to_color_location: u32,
) {
let __result = (self.commands().cmd_set_coverage_to_color_location_nv)(
command_buffer,
coverage_to_color_location,
);
}
#[inline]
unsafe fn cmd_set_cull_mode_ext(
&self,
command_buffer: CommandBuffer,
cull_mode: CullModeFlags,
) {
let __result = (self.commands().cmd_set_cull_mode_ext)(command_buffer, cull_mode);
}
#[inline]
unsafe fn cmd_set_depth_bias_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_bias_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_bias_enable_ext)(
command_buffer,
depth_bias_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_bounds_test_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_bounds_test_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_bounds_test_enable_ext)(
command_buffer,
depth_bounds_test_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_clamp_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_clamp_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_clamp_enable_ext)(
command_buffer,
depth_clamp_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_clamp_range_ext(
&self,
command_buffer: CommandBuffer,
depth_clamp_mode: DepthClampModeEXT,
depth_clamp_range: Option<&DepthClampRangeEXT>,
) {
let __result = (self.commands().cmd_set_depth_clamp_range_ext)(
command_buffer,
depth_clamp_mode,
depth_clamp_range.map_or(ptr::null(), |v| v),
);
}
#[inline]
unsafe fn cmd_set_depth_clip_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_clip_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_clip_enable_ext)(
command_buffer,
depth_clip_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_clip_negative_one_to_one_ext(
&self,
command_buffer: CommandBuffer,
negative_one_to_one: bool,
) {
let __result = (self.commands().cmd_set_depth_clip_negative_one_to_one_ext)(
command_buffer,
negative_one_to_one as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_compare_op_ext(
&self,
command_buffer: CommandBuffer,
depth_compare_op: CompareOp,
) {
let __result =
(self.commands().cmd_set_depth_compare_op_ext)(command_buffer, depth_compare_op);
}
#[inline]
unsafe fn cmd_set_depth_test_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_test_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_test_enable_ext)(
command_buffer,
depth_test_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_depth_write_enable_ext(
&self,
command_buffer: CommandBuffer,
depth_write_enable: bool,
) {
let __result = (self.commands().cmd_set_depth_write_enable_ext)(
command_buffer,
depth_write_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_extra_primitive_overestimation_size_ext(
&self,
command_buffer: CommandBuffer,
extra_primitive_overestimation_size: f32,
) {
let __result = (self
.commands()
.cmd_set_extra_primitive_overestimation_size_ext)(
command_buffer,
extra_primitive_overestimation_size,
);
}
#[inline]
unsafe fn cmd_set_front_face_ext(&self, command_buffer: CommandBuffer, front_face: FrontFace) {
let __result = (self.commands().cmd_set_front_face_ext)(command_buffer, front_face);
}
#[inline]
unsafe fn cmd_set_line_rasterization_mode_ext(
&self,
command_buffer: CommandBuffer,
line_rasterization_mode: LineRasterizationModeEXT,
) {
let __result = (self.commands().cmd_set_line_rasterization_mode_ext)(
command_buffer,
line_rasterization_mode,
);
}
#[inline]
unsafe fn cmd_set_line_stipple_enable_ext(
&self,
command_buffer: CommandBuffer,
stippled_line_enable: bool,
) {
let __result = (self.commands().cmd_set_line_stipple_enable_ext)(
command_buffer,
stippled_line_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_logic_op_ext(&self, command_buffer: CommandBuffer, logic_op: LogicOp) {
let __result = (self.commands().cmd_set_logic_op_ext)(command_buffer, logic_op);
}
#[inline]
unsafe fn cmd_set_logic_op_enable_ext(
&self,
command_buffer: CommandBuffer,
logic_op_enable: bool,
) {
let __result = (self.commands().cmd_set_logic_op_enable_ext)(
command_buffer,
logic_op_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_patch_control_points_ext(
&self,
command_buffer: CommandBuffer,
patch_control_points: u32,
) {
let __result = (self.commands().cmd_set_patch_control_points_ext)(
command_buffer,
patch_control_points,
);
}
#[inline]
unsafe fn cmd_set_polygon_mode_ext(
&self,
command_buffer: CommandBuffer,
polygon_mode: PolygonMode,
) {
let __result = (self.commands().cmd_set_polygon_mode_ext)(command_buffer, polygon_mode);
}
#[inline]
unsafe fn cmd_set_primitive_restart_enable_ext(
&self,
command_buffer: CommandBuffer,
primitive_restart_enable: bool,
) {
let __result = (self.commands().cmd_set_primitive_restart_enable_ext)(
command_buffer,
primitive_restart_enable as Bool32,
);
}
#[inline]
unsafe fn cmd_set_primitive_topology_ext(
&self,
command_buffer: CommandBuffer,
primitive_topology: PrimitiveTopology,
) {
let __result =
(self.commands().cmd_set_primitive_topology_ext)(command_buffer, primitive_topology);
}
#[inline]
unsafe fn cmd_set_provoking_vertex_mode_ext(
&self,
command_buffer: CommandBuffer,
provoking_vertex_mode: ProvokingVertexModeEXT,
) {
let __result = (self.commands().cmd_set_provoking_vertex_mode_ext)(
command_buffer,
provoking_vertex_mode,
);
}
#[inline]
unsafe fn cmd_set_rasterization_samples_ext(
&self,
command_buffer: CommandBuffer,
rasterization_samples: SampleCountFlags,
) {
let __result = (self.commands().cmd_set_rasterization_samples_ext)(
command_buffer,
rasterization_samples,
);
}
#[inline]
unsafe fn cmd_set_rasterization_stream_ext(
&self,
command_buffer: CommandBuffer,
rasterization_stream: u32,
) {
let __result = (self.commands().cmd_set_rasterization_stream_ext)(
command_buffer,
rasterization_stream,
);
}
/// Dynamically enables or disables rasterizer discard
/// (vkCmdSetRasterizerDiscardEnableEXT).
#[inline]
unsafe fn cmd_set_rasterizer_discard_enable_ext(
&self,
command_buffer: CommandBuffer,
rasterizer_discard_enable: bool,
) {
let __result = (self.commands().cmd_set_rasterizer_discard_enable_ext)(
command_buffer,
rasterizer_discard_enable as Bool32,
);
}
/// Dynamically enables the NV representative fragment test
/// (vkCmdSetRepresentativeFragmentTestEnableNV).
#[inline]
unsafe fn cmd_set_representative_fragment_test_enable_nv(
&self,
command_buffer: CommandBuffer,
representative_fragment_test_enable: bool,
) {
let __result = (self
.commands()
.cmd_set_representative_fragment_test_enable_nv)(
command_buffer,
representative_fragment_test_enable as Bool32,
);
}
/// Dynamically enables custom sample locations
/// (vkCmdSetSampleLocationsEnableEXT).
#[inline]
unsafe fn cmd_set_sample_locations_enable_ext(
&self,
command_buffer: CommandBuffer,
sample_locations_enable: bool,
) {
let __result = (self.commands().cmd_set_sample_locations_enable_ext)(
command_buffer,
sample_locations_enable as Bool32,
);
}
/// Dynamically sets the sample mask (vkCmdSetSampleMaskEXT). A `None`
/// mask is forwarded as a null pointer, letting the driver use all-ones.
#[inline]
unsafe fn cmd_set_sample_mask_ext(
&self,
command_buffer: CommandBuffer,
samples: SampleCountFlags,
sample_mask: Option<&SampleMask>,
) {
let __result = (self.commands().cmd_set_sample_mask_ext)(
command_buffer,
samples,
// Option<&T> -> *const T: null when absent, borrow otherwise.
sample_mask.map_or(ptr::null(), |v| v),
);
}
/// Dynamically sets scissors together with their count
/// (vkCmdSetScissorWithCountEXT).
#[inline]
unsafe fn cmd_set_scissor_with_count_ext(
&self,
command_buffer: CommandBuffer,
scissors: &[impl Cast<Target = Rect2D>],
) {
let __result = (self.commands().cmd_set_scissor_with_count_ext)(
command_buffer,
scissors.len() as u32,
// Cast impl guarantees layout compatibility with Rect2D.
scissors.as_ptr().cast(),
);
}
/// Dynamically enables the NV shading-rate image
/// (vkCmdSetShadingRateImageEnableNV).
#[inline]
unsafe fn cmd_set_shading_rate_image_enable_nv(
&self,
command_buffer: CommandBuffer,
shading_rate_image_enable: bool,
) {
let __result = (self.commands().cmd_set_shading_rate_image_enable_nv)(
command_buffer,
shading_rate_image_enable as Bool32,
);
}
/// Dynamically configures the stencil operations for the faces selected by
/// `face_mask` (vkCmdSetStencilOpEXT).
#[inline]
unsafe fn cmd_set_stencil_op_ext(
&self,
command_buffer: CommandBuffer,
face_mask: StencilFaceFlags,
fail_op: StencilOp,
pass_op: StencilOp,
depth_fail_op: StencilOp,
compare_op: CompareOp,
) {
// All arguments are plain values; forward them in declaration order.
let dispatch = self.commands().cmd_set_stencil_op_ext;
dispatch(
command_buffer,
face_mask,
fail_op,
pass_op,
depth_fail_op,
compare_op,
);
}
/// Dynamically enables or disables the stencil test
/// (vkCmdSetStencilTestEnableEXT).
#[inline]
unsafe fn cmd_set_stencil_test_enable_ext(
&self,
command_buffer: CommandBuffer,
stencil_test_enable: bool,
) {
let __result = (self.commands().cmd_set_stencil_test_enable_ext)(
command_buffer,
stencil_test_enable as Bool32,
);
}
/// Dynamically sets the tessellation domain origin
/// (vkCmdSetTessellationDomainOriginEXT).
#[inline]
unsafe fn cmd_set_tessellation_domain_origin_ext(
&self,
command_buffer: CommandBuffer,
domain_origin: TessellationDomainOrigin,
) {
let __result =
(self.commands().cmd_set_tessellation_domain_origin_ext)(command_buffer, domain_origin);
}
/// Dynamically sets vertex input bindings and attributes
/// (vkCmdSetVertexInputEXT). Slice lengths become the C count parameters.
#[inline]
unsafe fn cmd_set_vertex_input_ext(
&self,
command_buffer: CommandBuffer,
vertex_binding_descriptions: &[impl Cast<Target = VertexInputBindingDescription2EXT>],
vertex_attribute_descriptions: &[impl Cast<Target = VertexInputAttributeDescription2EXT>],
) {
let __result = (self.commands().cmd_set_vertex_input_ext)(
command_buffer,
vertex_binding_descriptions.len() as u32,
vertex_binding_descriptions.as_ptr().cast(),
vertex_attribute_descriptions.len() as u32,
vertex_attribute_descriptions.as_ptr().cast(),
);
}
/// Dynamically sets viewport swizzles starting at `first_viewport`
/// (vkCmdSetViewportSwizzleNV).
#[inline]
unsafe fn cmd_set_viewport_swizzle_nv(
&self,
command_buffer: CommandBuffer,
first_viewport: u32,
viewport_swizzles: &[impl Cast<Target = ViewportSwizzleNV>],
) {
let __result = (self.commands().cmd_set_viewport_swizzle_nv)(
command_buffer,
first_viewport,
viewport_swizzles.len() as u32,
viewport_swizzles.as_ptr().cast(),
);
}
/// Dynamically enables NV viewport W scaling
/// (vkCmdSetViewportWScalingEnableNV).
#[inline]
unsafe fn cmd_set_viewport_w_scaling_enable_nv(
&self,
command_buffer: CommandBuffer,
viewport_w_scaling_enable: bool,
) {
let __result = (self.commands().cmd_set_viewport_w_scaling_enable_nv)(
command_buffer,
viewport_w_scaling_enable as Bool32,
);
}
/// Dynamically sets viewports together with their count
/// (vkCmdSetViewportWithCountEXT).
#[inline]
unsafe fn cmd_set_viewport_with_count_ext(
&self,
command_buffer: CommandBuffer,
viewports: &[impl Cast<Target = Viewport>],
) {
let __result = (self.commands().cmd_set_viewport_with_count_ext)(
command_buffer,
viewports.len() as u32,
viewports.as_ptr().cast(),
);
}
/// Creates one shader object per entry of `create_infos`
/// (vkCreateShadersEXT, VK_EXT_shader_object).
///
/// Returns the created handles plus the success code (the extension can
/// return a non-zero success code such as INCOMPLETE-style statuses).
#[inline]
unsafe fn create_shaders_ext(
&self,
create_infos: &[impl Cast<Target = ShaderCreateInfoEXT>],
allocator: Option<&AllocationCallbacks>,
) -> crate::VkSuccessResult<Vec<ShaderEXT>> {
let mut shaders = Vec::with_capacity(create_infos.len() as usize);
let __result = (self.commands().create_shaders_ext)(
self.handle(),
create_infos.len() as u32,
create_infos.as_ptr().cast(),
allocator.map_or(ptr::null(), |v| v),
shaders.as_mut_ptr(),
);
// NOTE(review): set_len runs before the error check; this assumes the
// driver writes every handle slot even on failure (failed entries as
// null handles) — confirm this matches the generator's other create_*
// wrappers before changing.
shaders.set_len(create_infos.len() as usize);
if __result >= Result::SUCCESS {
Ok((shaders, __result.into()))
} else {
Err(__result.into())
}
}
/// Destroys a shader object (vkDestroyShaderEXT). A `None` allocator
/// forwards a null pointer, selecting the default allocator.
#[inline]
unsafe fn destroy_shader_ext(
&self,
shader: ShaderEXT,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_shader_ext)(
self.handle(),
shader,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Retrieves a shader object's binary data (vkGetShaderBinaryDataEXT)
/// using the standard Vulkan two-call pattern: first call queries the
/// size, second call fills the buffer.
#[inline]
unsafe fn get_shader_binary_data_ext(&self, shader: ShaderEXT) -> crate::VkResult<Vec<u8>> {
let mut data_size = 0;
// Size query: null data pointer makes the driver report the size only.
(self.commands().get_shader_binary_data_ext)(
self.handle(),
shader,
&mut data_size,
ptr::null_mut(),
);
let mut data = Vec::with_capacity(data_size as usize);
let __result = (self.commands().get_shader_binary_data_ext)(
self.handle(),
shader,
&mut data_size,
data.as_mut_ptr() as *mut c_void,
);
// data_size may shrink on the second call; capacity still covers it.
debug_assert!(data.capacity() >= data_size as usize);
data.set_len(data_size as usize);
if __result == Result::SUCCESS {
Ok(data)
} else {
Err(__result.into())
}
}
}
/// Blanket impl: every `DeviceV1_0` gains the VK_EXT_shader_object commands.
impl<C: DeviceV1_0 + ?Sized> ExtShaderObjectExtensionDeviceCommands for C {}
/// Device-level commands for VK_EXT_swapchain_maintenance1.
pub trait ExtSwapchainMaintenance1ExtensionDeviceCommands: DeviceV1_0 {
/// Releases previously acquired swapchain images back to the
/// presentation engine (vkReleaseSwapchainImagesEXT).
#[inline]
unsafe fn release_swapchain_images_ext(
&self,
release_info: &ReleaseSwapchainImagesInfoKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().release_swapchain_images_ext)(self.handle(), release_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtSwapchainMaintenance1ExtensionDeviceCommands for C {}
/// Instance-level commands for VK_EXT_tooling_info.
pub trait ExtToolingInfoExtensionInstanceCommands: InstanceV1_0 {
/// Lists the tools active on `physical_device`
/// (vkGetPhysicalDeviceToolPropertiesEXT), via the two-call
/// count-then-fill enumeration pattern.
#[inline]
unsafe fn get_physical_device_tool_properties_ext(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<PhysicalDeviceToolProperties>> {
let mut tool_count = 0;
// First call: null output pointer queries the count only.
(self.commands().get_physical_device_tool_properties_ext)(
physical_device,
&mut tool_count,
ptr::null_mut(),
);
let mut tool_properties = Vec::with_capacity(tool_count as usize);
let __result = (self.commands().get_physical_device_tool_properties_ext)(
physical_device,
&mut tool_count,
tool_properties.as_mut_ptr(),
);
debug_assert!(tool_properties.capacity() >= tool_count as usize);
tool_properties.set_len(tool_count as usize);
if __result == Result::SUCCESS {
Ok(tool_properties)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> ExtToolingInfoExtensionInstanceCommands for C {}
/// Device-level commands for VK_EXT_transform_feedback. Each method is a
/// thin wrapper over the loaded function pointer; slice lengths are
/// forwarded as the C count parameters, so paired slices (buffers and
/// their offsets/sizes) are expected to have equal length.
pub trait ExtTransformFeedbackExtensionDeviceCommands: DeviceV1_0 {
/// Begins an indexed query (vkCmdBeginQueryIndexedEXT).
#[inline]
unsafe fn cmd_begin_query_indexed_ext(
&self,
command_buffer: CommandBuffer,
query_pool: QueryPool,
query: u32,
flags: QueryControlFlags,
index: u32,
) {
let __result = (self.commands().cmd_begin_query_indexed_ext)(
command_buffer,
query_pool,
query,
flags,
index,
);
}
/// Begins transform feedback (vkCmdBeginTransformFeedbackEXT).
/// The count comes from `counter_buffers`; `counter_buffer_offsets`
/// is assumed to be at least as long — TODO confirm callers uphold this.
#[inline]
unsafe fn cmd_begin_transform_feedback_ext(
&self,
command_buffer: CommandBuffer,
first_counter_buffer: u32,
counter_buffers: &[Buffer],
counter_buffer_offsets: &[DeviceSize],
) {
let __result = (self.commands().cmd_begin_transform_feedback_ext)(
command_buffer,
first_counter_buffer,
counter_buffers.len() as u32,
counter_buffers.as_ptr(),
counter_buffer_offsets.as_ptr(),
);
}
/// Binds transform-feedback buffers (vkCmdBindTransformFeedbackBuffersEXT).
#[inline]
unsafe fn cmd_bind_transform_feedback_buffers_ext(
&self,
command_buffer: CommandBuffer,
first_binding: u32,
buffers: &[Buffer],
offsets: &[DeviceSize],
sizes: &[DeviceSize],
) {
let __result = (self.commands().cmd_bind_transform_feedback_buffers_ext)(
command_buffer,
first_binding,
buffers.len() as u32,
buffers.as_ptr(),
offsets.as_ptr(),
sizes.as_ptr(),
);
}
/// Draws with a vertex count read from a counter buffer
/// (vkCmdDrawIndirectByteCountEXT).
#[inline]
unsafe fn cmd_draw_indirect_byte_count_ext(
&self,
command_buffer: CommandBuffer,
instance_count: u32,
first_instance: u32,
counter_buffer: Buffer,
counter_buffer_offset: DeviceSize,
counter_offset: u32,
vertex_stride: u32,
) {
let __result = (self.commands().cmd_draw_indirect_byte_count_ext)(
command_buffer,
instance_count,
first_instance,
counter_buffer,
counter_buffer_offset,
counter_offset,
vertex_stride,
);
}
/// Ends an indexed query (vkCmdEndQueryIndexedEXT).
#[inline]
unsafe fn cmd_end_query_indexed_ext(
&self,
command_buffer: CommandBuffer,
query_pool: QueryPool,
query: u32,
index: u32,
) {
let __result =
(self.commands().cmd_end_query_indexed_ext)(command_buffer, query_pool, query, index);
}
/// Ends transform feedback (vkCmdEndTransformFeedbackEXT).
#[inline]
unsafe fn cmd_end_transform_feedback_ext(
&self,
command_buffer: CommandBuffer,
first_counter_buffer: u32,
counter_buffers: &[Buffer],
counter_buffer_offsets: &[DeviceSize],
) {
let __result = (self.commands().cmd_end_transform_feedback_ext)(
command_buffer,
first_counter_buffer,
counter_buffers.len() as u32,
counter_buffers.as_ptr(),
counter_buffer_offsets.as_ptr(),
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtTransformFeedbackExtensionDeviceCommands for C {}
/// Device-level commands for VK_EXT_validation_cache.
pub trait ExtValidationCacheExtensionDeviceCommands: DeviceV1_0 {
/// Creates a validation cache (vkCreateValidationCacheEXT).
#[inline]
unsafe fn create_validation_cache_ext(
&self,
create_info: &ValidationCacheCreateInfoEXT,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<ValidationCacheEXT> {
// MaybeUninit receives the handle; only read after SUCCESS.
let mut validation_cache = MaybeUninit::<ValidationCacheEXT>::uninit();
let __result = (self.commands().create_validation_cache_ext)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
validation_cache.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(validation_cache.assume_init())
} else {
Err(__result.into())
}
}
/// Destroys a validation cache (vkDestroyValidationCacheEXT).
#[inline]
unsafe fn destroy_validation_cache_ext(
&self,
validation_cache: ValidationCacheEXT,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_validation_cache_ext)(
self.handle(),
validation_cache,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Reads back the cache's serialized data
/// (vkGetValidationCacheDataEXT) via the two-call size-query pattern.
#[inline]
unsafe fn get_validation_cache_data_ext(
&self,
validation_cache: ValidationCacheEXT,
) -> crate::VkResult<Vec<u8>> {
let mut data_size = 0;
// First call with a null pointer fetches the required size.
(self.commands().get_validation_cache_data_ext)(
self.handle(),
validation_cache,
&mut data_size,
ptr::null_mut(),
);
let mut data = Vec::with_capacity(data_size as usize);
let __result = (self.commands().get_validation_cache_data_ext)(
self.handle(),
validation_cache,
&mut data_size,
data.as_mut_ptr() as *mut c_void,
);
debug_assert!(data.capacity() >= data_size as usize);
data.set_len(data_size as usize);
if __result == Result::SUCCESS {
Ok(data)
} else {
Err(__result.into())
}
}
/// Merges `src_caches` into `dst_cache` (vkMergeValidationCachesEXT).
#[inline]
unsafe fn merge_validation_caches_ext(
&self,
dst_cache: ValidationCacheEXT,
src_caches: &[ValidationCacheEXT],
) -> crate::VkResult<()> {
let __result = (self.commands().merge_validation_caches_ext)(
self.handle(),
dst_cache,
src_caches.len() as u32,
src_caches.as_ptr(),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> ExtValidationCacheExtensionDeviceCommands for C {}
/// Device-level commands for VK_EXT_vertex_input_dynamic_state.
/// Duplicates the method promoted into VK_EXT_shader_object so either
/// extension trait can be used independently.
pub trait ExtVertexInputDynamicStateExtensionDeviceCommands: DeviceV1_0 {
/// Dynamically sets vertex input state (vkCmdSetVertexInputEXT).
#[inline]
unsafe fn cmd_set_vertex_input_ext(
&self,
command_buffer: CommandBuffer,
vertex_binding_descriptions: &[impl Cast<Target = VertexInputBindingDescription2EXT>],
vertex_attribute_descriptions: &[impl Cast<Target = VertexInputAttributeDescription2EXT>],
) {
let __result = (self.commands().cmd_set_vertex_input_ext)(
command_buffer,
vertex_binding_descriptions.len() as u32,
vertex_binding_descriptions.as_ptr().cast(),
vertex_attribute_descriptions.len() as u32,
vertex_attribute_descriptions.as_ptr().cast(),
);
}
}
impl<C: DeviceV1_0 + ?Sized> ExtVertexInputDynamicStateExtensionDeviceCommands for C {}
/// Device-level commands for VK_FUCHSIA_buffer_collection.
pub trait FuchsiaBufferCollectionExtensionDeviceCommands: DeviceV1_0 {
/// Creates a buffer collection (vkCreateBufferCollectionFUCHSIA).
#[inline]
unsafe fn create_buffer_collection_fuchsia(
&self,
create_info: &BufferCollectionCreateInfoFUCHSIA,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<BufferCollectionFUCHSIA> {
let mut collection = MaybeUninit::<BufferCollectionFUCHSIA>::uninit();
let __result = (self.commands().create_buffer_collection_fuchsia)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
collection.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(collection.assume_init())
} else {
Err(__result.into())
}
}
/// Destroys a buffer collection (vkDestroyBufferCollectionFUCHSIA).
#[inline]
unsafe fn destroy_buffer_collection_fuchsia(
&self,
collection: BufferCollectionFUCHSIA,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_buffer_collection_fuchsia)(
self.handle(),
collection,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Queries a collection's properties, written into `properties`
/// (vkGetBufferCollectionPropertiesFUCHSIA).
#[inline]
unsafe fn get_buffer_collection_properties_fuchsia(
&self,
collection: BufferCollectionFUCHSIA,
properties: &mut BufferCollectionPropertiesFUCHSIA,
) -> crate::VkResult<()> {
let __result = (self.commands().get_buffer_collection_properties_fuchsia)(
self.handle(),
collection,
properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Sets buffer constraints on a collection
/// (vkSetBufferCollectionBufferConstraintsFUCHSIA).
#[inline]
unsafe fn set_buffer_collection_buffer_constraints_fuchsia(
&self,
collection: BufferCollectionFUCHSIA,
buffer_constraints_info: &BufferConstraintsInfoFUCHSIA,
) -> crate::VkResult<()> {
let __result = (self
.commands()
.set_buffer_collection_buffer_constraints_fuchsia)(
self.handle(),
collection,
buffer_constraints_info,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Sets image constraints on a collection
/// (vkSetBufferCollectionImageConstraintsFUCHSIA).
#[inline]
unsafe fn set_buffer_collection_image_constraints_fuchsia(
&self,
collection: BufferCollectionFUCHSIA,
image_constraints_info: &ImageConstraintsInfoFUCHSIA,
) -> crate::VkResult<()> {
let __result = (self
.commands()
.set_buffer_collection_image_constraints_fuchsia)(
self.handle(),
collection,
image_constraints_info,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> FuchsiaBufferCollectionExtensionDeviceCommands for C {}
/// Device-level commands for VK_FUCHSIA_external_memory.
pub trait FuchsiaExternalMemoryExtensionDeviceCommands: DeviceV1_0 {
/// Exports device memory as a Zircon handle
/// (vkGetMemoryZirconHandleFUCHSIA).
#[inline]
unsafe fn get_memory_zircon_handle_fuchsia(
&self,
get_zircon_handle_info: &MemoryGetZirconHandleInfoFUCHSIA,
) -> crate::VkResult<zx_handle_t> {
let mut zircon_handle = MaybeUninit::<zx_handle_t>::uninit();
let __result = (self.commands().get_memory_zircon_handle_fuchsia)(
self.handle(),
get_zircon_handle_info,
zircon_handle.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(zircon_handle.assume_init())
} else {
Err(__result.into())
}
}
/// Queries the Vulkan-side properties of an external Zircon handle
/// (vkGetMemoryZirconHandlePropertiesFUCHSIA); results are written
/// into `memory_zircon_handle_properties`.
#[inline]
unsafe fn get_memory_zircon_handle_properties_fuchsia(
&self,
handle_type: ExternalMemoryHandleTypeFlags,
zircon_handle: zx_handle_t,
memory_zircon_handle_properties: &mut MemoryZirconHandlePropertiesFUCHSIA,
) -> crate::VkResult<()> {
let __result = (self.commands().get_memory_zircon_handle_properties_fuchsia)(
self.handle(),
handle_type,
zircon_handle,
memory_zircon_handle_properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> FuchsiaExternalMemoryExtensionDeviceCommands for C {}
/// Device-level commands for VK_FUCHSIA_external_semaphore.
pub trait FuchsiaExternalSemaphoreExtensionDeviceCommands: DeviceV1_0 {
/// Exports a semaphore as a Zircon handle
/// (vkGetSemaphoreZirconHandleFUCHSIA).
#[inline]
unsafe fn get_semaphore_zircon_handle_fuchsia(
&self,
get_zircon_handle_info: &SemaphoreGetZirconHandleInfoFUCHSIA,
) -> crate::VkResult<zx_handle_t> {
let mut zircon_handle = MaybeUninit::<zx_handle_t>::uninit();
let __result = (self.commands().get_semaphore_zircon_handle_fuchsia)(
self.handle(),
get_zircon_handle_info,
zircon_handle.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(zircon_handle.assume_init())
} else {
Err(__result.into())
}
}
/// Imports a Zircon handle into a semaphore
/// (vkImportSemaphoreZirconHandleFUCHSIA).
#[inline]
unsafe fn import_semaphore_zircon_handle_fuchsia(
&self,
import_semaphore_zircon_handle_info: &ImportSemaphoreZirconHandleInfoFUCHSIA,
) -> crate::VkResult<()> {
let __result = (self.commands().import_semaphore_zircon_handle_fuchsia)(
self.handle(),
import_semaphore_zircon_handle_info,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> FuchsiaExternalSemaphoreExtensionDeviceCommands for C {}
/// Instance-level commands for VK_FUCHSIA_imagepipe_surface.
pub trait FuchsiaImagepipeSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Creates a surface backed by a Fuchsia image pipe
/// (vkCreateImagePipeSurfaceFUCHSIA).
#[inline]
unsafe fn create_image_pipe_surface_fuchsia(
&self,
create_info: &ImagePipeSurfaceCreateInfoFUCHSIA,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_image_pipe_surface_fuchsia)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> FuchsiaImagepipeSurfaceExtensionInstanceCommands for C {}
/// Instance-level commands for VK_GGP_stream_descriptor_surface.
pub trait GgpStreamDescriptorSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Creates a surface from a GGP stream descriptor
/// (vkCreateStreamDescriptorSurfaceGGP).
#[inline]
unsafe fn create_stream_descriptor_surface_ggp(
&self,
create_info: &StreamDescriptorSurfaceCreateInfoGGP,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_stream_descriptor_surface_ggp)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> GgpStreamDescriptorSurfaceExtensionInstanceCommands for C {}
/// Device-level commands for VK_GOOGLE_display_timing.
pub trait GoogleDisplayTimingExtensionDeviceCommands: DeviceV1_0 {
/// Returns timing records for past presents on `swapchain`
/// (vkGetPastPresentationTimingGOOGLE), via the two-call
/// count-then-fill enumeration pattern.
#[inline]
unsafe fn get_past_presentation_timing_google(
&self,
swapchain: SwapchainKHR,
) -> crate::VkResult<Vec<PastPresentationTimingGOOGLE>> {
let mut presentation_timing_count = 0;
// First call queries the record count only.
(self.commands().get_past_presentation_timing_google)(
self.handle(),
swapchain,
&mut presentation_timing_count,
ptr::null_mut(),
);
let mut presentation_timings = Vec::with_capacity(presentation_timing_count as usize);
let __result = (self.commands().get_past_presentation_timing_google)(
self.handle(),
swapchain,
&mut presentation_timing_count,
presentation_timings.as_mut_ptr(),
);
debug_assert!(presentation_timings.capacity() >= presentation_timing_count as usize);
presentation_timings.set_len(presentation_timing_count as usize);
if __result == Result::SUCCESS {
Ok(presentation_timings)
} else {
Err(__result.into())
}
}
/// Returns the display's refresh cycle duration
/// (vkGetRefreshCycleDurationGOOGLE).
#[inline]
unsafe fn get_refresh_cycle_duration_google(
&self,
swapchain: SwapchainKHR,
) -> crate::VkResult<RefreshCycleDurationGOOGLE> {
let mut display_timing_properties = MaybeUninit::<RefreshCycleDurationGOOGLE>::uninit();
let __result = (self.commands().get_refresh_cycle_duration_google)(
self.handle(),
swapchain,
display_timing_properties.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(display_timing_properties.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> GoogleDisplayTimingExtensionDeviceCommands for C {}
/// Device-level commands for VK_HUAWEI_cluster_culling_shader.
pub trait HuaweiClusterCullingShaderExtensionDeviceCommands: DeviceV1_0 {
/// Dispatches cluster culling shader workgroups (vkCmdDrawClusterHUAWEI).
#[inline]
unsafe fn cmd_draw_cluster_huawei(
&self,
command_buffer: CommandBuffer,
group_count_x: u32,
group_count_y: u32,
group_count_z: u32,
) {
let __result = (self.commands().cmd_draw_cluster_huawei)(
command_buffer,
group_count_x,
group_count_y,
group_count_z,
);
}
/// Indirect variant reading dispatch parameters from `buffer` at
/// `offset` (vkCmdDrawClusterIndirectHUAWEI).
#[inline]
unsafe fn cmd_draw_cluster_indirect_huawei(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
) {
let __result =
(self.commands().cmd_draw_cluster_indirect_huawei)(command_buffer, buffer, offset);
}
}
impl<C: DeviceV1_0 + ?Sized> HuaweiClusterCullingShaderExtensionDeviceCommands for C {}
/// Device-level commands for VK_HUAWEI_invocation_mask.
pub trait HuaweiInvocationMaskExtensionDeviceCommands: DeviceV1_0 {
/// Binds an invocation mask image for ray tracing
/// (vkCmdBindInvocationMaskHUAWEI).
#[inline]
unsafe fn cmd_bind_invocation_mask_huawei(
&self,
command_buffer: CommandBuffer,
image_view: ImageView,
image_layout: ImageLayout,
) {
let __result = (self.commands().cmd_bind_invocation_mask_huawei)(
command_buffer,
image_view,
image_layout,
);
}
}
impl<C: DeviceV1_0 + ?Sized> HuaweiInvocationMaskExtensionDeviceCommands for C {}
/// Device-level commands for VK_HUAWEI_subpass_shading.
pub trait HuaweiSubpassShadingExtensionDeviceCommands: DeviceV1_0 {
/// Dispatches the subpass shading workload (vkCmdSubpassShadingHUAWEI).
#[inline]
unsafe fn cmd_subpass_shading_huawei(&self, command_buffer: CommandBuffer) {
let __result = (self.commands().cmd_subpass_shading_huawei)(command_buffer);
}
/// Queries the maximum subpass-shading workgroup size for `renderpass`
/// (vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI).
#[inline]
unsafe fn get_device_subpass_shading_max_workgroup_size_huawei(
&self,
renderpass: RenderPass,
) -> crate::VkResult<Extent2D> {
let mut max_workgroup_size = MaybeUninit::<Extent2D>::uninit();
let __result = (self
.commands()
.get_device_subpass_shading_max_workgroup_size_huawei)(
self.handle(),
renderpass,
max_workgroup_size.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(max_workgroup_size.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> HuaweiSubpassShadingExtensionDeviceCommands for C {}
/// Device-level commands for VK_INTEL_performance_query. All methods are
/// thin wrappers over the loaded function pointers; result-returning
/// commands map VK_SUCCESS to Ok and everything else to Err.
pub trait IntelPerformanceQueryExtensionDeviceCommands: DeviceV1_0 {
/// Acquires a performance configuration
/// (vkAcquirePerformanceConfigurationINTEL).
#[inline]
unsafe fn acquire_performance_configuration_intel(
&self,
acquire_info: &PerformanceConfigurationAcquireInfoINTEL,
) -> crate::VkResult<PerformanceConfigurationINTEL> {
let mut configuration = MaybeUninit::<PerformanceConfigurationINTEL>::uninit();
let __result = (self.commands().acquire_performance_configuration_intel)(
self.handle(),
acquire_info,
configuration.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(configuration.assume_init())
} else {
Err(__result.into())
}
}
/// Records a performance marker (vkCmdSetPerformanceMarkerINTEL).
#[inline]
unsafe fn cmd_set_performance_marker_intel(
&self,
command_buffer: CommandBuffer,
marker_info: &PerformanceMarkerInfoINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().cmd_set_performance_marker_intel)(command_buffer, marker_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Records a performance override (vkCmdSetPerformanceOverrideINTEL).
#[inline]
unsafe fn cmd_set_performance_override_intel(
&self,
command_buffer: CommandBuffer,
override_info: &PerformanceOverrideInfoINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().cmd_set_performance_override_intel)(command_buffer, override_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Records a stream marker (vkCmdSetPerformanceStreamMarkerINTEL).
#[inline]
unsafe fn cmd_set_performance_stream_marker_intel(
&self,
command_buffer: CommandBuffer,
marker_info: &PerformanceStreamMarkerInfoINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().cmd_set_performance_stream_marker_intel)(command_buffer, marker_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Reads a performance parameter (vkGetPerformanceParameterINTEL).
#[inline]
unsafe fn get_performance_parameter_intel(
&self,
parameter: PerformanceParameterTypeINTEL,
) -> crate::VkResult<PerformanceValueINTEL> {
let mut value = MaybeUninit::<PerformanceValueINTEL>::uninit();
let __result = (self.commands().get_performance_parameter_intel)(
self.handle(),
parameter,
value.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(value.assume_init())
} else {
Err(__result.into())
}
}
/// Initializes the performance API (vkInitializePerformanceApiINTEL).
#[inline]
unsafe fn initialize_performance_api_intel(
&self,
initialize_info: &InitializePerformanceApiInfoINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().initialize_performance_api_intel)(self.handle(), initialize_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Binds a configuration to a queue
/// (vkQueueSetPerformanceConfigurationINTEL).
#[inline]
unsafe fn queue_set_performance_configuration_intel(
&self,
queue: Queue,
configuration: PerformanceConfigurationINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().queue_set_performance_configuration_intel)(queue, configuration);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Releases a configuration (vkReleasePerformanceConfigurationINTEL).
#[inline]
unsafe fn release_performance_configuration_intel(
&self,
configuration: PerformanceConfigurationINTEL,
) -> crate::VkResult<()> {
let __result =
(self.commands().release_performance_configuration_intel)(self.handle(), configuration);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Tears down the performance API (vkUninitializePerformanceApiINTEL).
#[inline]
unsafe fn uninitialize_performance_api_intel(&self) {
let __result = (self.commands().uninitialize_performance_api_intel)(self.handle());
}
}
impl<C: DeviceV1_0 + ?Sized> IntelPerformanceQueryExtensionDeviceCommands for C {}
/// Device-level commands for VK_KHR_acceleration_structure. Slices are
/// passed to the driver as (len as u32, raw pointer) pairs; `Cast` bounds
/// let wrapper types reinterpret as the raw Vulkan structs. Commands that
/// may return non-zero success codes (deferred operations) surface them
/// as `SuccessCode`.
pub trait KhrAccelerationStructureExtensionDeviceCommands: DeviceV1_0 {
/// Host-side acceleration structure build
/// (vkBuildAccelerationStructuresKHR). `build_range_infos` must be
/// parallel to `infos` — one inner slice per geometry info.
#[inline]
unsafe fn build_acceleration_structures_khr(
&self,
deferred_operation: DeferredOperationKHR,
infos: &[impl Cast<Target = AccelerationStructureBuildGeometryInfoKHR>],
build_range_infos: &[&[impl Cast<Target = AccelerationStructureBuildRangeInfoKHR>]],
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().build_acceleration_structures_khr)(
self.handle(),
deferred_operation,
infos.len() as u32,
infos.as_ptr().cast(),
// &[&[T]] -> *const *const T: outer slice of inner pointers.
build_range_infos.as_ptr().cast(),
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Indirect device-side build
/// (vkCmdBuildAccelerationStructuresIndirectKHR). The three parallel
/// slices are indexed by the driver per `infos` entry.
#[inline]
unsafe fn cmd_build_acceleration_structures_indirect_khr(
&self,
command_buffer: CommandBuffer,
infos: &[impl Cast<Target = AccelerationStructureBuildGeometryInfoKHR>],
indirect_device_addresses: &[DeviceAddress],
indirect_strides: &[u32],
max_primitive_counts: &[&[u32]],
) {
let __result = (self
.commands()
.cmd_build_acceleration_structures_indirect_khr)(
command_buffer,
infos.len() as u32,
infos.as_ptr().cast(),
indirect_device_addresses.as_ptr(),
indirect_strides.as_ptr(),
max_primitive_counts.as_ptr().cast(),
);
}
/// Device-side build (vkCmdBuildAccelerationStructuresKHR).
#[inline]
unsafe fn cmd_build_acceleration_structures_khr(
&self,
command_buffer: CommandBuffer,
infos: &[impl Cast<Target = AccelerationStructureBuildGeometryInfoKHR>],
build_range_infos: &[&[impl Cast<Target = AccelerationStructureBuildRangeInfoKHR>]],
) {
let __result = (self.commands().cmd_build_acceleration_structures_khr)(
command_buffer,
infos.len() as u32,
infos.as_ptr().cast(),
build_range_infos.as_ptr().cast(),
);
}
/// Copies an acceleration structure (vkCmdCopyAccelerationStructureKHR).
#[inline]
unsafe fn cmd_copy_acceleration_structure_khr(
&self,
command_buffer: CommandBuffer,
info: &CopyAccelerationStructureInfoKHR,
) {
let __result = (self.commands().cmd_copy_acceleration_structure_khr)(command_buffer, info);
}
/// Serializes an acceleration structure to memory
/// (vkCmdCopyAccelerationStructureToMemoryKHR).
#[inline]
unsafe fn cmd_copy_acceleration_structure_to_memory_khr(
&self,
command_buffer: CommandBuffer,
info: &CopyAccelerationStructureToMemoryInfoKHR,
) {
let __result =
(self
.commands()
.cmd_copy_acceleration_structure_to_memory_khr)(command_buffer, info);
}
/// Deserializes an acceleration structure from memory
/// (vkCmdCopyMemoryToAccelerationStructureKHR).
#[inline]
unsafe fn cmd_copy_memory_to_acceleration_structure_khr(
&self,
command_buffer: CommandBuffer,
info: &CopyMemoryToAccelerationStructureInfoKHR,
) {
let __result =
(self
.commands()
.cmd_copy_memory_to_acceleration_structure_khr)(command_buffer, info);
}
/// Writes acceleration structure properties into a query pool
/// (vkCmdWriteAccelerationStructuresPropertiesKHR).
#[inline]
unsafe fn cmd_write_acceleration_structures_properties_khr(
&self,
command_buffer: CommandBuffer,
acceleration_structures: &[AccelerationStructureKHR],
query_type: QueryType,
query_pool: QueryPool,
first_query: u32,
) {
let __result = (self
.commands()
.cmd_write_acceleration_structures_properties_khr)(
command_buffer,
acceleration_structures.len() as u32,
acceleration_structures.as_ptr(),
query_type,
query_pool,
first_query,
);
}
/// Host-side copy (vkCopyAccelerationStructureKHR); deferred operations
/// yield a non-zero success code.
#[inline]
unsafe fn copy_acceleration_structure_khr(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyAccelerationStructureInfoKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().copy_acceleration_structure_khr)(
self.handle(),
deferred_operation,
info,
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Host-side serialize to memory
/// (vkCopyAccelerationStructureToMemoryKHR).
#[inline]
unsafe fn copy_acceleration_structure_to_memory_khr(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyAccelerationStructureToMemoryInfoKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().copy_acceleration_structure_to_memory_khr)(
self.handle(),
deferred_operation,
info,
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Host-side deserialize from memory
/// (vkCopyMemoryToAccelerationStructureKHR).
#[inline]
unsafe fn copy_memory_to_acceleration_structure_khr(
&self,
deferred_operation: DeferredOperationKHR,
info: &CopyMemoryToAccelerationStructureInfoKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().copy_memory_to_acceleration_structure_khr)(
self.handle(),
deferred_operation,
info,
);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Creates an acceleration structure (vkCreateAccelerationStructureKHR).
#[inline]
unsafe fn create_acceleration_structure_khr(
&self,
create_info: &AccelerationStructureCreateInfoKHR,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<AccelerationStructureKHR> {
let mut acceleration_structure = MaybeUninit::<AccelerationStructureKHR>::uninit();
let __result = (self.commands().create_acceleration_structure_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
acceleration_structure.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(acceleration_structure.assume_init())
} else {
Err(__result.into())
}
}
/// Destroys an acceleration structure (vkDestroyAccelerationStructureKHR).
#[inline]
unsafe fn destroy_acceleration_structure_khr(
&self,
acceleration_structure: AccelerationStructureKHR,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_acceleration_structure_khr)(
self.handle(),
acceleration_structure,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Queries build sizes, written into `size_info`
/// (vkGetAccelerationStructureBuildSizesKHR).
#[inline]
unsafe fn get_acceleration_structure_build_sizes_khr(
&self,
build_type: AccelerationStructureBuildTypeKHR,
build_info: &AccelerationStructureBuildGeometryInfoKHR,
max_primitive_counts: &[u32],
size_info: &mut AccelerationStructureBuildSizesInfoKHR,
) {
let __result = (self.commands().get_acceleration_structure_build_sizes_khr)(
self.handle(),
build_type,
build_info,
max_primitive_counts.as_ptr(),
size_info,
);
}
/// Returns the structure's device address
/// (vkGetAccelerationStructureDeviceAddressKHR).
#[inline]
unsafe fn get_acceleration_structure_device_address_khr(
&self,
info: &AccelerationStructureDeviceAddressInfoKHR,
) -> DeviceAddress {
let __result =
(self
.commands()
.get_acceleration_structure_device_address_khr)(self.handle(), info);
__result
}
/// Checks serialized-structure compatibility with this device
/// (vkGetDeviceAccelerationStructureCompatibilityKHR). The raw call
/// has no return value; the out-parameter is always written.
#[inline]
unsafe fn get_device_acceleration_structure_compatibility_khr(
&self,
version_info: &AccelerationStructureVersionInfoKHR,
) -> AccelerationStructureCompatibilityKHR {
let mut compatibility = MaybeUninit::<AccelerationStructureCompatibilityKHR>::uninit();
let __result = (self
.commands()
.get_device_acceleration_structure_compatibility_khr)(
self.handle(),
version_info,
compatibility.as_mut_ptr(),
);
compatibility.assume_init()
}
/// Host-side property query writing into `data`
/// (vkWriteAccelerationStructuresPropertiesKHR).
#[inline]
unsafe fn write_acceleration_structures_properties_khr(
&self,
acceleration_structures: &[AccelerationStructureKHR],
query_type: QueryType,
data: &mut [u8],
stride: usize,
) -> crate::VkResult<()> {
let __result = (self.commands().write_acceleration_structures_properties_khr)(
self.handle(),
acceleration_structures.len() as u32,
acceleration_structures.as_ptr(),
query_type,
data.len() as usize,
data.as_mut_ptr() as *mut c_void,
stride,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrAccelerationStructureExtensionDeviceCommands for C {}
/// Instance-level commands for VK_KHR_android_surface.
pub trait KhrAndroidSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Creates a surface for an Android native window
/// (vkCreateAndroidSurfaceKHR).
#[inline]
unsafe fn create_android_surface_khr(
&self,
create_info: &AndroidSurfaceCreateInfoKHR,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_android_surface_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrAndroidSurfaceExtensionInstanceCommands for C {}
/// Device-level commands for VK_KHR_bind_memory2.
pub trait KhrBindMemory2ExtensionDeviceCommands: DeviceV1_0 {
/// Binds memory to multiple buffers in one call (vkBindBufferMemory2KHR).
#[inline]
unsafe fn bind_buffer_memory2_khr(
&self,
bind_infos: &[impl Cast<Target = BindBufferMemoryInfo>],
) -> crate::VkResult<()> {
let __result = (self.commands().bind_buffer_memory2_khr)(
self.handle(),
bind_infos.len() as u32,
bind_infos.as_ptr().cast(),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Binds memory to multiple images in one call (vkBindImageMemory2KHR).
#[inline]
unsafe fn bind_image_memory2_khr(
&self,
bind_infos: &[impl Cast<Target = BindImageMemoryInfo>],
) -> crate::VkResult<()> {
let __result = (self.commands().bind_image_memory2_khr)(
self.handle(),
bind_infos.len() as u32,
bind_infos.as_ptr().cast(),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrBindMemory2ExtensionDeviceCommands for C {}
/// Device-level commands for VK_KHR_buffer_device_address. These raw
/// queries return values directly (no VkResult).
pub trait KhrBufferDeviceAddressExtensionDeviceCommands: DeviceV1_0 {
/// Returns a buffer's device address (vkGetBufferDeviceAddressKHR).
#[inline]
unsafe fn get_buffer_device_address_khr(
&self,
info: &BufferDeviceAddressInfo,
) -> DeviceAddress {
let __result = (self.commands().get_buffer_device_address_khr)(self.handle(), info);
__result
}
/// Returns a buffer's opaque capture address
/// (vkGetBufferOpaqueCaptureAddressKHR).
#[inline]
unsafe fn get_buffer_opaque_capture_address_khr(&self, info: &BufferDeviceAddressInfo) -> u64 {
let __result = (self.commands().get_buffer_opaque_capture_address_khr)(self.handle(), info);
__result
}
/// Returns a memory object's opaque capture address
/// (vkGetDeviceMemoryOpaqueCaptureAddressKHR).
#[inline]
unsafe fn get_device_memory_opaque_capture_address_khr(
&self,
info: &DeviceMemoryOpaqueCaptureAddressInfo,
) -> u64 {
let __result =
(self.commands().get_device_memory_opaque_capture_address_khr)(self.handle(), info);
__result
}
}
impl<C: DeviceV1_0 + ?Sized> KhrBufferDeviceAddressExtensionDeviceCommands for C {}
/// Device-level command wrapper for the `VK_KHR_calibrated_timestamps` extension.
pub trait KhrCalibratedTimestampsExtensionDeviceCommands: DeviceV1_0 {
/// Reads calibrated timestamps for the requested time domains
/// (`vkGetCalibratedTimestampsKHR`).
///
/// Returns one timestamp per entry of `timestamp_infos`, plus the maximum
/// deviation reported by the driver.
#[inline]
unsafe fn get_calibrated_timestamps_khr(
&self,
timestamp_infos: &[impl Cast<Target = CalibratedTimestampInfoKHR>],
) -> crate::VkResult<(Vec<u64>, u64)> {
let mut timestamps = Vec::with_capacity(timestamp_infos.len() as usize);
let mut max_deviation = MaybeUninit::<u64>::uninit();
let __result = (self.commands().get_calibrated_timestamps_khr)(
self.handle(),
timestamp_infos.len() as u32,
timestamp_infos.as_ptr().cast(),
timestamps.as_mut_ptr(),
max_deviation.as_mut_ptr(),
);
// Length is set before the status check (generator convention); presumably the
// driver fills all entries on success — the Vec is only returned on SUCCESS.
debug_assert!(timestamps.capacity() >= timestamp_infos.len() as usize);
timestamps.set_len(timestamp_infos.len() as usize);
if __result == Result::SUCCESS {
Ok((timestamps, max_deviation.assume_init()))
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrCalibratedTimestampsExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_calibrated_timestamps` extension.
pub trait KhrCalibratedTimestampsExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates time domains usable for timestamp calibration
/// (`vkGetPhysicalDeviceCalibrateableTimeDomainsKHR`).
#[inline]
unsafe fn get_physical_device_calibrateable_time_domains_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<TimeDomainKHR>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut time_domain_count = 0;
(self
.commands()
.get_physical_device_calibrateable_time_domains_khr)(
physical_device,
&mut time_domain_count,
ptr::null_mut(),
);
let mut time_domains = Vec::with_capacity(time_domain_count as usize);
let __result = (self
.commands()
.get_physical_device_calibrateable_time_domains_khr)(
physical_device,
&mut time_domain_count,
time_domains.as_mut_ptr(),
);
debug_assert!(time_domains.capacity() >= time_domain_count as usize);
time_domains.set_len(time_domain_count as usize);
if __result == Result::SUCCESS {
Ok(time_domains)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrCalibratedTimestampsExtensionInstanceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_cooperative_matrix` extension.
pub trait KhrCooperativeMatrixExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates the supported cooperative-matrix configurations
/// (`vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR`).
#[inline]
unsafe fn get_physical_device_cooperative_matrix_properties_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<CooperativeMatrixPropertiesKHR>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut property_count = 0;
(self
.commands()
.get_physical_device_cooperative_matrix_properties_khr)(
physical_device,
&mut property_count,
ptr::null_mut(),
);
// Default-initialized here (not just capacity) because these structs carry
// `sType`/`pNext` chains the driver expects to be pre-populated.
let mut properties =
::alloc::vec![CooperativeMatrixPropertiesKHR::default(); property_count as usize];
let __result = (self
.commands()
.get_physical_device_cooperative_matrix_properties_khr)(
physical_device,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
// Re-apply the (possibly updated) count written back by the second call.
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrCooperativeMatrixExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_copy_commands2` extension.
///
/// All of these record a command into `command_buffer` and return nothing;
/// errors surface later at queue submission.
pub trait KhrCopyCommands2ExtensionDeviceCommands: DeviceV1_0 {
/// Records an image blit (`vkCmdBlitImage2KHR`).
#[inline]
unsafe fn cmd_blit_image2_khr(
&self,
command_buffer: CommandBuffer,
blit_image_info: &BlitImageInfo2,
) {
let __result = (self.commands().cmd_blit_image2_khr)(command_buffer, blit_image_info);
}
/// Records a buffer-to-buffer copy (`vkCmdCopyBuffer2KHR`).
#[inline]
unsafe fn cmd_copy_buffer2_khr(
&self,
command_buffer: CommandBuffer,
copy_buffer_info: &CopyBufferInfo2,
) {
let __result = (self.commands().cmd_copy_buffer2_khr)(command_buffer, copy_buffer_info);
}
/// Records a buffer-to-image copy (`vkCmdCopyBufferToImage2KHR`).
#[inline]
unsafe fn cmd_copy_buffer_to_image2_khr(
&self,
command_buffer: CommandBuffer,
copy_buffer_to_image_info: &CopyBufferToImageInfo2,
) {
let __result = (self.commands().cmd_copy_buffer_to_image2_khr)(
command_buffer,
copy_buffer_to_image_info,
);
}
/// Records an image-to-image copy (`vkCmdCopyImage2KHR`).
#[inline]
unsafe fn cmd_copy_image2_khr(
&self,
command_buffer: CommandBuffer,
copy_image_info: &CopyImageInfo2,
) {
let __result = (self.commands().cmd_copy_image2_khr)(command_buffer, copy_image_info);
}
/// Records an image-to-buffer copy (`vkCmdCopyImageToBuffer2KHR`).
#[inline]
unsafe fn cmd_copy_image_to_buffer2_khr(
&self,
command_buffer: CommandBuffer,
copy_image_to_buffer_info: &CopyImageToBufferInfo2,
) {
let __result = (self.commands().cmd_copy_image_to_buffer2_khr)(
command_buffer,
copy_image_to_buffer_info,
);
}
/// Records a multisample resolve (`vkCmdResolveImage2KHR`).
#[inline]
unsafe fn cmd_resolve_image2_khr(
&self,
command_buffer: CommandBuffer,
resolve_image_info: &ResolveImageInfo2,
) {
let __result = (self.commands().cmd_resolve_image2_khr)(command_buffer, resolve_image_info);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrCopyCommands2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_copy_memory_indirect` extension.
pub trait KhrCopyMemoryIndirectExtensionDeviceCommands: DeviceV1_0 {
/// Records an indirect memory-to-memory copy (`vkCmdCopyMemoryIndirectKHR`).
#[inline]
unsafe fn cmd_copy_memory_indirect_khr(
&self,
command_buffer: CommandBuffer,
copy_memory_indirect_info: &CopyMemoryIndirectInfoKHR,
) {
let __result = (self.commands().cmd_copy_memory_indirect_khr)(
command_buffer,
copy_memory_indirect_info,
);
}
/// Records an indirect memory-to-image copy (`vkCmdCopyMemoryToImageIndirectKHR`).
#[inline]
unsafe fn cmd_copy_memory_to_image_indirect_khr(
&self,
command_buffer: CommandBuffer,
copy_memory_to_image_indirect_info: &CopyMemoryToImageIndirectInfoKHR,
) {
let __result = (self.commands().cmd_copy_memory_to_image_indirect_khr)(
command_buffer,
copy_memory_to_image_indirect_info,
);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrCopyMemoryIndirectExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_create_renderpass2` extension.
pub trait KhrCreateRenderpass2ExtensionDeviceCommands: DeviceV1_0 {
/// Records the start of a render pass instance (`vkCmdBeginRenderPass2KHR`).
#[inline]
unsafe fn cmd_begin_render_pass2_khr(
&self,
command_buffer: CommandBuffer,
render_pass_begin: &RenderPassBeginInfo,
subpass_begin_info: &SubpassBeginInfo,
) {
let __result = (self.commands().cmd_begin_render_pass2_khr)(
command_buffer,
render_pass_begin,
subpass_begin_info,
);
}
/// Records the end of a render pass instance (`vkCmdEndRenderPass2KHR`).
#[inline]
unsafe fn cmd_end_render_pass2_khr(
&self,
command_buffer: CommandBuffer,
subpass_end_info: &SubpassEndInfo,
) {
let __result = (self.commands().cmd_end_render_pass2_khr)(command_buffer, subpass_end_info);
}
/// Records a transition to the next subpass (`vkCmdNextSubpass2KHR`).
#[inline]
unsafe fn cmd_next_subpass2_khr(
&self,
command_buffer: CommandBuffer,
subpass_begin_info: &SubpassBeginInfo,
subpass_end_info: &SubpassEndInfo,
) {
let __result = (self.commands().cmd_next_subpass2_khr)(
command_buffer,
subpass_begin_info,
subpass_end_info,
);
}
/// Creates a render pass from the extended create info (`vkCreateRenderPass2KHR`).
#[inline]
unsafe fn create_render_pass2_khr(
&self,
create_info: &RenderPassCreateInfo2,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<RenderPass> {
// Output handle is written by the driver; only read after a success check.
let mut render_pass = MaybeUninit::<RenderPass>::uninit();
let __result = (self.commands().create_render_pass2_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
render_pass.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(render_pass.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrCreateRenderpass2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_deferred_host_operations` extension.
pub trait KhrDeferredHostOperationsExtensionDeviceCommands: DeviceV1_0 {
/// Creates a deferred-operation object (`vkCreateDeferredOperationKHR`).
#[inline]
unsafe fn create_deferred_operation_khr(
&self,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<DeferredOperationKHR> {
let mut deferred_operation = MaybeUninit::<DeferredOperationKHR>::uninit();
let __result = (self.commands().create_deferred_operation_khr)(
self.handle(),
allocator.map_or(ptr::null(), |v| v),
deferred_operation.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(deferred_operation.assume_init())
} else {
Err(__result.into())
}
}
/// Has the calling thread join work on a deferred operation
/// (`vkDeferredOperationJoinKHR`).
///
/// Any non-negative result is treated as a success code and returned in `Ok`.
#[inline]
unsafe fn deferred_operation_join_khr(
&self,
operation: DeferredOperationKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().deferred_operation_join_khr)(self.handle(), operation);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Destroys a deferred-operation object (`vkDestroyDeferredOperationKHR`).
#[inline]
unsafe fn destroy_deferred_operation_khr(
&self,
operation: DeferredOperationKHR,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_deferred_operation_khr)(
self.handle(),
operation,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Queries how many threads can usefully join an operation
/// (`vkGetDeferredOperationMaxConcurrencyKHR`).
#[inline]
unsafe fn get_deferred_operation_max_concurrency_khr(
&self,
operation: DeferredOperationKHR,
) -> u32 {
let __result =
(self.commands().get_deferred_operation_max_concurrency_khr)(self.handle(), operation);
__result
}
/// Queries the result of a completed deferred operation
/// (`vkGetDeferredOperationResultKHR`).
///
/// Any non-negative result is treated as a success code and returned in `Ok`.
#[inline]
unsafe fn get_deferred_operation_result_khr(
&self,
operation: DeferredOperationKHR,
) -> crate::VkResult<SuccessCode> {
let __result =
(self.commands().get_deferred_operation_result_khr)(self.handle(), operation);
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDeferredHostOperationsExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_descriptor_update_template` extension.
pub trait KhrDescriptorUpdateTemplateExtensionDeviceCommands: DeviceV1_0 {
/// Records a templated push-descriptor update
/// (`vkCmdPushDescriptorSetWithTemplateKHR`).
///
/// `data` is raw descriptor payload laid out as described by the template;
/// the caller must ensure it matches the template's entries.
#[inline]
unsafe fn cmd_push_descriptor_set_with_template_khr(
&self,
command_buffer: CommandBuffer,
descriptor_update_template: DescriptorUpdateTemplate,
layout: PipelineLayout,
set: u32,
data: *const c_void,
) {
let __result = (self.commands().cmd_push_descriptor_set_with_template_khr)(
command_buffer,
descriptor_update_template,
layout,
set,
data,
);
}
/// Creates a descriptor update template (`vkCreateDescriptorUpdateTemplateKHR`).
#[inline]
unsafe fn create_descriptor_update_template_khr(
&self,
create_info: &DescriptorUpdateTemplateCreateInfo,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<DescriptorUpdateTemplate> {
let mut descriptor_update_template = MaybeUninit::<DescriptorUpdateTemplate>::uninit();
let __result = (self.commands().create_descriptor_update_template_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
descriptor_update_template.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(descriptor_update_template.assume_init())
} else {
Err(__result.into())
}
}
/// Destroys a descriptor update template (`vkDestroyDescriptorUpdateTemplateKHR`).
#[inline]
unsafe fn destroy_descriptor_update_template_khr(
&self,
descriptor_update_template: DescriptorUpdateTemplate,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_descriptor_update_template_khr)(
self.handle(),
descriptor_update_template,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Updates a descriptor set from raw payload data using a template
/// (`vkUpdateDescriptorSetWithTemplateKHR`).
#[inline]
unsafe fn update_descriptor_set_with_template_khr(
&self,
descriptor_set: DescriptorSet,
descriptor_update_template: DescriptorUpdateTemplate,
data: *const c_void,
) {
let __result = (self.commands().update_descriptor_set_with_template_khr)(
self.handle(),
descriptor_set,
descriptor_update_template,
data,
);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDescriptorUpdateTemplateExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_device_group` extension.
pub trait KhrDeviceGroupExtensionDeviceCommands: DeviceV1_0 {
/// Acquires the next presentable image with device-group info
/// (`vkAcquireNextImage2KHR`).
///
/// Any non-negative result (e.g. a non-`SUCCESS` success code) is returned in
/// `Ok` alongside the acquired image index.
#[inline]
unsafe fn acquire_next_image2_khr(
&self,
acquire_info: &AcquireNextImageInfoKHR,
) -> crate::VkSuccessResult<u32> {
let mut image_index = MaybeUninit::<u32>::uninit();
let __result = (self.commands().acquire_next_image2_khr)(
self.handle(),
acquire_info,
image_index.as_mut_ptr(),
);
if __result >= Result::SUCCESS {
Ok((image_index.assume_init(), __result.into()))
} else {
Err(__result.into())
}
}
/// Records a dispatch with a non-zero base workgroup (`vkCmdDispatchBaseKHR`).
#[inline]
unsafe fn cmd_dispatch_base_khr(
&self,
command_buffer: CommandBuffer,
base_group_x: u32,
base_group_y: u32,
base_group_z: u32,
group_count_x: u32,
group_count_y: u32,
group_count_z: u32,
) {
let __result = (self.commands().cmd_dispatch_base_khr)(
command_buffer,
base_group_x,
base_group_y,
base_group_z,
group_count_x,
group_count_y,
group_count_z,
);
}
/// Records the device mask for subsequent commands (`vkCmdSetDeviceMaskKHR`).
#[inline]
unsafe fn cmd_set_device_mask_khr(&self, command_buffer: CommandBuffer, device_mask: u32) {
let __result = (self.commands().cmd_set_device_mask_khr)(command_buffer, device_mask);
}
/// Queries peer-memory capabilities between two devices in the group
/// (`vkGetDeviceGroupPeerMemoryFeaturesKHR`).
#[inline]
unsafe fn get_device_group_peer_memory_features_khr(
&self,
heap_index: u32,
local_device_index: u32,
remote_device_index: u32,
) -> PeerMemoryFeatureFlags {
let mut peer_memory_features = MaybeUninit::<PeerMemoryFeatureFlags>::uninit();
let __result = (self.commands().get_device_group_peer_memory_features_khr)(
self.handle(),
heap_index,
local_device_index,
remote_device_index,
peer_memory_features.as_mut_ptr(),
);
// This command has no status result; the out-parameter is always written.
peer_memory_features.assume_init()
}
/// Queries present capabilities of the device group
/// (`vkGetDeviceGroupPresentCapabilitiesKHR`).
#[inline]
unsafe fn get_device_group_present_capabilities_khr(
&self,
device_group_present_capabilities: &mut DeviceGroupPresentCapabilitiesKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().get_device_group_present_capabilities_khr)(
self.handle(),
device_group_present_capabilities,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Queries supported device-group present modes for a surface
/// (`vkGetDeviceGroupSurfacePresentModesKHR`).
#[inline]
unsafe fn get_device_group_surface_present_modes_khr(
&self,
surface: SurfaceKHR,
) -> crate::VkResult<DeviceGroupPresentModeFlagsKHR> {
let mut modes = MaybeUninit::<DeviceGroupPresentModeFlagsKHR>::uninit();
let __result = (self.commands().get_device_group_surface_present_modes_khr)(
self.handle(),
surface,
modes.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(modes.assume_init())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDeviceGroupExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_device_group` extension.
pub trait KhrDeviceGroupExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates the rectangles a surface can present to per device
/// (`vkGetPhysicalDevicePresentRectanglesKHR`).
#[inline]
unsafe fn get_physical_device_present_rectangles_khr(
&self,
physical_device: PhysicalDevice,
surface: SurfaceKHR,
) -> crate::VkResult<Vec<Rect2D>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut rect_count = 0;
(self.commands().get_physical_device_present_rectangles_khr)(
physical_device,
surface,
&mut rect_count,
ptr::null_mut(),
);
let mut rects = Vec::with_capacity(rect_count as usize);
let __result = (self.commands().get_physical_device_present_rectangles_khr)(
physical_device,
surface,
&mut rect_count,
rects.as_mut_ptr(),
);
debug_assert!(rects.capacity() >= rect_count as usize);
rects.set_len(rect_count as usize);
if __result == Result::SUCCESS {
Ok(rects)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrDeviceGroupExtensionInstanceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_device_group_creation` extension.
pub trait KhrDeviceGroupCreationExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates physical-device groups (`vkEnumeratePhysicalDeviceGroupsKHR`).
#[inline]
unsafe fn enumerate_physical_device_groups_khr(
&self,
) -> crate::VkResult<Vec<PhysicalDeviceGroupProperties>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut physical_device_group_count = 0;
(self.commands().enumerate_physical_device_groups_khr)(
self.handle(),
&mut physical_device_group_count,
ptr::null_mut(),
);
let mut physical_device_group_properties =
Vec::with_capacity(physical_device_group_count as usize);
let __result = (self.commands().enumerate_physical_device_groups_khr)(
self.handle(),
&mut physical_device_group_count,
physical_device_group_properties.as_mut_ptr(),
);
debug_assert!(
physical_device_group_properties.capacity() >= physical_device_group_count as usize
);
physical_device_group_properties.set_len(physical_device_group_count as usize);
if __result == Result::SUCCESS {
Ok(physical_device_group_properties)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrDeviceGroupCreationExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_display` extension.
pub trait KhrDisplayExtensionInstanceCommands: InstanceV1_0 {
/// Creates a display mode for a display (`vkCreateDisplayModeKHR`).
#[inline]
unsafe fn create_display_mode_khr(
&self,
physical_device: PhysicalDevice,
display: DisplayKHR,
create_info: &DisplayModeCreateInfoKHR,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<DisplayModeKHR> {
let mut mode = MaybeUninit::<DisplayModeKHR>::uninit();
let __result = (self.commands().create_display_mode_khr)(
physical_device,
display,
create_info,
allocator.map_or(ptr::null(), |v| v),
mode.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(mode.assume_init())
} else {
Err(__result.into())
}
}
/// Creates a surface targeting a display plane (`vkCreateDisplayPlaneSurfaceKHR`).
#[inline]
unsafe fn create_display_plane_surface_khr(
&self,
create_info: &DisplaySurfaceCreateInfoKHR,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SurfaceKHR> {
let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
let __result = (self.commands().create_display_plane_surface_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
surface.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface.assume_init())
} else {
Err(__result.into())
}
}
/// Enumerates the modes of a display (`vkGetDisplayModePropertiesKHR`).
#[inline]
unsafe fn get_display_mode_properties_khr(
&self,
physical_device: PhysicalDevice,
display: DisplayKHR,
) -> crate::VkResult<Vec<DisplayModePropertiesKHR>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut property_count = 0;
(self.commands().get_display_mode_properties_khr)(
physical_device,
display,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self.commands().get_display_mode_properties_khr)(
physical_device,
display,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
/// Queries plane capabilities for a display mode
/// (`vkGetDisplayPlaneCapabilitiesKHR`).
#[inline]
unsafe fn get_display_plane_capabilities_khr(
&self,
physical_device: PhysicalDevice,
mode: DisplayModeKHR,
plane_index: u32,
) -> crate::VkResult<DisplayPlaneCapabilitiesKHR> {
let mut capabilities = MaybeUninit::<DisplayPlaneCapabilitiesKHR>::uninit();
let __result = (self.commands().get_display_plane_capabilities_khr)(
physical_device,
mode,
plane_index,
capabilities.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(capabilities.assume_init())
} else {
Err(__result.into())
}
}
/// Enumerates displays a plane can be used with
/// (`vkGetDisplayPlaneSupportedDisplaysKHR`).
#[inline]
unsafe fn get_display_plane_supported_displays_khr(
&self,
physical_device: PhysicalDevice,
plane_index: u32,
) -> crate::VkResult<Vec<DisplayKHR>> {
let mut display_count = 0;
(self.commands().get_display_plane_supported_displays_khr)(
physical_device,
plane_index,
&mut display_count,
ptr::null_mut(),
);
let mut displays = Vec::with_capacity(display_count as usize);
let __result = (self.commands().get_display_plane_supported_displays_khr)(
physical_device,
plane_index,
&mut display_count,
displays.as_mut_ptr(),
);
debug_assert!(displays.capacity() >= display_count as usize);
displays.set_len(display_count as usize);
if __result == Result::SUCCESS {
Ok(displays)
} else {
Err(__result.into())
}
}
/// Enumerates the display planes of a physical device
/// (`vkGetPhysicalDeviceDisplayPlanePropertiesKHR`).
#[inline]
unsafe fn get_physical_device_display_plane_properties_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<DisplayPlanePropertiesKHR>> {
let mut property_count = 0;
(self
.commands()
.get_physical_device_display_plane_properties_khr)(
physical_device,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self
.commands()
.get_physical_device_display_plane_properties_khr)(
physical_device,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
/// Enumerates the displays of a physical device
/// (`vkGetPhysicalDeviceDisplayPropertiesKHR`).
#[inline]
unsafe fn get_physical_device_display_properties_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<DisplayPropertiesKHR>> {
let mut property_count = 0;
(self.commands().get_physical_device_display_properties_khr)(
physical_device,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self.commands().get_physical_device_display_properties_khr)(
physical_device,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrDisplayExtensionInstanceCommands for C {}
/// Device-level command wrapper for the `VK_KHR_display_swapchain` extension.
pub trait KhrDisplaySwapchainExtensionDeviceCommands: DeviceV1_0 {
/// Creates one swapchain per entry of `create_infos` in a single call
/// (`vkCreateSharedSwapchainsKHR`).
///
/// Returns the created swapchain handles on `VK_SUCCESS`, otherwise the
/// error code.
#[inline]
unsafe fn create_shared_swapchains_khr(
&self,
create_infos: &[impl Cast<Target = SwapchainCreateInfoKHR>],
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<Vec<SwapchainKHR>> {
// The driver writes exactly one handle per create info.
let mut swapchains = Vec::with_capacity(create_infos.len() as usize);
let __result = (self.commands().create_shared_swapchains_khr)(
self.handle(),
create_infos.len() as u32,
create_infos.as_ptr().cast(),
// A null allocator selects the implementation's default allocation callbacks.
allocator.map_or(ptr::null(), |v| v),
swapchains.as_mut_ptr(),
);
// Guard the unchecked set_len against the allocation size, consistent with
// the other driver-filled-array wrappers in this file (e.g.
// `get_calibrated_timestamps_khr`).
debug_assert!(swapchains.capacity() >= create_infos.len() as usize);
swapchains.set_len(create_infos.len() as usize);
if __result == Result::SUCCESS {
Ok(swapchains)
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDisplaySwapchainExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_draw_indirect_count` extension.
pub trait KhrDrawIndirectCountExtensionDeviceCommands: DeviceV1_0 {
/// Records an indexed indirect draw whose draw count is read from
/// `count_buffer` (`vkCmdDrawIndexedIndirectCountKHR`).
#[inline]
unsafe fn cmd_draw_indexed_indirect_count_khr(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
count_buffer: Buffer,
count_buffer_offset: DeviceSize,
max_draw_count: u32,
stride: u32,
) {
let __result = (self.commands().cmd_draw_indexed_indirect_count_khr)(
command_buffer,
buffer,
offset,
count_buffer,
count_buffer_offset,
max_draw_count,
stride,
);
}
/// Records an indirect draw whose draw count is read from `count_buffer`
/// (`vkCmdDrawIndirectCountKHR`).
#[inline]
unsafe fn cmd_draw_indirect_count_khr(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
count_buffer: Buffer,
count_buffer_offset: DeviceSize,
max_draw_count: u32,
stride: u32,
) {
let __result = (self.commands().cmd_draw_indirect_count_khr)(
command_buffer,
buffer,
offset,
count_buffer,
count_buffer_offset,
max_draw_count,
stride,
);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDrawIndirectCountExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_dynamic_rendering` extension.
pub trait KhrDynamicRenderingExtensionDeviceCommands: DeviceV1_0 {
/// Records the start of a dynamic rendering pass (`vkCmdBeginRenderingKHR`).
#[inline]
unsafe fn cmd_begin_rendering_khr(
&self,
command_buffer: CommandBuffer,
rendering_info: &RenderingInfo,
) {
let __result = (self.commands().cmd_begin_rendering_khr)(command_buffer, rendering_info);
}
/// Records the end of a dynamic rendering pass (`vkCmdEndRenderingKHR`).
#[inline]
unsafe fn cmd_end_rendering_khr(&self, command_buffer: CommandBuffer) {
let __result = (self.commands().cmd_end_rendering_khr)(command_buffer);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDynamicRenderingExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_dynamic_rendering_local_read` extension.
pub trait KhrDynamicRenderingLocalReadExtensionDeviceCommands: DeviceV1_0 {
/// Records new color-attachment locations for the current rendering pass
/// (`vkCmdSetRenderingAttachmentLocationsKHR`).
#[inline]
unsafe fn cmd_set_rendering_attachment_locations_khr(
&self,
command_buffer: CommandBuffer,
location_info: &RenderingAttachmentLocationInfo,
) {
let __result = (self.commands().cmd_set_rendering_attachment_locations_khr)(
command_buffer,
location_info,
);
}
/// Records new input-attachment index mappings for the current rendering pass
/// (`vkCmdSetRenderingInputAttachmentIndicesKHR`).
#[inline]
unsafe fn cmd_set_rendering_input_attachment_indices_khr(
&self,
command_buffer: CommandBuffer,
input_attachment_index_info: &RenderingInputAttachmentIndexInfo,
) {
let __result = (self
.commands()
.cmd_set_rendering_input_attachment_indices_khr)(
command_buffer,
input_attachment_index_info,
);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrDynamicRenderingLocalReadExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_external_fence_capabilities` extension.
pub trait KhrExternalFenceCapabilitiesExtensionInstanceCommands: InstanceV1_0 {
/// Queries external-fence handle-type support; results are written into
/// `external_fence_properties` (`vkGetPhysicalDeviceExternalFencePropertiesKHR`).
#[inline]
unsafe fn get_physical_device_external_fence_properties_khr(
&self,
physical_device: PhysicalDevice,
external_fence_info: &PhysicalDeviceExternalFenceInfo,
external_fence_properties: &mut ExternalFenceProperties,
) {
let __result = (self
.commands()
.get_physical_device_external_fence_properties_khr)(
physical_device,
external_fence_info,
external_fence_properties,
);
}
}
impl<C: InstanceV1_0 + ?Sized> KhrExternalFenceCapabilitiesExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_fence_fd` extension.
pub trait KhrExternalFenceFdExtensionDeviceCommands: DeviceV1_0 {
/// Exports a fence as a POSIX file descriptor (`vkGetFenceFdKHR`).
///
/// On success the caller owns the returned fd and must close it.
#[inline]
unsafe fn get_fence_fd_khr(&self, get_fd_info: &FenceGetFdInfoKHR) -> crate::VkResult<c_int> {
let mut fd = MaybeUninit::<c_int>::uninit();
let __result =
(self.commands().get_fence_fd_khr)(self.handle(), get_fd_info, fd.as_mut_ptr());
if __result == Result::SUCCESS {
Ok(fd.assume_init())
} else {
Err(__result.into())
}
}
/// Imports a POSIX file descriptor into a fence (`vkImportFenceFdKHR`).
#[inline]
unsafe fn import_fence_fd_khr(
&self,
import_fence_fd_info: &ImportFenceFdInfoKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().import_fence_fd_khr)(self.handle(), import_fence_fd_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalFenceFdExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_fence_win32` extension.
pub trait KhrExternalFenceWin32ExtensionDeviceCommands: DeviceV1_0 {
/// Exports a fence as a Win32 handle (`vkGetFenceWin32HandleKHR`).
#[inline]
unsafe fn get_fence_win32_handle_khr(
&self,
get_win32_handle_info: &FenceGetWin32HandleInfoKHR,
) -> crate::VkResult<HANDLE> {
let mut handle = MaybeUninit::<HANDLE>::uninit();
let __result = (self.commands().get_fence_win32_handle_khr)(
self.handle(),
get_win32_handle_info,
handle.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(handle.assume_init())
} else {
Err(__result.into())
}
}
/// Imports a Win32 handle into a fence (`vkImportFenceWin32HandleKHR`).
#[inline]
unsafe fn import_fence_win32_handle_khr(
&self,
import_fence_win32_handle_info: &ImportFenceWin32HandleInfoKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().import_fence_win32_handle_khr)(
self.handle(),
import_fence_win32_handle_info,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalFenceWin32ExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_external_memory_capabilities` extension.
pub trait KhrExternalMemoryCapabilitiesExtensionInstanceCommands: InstanceV1_0 {
/// Queries external-buffer handle-type support; results are written into
/// `external_buffer_properties` (`vkGetPhysicalDeviceExternalBufferPropertiesKHR`).
#[inline]
unsafe fn get_physical_device_external_buffer_properties_khr(
&self,
physical_device: PhysicalDevice,
external_buffer_info: &PhysicalDeviceExternalBufferInfo,
external_buffer_properties: &mut ExternalBufferProperties,
) {
let __result = (self
.commands()
.get_physical_device_external_buffer_properties_khr)(
physical_device,
external_buffer_info,
external_buffer_properties,
);
}
}
impl<C: InstanceV1_0 + ?Sized> KhrExternalMemoryCapabilitiesExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_memory_fd` extension.
pub trait KhrExternalMemoryFdExtensionDeviceCommands: DeviceV1_0 {
/// Exports device memory as a POSIX file descriptor (`vkGetMemoryFdKHR`).
///
/// On success the caller owns the returned fd and must close it.
#[inline]
unsafe fn get_memory_fd_khr(&self, get_fd_info: &MemoryGetFdInfoKHR) -> crate::VkResult<c_int> {
let mut fd = MaybeUninit::<c_int>::uninit();
let __result =
(self.commands().get_memory_fd_khr)(self.handle(), get_fd_info, fd.as_mut_ptr());
if __result == Result::SUCCESS {
Ok(fd.assume_init())
} else {
Err(__result.into())
}
}
/// Queries properties of an externally supplied memory fd; results are
/// written into `memory_fd_properties` (`vkGetMemoryFdPropertiesKHR`).
#[inline]
unsafe fn get_memory_fd_properties_khr(
&self,
handle_type: ExternalMemoryHandleTypeFlags,
fd: c_int,
memory_fd_properties: &mut MemoryFdPropertiesKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().get_memory_fd_properties_khr)(
self.handle(),
handle_type,
fd,
memory_fd_properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalMemoryFdExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_memory_win32` extension.
pub trait KhrExternalMemoryWin32ExtensionDeviceCommands: DeviceV1_0 {
/// Exports device memory as a Win32 handle (`vkGetMemoryWin32HandleKHR`).
#[inline]
unsafe fn get_memory_win32_handle_khr(
&self,
get_win32_handle_info: &MemoryGetWin32HandleInfoKHR,
) -> crate::VkResult<HANDLE> {
let mut handle = MaybeUninit::<HANDLE>::uninit();
let __result = (self.commands().get_memory_win32_handle_khr)(
self.handle(),
get_win32_handle_info,
handle.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(handle.assume_init())
} else {
Err(__result.into())
}
}
/// Queries properties of an externally supplied Win32 memory handle; results
/// are written into `memory_win32_handle_properties`
/// (`vkGetMemoryWin32HandlePropertiesKHR`).
#[inline]
unsafe fn get_memory_win32_handle_properties_khr(
&self,
handle_type: ExternalMemoryHandleTypeFlags,
handle: HANDLE,
memory_win32_handle_properties: &mut MemoryWin32HandlePropertiesKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().get_memory_win32_handle_properties_khr)(
self.handle(),
handle_type,
handle,
memory_win32_handle_properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalMemoryWin32ExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_external_semaphore_capabilities` extension.
pub trait KhrExternalSemaphoreCapabilitiesExtensionInstanceCommands: InstanceV1_0 {
/// Queries external-semaphore handle-type support; results are written into
/// `external_semaphore_properties`
/// (`vkGetPhysicalDeviceExternalSemaphorePropertiesKHR`).
#[inline]
unsafe fn get_physical_device_external_semaphore_properties_khr(
&self,
physical_device: PhysicalDevice,
external_semaphore_info: &PhysicalDeviceExternalSemaphoreInfo,
external_semaphore_properties: &mut ExternalSemaphoreProperties,
) {
let __result = (self
.commands()
.get_physical_device_external_semaphore_properties_khr)(
physical_device,
external_semaphore_info,
external_semaphore_properties,
);
}
}
impl<C: InstanceV1_0 + ?Sized> KhrExternalSemaphoreCapabilitiesExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_semaphore_fd` extension.
pub trait KhrExternalSemaphoreFdExtensionDeviceCommands: DeviceV1_0 {
/// Exports a semaphore as a POSIX file descriptor (`vkGetSemaphoreFdKHR`).
///
/// On success the caller owns the returned fd and must close it.
#[inline]
unsafe fn get_semaphore_fd_khr(
&self,
get_fd_info: &SemaphoreGetFdInfoKHR,
) -> crate::VkResult<c_int> {
let mut fd = MaybeUninit::<c_int>::uninit();
let __result =
(self.commands().get_semaphore_fd_khr)(self.handle(), get_fd_info, fd.as_mut_ptr());
if __result == Result::SUCCESS {
Ok(fd.assume_init())
} else {
Err(__result.into())
}
}
/// Imports a POSIX file descriptor into a semaphore (`vkImportSemaphoreFdKHR`).
#[inline]
unsafe fn import_semaphore_fd_khr(
&self,
import_semaphore_fd_info: &ImportSemaphoreFdInfoKHR,
) -> crate::VkResult<()> {
let __result =
(self.commands().import_semaphore_fd_khr)(self.handle(), import_semaphore_fd_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalSemaphoreFdExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_external_semaphore_win32` extension.
pub trait KhrExternalSemaphoreWin32ExtensionDeviceCommands: DeviceV1_0 {
/// Exports a semaphore as a Win32 handle (`vkGetSemaphoreWin32HandleKHR`).
#[inline]
unsafe fn get_semaphore_win32_handle_khr(
&self,
get_win32_handle_info: &SemaphoreGetWin32HandleInfoKHR,
) -> crate::VkResult<HANDLE> {
let mut handle = MaybeUninit::<HANDLE>::uninit();
let __result = (self.commands().get_semaphore_win32_handle_khr)(
self.handle(),
get_win32_handle_info,
handle.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(handle.assume_init())
} else {
Err(__result.into())
}
}
/// Imports a Win32 handle into a semaphore (`vkImportSemaphoreWin32HandleKHR`).
#[inline]
unsafe fn import_semaphore_win32_handle_khr(
&self,
import_semaphore_win32_handle_info: &ImportSemaphoreWin32HandleInfoKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().import_semaphore_win32_handle_khr)(
self.handle(),
import_semaphore_win32_handle_info,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> KhrExternalSemaphoreWin32ExtensionDeviceCommands for C {}
/// Device-level command wrapper for the `VK_KHR_fragment_shading_rate` extension.
pub trait KhrFragmentShadingRateExtensionDeviceCommands: DeviceV1_0 {
/// Records the fragment shading rate and its combiner ops
/// (`vkCmdSetFragmentShadingRateKHR`).
#[inline]
unsafe fn cmd_set_fragment_shading_rate_khr(
&self,
command_buffer: CommandBuffer,
fragment_size: &Extent2D,
combiner_ops: [FragmentShadingRateCombinerOpKHR; 2],
) {
let __result = (self.commands().cmd_set_fragment_shading_rate_khr)(
command_buffer,
fragment_size,
// FFI expects a pointer to exactly two combiner ops; the fixed-size
// array parameter guarantees that length.
combiner_ops.as_ptr(),
);
}
}
impl<C: DeviceV1_0 + ?Sized> KhrFragmentShadingRateExtensionDeviceCommands for C {}
/// Instance-level command wrapper for the `VK_KHR_fragment_shading_rate` extension.
pub trait KhrFragmentShadingRateExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates the supported fragment shading rates
/// (`vkGetPhysicalDeviceFragmentShadingRatesKHR`).
#[inline]
unsafe fn get_physical_device_fragment_shading_rates_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<PhysicalDeviceFragmentShadingRateKHR>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut fragment_shading_rate_count = 0;
(self
.commands()
.get_physical_device_fragment_shading_rates_khr)(
physical_device,
&mut fragment_shading_rate_count,
ptr::null_mut(),
);
let mut fragment_shading_rates = Vec::with_capacity(fragment_shading_rate_count as usize);
let __result = (self
.commands()
.get_physical_device_fragment_shading_rates_khr)(
physical_device,
&mut fragment_shading_rate_count,
fragment_shading_rates.as_mut_ptr(),
);
debug_assert!(fragment_shading_rates.capacity() >= fragment_shading_rate_count as usize);
fragment_shading_rates.set_len(fragment_shading_rate_count as usize);
if __result == Result::SUCCESS {
Ok(fragment_shading_rates)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrFragmentShadingRateExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_get_display_properties2` extension.
pub trait KhrGetDisplayProperties2ExtensionInstanceCommands: InstanceV1_0 {
/// Enumerates the modes of a display, extended form
/// (`vkGetDisplayModeProperties2KHR`).
#[inline]
unsafe fn get_display_mode_properties2_khr(
&self,
physical_device: PhysicalDevice,
display: DisplayKHR,
) -> crate::VkResult<Vec<DisplayModeProperties2KHR>> {
// Standard two-call enumeration: first call queries the count,
// second call fills the buffer.
let mut property_count = 0;
(self.commands().get_display_mode_properties2_khr)(
physical_device,
display,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self.commands().get_display_mode_properties2_khr)(
physical_device,
display,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
/// Queries plane capabilities, extended form; results are written into
/// `capabilities` (`vkGetDisplayPlaneCapabilities2KHR`).
#[inline]
unsafe fn get_display_plane_capabilities2_khr(
&self,
physical_device: PhysicalDevice,
display_plane_info: &DisplayPlaneInfo2KHR,
capabilities: &mut DisplayPlaneCapabilities2KHR,
) -> crate::VkResult<()> {
let __result = (self.commands().get_display_plane_capabilities2_khr)(
physical_device,
display_plane_info,
capabilities,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Enumerates display planes, extended form
/// (`vkGetPhysicalDeviceDisplayPlaneProperties2KHR`).
#[inline]
unsafe fn get_physical_device_display_plane_properties2_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<DisplayPlaneProperties2KHR>> {
let mut property_count = 0;
(self
.commands()
.get_physical_device_display_plane_properties2_khr)(
physical_device,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self
.commands()
.get_physical_device_display_plane_properties2_khr)(
physical_device,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
/// Enumerates displays, extended form
/// (`vkGetPhysicalDeviceDisplayProperties2KHR`).
#[inline]
unsafe fn get_physical_device_display_properties2_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<DisplayProperties2KHR>> {
let mut property_count = 0;
(self.commands().get_physical_device_display_properties2_khr)(
physical_device,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self.commands().get_physical_device_display_properties2_khr)(
physical_device,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
properties.set_len(property_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
}
impl<C: InstanceV1_0 + ?Sized> KhrGetDisplayProperties2ExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_get_memory_requirements2` extension.
pub trait KhrGetMemoryRequirements2ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetBufferMemoryRequirements2KHR`; result is written into `memory_requirements`.
#[inline]
unsafe fn get_buffer_memory_requirements2_khr(
&self,
info: &BufferMemoryRequirementsInfo2,
memory_requirements: &mut MemoryRequirements2,
) {
let __result = (self.commands().get_buffer_memory_requirements2_khr)(
self.handle(),
info,
memory_requirements,
);
}
/// Wraps `vkGetImageMemoryRequirements2KHR`; result is written into `memory_requirements`.
#[inline]
unsafe fn get_image_memory_requirements2_khr(
&self,
info: &ImageMemoryRequirementsInfo2,
memory_requirements: &mut MemoryRequirements2,
) {
let __result = (self.commands().get_image_memory_requirements2_khr)(
self.handle(),
info,
memory_requirements,
);
}
/// Wraps `vkGetImageSparseMemoryRequirements2KHR`: enumerates sparse memory requirements.
#[inline]
unsafe fn get_image_sparse_memory_requirements2_khr(
&self,
info: &ImageSparseMemoryRequirementsInfo2,
) -> Vec<SparseImageMemoryRequirements2> {
// Two-call idiom: query count, then fill (this command has no VkResult).
let mut sparse_memory_requirement_count = 0;
(self.commands().get_image_sparse_memory_requirements2_khr)(
self.handle(),
info,
&mut sparse_memory_requirement_count,
ptr::null_mut(),
);
let mut sparse_memory_requirements =
Vec::with_capacity(sparse_memory_requirement_count as usize);
let __result = (self.commands().get_image_sparse_memory_requirements2_khr)(
self.handle(),
info,
&mut sparse_memory_requirement_count,
sparse_memory_requirements.as_mut_ptr(),
);
debug_assert!(
sparse_memory_requirements.capacity() >= sparse_memory_requirement_count as usize
);
// SAFETY: the driver initialized `sparse_memory_requirement_count` elements.
sparse_memory_requirements.set_len(sparse_memory_requirement_count as usize);
sparse_memory_requirements
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrGetMemoryRequirements2ExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_get_physical_device_properties2` extension.
pub trait KhrGetPhysicalDeviceProperties2ExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkGetPhysicalDeviceFeatures2KHR`; result is written into `features`.
#[inline]
unsafe fn get_physical_device_features2_khr(
&self,
physical_device: PhysicalDevice,
features: &mut PhysicalDeviceFeatures2,
) {
let __result =
(self.commands().get_physical_device_features2_khr)(physical_device, features);
}
/// Wraps `vkGetPhysicalDeviceFormatProperties2KHR`; result goes into `format_properties`.
#[inline]
unsafe fn get_physical_device_format_properties2_khr(
&self,
physical_device: PhysicalDevice,
format: Format,
format_properties: &mut FormatProperties2,
) {
let __result = (self.commands().get_physical_device_format_properties2_khr)(
physical_device,
format,
format_properties,
);
}
/// Wraps `vkGetPhysicalDeviceImageFormatProperties2KHR`.
#[inline]
unsafe fn get_physical_device_image_format_properties2_khr(
&self,
physical_device: PhysicalDevice,
image_format_info: &PhysicalDeviceImageFormatInfo2,
image_format_properties: &mut ImageFormatProperties2,
) -> crate::VkResult<()> {
let __result = (self
.commands()
.get_physical_device_image_format_properties2_khr)(
physical_device,
image_format_info,
image_format_properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceMemoryProperties2KHR`; result goes into `memory_properties`.
#[inline]
unsafe fn get_physical_device_memory_properties2_khr(
&self,
physical_device: PhysicalDevice,
memory_properties: &mut PhysicalDeviceMemoryProperties2,
) {
let __result = (self.commands().get_physical_device_memory_properties2_khr)(
physical_device,
memory_properties,
);
}
/// Wraps `vkGetPhysicalDeviceProperties2KHR`; result is written into `properties`.
#[inline]
unsafe fn get_physical_device_properties2_khr(
&self,
physical_device: PhysicalDevice,
properties: &mut PhysicalDeviceProperties2,
) {
let __result =
(self.commands().get_physical_device_properties2_khr)(physical_device, properties);
}
/// Wraps `vkGetPhysicalDeviceQueueFamilyProperties2KHR`: enumerates queue families.
#[inline]
unsafe fn get_physical_device_queue_family_properties2_khr(
&self,
physical_device: PhysicalDevice,
) -> Vec<QueueFamilyProperties2> {
// Two-call idiom: query count, then fill (this command has no VkResult).
let mut queue_family_property_count = 0;
(self
.commands()
.get_physical_device_queue_family_properties2_khr)(
physical_device,
&mut queue_family_property_count,
ptr::null_mut(),
);
let mut queue_family_properties = Vec::with_capacity(queue_family_property_count as usize);
let __result = (self
.commands()
.get_physical_device_queue_family_properties2_khr)(
physical_device,
&mut queue_family_property_count,
queue_family_properties.as_mut_ptr(),
);
debug_assert!(queue_family_properties.capacity() >= queue_family_property_count as usize);
// SAFETY: the driver initialized `queue_family_property_count` elements.
queue_family_properties.set_len(queue_family_property_count as usize);
queue_family_properties
}
/// Wraps `vkGetPhysicalDeviceSparseImageFormatProperties2KHR`.
#[inline]
unsafe fn get_physical_device_sparse_image_format_properties2_khr(
&self,
physical_device: PhysicalDevice,
format_info: &PhysicalDeviceSparseImageFormatInfo2,
) -> Vec<SparseImageFormatProperties2> {
// Two-call idiom: query count, then fill (this command has no VkResult).
let mut property_count = 0;
(self
.commands()
.get_physical_device_sparse_image_format_properties2_khr)(
physical_device,
format_info,
&mut property_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(property_count as usize);
let __result = (self
.commands()
.get_physical_device_sparse_image_format_properties2_khr)(
physical_device,
format_info,
&mut property_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= property_count as usize);
// SAFETY: the driver initialized `property_count` elements of the buffer.
properties.set_len(property_count as usize);
properties
}
}
// Blanket impl: any type with the 1.0 instance-level commands gets this extension trait.
impl<C: InstanceV1_0 + ?Sized> KhrGetPhysicalDeviceProperties2ExtensionInstanceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_get_surface_capabilities2` extension.
pub trait KhrGetSurfaceCapabilities2ExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkGetPhysicalDeviceSurfaceCapabilities2KHR`.
#[inline]
unsafe fn get_physical_device_surface_capabilities2_khr(
&self,
physical_device: PhysicalDevice,
surface_info: &PhysicalDeviceSurfaceInfo2KHR,
surface_capabilities: &mut SurfaceCapabilities2KHR,
) -> crate::VkResult<()> {
let __result = (self
.commands()
.get_physical_device_surface_capabilities2_khr)(
physical_device,
surface_info,
surface_capabilities,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceSurfaceFormats2KHR`: enumerates supported surface formats.
#[inline]
unsafe fn get_physical_device_surface_formats2_khr(
&self,
physical_device: PhysicalDevice,
surface_info: &PhysicalDeviceSurfaceInfo2KHR,
) -> crate::VkResult<Vec<SurfaceFormat2KHR>> {
// Two-call idiom: query count, then fill.
let mut surface_format_count = 0;
(self.commands().get_physical_device_surface_formats2_khr)(
physical_device,
surface_info,
&mut surface_format_count,
ptr::null_mut(),
);
let mut surface_formats = Vec::with_capacity(surface_format_count as usize);
let __result = (self.commands().get_physical_device_surface_formats2_khr)(
physical_device,
surface_info,
&mut surface_format_count,
surface_formats.as_mut_ptr(),
);
debug_assert!(surface_formats.capacity() >= surface_format_count as usize);
// SAFETY: the driver initialized `surface_format_count` elements of the buffer.
surface_formats.set_len(surface_format_count as usize);
if __result == Result::SUCCESS {
Ok(surface_formats)
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 instance-level commands gets this extension trait.
impl<C: InstanceV1_0 + ?Sized> KhrGetSurfaceCapabilities2ExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_line_rasterization` extension.
pub trait KhrLineRasterizationExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdSetLineStippleKHR`: records a dynamic line-stipple state change.
#[inline]
unsafe fn cmd_set_line_stipple_khr(
&self,
command_buffer: CommandBuffer,
line_stipple_factor: u32,
line_stipple_pattern: u16,
) {
let __result = (self.commands().cmd_set_line_stipple_khr)(
command_buffer,
line_stipple_factor,
line_stipple_pattern,
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrLineRasterizationExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance1` extension.
pub trait KhrMaintenance1ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkTrimCommandPoolKHR`: returns unused pool memory to the system.
#[inline]
unsafe fn trim_command_pool_khr(&self, command_pool: CommandPool, flags: CommandPoolTrimFlags) {
let __result = (self.commands().trim_command_pool_khr)(self.handle(), command_pool, flags);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance1ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance10` extension.
pub trait KhrMaintenance10ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdEndRendering2KHR`; `rendering_end_info` is optional (null when `None`).
#[inline]
unsafe fn cmd_end_rendering2_khr(
&self,
command_buffer: CommandBuffer,
rendering_end_info: Option<&RenderingEndInfoKHR>,
) {
let __result = (self.commands().cmd_end_rendering2_khr)(
command_buffer,
rendering_end_info.map_or(ptr::null(), |v| v),
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance10ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance3` extension.
pub trait KhrMaintenance3ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetDescriptorSetLayoutSupportKHR`; result is written into `support`.
#[inline]
unsafe fn get_descriptor_set_layout_support_khr(
&self,
create_info: &DescriptorSetLayoutCreateInfo,
support: &mut DescriptorSetLayoutSupport,
) {
let __result = (self.commands().get_descriptor_set_layout_support_khr)(
self.handle(),
create_info,
support,
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance3ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance4` extension.
pub trait KhrMaintenance4ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetDeviceBufferMemoryRequirementsKHR`; result goes into `memory_requirements`.
#[inline]
unsafe fn get_device_buffer_memory_requirements_khr(
&self,
info: &DeviceBufferMemoryRequirements,
memory_requirements: &mut MemoryRequirements2,
) {
let __result = (self.commands().get_device_buffer_memory_requirements_khr)(
self.handle(),
info,
memory_requirements,
);
}
/// Wraps `vkGetDeviceImageMemoryRequirementsKHR`; result goes into `memory_requirements`.
#[inline]
unsafe fn get_device_image_memory_requirements_khr(
&self,
info: &DeviceImageMemoryRequirements,
memory_requirements: &mut MemoryRequirements2,
) {
let __result = (self.commands().get_device_image_memory_requirements_khr)(
self.handle(),
info,
memory_requirements,
);
}
/// Wraps `vkGetDeviceImageSparseMemoryRequirementsKHR`.
#[inline]
unsafe fn get_device_image_sparse_memory_requirements_khr(
&self,
info: &DeviceImageMemoryRequirements,
) -> Vec<SparseImageMemoryRequirements2> {
// Two-call idiom: query count, then fill (this command has no VkResult).
let mut sparse_memory_requirement_count = 0;
(self
.commands()
.get_device_image_sparse_memory_requirements_khr)(
self.handle(),
info,
&mut sparse_memory_requirement_count,
ptr::null_mut(),
);
let mut sparse_memory_requirements =
Vec::with_capacity(sparse_memory_requirement_count as usize);
let __result = (self
.commands()
.get_device_image_sparse_memory_requirements_khr)(
self.handle(),
info,
&mut sparse_memory_requirement_count,
sparse_memory_requirements.as_mut_ptr(),
);
debug_assert!(
sparse_memory_requirements.capacity() >= sparse_memory_requirement_count as usize
);
// SAFETY: the driver initialized `sparse_memory_requirement_count` elements.
sparse_memory_requirements.set_len(sparse_memory_requirement_count as usize);
sparse_memory_requirements
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance4ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance5` extension.
pub trait KhrMaintenance5ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdBindIndexBuffer2KHR`: binds an index buffer with an explicit bound size.
#[inline]
unsafe fn cmd_bind_index_buffer2_khr(
&self,
command_buffer: CommandBuffer,
buffer: Buffer,
offset: DeviceSize,
size: DeviceSize,
index_type: IndexType,
) {
let __result = (self.commands().cmd_bind_index_buffer2_khr)(
command_buffer,
buffer,
offset,
size,
index_type,
);
}
/// Wraps `vkGetDeviceImageSubresourceLayoutKHR`; result is written into `layout`.
#[inline]
unsafe fn get_device_image_subresource_layout_khr(
&self,
info: &DeviceImageSubresourceInfo,
layout: &mut SubresourceLayout2,
) {
let __result =
(self.commands().get_device_image_subresource_layout_khr)(self.handle(), info, layout);
}
/// Wraps `vkGetImageSubresourceLayout2KHR`; result is written into `layout`.
#[inline]
unsafe fn get_image_subresource_layout2_khr(
&self,
image: Image,
subresource: &ImageSubresource2,
layout: &mut SubresourceLayout2,
) {
let __result = (self.commands().get_image_subresource_layout2_khr)(
self.handle(),
image,
subresource,
layout,
);
}
/// Wraps `vkGetRenderingAreaGranularityKHR`: returns the render-area granularity.
#[inline]
unsafe fn get_rendering_area_granularity_khr(
&self,
rendering_area_info: &RenderingAreaInfo,
) -> Extent2D {
// Out-parameter is written by the driver; read back with `assume_init`.
let mut granularity = MaybeUninit::<Extent2D>::uninit();
let __result = (self.commands().get_rendering_area_granularity_khr)(
self.handle(),
rendering_area_info,
granularity.as_mut_ptr(),
);
granularity.assume_init()
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance5ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_maintenance6` extension.
pub trait KhrMaintenance6ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdBindDescriptorBufferEmbeddedSamplers2EXT`.
#[inline]
unsafe fn cmd_bind_descriptor_buffer_embedded_samplers2_ext(
&self,
command_buffer: CommandBuffer,
bind_descriptor_buffer_embedded_samplers_info: &BindDescriptorBufferEmbeddedSamplersInfoEXT,
) {
let __result = (self
.commands()
.cmd_bind_descriptor_buffer_embedded_samplers2_ext)(
command_buffer,
bind_descriptor_buffer_embedded_samplers_info,
);
}
/// Wraps `vkCmdBindDescriptorSets2KHR`.
#[inline]
unsafe fn cmd_bind_descriptor_sets2_khr(
&self,
command_buffer: CommandBuffer,
bind_descriptor_sets_info: &BindDescriptorSetsInfo,
) {
let __result = (self.commands().cmd_bind_descriptor_sets2_khr)(
command_buffer,
bind_descriptor_sets_info,
);
}
/// Wraps `vkCmdPushConstants2KHR`.
#[inline]
unsafe fn cmd_push_constants2_khr(
&self,
command_buffer: CommandBuffer,
push_constants_info: &PushConstantsInfo,
) {
let __result =
(self.commands().cmd_push_constants2_khr)(command_buffer, push_constants_info);
}
/// Wraps `vkCmdPushDescriptorSet2KHR`.
#[inline]
unsafe fn cmd_push_descriptor_set2_khr(
&self,
command_buffer: CommandBuffer,
push_descriptor_set_info: &PushDescriptorSetInfo,
) {
let __result = (self.commands().cmd_push_descriptor_set2_khr)(
command_buffer,
push_descriptor_set_info,
);
}
/// Wraps `vkCmdPushDescriptorSetWithTemplate2KHR`.
#[inline]
unsafe fn cmd_push_descriptor_set_with_template2_khr(
&self,
command_buffer: CommandBuffer,
push_descriptor_set_with_template_info: &PushDescriptorSetWithTemplateInfo,
) {
let __result = (self.commands().cmd_push_descriptor_set_with_template2_khr)(
command_buffer,
push_descriptor_set_with_template_info,
);
}
/// Wraps `vkCmdSetDescriptorBufferOffsets2EXT`.
#[inline]
unsafe fn cmd_set_descriptor_buffer_offsets2_ext(
&self,
command_buffer: CommandBuffer,
set_descriptor_buffer_offsets_info: &SetDescriptorBufferOffsetsInfoEXT,
) {
let __result = (self.commands().cmd_set_descriptor_buffer_offsets2_ext)(
command_buffer,
set_descriptor_buffer_offsets_info,
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMaintenance6ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_map_memory2` extension.
pub trait KhrMapMemory2ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkMapMemory2KHR`: maps device memory, returning the host pointer on success.
#[inline]
unsafe fn map_memory2_khr(
&self,
memory_map_info: &MemoryMapInfo,
) -> crate::VkResult<*mut c_void> {
// Out-parameter receives the mapped host address.
let mut data = MaybeUninit::<*mut c_void>::uninit();
let __result =
(self.commands().map_memory2_khr)(self.handle(), memory_map_info, data.as_mut_ptr());
if __result == Result::SUCCESS {
Ok(data.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkUnmapMemory2KHR`: unmaps a previously mapped memory range.
#[inline]
unsafe fn unmap_memory2_khr(&self, memory_unmap_info: &MemoryUnmapInfo) -> crate::VkResult<()> {
let __result = (self.commands().unmap_memory2_khr)(self.handle(), memory_unmap_info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrMapMemory2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_object_refresh` extension.
pub trait KhrObjectRefreshExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdRefreshObjectsKHR`: records a refresh of the listed objects.
#[inline]
unsafe fn cmd_refresh_objects_khr(
&self,
command_buffer: CommandBuffer,
refresh_objects: &RefreshObjectListKHR,
) {
let __result = (self.commands().cmd_refresh_objects_khr)(command_buffer, refresh_objects);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrObjectRefreshExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_object_refresh` extension.
pub trait KhrObjectRefreshExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkGetPhysicalDeviceRefreshableObjectTypesKHR`: lists refreshable object types.
#[inline]
unsafe fn get_physical_device_refreshable_object_types_khr(
&self,
physical_device: PhysicalDevice,
) -> crate::VkResult<Vec<ObjectType>> {
// Two-call idiom: query count, then fill.
let mut refreshable_object_type_count = 0;
(self
.commands()
.get_physical_device_refreshable_object_types_khr)(
physical_device,
&mut refreshable_object_type_count,
ptr::null_mut(),
);
let mut refreshable_object_types =
Vec::with_capacity(refreshable_object_type_count as usize);
let __result = (self
.commands()
.get_physical_device_refreshable_object_types_khr)(
physical_device,
&mut refreshable_object_type_count,
refreshable_object_types.as_mut_ptr(),
);
debug_assert!(
refreshable_object_types.capacity() >= refreshable_object_type_count as usize
);
// SAFETY: the driver initialized `refreshable_object_type_count` elements.
refreshable_object_types.set_len(refreshable_object_type_count as usize);
if __result == Result::SUCCESS {
Ok(refreshable_object_types)
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 instance-level commands gets this extension trait.
impl<C: InstanceV1_0 + ?Sized> KhrObjectRefreshExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_performance_query` extension.
pub trait KhrPerformanceQueryExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkAcquireProfilingLockKHR`: acquires exclusive access to profiling counters.
#[inline]
unsafe fn acquire_profiling_lock_khr(
&self,
info: &AcquireProfilingLockInfoKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().acquire_profiling_lock_khr)(self.handle(), info);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkReleaseProfilingLockKHR`: releases the profiling lock.
#[inline]
unsafe fn release_profiling_lock_khr(&self) {
let __result = (self.commands().release_profiling_lock_khr)(self.handle());
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPerformanceQueryExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_performance_query` extension.
pub trait KhrPerformanceQueryExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR`.
///
/// Returns the counters and their descriptions; both vectors share one count.
#[inline]
unsafe fn enumerate_physical_device_queue_family_performance_query_counters_khr(
&self,
physical_device: PhysicalDevice,
queue_family_index: u32,
) -> crate::VkResult<(
Vec<PerformanceCounterKHR>,
Vec<PerformanceCounterDescriptionKHR>,
)> {
// Two-call idiom: query count, then fill both output arrays.
let mut counter_count = 0;
(self
.commands()
.enumerate_physical_device_queue_family_performance_query_counters_khr)(
physical_device,
queue_family_index,
&mut counter_count,
ptr::null_mut(),
ptr::null_mut(),
);
let mut counters = Vec::with_capacity(counter_count as usize);
let mut counter_descriptions = Vec::with_capacity(counter_count as usize);
let __result = (self
.commands()
.enumerate_physical_device_queue_family_performance_query_counters_khr)(
physical_device,
queue_family_index,
&mut counter_count,
counters.as_mut_ptr(),
counter_descriptions.as_mut_ptr(),
);
debug_assert!(counters.capacity() >= counter_count as usize);
// SAFETY: the driver initialized `counter_count` elements in each buffer.
counters.set_len(counter_count as usize);
debug_assert!(counter_descriptions.capacity() >= counter_count as usize);
counter_descriptions.set_len(counter_count as usize);
if __result == Result::SUCCESS {
Ok((counters, counter_descriptions))
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR`:
/// returns how many passes a performance query pool needs.
#[inline]
unsafe fn get_physical_device_queue_family_performance_query_passes_khr(
&self,
physical_device: PhysicalDevice,
performance_query_create_info: &QueryPoolPerformanceCreateInfoKHR,
) -> u32 {
// Out-parameter is written by the driver; read back with `assume_init`.
let mut num_passes = MaybeUninit::<u32>::uninit();
let __result = (self
.commands()
.get_physical_device_queue_family_performance_query_passes_khr)(
physical_device,
performance_query_create_info,
num_passes.as_mut_ptr(),
);
num_passes.assume_init()
}
}
// Blanket impl: any type with the 1.0 instance-level commands gets this extension trait.
impl<C: InstanceV1_0 + ?Sized> KhrPerformanceQueryExtensionInstanceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_pipeline_binary` extension.
pub trait KhrPipelineBinaryExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCreatePipelineBinariesKHR`; handles are written into `binaries`.
///
/// Returns the positive success code (e.g. `INCOMPLETE` is treated as success here).
#[inline]
unsafe fn create_pipeline_binaries_khr(
&self,
create_info: &PipelineBinaryCreateInfoKHR,
allocator: Option<&AllocationCallbacks>,
binaries: &mut PipelineBinaryHandlesInfoKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().create_pipeline_binaries_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
binaries,
);
// Any non-negative VkResult is a success code for this command.
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
/// Wraps `vkDestroyPipelineBinaryKHR`.
#[inline]
unsafe fn destroy_pipeline_binary_khr(
&self,
pipeline_binary: PipelineBinaryKHR,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_pipeline_binary_khr)(
self.handle(),
pipeline_binary,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Wraps `vkGetPipelineBinaryDataKHR`: returns the binary blob for a pipeline binary,
/// also writing its key into `pipeline_binary_key`.
#[inline]
unsafe fn get_pipeline_binary_data_khr(
&self,
info: &PipelineBinaryDataInfoKHR,
pipeline_binary_key: &mut PipelineBinaryKeyKHR,
) -> crate::VkResult<Vec<u8>> {
// Two-call idiom: first query the data size in bytes ...
let mut pipeline_binary_data_size = 0;
(self.commands().get_pipeline_binary_data_khr)(
self.handle(),
info,
ptr::null_mut(),
&mut pipeline_binary_data_size,
ptr::null_mut(),
);
// ... then fill a byte buffer of that size.
let mut pipeline_binary_data = Vec::with_capacity(pipeline_binary_data_size as usize);
let __result = (self.commands().get_pipeline_binary_data_khr)(
self.handle(),
info,
pipeline_binary_key,
&mut pipeline_binary_data_size,
pipeline_binary_data.as_mut_ptr() as *mut c_void,
);
debug_assert!(pipeline_binary_data.capacity() >= pipeline_binary_data_size as usize);
// SAFETY: the driver wrote `pipeline_binary_data_size` bytes into the buffer.
pipeline_binary_data.set_len(pipeline_binary_data_size as usize);
if __result == Result::SUCCESS {
Ok(pipeline_binary_data)
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPipelineKeyKHR`; the key is written into `pipeline_key`.
#[inline]
unsafe fn get_pipeline_key_khr(
&self,
pipeline_create_info: Option<&PipelineCreateInfoKHR>,
pipeline_key: &mut PipelineBinaryKeyKHR,
) -> crate::VkResult<()> {
let __result = (self.commands().get_pipeline_key_khr)(
self.handle(),
pipeline_create_info.map_or(ptr::null(), |v| v),
pipeline_key,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkReleaseCapturedPipelineDataKHR`.
#[inline]
unsafe fn release_captured_pipeline_data_khr(
&self,
info: &ReleaseCapturedPipelineDataInfoKHR,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<()> {
let __result = (self.commands().release_captured_pipeline_data_khr)(
self.handle(),
info,
allocator.map_or(ptr::null(), |v| v),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPipelineBinaryExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_pipeline_executable_properties` extension.
pub trait KhrPipelineExecutablePropertiesExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetPipelineExecutableInternalRepresentationsKHR`.
#[inline]
unsafe fn get_pipeline_executable_internal_representations_khr(
&self,
executable_info: &PipelineExecutableInfoKHR,
) -> crate::VkResult<Vec<PipelineExecutableInternalRepresentationKHR>> {
// Two-call idiom: query count, then fill.
let mut internal_representation_count = 0;
(self
.commands()
.get_pipeline_executable_internal_representations_khr)(
self.handle(),
executable_info,
&mut internal_representation_count,
ptr::null_mut(),
);
let mut internal_representations =
Vec::with_capacity(internal_representation_count as usize);
let __result = (self
.commands()
.get_pipeline_executable_internal_representations_khr)(
self.handle(),
executable_info,
&mut internal_representation_count,
internal_representations.as_mut_ptr(),
);
debug_assert!(
internal_representations.capacity() >= internal_representation_count as usize
);
// SAFETY: the driver initialized `internal_representation_count` elements.
internal_representations.set_len(internal_representation_count as usize);
if __result == Result::SUCCESS {
Ok(internal_representations)
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPipelineExecutablePropertiesKHR`.
#[inline]
unsafe fn get_pipeline_executable_properties_khr(
&self,
pipeline_info: &PipelineInfoKHR,
) -> crate::VkResult<Vec<PipelineExecutablePropertiesKHR>> {
// Two-call idiom: query count, then fill.
let mut executable_count = 0;
(self.commands().get_pipeline_executable_properties_khr)(
self.handle(),
pipeline_info,
&mut executable_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(executable_count as usize);
let __result = (self.commands().get_pipeline_executable_properties_khr)(
self.handle(),
pipeline_info,
&mut executable_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= executable_count as usize);
// SAFETY: the driver initialized `executable_count` elements of the buffer.
properties.set_len(executable_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPipelineExecutableStatisticsKHR`.
#[inline]
unsafe fn get_pipeline_executable_statistics_khr(
&self,
executable_info: &PipelineExecutableInfoKHR,
) -> crate::VkResult<Vec<PipelineExecutableStatisticKHR>> {
// Two-call idiom: query count, then fill.
let mut statistic_count = 0;
(self.commands().get_pipeline_executable_statistics_khr)(
self.handle(),
executable_info,
&mut statistic_count,
ptr::null_mut(),
);
let mut statistics = Vec::with_capacity(statistic_count as usize);
let __result = (self.commands().get_pipeline_executable_statistics_khr)(
self.handle(),
executable_info,
&mut statistic_count,
statistics.as_mut_ptr(),
);
debug_assert!(statistics.capacity() >= statistic_count as usize);
// SAFETY: the driver initialized `statistic_count` elements of the buffer.
statistics.set_len(statistic_count as usize);
if __result == Result::SUCCESS {
Ok(statistics)
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPipelineExecutablePropertiesExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_present_wait` extension.
pub trait KhrPresentWaitExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkWaitForPresentKHR`: waits for presentation of `present_id`, up to `timeout` ns.
///
/// Returns the positive success code (e.g. `TIMEOUT` is treated as success here).
#[inline]
unsafe fn wait_for_present_khr(
&self,
swapchain: SwapchainKHR,
present_id: u64,
timeout: u64,
) -> crate::VkResult<SuccessCode> {
let __result =
(self.commands().wait_for_present_khr)(self.handle(), swapchain, present_id, timeout);
// Any non-negative VkResult is a success code for this command.
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPresentWaitExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_present_wait2` extension.
pub trait KhrPresentWait2ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkWaitForPresent2KHR`.
///
/// Returns the positive success code (e.g. `TIMEOUT` is treated as success here).
#[inline]
unsafe fn wait_for_present2_khr(
&self,
swapchain: SwapchainKHR,
present_wait2_info: &PresentWait2InfoKHR,
) -> crate::VkResult<SuccessCode> {
let __result =
(self.commands().wait_for_present2_khr)(self.handle(), swapchain, present_wait2_info);
// Any non-negative VkResult is a success code for this command.
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPresentWait2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_push_descriptor` extension.
pub trait KhrPushDescriptorExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdPushDescriptorSetKHR`: pushes descriptor writes directly into a set.
#[inline]
unsafe fn cmd_push_descriptor_set_khr(
&self,
command_buffer: CommandBuffer,
pipeline_bind_point: PipelineBindPoint,
layout: PipelineLayout,
set: u32,
descriptor_writes: &[impl Cast<Target = WriteDescriptorSet>],
) {
// The slice is passed as (count, pointer) to the C API; `Cast` guarantees layout.
let __result = (self.commands().cmd_push_descriptor_set_khr)(
command_buffer,
pipeline_bind_point,
layout,
set,
descriptor_writes.len() as u32,
descriptor_writes.as_ptr().cast(),
);
}
/// Wraps `vkCmdPushDescriptorSetWithTemplateKHR`.
///
/// `data` must match the layout expected by `descriptor_update_template` (caller's contract).
#[inline]
unsafe fn cmd_push_descriptor_set_with_template_khr(
&self,
command_buffer: CommandBuffer,
descriptor_update_template: DescriptorUpdateTemplate,
layout: PipelineLayout,
set: u32,
data: *const c_void,
) {
let __result = (self.commands().cmd_push_descriptor_set_with_template_khr)(
command_buffer,
descriptor_update_template,
layout,
set,
data,
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrPushDescriptorExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_ray_tracing_maintenance1` extension.
pub trait KhrRayTracingMaintenance1ExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdTraceRaysIndirect2KHR`: trace-rays call fully parameterized from device memory.
#[inline]
unsafe fn cmd_trace_rays_indirect2_khr(
&self,
command_buffer: CommandBuffer,
indirect_device_address: DeviceAddress,
) {
let __result =
(self.commands().cmd_trace_rays_indirect2_khr)(command_buffer, indirect_device_address);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrRayTracingMaintenance1ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_ray_tracing_pipeline` extension.
pub trait KhrRayTracingPipelineExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCmdSetRayTracingPipelineStackSizeKHR`.
#[inline]
unsafe fn cmd_set_ray_tracing_pipeline_stack_size_khr(
&self,
command_buffer: CommandBuffer,
pipeline_stack_size: u32,
) {
let __result = (self.commands().cmd_set_ray_tracing_pipeline_stack_size_khr)(
command_buffer,
pipeline_stack_size,
);
}
/// Wraps `vkCmdTraceRaysIndirectKHR`: dimensions are read from `indirect_device_address`.
#[inline]
unsafe fn cmd_trace_rays_indirect_khr(
&self,
command_buffer: CommandBuffer,
raygen_shader_binding_table: &StridedDeviceAddressRegionKHR,
miss_shader_binding_table: &StridedDeviceAddressRegionKHR,
hit_shader_binding_table: &StridedDeviceAddressRegionKHR,
callable_shader_binding_table: &StridedDeviceAddressRegionKHR,
indirect_device_address: DeviceAddress,
) {
let __result = (self.commands().cmd_trace_rays_indirect_khr)(
command_buffer,
raygen_shader_binding_table,
miss_shader_binding_table,
hit_shader_binding_table,
callable_shader_binding_table,
indirect_device_address,
);
}
/// Wraps `vkCmdTraceRaysKHR`: dispatches a `width` x `height` x `depth` ray-trace.
#[inline]
unsafe fn cmd_trace_rays_khr(
&self,
command_buffer: CommandBuffer,
raygen_shader_binding_table: &StridedDeviceAddressRegionKHR,
miss_shader_binding_table: &StridedDeviceAddressRegionKHR,
hit_shader_binding_table: &StridedDeviceAddressRegionKHR,
callable_shader_binding_table: &StridedDeviceAddressRegionKHR,
width: u32,
height: u32,
depth: u32,
) {
let __result = (self.commands().cmd_trace_rays_khr)(
command_buffer,
raygen_shader_binding_table,
miss_shader_binding_table,
hit_shader_binding_table,
callable_shader_binding_table,
width,
height,
depth,
);
}
/// Wraps `vkCreateRayTracingPipelinesKHR`: creates one pipeline per create-info.
///
/// On any non-negative VkResult, returns the pipelines together with the success code.
#[inline]
unsafe fn create_ray_tracing_pipelines_khr(
&self,
deferred_operation: DeferredOperationKHR,
pipeline_cache: PipelineCache,
create_infos: &[impl Cast<Target = RayTracingPipelineCreateInfoKHR>],
allocator: Option<&AllocationCallbacks>,
) -> crate::VkSuccessResult<Vec<Pipeline>> {
// Output array is sized one-to-one with the create-infos.
let mut pipelines = Vec::with_capacity(create_infos.len() as usize);
let __result = (self.commands().create_ray_tracing_pipelines_khr)(
self.handle(),
deferred_operation,
pipeline_cache,
create_infos.len() as u32,
create_infos.as_ptr().cast(),
allocator.map_or(ptr::null(), |v| v),
pipelines.as_mut_ptr(),
);
// SAFETY: the driver wrote `create_infos.len()` pipeline handles into the buffer.
pipelines.set_len(create_infos.len() as usize);
if __result >= Result::SUCCESS {
Ok((pipelines, __result.into()))
} else {
Err(__result.into())
}
}
/// Wraps `vkGetRayTracingCaptureReplayShaderGroupHandlesKHR`; handles are written into `data`.
#[inline]
unsafe fn get_ray_tracing_capture_replay_shader_group_handles_khr(
&self,
pipeline: Pipeline,
first_group: u32,
group_count: u32,
data: &mut [u8],
) -> crate::VkResult<()> {
let __result = (self
.commands()
.get_ray_tracing_capture_replay_shader_group_handles_khr)(
self.handle(),
pipeline,
first_group,
group_count,
data.len() as usize,
data.as_mut_ptr() as *mut c_void,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetRayTracingShaderGroupHandlesKHR`; handles are written into `data`.
#[inline]
unsafe fn get_ray_tracing_shader_group_handles_khr(
&self,
pipeline: Pipeline,
first_group: u32,
group_count: u32,
data: &mut [u8],
) -> crate::VkResult<()> {
let __result = (self.commands().get_ray_tracing_shader_group_handles_khr)(
self.handle(),
pipeline,
first_group,
group_count,
data.len() as usize,
data.as_mut_ptr() as *mut c_void,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetRayTracingShaderGroupStackSizeKHR`: returns the group's stack size in bytes.
#[inline]
unsafe fn get_ray_tracing_shader_group_stack_size_khr(
&self,
pipeline: Pipeline,
group: u32,
group_shader: ShaderGroupShaderKHR,
) -> DeviceSize {
let __result = (self.commands().get_ray_tracing_shader_group_stack_size_khr)(
self.handle(),
pipeline,
group,
group_shader,
);
__result
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrRayTracingPipelineExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_sampler_ycbcr_conversion` extension.
pub trait KhrSamplerYcbcrConversionExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkCreateSamplerYcbcrConversionKHR`: creates a Y'CbCr conversion object.
#[inline]
unsafe fn create_sampler_ycbcr_conversion_khr(
&self,
create_info: &SamplerYcbcrConversionCreateInfo,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<SamplerYcbcrConversion> {
// Out-parameter receives the new handle on success.
let mut ycbcr_conversion = MaybeUninit::<SamplerYcbcrConversion>::uninit();
let __result = (self.commands().create_sampler_ycbcr_conversion_khr)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
ycbcr_conversion.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(ycbcr_conversion.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkDestroySamplerYcbcrConversionKHR`.
#[inline]
unsafe fn destroy_sampler_ycbcr_conversion_khr(
&self,
ycbcr_conversion: SamplerYcbcrConversion,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_sampler_ycbcr_conversion_khr)(
self.handle(),
ycbcr_conversion,
allocator.map_or(ptr::null(), |v| v),
);
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrSamplerYcbcrConversionExtensionDeviceCommands for C {}
/// Device-level command wrappers for the `VK_KHR_shared_presentable_image` extension.
pub trait KhrSharedPresentableImageExtensionDeviceCommands: DeviceV1_0 {
/// Wraps `vkGetSwapchainStatusKHR`.
///
/// Returns the positive success code (e.g. `SUBOPTIMAL_KHR` is treated as success here).
#[inline]
unsafe fn get_swapchain_status_khr(
&self,
swapchain: SwapchainKHR,
) -> crate::VkResult<SuccessCode> {
let __result = (self.commands().get_swapchain_status_khr)(self.handle(), swapchain);
// Any non-negative VkResult is a success code for this command.
if __result >= Result::SUCCESS {
Ok(__result.into())
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 device-level commands gets this extension trait.
impl<C: DeviceV1_0 + ?Sized> KhrSharedPresentableImageExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the `VK_KHR_surface` extension.
pub trait KhrSurfaceExtensionInstanceCommands: InstanceV1_0 {
/// Wraps `vkDestroySurfaceKHR`.
#[inline]
unsafe fn destroy_surface_khr(
&self,
surface: SurfaceKHR,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_surface_khr)(
self.handle(),
surface,
allocator.map_or(ptr::null(), |v| v),
);
}
/// Wraps `vkGetPhysicalDeviceSurfaceCapabilitiesKHR`.
#[inline]
unsafe fn get_physical_device_surface_capabilities_khr(
&self,
physical_device: PhysicalDevice,
surface: SurfaceKHR,
) -> crate::VkResult<SurfaceCapabilitiesKHR> {
// Out-parameter is written by the driver; read back with `assume_init` on success.
let mut surface_capabilities = MaybeUninit::<SurfaceCapabilitiesKHR>::uninit();
let __result = (self.commands().get_physical_device_surface_capabilities_khr)(
physical_device,
surface,
surface_capabilities.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(surface_capabilities.assume_init())
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceSurfaceFormatsKHR`: enumerates supported surface formats.
#[inline]
unsafe fn get_physical_device_surface_formats_khr(
&self,
physical_device: PhysicalDevice,
surface: SurfaceKHR,
) -> crate::VkResult<Vec<SurfaceFormatKHR>> {
// Two-call idiom: query count, then fill.
let mut surface_format_count = 0;
(self.commands().get_physical_device_surface_formats_khr)(
physical_device,
surface,
&mut surface_format_count,
ptr::null_mut(),
);
let mut surface_formats = Vec::with_capacity(surface_format_count as usize);
let __result = (self.commands().get_physical_device_surface_formats_khr)(
physical_device,
surface,
&mut surface_format_count,
surface_formats.as_mut_ptr(),
);
debug_assert!(surface_formats.capacity() >= surface_format_count as usize);
// SAFETY: the driver initialized `surface_format_count` elements of the buffer.
surface_formats.set_len(surface_format_count as usize);
if __result == Result::SUCCESS {
Ok(surface_formats)
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceSurfacePresentModesKHR`: enumerates present modes.
#[inline]
unsafe fn get_physical_device_surface_present_modes_khr(
&self,
physical_device: PhysicalDevice,
surface: SurfaceKHR,
) -> crate::VkResult<Vec<PresentModeKHR>> {
// Two-call idiom: query count, then fill.
let mut present_mode_count = 0;
(self
.commands()
.get_physical_device_surface_present_modes_khr)(
physical_device,
surface,
&mut present_mode_count,
ptr::null_mut(),
);
let mut present_modes = Vec::with_capacity(present_mode_count as usize);
let __result = (self
.commands()
.get_physical_device_surface_present_modes_khr)(
physical_device,
surface,
&mut present_mode_count,
present_modes.as_mut_ptr(),
);
debug_assert!(present_modes.capacity() >= present_mode_count as usize);
// SAFETY: the driver initialized `present_mode_count` elements of the buffer.
present_modes.set_len(present_mode_count as usize);
if __result == Result::SUCCESS {
Ok(present_modes)
} else {
Err(__result.into())
}
}
/// Wraps `vkGetPhysicalDeviceSurfaceSupportKHR`: whether the queue family can present
/// to `surface`; converts the API's `Bool32` to a Rust `bool`.
#[inline]
unsafe fn get_physical_device_surface_support_khr(
&self,
physical_device: PhysicalDevice,
queue_family_index: u32,
surface: SurfaceKHR,
) -> crate::VkResult<bool> {
let mut supported = MaybeUninit::<Bool32>::uninit();
let __result = (self.commands().get_physical_device_surface_support_khr)(
physical_device,
queue_family_index,
surface,
supported.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(supported.assume_init() == TRUE)
} else {
Err(__result.into())
}
}
}
// Blanket impl: any type with the 1.0 instance-level commands gets this extension trait.
impl<C: InstanceV1_0 + ?Sized> KhrSurfaceExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_KHR_swapchain.
pub trait KhrSwapchainExtensionDeviceCommands: DeviceV1_0 {
    /// Acquires the next presentable image using an info struct (device-group aware).
    #[inline]
    unsafe fn acquire_next_image2_khr(
        &self,
        acquire_info: &AcquireNextImageInfoKHR,
    ) -> crate::VkSuccessResult<u32> {
        let mut image_index = MaybeUninit::<u32>::uninit();
        let __result = (self.commands().acquire_next_image2_khr)(
            self.handle(),
            acquire_info,
            image_index.as_mut_ptr(),
        );
        // Non-negative codes (e.g. SUBOPTIMAL_KHR) still carry a valid image index.
        if __result >= Result::SUCCESS {
            Ok((image_index.assume_init(), __result.into()))
        } else {
            Err(__result.into())
        }
    }
    /// Acquires the next presentable image from `swapchain`.
    #[inline]
    unsafe fn acquire_next_image_khr(
        &self,
        swapchain: SwapchainKHR,
        timeout: u64,
        semaphore: Semaphore,
        fence: Fence,
    ) -> crate::VkSuccessResult<u32> {
        let mut image_index = MaybeUninit::<u32>::uninit();
        let __result = (self.commands().acquire_next_image_khr)(
            self.handle(),
            swapchain,
            timeout,
            semaphore,
            fence,
            image_index.as_mut_ptr(),
        );
        if __result >= Result::SUCCESS {
            Ok((image_index.assume_init(), __result.into()))
        } else {
            Err(__result.into())
        }
    }
    /// Creates a swapchain for a surface.
    #[inline]
    unsafe fn create_swapchain_khr(
        &self,
        create_info: &SwapchainCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SwapchainKHR> {
        let mut swapchain = MaybeUninit::<SwapchainKHR>::uninit();
        let __result = (self.commands().create_swapchain_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            swapchain.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(swapchain.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys a swapchain.
    #[inline]
    unsafe fn destroy_swapchain_khr(
        &self,
        swapchain: SwapchainKHR,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_swapchain_khr)(
            self.handle(),
            swapchain,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Queries device-group presentation capabilities into the caller's struct.
    #[inline]
    unsafe fn get_device_group_present_capabilities_khr(
        &self,
        device_group_present_capabilities: &mut DeviceGroupPresentCapabilitiesKHR,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_device_group_present_capabilities_khr)(
            self.handle(),
            device_group_present_capabilities,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Queries which device-group present modes `surface` supports.
    #[inline]
    unsafe fn get_device_group_surface_present_modes_khr(
        &self,
        surface: SurfaceKHR,
    ) -> crate::VkResult<DeviceGroupPresentModeFlagsKHR> {
        let mut modes = MaybeUninit::<DeviceGroupPresentModeFlagsKHR>::uninit();
        let __result = (self.commands().get_device_group_surface_present_modes_khr)(
            self.handle(),
            surface,
            modes.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(modes.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Retrieves the images backing a swapchain.
    #[inline]
    unsafe fn get_swapchain_images_khr(
        &self,
        swapchain: SwapchainKHR,
    ) -> crate::VkResult<Vec<Image>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut swapchain_image_count = 0;
        (self.commands().get_swapchain_images_khr)(
            self.handle(),
            swapchain,
            &mut swapchain_image_count,
            ptr::null_mut(),
        );
        let mut swapchain_images = Vec::with_capacity(swapchain_image_count as usize);
        let __result = (self.commands().get_swapchain_images_khr)(
            self.handle(),
            swapchain,
            &mut swapchain_image_count,
            swapchain_images.as_mut_ptr(),
        );
        debug_assert!(swapchain_images.capacity() >= swapchain_image_count as usize);
        // Assumes the driver initialized `swapchain_image_count` elements.
        swapchain_images.set_len(swapchain_image_count as usize);
        if __result == Result::SUCCESS {
            Ok(swapchain_images)
        } else {
            Err(__result.into())
        }
    }
    /// Queues presentation of swapchain images; non-negative codes are successes.
    #[inline]
    unsafe fn queue_present_khr(
        &self,
        queue: Queue,
        present_info: &PresentInfoKHR,
    ) -> crate::VkResult<SuccessCode> {
        let __result = (self.commands().queue_present_khr)(queue, present_info);
        if __result >= Result::SUCCESS {
            Ok(__result.into())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrSwapchainExtensionDeviceCommands for C {}
/// Instance-level command wrappers for VK_KHR_swapchain.
pub trait KhrSwapchainExtensionInstanceCommands: InstanceV1_0 {
    /// Lists the rectangles a surface can present into for the given physical device.
    #[inline]
    unsafe fn get_physical_device_present_rectangles_khr(
        &self,
        physical_device: PhysicalDevice,
        surface: SurfaceKHR,
    ) -> crate::VkResult<Vec<Rect2D>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut rect_count = 0;
        (self.commands().get_physical_device_present_rectangles_khr)(
            physical_device,
            surface,
            &mut rect_count,
            ptr::null_mut(),
        );
        let mut rects = Vec::with_capacity(rect_count as usize);
        let __result = (self.commands().get_physical_device_present_rectangles_khr)(
            physical_device,
            surface,
            &mut rect_count,
            rects.as_mut_ptr(),
        );
        debug_assert!(rects.capacity() >= rect_count as usize);
        // Assumes the driver initialized `rect_count` elements.
        rects.set_len(rect_count as usize);
        if __result == Result::SUCCESS {
            Ok(rects)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrSwapchainExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_KHR_swapchain_maintenance1.
pub trait KhrSwapchainMaintenance1ExtensionDeviceCommands: DeviceV1_0 {
    /// Releases previously acquired but unused swapchain images back to the swapchain.
    #[inline]
    unsafe fn release_swapchain_images_khr(
        &self,
        release_info: &ReleaseSwapchainImagesInfoKHR,
    ) -> crate::VkResult<()> {
        match (self.commands().release_swapchain_images_khr)(self.handle(), release_info) {
            code if code == Result::SUCCESS => Ok(()),
            code => Err(code.into()),
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrSwapchainMaintenance1ExtensionDeviceCommands for C {}
/// Device-level command wrappers for VK_KHR_synchronization2.
pub trait KhrSynchronization2ExtensionDeviceCommands: DeviceV1_0 {
    /// Records a pipeline barrier described by a single `DependencyInfo`.
    #[inline]
    unsafe fn cmd_pipeline_barrier2_khr(
        &self,
        command_buffer: CommandBuffer,
        dependency_info: &DependencyInfo,
    ) {
        (self.commands().cmd_pipeline_barrier2_khr)(command_buffer, dependency_info);
    }
    /// Resets an event at the given pipeline stages.
    #[inline]
    unsafe fn cmd_reset_event2_khr(
        &self,
        command_buffer: CommandBuffer,
        event: Event,
        stage_mask: PipelineStageFlags2,
    ) {
        (self.commands().cmd_reset_event2_khr)(command_buffer, event, stage_mask);
    }
    /// Signals an event together with the dependencies it will guard.
    #[inline]
    unsafe fn cmd_set_event2_khr(
        &self,
        command_buffer: CommandBuffer,
        event: Event,
        dependency_info: &DependencyInfo,
    ) {
        (self.commands().cmd_set_event2_khr)(command_buffer, event, dependency_info);
    }
    /// Waits on a set of events; `dependency_infos` parallels `events` element-wise.
    #[inline]
    unsafe fn cmd_wait_events2_khr(
        &self,
        command_buffer: CommandBuffer,
        events: &[Event],
        dependency_infos: &[impl Cast<Target = DependencyInfo>],
    ) {
        let event_count = events.len() as u32;
        (self.commands().cmd_wait_events2_khr)(
            command_buffer,
            event_count,
            events.as_ptr(),
            dependency_infos.as_ptr().cast(),
        );
    }
    /// Writes a timestamp after the given stage completes.
    #[inline]
    unsafe fn cmd_write_timestamp2_khr(
        &self,
        command_buffer: CommandBuffer,
        stage: PipelineStageFlags2,
        query_pool: QueryPool,
        query: u32,
    ) {
        (self.commands().cmd_write_timestamp2_khr)(command_buffer, stage, query_pool, query);
    }
    /// Submits batches to a queue, optionally signalling `fence` on completion.
    #[inline]
    unsafe fn queue_submit2_khr(
        &self,
        queue: Queue,
        submits: &[impl Cast<Target = SubmitInfo2>],
        fence: Fence,
    ) -> crate::VkResult<()> {
        match (self.commands().queue_submit2_khr)(
            queue,
            submits.len() as u32,
            submits.as_ptr().cast(),
            fence,
        ) {
            code if code == Result::SUCCESS => Ok(()),
            code => Err(code.into()),
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrSynchronization2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for VK_KHR_timeline_semaphore.
pub trait KhrTimelineSemaphoreExtensionDeviceCommands: DeviceV1_0 {
    /// Reads the current counter value of a timeline semaphore.
    #[inline]
    unsafe fn get_semaphore_counter_value_khr(&self, semaphore: Semaphore) -> crate::VkResult<u64> {
        let mut value = MaybeUninit::<u64>::uninit();
        let __result = (self.commands().get_semaphore_counter_value_khr)(
            self.handle(),
            semaphore,
            value.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            // Assumed written by the driver on SUCCESS.
            Ok(value.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Signals a timeline semaphore to the value in `signal_info` from the host.
    #[inline]
    unsafe fn signal_semaphore_khr(
        &self,
        signal_info: &SemaphoreSignalInfo,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().signal_semaphore_khr)(self.handle(), signal_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Waits on timeline semaphores; non-negative codes (e.g. TIMEOUT) are successes.
    #[inline]
    unsafe fn wait_semaphores_khr(
        &self,
        wait_info: &SemaphoreWaitInfo,
        timeout: u64,
    ) -> crate::VkResult<SuccessCode> {
        let __result = (self.commands().wait_semaphores_khr)(self.handle(), wait_info, timeout);
        if __result >= Result::SUCCESS {
            Ok(__result.into())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrTimelineSemaphoreExtensionDeviceCommands for C {}
/// Device-level command wrapper for VK_KHR_video_decode_queue.
pub trait KhrVideoDecodeQueueExtensionDeviceCommands: DeviceV1_0 {
    /// Records a video decode operation into the command buffer.
    #[inline]
    unsafe fn cmd_decode_video_khr(
        &self,
        command_buffer: CommandBuffer,
        decode_info: &VideoDecodeInfoKHR,
    ) {
        (self.commands().cmd_decode_video_khr)(command_buffer, decode_info);
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrVideoDecodeQueueExtensionDeviceCommands for C {}
/// Device-level command wrappers for VK_KHR_video_encode_queue.
pub trait KhrVideoEncodeQueueExtensionDeviceCommands: DeviceV1_0 {
    /// Records a video encode operation into the command buffer.
    #[inline]
    unsafe fn cmd_encode_video_khr(
        &self,
        command_buffer: CommandBuffer,
        encode_info: &VideoEncodeInfoKHR,
    ) {
        let __result = (self.commands().cmd_encode_video_khr)(command_buffer, encode_info);
    }
    /// Retrieves the encoded parameter data for a video session parameters object.
    #[inline]
    unsafe fn get_encoded_video_session_parameters_khr(
        &self,
        video_session_parameters_info: &VideoEncodeSessionParametersGetInfoKHR,
        feedback_info: Option<&mut VideoEncodeSessionParametersFeedbackInfoKHR>,
    ) -> crate::VkResult<Vec<u8>> {
        // Two-call pattern: first call retrieves only the data size (feedback omitted).
        let mut data_size = 0;
        (self.commands().get_encoded_video_session_parameters_khr)(
            self.handle(),
            video_session_parameters_info,
            ptr::null_mut(),
            &mut data_size,
            ptr::null_mut(),
        );
        let mut data = Vec::with_capacity(data_size as usize);
        let __result = (self.commands().get_encoded_video_session_parameters_khr)(
            self.handle(),
            video_session_parameters_info,
            // Optional feedback struct becomes NULL when absent.
            feedback_info.map_or(ptr::null_mut(), |v| v),
            &mut data_size,
            data.as_mut_ptr() as *mut c_void,
        );
        debug_assert!(data.capacity() >= data_size as usize);
        // Assumes the driver wrote `data_size` bytes into the buffer.
        data.set_len(data_size as usize);
        if __result == Result::SUCCESS {
            Ok(data)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrVideoEncodeQueueExtensionDeviceCommands for C {}
/// Instance-level command wrapper for VK_KHR_video_encode_queue.
pub trait KhrVideoEncodeQueueExtensionInstanceCommands: InstanceV1_0 {
    /// Queries encoder properties for a specific quality level into the caller's struct.
    #[inline]
    unsafe fn get_physical_device_video_encode_quality_level_properties_khr(
        &self,
        physical_device: PhysicalDevice,
        quality_level_info: &PhysicalDeviceVideoEncodeQualityLevelInfoKHR,
        quality_level_properties: &mut VideoEncodeQualityLevelPropertiesKHR,
    ) -> crate::VkResult<()> {
        let command = self
            .commands()
            .get_physical_device_video_encode_quality_level_properties_khr;
        match command(physical_device, quality_level_info, quality_level_properties) {
            code if code == Result::SUCCESS => Ok(()),
            code => Err(code.into()),
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrVideoEncodeQueueExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_KHR_video_queue.
pub trait KhrVideoQueueExtensionDeviceCommands: DeviceV1_0 {
    /// Binds memory to the memory-bind indices of a video session.
    #[inline]
    unsafe fn bind_video_session_memory_khr(
        &self,
        video_session: VideoSessionKHR,
        bind_session_memory_infos: &[impl Cast<Target = BindVideoSessionMemoryInfoKHR>],
    ) -> crate::VkResult<()> {
        let __result = (self.commands().bind_video_session_memory_khr)(
            self.handle(),
            video_session,
            bind_session_memory_infos.len() as u32,
            bind_session_memory_infos.as_ptr().cast(),
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Begins a video coding scope in the command buffer.
    #[inline]
    unsafe fn cmd_begin_video_coding_khr(
        &self,
        command_buffer: CommandBuffer,
        begin_info: &VideoBeginCodingInfoKHR,
    ) {
        let __result = (self.commands().cmd_begin_video_coding_khr)(command_buffer, begin_info);
    }
    /// Records a video coding control operation (e.g. reset) in the current scope.
    #[inline]
    unsafe fn cmd_control_video_coding_khr(
        &self,
        command_buffer: CommandBuffer,
        coding_control_info: &VideoCodingControlInfoKHR,
    ) {
        let __result =
            (self.commands().cmd_control_video_coding_khr)(command_buffer, coding_control_info);
    }
    /// Ends the current video coding scope.
    #[inline]
    unsafe fn cmd_end_video_coding_khr(
        &self,
        command_buffer: CommandBuffer,
        end_coding_info: &VideoEndCodingInfoKHR,
    ) {
        let __result = (self.commands().cmd_end_video_coding_khr)(command_buffer, end_coding_info);
    }
    /// Creates a video session.
    #[inline]
    unsafe fn create_video_session_khr(
        &self,
        create_info: &VideoSessionCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<VideoSessionKHR> {
        let mut video_session = MaybeUninit::<VideoSessionKHR>::uninit();
        let __result = (self.commands().create_video_session_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            video_session.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(video_session.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Creates a video session parameters object.
    #[inline]
    unsafe fn create_video_session_parameters_khr(
        &self,
        create_info: &VideoSessionParametersCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<VideoSessionParametersKHR> {
        let mut video_session_parameters = MaybeUninit::<VideoSessionParametersKHR>::uninit();
        let __result = (self.commands().create_video_session_parameters_khr)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            video_session_parameters.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(video_session_parameters.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys a video session.
    #[inline]
    unsafe fn destroy_video_session_khr(
        &self,
        video_session: VideoSessionKHR,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_video_session_khr)(
            self.handle(),
            video_session,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Destroys a video session parameters object.
    #[inline]
    unsafe fn destroy_video_session_parameters_khr(
        &self,
        video_session_parameters: VideoSessionParametersKHR,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_video_session_parameters_khr)(
            self.handle(),
            video_session_parameters,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Lists the memory requirements of a video session's bind indices.
    #[inline]
    unsafe fn get_video_session_memory_requirements_khr(
        &self,
        video_session: VideoSessionKHR,
    ) -> crate::VkResult<Vec<VideoSessionMemoryRequirementsKHR>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut memory_requirements_count = 0;
        (self.commands().get_video_session_memory_requirements_khr)(
            self.handle(),
            video_session,
            &mut memory_requirements_count,
            ptr::null_mut(),
        );
        let mut memory_requirements = Vec::with_capacity(memory_requirements_count as usize);
        let __result = (self.commands().get_video_session_memory_requirements_khr)(
            self.handle(),
            video_session,
            &mut memory_requirements_count,
            memory_requirements.as_mut_ptr(),
        );
        debug_assert!(memory_requirements.capacity() >= memory_requirements_count as usize);
        // Assumes the driver initialized `memory_requirements_count` elements.
        memory_requirements.set_len(memory_requirements_count as usize);
        if __result == Result::SUCCESS {
            Ok(memory_requirements)
        } else {
            Err(__result.into())
        }
    }
    /// Applies an update to a video session parameters object.
    #[inline]
    unsafe fn update_video_session_parameters_khr(
        &self,
        video_session_parameters: VideoSessionParametersKHR,
        update_info: &VideoSessionParametersUpdateInfoKHR,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().update_video_session_parameters_khr)(
            self.handle(),
            video_session_parameters,
            update_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> KhrVideoQueueExtensionDeviceCommands for C {}
/// Instance-level command wrappers for VK_KHR_video_queue.
pub trait KhrVideoQueueExtensionInstanceCommands: InstanceV1_0 {
    /// Queries video coding capabilities for a profile into the caller's struct.
    #[inline]
    unsafe fn get_physical_device_video_capabilities_khr(
        &self,
        physical_device: PhysicalDevice,
        video_profile: &VideoProfileInfoKHR,
        capabilities: &mut VideoCapabilitiesKHR,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_physical_device_video_capabilities_khr)(
            physical_device,
            video_profile,
            capabilities,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Lists the image formats usable with the described video usage.
    #[inline]
    unsafe fn get_physical_device_video_format_properties_khr(
        &self,
        physical_device: PhysicalDevice,
        video_format_info: &PhysicalDeviceVideoFormatInfoKHR,
    ) -> crate::VkResult<Vec<VideoFormatPropertiesKHR>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut video_format_property_count = 0;
        (self
            .commands()
            .get_physical_device_video_format_properties_khr)(
            physical_device,
            video_format_info,
            &mut video_format_property_count,
            ptr::null_mut(),
        );
        let mut video_format_properties = Vec::with_capacity(video_format_property_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_video_format_properties_khr)(
            physical_device,
            video_format_info,
            &mut video_format_property_count,
            video_format_properties.as_mut_ptr(),
        );
        debug_assert!(video_format_properties.capacity() >= video_format_property_count as usize);
        // Assumes the driver initialized `video_format_property_count` elements.
        video_format_properties.set_len(video_format_property_count as usize);
        if __result == Result::SUCCESS {
            Ok(video_format_properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrVideoQueueExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_KHR_wayland_surface.
pub trait KhrWaylandSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by a Wayland `wl_surface`.
    #[inline]
    unsafe fn create_wayland_surface_khr(
        &self,
        create_info: &WaylandSurfaceCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_wayland_surface_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Returns whether a queue family can present to the given Wayland display.
    #[inline]
    unsafe fn get_physical_device_wayland_presentation_support_khr(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
        display: *mut wl_display,
    ) -> Bool32 {
        let __result = (self
            .commands()
            .get_physical_device_wayland_presentation_support_khr)(
            physical_device,
            queue_family_index,
            display,
        );
        __result
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrWaylandSurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_KHR_win32_surface.
pub trait KhrWin32SurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by a Win32 HWND.
    #[inline]
    unsafe fn create_win32_surface_khr(
        &self,
        create_info: &Win32SurfaceCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_win32_surface_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Returns whether a queue family supports Win32 presentation.
    #[inline]
    unsafe fn get_physical_device_win32_presentation_support_khr(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
    ) -> Bool32 {
        let __result = (self
            .commands()
            .get_physical_device_win32_presentation_support_khr)(
            physical_device,
            queue_family_index,
        );
        __result
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrWin32SurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_KHR_xcb_surface.
pub trait KhrXcbSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by an XCB window.
    #[inline]
    unsafe fn create_xcb_surface_khr(
        &self,
        create_info: &XcbSurfaceCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_xcb_surface_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Returns whether a queue family can present to the given XCB visual.
    #[inline]
    unsafe fn get_physical_device_xcb_presentation_support_khr(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
        connection: *mut xcb_connection_t,
        visual_id: xcb_visualid_t,
    ) -> Bool32 {
        let __result = (self
            .commands()
            .get_physical_device_xcb_presentation_support_khr)(
            physical_device,
            queue_family_index,
            connection,
            visual_id,
        );
        __result
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrXcbSurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_KHR_xlib_surface.
pub trait KhrXlibSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by an Xlib window.
    #[inline]
    unsafe fn create_xlib_surface_khr(
        &self,
        create_info: &XlibSurfaceCreateInfoKHR,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_xlib_surface_khr)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Returns whether a queue family can present to the given Xlib visual.
    ///
    /// NOTE(review): in the C API `dpy` is an *input* connection pointer, but this
    /// generated wrapper passes uninitialized memory and returns it as an output —
    /// verify against the vkGetPhysicalDeviceXlibPresentationSupportKHR spec before
    /// relying on the returned `Display`.
    #[inline]
    unsafe fn get_physical_device_xlib_presentation_support_khr(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
        visual_id: VisualID,
    ) -> (Bool32, Display) {
        let mut dpy = MaybeUninit::<Display>::uninit();
        let __result = (self
            .commands()
            .get_physical_device_xlib_presentation_support_khr)(
            physical_device,
            queue_family_index,
            dpy.as_mut_ptr(),
            visual_id,
        );
        (__result, dpy.assume_init())
    }
}
impl<C: InstanceV1_0 + ?Sized> KhrXlibSurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_MVK_ios_surface (deprecated extension).
pub trait MvkIosSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by an iOS view (MoltenVK).
    #[inline]
    unsafe fn create_ios_surface_mvk(
        &self,
        create_info: &IOSSurfaceCreateInfoMVK,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_ios_surface_mvk)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
#[allow(deprecated)]
impl<C: InstanceV1_0 + ?Sized> MvkIosSurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_MVK_macos_surface (deprecated extension).
pub trait MvkMacosSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by a macOS view (MoltenVK).
    #[inline]
    unsafe fn create_mac_os_surface_mvk(
        &self,
        create_info: &MacOSSurfaceCreateInfoMVK,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_mac_os_surface_mvk)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
#[allow(deprecated)]
impl<C: InstanceV1_0 + ?Sized> MvkMacosSurfaceExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_NN_vi_surface.
pub trait NnViSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a surface backed by an NN Vi layer.
    #[inline]
    unsafe fn create_vi_surface_nn(
        &self,
        create_info: &ViSurfaceCreateInfoNN,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let __result = (self.commands().create_vi_surface_nn)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            surface.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(surface.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NnViSurfaceExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_NVX_binary_import (CUDA module import).
pub trait NvxBinaryImportExtensionDeviceCommands: DeviceV1_0 {
    /// Records a launch of an imported CUDA kernel.
    #[inline]
    unsafe fn cmd_cu_launch_kernel_nvx(
        &self,
        command_buffer: CommandBuffer,
        launch_info: &CuLaunchInfoNVX,
    ) {
        let __result = (self.commands().cmd_cu_launch_kernel_nvx)(command_buffer, launch_info);
    }
    /// Creates a function handle within an imported CUDA module.
    #[inline]
    unsafe fn create_cu_function_nvx(
        &self,
        create_info: &CuFunctionCreateInfoNVX,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<CuFunctionNVX> {
        let mut function = MaybeUninit::<CuFunctionNVX>::uninit();
        let __result = (self.commands().create_cu_function_nvx)(
            self.handle(),
            create_info,
            // None maps to NULL (default allocator).
            allocator.map_or(ptr::null(), |v| v),
            function.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(function.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Imports a CUDA module from binary data.
    #[inline]
    unsafe fn create_cu_module_nvx(
        &self,
        create_info: &CuModuleCreateInfoNVX,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<CuModuleNVX> {
        let mut module = MaybeUninit::<CuModuleNVX>::uninit();
        let __result = (self.commands().create_cu_module_nvx)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            module.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(module.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys a CUDA function handle.
    #[inline]
    unsafe fn destroy_cu_function_nvx(
        &self,
        function: CuFunctionNVX,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_cu_function_nvx)(
            self.handle(),
            function,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Destroys an imported CUDA module.
    #[inline]
    unsafe fn destroy_cu_module_nvx(
        &self,
        module: CuModuleNVX,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_cu_module_nvx)(
            self.handle(),
            module,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvxBinaryImportExtensionDeviceCommands for C {}
/// Device-level command wrappers for VK_NVX_image_view_handle.
pub trait NvxImageViewHandleExtensionDeviceCommands: DeviceV1_0 {
    /// Returns the combined image-sampler index for the given view/sampler indices.
    #[inline]
    unsafe fn get_device_combined_image_sampler_index_nvx(
        &self,
        image_view_index: u64,
        sampler_index: u64,
    ) -> u64 {
        (self.commands().get_device_combined_image_sampler_index_nvx)(
            self.handle(),
            image_view_index,
            sampler_index,
        )
    }
    /// Fills `properties` with the device address and size of `image_view`.
    #[inline]
    unsafe fn get_image_view_address_nvx(
        &self,
        image_view: ImageView,
        properties: &mut ImageViewAddressPropertiesNVX,
    ) -> crate::VkResult<()> {
        match (self.commands().get_image_view_address_nvx)(self.handle(), image_view, properties) {
            code if code == Result::SUCCESS => Ok(()),
            code => Err(code.into()),
        }
    }
    /// Returns a 64-bit opaque handle for the described image view.
    #[inline]
    unsafe fn get_image_view_handle64_nvx(&self, info: &ImageViewHandleInfoNVX) -> u64 {
        (self.commands().get_image_view_handle64_nvx)(self.handle(), info)
    }
    /// Returns a 32-bit opaque handle for the described image view.
    #[inline]
    unsafe fn get_image_view_handle_nvx(&self, info: &ImageViewHandleInfoNVX) -> u32 {
        (self.commands().get_image_view_handle_nvx)(self.handle(), info)
    }
}
impl<C: DeviceV1_0 + ?Sized> NvxImageViewHandleExtensionDeviceCommands for C {}
/// Instance-level command wrappers for VK_NV_acquire_winrt_display.
pub trait NvAcquireWinrtDisplayExtensionInstanceCommands: InstanceV1_0 {
    /// Acquires exclusive control of a WinRT display.
    #[inline]
    unsafe fn acquire_winrt_display_nv(
        &self,
        physical_device: PhysicalDevice,
        display: DisplayKHR,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().acquire_winrt_display_nv)(physical_device, display);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Looks up the display handle matching a WinRT device-relative id.
    #[inline]
    unsafe fn get_winrt_display_nv(
        &self,
        physical_device: PhysicalDevice,
        device_relative_id: u32,
    ) -> crate::VkResult<DisplayKHR> {
        let mut display = MaybeUninit::<DisplayKHR>::uninit();
        let __result = (self.commands().get_winrt_display_nv)(
            physical_device,
            device_relative_id,
            display.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            // Assumed written by the driver on SUCCESS.
            Ok(display.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvAcquireWinrtDisplayExtensionInstanceCommands for C {}
/// Device-level command wrapper for VK_NV_clip_space_w_scaling.
pub trait NvClipSpaceWScalingExtensionDeviceCommands: DeviceV1_0 {
    /// Sets W-scaling factors for a range of viewports starting at `first_viewport`.
    #[inline]
    unsafe fn cmd_set_viewport_w_scaling_nv(
        &self,
        command_buffer: CommandBuffer,
        first_viewport: u32,
        viewport_w_scalings: &[impl Cast<Target = ViewportWScalingNV>],
    ) {
        let scaling_count = viewport_w_scalings.len() as u32;
        (self.commands().cmd_set_viewport_w_scaling_nv)(
            command_buffer,
            first_viewport,
            scaling_count,
            viewport_w_scalings.as_ptr().cast(),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvClipSpaceWScalingExtensionDeviceCommands for C {}
/// Device-level command wrappers for VK_NV_cluster_acceleration_structure.
pub trait NvClusterAccelerationStructureExtensionDeviceCommands: DeviceV1_0 {
    /// Records an indirect cluster acceleration structure build.
    #[inline]
    unsafe fn cmd_build_cluster_acceleration_structure_indirect_nv(
        &self,
        command_buffer: CommandBuffer,
        command_infos: &ClusterAccelerationStructureCommandsInfoNV,
    ) {
        let command = self
            .commands()
            .cmd_build_cluster_acceleration_structure_indirect_nv;
        command(command_buffer, command_infos);
    }
    /// Queries the build sizes for a cluster acceleration structure into `size_info`.
    #[inline]
    unsafe fn get_cluster_acceleration_structure_build_sizes_nv(
        &self,
        info: &ClusterAccelerationStructureInputInfoNV,
        size_info: &mut AccelerationStructureBuildSizesInfoKHR,
    ) {
        let command = self
            .commands()
            .get_cluster_acceleration_structure_build_sizes_nv;
        command(self.handle(), info, size_info);
    }
}
impl<C: DeviceV1_0 + ?Sized> NvClusterAccelerationStructureExtensionDeviceCommands for C {}
/// Device-level command wrapper for VK_NV_compute_occupancy_priority.
pub trait NvComputeOccupancyPriorityExtensionDeviceCommands: DeviceV1_0 {
    /// Sets the compute occupancy priority for subsequent dispatches.
    #[inline]
    unsafe fn cmd_set_compute_occupancy_priority_nv(
        &self,
        command_buffer: CommandBuffer,
        parameters: &ComputeOccupancyPriorityParametersNV,
    ) {
        (self.commands().cmd_set_compute_occupancy_priority_nv)(command_buffer, parameters);
    }
}
impl<C: DeviceV1_0 + ?Sized> NvComputeOccupancyPriorityExtensionDeviceCommands for C {}
/// Instance-level command wrappers for VK_NV_cooperative_matrix.
pub trait NvCooperativeMatrixExtensionInstanceCommands: InstanceV1_0 {
    /// Lists the cooperative matrix configurations the device supports.
    #[inline]
    unsafe fn get_physical_device_cooperative_matrix_properties_nv(
        &self,
        physical_device: PhysicalDevice,
    ) -> crate::VkResult<Vec<CooperativeMatrixPropertiesNV>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut property_count = 0;
        (self
            .commands()
            .get_physical_device_cooperative_matrix_properties_nv)(
            physical_device,
            &mut property_count,
            ptr::null_mut(),
        );
        let mut properties = Vec::with_capacity(property_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_cooperative_matrix_properties_nv)(
            physical_device,
            &mut property_count,
            properties.as_mut_ptr(),
        );
        debug_assert!(properties.capacity() >= property_count as usize);
        // Assumes the driver initialized `property_count` elements.
        properties.set_len(property_count as usize);
        if __result == Result::SUCCESS {
            Ok(properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvCooperativeMatrixExtensionInstanceCommands for C {}
/// Instance-level command wrappers for VK_NV_cooperative_matrix2.
pub trait NvCooperativeMatrix2ExtensionInstanceCommands: InstanceV1_0 {
    /// Lists the flexible-dimension cooperative matrix configurations supported.
    #[inline]
    unsafe fn get_physical_device_cooperative_matrix_flexible_dimensions_properties_nv(
        &self,
        physical_device: PhysicalDevice,
    ) -> crate::VkResult<Vec<CooperativeMatrixFlexibleDimensionsPropertiesNV>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut property_count = 0;
        (self
            .commands()
            .get_physical_device_cooperative_matrix_flexible_dimensions_properties_nv)(
            physical_device,
            &mut property_count,
            ptr::null_mut(),
        );
        let mut properties = Vec::with_capacity(property_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_cooperative_matrix_flexible_dimensions_properties_nv)(
            physical_device,
            &mut property_count,
            properties.as_mut_ptr(),
        );
        debug_assert!(properties.capacity() >= property_count as usize);
        // Assumes the driver initialized `property_count` elements.
        properties.set_len(property_count as usize);
        if __result == Result::SUCCESS {
            Ok(properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvCooperativeMatrix2ExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_NV_cooperative_vector.
pub trait NvCooperativeVectorExtensionDeviceCommands: DeviceV1_0 {
    /// Records cooperative vector matrix layout conversions on the device.
    #[inline]
    unsafe fn cmd_convert_cooperative_vector_matrix_nv(
        &self,
        command_buffer: CommandBuffer,
        infos: &[impl Cast<Target = ConvertCooperativeVectorMatrixInfoNV>],
    ) {
        let info_count = infos.len() as u32;
        (self.commands().cmd_convert_cooperative_vector_matrix_nv)(
            command_buffer,
            info_count,
            infos.as_ptr().cast(),
        );
    }
    /// Converts a cooperative vector matrix layout on the host.
    #[inline]
    unsafe fn convert_cooperative_vector_matrix_nv(
        &self,
        info: &ConvertCooperativeVectorMatrixInfoNV,
    ) -> crate::VkResult<()> {
        match (self.commands().convert_cooperative_vector_matrix_nv)(self.handle(), info) {
            code if code == Result::SUCCESS => Ok(()),
            code => Err(code.into()),
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> NvCooperativeVectorExtensionDeviceCommands for C {}
/// Instance-level command wrappers for VK_NV_cooperative_vector.
pub trait NvCooperativeVectorExtensionInstanceCommands: InstanceV1_0 {
    /// Lists the cooperative vector configurations the device supports.
    #[inline]
    unsafe fn get_physical_device_cooperative_vector_properties_nv(
        &self,
        physical_device: PhysicalDevice,
    ) -> crate::VkResult<Vec<CooperativeVectorPropertiesNV>> {
        // Two-call enumeration: first call retrieves the count only.
        let mut property_count = 0;
        (self
            .commands()
            .get_physical_device_cooperative_vector_properties_nv)(
            physical_device,
            &mut property_count,
            ptr::null_mut(),
        );
        let mut properties = Vec::with_capacity(property_count as usize);
        let __result = (self
            .commands()
            .get_physical_device_cooperative_vector_properties_nv)(
            physical_device,
            &mut property_count,
            properties.as_mut_ptr(),
        );
        debug_assert!(properties.capacity() >= property_count as usize);
        // Assumes the driver initialized `property_count` elements.
        properties.set_len(property_count as usize);
        if __result == Result::SUCCESS {
            Ok(properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvCooperativeVectorExtensionInstanceCommands for C {}
/// Device-level command wrappers for VK_NV_copy_memory_indirect.
pub trait NvCopyMemoryIndirectExtensionDeviceCommands: DeviceV1_0 {
    /// Records indirect memory-to-memory copies read from `copy_buffer_address`.
    #[inline]
    unsafe fn cmd_copy_memory_indirect_nv(
        &self,
        command_buffer: CommandBuffer,
        copy_buffer_address: DeviceAddress,
        copy_count: u32,
        stride: u32,
    ) {
        (self.commands().cmd_copy_memory_indirect_nv)(
            command_buffer,
            copy_buffer_address,
            copy_count,
            stride,
        );
    }
    /// Records indirect memory-to-image copies; one subresource entry per copy.
    #[inline]
    unsafe fn cmd_copy_memory_to_image_indirect_nv(
        &self,
        command_buffer: CommandBuffer,
        copy_buffer_address: DeviceAddress,
        copy_count: u32,
        stride: u32,
        dst_image: Image,
        dst_image_layout: ImageLayout,
        image_subresources: &[impl Cast<Target = ImageSubresourceLayers>],
    ) {
        (self.commands().cmd_copy_memory_to_image_indirect_nv)(
            command_buffer,
            copy_buffer_address,
            copy_count,
            stride,
            dst_image,
            dst_image_layout,
            image_subresources.as_ptr().cast(),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvCopyMemoryIndirectExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV coverage-reduction-mode extension.
pub trait NvCoverageReductionModeExtensionInstanceCommands: InstanceV1_0 {
    /// Enumerates supported framebuffer mixed-samples combinations via the
    /// two-call pattern (count query, then fill).
    #[inline]
    unsafe fn get_physical_device_supported_framebuffer_mixed_samples_combinations_nv(
        &self,
        physical_device: PhysicalDevice,
    ) -> crate::VkResult<Vec<FramebufferMixedSamplesCombinationNV>> {
        let mut combination_count = 0;
        // First call: only the count is written.
        (self
            .commands()
            .get_physical_device_supported_framebuffer_mixed_samples_combinations_nv)(
            physical_device,
            &mut combination_count,
            ptr::null_mut(),
        );
        let mut combinations = Vec::with_capacity(combination_count as usize);
        // Second call fills the uninitialized spare capacity.
        let __result = (self
            .commands()
            .get_physical_device_supported_framebuffer_mixed_samples_combinations_nv)(
            physical_device,
            &mut combination_count,
            combinations.as_mut_ptr(),
        );
        debug_assert!(combinations.capacity() >= combination_count as usize);
        // NOTE(review): set_len happens even on error results — confirm the count
        // stays valid in that case.
        combinations.set_len(combination_count as usize);
        if __result == Result::SUCCESS {
            Ok(combinations)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvCoverageReductionModeExtensionInstanceCommands for C {}
/// Device-level command wrappers for the NV CUDA kernel-launch extension.
/// NOTE(review): the blanket impl below is gated on `feature = "provisional"`
/// while the trait itself is not — presumably the whole module is gated
/// upstream; confirm.
pub trait NvCudaKernelLaunchExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_cuda_launch_kernel_nv` command.
    #[inline]
    unsafe fn cmd_cuda_launch_kernel_nv(
        &self,
        command_buffer: CommandBuffer,
        launch_info: &CudaLaunchInfoNV,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self.commands().cmd_cuda_launch_kernel_nv)(command_buffer, launch_info);
    }
    /// Creates a CUDA function object; the handle is written through an
    /// out-pointer and only read back on `SUCCESS`.
    #[inline]
    unsafe fn create_cuda_function_nv(
        &self,
        create_info: &CudaFunctionCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<CudaFunctionNV> {
        let mut function = MaybeUninit::<CudaFunctionNV>::uninit();
        let __result = (self.commands().create_cuda_function_nv)(
            self.handle(),
            create_info,
            // `None` becomes a null allocator pointer, i.e. default allocation.
            allocator.map_or(ptr::null(), |v| v),
            function.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(function.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Creates a CUDA module object; same out-pointer protocol as above.
    #[inline]
    unsafe fn create_cuda_module_nv(
        &self,
        create_info: &CudaModuleCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<CudaModuleNV> {
        let mut module = MaybeUninit::<CudaModuleNV>::uninit();
        let __result = (self.commands().create_cuda_module_nv)(
            self.handle(),
            create_info,
            allocator.map_or(ptr::null(), |v| v),
            module.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(module.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys a CUDA function object; no status is surfaced.
    #[inline]
    unsafe fn destroy_cuda_function_nv(
        &self,
        function: CudaFunctionNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_cuda_function_nv)(
            self.handle(),
            function,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Destroys a CUDA module object; no status is surfaced.
    #[inline]
    unsafe fn destroy_cuda_module_nv(
        &self,
        module: CudaModuleNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_cuda_module_nv)(
            self.handle(),
            module,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Reads back the module cache blob via the two-call pattern, returning the
    /// raw bytes.
    #[inline]
    unsafe fn get_cuda_module_cache_nv(&self, module: CudaModuleNV) -> crate::VkResult<Vec<u8>> {
        let mut cache_size = 0;
        // First call: null data pointer, only `cache_size` is written.
        (self.commands().get_cuda_module_cache_nv)(
            self.handle(),
            module,
            &mut cache_size,
            ptr::null_mut(),
        );
        let mut cache_data = Vec::with_capacity(cache_size as usize);
        // Second call fills the byte buffer; the pointer is passed as `*mut c_void`.
        let __result = (self.commands().get_cuda_module_cache_nv)(
            self.handle(),
            module,
            &mut cache_size,
            cache_data.as_mut_ptr() as *mut c_void,
        );
        debug_assert!(cache_data.capacity() >= cache_size as usize);
        // NOTE(review): set_len happens even on error results — confirm the size
        // stays valid in that case.
        cache_data.set_len(cache_size as usize);
        if __result == Result::SUCCESS {
            Ok(cache_data)
        } else {
            Err(__result.into())
        }
    }
}
#[cfg(feature = "provisional")]
impl<C: DeviceV1_0 + ?Sized> NvCudaKernelLaunchExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV device diagnostic checkpoints
/// extension.
pub trait NvDeviceDiagnosticCheckpointsExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_checkpoint_nv` command; `checkpoint_marker`
    /// is an opaque caller-provided pointer forwarded verbatim.
    #[inline]
    unsafe fn cmd_set_checkpoint_nv(
        &self,
        command_buffer: CommandBuffer,
        checkpoint_marker: *const c_void,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self.commands().cmd_set_checkpoint_nv)(command_buffer, checkpoint_marker);
    }
    /// Fetches pending checkpoint data (v2) for `queue` via the two-call
    /// pattern; infallible — no `Result` is surfaced by this command.
    #[inline]
    unsafe fn get_queue_checkpoint_data2_nv(&self, queue: Queue) -> Vec<CheckpointData2NV> {
        let mut checkpoint_data_count = 0;
        // First call: only the count is written.
        (self.commands().get_queue_checkpoint_data2_nv)(
            queue,
            &mut checkpoint_data_count,
            ptr::null_mut(),
        );
        let mut checkpoint_data = Vec::with_capacity(checkpoint_data_count as usize);
        // Second call fills the uninitialized spare capacity.
        let __result = (self.commands().get_queue_checkpoint_data2_nv)(
            queue,
            &mut checkpoint_data_count,
            checkpoint_data.as_mut_ptr(),
        );
        debug_assert!(checkpoint_data.capacity() >= checkpoint_data_count as usize);
        checkpoint_data.set_len(checkpoint_data_count as usize);
        checkpoint_data
    }
    /// Fetches pending checkpoint data (v1) for `queue`; same shape as the v2
    /// variant above.
    #[inline]
    unsafe fn get_queue_checkpoint_data_nv(&self, queue: Queue) -> Vec<CheckpointDataNV> {
        let mut checkpoint_data_count = 0;
        (self.commands().get_queue_checkpoint_data_nv)(
            queue,
            &mut checkpoint_data_count,
            ptr::null_mut(),
        );
        let mut checkpoint_data = Vec::with_capacity(checkpoint_data_count as usize);
        let __result = (self.commands().get_queue_checkpoint_data_nv)(
            queue,
            &mut checkpoint_data_count,
            checkpoint_data.as_mut_ptr(),
        );
        debug_assert!(checkpoint_data.capacity() >= checkpoint_data_count as usize);
        checkpoint_data.set_len(checkpoint_data_count as usize);
        checkpoint_data
    }
}
impl<C: DeviceV1_0 + ?Sized> NvDeviceDiagnosticCheckpointsExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV device-generated-commands extension.
pub trait NvDeviceGeneratedCommandsExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_bind_pipeline_shader_group_nv` command.
    #[inline]
    unsafe fn cmd_bind_pipeline_shader_group_nv(
        &self,
        command_buffer: CommandBuffer,
        pipeline_bind_point: PipelineBindPoint,
        pipeline: Pipeline,
        group_index: u32,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self.commands().cmd_bind_pipeline_shader_group_nv)(
            command_buffer,
            pipeline_bind_point,
            pipeline,
            group_index,
        );
    }
    /// Invokes the loaded `cmd_execute_generated_commands_nv` command; the Rust
    /// `bool` is widened to a Vulkan `Bool32`.
    #[inline]
    unsafe fn cmd_execute_generated_commands_nv(
        &self,
        command_buffer: CommandBuffer,
        is_preprocessed: bool,
        generated_commands_info: &GeneratedCommandsInfoNV,
    ) {
        let __result = (self.commands().cmd_execute_generated_commands_nv)(
            command_buffer,
            is_preprocessed as Bool32,
            generated_commands_info,
        );
    }
    /// Invokes the loaded `cmd_preprocess_generated_commands_nv` command.
    #[inline]
    unsafe fn cmd_preprocess_generated_commands_nv(
        &self,
        command_buffer: CommandBuffer,
        generated_commands_info: &GeneratedCommandsInfoNV,
    ) {
        let __result = (self.commands().cmd_preprocess_generated_commands_nv)(
            command_buffer,
            generated_commands_info,
        );
    }
    /// Creates an indirect-commands layout; the handle is read back only on
    /// `SUCCESS`.
    #[inline]
    unsafe fn create_indirect_commands_layout_nv(
        &self,
        create_info: &IndirectCommandsLayoutCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<IndirectCommandsLayoutNV> {
        let mut indirect_commands_layout = MaybeUninit::<IndirectCommandsLayoutNV>::uninit();
        let __result = (self.commands().create_indirect_commands_layout_nv)(
            self.handle(),
            create_info,
            // `None` becomes a null allocator pointer (default allocation).
            allocator.map_or(ptr::null(), |v| v),
            indirect_commands_layout.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(indirect_commands_layout.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys an indirect-commands layout; no status is surfaced.
    #[inline]
    unsafe fn destroy_indirect_commands_layout_nv(
        &self,
        indirect_commands_layout: IndirectCommandsLayoutNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_indirect_commands_layout_nv)(
            self.handle(),
            indirect_commands_layout,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Writes the memory requirements for generated commands into the
    /// caller-provided `memory_requirements` out-parameter.
    #[inline]
    unsafe fn get_generated_commands_memory_requirements_nv(
        &self,
        info: &GeneratedCommandsMemoryRequirementsInfoNV,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self
            .commands()
            .get_generated_commands_memory_requirements_nv)(
            self.handle(),
            info,
            memory_requirements,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvDeviceGeneratedCommandsExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV device-generated-commands-compute
/// extension.
pub trait NvDeviceGeneratedCommandsComputeExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_update_pipeline_indirect_buffer_nv` command.
    #[inline]
    unsafe fn cmd_update_pipeline_indirect_buffer_nv(
        &self,
        command_buffer: CommandBuffer,
        pipeline_bind_point: PipelineBindPoint,
        pipeline: Pipeline,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self.commands().cmd_update_pipeline_indirect_buffer_nv)(
            command_buffer,
            pipeline_bind_point,
            pipeline,
        );
    }
    /// Returns the device address reported by the loaded
    /// `get_pipeline_indirect_device_address_nv` command.
    #[inline]
    unsafe fn get_pipeline_indirect_device_address_nv(
        &self,
        info: &PipelineIndirectDeviceAddressInfoNV,
    ) -> DeviceAddress {
        let __result =
            (self.commands().get_pipeline_indirect_device_address_nv)(self.handle(), info);
        __result
    }
    /// Writes pipeline indirect memory requirements into the caller-provided
    /// out-parameter.
    #[inline]
    unsafe fn get_pipeline_indirect_memory_requirements_nv(
        &self,
        create_info: &ComputePipelineCreateInfo,
        memory_requirements: &mut MemoryRequirements2,
    ) {
        let __result = (self.commands().get_pipeline_indirect_memory_requirements_nv)(
            self.handle(),
            create_info,
            memory_requirements,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvDeviceGeneratedCommandsComputeExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV external compute queue extension.
pub trait NvExternalComputeQueueExtensionDeviceCommands: DeviceV1_0 {
    /// Creates an external compute queue; the handle is read back only on
    /// `SUCCESS`.
    #[inline]
    unsafe fn create_external_compute_queue_nv(
        &self,
        create_info: &ExternalComputeQueueCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<ExternalComputeQueueNV> {
        let mut external_queue = MaybeUninit::<ExternalComputeQueueNV>::uninit();
        let __result = (self.commands().create_external_compute_queue_nv)(
            self.handle(),
            create_info,
            // `None` becomes a null allocator pointer (default allocation).
            allocator.map_or(ptr::null(), |v| v),
            external_queue.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(external_queue.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys an external compute queue; no status is surfaced.
    #[inline]
    unsafe fn destroy_external_compute_queue_nv(
        &self,
        external_queue: ExternalComputeQueueNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_external_compute_queue_nv)(
            self.handle(),
            external_queue,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Invokes the loaded `get_external_compute_queue_data_nv` command; `data`
    /// is an opaque caller-provided output buffer pointer forwarded verbatim.
    #[inline]
    unsafe fn get_external_compute_queue_data_nv(
        &self,
        external_queue: ExternalComputeQueueNV,
        params: &mut ExternalComputeQueueDataParamsNV,
        data: *mut c_void,
    ) {
        let __result =
            (self.commands().get_external_compute_queue_data_nv)(external_queue, params, data);
    }
}
impl<C: DeviceV1_0 + ?Sized> NvExternalComputeQueueExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV external memory capabilities
/// extension.
pub trait NvExternalMemoryCapabilitiesExtensionInstanceCommands: InstanceV1_0 {
    /// Queries external image format properties for the given format/usage
    /// combination; the out-structure is read back only on `SUCCESS`.
    #[inline]
    unsafe fn get_physical_device_external_image_format_properties_nv(
        &self,
        physical_device: PhysicalDevice,
        format: Format,
        type_: ImageType,
        tiling: ImageTiling,
        usage: ImageUsageFlags,
        flags: ImageCreateFlags,
        external_handle_type: ExternalMemoryHandleTypeFlagsNV,
    ) -> crate::VkResult<ExternalImageFormatPropertiesNV> {
        let mut external_image_format_properties =
            MaybeUninit::<ExternalImageFormatPropertiesNV>::uninit();
        let __result = (self
            .commands()
            .get_physical_device_external_image_format_properties_nv)(
            physical_device,
            format,
            type_,
            tiling,
            usage,
            flags,
            external_handle_type,
            external_image_format_properties.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(external_image_format_properties.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
// The blanket impl references deprecated items, hence the allow.
#[allow(deprecated)]
impl<C: InstanceV1_0 + ?Sized> NvExternalMemoryCapabilitiesExtensionInstanceCommands for C {}
/// Device-level command wrappers for the NV external memory RDMA extension.
pub trait NvExternalMemoryRdmaExtensionDeviceCommands: DeviceV1_0 {
    /// Retrieves a remote address for device memory; the address is read back
    /// only on `SUCCESS`.
    #[inline]
    unsafe fn get_memory_remote_address_nv(
        &self,
        memory_get_remote_address_info: &MemoryGetRemoteAddressInfoNV,
    ) -> crate::VkResult<RemoteAddressNV> {
        let mut address = MaybeUninit::<RemoteAddressNV>::uninit();
        let __result = (self.commands().get_memory_remote_address_nv)(
            self.handle(),
            memory_get_remote_address_info,
            address.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(address.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> NvExternalMemoryRdmaExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV external memory SciBuf extension.
pub trait NvExternalMemorySciBufExtensionDeviceCommands: DeviceV1_0 {
    /// Exports device memory as an `NvSciBufObj`; the handle is read back only
    /// on `SUCCESS`.
    #[inline]
    unsafe fn get_memory_sci_buf_nv(
        &self,
        get_sci_buf_info: &MemoryGetSciBufInfoNV,
    ) -> crate::VkResult<NvSciBufObj> {
        let mut handle = MaybeUninit::<NvSciBufObj>::uninit();
        let __result = (self.commands().get_memory_sci_buf_nv)(
            self.handle(),
            get_sci_buf_info,
            handle.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(handle.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> NvExternalMemorySciBufExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV external memory SciBuf extension.
pub trait NvExternalMemorySciBufExtensionInstanceCommands: InstanceV1_0 {
    /// Queries SciBuf memory properties into the caller-provided out-parameter;
    /// maps any non-`SUCCESS` result to `Err`.
    #[inline]
    unsafe fn get_physical_device_external_memory_sci_buf_properties_nv(
        &self,
        physical_device: PhysicalDevice,
        handle_type: ExternalMemoryHandleTypeFlags,
        handle: NvSciBufObj,
        memory_sci_buf_properties: &mut MemorySciBufPropertiesNV,
    ) -> crate::VkResult<()> {
        let __result = (self
            .commands()
            .get_physical_device_external_memory_sci_buf_properties_nv)(
            physical_device,
            handle_type,
            handle,
            memory_sci_buf_properties,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Fills the caller-provided SciBuf attribute list; maps any non-`SUCCESS`
    /// result to `Err`.
    #[inline]
    unsafe fn get_physical_device_sci_buf_attributes_nv(
        &self,
        physical_device: PhysicalDevice,
        attributes: NvSciBufAttrList,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_physical_device_sci_buf_attributes_nv)(
            physical_device,
            attributes,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvExternalMemorySciBufExtensionInstanceCommands for C {}
/// Device-level command wrappers for the NV external memory Win32 extension.
pub trait NvExternalMemoryWin32ExtensionDeviceCommands: DeviceV1_0 {
    /// Exports device memory as a Win32 `HANDLE`; the handle is read back only
    /// on `SUCCESS`.
    #[inline]
    unsafe fn get_memory_win32_handle_nv(
        &self,
        memory: DeviceMemory,
        handle_type: ExternalMemoryHandleTypeFlagsNV,
    ) -> crate::VkResult<HANDLE> {
        let mut handle = MaybeUninit::<HANDLE>::uninit();
        let __result = (self.commands().get_memory_win32_handle_nv)(
            self.handle(),
            memory,
            handle_type,
            handle.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(handle.assume_init())
        } else {
            Err(__result.into())
        }
    }
}
// The blanket impl references deprecated items, hence the allow.
#[allow(deprecated)]
impl<C: DeviceV1_0 + ?Sized> NvExternalMemoryWin32ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV external SciSync extension (v1).
/// Each getter writes through an opaque caller-provided `handle` pointer; each
/// importer takes an import-info struct. All map non-`SUCCESS` to `Err`.
pub trait NvExternalSciSyncExtensionDeviceCommands: DeviceV1_0 {
    /// Exports a fence as an `NvSciSyncFence` through `handle`.
    #[inline]
    unsafe fn get_fence_sci_sync_fence_nv(
        &self,
        get_sci_sync_handle_info: &FenceGetSciSyncInfoNV,
        handle: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_fence_sci_sync_fence_nv)(
            self.handle(),
            get_sci_sync_handle_info,
            handle,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Exports a fence as an `NvSciSyncObj` through `handle`.
    #[inline]
    unsafe fn get_fence_sci_sync_obj_nv(
        &self,
        get_sci_sync_handle_info: &FenceGetSciSyncInfoNV,
        handle: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_fence_sci_sync_obj_nv)(
            self.handle(),
            get_sci_sync_handle_info,
            handle,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Exports a semaphore as an `NvSciSyncObj` through `handle`.
    #[inline]
    unsafe fn get_semaphore_sci_sync_obj_nv(
        &self,
        get_sci_sync_info: &SemaphoreGetSciSyncInfoNV,
        handle: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_semaphore_sci_sync_obj_nv)(
            self.handle(),
            get_sci_sync_info,
            handle,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Imports an external SciSync fence payload into a fence.
    #[inline]
    unsafe fn import_fence_sci_sync_fence_nv(
        &self,
        import_fence_sci_sync_info: &ImportFenceSciSyncInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().import_fence_sci_sync_fence_nv)(
            self.handle(),
            import_fence_sci_sync_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Imports an external SciSync object into a fence.
    #[inline]
    unsafe fn import_fence_sci_sync_obj_nv(
        &self,
        import_fence_sci_sync_info: &ImportFenceSciSyncInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().import_fence_sci_sync_obj_nv)(
            self.handle(),
            import_fence_sci_sync_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Imports an external SciSync object into a semaphore.
    #[inline]
    unsafe fn import_semaphore_sci_sync_obj_nv(
        &self,
        import_semaphore_sci_sync_info: &ImportSemaphoreSciSyncInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().import_semaphore_sci_sync_obj_nv)(
            self.handle(),
            import_semaphore_sci_sync_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
// The blanket impl references deprecated items, hence the allow.
#[allow(deprecated)]
impl<C: DeviceV1_0 + ?Sized> NvExternalSciSyncExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV external SciSync extension (v1).
pub trait NvExternalSciSyncExtensionInstanceCommands: InstanceV1_0 {
    /// Fills the caller-provided SciSync attribute list; maps any non-`SUCCESS`
    /// result to `Err`.
    #[inline]
    unsafe fn get_physical_device_sci_sync_attributes_nv(
        &self,
        physical_device: PhysicalDevice,
        sci_sync_attributes_info: &SciSyncAttributesInfoNV,
        attributes: NvSciSyncAttrList,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_physical_device_sci_sync_attributes_nv)(
            physical_device,
            sci_sync_attributes_info,
            attributes,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
// The blanket impl references deprecated items, hence the allow.
#[allow(deprecated)]
impl<C: InstanceV1_0 + ?Sized> NvExternalSciSyncExtensionInstanceCommands for C {}
/// Device-level command wrappers for the NV external SciSync extension (v2).
/// NOTE(review): the fence get/import methods duplicate the v1 trait's
/// signatures — both traits are blanket-implemented, so calls may need
/// disambiguation; this mirrors the underlying extension layout.
pub trait NvExternalSciSync2ExtensionDeviceCommands: DeviceV1_0 {
    /// Creates a semaphore SciSync pool; the handle is read back only on
    /// `SUCCESS`.
    #[inline]
    unsafe fn create_semaphore_sci_sync_pool_nv(
        &self,
        create_info: &SemaphoreSciSyncPoolCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SemaphoreSciSyncPoolNV> {
        let mut semaphore_pool = MaybeUninit::<SemaphoreSciSyncPoolNV>::uninit();
        let __result = (self.commands().create_semaphore_sci_sync_pool_nv)(
            self.handle(),
            create_info,
            // `None` becomes a null allocator pointer (default allocation).
            allocator.map_or(ptr::null(), |v| v),
            semaphore_pool.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(semaphore_pool.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys a semaphore SciSync pool; no status is surfaced.
    #[inline]
    unsafe fn destroy_semaphore_sci_sync_pool_nv(
        &self,
        semaphore_pool: SemaphoreSciSyncPoolNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_semaphore_sci_sync_pool_nv)(
            self.handle(),
            semaphore_pool,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
    /// Exports a fence as an `NvSciSyncFence` through the opaque `handle` pointer.
    #[inline]
    unsafe fn get_fence_sci_sync_fence_nv(
        &self,
        get_sci_sync_handle_info: &FenceGetSciSyncInfoNV,
        handle: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_fence_sci_sync_fence_nv)(
            self.handle(),
            get_sci_sync_handle_info,
            handle,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Exports a fence as an `NvSciSyncObj` through the opaque `handle` pointer.
    #[inline]
    unsafe fn get_fence_sci_sync_obj_nv(
        &self,
        get_sci_sync_handle_info: &FenceGetSciSyncInfoNV,
        handle: *mut c_void,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_fence_sci_sync_obj_nv)(
            self.handle(),
            get_sci_sync_handle_info,
            handle,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Imports an external SciSync fence payload into a fence.
    #[inline]
    unsafe fn import_fence_sci_sync_fence_nv(
        &self,
        import_fence_sci_sync_info: &ImportFenceSciSyncInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().import_fence_sci_sync_fence_nv)(
            self.handle(),
            import_fence_sci_sync_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Imports an external SciSync object into a fence.
    #[inline]
    unsafe fn import_fence_sci_sync_obj_nv(
        &self,
        import_fence_sci_sync_info: &ImportFenceSciSyncInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().import_fence_sci_sync_obj_nv)(
            self.handle(),
            import_fence_sci_sync_info,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> NvExternalSciSync2ExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV external SciSync extension (v2).
pub trait NvExternalSciSync2ExtensionInstanceCommands: InstanceV1_0 {
    /// Fills the caller-provided SciSync attribute list; maps any non-`SUCCESS`
    /// result to `Err`. Same loaded command as the v1 instance trait.
    #[inline]
    unsafe fn get_physical_device_sci_sync_attributes_nv(
        &self,
        physical_device: PhysicalDevice,
        sci_sync_attributes_info: &SciSyncAttributesInfoNV,
        attributes: NvSciSyncAttrList,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().get_physical_device_sci_sync_attributes_nv)(
            physical_device,
            sci_sync_attributes_info,
            attributes,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvExternalSciSync2ExtensionInstanceCommands for C {}
/// Device-level command wrapper for the NV fragment shading rate enums
/// extension.
pub trait NvFragmentShadingRateEnumsExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_set_fragment_shading_rate_enum_nv` command; the
    /// fixed-size pair of combiner ops is lowered to a raw pointer.
    #[inline]
    unsafe fn cmd_set_fragment_shading_rate_enum_nv(
        &self,
        command_buffer: CommandBuffer,
        shading_rate: FragmentShadingRateNV,
        combiner_ops: [FragmentShadingRateCombinerOpKHR; 2],
    ) {
        let set_rate_enum = self.commands().cmd_set_fragment_shading_rate_enum_nv;
        // No status is returned; the result is intentionally discarded.
        let _ = set_rate_enum(command_buffer, shading_rate, combiner_ops.as_ptr());
    }
}
impl<C: DeviceV1_0 + ?Sized> NvFragmentShadingRateEnumsExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV low latency 2 extension.
pub trait NvLowLatency2ExtensionDeviceCommands: DeviceV1_0 {
    /// Writes latency timing data for `swapchain` into the caller-provided
    /// `latency_marker_info` structure.
    #[inline]
    unsafe fn get_latency_timings_nv(
        &self,
        swapchain: SwapchainKHR,
        latency_marker_info: &mut GetLatencyMarkerInfoNV,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result =
            (self.commands().get_latency_timings_nv)(self.handle(), swapchain, latency_marker_info);
    }
    /// Invokes the loaded `latency_sleep_nv` command; maps any non-`SUCCESS`
    /// result to `Err`.
    #[inline]
    unsafe fn latency_sleep_nv(
        &self,
        swapchain: SwapchainKHR,
        sleep_info: &LatencySleepInfoNV,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().latency_sleep_nv)(self.handle(), swapchain, sleep_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Notifies the driver of out-of-band queue usage; no status is surfaced.
    #[inline]
    unsafe fn queue_notify_out_of_band_nv(
        &self,
        queue: Queue,
        queue_type_info: &OutOfBandQueueTypeInfoNV,
    ) {
        let __result = (self.commands().queue_notify_out_of_band_nv)(queue, queue_type_info);
    }
    /// Records a latency marker for `swapchain`; no status is surfaced.
    #[inline]
    unsafe fn set_latency_marker_nv(
        &self,
        swapchain: SwapchainKHR,
        latency_marker_info: &SetLatencyMarkerInfoNV,
    ) {
        let __result =
            (self.commands().set_latency_marker_nv)(self.handle(), swapchain, latency_marker_info);
    }
    /// Configures latency sleep mode for `swapchain`; maps any non-`SUCCESS`
    /// result to `Err`.
    #[inline]
    unsafe fn set_latency_sleep_mode_nv(
        &self,
        swapchain: SwapchainKHR,
        sleep_mode_info: &LatencySleepModeInfoNV,
    ) -> crate::VkResult<()> {
        let __result =
            (self.commands().set_latency_sleep_mode_nv)(self.handle(), swapchain, sleep_mode_info);
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
}
impl<C: DeviceV1_0 + ?Sized> NvLowLatency2ExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV memory decompression extension.
pub trait NvMemoryDecompressionExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_decompress_memory_indirect_count_nv` command;
    /// all arguments are forwarded unchanged.
    #[inline]
    unsafe fn cmd_decompress_memory_indirect_count_nv(
        &self,
        command_buffer: CommandBuffer,
        indirect_commands_address: DeviceAddress,
        indirect_commands_count_address: DeviceAddress,
        stride: u32,
    ) {
        let decompress_indirect = self.commands().cmd_decompress_memory_indirect_count_nv;
        // No status is returned; the result is intentionally discarded.
        let _ = decompress_indirect(
            command_buffer,
            indirect_commands_address,
            indirect_commands_count_address,
            stride,
        );
    }
    /// Invokes the loaded `cmd_decompress_memory_nv` command, lowering the
    /// region slice to a (count, raw pointer) pair.
    #[inline]
    unsafe fn cmd_decompress_memory_nv(
        &self,
        command_buffer: CommandBuffer,
        decompress_memory_regions: &[impl Cast<Target = DecompressMemoryRegionNV>],
    ) {
        let region_count = decompress_memory_regions.len() as u32;
        let region_ptr = decompress_memory_regions.as_ptr().cast();
        let _ =
            (self.commands().cmd_decompress_memory_nv)(command_buffer, region_count, region_ptr);
    }
}
impl<C: DeviceV1_0 + ?Sized> NvMemoryDecompressionExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV mesh shader extension.
pub trait NvMeshShaderExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_draw_mesh_tasks_indirect_count_nv` command.
    #[inline]
    unsafe fn cmd_draw_mesh_tasks_indirect_count_nv(
        &self,
        command_buffer: CommandBuffer,
        buffer: Buffer,
        offset: DeviceSize,
        count_buffer: Buffer,
        count_buffer_offset: DeviceSize,
        max_draw_count: u32,
        stride: u32,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self.commands().cmd_draw_mesh_tasks_indirect_count_nv)(
            command_buffer,
            buffer,
            offset,
            count_buffer,
            count_buffer_offset,
            max_draw_count,
            stride,
        );
    }
    /// Invokes the loaded `cmd_draw_mesh_tasks_indirect_nv` command.
    #[inline]
    unsafe fn cmd_draw_mesh_tasks_indirect_nv(
        &self,
        command_buffer: CommandBuffer,
        buffer: Buffer,
        offset: DeviceSize,
        draw_count: u32,
        stride: u32,
    ) {
        let __result = (self.commands().cmd_draw_mesh_tasks_indirect_nv)(
            command_buffer,
            buffer,
            offset,
            draw_count,
            stride,
        );
    }
    /// Invokes the loaded `cmd_draw_mesh_tasks_nv` command.
    #[inline]
    unsafe fn cmd_draw_mesh_tasks_nv(
        &self,
        command_buffer: CommandBuffer,
        task_count: u32,
        first_task: u32,
    ) {
        let __result =
            (self.commands().cmd_draw_mesh_tasks_nv)(command_buffer, task_count, first_task);
    }
}
impl<C: DeviceV1_0 + ?Sized> NvMeshShaderExtensionDeviceCommands for C {}
/// Device-level command wrappers for the NV optical flow extension.
pub trait NvOpticalFlowExtensionDeviceCommands: DeviceV1_0 {
    /// Binds an image view to an optical flow session binding point; maps any
    /// non-`SUCCESS` result to `Err`.
    #[inline]
    unsafe fn bind_optical_flow_session_image_nv(
        &self,
        session: OpticalFlowSessionNV,
        binding_point: OpticalFlowSessionBindingPointNV,
        view: ImageView,
        layout: ImageLayout,
    ) -> crate::VkResult<()> {
        let __result = (self.commands().bind_optical_flow_session_image_nv)(
            self.handle(),
            session,
            binding_point,
            view,
            layout,
        );
        if __result == Result::SUCCESS {
            Ok(())
        } else {
            Err(__result.into())
        }
    }
    /// Invokes the loaded `cmd_optical_flow_execute_nv` command.
    #[inline]
    unsafe fn cmd_optical_flow_execute_nv(
        &self,
        command_buffer: CommandBuffer,
        session: OpticalFlowSessionNV,
        execute_info: &OpticalFlowExecuteInfoNV,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result =
            (self.commands().cmd_optical_flow_execute_nv)(command_buffer, session, execute_info);
    }
    /// Creates an optical flow session; the handle is read back only on
    /// `SUCCESS`.
    #[inline]
    unsafe fn create_optical_flow_session_nv(
        &self,
        create_info: &OpticalFlowSessionCreateInfoNV,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<OpticalFlowSessionNV> {
        let mut session = MaybeUninit::<OpticalFlowSessionNV>::uninit();
        let __result = (self.commands().create_optical_flow_session_nv)(
            self.handle(),
            create_info,
            // `None` becomes a null allocator pointer (default allocation).
            allocator.map_or(ptr::null(), |v| v),
            session.as_mut_ptr(),
        );
        if __result == Result::SUCCESS {
            Ok(session.assume_init())
        } else {
            Err(__result.into())
        }
    }
    /// Destroys an optical flow session; no status is surfaced.
    #[inline]
    unsafe fn destroy_optical_flow_session_nv(
        &self,
        session: OpticalFlowSessionNV,
        allocator: Option<&AllocationCallbacks>,
    ) {
        let __result = (self.commands().destroy_optical_flow_session_nv)(
            self.handle(),
            session,
            allocator.map_or(ptr::null(), |v| v),
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvOpticalFlowExtensionDeviceCommands for C {}
/// Instance-level command wrappers for the NV optical flow extension.
pub trait NvOpticalFlowExtensionInstanceCommands: InstanceV1_0 {
    /// Enumerates optical-flow image format properties via the two-call pattern
    /// (count query, then fill).
    #[inline]
    unsafe fn get_physical_device_optical_flow_image_formats_nv(
        &self,
        physical_device: PhysicalDevice,
        optical_flow_image_format_info: &OpticalFlowImageFormatInfoNV,
    ) -> crate::VkResult<Vec<OpticalFlowImageFormatPropertiesNV>> {
        let mut format_count = 0;
        // First call: only the count is written.
        (self
            .commands()
            .get_physical_device_optical_flow_image_formats_nv)(
            physical_device,
            optical_flow_image_format_info,
            &mut format_count,
            ptr::null_mut(),
        );
        let mut image_format_properties = Vec::with_capacity(format_count as usize);
        // Second call fills the uninitialized spare capacity.
        let __result = (self
            .commands()
            .get_physical_device_optical_flow_image_formats_nv)(
            physical_device,
            optical_flow_image_format_info,
            &mut format_count,
            image_format_properties.as_mut_ptr(),
        );
        debug_assert!(image_format_properties.capacity() >= format_count as usize);
        // NOTE(review): set_len happens even on error results — confirm the count
        // stays valid in that case.
        image_format_properties.set_len(format_count as usize);
        if __result == Result::SUCCESS {
            Ok(image_format_properties)
        } else {
            Err(__result.into())
        }
    }
}
impl<C: InstanceV1_0 + ?Sized> NvOpticalFlowExtensionInstanceCommands for C {}
/// Device-level command wrappers for the NV partitioned acceleration structure
/// extension.
pub trait NvPartitionedAccelerationStructureExtensionDeviceCommands: DeviceV1_0 {
    /// Invokes the loaded `cmd_build_partitioned_acceleration_structures_nv`
    /// command.
    #[inline]
    unsafe fn cmd_build_partitioned_acceleration_structures_nv(
        &self,
        command_buffer: CommandBuffer,
        build_info: &BuildPartitionedAccelerationStructureInfoNV,
    ) {
        // Result binding is unused; no status is returned to the caller.
        let __result = (self
            .commands()
            .cmd_build_partitioned_acceleration_structures_nv)(
            command_buffer, build_info
        );
    }
    /// Writes build-size requirements into the caller-provided `size_info`
    /// out-parameter.
    #[inline]
    unsafe fn get_partitioned_acceleration_structures_build_sizes_nv(
        &self,
        info: &PartitionedAccelerationStructureInstancesInputNV,
        size_info: &mut AccelerationStructureBuildSizesInfoKHR,
    ) {
        let __result = (self
            .commands()
            .get_partitioned_acceleration_structures_build_sizes_nv)(
            self.handle(),
            info,
            size_info,
        );
    }
}
impl<C: DeviceV1_0 + ?Sized> NvPartitionedAccelerationStructureExtensionDeviceCommands for C {}
pub trait NvRayTracingExtensionDeviceCommands: DeviceV1_0 {
#[inline]
unsafe fn bind_acceleration_structure_memory_nv(
&self,
bind_infos: &[impl Cast<Target = BindAccelerationStructureMemoryInfoNV>],
) -> crate::VkResult<()> {
let __result = (self.commands().bind_acceleration_structure_memory_nv)(
self.handle(),
bind_infos.len() as u32,
bind_infos.as_ptr().cast(),
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
#[inline]
unsafe fn cmd_build_acceleration_structure_nv(
&self,
command_buffer: CommandBuffer,
info: &AccelerationStructureInfoNV,
instance_data: Buffer,
instance_offset: DeviceSize,
update: bool,
dst: AccelerationStructureNV,
src: AccelerationStructureNV,
scratch: Buffer,
scratch_offset: DeviceSize,
) {
let __result = (self.commands().cmd_build_acceleration_structure_nv)(
command_buffer,
info,
instance_data,
instance_offset,
update as Bool32,
dst,
src,
scratch,
scratch_offset,
);
}
#[inline]
unsafe fn cmd_copy_acceleration_structure_nv(
&self,
command_buffer: CommandBuffer,
dst: AccelerationStructureNV,
src: AccelerationStructureNV,
mode: CopyAccelerationStructureModeKHR,
) {
let __result =
(self.commands().cmd_copy_acceleration_structure_nv)(command_buffer, dst, src, mode);
}
#[inline]
unsafe fn cmd_trace_rays_nv(
&self,
command_buffer: CommandBuffer,
raygen_shader_binding_table_buffer: Buffer,
raygen_shader_binding_offset: DeviceSize,
miss_shader_binding_table_buffer: Buffer,
miss_shader_binding_offset: DeviceSize,
miss_shader_binding_stride: DeviceSize,
hit_shader_binding_table_buffer: Buffer,
hit_shader_binding_offset: DeviceSize,
hit_shader_binding_stride: DeviceSize,
callable_shader_binding_table_buffer: Buffer,
callable_shader_binding_offset: DeviceSize,
callable_shader_binding_stride: DeviceSize,
width: u32,
height: u32,
depth: u32,
) {
let __result = (self.commands().cmd_trace_rays_nv)(
command_buffer,
raygen_shader_binding_table_buffer,
raygen_shader_binding_offset,
miss_shader_binding_table_buffer,
miss_shader_binding_offset,
miss_shader_binding_stride,
hit_shader_binding_table_buffer,
hit_shader_binding_offset,
hit_shader_binding_stride,
callable_shader_binding_table_buffer,
callable_shader_binding_offset,
callable_shader_binding_stride,
width,
height,
depth,
);
}
#[inline]
unsafe fn cmd_write_acceleration_structures_properties_nv(
&self,
command_buffer: CommandBuffer,
acceleration_structures: &[AccelerationStructureNV],
query_type: QueryType,
query_pool: QueryPool,
first_query: u32,
) {
let __result = (self
.commands()
.cmd_write_acceleration_structures_properties_nv)(
command_buffer,
acceleration_structures.len() as u32,
acceleration_structures.as_ptr(),
query_type,
query_pool,
first_query,
);
}
#[inline]
unsafe fn compile_deferred_nv(&self, pipeline: Pipeline, shader: u32) -> crate::VkResult<()> {
let __result = (self.commands().compile_deferred_nv)(self.handle(), pipeline, shader);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
#[inline]
unsafe fn create_acceleration_structure_nv(
&self,
create_info: &AccelerationStructureCreateInfoNV,
allocator: Option<&AllocationCallbacks>,
) -> crate::VkResult<AccelerationStructureNV> {
let mut acceleration_structure = MaybeUninit::<AccelerationStructureNV>::uninit();
let __result = (self.commands().create_acceleration_structure_nv)(
self.handle(),
create_info,
allocator.map_or(ptr::null(), |v| v),
acceleration_structure.as_mut_ptr(),
);
if __result == Result::SUCCESS {
Ok(acceleration_structure.assume_init())
} else {
Err(__result.into())
}
}
#[inline]
unsafe fn create_ray_tracing_pipelines_nv(
&self,
pipeline_cache: PipelineCache,
create_infos: &[impl Cast<Target = RayTracingPipelineCreateInfoNV>],
allocator: Option<&AllocationCallbacks>,
) -> crate::VkSuccessResult<Vec<Pipeline>> {
let mut pipelines = Vec::with_capacity(create_infos.len() as usize);
let __result = (self.commands().create_ray_tracing_pipelines_nv)(
self.handle(),
pipeline_cache,
create_infos.len() as u32,
create_infos.as_ptr().cast(),
allocator.map_or(ptr::null(), |v| v),
pipelines.as_mut_ptr(),
);
pipelines.set_len(create_infos.len() as usize);
if __result >= Result::SUCCESS {
Ok((pipelines, __result.into()))
} else {
Err(__result.into())
}
}
#[inline]
unsafe fn destroy_acceleration_structure_nv(
&self,
acceleration_structure: AccelerationStructureNV,
allocator: Option<&AllocationCallbacks>,
) {
let __result = (self.commands().destroy_acceleration_structure_nv)(
self.handle(),
acceleration_structure,
allocator.map_or(ptr::null(), |v| v),
);
}
#[inline]
unsafe fn get_acceleration_structure_handle_nv(
&self,
acceleration_structure: AccelerationStructureNV,
data: &mut [u8],
) -> crate::VkResult<()> {
let __result = (self.commands().get_acceleration_structure_handle_nv)(
self.handle(),
acceleration_structure,
data.len() as usize,
data.as_mut_ptr() as *mut c_void,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Queries memory requirements for an NV acceleration structure
/// (vkGetAccelerationStructureMemoryRequirementsNV).
#[inline]
unsafe fn get_acceleration_structure_memory_requirements_nv(
    &self,
    info: &AccelerationStructureMemoryRequirementsInfoNV,
) -> MemoryRequirements2KHR {
    let mut requirements = MaybeUninit::<MemoryRequirements2KHR>::uninit();
    let query = self
        .commands()
        .get_acceleration_structure_memory_requirements_nv;
    query(self.handle(), info, requirements.as_mut_ptr());
    // The original wrapper reads this back unconditionally, i.e. the call is
    // assumed to always initialize the output structure.
    requirements.assume_init()
}
/// Copies the shader group handles of `pipeline` into `data`
/// (vkGetRayTracingShaderGroupHandlesNV).
#[inline]
unsafe fn get_ray_tracing_shader_group_handles_nv(
    &self,
    pipeline: Pipeline,
    first_group: u32,
    group_count: u32,
    data: &mut [u8],
) -> crate::VkResult<()> {
    let code = (self.commands().get_ray_tracing_shader_group_handles_nv)(
        self.handle(),
        pipeline,
        first_group,
        group_count,
        data.len(),
        data.as_mut_ptr().cast::<c_void>(),
    );
    // Guard style: bail out on any non-success code.
    if code != Result::SUCCESS {
        return Err(code.into());
    }
    Ok(())
}
}
// Blanket impl: any `DeviceV1_0` implementor gains the `VK_NV_ray_tracing`
// wrappers. NOTE(review): `allow(deprecated)` suggests the trait or items it
// references carry a deprecation attribute elsewhere — confirm before removing.
#[allow(deprecated)]
impl<C: DeviceV1_0 + ?Sized> NvRayTracingExtensionDeviceCommands for C {}
/// Device commands for `VK_NV_scissor_exclusive`.
pub trait NvScissorExclusiveExtensionDeviceCommands: DeviceV1_0 {
    /// Enables/disables exclusive scissors starting at `first_exclusive_scissor`
    /// (vkCmdSetExclusiveScissorEnableNV).
    #[inline]
    unsafe fn cmd_set_exclusive_scissor_enable_nv(
        &self,
        command_buffer: CommandBuffer,
        first_exclusive_scissor: u32,
        exclusive_scissor_enables: &[Bool32],
    ) {
        let set_enables = self.commands().cmd_set_exclusive_scissor_enable_nv;
        set_enables(
            command_buffer,
            first_exclusive_scissor,
            exclusive_scissor_enables.len() as u32,
            exclusive_scissor_enables.as_ptr(),
        );
    }

    /// Sets the exclusive scissor rectangles (vkCmdSetExclusiveScissorNV).
    #[inline]
    unsafe fn cmd_set_exclusive_scissor_nv(
        &self,
        command_buffer: CommandBuffer,
        first_exclusive_scissor: u32,
        exclusive_scissors: &[impl Cast<Target = Rect2D>],
    ) {
        let set_scissors = self.commands().cmd_set_exclusive_scissor_nv;
        set_scissors(
            command_buffer,
            first_exclusive_scissor,
            exclusive_scissors.len() as u32,
            exclusive_scissors.as_ptr().cast(),
        );
    }
}

impl<C: DeviceV1_0 + ?Sized> NvScissorExclusiveExtensionDeviceCommands for C {}
/// Device commands for `VK_NV_shading_rate_image`.
pub trait NvShadingRateImageExtensionDeviceCommands: DeviceV1_0 {
    /// Binds `image_view` in `image_layout` as the shading-rate image
    /// (vkCmdBindShadingRateImageNV).
    #[inline]
    unsafe fn cmd_bind_shading_rate_image_nv(
        &self,
        command_buffer: CommandBuffer,
        image_view: ImageView,
        image_layout: ImageLayout,
    ) {
        let bind = self.commands().cmd_bind_shading_rate_image_nv;
        bind(command_buffer, image_view, image_layout);
    }

    /// Sets the coarse sample ordering (vkCmdSetCoarseSampleOrderNV).
    #[inline]
    unsafe fn cmd_set_coarse_sample_order_nv(
        &self,
        command_buffer: CommandBuffer,
        sample_order_type: CoarseSampleOrderTypeNV,
        custom_sample_orders: &[impl Cast<Target = CoarseSampleOrderCustomNV>],
    ) {
        let set_order = self.commands().cmd_set_coarse_sample_order_nv;
        set_order(
            command_buffer,
            sample_order_type,
            custom_sample_orders.len() as u32,
            custom_sample_orders.as_ptr().cast(),
        );
    }

    /// Sets per-viewport shading-rate palettes starting at `first_viewport`
    /// (vkCmdSetViewportShadingRatePaletteNV).
    #[inline]
    unsafe fn cmd_set_viewport_shading_rate_palette_nv(
        &self,
        command_buffer: CommandBuffer,
        first_viewport: u32,
        shading_rate_palettes: &[impl Cast<Target = ShadingRatePaletteNV>],
    ) {
        let set_palettes = self.commands().cmd_set_viewport_shading_rate_palette_nv;
        set_palettes(
            command_buffer,
            first_viewport,
            shading_rate_palettes.len() as u32,
            shading_rate_palettes.as_ptr().cast(),
        );
    }
}

impl<C: DeviceV1_0 + ?Sized> NvShadingRateImageExtensionDeviceCommands for C {}
/// Device commands for `VK_OHOS_external_memory`.
pub trait OhosExternalMemoryExtensionDeviceCommands: DeviceV1_0 {
    /// Exports device memory described by `info` as an `OH_NativeBuffer`
    /// pointer (vkGetMemoryNativeBufferOHOS).
    #[inline]
    unsafe fn get_memory_native_buffer_ohos(
        &self,
        info: &MemoryGetNativeBufferInfoOHOS,
    ) -> crate::VkResult<*mut OH_NativeBuffer> {
        let mut native_buffer = MaybeUninit::<*mut OH_NativeBuffer>::uninit();
        let code = (self.commands().get_memory_native_buffer_ohos)(
            self.handle(),
            info,
            native_buffer.as_mut_ptr(),
        );
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        // Only read the output pointer once the driver reported SUCCESS.
        Ok(native_buffer.assume_init())
    }

    /// Queries Vulkan-relevant properties of a native buffer
    /// (vkGetNativeBufferPropertiesOHOS); fills `properties` in place.
    #[inline]
    unsafe fn get_native_buffer_properties_ohos(
        &self,
        buffer: &OH_NativeBuffer,
        properties: &mut NativeBufferPropertiesOHOS,
    ) -> crate::VkResult<()> {
        let query = self.commands().get_native_buffer_properties_ohos;
        let code = query(self.handle(), buffer, properties);
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        Ok(())
    }
}

impl<C: DeviceV1_0 + ?Sized> OhosExternalMemoryExtensionDeviceCommands for C {}
/// Instance commands for `VK_OHOS_surface`.
pub trait OhosSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a presentation surface from `create_info`
    /// (vkCreateSurfaceOHOS).
    #[inline]
    unsafe fn create_surface_ohos(
        &self,
        create_info: &SurfaceCreateInfoOHOS,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        // Resolve the optional host allocator to the raw pointer the C ABI expects.
        let allocator_ptr = match allocator {
            Some(callbacks) => callbacks as *const AllocationCallbacks,
            None => ptr::null(),
        };
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let code = (self.commands().create_surface_ohos)(
            self.handle(),
            create_info,
            allocator_ptr,
            surface.as_mut_ptr(),
        );
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        // Only read the handle once the driver reported SUCCESS.
        Ok(surface.assume_init())
    }
}

impl<C: InstanceV1_0 + ?Sized> OhosSurfaceExtensionInstanceCommands for C {}
/// Device commands for `VK_QCOM_tile_memory_heap`.
pub trait QcomTileMemoryHeapExtensionDeviceCommands: DeviceV1_0 {
    /// Binds tile memory described by `tile_memory_bind_info`; a `None` bind
    /// info is forwarded as a null pointer (vkCmdBindTileMemoryQCOM).
    #[inline]
    unsafe fn cmd_bind_tile_memory_qcom(
        &self,
        command_buffer: CommandBuffer,
        tile_memory_bind_info: Option<&TileMemoryBindInfoQCOM>,
    ) {
        let bind_info_ptr = match tile_memory_bind_info {
            Some(bind_info) => bind_info as *const TileMemoryBindInfoQCOM,
            None => ptr::null(),
        };
        let bind = self.commands().cmd_bind_tile_memory_qcom;
        bind(command_buffer, bind_info_ptr);
    }
}

impl<C: DeviceV1_0 + ?Sized> QcomTileMemoryHeapExtensionDeviceCommands for C {}
/// Device commands for `VK_QCOM_tile_properties`.
pub trait QcomTilePropertiesExtensionDeviceCommands: DeviceV1_0 {
/// Queries tile properties for a dynamic rendering pass
/// (vkGetDynamicRenderingTilePropertiesQCOM); fills `properties` in place.
#[inline]
unsafe fn get_dynamic_rendering_tile_properties_qcom(
&self,
rendering_info: &RenderingInfo,
properties: &mut TilePropertiesQCOM,
) -> crate::VkResult<()> {
let __result = (self.commands().get_dynamic_rendering_tile_properties_qcom)(
self.handle(),
rendering_info,
properties,
);
if __result == Result::SUCCESS {
Ok(())
} else {
Err(__result.into())
}
}
/// Returns the tile properties of `framebuffer`
/// (vkGetFramebufferTilePropertiesQCOM) using the two-call enumeration
/// pattern: first query the count, then fill the buffer.
#[inline]
unsafe fn get_framebuffer_tile_properties_qcom(
&self,
framebuffer: Framebuffer,
) -> crate::VkResult<Vec<TilePropertiesQCOM>> {
// First call: a null output pointer asks the driver only for the count.
let mut properties_count = 0;
(self.commands().get_framebuffer_tile_properties_qcom)(
self.handle(),
framebuffer,
&mut properties_count,
ptr::null_mut(),
);
let mut properties = Vec::with_capacity(properties_count as usize);
// Second call: the driver writes up to `properties_count` entries and
// updates the count to the number actually written.
let __result = (self.commands().get_framebuffer_tile_properties_qcom)(
self.handle(),
framebuffer,
&mut properties_count,
properties.as_mut_ptr(),
);
debug_assert!(properties.capacity() >= properties_count as usize);
// NOTE(review): `set_len` runs before the result check; this assumes the
// final count only covers entries the driver initialized — confirm against
// the extension spec before relying on the Err path's buffer.
properties.set_len(properties_count as usize);
if __result == Result::SUCCESS {
Ok(properties)
} else {
Err(__result.into())
}
}
}
impl<C: DeviceV1_0 + ?Sized> QcomTilePropertiesExtensionDeviceCommands for C {}
/// Device commands for `VK_QCOM_tile_shading`.
pub trait QcomTileShadingExtensionDeviceCommands: DeviceV1_0 {
    /// Begins per-tile execution (vkCmdBeginPerTileExecutionQCOM).
    #[inline]
    unsafe fn cmd_begin_per_tile_execution_qcom(
        &self,
        command_buffer: CommandBuffer,
        per_tile_begin_info: &PerTileBeginInfoQCOM,
    ) {
        let begin = self.commands().cmd_begin_per_tile_execution_qcom;
        begin(command_buffer, per_tile_begin_info);
    }

    /// Records a tile dispatch (vkCmdDispatchTileQCOM).
    #[inline]
    unsafe fn cmd_dispatch_tile_qcom(
        &self,
        command_buffer: CommandBuffer,
        dispatch_tile_info: &DispatchTileInfoQCOM,
    ) {
        let dispatch = self.commands().cmd_dispatch_tile_qcom;
        dispatch(command_buffer, dispatch_tile_info);
    }

    /// Ends per-tile execution (vkCmdEndPerTileExecutionQCOM).
    #[inline]
    unsafe fn cmd_end_per_tile_execution_qcom(
        &self,
        command_buffer: CommandBuffer,
        per_tile_end_info: &PerTileEndInfoQCOM,
    ) {
        let end = self.commands().cmd_end_per_tile_execution_qcom;
        end(command_buffer, per_tile_end_info);
    }
}

impl<C: DeviceV1_0 + ?Sized> QcomTileShadingExtensionDeviceCommands for C {}
/// Device commands for `VK_QNX_external_memory_screen_buffer`.
pub trait QnxExternalMemoryScreenBufferExtensionDeviceCommands: DeviceV1_0 {
    /// Queries Vulkan-relevant properties of a QNX screen buffer
    /// (vkGetScreenBufferPropertiesQNX); fills `properties` in place.
    #[inline]
    unsafe fn get_screen_buffer_properties_qnx(
        &self,
        buffer: &_screen_buffer,
        properties: &mut ScreenBufferPropertiesQNX,
    ) -> crate::VkResult<()> {
        let query = self.commands().get_screen_buffer_properties_qnx;
        let code = query(self.handle(), buffer, properties);
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        Ok(())
    }
}

impl<C: DeviceV1_0 + ?Sized> QnxExternalMemoryScreenBufferExtensionDeviceCommands for C {}
/// Instance commands for `VK_QNX_screen_surface`.
pub trait QnxScreenSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a presentation surface from `create_info`
    /// (vkCreateScreenSurfaceQNX).
    #[inline]
    unsafe fn create_screen_surface_qnx(
        &self,
        create_info: &ScreenSurfaceCreateInfoQNX,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        // Resolve the optional host allocator to the raw pointer the C ABI expects.
        let allocator_ptr = match allocator {
            Some(callbacks) => callbacks as *const AllocationCallbacks,
            None => ptr::null(),
        };
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let code = (self.commands().create_screen_surface_qnx)(
            self.handle(),
            create_info,
            allocator_ptr,
            surface.as_mut_ptr(),
        );
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        // Only read the handle once the driver reported SUCCESS.
        Ok(surface.assume_init())
    }

    /// Queries presentation support of `queue_family_index` for QNX screen
    /// windows (vkGetPhysicalDeviceScreenPresentationSupportQNX).
    #[inline]
    unsafe fn get_physical_device_screen_presentation_support_qnx(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
        window: *mut _screen_window,
    ) -> Bool32 {
        let query = self
            .commands()
            .get_physical_device_screen_presentation_support_qnx;
        query(physical_device, queue_family_index, window)
    }
}

impl<C: InstanceV1_0 + ?Sized> QnxScreenSurfaceExtensionInstanceCommands for C {}
/// Instance commands for `VK_SEC_ubm_surface`.
pub trait SecUbmSurfaceExtensionInstanceCommands: InstanceV1_0 {
    /// Creates a presentation surface from `create_info`
    /// (vkCreateUbmSurfaceSEC).
    #[inline]
    unsafe fn create_ubm_surface_sec(
        &self,
        create_info: &UbmSurfaceCreateInfoSEC,
        allocator: Option<&AllocationCallbacks>,
    ) -> crate::VkResult<SurfaceKHR> {
        // Resolve the optional host allocator to the raw pointer the C ABI expects.
        let allocator_ptr = match allocator {
            Some(callbacks) => callbacks as *const AllocationCallbacks,
            None => ptr::null(),
        };
        let mut surface = MaybeUninit::<SurfaceKHR>::uninit();
        let code = (self.commands().create_ubm_surface_sec)(
            self.handle(),
            create_info,
            allocator_ptr,
            surface.as_mut_ptr(),
        );
        if code != Result::SUCCESS {
            return Err(code.into());
        }
        // Only read the handle once the driver reported SUCCESS.
        Ok(surface.assume_init())
    }

    /// Queries UBM presentation support for `queue_family_index`; returns the
    /// support flag together with the driver-written `ubm_device`
    /// (vkGetPhysicalDeviceUbmPresentationSupportSEC).
    #[inline]
    unsafe fn get_physical_device_ubm_presentation_support_sec(
        &self,
        physical_device: PhysicalDevice,
        queue_family_index: u32,
    ) -> (Bool32, ubm_device) {
        let mut device = MaybeUninit::<ubm_device>::uninit();
        let supported = (self
            .commands()
            .get_physical_device_ubm_presentation_support_sec)(
            physical_device,
            queue_family_index,
            device.as_mut_ptr(),
        );
        // NOTE: read back unconditionally, matching the original wrapper's
        // assumption that the call always initializes the output.
        (supported, device.assume_init())
    }
}

impl<C: InstanceV1_0 + ?Sized> SecUbmSurfaceExtensionInstanceCommands for C {}
/// Device commands for `VK_VALVE_descriptor_set_host_mapping`.
pub trait ValveDescriptorSetHostMappingExtensionDeviceCommands: DeviceV1_0 {
    /// Retrieves the host mapping pointer for `descriptor_set`
    /// (vkGetDescriptorSetHostMappingVALVE).
    #[inline]
    unsafe fn get_descriptor_set_host_mapping_valve(
        &self,
        descriptor_set: DescriptorSet,
    ) -> *mut c_void {
        let mut mapping = MaybeUninit::<*mut c_void>::uninit();
        (self.commands().get_descriptor_set_host_mapping_valve)(
            self.handle(),
            descriptor_set,
            mapping.as_mut_ptr(),
        );
        // Read back unconditionally, matching the original wrapper's
        // assumption that the call always initializes the output.
        mapping.assume_init()
    }

    /// Queries host-mapping information for a descriptor set binding
    /// (vkGetDescriptorSetLayoutHostMappingInfoVALVE); fills `host_mapping`.
    #[inline]
    unsafe fn get_descriptor_set_layout_host_mapping_info_valve(
        &self,
        binding_reference: &DescriptorSetBindingReferenceVALVE,
        host_mapping: &mut DescriptorSetLayoutHostMappingInfoVALVE,
    ) {
        let query = self
            .commands()
            .get_descriptor_set_layout_host_mapping_info_valve;
        query(self.handle(), binding_reference, host_mapping);
    }
}

impl<C: DeviceV1_0 + ?Sized> ValveDescriptorSetHostMappingExtensionDeviceCommands for C {}