mod backend;
use crate::backend::native_gpu_future;
#[macro_use]
mod macros;
use arrayvec::ArrayVec;
use smallvec::SmallVec;
use std::{
ffi::CString,
ops::Range,
future::Future,
ptr,
slice,
thread,
};
pub use wgt::{
AddressMode,
Backend,
BackendBit,
BlendDescriptor,
BlendFactor,
BlendOperation,
BufferAddress,
BufferUsage,
Color,
ColorStateDescriptor,
ColorWrite,
CommandBufferDescriptor,
CompareFunction,
CullMode,
DepthStencilStateDescriptor,
DeviceDescriptor,
DynamicOffset,
Extensions,
Extent3d,
FilterMode,
FrontFace,
IndexFormat,
InputStepMode,
Limits,
LoadOp,
Origin3d,
PowerPreference,
PresentMode,
PrimitiveTopology,
RasterizationStateDescriptor,
SamplerDescriptor,
ShaderLocation,
ShaderStage,
StencilOperation,
StencilStateFaceDescriptor,
StoreOp,
SwapChainDescriptor,
TextureAspect,
TextureComponentType,
TextureDimension,
TextureFormat,
TextureUsage,
TextureViewDescriptor,
TextureViewDimension,
VertexAttributeDescriptor,
VertexFormat,
BIND_BUFFER_ALIGNMENT,
MAX_BIND_GROUPS,
read_spirv,
};
pub use wgc::instance::{
AdapterInfo,
DeviceType,
};
/// Placeholder for per-device temporary storage; currently holds nothing.
#[derive(Default, Debug)]
struct Temp {
}
/// Handle to a physical graphics/compute adapter, wrapping a `wgc` adapter id.
#[derive(Debug, PartialEq)]
pub struct Adapter {
    id: wgc::id::AdapterId,
}
/// Options used when requesting an [`Adapter`].
#[derive(Clone, Debug)]
pub struct RequestAdapterOptions<'a> {
    /// Preference for high-performance vs. low-power adapters.
    pub power_preference: PowerPreference,
    /// If set, the returned adapter must be able to present to this surface.
    pub compatible_surface: Option<&'a Surface>,
}
/// Open connection to a graphics/compute device, wrapping a `wgc` device id.
#[derive(Debug)]
pub struct Device {
    id: wgc::id::DeviceId,
    // Reserved scratch storage (currently empty).
    temp: Temp,
}
/// How [`Device::poll`] should behave: block until idle, or check once.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Maintain {
    Wait,
    Poll,
}
/// Handle to a GPU buffer; destroyed via the native API on drop.
#[derive(Debug, PartialEq)]
pub struct Buffer {
    id: wgc::id::BufferId,
    // Device that created this buffer; carried so mapped-creation helpers
    // can record the owner.
    device_id: wgc::id::DeviceId,
}
/// Handle to a texture. `owned` is false for textures the swap chain owns,
/// in which case drop does not destroy the native object.
#[derive(Debug, PartialEq)]
pub struct Texture {
    id: wgc::id::TextureId,
    owned: bool,
}
/// Handle to a texture view. `owned` is false for swap-chain-provided views,
/// which must not be destroyed on drop.
#[derive(Debug, PartialEq)]
pub struct TextureView {
    id: wgc::id::TextureViewId,
    owned: bool,
}
/// Handle to a sampler, wrapping a `wgc` sampler id.
#[derive(Debug, PartialEq)]
pub struct Sampler {
    id: wgc::id::SamplerId,
}
/// Handle to a presentable surface (a window's drawing target).
#[derive(Debug, PartialEq)]
pub struct Surface {
    id: wgc::id::SurfaceId,
}
/// Handle to a swap chain created for a [`Surface`].
#[derive(Debug, PartialEq)]
pub struct SwapChain {
    id: wgc::id::SwapChainId,
}
/// Handle to a bind group layout, wrapping a `wgc` id.
#[derive(Debug, PartialEq)]
pub struct BindGroupLayout {
    id: wgc::id::BindGroupLayoutId,
}
/// Handle to a bind group; destroyed via the native API on drop.
#[derive(Debug, PartialEq)]
pub struct BindGroup {
    id: wgc::id::BindGroupId,
}
impl Drop for BindGroup {
    fn drop(&mut self) {
        // Release the native bind group when the handle goes away.
        wgn::wgpu_bind_group_destroy(self.id);
    }
}
/// Handle to a compiled shader module, wrapping a `wgc` id.
#[derive(Debug, PartialEq)]
pub struct ShaderModule {
    id: wgc::id::ShaderModuleId,
}
/// Handle to a pipeline layout, wrapping a `wgc` id.
#[derive(Debug, PartialEq)]
pub struct PipelineLayout {
    id: wgc::id::PipelineLayoutId,
}
/// Handle to a render pipeline, wrapping a `wgc` id.
#[derive(Debug, PartialEq)]
pub struct RenderPipeline {
    id: wgc::id::RenderPipelineId,
}
/// Handle to a compute pipeline, wrapping a `wgc` id.
#[derive(Debug, PartialEq)]
pub struct ComputePipeline {
    id: wgc::id::ComputePipelineId,
}
/// Finished, submittable recording of GPU commands.
#[derive(Debug, PartialEq)]
pub struct CommandBuffer {
    id: wgc::id::CommandBufferId,
}
/// Encoder used to record GPU commands into a [`CommandBuffer`].
#[derive(Debug)]
pub struct CommandEncoder {
    id: wgc::id::CommandEncoderId,
    // Raw-pointer phantom makes the encoder !Send/!Sync, pinning it to
    // one thread.
    _p: std::marker::PhantomData<*const u8>,
}
/// In-progress render pass; borrows its encoder mutably so no other
/// recording can interleave. Ended automatically on drop.
#[derive(Debug)]
pub struct RenderPass<'a> {
    id: wgc::id::RenderPassId,
    _parent: &'a mut CommandEncoder,
}
/// In-progress compute pass; borrows its encoder mutably. Ended on drop.
#[derive(Debug)]
pub struct ComputePass<'a> {
    id: wgc::id::ComputePassId,
    _parent: &'a mut CommandEncoder,
}
/// Handle to a device's command queue.
#[derive(Debug, PartialEq)]
pub struct Queue {
    id: wgc::id::QueueId,
}
/// A resource that can be bound in a [`BindGroup`].
#[derive(Clone, Debug)]
pub enum BindingResource<'a> {
    /// A sub-range of a buffer.
    Buffer {
        buffer: &'a Buffer,
        range: Range<BufferAddress>,
    },
    Sampler(&'a Sampler),
    TextureView(&'a TextureView),
}
/// A single slot assignment inside a [`BindGroupDescriptor`].
#[derive(Clone, Debug)]
pub struct Binding<'a> {
    /// Shader-visible binding index.
    pub binding: u32,
    pub resource: BindingResource<'a>,
}
/// High-level description of what kind of resource a layout slot accepts.
/// Flattened into the native `wgc` representation in
/// `Device::create_bind_group_layout`.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum BindingType {
    UniformBuffer {
        /// Whether the binding accepts dynamic offsets at bind time.
        dynamic: bool,
    },
    StorageBuffer {
        dynamic: bool,
        readonly: bool,
    },
    Sampler {
        /// True for comparison (shadow) samplers.
        comparison: bool,
    },
    SampledTexture {
        dimension: TextureViewDimension,
        component_type: TextureComponentType,
        multisampled: bool,
    },
    StorageTexture {
        dimension: TextureViewDimension,
        component_type: TextureComponentType,
        format: TextureFormat,
        readonly: bool,
    },
}
/// One slot of a [`BindGroupLayoutDescriptor`].
#[derive(Clone, Debug, Hash)]
pub struct BindGroupLayoutEntry {
    pub binding: u32,
    /// Shader stages that may access this binding.
    pub visibility: ShaderStage,
    pub ty: BindingType,
}
/// Description of a bind group layout to be created.
#[derive(Clone, Debug)]
pub struct BindGroupLayoutDescriptor<'a> {
    pub bindings: &'a [BindGroupLayoutEntry],
    /// Optional debug label.
    pub label: Option<&'a str>,
}
/// Description of a bind group to be created against `layout`.
#[derive(Clone, Debug)]
pub struct BindGroupDescriptor<'a> {
    pub layout: &'a BindGroupLayout,
    pub bindings: &'a [Binding<'a>],
    /// Optional debug label.
    pub label: Option<&'a str>,
}
/// Description of a pipeline layout: the ordered bind group layouts it uses.
#[derive(Clone, Debug)]
pub struct PipelineLayoutDescriptor<'a> {
    pub bind_group_layouts: &'a [&'a BindGroupLayout],
}
/// A shader module plus the entry-point function to run for a pipeline stage.
#[derive(Clone, Debug)]
pub struct ProgrammableStageDescriptor<'a> {
    pub module: &'a ShaderModule,
    pub entry_point: &'a str,
}
/// Vertex input state: index format plus the vertex buffer layouts.
#[derive(Clone, Debug)]
pub struct VertexStateDescriptor<'a> {
    pub index_format: IndexFormat,
    pub vertex_buffers: &'a [VertexBufferDescriptor<'a>],
}
/// Layout of one vertex buffer slot.
#[derive(Clone, Debug)]
pub struct VertexBufferDescriptor<'a> {
    /// Byte distance between consecutive elements.
    pub stride: BufferAddress,
    pub step_mode: InputStepMode,
    pub attributes: &'a [VertexAttributeDescriptor],
}
/// Full description of a render pipeline to be created.
#[derive(Clone, Debug)]
pub struct RenderPipelineDescriptor<'a> {
    pub layout: &'a PipelineLayout,
    pub vertex_stage: ProgrammableStageDescriptor<'a>,
    /// Optional — a pipeline may be vertex-only.
    pub fragment_stage: Option<ProgrammableStageDescriptor<'a>>,
    pub rasterization_state: Option<RasterizationStateDescriptor>,
    pub primitive_topology: PrimitiveTopology,
    pub color_states: &'a [ColorStateDescriptor],
    pub depth_stencil_state: Option<DepthStencilStateDescriptor>,
    pub vertex_state: VertexStateDescriptor<'a>,
    pub sample_count: u32,
    pub sample_mask: u32,
    pub alpha_to_coverage_enabled: bool,
}
/// Description of a compute pipeline: its layout and compute stage.
#[derive(Clone, Debug)]
pub struct ComputePipelineDescriptor<'a> {
    pub layout: &'a PipelineLayout,
    pub compute_stage: ProgrammableStageDescriptor<'a>,
}
/// Color attachment descriptor specialized to borrow [`TextureView`]s.
pub type RenderPassColorAttachmentDescriptor<'a> =
    wgt::RenderPassColorAttachmentDescriptorBase<&'a TextureView>;
/// Depth/stencil attachment descriptor specialized to borrow [`TextureView`]s.
pub type RenderPassDepthStencilAttachmentDescriptor<'a> =
    wgt::RenderPassDepthStencilAttachmentDescriptorBase<&'a TextureView>;
/// Attachments for one render pass.
#[derive(Debug)]
pub struct RenderPassDescriptor<'a, 'b> {
    pub color_attachments: &'b [RenderPassColorAttachmentDescriptor<'a>],
    pub depth_stencil_attachment:
        Option<RenderPassDepthStencilAttachmentDescriptor<'a>>,
}
/// Description of a buffer to be created.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct BufferDescriptor<'a> {
    /// Optional debug label.
    pub label: Option<&'a str>,
    /// Size in bytes.
    pub size: BufferAddress,
    pub usage: BufferUsage,
}
/// Description of a command encoder to be created.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CommandEncoderDescriptor<'a> {
    /// Optional debug label.
    pub label: Option<&'a str>,
}
/// Description of a texture to be created.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TextureDescriptor<'a> {
    /// Optional debug label.
    pub label: Option<&'a str>,
    pub size: Extent3d,
    pub array_layer_count: u32,
    pub mip_level_count: u32,
    pub sample_count: u32,
    pub dimension: TextureDimension,
    pub format: TextureFormat,
    pub usage: TextureUsage,
}
/// The texture acquired from a swap chain for one frame. Presenting happens
/// automatically when this is dropped (unless the thread is panicking).
#[derive(Debug)]
pub struct SwapChainOutput {
    /// View of the frame's texture; not owned (`owned: false`), so dropping
    /// it does not destroy the native view.
    pub view: TextureView,
    swap_chain_id: wgc::id::SwapChainId,
}
/// Source/destination description for buffer-side copy operations.
#[derive(Clone, Debug)]
pub struct BufferCopyView<'a> {
    pub buffer: &'a Buffer,
    /// Byte offset into the buffer.
    pub offset: BufferAddress,
    pub bytes_per_row: u32,
    pub rows_per_image: u32,
}
impl BufferCopyView<'_> {
fn into_native(self) -> wgc::command::BufferCopyView {
wgc::command::BufferCopyView {
buffer: self.buffer.id,
offset: self.offset,
bytes_per_row: self.bytes_per_row,
rows_per_image: self.rows_per_image,
}
}
}
/// Source/destination description for texture-side copy operations.
#[derive(Clone, Debug)]
pub struct TextureCopyView<'a> {
    pub texture: &'a Texture,
    pub mip_level: u32,
    pub array_layer: u32,
    /// Texel origin of the copy within the mip level.
    pub origin: Origin3d,
}
impl<'a> TextureCopyView<'a> {
fn into_native(self) -> wgc::command::TextureCopyView {
wgc::command::TextureCopyView {
texture: self.texture.id,
mip_level: self.mip_level,
array_layer: self.array_layer,
origin: self.origin,
}
}
}
/// A buffer created in the mapped state; `data` is writable host memory
/// until [`CreateBufferMapped::finish`] unmaps it.
pub struct CreateBufferMapped<'a> {
    id: wgc::id::BufferId,
    /// Host-visible contents of the mapped buffer.
    pub data: &'a mut [u8],
    device_id: wgc::id::DeviceId,
}
impl CreateBufferMapped<'_> {
    /// Unmaps the buffer and returns the usable [`Buffer`] handle.
    pub fn finish(self) -> Buffer {
        wgn::wgpu_buffer_unmap(self.id);
        Buffer { device_id: self.device_id, id: self.id }
    }
}
impl Surface {
    /// Creates a surface from any window implementing `HasRawWindowHandle`.
    pub fn create<W: raw_window_handle::HasRawWindowHandle>(window: &W) -> Self {
        Surface {
            id: wgn::wgpu_create_surface(window.raw_window_handle()),
        }
    }

    /// Creates a surface directly from a CoreAnimation (`CAMetalLayer`)
    /// pointer. Apple platforms only.
    #[cfg(any(target_os = "ios", target_os = "macos"))]
    pub fn create_surface_from_core_animation_layer(layer: *mut std::ffi::c_void) -> Self {
        Surface {
            id: wgn::wgpu_create_surface_from_metal_layer(layer),
        }
    }
}
impl Adapter {
    /// Lists all adapters available on the given backends.
    pub fn enumerate(backends: BackendBit) -> Vec<Self> {
        wgn::wgpu_enumerate_adapters(backends)
            .into_iter()
            .map(|id| Adapter { id })
            .collect()
    }

    /// Requests an adapter matching `options`, or `None` if no suitable
    /// adapter exists.
    ///
    /// NOTE(review): despite being `async`, the native call appears to invoke
    /// the callback before returning — the result is read from `id_maybe`
    /// immediately, with no await point.
    pub async fn request(options: &RequestAdapterOptions<'_>, backends: BackendBit) -> Option<Self> {
        // C callback: writes the resulting id through the user-data pointer,
        // which points at `id_maybe` below.
        unsafe extern "C" fn adapter_callback(
            id: Option<wgc::id::AdapterId>,
            user_data: *mut std::ffi::c_void,
        ) {
            *(user_data as *mut Option<wgc::id::AdapterId>) = id;
        }
        let mut id_maybe = None;
        unsafe {
            wgn::wgpu_request_adapter_async(
                Some(&wgc::instance::RequestAdapterOptions {
                    power_preference: options.power_preference,
                    compatible_surface: options.compatible_surface
                        .map(|surface| surface.id),
                }),
                backends,
                adapter_callback,
                // Pass the address of `id_maybe` as opaque user data.
                &mut id_maybe as *mut _ as *mut std::ffi::c_void,
            )
        };
        id_maybe.map(|id| Adapter { id })
    }

    /// Opens a device (and its default queue) on this adapter.
    pub async fn request_device(&self, desc: &DeviceDescriptor) -> (Device, Queue) {
        let device = Device {
            id: wgn::wgpu_adapter_request_device(self.id, Some(desc)),
            temp: Temp::default(),
        };
        let queue = Queue {
            id: wgn::wgpu_device_get_default_queue(device.id),
        };
        (device, queue)
    }

    /// Returns vendor/device information about this adapter.
    pub fn get_info(&self) -> AdapterInfo {
        wgn::adapter_get_info(self.id)
    }
}
impl Device {
    /// Processes outstanding device work. `Maintain::Wait` blocks until the
    /// device is idle; `Maintain::Poll` checks once without blocking.
    pub fn poll(&self, maintain: Maintain) {
        wgn::wgpu_device_poll(self.id, match maintain {
            Maintain::Poll => false,
            Maintain::Wait => true,
        });
    }

    /// Creates a shader module from a slice of SPIR-V words.
    pub fn create_shader_module(&self, spv: &[u32]) -> ShaderModule {
        // The descriptor borrows `spv` as a raw pointer/length pair; `spv`
        // stays alive across the FFI call below.
        let desc = wgc::pipeline::ShaderModuleDescriptor {
            code: wgc::U32Array {
                bytes: spv.as_ptr(),
                length: spv.len(),
            },
        };
        ShaderModule {
            id: wgn::wgpu_device_create_shader_module(self.id, &desc),
        }
    }

    /// Creates an encoder for recording GPU commands.
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor) -> CommandEncoder {
        // `owned_label` owns the NUL-terminated label string; it must remain
        // alive until the native call returns.
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        CommandEncoder {
            id: wgn::wgpu_device_create_command_encoder(
                self.id,
                Some(&wgt::CommandEncoderDescriptor {
                    label: owned_label.as_ptr(),
                }),
            ),
            _p: Default::default(),
        }
    }

    /// Creates a bind group pairing `desc.layout` with concrete resources.
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor) -> BindGroup {
        use wgc::binding_model as bm;
        // Translate each high-level binding into the raw `wgc` entry,
        // replacing object references with their ids.
        let bindings = desc
            .bindings
            .iter()
            .map(|binding| bm::BindGroupEntry {
                binding: binding.binding,
                resource: match binding.resource {
                    BindingResource::Buffer {
                        ref buffer,
                        ref range,
                    } => bm::BindingResource::Buffer(bm::BufferBinding {
                        buffer: buffer.id,
                        offset: range.start,
                        size: range.end - range.start,
                    }),
                    BindingResource::Sampler(ref sampler) => {
                        bm::BindingResource::Sampler(sampler.id)
                    }
                    BindingResource::TextureView(ref texture_view) => {
                        bm::BindingResource::TextureView(texture_view.id)
                    }
                },
            })
            .collect::<Vec<_>>();
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        // `bindings` and `owned_label` are borrowed as raw pointers and must
        // outlive the native call.
        BindGroup {
            id: wgn::wgpu_device_create_bind_group(
                self.id,
                &bm::BindGroupDescriptor {
                    layout: desc.layout.id,
                    entries: bindings.as_ptr(),
                    entries_length: bindings.len(),
                    label: owned_label.as_ptr(),
                },
            ),
        }
    }

    /// Creates a bind group layout, flattening the [`BindingType`] enum into
    /// the separate fields the native descriptor expects.
    pub fn create_bind_group_layout(&self, desc: &BindGroupLayoutDescriptor) -> BindGroupLayout {
        use wgc::binding_model as bm;
        let temp_layouts = desc
            .bindings
            .iter()
            .map(|bind| bm::BindGroupLayoutEntry {
                binding: bind.binding,
                visibility: bind.visibility,
                ty: match bind.ty {
                    BindingType::UniformBuffer { .. } => bm::BindingType::UniformBuffer,
                    BindingType::StorageBuffer {
                        readonly: false, ..
                    } => bm::BindingType::StorageBuffer,
                    BindingType::StorageBuffer { readonly: true, .. } => {
                        bm::BindingType::ReadonlyStorageBuffer
                    }
                    BindingType::Sampler { comparison: false } => bm::BindingType::Sampler,
                    BindingType::Sampler { .. } => bm::BindingType::ComparisonSampler,
                    BindingType::SampledTexture { .. } => bm::BindingType::SampledTexture,
                    BindingType::StorageTexture { readonly: true, .. } => {
                        bm::BindingType::ReadonlyStorageTexture
                    }
                    BindingType::StorageTexture { .. } => {
                        bm::BindingType::WriteonlyStorageTexture
                    }
                },
                has_dynamic_offset: match bind.ty {
                    BindingType::UniformBuffer { dynamic } |
                    BindingType::StorageBuffer { dynamic, .. } => dynamic,
                    _ => false,
                },
                multisampled: match bind.ty {
                    BindingType::SampledTexture { multisampled, .. } => multisampled,
                    _ => false,
                },
                view_dimension: match bind.ty {
                    BindingType::SampledTexture { dimension, .. } |
                    BindingType::StorageTexture { dimension, .. } => dimension,
                    // Filler for bindings that have no texture dimension.
                    _ => TextureViewDimension::D2,
                },
                texture_component_type: match bind.ty {
                    BindingType::SampledTexture { component_type, .. } |
                    BindingType::StorageTexture { component_type, .. } => component_type,
                    // Filler for bindings without a component type.
                    _ => TextureComponentType::Float,
                },
                storage_texture_format: match bind.ty {
                    BindingType::StorageTexture { format, .. } => format,
                    // Filler format for non-storage-texture bindings.
                    _ => TextureFormat::Rgb10a2Unorm,
                },
            })
            .collect::<Vec<_>>();
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        // `temp_layouts` and `owned_label` must outlive the native call.
        BindGroupLayout {
            id: wgn::wgpu_device_create_bind_group_layout(
                self.id,
                &bm::BindGroupLayoutDescriptor {
                    entries: temp_layouts.as_ptr(),
                    entries_length: temp_layouts.len(),
                    label: owned_label.as_ptr(),
                },
            ),
        }
    }

    /// Creates a pipeline layout from an ordered list of bind group layouts.
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor) -> PipelineLayout {
        // Collect raw ids; the Vec must outlive the native call.
        let temp_layouts = desc
            .bind_group_layouts
            .iter()
            .map(|bgl| bgl.id)
            .collect::<Vec<_>>();
        PipelineLayout {
            id: wgn::wgpu_device_create_pipeline_layout(
                self.id,
                &wgc::binding_model::PipelineLayoutDescriptor {
                    bind_group_layouts: temp_layouts.as_ptr(),
                    bind_group_layouts_length: temp_layouts.len(),
                },
            ),
        }
    }

    /// Creates a render pipeline.
    ///
    /// Panics if an entry-point name contains an interior NUL byte
    /// (via `CString::new(..).unwrap()`).
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor) -> RenderPipeline {
        use wgc::pipeline as pipe;
        // Entry-point strings are converted to owned C strings; each CString
        // must stay alive while the descriptor borrows its pointer.
        let vertex_entry_point = CString::new(desc.vertex_stage.entry_point).unwrap();
        let vertex_stage = pipe::ProgrammableStageDescriptor {
            module: desc.vertex_stage.module.id,
            entry_point: vertex_entry_point.as_ptr(),
        };
        // `_fragment_entry_point` is bound (not dropped) precisely to keep
        // the pointer inside `fragment_stage` valid until the call below.
        let (_fragment_entry_point, fragment_stage) =
            if let Some(fragment_stage) = &desc.fragment_stage {
                let fragment_entry_point = CString::new(fragment_stage.entry_point).unwrap();
                let fragment_stage = pipe::ProgrammableStageDescriptor {
                    module: fragment_stage.module.id,
                    entry_point: fragment_entry_point.as_ptr(),
                };
                (fragment_entry_point, Some(fragment_stage))
            } else {
                (CString::default(), None)
            };
        let temp_color_states = desc.color_states.to_vec();
        let temp_vertex_buffers = desc
            .vertex_state.vertex_buffers
            .iter()
            .map(|vbuf| pipe::VertexBufferLayoutDescriptor {
                array_stride: vbuf.stride,
                step_mode: vbuf.step_mode,
                attributes: vbuf.attributes.as_ptr(),
                attributes_length: vbuf.attributes.len(),
            })
            .collect::<Vec<_>>();
        // All temporaries above are borrowed as raw pointers here and must
        // outlive the native call.
        RenderPipeline {
            id: wgn::wgpu_device_create_render_pipeline(
                self.id,
                &pipe::RenderPipelineDescriptor {
                    layout: desc.layout.id,
                    vertex_stage,
                    fragment_stage: fragment_stage
                        .as_ref()
                        .map_or(ptr::null(), |fs| fs as *const _),
                    rasterization_state: desc
                        .rasterization_state
                        .as_ref()
                        .map_or(ptr::null(), |p| p as *const _),
                    primitive_topology: desc.primitive_topology,
                    color_states: temp_color_states.as_ptr(),
                    color_states_length: temp_color_states.len(),
                    depth_stencil_state: desc
                        .depth_stencil_state
                        .as_ref()
                        .map_or(ptr::null(), |p| p as *const _),
                    vertex_state: pipe::VertexStateDescriptor {
                        index_format: desc.vertex_state.index_format,
                        vertex_buffers: temp_vertex_buffers.as_ptr(),
                        vertex_buffers_length: temp_vertex_buffers.len(),
                    },
                    sample_count: desc.sample_count,
                    sample_mask: desc.sample_mask,
                    alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
                },
            ),
        }
    }

    /// Creates a compute pipeline.
    ///
    /// Panics if the entry-point name contains an interior NUL byte.
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor) -> ComputePipeline {
        use wgc::pipeline as pipe;
        // `entry_point` must stay alive across the native call.
        let entry_point = CString::new(desc.compute_stage.entry_point).unwrap();
        ComputePipeline {
            id: wgn::wgpu_device_create_compute_pipeline(
                self.id,
                &pipe::ComputePipelineDescriptor {
                    layout: desc.layout.id,
                    compute_stage: pipe::ProgrammableStageDescriptor {
                        module: desc.compute_stage.module.id,
                        entry_point: entry_point.as_ptr(),
                    },
                },
            ),
        }
    }

    /// Creates an (unmapped) buffer.
    pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer {
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        Buffer {
            device_id: self.id,
            id: wgn::wgpu_device_create_buffer(
                self.id,
                &wgt::BufferDescriptor {
                    label: owned_label.as_ptr(),
                    size: desc.size,
                    usage: desc.usage,
                }
            ),
        }
    }

    /// Creates a buffer that starts out mapped for writing.
    ///
    /// Panics if `desc.size` is zero (a zero-length mapping is invalid).
    pub fn create_buffer_mapped(&self, desc: &BufferDescriptor) -> CreateBufferMapped<'_> {
        assert_ne!(desc.size, 0);
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        let mut data_ptr: *mut u8 = std::ptr::null_mut();
        let (id, data) = unsafe {
            // The native call fills `data_ptr` with the mapped host memory.
            let id = wgn::wgpu_device_create_buffer_mapped(
                self.id,
                &wgt::BufferDescriptor {
                    label: owned_label.as_ptr(),
                    size: desc.size,
                    usage: desc.usage,
                },
                &mut data_ptr as *mut *mut u8);
            // The mapped region is `desc.size` bytes and stays valid until
            // the buffer is unmapped in `CreateBufferMapped::finish`.
            let data = std::slice::from_raw_parts_mut(data_ptr as *mut u8, desc.size as usize);
            (id, data)
        };
        CreateBufferMapped { device_id: self.id, id, data }
    }

    /// Convenience: creates a buffer pre-filled with `data`.
    pub fn create_buffer_with_data(&self, data: &[u8], usage: BufferUsage) -> Buffer {
        let mapped = self.create_buffer_mapped(&BufferDescriptor {
            size: data.len() as u64,
            usage,
            label: None,
        });
        mapped.data.copy_from_slice(data);
        mapped.finish()
    }

    /// Creates a texture; the returned handle owns the native object.
    pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture {
        let owned_label = OwnedLabel::new(desc.label.as_deref());
        Texture {
            id: wgn::wgpu_device_create_texture(self.id, &wgt::TextureDescriptor {
                label: owned_label.as_ptr(),
                size: desc.size,
                array_layer_count: desc.array_layer_count,
                mip_level_count: desc.mip_level_count,
                sample_count: desc.sample_count,
                dimension: desc.dimension,
                format: desc.format,
                usage: desc.usage,
            }),
            owned: true,
        }
    }

    /// Creates a sampler.
    pub fn create_sampler(&self, desc: &SamplerDescriptor) -> Sampler {
        Sampler {
            id: wgn::wgpu_device_create_sampler(self.id, desc),
        }
    }

    /// Creates a swap chain presenting to `surface`.
    pub fn create_swap_chain(&self, surface: &Surface, desc: &SwapChainDescriptor) -> SwapChain {
        SwapChain {
            id: wgn::wgpu_device_create_swap_chain(self.id, surface.id, desc),
        }
    }
}
impl Drop for Device {
    fn drop(&mut self) {
        // Block until all outstanding work on the device has completed.
        wgn::wgpu_device_poll(self.id, true);
        // NOTE(review): the destroy call is compiled only under the
        // "metal-auto-capture" feature, so on other configurations the
        // native device object is not destroyed here — confirm this is
        // still the intended workaround.
        #[cfg(feature = "metal-auto-capture")]
        wgn::wgpu_device_destroy(self.id);
    }
}
/// Error produced when an async buffer mapping fails.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncErr;
/// Read-only view of a mapped buffer range; unmaps the buffer on drop.
pub struct BufferReadMapping {
    // Pointer/length of the mapped host memory, valid until drop.
    data: *const u8,
    size: usize,
    buffer_id: wgc::id::BufferId,
}
// SAFETY: the mapping is plain host memory with no thread affinity; access
// is read-only through `as_slice` — marked Send/Sync by the original author.
unsafe impl Send for BufferReadMapping {}
unsafe impl Sync for BufferReadMapping {}
impl BufferReadMapping {
    /// Views the mapped bytes as a shared slice.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: `data`/`size` describe the mapped region, which remains
        // valid until `self` is dropped (drop unmaps the buffer).
        unsafe { slice::from_raw_parts(self.data, self.size) }
    }
}
impl Drop for BufferReadMapping {
    fn drop(&mut self) {
        // Unmap the buffer; the `data` pointer is invalid past this point.
        wgn::wgpu_buffer_unmap(self.buffer_id);
    }
}
/// Writable view of a mapped buffer range; unmaps the buffer on drop.
pub struct BufferWriteMapping {
    // Pointer/length of the mapped host memory, valid until drop.
    data: *mut u8,
    size: usize,
    buffer_id: wgc::id::BufferId,
}
// SAFETY: plain host memory; mutable access requires `&mut self` — marked
// Send/Sync by the original author.
unsafe impl Send for BufferWriteMapping {}
unsafe impl Sync for BufferWriteMapping {}
impl BufferWriteMapping {
    /// Views the mapped bytes as a mutable slice.
    pub fn as_slice(&mut self) -> &mut [u8] {
        // SAFETY: `data`/`size` describe the mapped region, which remains
        // valid until `self` is dropped (drop unmaps the buffer); `&mut self`
        // guarantees exclusive access.
        unsafe { slice::from_raw_parts_mut(self.data, self.size) }
    }
}
impl Drop for BufferWriteMapping {
    fn drop(&mut self) {
        // Unmap the buffer; the `data` pointer is invalid past this point.
        wgn::wgpu_buffer_unmap(self.buffer_id);
    }
}
impl Buffer {
    /// Asynchronously maps `size` bytes starting at `start` for reading.
    /// The returned future resolves once the native layer invokes the
    /// completion callback.
    pub fn map_read(&self, start: BufferAddress, size: BufferAddress) -> impl Future<Output = Result<BufferReadMapping, BufferAsyncErr>>
    {
        let (future, completion) = native_gpu_future::new_gpu_future(
            self.id,
            size,
        );
        // C callback: reconstructs the completion handle from the opaque
        // user-data pointer and resolves the future.
        extern "C" fn buffer_map_read_future_wrapper(
            status: wgc::resource::BufferMapAsyncStatus,
            data: *const u8,
            user_data: *mut u8,
        )
        {
            let completion = unsafe {
                native_gpu_future::GpuFutureCompletion::from_raw(user_data as _)
            };
            let (buffer_id, size) = completion.get_buffer_info();
            if let wgc::resource::BufferMapAsyncStatus::Success = status {
                completion.complete(Ok(BufferReadMapping {
                    data,
                    size: size as usize,
                    buffer_id,
                }));
            } else {
                completion.complete(Err(BufferAsyncErr));
            }
        }
        wgn::wgpu_buffer_map_read_async(
            self.id,
            start,
            size,
            buffer_map_read_future_wrapper,
            // Ownership of `completion` passes through the raw pointer to
            // the callback above.
            completion.to_raw() as _,
        );
        future
    }

    /// Asynchronously maps `size` bytes starting at `start` for writing.
    /// Mirrors `map_read`, producing a writable mapping instead.
    pub fn map_write(&self, start: BufferAddress, size: BufferAddress) -> impl Future<Output = Result<BufferWriteMapping, BufferAsyncErr>>
    {
        let (future, completion) = native_gpu_future::new_gpu_future(
            self.id,
            size,
        );
        // C callback, same pattern as the read variant.
        extern "C" fn buffer_map_write_future_wrapper(
            status: wgc::resource::BufferMapAsyncStatus,
            data: *mut u8,
            user_data: *mut u8,
        )
        {
            let completion = unsafe {
                native_gpu_future::GpuFutureCompletion::from_raw(user_data as _)
            };
            let (buffer_id, size) = completion.get_buffer_info();
            if let wgc::resource::BufferMapAsyncStatus::Success = status {
                completion.complete(Ok(BufferWriteMapping {
                    data,
                    size: size as usize,
                    buffer_id,
                }));
            } else {
                completion.complete(Err(BufferAsyncErr));
            }
        }
        wgn::wgpu_buffer_map_write_async(
            self.id,
            start,
            size,
            buffer_map_write_future_wrapper,
            completion.to_raw() as _,
        );
        future
    }

    /// Unmaps the buffer, flushing any mapped ranges.
    pub fn unmap(&self) {
        wgn::wgpu_buffer_unmap(self.id);
    }
}
impl Drop for Buffer {
    fn drop(&mut self) {
        // Release the native buffer when the handle goes away.
        wgn::wgpu_buffer_destroy(self.id);
    }
}
impl Texture {
    /// Creates a view of this texture with explicit parameters.
    pub fn create_view(&self, desc: &TextureViewDescriptor) -> TextureView {
        TextureView {
            id: wgn::wgpu_texture_create_view(self.id, Some(desc)),
            owned: true,
        }
    }

    /// Creates a view with the native layer's default parameters
    /// (descriptor `None`).
    pub fn create_default_view(&self) -> TextureView {
        TextureView {
            id: wgn::wgpu_texture_create_view(self.id, None),
            owned: true,
        }
    }
}
impl Drop for Texture {
    fn drop(&mut self) {
        // Swap-chain textures are not owned and must not be destroyed here.
        if self.owned {
            wgn::wgpu_texture_destroy(self.id);
        }
    }
}
impl Drop for TextureView {
    fn drop(&mut self) {
        // Swap-chain views are not owned and must not be destroyed here.
        if self.owned {
            wgn::wgpu_texture_view_destroy(self.id);
        }
    }
}
impl CommandEncoder {
    /// Finishes recording and returns a submittable [`CommandBuffer`].
    pub fn finish(self) -> CommandBuffer {
        CommandBuffer {
            id: wgn::wgpu_command_encoder_finish(self.id, None),
        }
    }

    /// Begins a render pass; the pass borrows the encoder exclusively until
    /// it is dropped.
    pub fn begin_render_pass<'a>(
        &'a mut self,
        desc: &RenderPassDescriptor<'a, '_>,
    ) -> RenderPass<'a> {
        // Lower attachments to the raw `wgc` form. The ArrayVec caps color
        // attachments at 4 (collect panics beyond that) and must outlive
        // the FFI call that borrows its pointer.
        let colors = desc
            .color_attachments
            .iter()
            .map(|ca| wgc::command::RenderPassColorAttachmentDescriptor {
                attachment: ca.attachment.id,
                resolve_target: ca.resolve_target.map(|rt| rt.id),
                load_op: ca.load_op,
                store_op: ca.store_op,
                clear_color: ca.clear_color,
            })
            .collect::<ArrayVec<[_; 4]>>();
        let depth_stencil = desc.depth_stencil_attachment.as_ref().map(|dsa| {
            wgc::command::RenderPassDepthStencilAttachmentDescriptor {
                attachment: dsa.attachment.id,
                depth_load_op: dsa.depth_load_op,
                depth_store_op: dsa.depth_store_op,
                clear_depth: dsa.clear_depth,
                stencil_load_op: dsa.stencil_load_op,
                stencil_store_op: dsa.stencil_store_op,
                clear_stencil: dsa.clear_stencil,
            }
        });
        RenderPass {
            id: unsafe {
                wgn::wgpu_command_encoder_begin_render_pass(
                    self.id,
                    &wgc::command::RenderPassDescriptor {
                        color_attachments: colors.as_ptr(),
                        color_attachments_length: colors.len(),
                        depth_stencil_attachment: depth_stencil.as_ref(),
                    },
                )
            },
            _parent: self,
        }
    }

    /// Begins a compute pass; the pass borrows the encoder exclusively
    /// until it is dropped.
    pub fn begin_compute_pass(&mut self) -> ComputePass {
        ComputePass {
            id: unsafe {
                wgn::wgpu_command_encoder_begin_compute_pass(self.id, None)
            },
            _parent: self,
        }
    }

    /// Records a buffer-to-buffer copy of `copy_size` bytes.
    pub fn copy_buffer_to_buffer(
        &mut self,
        source: &Buffer,
        source_offset: BufferAddress,
        destination: &Buffer,
        destination_offset: BufferAddress,
        copy_size: BufferAddress,
    ) {
        wgn::wgpu_command_encoder_copy_buffer_to_buffer(
            self.id,
            source.id,
            source_offset,
            destination.id,
            destination_offset,
            copy_size,
        );
    }

    /// Records a buffer-to-texture copy.
    pub fn copy_buffer_to_texture(
        &mut self,
        source: BufferCopyView,
        destination: TextureCopyView,
        copy_size: Extent3d,
    ) {
        wgn::wgpu_command_encoder_copy_buffer_to_texture(
            self.id,
            &source.into_native(),
            &destination.into_native(),
            copy_size,
        );
    }

    /// Records a texture-to-buffer copy.
    pub fn copy_texture_to_buffer(
        &mut self,
        source: TextureCopyView,
        destination: BufferCopyView,
        copy_size: Extent3d,
    ) {
        wgn::wgpu_command_encoder_copy_texture_to_buffer(
            self.id,
            &source.into_native(),
            &destination.into_native(),
            copy_size,
        );
    }

    /// Records a texture-to-texture copy.
    pub fn copy_texture_to_texture(
        &mut self,
        source: TextureCopyView,
        destination: TextureCopyView,
        copy_size: Extent3d,
    ) {
        wgn::wgpu_command_encoder_copy_texture_to_texture(
            self.id,
            &source.into_native(),
            &destination.into_native(),
            copy_size,
        );
    }

    /// Inserts a debug marker label. Panics on interior NUL bytes in `label`.
    pub fn insert_debug_marker(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_command_encoder_insert_debug_marker(
                self.id,
                cstring.as_ptr(),
                0,
            )
        };
    }

    /// Opens a debug group; close it with `pop_debug_group`.
    pub fn push_debug_group(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_command_encoder_push_debug_group(
                self.id,
                cstring.as_ptr(),
                0,
            )
        };
    }

    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        unsafe {
            wgn::wgpu_command_encoder_pop_debug_group(self.id)
        };
    }
}
impl<'a> RenderPass<'a> {
    /// Binds `bind_group` to slot `index`, with one dynamic offset per
    /// dynamic binding in the group's layout.
    pub fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: &'a BindGroup,
        offsets: &[DynamicOffset],
    ) {
        unsafe {
            wgn::wgpu_render_pass_set_bind_group(
                self.id.as_mut().unwrap(),
                index,
                bind_group.id,
                offsets.as_ptr(),
                offsets.len(),
            );
        }
    }

    /// Sets the active render pipeline.
    pub fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
        unsafe {
            wgn::wgpu_render_pass_set_pipeline(
                self.id.as_mut().unwrap(),
                pipeline.id,
            );
        }
    }

    /// Sets the constant blend color.
    pub fn set_blend_color(&mut self, color: Color) {
        unsafe {
            wgn::wgpu_render_pass_set_blend_color(
                self.id.as_mut().unwrap(),
                &color,
            );
        }
    }

    /// Sets the index buffer for subsequent indexed draws.
    pub fn set_index_buffer(
        &mut self,
        buffer: &'a Buffer,
        offset: BufferAddress,
        size: BufferAddress,
    ) {
        unsafe {
            wgn::wgpu_render_pass_set_index_buffer(
                self.id.as_mut().unwrap(),
                buffer.id,
                offset,
                size,
            );
        }
    }

    /// Sets the vertex buffer bound to `slot`.
    pub fn set_vertex_buffer(
        &mut self,
        slot: u32,
        buffer: &'a Buffer,
        offset: BufferAddress,
        size: BufferAddress,
    ) {
        unsafe {
            wgn::wgpu_render_pass_set_vertex_buffer(
                self.id.as_mut().unwrap(),
                slot,
                buffer.id,
                offset,
                size,
            )
        };
    }

    /// Restricts rendering to the given scissor rectangle (in pixels).
    pub fn set_scissor_rect(&mut self, x: u32, y: u32, w: u32, h: u32) {
        unsafe {
            wgn::wgpu_render_pass_set_scissor_rect(
                self.id.as_mut().unwrap(),
                x, y, w, h,
            );
        }
    }

    /// Sets the viewport transform and depth range.
    pub fn set_viewport(&mut self, x: f32, y: f32, w: f32, h: f32, min_depth: f32, max_depth: f32) {
        unsafe {
            wgn::wgpu_render_pass_set_viewport(
                self.id.as_mut().unwrap(),
                x, y, w, h,
                min_depth, max_depth,
            );
        }
    }

    /// Sets the stencil reference value.
    pub fn set_stencil_reference(&mut self, reference: u32) {
        unsafe {
            wgn::wgpu_render_pass_set_stencil_reference(
                self.id.as_mut().unwrap(),
                reference,
            );
        }
    }

    /// Draws `vertices` for each of `instances`. Half-open ranges are
    /// converted to (count, first) pairs for the native call.
    pub fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
        unsafe {
            wgn::wgpu_render_pass_draw(
                self.id.as_mut().unwrap(),
                vertices.end - vertices.start,
                instances.end - instances.start,
                vertices.start,
                instances.start,
            );
        }
    }

    /// Inserts a debug marker label. Panics on interior NUL bytes in `label`.
    pub fn insert_debug_marker(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_render_pass_insert_debug_marker(
                self.id.as_mut().unwrap(),
                cstring.as_ptr(),
                0,
            );
        }
    }

    /// Opens a debug group; close it with `pop_debug_group`.
    pub fn push_debug_group(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_render_pass_push_debug_group(
                self.id.as_mut().unwrap(),
                cstring.as_ptr(),
                0,
            );
        }
    }

    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        unsafe {
            wgn::wgpu_render_pass_pop_debug_group(self.id.as_mut().unwrap());
        }
    }

    /// Indexed draw; `base_vertex` is added to each index value.
    pub fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
        unsafe {
            wgn::wgpu_render_pass_draw_indexed(
                self.id.as_mut().unwrap(),
                indices.end - indices.start,
                instances.end - instances.start,
                indices.start,
                base_vertex,
                instances.start,
            );
        }
    }

    /// Draw with parameters read from `indirect_buffer` at `indirect_offset`.
    pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        unsafe {
            wgn::wgpu_render_pass_draw_indirect(
                self.id.as_mut().unwrap(),
                indirect_buffer.id,
                indirect_offset,
            );
        }
    }

    /// Indexed draw with parameters read from `indirect_buffer`.
    pub fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        unsafe {
            wgn::wgpu_render_pass_draw_indexed_indirect(
                self.id.as_mut().unwrap(),
                indirect_buffer.id,
                indirect_offset,
            );
        }
    }
}
impl<'a> Drop for RenderPass<'a> {
    fn drop(&mut self) {
        // Skip ending the pass during unwinding to avoid FFI calls (and a
        // possible double panic) from a poisoned state.
        if !thread::panicking() {
            unsafe {
                wgn::wgpu_render_pass_end_pass(self.id);
            }
        }
    }
}
impl<'a> ComputePass<'a> {
    /// Binds `bind_group` to slot `index`, with one dynamic offset per
    /// dynamic binding in the group's layout.
    pub fn set_bind_group(
        &mut self,
        index: u32,
        bind_group: &'a BindGroup,
        offsets: &[DynamicOffset],
    ) {
        unsafe {
            wgn::wgpu_compute_pass_set_bind_group(
                self.id.as_mut().unwrap(),
                index,
                bind_group.id,
                offsets.as_ptr(),
                offsets.len(),
            );
        }
    }

    /// Sets the active compute pipeline.
    pub fn set_pipeline(&mut self, pipeline: &'a ComputePipeline) {
        unsafe {
            wgn::wgpu_compute_pass_set_pipeline(
                self.id.as_mut().unwrap(),
                pipeline.id,
            );
        }
    }

    /// Inserts a debug marker label. Panics on interior NUL bytes in `label`.
    pub fn insert_debug_marker(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_compute_pass_insert_debug_marker(
                self.id.as_mut().unwrap(),
                cstring.as_ptr(),
                0,
            );
        }
    }

    /// Opens a debug group; close it with `pop_debug_group`.
    pub fn push_debug_group(&mut self, label: &str) {
        let cstring = CString::new(label).unwrap();
        unsafe {
            wgn::wgpu_compute_pass_push_debug_group(
                self.id.as_mut().unwrap(),
                cstring.as_ptr(),
                0,
            );
        }
    }

    /// Closes the most recently pushed debug group.
    pub fn pop_debug_group(&mut self) {
        unsafe {
            wgn::wgpu_compute_pass_pop_debug_group(self.id.as_mut().unwrap());
        }
    }

    /// Dispatches a compute grid of `x * y * z` workgroups.
    pub fn dispatch(&mut self, x: u32, y: u32, z: u32) {
        unsafe {
            wgn::wgpu_compute_pass_dispatch(
                self.id.as_mut().unwrap(),
                x, y, z,
            );
        }
    }

    /// Dispatch with workgroup counts read from `indirect_buffer`.
    pub fn dispatch_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        unsafe {
            wgn::wgpu_compute_pass_dispatch_indirect(
                self.id.as_mut().unwrap(),
                indirect_buffer.id,
                indirect_offset,
            );
        }
    }
}
impl<'a> Drop for ComputePass<'a> {
    fn drop(&mut self) {
        // Skip ending the pass during unwinding to avoid FFI calls from a
        // poisoned state.
        if !thread::panicking() {
            unsafe {
                wgn::wgpu_compute_pass_end_pass(self.id);
            }
        }
    }
}
impl Queue {
    /// Submits the given command buffers for execution.
    pub fn submit(&self, command_buffers: &[CommandBuffer]) {
        // Collect raw ids; the SmallVec must stay alive while the native
        // call borrows its pointer.
        let temp_command_buffers = command_buffers.iter()
            .map(|cb| cb.id)
            .collect::<SmallVec<[_; 4]>>();
        unsafe {
            wgn::wgpu_queue_submit(
                self.id,
                temp_command_buffers.as_ptr(),
                command_buffers.len(),
            )
        };
    }
}
impl Drop for SwapChainOutput {
    fn drop(&mut self) {
        // Present the frame when the output goes out of scope; skipped
        // during unwinding to avoid FFI calls from a poisoned state.
        if !thread::panicking() {
            wgn::wgpu_swap_chain_present(self.swap_chain_id);
        }
    }
}
/// Error returned when the next swap-chain texture could not be acquired.
#[derive(Clone, Debug)]
pub struct TimeOut;
impl SwapChain {
    /// Acquires the next presentable texture of this swap chain.
    ///
    /// Returns `Err(TimeOut)` when the native layer produced no view id.
    /// The returned view is not owned, so dropping it does not destroy the
    /// underlying native object.
    pub fn get_next_texture(&mut self) -> Result<SwapChainOutput, TimeOut> {
        let output = wgn::wgpu_swap_chain_get_next_texture(self.id);
        let swap_chain_id = self.id;
        output.view_id.map_or(Err(TimeOut), move |id| {
            Ok(SwapChainOutput {
                view: TextureView { id, owned: false },
                swap_chain_id,
            })
        })
    }
}
/// Owns a NUL-terminated copy of an optional label so a stable
/// `*const c_char` can be handed across the FFI boundary.
struct OwnedLabel(Option<CString>);

impl OwnedLabel {
    /// Copies `text` into an owned C string.
    ///
    /// Panics ("invalid label") if the text contains an interior NUL byte.
    fn new(text: Option<&str>) -> Self {
        let owned = match text {
            Some(t) => Some(CString::new(t).expect("invalid label")),
            None => None,
        };
        OwnedLabel(owned)
    }

    /// Pointer to the stored label, or null when no label was given.
    /// Only valid while `self` is alive.
    fn as_ptr(&self) -> *const std::os::raw::c_char {
        self.0
            .as_ref()
            .map_or(ptr::null(), |c_string| c_string.as_ptr())
    }
}