mod fragment_only;
mod game;
mod limits;
pub use fragment_only::FragmentOnlyRenderBundleEncoder;
pub use fragment_only::FragmentOnlyRenderBundleEncoderDescriptor;
pub use fragment_only::FragmentOnlyRenderPass;
pub use fragment_only::FragmentOnlyRenderPassDescriptor;
pub use fragment_only::FragmentOnlyRenderPipeline;
pub use fragment_only::FragmentOnlyRenderPipelineDescriptor;
pub use game::window_size::WindowSizeDependent;
pub use game::Game;
pub use game::GameCommand;
pub use game::GameData;
pub use game::GameInitialisationFailure;
use wgpu::util::DeviceExt;
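/// Input handling types re-exported from the game module.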
pub mod input {
pub use crate::game::input::*;
}
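/// Rounds `value` up to the nearest multiple of `multiple`.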
fn next_multiple_of(
value: wgpu::BufferAddress,
multiple: wgpu::BufferAddress,
) -> wgpu::BufferAddress {
match value % multiple {
0 => value,
r => value + (multiple - r),
}
}
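/// Alignment, in bytes, that the `*_padded` helpers round buffer sizes and write lengths up to.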
pub const BUFFER_PADDING: wgpu::BufferAddress = 256;
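/// Marker traits used to seal this crate's `Lf*Ext` extension traits against implementations
/// outside this crate.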
mod sealed {
pub trait SealedDevice {}
impl SealedDevice for wgpu::Device {}
pub trait SealedInstance {}
impl SealedInstance for wgpu::Instance {}
pub trait SealedCommandEncoder {}
impl SealedCommandEncoder for wgpu::CommandEncoder {}
pub trait SealedLimits {}
impl SealedLimits for wgpu::Limits {}
pub trait SealedBuffer {}
impl SealedBuffer for wgpu::Buffer {}
pub trait SealedQueue {}
impl SealedQueue for wgpu::Queue {}
pub trait SealedBindGroupLayoutEntry {}
impl SealedBindGroupLayoutEntry for wgpu::BindGroupLayoutEntry {}
pub trait SealedGame {}
impl<T: crate::Game> SealedGame for T {}
}
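/// Descriptor for [`LfDeviceExt::create_buffer_init_padded`]. The `contents` are zero-padded to a
/// multiple of [`BUFFER_PADDING`] before the buffer is created.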
pub struct PaddedBufferInitDescriptor<'a> {
pub label: wgpu::Label<'a>,
pub contents: Vec<u8>,
pub usage: wgpu::BufferUsages,
}
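/// Extension methods for [`wgpu::Device`]. This trait is sealed and implemented only for
/// [`wgpu::Device`].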
pub trait LfDeviceExt: sealed::SealedDevice {
fn create_buffer_padded(&self, desc: wgpu::BufferDescriptor) -> wgpu::Buffer;
fn create_buffer_init_padded(&self, desc: PaddedBufferInitDescriptor) -> wgpu::Buffer;
fn create_fragment_only_render_bundle_encoder(
&self,
desc: &FragmentOnlyRenderBundleEncoderDescriptor,
) -> FragmentOnlyRenderBundleEncoder;
fn create_fragment_only_render_pipeline(
&self,
desc: &FragmentOnlyRenderPipelineDescriptor,
) -> FragmentOnlyRenderPipeline;
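    /// Creates a shader module, skipping shader validation on native release builds.
    ///
    /// # Safety
    ///
    /// On native release builds this calls [`wgpu::Device::create_shader_module_unchecked`], so
    /// `desc` must describe a valid shader module. Debug and wasm32 builds fall back to the
    /// checked [`wgpu::Device::create_shader_module`].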
unsafe fn create_shader_module_unchecked_on_release(
&self,
desc: wgpu::ShaderModuleDescriptor,
) -> wgpu::ShaderModule;
}
impl LfDeviceExt for wgpu::Device {
fn create_buffer_padded(&self, mut desc: wgpu::BufferDescriptor) -> wgpu::Buffer {
desc.size = next_multiple_of(desc.size, BUFFER_PADDING);
self.create_buffer(&desc)
}
fn create_buffer_init_padded(&self, mut desc: PaddedBufferInitDescriptor) -> wgpu::Buffer {
let new_len = next_multiple_of(desc.contents.len() as wgpu::BufferAddress, BUFFER_PADDING);
desc.contents.resize(new_len as usize, 0u8);
self.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: desc.label,
contents: &desc.contents,
usage: desc.usage,
})
}
fn create_fragment_only_render_bundle_encoder(
&self,
desc: &FragmentOnlyRenderBundleEncoderDescriptor,
) -> FragmentOnlyRenderBundleEncoder {
FragmentOnlyRenderBundleEncoder::new(self, desc)
}
fn create_fragment_only_render_pipeline(
&self,
desc: &FragmentOnlyRenderPipelineDescriptor,
) -> FragmentOnlyRenderPipeline {
FragmentOnlyRenderPipeline::new(self, desc)
}
unsafe fn create_shader_module_unchecked_on_release(
&self,
desc: wgpu::ShaderModuleDescriptor,
) -> wgpu::ShaderModule {
        #[cfg(any(target_arch = "wasm32", debug_assertions))]
{
self.create_shader_module(desc)
}
        #[cfg(not(any(target_arch = "wasm32", debug_assertions)))]
{
self.create_shader_module_unchecked(desc)
}
}
}
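/// Extension methods for [`wgpu::CommandEncoder`]. This trait is sealed and implemented only for
/// [`wgpu::CommandEncoder`].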
pub trait LfCommandEncoderExt: sealed::SealedCommandEncoder {
fn begin_fragment_only_render_pass<'pass>(
&'pass mut self,
desc: &FragmentOnlyRenderPassDescriptor<'pass, '_>,
) -> FragmentOnlyRenderPass<'pass>;
}
impl LfCommandEncoderExt for wgpu::CommandEncoder {
fn begin_fragment_only_render_pass<'pass>(
&'pass mut self,
desc: &FragmentOnlyRenderPassDescriptor<'pass, '_>,
) -> FragmentOnlyRenderPass<'pass> {
FragmentOnlyRenderPass::new(self, desc)
}
}
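/// Extension methods for [`wgpu::Limits`] for combining two limit sets by intersection or union.
///
/// A minimal usage sketch, marked `ignore` because the crate path depends on how this library is
/// imported (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::LfLimitsExt;
///
/// let a = wgpu::Limits::default();
/// let b = wgpu::Limits::downlevel_defaults();
/// // Combine the two limit sets.
/// let intersected = a.intersection(&b);
/// let unioned = a.union(&b);
/// ```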
pub trait LfLimitsExt: sealed::SealedLimits {
    fn intersection(&self, other: &wgpu::Limits) -> wgpu::Limits;
    fn union(&self, other: &wgpu::Limits) -> wgpu::Limits;
}
impl LfLimitsExt for wgpu::Limits {
    fn intersection(&self, other: &wgpu::Limits) -> wgpu::Limits {
crate::limits::limits_intersection(self, other)
}
    fn union(&self, other: &wgpu::Limits) -> wgpu::Limits {
crate::limits::limits_union(self, other)
}
}
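/// Extension methods for [`wgpu::Queue`]. This trait is sealed and implemented only for
/// [`wgpu::Queue`].
///
/// [`LfQueueExt::write_buffer_padded`] zero-pads the data to a multiple of [`BUFFER_PADDING`]
/// before writing, so the destination buffer must have room for the padded length at `offset`.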
pub trait LfQueueExt: sealed::SealedQueue {
fn write_buffer_padded(
&self,
buffer: &wgpu::Buffer,
offset: wgpu::BufferAddress,
data: Vec<u8>,
);
}
impl LfQueueExt for wgpu::Queue {
fn write_buffer_padded(
&self,
buffer: &wgpu::Buffer,
offset: wgpu::BufferAddress,
mut data: Vec<u8>,
) {
        let padded_len = next_multiple_of(data.len() as wgpu::BufferAddress, BUFFER_PADDING);
        data.resize(padded_len as usize, 0u8);
self.write_buffer(buffer, offset, &data)
}
}
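/// Extension methods for [`wgpu::Buffer`]. This trait is sealed and implemented only for
/// [`wgpu::Buffer`].
///
/// [`LfBufferExt::debug_read_blocking`] copies the buffer into a staging buffer and blocks the
/// calling thread until the contents can be read back; the buffer must have
/// [`wgpu::BufferUsages::COPY_SRC`] usage.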
pub trait LfBufferExt: sealed::SealedBuffer {
fn debug_read_blocking(&self, device: &wgpu::Device, queue: &wgpu::Queue) -> Vec<u8>;
}
impl LfBufferExt for wgpu::Buffer {
fn debug_read_blocking(&self, device: &wgpu::Device, queue: &wgpu::Queue) -> Vec<u8> {
assert!(self.usage().contains(wgpu::BufferUsages::COPY_SRC));
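        // Copy the buffer's contents into a temporary staging buffer that can be mapped for reading.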
let staging = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("debug-read-staging"),
size: self.size(),
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut cmd = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("debug-read-cmd-encoder"),
});
cmd.copy_buffer_to_buffer(self, 0, &staging, 0, self.size());
queue.submit(vec![cmd.finish()]);
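        // Map the staging buffer and block until the copy and the mapping have completed.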
let (sender, receiver) = std::sync::mpsc::channel();
staging.slice(..).map_async(wgpu::MapMode::Read, move |e| {
sender.send(e).expect("failed to send result of map");
});
device.poll(wgpu::Maintain::Wait);
receiver
.recv()
.expect("failed to get result of map")
.expect("failed to read buffer");
let slice = staging.slice(..).get_mapped_range();
slice.to_vec()
}
}
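/// Convenience constructors for [`wgpu::BindGroupLayoutEntry`] values describing storage buffers
/// visible to compute shaders. This trait is sealed and implemented only for
/// [`wgpu::BindGroupLayoutEntry`].
///
/// A minimal usage sketch, marked `ignore` because the crate path depends on how this library is
/// imported (`your_crate` is a placeholder):
///
/// ```ignore
/// use your_crate::LfBindGroupLayoutEntryExt;
///
/// let entries = [
///     // Binding 0: storage buffer the compute shader only reads.
///     wgpu::BindGroupLayoutEntry::read_only_compute_storage(0),
///     // Binding 1: storage buffer the compute shader may write.
///     wgpu::BindGroupLayoutEntry::mutable_compute_storage(1),
/// ];
/// ```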
pub trait LfBindGroupLayoutEntryExt: sealed::SealedBindGroupLayoutEntry {
fn read_only_compute_storage(binding: u32) -> Self;
fn mutable_compute_storage(binding: u32) -> Self;
}
impl LfBindGroupLayoutEntryExt for wgpu::BindGroupLayoutEntry {
fn read_only_compute_storage(binding: u32) -> Self {
wgpu::BindGroupLayoutEntry {
binding,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
}
fn mutable_compute_storage(binding: u32) -> Self {
wgpu::BindGroupLayoutEntry {
binding,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
}
}
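/// Entry point for running a [`Game`]. This trait is sealed and implemented for every type that
/// implements [`Game`].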
pub trait LfGameExt: sealed::SealedGame {
type InitData;
fn run(init: Self::InitData);
}
impl<T: Game + 'static> LfGameExt for T {
type InitData = T::InitData;
fn run(init: T::InitData) {
game::GameState::<T>::run(init);
}
}