pub struct WasmBackend { /* private fields */ }
WebGPU compute backend for WASM (browser) targets.
Wraps WebGpuBackend and adds browser-specific initialisation paths.
Implements ComputeBackend by delegating all compute operations to the
inner WebGpuBackend, which already supports WASM via wgpu’s web-sys
backend.
§Notes
Synchronous ComputeBackend trait methods use pollster::block_on to
bridge async wgpu calls. In production browser deployments, prefer using
the async initialisation helpers directly and scheduling GPU work on web
workers where blocking is acceptable.
Implementations§
Source§impl WasmBackend
impl WasmBackend
pub async fn init_from_canvas(_canvas_id: &str) -> Result<Self, WebGpuError>
pub async fn init_from_canvas(_canvas_id: &str) -> Result<Self, WebGpuError>
Initialise the backend from an HTML canvas element by ID.
This is the recommended browser entry point. The canvas is not used for rendering but is required by some WebGPU implementations to obtain a valid adapter.
§Errors
Returns an error if no WebGPU adapter is available or device creation fails.
Trait Implementations§
Source§impl ComputeBackend for WasmBackend
impl ComputeBackend for WasmBackend
Source§fn init(&mut self) -> BackendResult<()>
fn init(&mut self) -> BackendResult<()>
Source§fn is_initialized(&self) -> bool
fn is_initialized(&self) -> bool
Returns true if the backend is ready for operations.
fn gemm(
&self,
trans_a: BackendTranspose,
trans_b: BackendTranspose,
m: usize,
n: usize,
k: usize,
alpha: f64,
a_ptr: u64,
lda: usize,
b_ptr: u64,
ldb: usize,
beta: f64,
c_ptr: u64,
ldc: usize,
) -> BackendResult<()>
fn gemm( &self, trans_a: BackendTranspose, trans_b: BackendTranspose, m: usize, n: usize, k: usize, alpha: f64, a_ptr: u64, lda: usize, b_ptr: u64, ldb: usize, beta: f64, c_ptr: u64, ldc: usize, ) -> BackendResult<()>
C = alpha * op(A) * op(B) + beta * C. Read more
fn conv2d_forward(
&self,
input_ptr: u64,
input_shape: &[usize],
filter_ptr: u64,
filter_shape: &[usize],
output_ptr: u64,
output_shape: &[usize],
stride: &[usize],
padding: &[usize],
) -> BackendResult<()>
fn conv2d_forward( &self, input_ptr: u64, input_shape: &[usize], filter_ptr: u64, filter_shape: &[usize], output_ptr: u64, output_shape: &[usize], stride: &[usize], padding: &[usize], ) -> BackendResult<()>
Source§fn attention(
&self,
q_ptr: u64,
k_ptr: u64,
v_ptr: u64,
o_ptr: u64,
batch: usize,
heads: usize,
seq_q: usize,
seq_kv: usize,
head_dim: usize,
scale: f64,
causal: bool,
) -> BackendResult<()>
fn attention( &self, q_ptr: u64, k_ptr: u64, v_ptr: u64, o_ptr: u64, batch: usize, heads: usize, seq_q: usize, seq_kv: usize, head_dim: usize, scale: f64, causal: bool, ) -> BackendResult<()>
Source§fn reduce(
&self,
op: ReduceOp,
input_ptr: u64,
output_ptr: u64,
shape: &[usize],
axis: usize,
) -> BackendResult<()>
fn reduce( &self, op: ReduceOp, input_ptr: u64, output_ptr: u64, shape: &[usize], axis: usize, ) -> BackendResult<()>
Source§fn unary(
&self,
op: UnaryOp,
input_ptr: u64,
output_ptr: u64,
n: usize,
) -> BackendResult<()>
fn unary( &self, op: UnaryOp, input_ptr: u64, output_ptr: u64, n: usize, ) -> BackendResult<()>
Source§fn binary(
&self,
op: BinaryOp,
a_ptr: u64,
b_ptr: u64,
output_ptr: u64,
n: usize,
) -> BackendResult<()>
fn binary( &self, op: BinaryOp, a_ptr: u64, b_ptr: u64, output_ptr: u64, n: usize, ) -> BackendResult<()>
Source§fn synchronize(&self) -> BackendResult<()>
fn synchronize(&self) -> BackendResult<()>
Source§fn free(&self, ptr: u64) -> BackendResult<()>
fn free(&self, ptr: u64) -> BackendResult<()>
Frees memory previously allocated by alloc.
fn copy_htod(&self, dst: u64, src: &[u8]) -> BackendResult<()>
fn copy_htod(&self, dst: u64, src: &[u8]) -> BackendResult<()>
Source§fn copy_dtoh(&self, dst: &mut [u8], src: u64) -> BackendResult<()>
fn copy_dtoh(&self, dst: &mut [u8], src: u64) -> BackendResult<()>
Source§fn batched_gemm(
&self,
trans_a: BackendTranspose,
trans_b: BackendTranspose,
m: usize,
n: usize,
k: usize,
alpha: f64,
a_ptr: u64,
lda: usize,
stride_a: usize,
b_ptr: u64,
ldb: usize,
stride_b: usize,
beta: f64,
c_ptr: u64,
ldc: usize,
stride_c: usize,
batch_count: usize,
) -> Result<(), BackendError>
fn batched_gemm( &self, trans_a: BackendTranspose, trans_b: BackendTranspose, m: usize, n: usize, k: usize, alpha: f64, a_ptr: u64, lda: usize, stride_a: usize, b_ptr: u64, ldb: usize, stride_b: usize, beta: f64, c_ptr: u64, ldc: usize, stride_c: usize, batch_count: usize, ) -> Result<(), BackendError>
For each b in 0..batch_count, compute C_b = alpha * op(A_b) * op(B_b) + beta * C_b, where A_b starts at a_ptr + b * stride_a * 4 bytes (f32 elements), etc. Read more