pub trait Backend: Send + Sync {
// Required methods
fn dot_product(&self, a: &[f32], b: &[f32]) -> f32;
fn sparse_matmul(
&self,
matrix: &Array2<f32>,
input: &[f32],
rows: &[usize],
) -> Vec<f32>;
fn sparse_matmul_accumulate(
&self,
matrix: &Array2<f32>,
input: &[f32],
cols: &[usize],
output: &mut [f32],
);
fn activation(&self, data: &mut [f32], activation_type: ActivationType);
fn add(&self, a: &mut [f32], b: &[f32]);
fn axpy(&self, a: &mut [f32], b: &[f32], scalar: f32);
fn name(&self) -> &'static str;
fn simd_width(&self) -> usize;
}Expand description
Backend trait for SIMD/vectorized operations
Required Methods
fn dot_product(&self, a: &[f32], b: &[f32]) -> f32
Dot product of two vectors.
fn sparse_matmul(&self, matrix: &Array2<f32>, input: &[f32], rows: &[usize]) -> Vec<f32>
Sparse matrix-vector multiplication.
Only computes the rows specified in `rows`.
fn sparse_matmul_accumulate(&self, matrix: &Array2<f32>, input: &[f32], cols: &[usize], output: &mut [f32])
Sparse matrix-vector multiplication with column-major accumulation.
fn activation(&self, data: &mut [f32], activation_type: ActivationType)
Apply activation function in-place.
fn axpy(&self, a: &mut [f32], b: &[f32], scalar: f32)
Vectorized multiply-add: `a[i] += b[i] * scalar`.
fn simd_width(&self) -> usize
SIMD width (number of `f32`s per vector register).