Backend

Trait Backend 

Source
/// Backend trait for SIMD/vectorized operations.
///
/// Implementors supply vectorized primitives — dot product, sparse
/// matrix-vector products, in-place activation, element-wise add, and
/// axpy — so callers can dispatch to whatever SIMD backend is available.
/// `Send + Sync` is required so a backend can be shared across threads.
pub trait Backend: Send + Sync {
    // Required methods
    /// Dot product of two vectors.
    fn dot_product(&self, a: &[f32], b: &[f32]) -> f32;
    /// Sparse matrix-vector multiplication.
    ///
    /// Only computes the rows specified in `rows`; returns one output
    /// value per requested row.
    fn sparse_matmul(
        &self,
        matrix: &Array2<f32>,
        input: &[f32],
        rows: &[usize],
    ) -> Vec<f32>;
    /// Sparse matrix-vector multiplication with column-major accumulation.
    ///
    /// Accumulates the contribution of the columns listed in `cols`
    /// into `output` in place.
    fn sparse_matmul_accumulate(
        &self,
        matrix: &Array2<f32>,
        input: &[f32],
        cols: &[usize],
        output: &mut [f32],
    );
    /// Apply the activation function selected by `activation_type`
    /// to `data` in-place.
    fn activation(&self, data: &mut [f32], activation_type: ActivationType);
    /// Vectorized element-wise addition: `a[i] += b[i]`.
    fn add(&self, a: &mut [f32], b: &[f32]);
    /// Vectorized multiply-add: `a[i] += b[i] * scalar`.
    fn axpy(&self, a: &mut [f32], b: &[f32], scalar: f32);
    /// Backend name for debugging.
    fn name(&self) -> &'static str;
    /// SIMD width (number of `f32`s per vector register).
    fn simd_width(&self) -> usize;
}
Expand description

Backend trait for SIMD/vectorized operations

Required Methods§

Source

fn dot_product(&self, a: &[f32], b: &[f32]) -> f32

Dot product of two vectors

Source

fn sparse_matmul( &self, matrix: &Array2<f32>, input: &[f32], rows: &[usize], ) -> Vec<f32>

Sparse matrix-vector multiplication. Only computes the rows specified in `rows`.

Source

fn sparse_matmul_accumulate( &self, matrix: &Array2<f32>, input: &[f32], cols: &[usize], output: &mut [f32], )

Sparse matrix-vector multiplication with column-major accumulation

Source

fn activation(&self, data: &mut [f32], activation_type: ActivationType)

Apply activation function in-place

Source

fn add(&self, a: &mut [f32], b: &[f32])

Vectorized addition

Source

fn axpy(&self, a: &mut [f32], b: &[f32], scalar: f32)

Vectorized multiply-add: a[i] += b[i] * scalar

Source

fn name(&self) -> &'static str

Backend name for debugging

Source

fn simd_width(&self) -> usize

SIMD width (number of f32s per vector register)

Implementors§