pub struct SimdOps { /* private fields */ }
Expand description
SIMD-optimized operations for pipeline processing
Implementations§
Source§impl SimdOps
impl SimdOps
Sourcepub fn new(config: SimdConfig) -> Self
pub fn new(config: SimdConfig) -> Self
Create new SIMD operations instance
Sourcepub fn add_arrays(
&self,
a: &ArrayView1<'_, Float>,
b: &ArrayView1<'_, Float>,
) -> SklResult<Array1<Float>>
pub fn add_arrays( &self, a: &ArrayView1<'_, Float>, b: &ArrayView1<'_, Float>, ) -> SklResult<Array1<Float>>
Vectorized addition of two arrays
Sourcepub fn matrix_multiply(
&self,
a: &ArrayView2<'_, Float>,
b: &ArrayView2<'_, Float>,
) -> SklResult<Array2<Float>>
pub fn matrix_multiply( &self, a: &ArrayView2<'_, Float>, b: &ArrayView2<'_, Float>, ) -> SklResult<Array2<Float>>
Vectorized matrix multiplication
Sourcepub fn dot_product(
&self,
a: &ArrayView1<'_, Float>,
b: &ArrayView1<'_, Float>,
) -> SklResult<Float>
pub fn dot_product( &self, a: &ArrayView1<'_, Float>, b: &ArrayView1<'_, Float>, ) -> SklResult<Float>
Vectorized dot product
Sourcepub fn elementwise_op<F>(
&self,
a: &ArrayView1<'_, Float>,
op: F,
) -> SklResult<Array1<Float>>
pub fn elementwise_op<F>( &self, a: &ArrayView1<'_, Float>, op: F, ) -> SklResult<Array1<Float>>
Vectorized element-wise operations
Sourcepub fn normalize_l2(
&self,
a: &ArrayView1<'_, Float>,
) -> SklResult<Array1<Float>>
pub fn normalize_l2( &self, a: &ArrayView1<'_, Float>, ) -> SklResult<Array1<Float>>
Vectorized normalization (L2 norm)
Sourcepub fn scale(
&self,
a: &ArrayView1<'_, Float>,
scale_factor: Float,
) -> SklResult<Array1<Float>>
pub fn scale( &self, a: &ArrayView1<'_, Float>, scale_factor: Float, ) -> SklResult<Array1<Float>>
Vectorized scaling
Sourcepub fn aligned_array_1d(&self, size: usize) -> Array1<Float>
pub fn aligned_array_1d(&self, size: usize) -> Array1<Float>
Memory-aligned array allocation for optimal SIMD performance
Sourcepub fn aligned_array_2d(&self, rows: usize, cols: usize) -> Array2<Float>
pub fn aligned_array_2d(&self, rows: usize, cols: usize) -> Array2<Float>
Memory-aligned matrix allocation
Sourcepub fn is_aligned(&self, ptr: *const Float) -> bool
pub fn is_aligned(&self, ptr: *const Float) -> bool
Check if arrays are properly aligned for SIMD operations
Sourcepub fn optimal_chunk_size(&self, total_size: usize) -> usize
pub fn optimal_chunk_size(&self, total_size: usize) -> usize
Get optimal chunk size for parallel SIMD operations
Sourcepub fn standardize_features(
&self,
data: &ArrayView2<'_, Float>,
) -> SklResult<Array2<Float>>
pub fn standardize_features( &self, data: &ArrayView2<'_, Float>, ) -> SklResult<Array2<Float>>
Vectorized feature standardization (z-score normalization)
Sourcepub fn min_max_scale(
&self,
data: &ArrayView2<'_, Float>,
) -> SklResult<Array2<Float>>
pub fn min_max_scale( &self, data: &ArrayView2<'_, Float>, ) -> SklResult<Array2<Float>>
Vectorized min-max scaling
Sourcepub fn polynomial_features(
&self,
data: &ArrayView2<'_, Float>,
degree: usize,
) -> SklResult<Array2<Float>>
pub fn polynomial_features( &self, data: &ArrayView2<'_, Float>, degree: usize, ) -> SklResult<Array2<Float>>
Generate polynomial features up to specified degree
Sourcepub fn vectorized_sum(&self, data: &ArrayView1<'_, Float>) -> SklResult<Float>
pub fn vectorized_sum(&self, data: &ArrayView1<'_, Float>) -> SklResult<Float>
Vectorized sum with SIMD optimization
Sourcepub unsafe fn fast_aligned_copy(
&self,
src: *const Float,
dst: *mut Float,
count: usize,
) -> SklResult<()>
pub unsafe fn fast_aligned_copy( &self, src: *const Float, dst: *mut Float, count: usize, ) -> SklResult<()>
Ultra-fast unsafe memory copy optimized for aligned data
§Safety
This function is unsafe because:
- It assumes src and dst pointers are valid for count elements
- It assumes proper memory alignment for SIMD operations
- It performs unchecked pointer arithmetic
Sourcepub unsafe fn fast_small_matrix_mul(
&self,
a: *const Float,
b: *const Float,
c: *mut Float,
m: usize,
n: usize,
k: usize,
) -> SklResult<()>
pub unsafe fn fast_small_matrix_mul( &self, a: *const Float, b: *const Float, c: *mut Float, m: usize, n: usize, k: usize, ) -> SklResult<()>
Unsafe unrolled matrix multiplication for small fixed-size matrices
§Safety
This function is unsafe because:
- It assumes matrix pointers are valid and properly aligned
- It performs manual loop unrolling with unchecked indexing
- It assumes matrices are in row-major order
Sourcepub unsafe fn cache_oblivious_transpose(
&self,
src: *const Float,
dst: *mut Float,
src_rows: usize,
src_cols: usize,
dst_cols: usize,
row_start: usize,
col_start: usize,
block_rows: usize,
block_cols: usize,
) -> SklResult<()>
pub unsafe fn cache_oblivious_transpose( &self, src: *const Float, dst: *mut Float, src_rows: usize, src_cols: usize, dst_cols: usize, row_start: usize, col_start: usize, block_rows: usize, block_cols: usize, ) -> SklResult<()>
Unsafe cache-oblivious matrix transpose for better cache performance
§Safety
This function is unsafe because:
- It performs unchecked pointer arithmetic
- It assumes valid memory layouts for source and destination
- It uses recursive divide-and-conquer with raw pointers
Sourcepub unsafe fn fast_vectorized_sum(
&self,
data: *const Float,
len: usize,
) -> SklResult<Float>
pub unsafe fn fast_vectorized_sum( &self, data: *const Float, len: usize, ) -> SklResult<Float>
Unsafe vectorized sum with manual unrolling and FMA
§Safety
This function is unsafe because:
- It assumes proper memory alignment and validity
- It performs unchecked SIMD operations
- It uses manual loop unrolling for maximum performance
Auto Trait Implementations§
impl Freeze for SimdOps
impl RefUnwindSafe for SimdOps
impl Send for SimdOps
impl Sync for SimdOps
impl Unpin for SimdOps
impl UnwindSafe for SimdOps
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more