scirs2_core/ndarray_ext/elementwise/functions_6.rs
1//! Auto-generated module
2//!
//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use crate::numeric::Float;
6use crate::simd_ops::{AutoOptimizer, SimdUnifiedOps};
7use ::ndarray::{Array1, ArrayView1};
8
9/// SIMD-accelerated Chebyshev distance
10///
11/// Computes max(|a - b|)
12///
13/// # Arguments
14/// * `a` - First vector
15/// * `b` - Second vector
16///
17/// # Returns
18/// Chebyshev (Lā) distance between vectors
19///
20/// # Examples
21/// ```
22/// use scirs2_core::ndarray::array;
23/// use scirs2_core::ndarray_ext::elementwise::distance_chebyshev_simd;
24///
25/// let a = array![1.0_f64, 2.0, 3.0];
26/// let b = array![4.0_f64, 0.0, 3.0];
27/// let d = distance_chebyshev_simd::<f64>(&a.view(), &b.view());
28/// // max(|1-4|, |2-0|, |3-3|) = max(3, 2, 0) = 3
29/// assert!((d - 3.0).abs() < 1e-14);
30/// ```
31///
32/// # Use Cases
33/// - Chess king distance
34/// - Maximum deviation
35/// - Robust outlier detection
36/// - Image processing
37pub fn distance_chebyshev_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> F
38where
39 F: Float + SimdUnifiedOps,
40{
41 if a.is_empty() || b.is_empty() {
42 return F::zero();
43 }
44 F::simd_distance_chebyshev(a, b)
45}
46/// SIMD-accelerated cosine distance
47///
48/// Computes 1 - cosine_similarity(a, b)
49///
50/// # Arguments
51/// * `a` - First vector
52/// * `b` - Second vector
53///
54/// # Returns
55/// Cosine distance (0 = identical direction, 2 = opposite direction)
56///
57/// # Examples
58/// ```
59/// use scirs2_core::ndarray::array;
60/// use scirs2_core::ndarray_ext::elementwise::distance_cosine_simd;
61///
62/// let a = array![1.0_f64, 0.0, 0.0];
63/// let b = array![0.0_f64, 1.0, 0.0];
64/// let d = distance_cosine_simd::<f64>(&a.view(), &b.view());
65/// // Orthogonal vectors have cosine similarity 0, distance 1
66/// assert!((d - 1.0).abs() < 1e-10);
67/// ```
68///
69/// # Use Cases
70/// - Text similarity (TF-IDF vectors)
71/// - Recommendation systems
72/// - Document clustering
73/// - Image retrieval
74pub fn distance_cosine_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> F
75where
76 F: Float + SimdUnifiedOps,
77{
78 if a.is_empty() || b.is_empty() {
79 return F::one();
80 }
81 F::simd_distance_cosine(a, b)
82}
83/// SIMD-accelerated cosine similarity
84///
85/// Computes (a Ā· b) / (||a|| * ||b||)
86///
87/// # Arguments
88/// * `a` - First vector
89/// * `b` - Second vector
90///
91/// # Returns
92/// Cosine similarity (-1 to 1, where 1 = identical direction)
93///
94/// # Examples
95/// ```
96/// use scirs2_core::ndarray::array;
97/// use scirs2_core::ndarray_ext::elementwise::cosine_similarity_simd;
98///
99/// let a = array![1.0_f64, 2.0, 3.0];
100/// let b = array![2.0_f64, 4.0, 6.0];
101/// let sim = cosine_similarity_simd::<f64>(&a.view(), &b.view());
102/// // Parallel vectors have cosine similarity 1
103/// assert!((sim - 1.0).abs() < 1e-10);
104/// ```
105///
106/// # Use Cases
107/// - Word embedding similarity
108/// - Document comparison
109/// - Recommendation scoring
110/// - Semantic search
111pub fn cosine_similarity_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> F
112where
113 F: Float + SimdUnifiedOps,
114{
115 if a.is_empty() || b.is_empty() {
116 return F::zero();
117 }
118 F::simd_cosine_similarity(a, b)
119}
120/// SIMD-accelerated element-wise addition
121///
122/// Computes a + b element-wise.
123///
124/// # Arguments
125/// * `a` - First array
126/// * `b` - Second array
127///
128/// # Returns
129/// Element-wise sum
130///
131/// # Examples
132/// ```
133/// use scirs2_core::ndarray::array;
134/// use scirs2_core::ndarray_ext::elementwise::add_simd;
135///
136/// let a = array![1.0_f64, 2.0, 3.0];
137/// let b = array![4.0_f64, 5.0, 6.0];
138/// let c = add_simd::<f64>(&a.view(), &b.view());
139/// assert!((c[0] - 5.0).abs() < 1e-14);
140/// assert!((c[1] - 7.0).abs() < 1e-14);
141/// assert!((c[2] - 9.0).abs() < 1e-14);
142/// ```
143///
144/// # Use Cases
145/// - Vector arithmetic
146/// - Residual connections
147/// - Bias addition
148/// - Signal combination
149pub fn add_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
150where
151 F: Float + SimdUnifiedOps,
152{
153 if a.is_empty() || b.is_empty() {
154 return Array1::zeros(0);
155 }
156 F::simd_add(a, b)
157}
158/// SIMD-accelerated element-wise subtraction
159///
160/// Computes a - b element-wise.
161///
162/// # Arguments
163/// * `a` - First array
164/// * `b` - Second array
165///
166/// # Returns
167/// Element-wise difference
168///
169/// # Examples
170/// ```
171/// use scirs2_core::ndarray::array;
172/// use scirs2_core::ndarray_ext::elementwise::sub_simd;
173///
174/// let a = array![5.0_f64, 7.0, 9.0];
175/// let b = array![1.0_f64, 2.0, 3.0];
176/// let c = sub_simd::<f64>(&a.view(), &b.view());
177/// assert!((c[0] - 4.0).abs() < 1e-14);
178/// assert!((c[1] - 5.0).abs() < 1e-14);
179/// assert!((c[2] - 6.0).abs() < 1e-14);
180/// ```
181///
182/// # Use Cases
183/// - Gradient computation
184/// - Error calculation
185/// - Differencing signals
186/// - Relative positioning
187pub fn sub_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
188where
189 F: Float + SimdUnifiedOps,
190{
191 if a.is_empty() || b.is_empty() {
192 return Array1::zeros(0);
193 }
194 F::simd_sub(a, b)
195}
196/// SIMD-accelerated element-wise multiplication
197///
198/// Computes a * b element-wise (Hadamard product).
199///
200/// # Arguments
201/// * `a` - First array
202/// * `b` - Second array
203///
204/// # Returns
205/// Element-wise product
206///
207/// # Examples
208/// ```
209/// use scirs2_core::ndarray::array;
210/// use scirs2_core::ndarray_ext::elementwise::mul_simd;
211///
212/// let a = array![2.0_f64, 3.0, 4.0];
213/// let b = array![5.0_f64, 6.0, 7.0];
214/// let c = mul_simd::<f64>(&a.view(), &b.view());
215/// assert!((c[0] - 10.0).abs() < 1e-14);
216/// assert!((c[1] - 18.0).abs() < 1e-14);
217/// assert!((c[2] - 28.0).abs() < 1e-14);
218/// ```
219///
220/// # Use Cases
221/// - Attention weights application
222/// - Feature gating
223/// - Gradient scaling
224/// - Signal modulation
225pub fn mul_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
226where
227 F: Float + SimdUnifiedOps,
228{
229 if a.is_empty() || b.is_empty() {
230 return Array1::zeros(0);
231 }
232 F::simd_mul(a, b)
233}
234/// SIMD-accelerated element-wise division
235///
236/// Computes a / b element-wise.
237///
238/// # Arguments
239/// * `a` - Numerator array
240/// * `b` - Denominator array
241///
242/// # Returns
243/// Element-wise quotient
244///
245/// # Examples
246/// ```
247/// use scirs2_core::ndarray::array;
248/// use scirs2_core::ndarray_ext::elementwise::div_simd;
249///
250/// let a = array![10.0_f64, 20.0, 30.0];
251/// let b = array![2.0_f64, 4.0, 5.0];
252/// let c = div_simd::<f64>(&a.view(), &b.view());
253/// assert!((c[0] - 5.0).abs() < 1e-14);
254/// assert!((c[1] - 5.0).abs() < 1e-14);
255/// assert!((c[2] - 6.0).abs() < 1e-14);
256/// ```
257///
258/// # Use Cases
259/// - Normalization
260/// - Ratio computation
261/// - Scaling by variable factors
262/// - Probability normalization
263pub fn div_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
264where
265 F: Float + SimdUnifiedOps,
266{
267 if a.is_empty() || b.is_empty() {
268 return Array1::zeros(0);
269 }
270 F::simd_div(a, b)
271}
272/// SIMD-accelerated element-wise maximum
273///
274/// Computes max(a, b) element-wise.
275///
276/// # Arguments
277/// * `a` - First array
278/// * `b` - Second array
279///
280/// # Returns
281/// Element-wise maximum
282///
283/// # Examples
284/// ```ignore
285/// use scirs2_core::ndarray::array;
286/// use scirs2_core::ndarray_ext::elementwise::max_simd;
287///
288/// let a = array![1.0_f64, 5.0, 3.0];
289/// let b = array![4.0_f64, 2.0, 6.0];
290/// let c = max_simd::<f64>(&a.view(), &b.view());
291/// assert!((c[0] - 4.0_f64).abs() < 1e-14);
292/// assert!((c[1] - 5.0_f64).abs() < 1e-14);
293/// assert!((c[2] - 6.0_f64).abs() < 1e-14);
294/// ```
295///
296/// # Use Cases
297/// - ReLU activation: max(0, x)
298/// - Soft clipping
299/// - Envelope detection
300/// - Upper bound enforcement
301pub fn max_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
302where
303 F: Float + SimdUnifiedOps,
304{
305 if a.is_empty() || b.is_empty() {
306 return Array1::zeros(0);
307 }
308 F::simd_max(a, b)
309}
310/// SIMD-accelerated element-wise minimum
311///
312/// Computes min(a, b) element-wise.
313///
314/// # Arguments
315/// * `a` - First array
316/// * `b` - Second array
317///
318/// # Returns
319/// Element-wise minimum
320///
321/// # Examples
322/// ```ignore
323/// use scirs2_core::ndarray::array;
324/// use scirs2_core::ndarray_ext::elementwise::min_simd;
325///
326/// let a = array![1.0_f64, 5.0, 3.0];
327/// let b = array![4.0_f64, 2.0, 6.0];
328/// let c = min_simd::<f64>(&a.view(), &b.view());
329/// assert!((c[0] - 1.0_f64).abs() < 1e-14);
330/// assert!((c[1] - 2.0_f64).abs() < 1e-14);
331/// assert!((c[2] - 3.0_f64).abs() < 1e-14);
332/// ```
333///
334/// # Use Cases
335/// - Gradient clipping
336/// - Soft clipping
337/// - Lower bound enforcement
338/// - Minimum filter in signal processing
339pub fn min_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> Array1<F>
340where
341 F: Float + SimdUnifiedOps,
342{
343 if a.is_empty() || b.is_empty() {
344 return Array1::zeros(0);
345 }
346 F::simd_min(a, b)
347}
348/// SIMD-accelerated scalar multiplication
349///
350/// Computes a * scalar for all elements.
351///
352/// # Arguments
353/// * `a` - Input array
354/// * `scalar` - Scalar multiplier
355///
356/// # Returns
357/// Scaled array
358///
359/// # Examples
360/// ```
361/// use scirs2_core::ndarray::array;
362/// use scirs2_core::ndarray_ext::elementwise::scalar_mul_simd;
363///
364/// let a = array![1.0_f64, 2.0, 3.0];
365/// let c = scalar_mul_simd::<f64>(&a.view(), 2.5);
366/// assert!((c[0] - 2.5).abs() < 1e-14);
367/// assert!((c[1] - 5.0).abs() < 1e-14);
368/// assert!((c[2] - 7.5).abs() < 1e-14);
369/// ```
370///
371/// # Use Cases
372/// - Learning rate scaling
373/// - Normalization
374/// - Unit conversion
375/// - Signal amplification
376pub fn scalar_mul_simd<F>(a: &ArrayView1<F>, scalar: F) -> Array1<F>
377where
378 F: Float + SimdUnifiedOps,
379{
380 if a.is_empty() {
381 return Array1::zeros(0);
382 }
383 F::simd_scalar_mul(a, scalar)
384}
385/// SIMD-accelerated fused multiply-add
386///
387/// Computes a * b + c element-wise in a single operation.
388/// More accurate and efficient than separate multiply and add.
389///
390/// # Arguments
391/// * `a` - First multiplicand
392/// * `b` - Second multiplicand
393/// * `c` - Addend
394///
395/// # Returns
396/// Element-wise fused multiply-add result
397///
398/// # Examples
399/// ```ignore
400/// use scirs2_core::ndarray::array;
401/// use scirs2_core::ndarray_ext::elementwise::fma_simd;
402///
403/// let a = array![1.0_f64, 2.0, 3.0];
404/// let b = array![4.0_f64, 5.0, 6.0];
405/// let c = array![7.0_f64, 8.0, 9.0];
406/// let result = fma_simd::<f64>(&a.view(), &b.view(), &c.view());
407/// // 1*4+7=11, 2*5+8=18, 3*6+9=27
408/// assert!((result[0] - 11.0_f64).abs() < 1e-14);
409/// assert!((result[1] - 18.0_f64).abs() < 1e-14);
410/// assert!((result[2] - 27.0_f64).abs() < 1e-14);
411/// ```
412///
413/// # Use Cases
414/// - Matrix multiplication inner loop
415/// - Polynomial evaluation (Horner's method)
416/// - Linear combinations
417/// - Neural network forward pass
418pub fn fma_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>, c: &ArrayView1<F>) -> Array1<F>
419where
420 F: Float + SimdUnifiedOps,
421{
422 if a.is_empty() || b.is_empty() || c.is_empty() {
423 return Array1::zeros(0);
424 }
425 F::simd_fma(a, b, c)
426}
427/// SIMD-accelerated dot product
428///
429/// Computes sum(a * b).
430///
431/// # Arguments
432/// * `a` - First vector
433/// * `b` - Second vector
434///
435/// # Returns
436/// Dot product (scalar)
437///
438/// # Examples
439/// ```
440/// use scirs2_core::ndarray::array;
441/// use scirs2_core::ndarray_ext::elementwise::dot_simd;
442///
443/// let a = array![1.0_f64, 2.0, 3.0];
444/// let b = array![4.0_f64, 5.0, 6.0];
445/// let d = dot_simd::<f64>(&a.view(), &b.view());
446/// // 1*4 + 2*5 + 3*6 = 4 + 10 + 18 = 32
447/// assert!((d - 32.0).abs() < 1e-14);
448/// ```
449///
450/// # Use Cases
451/// - Projection calculations
452/// - Cosine similarity numerator
453/// - Attention scores
454/// - Linear layer computation
455pub fn dot_simd<F>(a: &ArrayView1<F>, b: &ArrayView1<F>) -> F
456where
457 F: Float + SimdUnifiedOps,
458{
459 if a.is_empty() || b.is_empty() {
460 return F::zero();
461 }
462 F::simd_dot(a, b)
463}
464/// SIMD-accelerated ReLU (Rectified Linear Unit)
465///
466/// Computes max(0, x) element-wise.
467///
468/// # Arguments
469/// * `a` - Input array
470///
471/// # Returns
472/// ReLU-activated array
473///
474/// # Examples
475/// ```
476/// use scirs2_core::ndarray::array;
477/// use scirs2_core::ndarray_ext::elementwise::relu_simd;
478///
479/// let x = array![-2.0_f64, -1.0, 0.0, 1.0, 2.0];
480/// let result = relu_simd::<f64>(&x.view());
481/// assert!((result[0] - 0.0).abs() < 1e-14);
482/// assert!((result[1] - 0.0).abs() < 1e-14);
483/// assert!((result[2] - 0.0).abs() < 1e-14);
484/// assert!((result[3] - 1.0).abs() < 1e-14);
485/// assert!((result[4] - 2.0).abs() < 1e-14);
486/// ```
487///
488/// # Use Cases
489/// - Neural network activation
490/// - Sparse representations
491/// - Thresholding signals
492/// - Feature rectification
493pub fn relu_simd<F>(a: &ArrayView1<F>) -> Array1<F>
494where
495 F: Float + SimdUnifiedOps,
496{
497 if a.is_empty() {
498 return Array1::zeros(0);
499 }
500 F::simd_relu(a)
501}
502/// SIMD-accelerated L2 normalization
503///
504/// Normalizes the vector to unit length: x / ||x||ā
505///
506/// # Arguments
507/// * `a` - Input array
508///
509/// # Returns
510/// Unit-normalized array (or zero if input is zero)
511///
512/// # Examples
513/// ```
514/// use scirs2_core::ndarray::array;
515/// use scirs2_core::ndarray_ext::elementwise::normalize_simd;
516///
517/// let x = array![3.0_f64, 4.0];
518/// let result = normalize_simd::<f64>(&x.view());
519/// // ||result|| = 1
520/// let norm = (result[0]*result[0] + result[1]*result[1]).sqrt();
521/// assert!((norm - 1.0).abs() < 1e-10);
522/// // x/5 = [0.6, 0.8]
523/// assert!((result[0] - 0.6).abs() < 1e-10);
524/// assert!((result[1] - 0.8).abs() < 1e-10);
525/// ```
526///
527/// # Use Cases
528/// - Unit vector computation
529/// - Cosine similarity preparation
530/// - Gradient normalization
531/// - Direction extraction
532pub fn normalize_simd<F>(a: &ArrayView1<F>) -> Array1<F>
533where
534 F: Float + SimdUnifiedOps,
535{
536 if a.is_empty() {
537 return Array1::zeros(0);
538 }
539 F::simd_normalize(a)
540}
541/// SIMD-accelerated standardization (z-score normalization)
542///
543/// Transforms to zero mean and unit variance: (x - mean) / std
544///
545/// # Arguments
546/// * `a` - Input array
547///
548/// # Returns
549/// Standardized array
550///
551/// # Examples
552/// ```
553/// use scirs2_core::ndarray::array;
554/// use scirs2_core::ndarray_ext::elementwise::standardize_simd;
555///
556/// let x = array![2.0_f64, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0];
557/// let result = standardize_simd::<f64>(&x.view());
558/// // Mean should be ~0, std should be ~1
559/// let mean: f64 = result.iter().sum::<f64>() / result.len() as f64;
560/// assert!(mean.abs() < 1e-10);
561/// ```
562///
563/// # Use Cases
564/// - Feature scaling for ML
565/// - Batch normalization
566/// - Statistical preprocessing
567/// - Anomaly scoring
568pub fn standardize_simd<F>(a: &ArrayView1<F>) -> Array1<F>
569where
570 F: Float + SimdUnifiedOps,
571{
572 if a.is_empty() {
573 return Array1::zeros(0);
574 }
575 F::simd_standardize(a)
576}
577/// SIMD-accelerated softmax
578///
579/// Computes exp(x - max(x)) / sum(exp(x - max(x))) for numerical stability.
580/// Output probabilities sum to 1.
581///
582/// # Arguments
583/// * `a` - Input array (logits)
584///
585/// # Returns
586/// Probability distribution (sums to 1)
587///
588/// # Examples
589/// ```
590/// use scirs2_core::ndarray::array;
591/// use scirs2_core::ndarray_ext::elementwise::softmax_simd;
592///
593/// let x = array![1.0_f64, 2.0, 3.0];
594/// let result = softmax_simd::<f64>(&x.view());
595/// // Probabilities should sum to 1
596/// let sum: f64 = result.iter().sum();
597/// assert!((sum - 1.0).abs() < 1e-10);
598/// // Higher logits should have higher probabilities
599/// assert!(result[2] > result[1]);
600/// assert!(result[1] > result[0]);
601/// ```
602///
603/// # Use Cases
604/// - Classification output layer
605/// - Attention weights
606/// - Policy probabilities in RL
607/// - Mixture model weights
608pub fn softmax_simd<F>(a: &ArrayView1<F>) -> Array1<F>
609where
610 F: Float + SimdUnifiedOps,
611{
612 if a.is_empty() {
613 return Array1::zeros(0);
614 }
615 F::simd_softmax(a)
616}
617/// SIMD-accelerated truncation (round towards zero)
618///
619/// Removes the fractional part, rounding towards zero.
620///
621/// # Arguments
622/// * `a` - Input array
623///
624/// # Returns
625/// Truncated array
626///
627/// # Examples
628/// ```
629/// use scirs2_core::ndarray::array;
630/// use scirs2_core::ndarray_ext::elementwise::trunc_simd;
631///
632/// let x = array![2.7_f64, -2.7, 0.9, -0.9];
633/// let result = trunc_simd::<f64>(&x.view());
634/// assert!((result[0] - 2.0).abs() < 1e-14);
635/// assert!((result[1] - (-2.0)).abs() < 1e-14);
636/// assert!((result[2] - 0.0).abs() < 1e-14);
637/// assert!((result[3] - 0.0).abs() < 1e-14);
638/// ```
639///
640/// # Use Cases
641/// - Integer conversion
642/// - Discretization
643/// - Quantization
644/// - Index calculation
645pub fn trunc_simd<F>(a: &ArrayView1<F>) -> Array1<F>
646where
647 F: Float + SimdUnifiedOps,
648{
649 if a.is_empty() {
650 return Array1::zeros(0);
651 }
652 F::simd_trunc(a)
653}