// sklears_multioutput/regularization/simd_ops.rs
use scirs2_core::ndarray::{Array1, Array2};
/// Computes the Euclidean (L2) norm of a slice: sqrt(sum(x_i^2)).
///
/// Returns `0.0` for an empty slice.
pub fn simd_l2_norm(data: &[f64]) -> f64 {
    data.iter().map(|&x| x * x).sum::<f64>().sqrt()
}
19
/// Computes the L1 norm of a slice: sum of absolute values.
///
/// Returns `0.0` for an empty slice.
pub fn simd_l1_norm(data: &[f64]) -> f64 {
    data.iter().map(|x| x.abs()).sum()
}
28
/// Applies the soft-thresholding (shrinkage) operator element-wise:
/// `output[i] = sign(input[i]) * max(|input[i]| - threshold, 0)`.
///
/// Processes only the overlapping prefix of `input` and `output`, matching
/// the length-mismatch convention used by the other kernels in this module
/// (`simd_residuals`, `simd_coefficient_update`). The previous version
/// indexed `output[i]` for every `i < input.len()` and panicked whenever
/// `output` was shorter than `input`.
pub fn simd_soft_threshold(input: &[f64], threshold: f64, output: &mut [f64]) {
    // zip truncates to the shorter of the two slices, so no bounds panic.
    for (out, &val) in output.iter_mut().zip(input) {
        *out = if val > threshold {
            val - threshold
        } else if val < -threshold {
            val + threshold
        } else {
            0.0
        };
    }
}
42
43pub fn simd_group_norm(coefficients: &Array2<f64>, groups: &[Vec<usize>]) -> Vec<f64> {
46 let mut group_norms = Vec::with_capacity(groups.len());
47
48 for group in groups {
49 let mut group_sum = 0.0;
50
51 for &feature_idx in group {
52 if feature_idx < coefficients.nrows() {
53 let feature_coefs = coefficients.row(feature_idx);
54 let coef_slice = feature_coefs.as_slice().unwrap();
55 let norm_squared = simd_dot_product(coef_slice, coef_slice);
56 group_sum += norm_squared;
57 }
58 }
59
60 group_norms.push(group_sum.sqrt());
61 }
62
63 group_norms
64}
65
/// Computes the dot product of two slices over their common prefix.
///
/// If the slices differ in length, only the first `min(a.len(), b.len())`
/// element pairs contribute; an empty overlap yields `0.0`.
pub fn simd_dot_product(a: &[f64], b: &[f64]) -> f64 {
    // zip stops at the shorter slice, so no explicit min_len is needed.
    a.iter().zip(b).map(|(&x, &y)| x * y).sum()
}
77
/// Writes `predictions[i] - targets[i]` into `residuals[i]` for the common
/// prefix of all three slices; extra trailing elements are left untouched.
pub fn simd_residuals(predictions: &[f64], targets: &[f64], residuals: &mut [f64]) {
    // Nested zip truncates to the shortest of the three slices.
    for (res, (&pred, &tgt)) in residuals.iter_mut().zip(predictions.iter().zip(targets)) {
        *res = pred - tgt;
    }
}
86
/// Applies one gradient-descent step in place:
/// `coefficients[i] -= learning_rate * gradients[i]` over the common prefix.
pub fn simd_coefficient_update(coefficients: &mut [f64], gradients: &[f64], learning_rate: f64) {
    // zip truncates to the shorter slice, matching the original min_len logic.
    for (coef, &grad) in coefficients.iter_mut().zip(gradients) {
        *coef -= learning_rate * grad;
    }
}
95
96pub fn simd_nuclear_norm_penalty(matrix: &Array2<f64>, lambda: f64) -> f64 {
99 let trace = simd_trace(matrix);
101 let frobenius = simd_frobenius_norm(matrix);
102
103 lambda * (trace * frobenius).sqrt()
105}
106
107pub fn simd_trace(matrix: &Array2<f64>) -> f64 {
109 let mut trace = 0.0;
110 let min_dim = matrix.nrows().min(matrix.ncols());
111
112 for i in 0..min_dim {
113 trace += matrix[(i, i)];
114 }
115
116 trace
117}
118
119pub fn simd_frobenius_norm(matrix: &Array2<f64>) -> f64 {
122 let mut sum_squares = 0.0;
123
124 for row in matrix.rows() {
125 let row_slice = row.as_slice().unwrap();
126 sum_squares += simd_dot_product(row_slice, row_slice);
127 }
128
129 sum_squares.sqrt()
130}
131
132pub fn simd_task_similarity(task1_coefs: &Array1<f64>, task2_coefs: &Array1<f64>) -> f64 {
135 let coef1_slice = task1_coefs.as_slice().unwrap();
136 let coef2_slice = task2_coefs.as_slice().unwrap();
137
138 let dot_product = simd_dot_product(coef1_slice, coef2_slice);
139 let norm1 = simd_l2_norm(coef1_slice);
140 let norm2 = simd_l2_norm(coef2_slice);
141
142 if norm1 == 0.0 || norm2 == 0.0 {
143 0.0
144 } else {
145 dot_product / (norm1 * norm2)
146 }
147}
148
/// Returns the largest absolute element-wise difference between two
/// coefficient vectors (the L-infinity norm of their difference), over the
/// common prefix. Useful as a convergence criterion.
///
/// Returns `0.0` when the overlap is empty.
pub fn simd_max_change(old_coefs: &[f64], new_coefs: &[f64]) -> f64 {
    old_coefs
        .iter()
        .zip(new_coefs)
        .map(|(&old, &new)| (new - old).abs())
        .fold(0.0_f64, f64::max)
}