//! Arithmetic operations with SIMD acceleration
//!
//! This module provides optimized implementations of arithmetic operations.
//! Currently includes scalar multiplication (SIMD-accelerated where the CPU
//! supports AVX2/SSE or NEON) and linspace helpers, with more operations to be added.
use ndarray::{Array1, ArrayView1};
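/// Apply scalar multiplication to an f32 array using platform SIMD intrinsics
/// where available (AVX2/SSE on x86_64, NEON on aarch64), with a scalar fallback.
///
/// Panics if the input view is not contiguous in memory.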
pub fn simd_scalar_mul_f32(a: &ArrayView1<f32>, scalar: f32) -> Array1<f32> {
    let len = a.len();
    let mut result = Array1::zeros(len);
    let a_slice = a
        .as_slice()
        .expect("simd_scalar_mul_f32 requires a contiguous input array");
    let result_slice: &mut [f32] = result
        .as_slice_mut()
        .expect("newly allocated result array should be contiguous");
    #[cfg(target_arch = "x86_64")]
    {
        use std::arch::x86_64::*;
        if is_x86_feature_detected!("avx2") {
            unsafe {
                let scalar_vec = _mm256_set1_ps(scalar);
                let mut i = 0;
                // Process 8 f32s at a time with AVX2 - direct pointer writes
                while i + 8 <= len {
                    let a_vec = _mm256_loadu_ps(a_slice.as_ptr().add(i));
                    let result_vec = _mm256_mul_ps(a_vec, scalar_vec);
                    _mm256_storeu_ps(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 8;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        } else if is_x86_feature_detected!("sse") {
            unsafe {
                let scalar_vec = _mm_set1_ps(scalar);
                let mut i = 0;
                // Process 4 f32s at a time with SSE
                while i + 4 <= len {
                    let a_vec = _mm_loadu_ps(a_slice.as_ptr().add(i));
                    let result_vec = _mm_mul_ps(a_vec, scalar_vec);
                    _mm_storeu_ps(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 4;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        }
    }
    #[cfg(target_arch = "aarch64")]
    {
        use std::arch::aarch64::*;
        if std::arch::is_aarch64_feature_detected!("neon") {
            unsafe {
                let scalar_vec = vdupq_n_f32(scalar);
                let mut i = 0;
                // Process 4 f32s at a time with NEON
                while i + 4 <= len {
                    let a_vec = vld1q_f32(a_slice.as_ptr().add(i));
                    let result_vec = vmulq_f32(a_vec, scalar_vec);
                    vst1q_f32(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 4;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        }
    }
    // Fallback to scalar implementation
    for i in 0..len {
        result_slice[i] = a_slice[i] * scalar;
    }
    result
}
/// Apply scalar multiplication to an f64 array using platform SIMD intrinsics
/// where available (AVX2/SSE2 on x86_64, NEON on aarch64), with a scalar fallback.
///
/// Panics if the input view is not contiguous in memory.
#[allow(dead_code)]
pub fn simd_scalar_mul_f64(a: &ArrayView1<f64>, scalar: f64) -> Array1<f64> {
    let len = a.len();
    let mut result = Array1::zeros(len);
    let a_slice = a
        .as_slice()
        .expect("simd_scalar_mul_f64 requires a contiguous input array");
    let result_slice: &mut [f64] = result
        .as_slice_mut()
        .expect("newly allocated result array should be contiguous");
    #[cfg(target_arch = "x86_64")]
    {
        use std::arch::x86_64::*;
        if is_x86_feature_detected!("avx2") {
            unsafe {
                let scalar_vec = _mm256_set1_pd(scalar);
                let mut i = 0;
                // Process 4 f64s at a time with AVX2 - direct pointer writes
                while i + 4 <= len {
                    let a_vec = _mm256_loadu_pd(a_slice.as_ptr().add(i));
                    let result_vec = _mm256_mul_pd(a_vec, scalar_vec);
                    _mm256_storeu_pd(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 4;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        } else if is_x86_feature_detected!("sse2") {
            unsafe {
                let scalar_vec = _mm_set1_pd(scalar);
                let mut i = 0;
                // Process 2 f64s at a time with SSE2
                while i + 2 <= len {
                    let a_vec = _mm_loadu_pd(a_slice.as_ptr().add(i));
                    let result_vec = _mm_mul_pd(a_vec, scalar_vec);
                    _mm_storeu_pd(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 2;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        }
    }
    #[cfg(target_arch = "aarch64")]
    {
        use std::arch::aarch64::*;
        if std::arch::is_aarch64_feature_detected!("neon") {
            unsafe {
                let scalar_vec = vdupq_n_f64(scalar);
                let mut i = 0;
                // Process 2 f64s at a time with NEON
                while i + 2 <= len {
                    let a_vec = vld1q_f64(a_slice.as_ptr().add(i));
                    let result_vec = vmulq_f64(a_vec, scalar_vec);
                    vst1q_f64(result_slice.as_mut_ptr().add(i), result_vec);
                    i += 2;
                }
                // Handle remaining elements
                for j in i..len {
                    result_slice[j] = a_slice[j] * scalar;
                }
                return result;
            }
        }
    }
    // Fallback to scalar implementation
    for i in 0..len {
        result_slice[i] = a_slice[i] * scalar;
    }
    result
}
/// Linspace function for f32 values
///
/// Creates a linearly spaced array between `startval` and `end` (inclusive).
/// The current implementation is scalar; SIMD acceleration is planned.
///
/// # Arguments
///
/// * `startval` - Start value
/// * `end` - End value (inclusive)
/// * `num` - Number of points
///
/// # Returns
///
/// * Array of linearly spaced values
#[allow(dead_code)]
pub fn linspace_f32(startval: f32, end: f32, num: usize) -> Array1<f32> {
    if num == 0 {
        return Array1::zeros(0);
    }
    if num == 1 {
        return Array1::from_vec(vec![startval]);
    }
    let mut result = Array1::zeros(num);
    let step = (end - startval) / (num as f32 - 1.0);
    // Use scalar implementation for now - could be optimized with SIMD
    for (i, elem) in result.iter_mut().enumerate() {
        *elem = startval + step * i as f32;
    }
    // Make sure the last value is exactly `end` to avoid floating point precision issues
    if let Some(last) = result.last_mut() {
        *last = end;
    }
    result
}
/// Linspace function for f64 values
///
/// Creates a linearly spaced array between `startval` and `end` (inclusive).
/// The current implementation is scalar; SIMD acceleration is planned.
#[allow(dead_code)]
pub fn linspace_f64(startval: f64, end: f64, num: usize) -> Array1<f64> {
    if num == 0 {
        return Array1::zeros(0);
    }
    if num == 1 {
        return Array1::from_vec(vec![startval]);
    }
    let mut result = Array1::zeros(num);
    let step = (end - startval) / (num as f64 - 1.0);
    // Use scalar implementation for now - could be optimized with SIMD
    for (i, elem) in result.iter_mut().enumerate() {
        *elem = startval + step * i as f64;
    }
    // Make sure the last value is exactly `end` to avoid floating point precision issues
    if let Some(last) = result.last_mut() {
        *last = end;
    }
    result
}
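// Minimal usage/test sketch, not part of the original module: it assumes only the
// standard `cargo test` harness and the `ndarray` dependency already used above.
// It checks the SIMD paths against the plain scalar formula (using lengths that are
// not a multiple of the vector width, so the remainder loops run) and verifies the
// exact-endpoint guarantee of the linspace helpers.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn scalar_mul_matches_plain_loop() {
        // Length 11 is not a multiple of 8 or 4, so the remainder loop is exercised.
        let a: Array1<f32> = Array1::from_vec((0..11).map(|x| x as f32).collect());
        let result = simd_scalar_mul_f32(&a.view(), 2.5);
        for (r, x) in result.iter().zip(a.iter()) {
            assert_eq!(*r, x * 2.5);
        }

        // Length 7 is not a multiple of 4 or 2, so the f64 remainder loop runs too.
        let b: Array1<f64> = Array1::from_vec((0..7).map(|x| x as f64).collect());
        let result = simd_scalar_mul_f64(&b.view(), -3.0);
        for (r, x) in result.iter().zip(b.iter()) {
            assert_eq!(*r, x * -3.0);
        }
    }

    #[test]
    fn linspace_endpoints_are_exact() {
        let xs = linspace_f32(0.0, 1.0, 5);
        assert_eq!(xs.len(), 5);
        assert_eq!(xs[0], 0.0);
        assert_eq!(xs[4], 1.0);

        let ys = linspace_f64(-1.0, 1.0, 3);
        assert_eq!(ys.to_vec(), vec![-1.0, 0.0, 1.0]);
    }
}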