1#[cfg(feature = "python")]
2use pyo3::exceptions::PyValueError;
3#[cfg(feature = "python")]
4use pyo3::prelude::*;
5
6#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
7use serde::{Deserialize, Serialize};
8#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
9use wasm_bindgen::prelude::*;
10
11use crate::utilities::data_loader::{source_type, Candles};
12use crate::utilities::enums::Kernel;
13use crate::utilities::helpers::{
14 alloc_with_nan_prefix, detect_best_batch_kernel, detect_best_kernel, init_matrix_prefixes,
15 make_uninit_matrix,
16};
17#[cfg(feature = "python")]
18use crate::utilities::kernel_validation::validate_kernel;
19use aligned_vec::{AVec, CACHELINE_ALIGN};
20
21#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
22use core::arch::x86_64::*;
23
24#[cfg(not(target_arch = "wasm32"))]
25use rayon::prelude::*;
26
27use std::convert::AsRef;
28use std::error::Error;
29use std::mem::MaybeUninit;
30use thiserror::Error;
31
32use crate::indicators::moving_averages::vwma::{
33 vwma_into_slice, vwma_with_kernel, VwmaInput, VwmaParams,
34};
35use crate::indicators::sma::{sma_into_slice, sma_with_kernel, SmaInput, SmaParams};
36
/// Borrowed input for AVSL: either a `Candles` container (with the names of
/// the series to read for close and low) or three parallel `f64` slices.
#[derive(Debug, Clone)]
pub enum AvslData<'a> {
    /// Close/low are resolved from `candles` via `source_type`;
    /// volume comes from `candles.volume`.
    Candles {
        candles: &'a Candles,
        close_source: &'a str,
        low_source: &'a str,
    },
    /// Pre-extracted slices; all three must have the same length
    /// (validated in `avsl_prepare`).
    Slices {
        close: &'a [f64],
        low: &'a [f64],
        volume: &'a [f64],
    },
}
50
/// AVSL result: one value per input bar; leading warmup entries are NaN.
#[derive(Debug, Clone)]
pub struct AvslOutput {
    pub values: Vec<f64>,
}
55
/// Optional AVSL parameters; `None` fields fall back to the library
/// defaults of 12 (fast), 26 (slow) and 2.0 (multiplier).
#[derive(Debug, Clone)]
#[cfg_attr(
    all(target_arch = "wasm32", feature = "wasm"),
    derive(Serialize, Deserialize)
)]
pub struct AvslParams {
    pub fast_period: Option<usize>,
    pub slow_period: Option<usize>,
    pub multiplier: Option<f64>,
}
66
67impl Default for AvslParams {
68 fn default() -> Self {
69 Self {
70 fast_period: Some(12),
71 slow_period: Some(26),
72 multiplier: Some(2.0),
73 }
74 }
75}
76
/// Bundled data + parameters handed to the `avsl*` entry points.
#[derive(Debug, Clone)]
pub struct AvslInput<'a> {
    pub data: AvslData<'a>,
    pub params: AvslParams,
}
82
83impl<'a> AvslInput<'a> {
84 #[inline]
85 pub fn from_candles(
86 c: &'a Candles,
87 close_source: &'a str,
88 low_source: &'a str,
89 p: AvslParams,
90 ) -> Self {
91 Self {
92 data: AvslData::Candles {
93 candles: c,
94 close_source,
95 low_source,
96 },
97 params: p,
98 }
99 }
100
101 #[inline]
102 pub fn from_slices(close: &'a [f64], low: &'a [f64], volume: &'a [f64], p: AvslParams) -> Self {
103 Self {
104 data: AvslData::Slices { close, low, volume },
105 params: p,
106 }
107 }
108
109 #[inline]
110 pub fn with_default_candles(c: &'a Candles) -> Self {
111 Self::from_candles(c, "close", "low", AvslParams::default())
112 }
113
114 #[inline]
115 pub fn get_fast_period(&self) -> usize {
116 self.params.fast_period.unwrap_or(12)
117 }
118
119 #[inline]
120 pub fn get_slow_period(&self) -> usize {
121 self.params.slow_period.unwrap_or(26)
122 }
123
124 #[inline]
125 pub fn get_multiplier(&self) -> f64 {
126 self.params.multiplier.unwrap_or(2.0)
127 }
128}
129
/// Fluent builder for AVSL runs; unset fields use the library defaults.
#[derive(Copy, Clone, Debug)]
pub struct AvslBuilder {
    fast_period: Option<usize>,
    slow_period: Option<usize>,
    multiplier: Option<f64>,
    // Kernel selection; `Auto` is resolved at dispatch time.
    kernel: Kernel,
}
137
138impl Default for AvslBuilder {
139 fn default() -> Self {
140 Self {
141 fast_period: None,
142 slow_period: None,
143 multiplier: None,
144 kernel: Kernel::Auto,
145 }
146 }
147}
148
149impl AvslBuilder {
150 #[inline(always)]
151 pub fn new() -> Self {
152 Self::default()
153 }
154
155 #[inline(always)]
156 pub fn fast_period(mut self, val: usize) -> Self {
157 self.fast_period = Some(val);
158 self
159 }
160
161 #[inline(always)]
162 pub fn slow_period(mut self, val: usize) -> Self {
163 self.slow_period = Some(val);
164 self
165 }
166
167 #[inline(always)]
168 pub fn multiplier(mut self, val: f64) -> Self {
169 self.multiplier = Some(val);
170 self
171 }
172
173 #[inline(always)]
174 pub fn kernel(mut self, k: Kernel) -> Self {
175 self.kernel = k;
176 self
177 }
178
179 #[inline(always)]
180 pub fn apply(self, c: &Candles) -> Result<AvslOutput, AvslError> {
181 let p = AvslParams {
182 fast_period: self.fast_period,
183 slow_period: self.slow_period,
184 multiplier: self.multiplier,
185 };
186 let i = AvslInput::from_candles(c, "close", "low", p);
187 avsl_with_kernel(&i, self.kernel)
188 }
189
190 #[inline(always)]
191 pub fn apply_slices(
192 self,
193 close: &[f64],
194 low: &[f64],
195 volume: &[f64],
196 ) -> Result<AvslOutput, AvslError> {
197 let p = AvslParams {
198 fast_period: self.fast_period,
199 slow_period: self.slow_period,
200 multiplier: self.multiplier,
201 };
202 let i = AvslInput::from_slices(close, low, volume, p);
203 avsl_with_kernel(&i, self.kernel)
204 }
205
206 #[inline(always)]
207 pub fn into_stream(self) -> Result<AvslStream, AvslError> {
208 let p = AvslParams {
209 fast_period: self.fast_period,
210 slow_period: self.slow_period,
211 multiplier: self.multiplier,
212 };
213 AvslStream::try_new(p)
214 }
215}
216
/// Errors produced by the AVSL indicator entry points and kernels.
#[derive(Debug, Error)]
pub enum AvslError {
    #[error("avsl: Input data slice is empty.")]
    EmptyInputData,

    #[error("avsl: All values are NaN.")]
    AllValuesNaN,

    #[error("avsl: Invalid period: period = {period}, data length = {data_len}")]
    InvalidPeriod { period: usize, data_len: usize },

    #[error("avsl: Not enough valid data: needed = {needed}, valid = {valid}")]
    NotEnoughValidData { needed: usize, valid: usize },

    #[error(
        "avsl: Data length mismatch: close = {close_len}, low = {low_len}, volume = {volume_len}"
    )]
    DataLengthMismatch {
        close_len: usize,
        low_len: usize,
        volume_len: usize,
    },

    #[error("avsl: Invalid multiplier: {multiplier}")]
    InvalidMultiplier { multiplier: f64 },

    #[error("avsl: Output length mismatch: expected = {expected}, got = {got}")]
    OutputLengthMismatch { expected: usize, got: usize },

    #[error("avsl: Invalid range: start={start} end={end} step={step}")]
    InvalidRange {
        start: usize,
        end: usize,
        step: usize,
    },

    #[error("avsl: Invalid kernel for batch path: {0:?}")]
    InvalidKernelForBatch(Kernel),

    #[error("avsl: {0}")]
    ComputationError(String),
}
259
/// Earliest index at which all three series are past their leading NaNs:
/// the max of each slice's first non-NaN position. `None` if any slice is
/// entirely NaN.
#[inline(always)]
fn first_valid_max3(a: &[f64], b: &[f64], c: &[f64]) -> Option<usize> {
    let mut worst = 0usize;
    for series in [a, b, c] {
        let first = series.iter().position(|v| !v.is_nan())?;
        if first > worst {
            worst = first;
        }
    }
    Some(worst)
}
267
268#[inline]
269pub fn avsl(input: &AvslInput) -> Result<AvslOutput, AvslError> {
270 avsl_with_kernel(input, Kernel::Auto)
271}
272
273pub fn avsl_with_kernel(input: &AvslInput, kernel: Kernel) -> Result<AvslOutput, AvslError> {
274 let (close, low, volume, fast_period, slow_period, multiplier, first, chosen) =
275 avsl_prepare(input, kernel)?;
276
277 let mut out = alloc_with_nan_prefix(close.len(), first + slow_period - 1);
278
279 avsl_compute_into(
280 close,
281 low,
282 volume,
283 fast_period,
284 slow_period,
285 multiplier,
286 first,
287 chosen,
288 &mut out,
289 )?;
290
291 Ok(AvslOutput { values: out })
292}
293
294#[inline]
295pub fn avsl_into_slice(dst: &mut [f64], input: &AvslInput, kern: Kernel) -> Result<(), AvslError> {
296 let (close, low, volume, fast_period, slow_period, multiplier, first, chosen) =
297 avsl_prepare(input, kern)?;
298
299 if dst.len() != close.len() {
300 return Err(AvslError::OutputLengthMismatch {
301 expected: close.len(),
302 got: dst.len(),
303 });
304 }
305
306 avsl_compute_into(
307 close,
308 low,
309 volume,
310 fast_period,
311 slow_period,
312 multiplier,
313 first,
314 chosen,
315 dst,
316 )?;
317
318 let warmup_end = first + slow_period - 1;
319 for v in &mut dst[..warmup_end] {
320 *v = f64::NAN;
321 }
322
323 Ok(())
324}
325
/// Resolve the input into raw slices, validate it, and pick the kernel.
///
/// Returns `(close, low, volume, fast_period, slow_period, multiplier,
/// first_valid_index, chosen_kernel)`.
///
/// Validation order is part of the observable contract (which error fires
/// first): empty -> length mismatch -> all-NaN -> periods -> valid-data
/// count -> multiplier.
#[inline(always)]
fn avsl_prepare<'a>(
    input: &'a AvslInput,
    kernel: Kernel,
) -> Result<
    (
        &'a [f64],
        &'a [f64],
        &'a [f64],
        usize,
        usize,
        f64,
        usize,
        Kernel,
    ),
    AvslError,
> {
    let (close, low, volume) = match &input.data {
        AvslData::Candles {
            candles,
            close_source,
            low_source,
        } => (
            source_type(candles, close_source),
            source_type(candles, low_source),
            candles.volume.as_slice(),
        ),
        AvslData::Slices { close, low, volume } => (*close, *low, *volume),
    };

    let len = close.len();
    if len == 0 {
        return Err(AvslError::EmptyInputData);
    }
    if close.len() != low.len() || close.len() != volume.len() {
        return Err(AvslError::DataLengthMismatch {
            close_len: close.len(),
            low_len: low.len(),
            volume_len: volume.len(),
        });
    }

    // First bar where all three series are non-NaN.
    let first = first_valid_max3(close, low, volume).ok_or(AvslError::AllValuesNaN)?;
    let fast_period = input.get_fast_period();
    let slow_period = input.get_slow_period();
    let multiplier = input.get_multiplier();

    if fast_period == 0 || fast_period > len {
        return Err(AvslError::InvalidPeriod {
            period: fast_period,
            data_len: len,
        });
    }
    if slow_period == 0 || slow_period > len {
        return Err(AvslError::InvalidPeriod {
            period: slow_period,
            data_len: len,
        });
    }
    if len - first < slow_period {
        return Err(AvslError::NotEnoughValidData {
            needed: slow_period,
            valid: len - first,
        });
    }
    if multiplier <= 0.0 || !multiplier.is_finite() {
        return Err(AvslError::InvalidMultiplier { multiplier });
    }

    // NOTE(review): `Auto` currently always resolves to `Scalar` here; the
    // `detect_best_kernel` helpers imported at the top of the file are not
    // consulted on this path — confirm whether that is intentional.
    let chosen = match kernel {
        Kernel::Auto => Kernel::Scalar,
        k => k,
    };
    Ok((
        close,
        low,
        volume,
        fast_period,
        slow_period,
        multiplier,
        first,
        chosen,
    ))
}
410
411#[inline(always)]
412fn avsl_compute_into(
413 close: &[f64],
414 low: &[f64],
415 volume: &[f64],
416 fast_period: usize,
417 slow_period: usize,
418 multiplier: f64,
419 first: usize,
420 kernel: Kernel,
421 out: &mut [f64],
422) -> Result<(), AvslError> {
423 unsafe {
424 #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
425 {
426 if matches!(kernel, Kernel::Scalar | Kernel::ScalarBatch) {
427 return avsl_simd128(
428 close,
429 low,
430 volume,
431 fast_period,
432 slow_period,
433 multiplier,
434 first,
435 out,
436 );
437 }
438 }
439
440 match kernel {
441 Kernel::Scalar | Kernel::ScalarBatch => avsl_scalar(
442 close,
443 low,
444 volume,
445 fast_period,
446 slow_period,
447 multiplier,
448 first,
449 out,
450 ),
451 #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
452 Kernel::Avx2 | Kernel::Avx2Batch => avsl_avx2(
453 close,
454 low,
455 volume,
456 fast_period,
457 slow_period,
458 multiplier,
459 first,
460 out,
461 ),
462 #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
463 Kernel::Avx512 | Kernel::Avx512Batch => avsl_avx512(
464 close,
465 low,
466 volume,
467 fast_period,
468 slow_period,
469 multiplier,
470 first,
471 out,
472 ),
473 #[cfg(not(all(feature = "nightly-avx", target_arch = "x86_64")))]
474 Kernel::Avx2 | Kernel::Avx2Batch | Kernel::Avx512 | Kernel::Avx512Batch => avsl_scalar(
475 close,
476 low,
477 volume,
478 fast_period,
479 slow_period,
480 multiplier,
481 first,
482 out,
483 ),
484 _ => unreachable!(),
485 }
486 }
487}
488
/// Scalar AVSL kernel.
///
/// Single pass over the data maintaining:
/// - rolling sums of close / volume / close*volume for both the fast and
///   slow windows (SMA and VWMA in O(1) per bar),
/// - a fixed ring of the last `MAX_WIN` (vpc, vpr) pairs for the adaptive
///   lookback,
/// - a slow-period rolling sum of the pre-series for the final smoothing.
///
/// Writes results into `out[first_val + 2*(slow_period-1) ..]` and stamps
/// everything before that with NaN. `first_val` is the first index where
/// close/low/volume are all non-NaN; callers validate lengths and periods.
#[inline]
pub fn avsl_scalar(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    first_val: usize,
    out: &mut [f64],
) -> Result<(), AvslError> {
    let len = close.len();

    if len == 0 {
        return Err(AvslError::EmptyInputData);
    }

    // First bar with a full slow window, and first bar with a full
    // second-pass (smoothing) window.
    let base = first_val + slow_period - 1;
    let warmup2 = base + slow_period - 1;

    if base >= len {
        // No value can ever be produced: the whole buffer becomes NaN.
        let upto = warmup2.min(len);
        for v in &mut out[..upto] {
            *v = f64::NAN;
        }
        for v in &mut out[upto..] {
            *v = f64::NAN;
        }
        return Ok(());
    }

    let inv_fast = 1.0 / (fast_period as f64);
    let inv_slow = 1.0 / (slow_period as f64);

    // Rolling window sums (f = fast window, s = slow window).
    let mut sum_close_f = 0.0_f64;
    let mut sum_close_s = 0.0_f64;
    let mut sum_vol_f = 0.0_f64;
    let mut sum_vol_s = 0.0_f64;
    let mut sum_cxv_f = 0.0_f64;
    let mut sum_cxv_s = 0.0_f64;

    // The adaptive lookback is capped at MAX_WIN bars, so a fixed-size ring
    // of recent (vpc, vpr) pairs suffices.
    const MAX_WIN: usize = 200;
    let mut ring_vpc: [f64; MAX_WIN] = [0.0; MAX_WIN];
    let mut ring_vpr: [f64; MAX_WIN] = [1.0; MAX_WIN];
    let mut ring_pos: usize = 0;

    // Slow-period rolling sum of the pre-series (final smoothing pass).
    let mut pre_ring: Vec<f64> = vec![0.0; slow_period];
    let mut pre_pos: usize = 0;
    let mut pre_sum: f64 = 0.0;
    let mut pre_cnt: usize = 0;

    unsafe {
        let c_ptr = close.as_ptr();
        let l_ptr = low.as_ptr();
        let v_ptr = volume.as_ptr();

        for i in 0..len {
            if i >= first_val {
                let c = *c_ptr.add(i);
                let v = *v_ptr.add(i);
                let cv = c * v;

                sum_close_f += c;
                sum_vol_f += v;
                sum_cxv_f += cv;
                sum_close_s += c;
                sum_vol_s += v;
                sum_cxv_s += cv;

                // Evict the element that just left the fast window.
                if i + 1 > fast_period + first_val {
                    let k = i + 1 - fast_period - 1;
                    let c_old = *c_ptr.add(k);
                    let v_old = *v_ptr.add(k);
                    sum_close_f -= c_old;
                    sum_vol_f -= v_old;
                    sum_cxv_f -= c_old * v_old;
                }
                // Evict the element that just left the slow window.
                if i + 1 > slow_period + first_val {
                    let k = i + 1 - slow_period - 1;
                    let c_old = *c_ptr.add(k);
                    let v_old = *v_ptr.add(k);
                    sum_close_s -= c_old;
                    sum_vol_s -= v_old;
                    sum_cxv_s -= c_old * v_old;
                }
            }

            if i >= base {
                let sma_f = sum_close_f * inv_fast;
                let sma_s = sum_close_s * inv_slow;
                // VWMA falls back to the SMA when the window has zero volume.
                let vwma_f = if sum_vol_f != 0.0 {
                    sum_cxv_f / sum_vol_f
                } else {
                    sma_f
                };
                let vwma_s = if sum_vol_s != 0.0 {
                    sum_cxv_s / sum_vol_s
                } else {
                    sma_s
                };

                // Volume-price confirmation (vpc), ratio (vpr) and
                // volume multiplier (vm) combine into vpci.
                let vpc = vwma_s - sma_s;
                let vpr = if sma_f != 0.0 { vwma_f / sma_f } else { 1.0 };
                let vol_f = sum_vol_f * inv_fast;
                let vol_s = sum_vol_s * inv_slow;
                let vm = if vol_s != 0.0 { vol_f / vol_s } else { 1.0 };
                let vpci = vpc * vpr * vm;

                // Adaptive lookback length, clamped to [1, MAX_WIN].
                let len_v = {
                    let t = if vpc < 0.0 {
                        (vpci - 3.0).abs().round()
                    } else {
                        (vpci + 3.0).round()
                    };
                    let m = if t < 1.0 { 1.0 } else { t };
                    let m = if m > MAX_WIN as f64 {
                        MAX_WIN as f64
                    } else {
                        m
                    };
                    m as usize
                };

                ring_vpc[ring_pos] = vpc;
                ring_vpr[ring_pos] = vpr;
                ring_pos += 1;
                if ring_pos == MAX_WIN {
                    ring_pos = 0;
                }

                // Split the lookback into bars with ring history (hist_n)
                // and earlier bars before `base` (pref_n), which contribute
                // the raw low (vpc/vpr treated as 1).
                let take = len_v.min(i + 1);
                let hist_n = (i - base + 1).min(take);
                let pref_n = take - hist_n;
                let mut acc = 0.0_f64;

                if hist_n > 0 {
                    // Walk the ring backwards from the newest entry.
                    let mut rp = if ring_pos == 0 {
                        MAX_WIN - 1
                    } else {
                        ring_pos - 1
                    };
                    for j in 0..hist_n {
                        let idx_r = rp;
                        rp = if rp == 0 { MAX_WIN - 1 } else { rp - 1 };
                        let x = *ring_vpc.get_unchecked(idx_r);
                        // Snap |vpc| < 1 to +/-1 to avoid blow-up in the
                        // division below.
                        let adj = if x > -1.0 && x < 0.0 {
                            -1.0
                        } else if x >= 0.0 && x < 1.0 {
                            1.0
                        } else {
                            x
                        };
                        let r = *ring_vpr.get_unchecked(idx_r);
                        if adj != 0.0 && r != 0.0 {
                            acc += *l_ptr.add(i - j) / (adj * r);
                        }
                    }
                }

                if pref_n > 0 {
                    // Pre-history bars: plain sum of lows, 4-way unrolled.
                    let start_idx = i + 1 - (hist_n + pref_n);
                    let end_idx_excl = i + 1 - hist_n;
                    let mut s = 0.0_f64;
                    let mut k = start_idx;
                    while k + 4 <= end_idx_excl {
                        let a = *l_ptr.add(k);
                        let b = *l_ptr.add(k + 1);
                        let c = *l_ptr.add(k + 2);
                        let d = *l_ptr.add(k + 3);
                        s += a + b + c + d;
                        k += 4;
                    }
                    while k < end_idx_excl {
                        s += *l_ptr.add(k);
                        k += 1;
                    }
                    acc += s;
                }

                // Support level offset: 1% of the adaptive average, plus the
                // multiplier-scaled deviation term.
                let price_v = (acc / (len_v as f64)) * 0.01;
                let dev = (multiplier.mul_add(vpci, 0.0)) * vm;
                let pre_i = (*l_ptr.add(i) - price_v) + dev;

                // Rolling slow-period sum of the pre-series.
                pre_sum += pre_i;
                if pre_cnt < slow_period {
                    pre_ring[pre_pos] = pre_i;
                    pre_pos += 1;
                    if pre_pos == slow_period {
                        pre_pos = 0;
                    }
                    pre_cnt += 1;
                } else {
                    pre_sum -= pre_ring[pre_pos];
                    pre_ring[pre_pos] = pre_i;
                    pre_pos += 1;
                    if pre_pos == slow_period {
                        pre_pos = 0;
                    }
                }

                // Emit only once the smoothing window is full.
                if i >= warmup2 {
                    *out.get_unchecked_mut(i) = pre_sum * inv_slow;
                }
            }
        }
    }

    // Stamp the warmup region with NaN.
    let upto = warmup2.min(len);
    for v in &mut out[..upto] {
        *v = f64::NAN;
    }
    Ok(())
}
702
703#[inline]
704fn avsl_scalar_ref(
705 close: &[f64],
706 low: &[f64],
707 volume: &[f64],
708 fast_period: usize,
709 slow_period: usize,
710 multiplier: f64,
711 first_val: usize,
712 out: &mut [f64],
713) -> Result<(), AvslError> {
714 let len = close.len();
715
716 let rows = 7usize;
717 let cols = len;
718
719 let mut mu = make_uninit_matrix(rows, cols);
720 let warm = [
721 first_val + fast_period - 1,
722 first_val + slow_period - 1,
723 first_val + fast_period - 1,
724 first_val + slow_period - 1,
725 first_val + fast_period - 1,
726 first_val + slow_period - 1,
727 first_val + slow_period - 1,
728 ];
729 init_matrix_prefixes(&mut mu, cols, &warm);
730
731 let mut guard = core::mem::ManuallyDrop::new(mu);
732 let flat: &mut [f64] =
733 unsafe { core::slice::from_raw_parts_mut(guard.as_mut_ptr() as *mut f64, guard.len()) };
734
735 let (row0, rest) = flat.split_at_mut(cols);
736 let (row1, rest) = rest.split_at_mut(cols);
737 let (row2, rest) = rest.split_at_mut(cols);
738 let (row3, rest) = rest.split_at_mut(cols);
739 let (row4, rest) = rest.split_at_mut(cols);
740 let (row5, row6) = rest.split_at_mut(cols);
741
742 {
743 let inp = VwmaInput::from_slice(
744 close,
745 volume,
746 VwmaParams {
747 period: Some(fast_period),
748 },
749 );
750 vwma_into_slice(row0, &inp, Kernel::Scalar)
751 .map_err(|e| AvslError::ComputationError(format!("VWMA Fast error: {}", e)))?;
752
753 let inp = VwmaInput::from_slice(
754 close,
755 volume,
756 VwmaParams {
757 period: Some(slow_period),
758 },
759 );
760 vwma_into_slice(row1, &inp, Kernel::Scalar)
761 .map_err(|e| AvslError::ComputationError(format!("VWMA Slow error: {}", e)))?;
762
763 let inp = SmaInput::from_slice(
764 close,
765 SmaParams {
766 period: Some(fast_period),
767 },
768 );
769 sma_into_slice(row2, &inp, Kernel::Scalar)
770 .map_err(|e| AvslError::ComputationError(format!("SMA Fast error: {}", e)))?;
771 let inp = SmaInput::from_slice(
772 close,
773 SmaParams {
774 period: Some(slow_period),
775 },
776 );
777 sma_into_slice(row3, &inp, Kernel::Scalar)
778 .map_err(|e| AvslError::ComputationError(format!("SMA Slow error: {}", e)))?;
779
780 let inp = SmaInput::from_slice(
781 volume,
782 SmaParams {
783 period: Some(fast_period),
784 },
785 );
786 sma_into_slice(row4, &inp, Kernel::Scalar)
787 .map_err(|e| AvslError::ComputationError(format!("Volume SMA Fast error: {}", e)))?;
788 let inp = SmaInput::from_slice(
789 volume,
790 SmaParams {
791 period: Some(slow_period),
792 },
793 );
794 sma_into_slice(row5, &inp, Kernel::Scalar)
795 .map_err(|e| AvslError::ComputationError(format!("Volume SMA Slow error: {}", e)))?;
796 }
797
798 let vwma_f = &row0[..];
799 let vwma_s = &row1[..];
800 let sma_f = &row2[..];
801 let sma_s = &row3[..];
802 let vol_f = &row4[..];
803 let vol_s = &row5[..];
804 let pre = row6;
805
806 let start = first_val + slow_period - 1;
807 for i in start..len {
808 let vpc = vwma_s[i] - sma_s[i];
809 let vpr = if sma_f[i] != 0.0 {
810 vwma_f[i] / sma_f[i]
811 } else {
812 1.0
813 };
814 let vm = if vol_s[i] != 0.0 {
815 vol_f[i] / vol_s[i]
816 } else {
817 1.0
818 };
819 let vpci = vpc * vpr * vm;
820 let len_v = if vpc < 0.0 {
821 ((vpci - 3.0).abs().round() as usize).max(1).min(200)
822 } else {
823 ((vpci + 3.0).round() as usize).max(1).min(200)
824 };
825 let adj = |x: f64| {
826 if (-1.0..0.0).contains(&x) {
827 -1.0
828 } else if (0.0..1.0).contains(&x) {
829 1.0
830 } else {
831 x
832 }
833 };
834 let mut acc = 0.0;
835 let base = first_val + slow_period - 1;
836 let take = len_v.min(i + 1);
837 for j in 0..take {
838 let idx = i - j;
839 let vpc_c_j = if idx >= base {
840 adj(vwma_s[idx] - sma_s[idx])
841 } else {
842 1.0
843 };
844 let vpr_j = if idx >= base && sma_f[idx] != 0.0 {
845 vwma_f[idx] / sma_f[idx]
846 } else {
847 1.0
848 };
849 if vpc_c_j != 0.0 && vpr_j != 0.0 {
850 acc += low[idx] / vpc_c_j / vpr_j;
851 }
852 }
853 let price_v = (acc / len_v as f64) / 100.0;
854 let dev = multiplier * vpci * vm;
855 pre[i] = low[i] - price_v + dev;
856 }
857
858 let pre_in = SmaInput::from_slice(
859 &pre[..],
860 SmaParams {
861 period: Some(slow_period),
862 },
863 );
864 sma_into_slice(out, &pre_in, Kernel::Scalar)
865 .map_err(|e| AvslError::ComputationError(format!("AVSL SMA error: {}", e)))?;
866
867 let warmup_end = start + slow_period - 1;
868 if warmup_end <= len {
869 for v in &mut out[..warmup_end] {
870 *v = f64::NAN;
871 }
872 }
873 Ok(())
874}
875
/// wasm simd128 entry point. Currently delegates to the scalar kernel; with
/// the `simd128` target feature enabled, LLVM may auto-vectorize it.
///
/// # Safety
/// No additional requirements beyond `avsl_scalar`; the `unsafe fn`
/// signature matches the other SIMD kernels for uniform dispatch.
#[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
#[inline]
unsafe fn avsl_simd128(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    first_val: usize,
    out: &mut [f64],
) -> Result<(), AvslError> {
    avsl_scalar(
        close,
        low,
        volume,
        fast_period,
        slow_period,
        multiplier,
        first_val,
        out,
    )
}
899
/// AVX2/FMA-targeted AVSL kernel. Same algorithm and results as
/// `avsl_scalar`; `#[target_feature]` lets LLVM auto-vectorize where it can.
///
/// Fix vs the previous version: the `_mm256_set1_pd` constants, the
/// `adj256` helper and the local `use core::arch::x86_64::*;` were dead
/// code — the loop below is entirely scalar and never used them — so they
/// have been removed.
///
/// # Safety
/// Caller must ensure the CPU supports AVX2 and FMA.
#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
#[target_feature(enable = "avx2,fma")]
unsafe fn avsl_avx2(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    first_val: usize,
    out: &mut [f64],
) -> Result<(), AvslError> {
    let len = close.len();
    if len == 0 {
        return Err(AvslError::EmptyInputData);
    }

    const MAX_WIN: usize = 200;
    let base = first_val + slow_period - 1;
    let warmup2 = base + slow_period - 1;
    if base >= len {
        // No value can ever be produced: the whole buffer becomes NaN.
        let upto = warmup2.min(len);
        for v in &mut out[..upto] {
            *v = f64::NAN;
        }
        for v in &mut out[upto..] {
            *v = f64::NAN;
        }
        return Ok(());
    }

    let inv_fast = 1.0 / (fast_period as f64);
    let inv_slow = 1.0 / (slow_period as f64);

    let mut sum_close_f = 0.0_f64;
    let mut sum_close_s = 0.0_f64;
    let mut sum_vol_f = 0.0_f64;
    let mut sum_vol_s = 0.0_f64;
    let mut sum_cxv_f = 0.0_f64;
    let mut sum_cxv_s = 0.0_f64;

    let mut ring_vpc: [f64; MAX_WIN] = [0.0; MAX_WIN];
    let mut ring_vpr: [f64; MAX_WIN] = [1.0; MAX_WIN];
    let mut ring_pos: usize = 0;

    let mut pre_ring: Vec<f64> = vec![0.0; slow_period];
    let mut pre_pos: usize = 0;
    let mut pre_sum: f64 = 0.0;
    let mut pre_cnt: usize = 0;

    let c_ptr = close.as_ptr();
    let l_ptr = low.as_ptr();
    let v_ptr = volume.as_ptr();

    for i in 0..len {
        if i >= first_val {
            let c = *c_ptr.add(i);
            let v = *v_ptr.add(i);
            let cv = c * v;
            sum_close_f += c;
            sum_vol_f += v;
            sum_cxv_f += cv;
            sum_close_s += c;
            sum_vol_s += v;
            sum_cxv_s += cv;
            // Evict elements leaving the fast / slow windows.
            if i + 1 > fast_period + first_val {
                let k = i + 1 - fast_period - 1;
                let c_old = *c_ptr.add(k);
                let v_old = *v_ptr.add(k);
                sum_close_f -= c_old;
                sum_vol_f -= v_old;
                sum_cxv_f -= c_old * v_old;
            }
            if i + 1 > slow_period + first_val {
                let k = i + 1 - slow_period - 1;
                let c_old = *c_ptr.add(k);
                let v_old = *v_ptr.add(k);
                sum_close_s -= c_old;
                sum_vol_s -= v_old;
                sum_cxv_s -= c_old * v_old;
            }
        }

        if i >= base {
            let sma_f = sum_close_f * inv_fast;
            let sma_s = sum_close_s * inv_slow;
            let vwma_f = if sum_vol_f != 0.0 {
                sum_cxv_f / sum_vol_f
            } else {
                sma_f
            };
            let vwma_s = if sum_vol_s != 0.0 {
                sum_cxv_s / sum_vol_s
            } else {
                sma_s
            };

            let vpc = vwma_s - sma_s;

            let vpr = if sma_f != 0.0 { vwma_f / sma_f } else { 1.0 };
            let vol_f = sum_vol_f * inv_fast;
            let vol_s = sum_vol_s * inv_slow;
            let vm = if vol_s != 0.0 { vol_f / vol_s } else { 1.0 };
            let vpci = vpc * vpr * vm;

            // Adaptive lookback, clamped to [1, MAX_WIN].
            let len_v = {
                let t = if vpc < 0.0 {
                    (vpci - 3.0).abs().round()
                } else {
                    (vpci + 3.0).round()
                };
                let m = if t < 1.0 { 1.0 } else { t };
                let m = if m > MAX_WIN as f64 {
                    MAX_WIN as f64
                } else {
                    m
                };
                m as usize
            };

            ring_vpc[ring_pos] = vpc;
            ring_vpr[ring_pos] = vpr;
            ring_pos += 1;
            if ring_pos == MAX_WIN {
                ring_pos = 0;
            }

            let take = len_v.min(i + 1);
            let hist_n = (i - base + 1).min(take);
            let pref_n = take - hist_n;
            let mut acc = 0.0_f64;

            if hist_n > 0 {
                let mut rp = if ring_pos == 0 {
                    MAX_WIN - 1
                } else {
                    ring_pos - 1
                };
                for j in 0..hist_n {
                    let idx_r = rp;
                    rp = if rp == 0 { MAX_WIN - 1 } else { rp - 1 };
                    let x = ring_vpc[idx_r];
                    // Snap |vpc| < 1 to +/-1 to avoid blow-up.
                    let adj = if x > -1.0 && x < 0.0 {
                        -1.0
                    } else if x >= 0.0 && x < 1.0 {
                        1.0
                    } else {
                        x
                    };
                    let r = ring_vpr[idx_r];
                    if adj != 0.0 && r != 0.0 {
                        acc += *l_ptr.add(i - j) / (adj * r);
                    }
                }
            }

            if pref_n > 0 {
                let start_idx = i + 1 - (hist_n + pref_n);
                let end_idx_excl = i + 1 - hist_n;
                let mut s = 0.0_f64;
                let mut k = start_idx;
                while k < end_idx_excl {
                    s += *l_ptr.add(k);
                    k += 1;
                }
                acc += s;
            }

            let price_v = (acc / (len_v as f64)) * 0.01;
            let dev = (multiplier.mul_add(vpci, 0.0)) * vm;
            let pre_i = (*l_ptr.add(i) - price_v) + dev;

            pre_sum += pre_i;
            if pre_cnt < slow_period {
                pre_ring[pre_pos] = pre_i;
                pre_pos += 1;
                if pre_pos == slow_period {
                    pre_pos = 0;
                }
                pre_cnt += 1;
            } else {
                pre_sum -= pre_ring[pre_pos];
                pre_ring[pre_pos] = pre_i;
                pre_pos += 1;
                if pre_pos == slow_period {
                    pre_pos = 0;
                }
            }

            if i >= warmup2 {
                *out.get_unchecked_mut(i) = pre_sum * inv_slow;
            }
        }
    }

    let upto = warmup2.min(len);
    for v in &mut out[..upto] {
        *v = f64::NAN;
    }

    Ok(())
}
1119
/// AVX-512F/FMA-targeted AVSL kernel. Same algorithm as `avsl_scalar`.
///
/// Fixes vs the previous version:
/// - `vpr` was computed via an algebraically rearranged formula
///   (`sum_cxv_f * fast / (sum_vol_f * sum_close_f)`) that rounds
///   differently in floating point than the scalar/AVX2 kernels' direct
///   `vwma_f / sma_f`, causing cross-kernel result mismatches; it now uses
///   the same expression as the other kernels.
/// - The `__m512d -> [f64; 8]` transmute is wrapped in an explicit
///   `unsafe` block, consistent with the rest of the function.
///
/// # Safety
/// Caller must ensure the CPU supports AVX-512F and FMA.
#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
#[target_feature(enable = "avx512f,fma")]
unsafe fn avsl_avx512(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    first_val: usize,
    out: &mut [f64],
) -> Result<(), AvslError> {
    use core::arch::x86_64::*;
    let len = close.len();
    if len == 0 {
        return Err(AvslError::EmptyInputData);
    }

    const MAX_WIN: usize = 200;
    let base = first_val + slow_period - 1;
    let warmup2 = base + slow_period - 1;
    if base >= len {
        // No value can ever be produced: the whole buffer becomes NaN.
        let upto = warmup2.min(len);
        for v in &mut out[..upto] {
            *v = f64::NAN;
        }
        for v in &mut out[upto..] {
            *v = f64::NAN;
        }
        return Ok(());
    }

    let inv_fast = 1.0 / (fast_period as f64);
    let inv_slow = 1.0 / (slow_period as f64);
    let mut sum_close_f = 0.0_f64;
    let mut sum_close_s = 0.0_f64;
    let mut sum_vol_f = 0.0_f64;
    let mut sum_vol_s = 0.0_f64;
    let mut sum_cxv_f = 0.0_f64;
    let mut sum_cxv_s = 0.0_f64;
    let mut ring_vpc: [f64; MAX_WIN] = [0.0; MAX_WIN];
    let mut ring_vpr: [f64; MAX_WIN] = [1.0; MAX_WIN];
    let mut ring_pos: usize = 0;
    let mut pre_ring: Vec<f64> = vec![0.0; slow_period];
    let mut pre_pos: usize = 0;
    let mut pre_sum: f64 = 0.0;
    let mut pre_cnt: usize = 0;

    let c_ptr = close.as_ptr();
    let l_ptr = low.as_ptr();
    let v_ptr = volume.as_ptr();

    // Snap |x| < 1 to +/-1 to avoid blow-up in the division below.
    #[inline(always)]
    fn adj(x: f64) -> f64 {
        if x > -1.0 && x < 0.0 {
            -1.0
        } else if x >= 0.0 && x < 1.0 {
            1.0
        } else {
            x
        }
    }

    for i in 0..len {
        if i >= first_val {
            let c = unsafe { *c_ptr.add(i) };
            let v = unsafe { *v_ptr.add(i) };
            let cv = c * v;
            sum_close_f += c;
            sum_vol_f += v;
            sum_cxv_f += cv;
            sum_close_s += c;
            sum_vol_s += v;
            sum_cxv_s += cv;
            // Evict elements leaving the fast / slow windows.
            if i + 1 > fast_period + first_val {
                let k = i + 1 - fast_period - 1;
                let c_old = unsafe { *c_ptr.add(k) };
                let v_old = unsafe { *v_ptr.add(k) };
                sum_close_f -= c_old;
                sum_vol_f -= v_old;
                sum_cxv_f -= c_old * v_old;
            }
            if i + 1 > slow_period + first_val {
                let k = i + 1 - slow_period - 1;
                let c_old = unsafe { *c_ptr.add(k) };
                let v_old = unsafe { *v_ptr.add(k) };
                sum_close_s -= c_old;
                sum_vol_s -= v_old;
                sum_cxv_s -= c_old * v_old;
            }
        }

        if i >= base {
            let sma_f = sum_close_f * inv_fast;
            let sma_s = sum_close_s * inv_slow;
            let vwma_f = if sum_vol_f != 0.0 {
                sum_cxv_f / sum_vol_f
            } else {
                sma_f
            };
            let vwma_s = if sum_vol_s != 0.0 {
                sum_cxv_s / sum_vol_s
            } else {
                sma_s
            };
            let vpc = vwma_s - sma_s;

            // Same expression as the scalar kernel so all kernels agree
            // bit-for-bit (vwma_f already falls back to sma_f on zero
            // volume, making vpr exactly 1.0 in that case).
            let vpr = if sma_f != 0.0 { vwma_f / sma_f } else { 1.0 };
            let vol_f = sum_vol_f * inv_fast;
            let vol_s = sum_vol_s * inv_slow;
            let vm = if vol_s != 0.0 { vol_f / vol_s } else { 1.0 };
            let vpci = vpc * vpr * vm;
            // Adaptive lookback, clamped to [1, MAX_WIN].
            let len_v = {
                let t = if vpc < 0.0 {
                    (vpci - 3.0).abs().round()
                } else {
                    (vpci + 3.0).round()
                };
                let m = if t < 1.0 { 1.0 } else { t };
                let m = if m > MAX_WIN as f64 {
                    MAX_WIN as f64
                } else {
                    m
                };
                m as usize
            };

            ring_vpc[ring_pos] = vpc;
            ring_vpr[ring_pos] = vpr;
            ring_pos += 1;
            if ring_pos == MAX_WIN {
                ring_pos = 0;
            }

            let take = len_v.min(i + 1);
            let hist_n = (i - base + 1).min(take);
            let pref_n = take - hist_n;
            let mut acc = 0.0_f64;

            if hist_n > 0 {
                let mut rp = if ring_pos == 0 {
                    MAX_WIN - 1
                } else {
                    ring_pos - 1
                };
                for j in 0..hist_n {
                    let idx_r = rp;
                    rp = if rp == 0 { MAX_WIN - 1 } else { rp - 1 };
                    let a = adj(ring_vpc[idx_r]);
                    let r = ring_vpr[idx_r];
                    if a != 0.0 && r != 0.0 {
                        acc += unsafe { *l_ptr.add(i - j) } / (a * r);
                    }
                }
            }
            if pref_n > 0 {
                // Pre-history bars: plain sum of lows, 8 lanes at a time.
                let start_idx = i + 1 - (hist_n + pref_n);
                let end_idx_excl = i + 1 - hist_n;
                let mut s = 0.0_f64;
                let mut k = start_idx;
                let n = end_idx_excl - start_idx;
                let vec_n = n / 8;
                let rem = n % 8;
                for _ in 0..vec_n {
                    let a = unsafe { _mm512_loadu_pd(l_ptr.add(k)) };
                    // SAFETY: __m512d is exactly eight contiguous f64 lanes.
                    let arr: [f64; 8] = unsafe { core::mem::transmute(a) };
                    s += arr[0] + arr[1] + arr[2] + arr[3] + arr[4] + arr[5] + arr[6] + arr[7];
                    k += 8;
                }
                for _ in 0..rem {
                    s += unsafe { *l_ptr.add(k) };
                    k += 1;
                }
                acc += s;
            }

            let price_v = (acc / (len_v as f64)) * 0.01;
            let dev = (multiplier.mul_add(vpci, 0.0)) * vm;
            let pre_i = unsafe { *l_ptr.add(i) } - price_v + dev;
            pre_sum += pre_i;
            if pre_cnt < slow_period {
                pre_ring[pre_pos] = pre_i;
                pre_pos += 1;
                if pre_pos == slow_period {
                    pre_pos = 0;
                }
                pre_cnt += 1;
            } else {
                pre_sum -= pre_ring[pre_pos];
                pre_ring[pre_pos] = pre_i;
                pre_pos += 1;
                if pre_pos == slow_period {
                    pre_pos = 0;
                }
            }
            if i >= warmup2 {
                unsafe {
                    *out.get_unchecked_mut(i) = pre_sum * inv_slow;
                }
            }
        }
    }

    let upto = warmup2.min(len);
    for v in &mut out[..upto] {
        *v = f64::NAN;
    }

    Ok(())
}
1333
/// Incremental (bar-by-bar) AVSL state machine, created via
/// `AvslStream::try_new` or `AvslBuilder::into_stream`.
#[derive(Debug, Clone)]
pub struct AvslStream {
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,

    // Precomputed reciprocals of the periods.
    inv_fast: f64,
    inv_slow: f64,
    // First tick index with a full slow window / full smoothing window.
    base: usize,
    warmup2: usize,

    // Number of ticks consumed so far.
    t: usize,

    // Rolling window sums (f = fast window, s = slow window).
    sum_close_f: f64,
    sum_vol_f: f64,
    sum_cxv_f: f64,
    sum_close_s: f64,
    sum_vol_s: f64,
    sum_cxv_s: f64,

    // Ring buffers of the last max(fast, slow) close / volume /
    // close*volume values, used to evict expiring window elements.
    ring_len: usize,
    ring_pos: usize,
    close_ring: Vec<f64>,
    vol_ring: Vec<f64>,
    cxv_ring: Vec<f64>,

    // Cumulative-sum rings sized R = MAX_WIN + 1; presumably used for the
    // adaptive-lookback low/term sums — the impl is outside this chunk,
    // confirm against `AvslStream::update`.
    csum_low: [f64; AvslStream::R],
    csum_y: [f64; AvslStream::R],

    // Rolling slow-period sum of the pre-series (final smoothing).
    pre_ring: Vec<f64>,
    pre_pos: usize,
    pre_sum: f64,
    pre_cnt: usize,
}
1368
1369impl AvslStream {
1370 const MAX_WIN: usize = 200;
1371 const R: usize = Self::MAX_WIN + 1;
1372
1373 pub fn try_new(params: AvslParams) -> Result<Self, AvslError> {
1374 let fast_period = params.fast_period.unwrap_or(12);
1375 let slow_period = params.slow_period.unwrap_or(26);
1376 let multiplier = params.multiplier.unwrap_or(2.0);
1377
1378 if fast_period == 0 {
1379 return Err(AvslError::InvalidPeriod {
1380 period: fast_period,
1381 data_len: 0,
1382 });
1383 }
1384 if slow_period == 0 {
1385 return Err(AvslError::InvalidPeriod {
1386 period: slow_period,
1387 data_len: 0,
1388 });
1389 }
1390 if multiplier <= 0.0 || !multiplier.is_finite() {
1391 return Err(AvslError::InvalidMultiplier { multiplier });
1392 }
1393
1394 let ring_len = fast_period.max(slow_period);
1395 Ok(Self {
1396 fast_period,
1397 slow_period,
1398 multiplier,
1399 inv_fast: 1.0 / (fast_period as f64),
1400 inv_slow: 1.0 / (slow_period as f64),
1401 base: slow_period - 1,
1402 warmup2: (slow_period - 1) + (slow_period - 1),
1403 t: 0,
1404
1405 sum_close_f: 0.0,
1406 sum_vol_f: 0.0,
1407 sum_cxv_f: 0.0,
1408 sum_close_s: 0.0,
1409 sum_vol_s: 0.0,
1410 sum_cxv_s: 0.0,
1411
1412 ring_len,
1413 ring_pos: 0,
1414 close_ring: vec![0.0; ring_len],
1415 vol_ring: vec![0.0; ring_len],
1416 cxv_ring: vec![0.0; ring_len],
1417
1418 csum_low: [0.0; Self::R],
1419 csum_y: [0.0; Self::R],
1420
1421 pre_ring: vec![0.0; slow_period],
1422 pre_pos: 0,
1423 pre_sum: 0.0,
1424 pre_cnt: 0,
1425 })
1426 }
1427
1428 #[inline(always)]
1429 fn sum_last(csum: &[f64; Self::R], t_plus_1_mod: usize, t_plus_1: usize, k: usize) -> f64 {
1430 if k == 0 {
1431 return 0.0;
1432 }
1433 if t_plus_1 >= k {
1434 let start = (t_plus_1 - k) % Self::R;
1435 csum[t_plus_1_mod] - csum[start]
1436 } else {
1437 csum[t_plus_1_mod]
1438 }
1439 }
1440
1441 #[inline(always)]
1442 fn adjust_vpc(x: f64) -> f64 {
1443 if x > -1.0 && x < 0.0 {
1444 -1.0
1445 } else if x >= 0.0 && x < 1.0 {
1446 1.0
1447 } else {
1448 x
1449 }
1450 }
1451
1452 #[inline(always)]
1453 pub fn update(&mut self, close: f64, low: f64, volume: f64) -> Option<f64> {
1454 let i = self.t;
1455
1456 let cv = close * volume;
1457
1458 let rp = self.ring_pos;
1459 let rl = self.ring_len;
1460
1461 let pos_old_fast = (rp + rl - (self.fast_period % rl)) % rl;
1462 let pos_old_slow = (rp + rl - (self.slow_period % rl)) % rl;
1463
1464 let (c_old_f, v_old_f, cv_old_f) = if i >= self.fast_period {
1465 (
1466 self.close_ring[pos_old_fast],
1467 self.vol_ring[pos_old_fast],
1468 self.cxv_ring[pos_old_fast],
1469 )
1470 } else {
1471 (0.0, 0.0, 0.0)
1472 };
1473
1474 let (c_old_s, v_old_s, cv_old_s) = if i >= self.slow_period {
1475 (
1476 self.close_ring[pos_old_slow],
1477 self.vol_ring[pos_old_slow],
1478 self.cxv_ring[pos_old_slow],
1479 )
1480 } else {
1481 (0.0, 0.0, 0.0)
1482 };
1483
1484 self.sum_close_f += close - c_old_f;
1485 self.sum_vol_f += volume - v_old_f;
1486 self.sum_cxv_f += cv - cv_old_f;
1487
1488 self.sum_close_s += close - c_old_s;
1489 self.sum_vol_s += volume - v_old_s;
1490 self.sum_cxv_s += cv - cv_old_s;
1491
1492 self.close_ring[rp] = close;
1493 self.vol_ring[rp] = volume;
1494 self.cxv_ring[rp] = cv;
1495 self.ring_pos = (rp + 1) % rl;
1496
1497 let t1_mod = (i + 1) % Self::R;
1498
1499 let mut y_i = 0.0;
1500 if i >= self.base {
1501 let sma_f = self.sum_close_f * self.inv_fast;
1502 let sma_s = self.sum_close_s * self.inv_slow;
1503
1504 let vwma_f = if self.sum_vol_f != 0.0 {
1505 self.sum_cxv_f / self.sum_vol_f
1506 } else {
1507 sma_f
1508 };
1509 let vwma_s = if self.sum_vol_s != 0.0 {
1510 self.sum_cxv_s / self.sum_vol_s
1511 } else {
1512 sma_s
1513 };
1514
1515 let vpc = vwma_s - sma_s;
1516 let vpr = if sma_f != 0.0 { vwma_f / sma_f } else { 1.0 };
1517
1518 let vol_f = self.sum_vol_f * self.inv_fast;
1519 let vol_s = self.sum_vol_s * self.inv_slow;
1520 let _vm = if vol_s != 0.0 { vol_f / vol_s } else { 1.0 };
1521
1522 let adj = Self::adjust_vpc(vpc);
1523 if adj != 0.0 && vpr != 0.0 {
1524 y_i = low / (adj * vpr);
1525 }
1526 }
1527
1528 self.csum_low[t1_mod] = self.csum_low[i % Self::R] + low;
1529 self.csum_y[t1_mod] = self.csum_y[i % Self::R] + y_i;
1530
1531 let mut out: Option<f64> = None;
1532
1533 if i >= self.base {
1534 let sma_f = self.sum_close_f * self.inv_fast;
1535 let sma_s = self.sum_close_s * self.inv_slow;
1536 let vwma_f = if self.sum_vol_f != 0.0 {
1537 self.sum_cxv_f / self.sum_vol_f
1538 } else {
1539 sma_f
1540 };
1541 let vwma_s = if self.sum_vol_s != 0.0 {
1542 self.sum_cxv_s / self.sum_vol_s
1543 } else {
1544 sma_s
1545 };
1546 let vpc = vwma_s - sma_s;
1547 let vpr = if sma_f != 0.0 { vwma_f / sma_f } else { 1.0 };
1548 let vol_f = self.sum_vol_f * self.inv_fast;
1549 let vol_s = self.sum_vol_s * self.inv_slow;
1550 let vm = if vol_s != 0.0 { vol_f / vol_s } else { 1.0 };
1551 let vpci = vpc * vpr * vm;
1552
1553 let t_len = if vpc < 0.0 {
1554 (vpci - 3.0).abs().round()
1555 } else {
1556 (vpci + 3.0).round()
1557 };
1558 let len_v = t_len.max(1.0).min(Self::MAX_WIN as f64) as usize;
1559
1560 let take = len_v.min(i + 1);
1561 let hist_n = ((i - self.base + 1).min(take)) as usize;
1562 let pref_n = take - hist_n;
1563
1564 let sum_hist_y = Self::sum_last(&self.csum_y, t1_mod, i + 1, hist_n);
1565 let sum_take_l = Self::sum_last(&self.csum_low, t1_mod, i + 1, take);
1566 let sum_hist_l = Self::sum_last(&self.csum_low, t1_mod, i + 1, hist_n);
1567 let acc = sum_hist_y + (sum_take_l - sum_hist_l);
1568
1569 let inv_len_v = 1.0 / (len_v as f64);
1570 let price_v = (acc * inv_len_v) * 0.01;
1571 let dev = self.multiplier.mul_add(vpci, 0.0) * vm;
1572 let pre_i = (low - price_v) + dev;
1573
1574 self.pre_sum += pre_i;
1575 if self.pre_cnt < self.slow_period {
1576 self.pre_ring[self.pre_pos] = pre_i;
1577 self.pre_pos += 1;
1578 if self.pre_pos == self.slow_period {
1579 self.pre_pos = 0;
1580 }
1581 self.pre_cnt += 1;
1582 } else {
1583 self.pre_sum -= self.pre_ring[self.pre_pos];
1584 self.pre_ring[self.pre_pos] = pre_i;
1585 self.pre_pos += 1;
1586 if self.pre_pos == self.slow_period {
1587 self.pre_pos = 0;
1588 }
1589 }
1590
1591 if i >= self.warmup2 {
1592 out = Some(self.pre_sum * self.inv_slow);
1593 }
1594 }
1595
1596 self.t = i + 1;
1597 out
1598 }
1599}
1600
/// Parameter sweep for batch AVSL. Each axis is an inclusive
/// `(start, end, step)` range; a step of 0 pins the axis to `start`.
#[derive(Clone, Debug)]
pub struct AvslBatchRange {
    pub fast_period: (usize, usize, usize),
    pub slow_period: (usize, usize, usize),
    pub multiplier: (f64, f64, f64),
}
1607
impl Default for AvslBatchRange {
    /// Fast period and multiplier are fixed (12 and 2.0); only the slow
    /// period is swept, over 26..=275 in steps of 1.
    fn default() -> Self {
        Self {
            fast_period: (12, 12, 0),
            slow_period: (26, 275, 1),
            multiplier: (2.0, 2.0, 0.0),
        }
    }
}
1617
/// Fluent builder for batch AVSL sweeps.
#[derive(Clone, Debug, Default)]
pub struct AvslBatchBuilder {
    // Parameter grid to expand into one row per combination.
    range: AvslBatchRange,
    // Batch kernel selection (the derived Default picks Kernel's default).
    kernel: Kernel,
}
1623
1624impl AvslBatchBuilder {
1625 pub fn new() -> Self {
1626 Self::default()
1627 }
1628 pub fn kernel(mut self, k: Kernel) -> Self {
1629 self.kernel = k;
1630 self
1631 }
1632 #[inline]
1633 pub fn fast_range(mut self, s: usize, e: usize, st: usize) -> Self {
1634 self.range.fast_period = (s, e, st);
1635 self
1636 }
1637 #[inline]
1638 pub fn fast_static(mut self, v: usize) -> Self {
1639 self.range.fast_period = (v, v, 0);
1640 self
1641 }
1642 #[inline]
1643 pub fn slow_range(mut self, s: usize, e: usize, st: usize) -> Self {
1644 self.range.slow_period = (s, e, st);
1645 self
1646 }
1647 #[inline]
1648 pub fn slow_static(mut self, v: usize) -> Self {
1649 self.range.slow_period = (v, v, 0);
1650 self
1651 }
1652 #[inline]
1653 pub fn mult_range(mut self, s: f64, e: f64, st: f64) -> Self {
1654 self.range.multiplier = (s, e, st);
1655 self
1656 }
1657 #[inline]
1658 pub fn mult_static(mut self, v: f64) -> Self {
1659 self.range.multiplier = (v, v, 0.0);
1660 self
1661 }
1662
1663 pub fn apply_slices(
1664 self,
1665 close: &[f64],
1666 low: &[f64],
1667 volume: &[f64],
1668 ) -> Result<AvslBatchOutput, AvslError> {
1669 avsl_batch_with_kernel(close, low, volume, &self.range, self.kernel)
1670 }
1671
1672 pub fn apply_candles(
1673 self,
1674 c: &Candles,
1675 close_src: &str,
1676 low_src: &str,
1677 ) -> Result<AvslBatchOutput, AvslError> {
1678 let close = source_type(c, close_src);
1679 let low = source_type(c, low_src);
1680 let volume = c.volume.as_slice();
1681 self.apply_slices(close, low, volume)
1682 }
1683
1684 pub fn with_default_candles(c: &Candles) -> Result<AvslBatchOutput, AvslError> {
1685 AvslBatchBuilder::new()
1686 .kernel(Kernel::Auto)
1687 .apply_candles(c, "close", "low")
1688 }
1689
1690 pub fn with_default_slices(
1691 close: &[f64],
1692 low: &[f64],
1693 volume: &[f64],
1694 k: Kernel,
1695 ) -> Result<AvslBatchOutput, AvslError> {
1696 AvslBatchBuilder::new()
1697 .kernel(k)
1698 .apply_slices(close, low, volume)
1699 }
1700}
1701
/// Row-major results of a batch sweep: `values[r * cols + c]` is the output
/// of parameter combo `combos[r]` at bar index `c`.
#[derive(Clone, Debug)]
pub struct AvslBatchOutput {
    pub values: Vec<f64>,        // rows * cols values, row-major
    pub combos: Vec<AvslParams>, // one parameter combination per row
    pub rows: usize,             // number of combos
    pub cols: usize,             // number of input bars
}
1709
1710impl AvslBatchOutput {
1711 pub fn row_for_params(&self, p: &AvslParams) -> Option<usize> {
1712 self.combos.iter().position(|c| {
1713 c.fast_period.unwrap_or(12) == p.fast_period.unwrap_or(12)
1714 && c.slow_period.unwrap_or(26) == p.slow_period.unwrap_or(26)
1715 && (c.multiplier.unwrap_or(2.0) - p.multiplier.unwrap_or(2.0)).abs() < 1e-12
1716 })
1717 }
1718
1719 #[inline]
1720 pub fn values_for(&self, p: &AvslParams) -> Option<&[f64]> {
1721 self.row_for_params(p).map(|row| {
1722 let start = row * self.cols;
1723 &self.values[start..start + self.cols]
1724 })
1725 }
1726}
1727
/// Expands an inclusive `(start, end, step)` axis into concrete values.
///
/// A zero step or equal endpoints collapse to the single value `start`.
/// The axis may run in either direction; descending sweeps stop before an
/// unsigned underflow. (The original carried an unreachable
/// `cur == usize::MAX` guard and a redundant `st.max(1)`; both are removed.)
#[inline(always)]
fn axis_usize((s, e, st): (usize, usize, usize)) -> Vec<usize> {
    if st == 0 || s == e {
        return vec![s];
    }
    if s < e {
        // Ascending: `st` is non-zero here, so `step_by` cannot panic.
        return (s..=e).step_by(st).collect();
    }

    // Descending sweep from `s` down toward `e`.
    let mut v = Vec::new();
    let mut cur = s;
    while cur >= e {
        v.push(cur);
        // Next step would underflow; the axis is complete.
        if cur < st {
            break;
        }
        cur -= st;
    }
    v
}
1752
/// Expands an inclusive `(start, end, step)` float axis into concrete values.
///
/// The step's sign is ignored (direction comes from start vs end); a
/// near-zero step or equal endpoints collapse to the single value `start`.
/// Endpoint comparisons use a 1e-12 tolerance.
#[inline(always)]
fn axis_f64((s, e, st): (f64, f64, f64)) -> Vec<f64> {
    // Normalize the step to a non-negative magnitude.
    let step = st.abs();
    if step < 1e-12 || (s - e).abs() < 1e-12 {
        return vec![s];
    }
    let mut values = Vec::new();
    let mut x = s;
    if s <= e {
        // Ascending sweep, inclusive of `e` up to the tolerance.
        while x <= e + 1e-12 {
            values.push(x);
            x += step;
        }
    } else {
        // Descending sweep, inclusive of `e` up to the tolerance.
        while x + 1e-12 >= e {
            values.push(x);
            x -= step;
        }
    }
    values
}
1775
1776#[inline(always)]
1777fn expand_grid_avsl(r: &AvslBatchRange) -> Vec<AvslParams> {
1778 let fs = axis_usize(r.fast_period);
1779 let ss = axis_usize(r.slow_period);
1780 let ms = axis_f64(r.multiplier);
1781 let cap = fs
1782 .len()
1783 .checked_mul(ss.len())
1784 .and_then(|x| x.checked_mul(ms.len()))
1785 .unwrap_or(0);
1786 let mut out = Vec::with_capacity(cap);
1787 for &f in &fs {
1788 for &s in &ss {
1789 for &m in &ms {
1790 out.push(AvslParams {
1791 fast_period: Some(f),
1792 slow_period: Some(s),
1793 multiplier: Some(m),
1794 });
1795 }
1796 }
1797 }
1798 out
1799}
1800
1801pub fn avsl_batch_with_kernel(
1802 close: &[f64],
1803 low: &[f64],
1804 volume: &[f64],
1805 sweep: &AvslBatchRange,
1806 k: Kernel,
1807) -> Result<AvslBatchOutput, AvslError> {
1808 if close.is_empty() {
1809 return Err(AvslError::EmptyInputData);
1810 }
1811 if close.len() != low.len() || close.len() != volume.len() {
1812 return Err(AvslError::DataLengthMismatch {
1813 close_len: close.len(),
1814 low_len: low.len(),
1815 volume_len: volume.len(),
1816 });
1817 }
1818
1819 let kernel = match k {
1820 Kernel::Auto => detect_best_batch_kernel(),
1821 other if other.is_batch() => other,
1822 other => return Err(AvslError::InvalidKernelForBatch(other)),
1823 };
1824
1825 let simd = match kernel {
1826 Kernel::Avx512Batch => Kernel::Avx512,
1827 Kernel::Avx2Batch => Kernel::Avx2,
1828 Kernel::ScalarBatch => Kernel::Scalar,
1829 _ => unreachable!(),
1830 };
1831
1832 let combos = expand_grid_avsl(sweep);
1833 if combos.is_empty() {
1834 return Err(AvslError::InvalidRange {
1835 start: sweep.fast_period.0,
1836 end: sweep.fast_period.1,
1837 step: sweep.fast_period.2,
1838 });
1839 }
1840
1841 let cols = close.len();
1842 let rows = combos.len();
1843
1844 let mut buf_mu = make_uninit_matrix(rows, cols);
1845
1846 let first = first_valid_max3(close, low, volume).ok_or(AvslError::AllValuesNaN)?;
1847 let warm: Vec<usize> = combos
1848 .iter()
1849 .map(|p| first + p.slow_period.unwrap_or(26) - 1)
1850 .collect();
1851 init_matrix_prefixes(&mut buf_mu, cols, &warm);
1852
1853 let mut guard = core::mem::ManuallyDrop::new(buf_mu);
1854 let out: &mut [f64] =
1855 unsafe { core::slice::from_raw_parts_mut(guard.as_mut_ptr() as *mut f64, guard.len()) };
1856
1857 avsl_batch_inner_into(close, low, volume, &combos, simd, out)?;
1858
1859 let values = unsafe {
1860 Vec::from_raw_parts(
1861 guard.as_mut_ptr() as *mut f64,
1862 guard.len(),
1863 guard.capacity(),
1864 )
1865 };
1866
1867 Ok(AvslBatchOutput {
1868 values,
1869 combos,
1870 rows,
1871 cols,
1872 })
1873}
1874
/// Runs the batch sweep filling rows serially on the calling thread
/// (`parallel = false` in `avsl_batch_inner`).
#[inline(always)]
pub fn avsl_batch_slice(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    sweep: &AvslBatchRange,
    kern: Kernel,
) -> Result<AvslBatchOutput, AvslError> {
    avsl_batch_inner(close, low, volume, sweep, kern, false)
}
1885
/// Runs the batch sweep filling rows via the parallel path
/// (`parallel = true` in `avsl_batch_inner`; rayon on native targets).
#[inline(always)]
pub fn avsl_batch_par_slice(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    sweep: &AvslBatchRange,
    kern: Kernel,
) -> Result<AvslBatchOutput, AvslError> {
    avsl_batch_inner(close, low, volume, sweep, kern, true)
}
1896
1897#[inline(always)]
1898fn avsl_batch_inner(
1899 close: &[f64],
1900 low: &[f64],
1901 volume: &[f64],
1902 sweep: &AvslBatchRange,
1903 kern: Kernel,
1904 parallel: bool,
1905) -> Result<AvslBatchOutput, AvslError> {
1906 if close.is_empty() {
1907 return Err(AvslError::EmptyInputData);
1908 }
1909 if close.len() != low.len() || close.len() != volume.len() {
1910 return Err(AvslError::DataLengthMismatch {
1911 close_len: close.len(),
1912 low_len: low.len(),
1913 volume_len: volume.len(),
1914 });
1915 }
1916
1917 let kernel = match kern {
1918 Kernel::Auto => detect_best_batch_kernel(),
1919 other if other.is_batch() => other,
1920 other => return Err(AvslError::InvalidKernelForBatch(other)),
1921 };
1922
1923 let simd = match kernel {
1924 Kernel::Avx512Batch => Kernel::Avx512,
1925 Kernel::Avx2Batch => Kernel::Avx2,
1926 Kernel::ScalarBatch => Kernel::Scalar,
1927 _ => unreachable!(),
1928 };
1929
1930 let combos = expand_grid_avsl(sweep);
1931 if combos.is_empty() {
1932 return Err(AvslError::InvalidRange {
1933 start: sweep.fast_period.0,
1934 end: sweep.fast_period.1,
1935 step: sweep.fast_period.2,
1936 });
1937 }
1938
1939 let cols = close.len();
1940 let rows = combos.len();
1941
1942 let mut buf_mu = make_uninit_matrix(rows, cols);
1943
1944 let first = first_valid_max3(close, low, volume).ok_or(AvslError::AllValuesNaN)?;
1945 let warm: Vec<usize> = combos
1946 .iter()
1947 .map(|p| first + p.slow_period.unwrap_or(26) - 1)
1948 .collect();
1949 init_matrix_prefixes(&mut buf_mu, cols, &warm);
1950
1951 let mut guard = core::mem::ManuallyDrop::new(buf_mu);
1952 let out: &mut [f64] =
1953 unsafe { core::slice::from_raw_parts_mut(guard.as_mut_ptr() as *mut f64, guard.len()) };
1954
1955 if parallel {
1956 avsl_batch_inner_into(close, low, volume, &combos, simd, out)?;
1957 } else {
1958 let out_rows: &mut [MaybeUninit<f64>] = unsafe {
1959 core::slice::from_raw_parts_mut(out.as_mut_ptr() as *mut MaybeUninit<f64>, out.len())
1960 };
1961 for (r, dst) in out_rows.chunks_mut(cols).enumerate() {
1962 let p = &combos[r];
1963 let fast = p.fast_period.unwrap_or(12);
1964 let slow = p.slow_period.unwrap_or(26);
1965 let mult = p.multiplier.unwrap_or(2.0);
1966 let dst_f64: &mut [f64] =
1967 unsafe { core::slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut f64, cols) };
1968 avsl_compute_into(close, low, volume, fast, slow, mult, first, simd, dst_f64)?;
1969 }
1970 }
1971
1972 let values = unsafe {
1973 Vec::from_raw_parts(
1974 guard.as_mut_ptr() as *mut f64,
1975 guard.len(),
1976 guard.capacity(),
1977 )
1978 };
1979
1980 Ok(AvslBatchOutput {
1981 values,
1982 combos,
1983 rows,
1984 cols,
1985 })
1986}
1987
/// Fills `out` (rows x cols, row-major) with one AVSL series per combo.
///
/// `out` may contain uninitialized cells on entry; each row is handed to
/// `avsl_compute_into`, which is assumed to fully populate it (values plus
/// NaN warmup prefix — confirm against `avsl_compute_into`). Rows run in
/// parallel via rayon on native targets and serially on wasm.
#[inline(always)]
fn avsl_batch_inner_into(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    combos: &[AvslParams],
    kern: Kernel,
    out: &mut [f64],
) -> Result<(), AvslError> {
    let cols = close.len();
    // First index where close, low and volume are all valid.
    let first = first_valid_max3(close, low, volume).ok_or(AvslError::AllValuesNaN)?;

    let rows = combos.len();

    // Guard against size overflow and a mis-sized output buffer.
    let expected = rows.checked_mul(cols).ok_or(AvslError::InvalidRange {
        start: 0,
        end: 0,
        step: 0,
    })?;
    if out.len() != expected {
        return Err(AvslError::OutputLengthMismatch {
            expected,
            got: out.len(),
        });
    }
    // SAFETY: f64 and MaybeUninit<f64> have identical layout; this only
    // re-types the same memory for the per-row writers.
    let out_rows: &mut [MaybeUninit<f64>] = unsafe {
        core::slice::from_raw_parts_mut(out.as_mut_ptr() as *mut MaybeUninit<f64>, out.len())
    };

    // Computes one row: resolve the combo's parameters and run the kernel.
    let do_row = |row: usize, dst_mu: &mut [MaybeUninit<f64>]| -> Result<(), AvslError> {
        let p = &combos[row];
        let fast = p.fast_period.unwrap_or(12);
        let slow = p.slow_period.unwrap_or(26);
        let mult = p.multiplier.unwrap_or(2.0);

        // SAFETY: same layout re-typing as above, restricted to one row.
        let dst: &mut [f64] =
            unsafe { core::slice::from_raw_parts_mut(dst_mu.as_mut_ptr() as *mut f64, cols) };
        avsl_compute_into(close, low, volume, fast, slow, mult, first, kern, dst)
    };

    #[cfg(not(target_arch = "wasm32"))]
    {
        use rayon::prelude::*;
        // First row error short-circuits the whole parallel fill.
        out_rows
            .par_chunks_mut(cols)
            .enumerate()
            .try_for_each(|(r, dst)| do_row(r, dst))
    }
    #[cfg(target_arch = "wasm32")]
    {
        for (r, dst) in out_rows.chunks_mut(cols).enumerate() {
            do_row(r, dst)?;
        }
        Ok(())
    }
}
2044
/// Python wrapper around the streaming AVSL state (exposed as `AvslStream`).
#[cfg(feature = "python")]
#[pyclass(name = "AvslStream")]
pub struct AvslStreamPy {
    stream: AvslStream,
}
2050
#[cfg(feature = "python")]
#[pymethods]
impl AvslStreamPy {
    /// Builds a streaming AVSL with explicit parameters; invalid parameters
    /// surface as Python `ValueError`s.
    #[new]
    fn new(fast_period: usize, slow_period: usize, multiplier: f64) -> PyResult<Self> {
        let stream = AvslStream::try_new(AvslParams {
            fast_period: Some(fast_period),
            slow_period: Some(slow_period),
            multiplier: Some(multiplier),
        })
        .map_err(|e| PyValueError::new_err(e.to_string()))?;
        Ok(Self { stream })
    }

    /// Feeds one bar; returns the AVSL value once warmed up, else `None`.
    fn update(&mut self, close: f64, low: f64, volume: f64) -> Option<f64> {
        self.stream.update(close, low, volume)
    }
}
2070
/// Python binding: computes a single AVSL series over numpy arrays.
///
/// Omitted parameters fall back to the Rust defaults; `kernel` is a kernel
/// name validated by `validate_kernel` (non-batch mode).
#[cfg(feature = "python")]
#[pyfunction(name = "avsl")]
#[pyo3(signature = (close, low, volume, fast_period=None, slow_period=None, multiplier=None, kernel=None))]
pub fn avsl_py<'py>(
    py: Python<'py>,
    close: numpy::PyReadonlyArray1<'py, f64>,
    low: numpy::PyReadonlyArray1<'py, f64>,
    volume: numpy::PyReadonlyArray1<'py, f64>,
    fast_period: Option<usize>,
    slow_period: Option<usize>,
    multiplier: Option<f64>,
    kernel: Option<&str>,
) -> PyResult<Bound<'py, numpy::PyArray1<f64>>> {
    use numpy::{IntoPyArray, PyArray1, PyArrayMethods};

    // Contiguous views into the numpy buffers (errors if non-contiguous).
    let close_slice = close.as_slice()?;
    let low_slice = low.as_slice()?;
    let volume_slice = volume.as_slice()?;

    let kern = validate_kernel(kernel, false)?;
    let params = AvslParams {
        fast_period,
        slow_period,
        multiplier,
    };
    let input = AvslInput::from_slices(close_slice, low_slice, volume_slice, params);

    // Release the GIL while the Rust kernel runs; map errors to ValueError.
    let result_vec: Vec<f64> = py
        .allow_threads(|| avsl_with_kernel(&input, kern).map(|o| o.values))
        .map_err(|e| PyValueError::new_err(e.to_string()))?;

    Ok(result_vec.into_pyarray(py))
}
2104
/// Python binding: batch AVSL sweep returning a dict with a
/// `(rows, cols)` `values` matrix plus per-row parameter arrays
/// (`fast_periods`, `slow_periods`, `multipliers`).
#[cfg(feature = "python")]
#[pyfunction(name = "avsl_batch")]
#[pyo3(signature = (close, low, volume, fast_range, slow_range, mult_range, kernel=None))]
pub fn avsl_batch_py<'py>(
    py: Python<'py>,
    close: numpy::PyReadonlyArray1<'py, f64>,
    low: numpy::PyReadonlyArray1<'py, f64>,
    volume: numpy::PyReadonlyArray1<'py, f64>,
    fast_range: (usize, usize, usize),
    slow_range: (usize, usize, usize),
    mult_range: (f64, f64, f64),
    kernel: Option<&str>,
) -> PyResult<Bound<'py, pyo3::types::PyDict>> {
    use numpy::{IntoPyArray, PyArray1, PyArrayMethods};
    use pyo3::types::PyDict;

    let close = close.as_slice()?;
    let low = low.as_slice()?;
    let volume = volume.as_slice()?;

    let sweep = AvslBatchRange {
        fast_period: fast_range,
        slow_period: slow_range,
        multiplier: mult_range,
    };

    // Expand the grid up front so the output array can be sized exactly.
    let combos = expand_grid_avsl(&sweep);
    if combos.is_empty() {
        return Err(PyValueError::new_err(
            AvslError::InvalidRange {
                start: sweep.fast_period.0,
                end: sweep.fast_period.1,
                step: sweep.fast_period.2,
            }
            .to_string(),
        ));
    }
    let rows = combos.len();
    let cols = close.len();
    // Checked product guards against rows*cols overflow.
    let total = rows.checked_mul(cols).ok_or_else(|| {
        PyValueError::new_err(
            AvslError::InvalidRange {
                start: sweep.fast_period.0,
                end: sweep.fast_period.1,
                step: sweep.fast_period.2,
            }
            .to_string(),
        )
    })?;

    // SAFETY: uninitialized numpy allocation; avsl_batch_inner_into is
    // expected to write every cell before the array is returned to Python.
    let out_arr = unsafe { PyArray1::<f64>::new(py, [total], false) };
    let slice_out = unsafe { out_arr.as_slice_mut()? };

    let kern = validate_kernel(kernel, true)?;

    // Release the GIL for the compute; the raw slice stays valid because the
    // array is owned by this call frame.
    let combos = py
        .allow_threads(|| {
            let kernel = match kern {
                Kernel::Auto => detect_best_batch_kernel(),
                k => k,
            };
            let simd = match kernel {
                Kernel::Avx512Batch => Kernel::Avx512,
                Kernel::Avx2Batch => Kernel::Avx2,
                Kernel::ScalarBatch => Kernel::Scalar,
                _ => unreachable!(),
            };
            avsl_batch_inner_into(close, low, volume, &combos, simd, slice_out).map(|_| combos)
        })
        .map_err(|e: AvslError| PyValueError::new_err(e.to_string()))?;

    let dict = PyDict::new(py);
    dict.set_item("values", out_arr.reshape((rows, cols))?)?;
    dict.set_item(
        "fast_periods",
        combos
            .iter()
            .map(|p| p.fast_period.unwrap() as u64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    dict.set_item(
        "slow_periods",
        combos
            .iter()
            .map(|p| p.slow_period.unwrap() as u64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    dict.set_item(
        "multipliers",
        combos
            .iter()
            .map(|p| p.multiplier.unwrap())
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    Ok(dict)
}
2204
2205#[cfg(all(feature = "python", feature = "cuda"))]
2206use crate::cuda::avsl_wrapper::CudaAvsl;
2207#[cfg(all(feature = "python", feature = "cuda"))]
2208use crate::utilities::dlpack_cuda::export_f32_cuda_dlpack_2d;
2209#[cfg(all(feature = "python", feature = "cuda"))]
2210use cust::context::Context;
2211#[cfg(all(feature = "python", feature = "cuda"))]
2212use cust::memory::DeviceBuffer;
2213#[cfg(all(feature = "python", feature = "cuda"))]
2214use std::sync::Arc;
2215
/// Python-visible handle to a CUDA device matrix of f32 AVSL results.
///
/// Holds the CUDA context guard so the allocation outlives Python
/// references; `unsendable` pins the object to its creating thread.
#[cfg(all(feature = "python", feature = "cuda"))]
#[pyclass(module = "ta_indicators.cuda", name = "DeviceArrayF32Avsl", unsendable)]
pub struct DeviceArrayF32AvslPy {
    pub(crate) inner: crate::cuda::moving_averages::DeviceArrayF32,
    _ctx_guard: Arc<Context>,
    _device_id: u32,
}
2223
#[cfg(all(feature = "python", feature = "cuda"))]
#[pymethods]
impl DeviceArrayF32AvslPy {
    /// Direct construction from Python is forbidden; instances come from the
    /// `avsl_cuda_*_dev` factory functions.
    #[new]
    fn py_new() -> PyResult<Self> {
        Err(pyo3::exceptions::PyTypeError::new_err(
            "use factory functions (avsl_cuda_*_dev) to create this type",
        ))
    }

    /// CUDA Array Interface (version 3) dict, letting CuPy/Numba-style
    /// consumers wrap the device buffer without copying.
    #[getter]
    fn __cuda_array_interface__<'py>(
        &self,
        py: Python<'py>,
    ) -> PyResult<Bound<'py, pyo3::types::PyDict>> {
        let inner = &self.inner;
        let d = pyo3::types::PyDict::new(py);
        let item = std::mem::size_of::<f32>();
        d.set_item("shape", (inner.rows, inner.cols))?;
        // Little-endian 4-byte float.
        d.set_item("typestr", "<f4")?;
        // Row-major strides, in bytes.
        d.set_item("strides", (inner.cols * item, item))?;
        let size = inner.rows.saturating_mul(inner.cols);
        // Zero-sized arrays advertise a null data pointer.
        let ptr_val: usize = if size == 0 {
            0
        } else {
            inner.buf.as_device_ptr().as_raw() as usize
        };
        // Second tuple element is the read-only flag: false => writable.
        d.set_item("data", (ptr_val, false))?;

        d.set_item("version", 3)?;
        Ok(d)
    }

    /// DLPack device tuple: (device type 2 = kDLCUDA, device ordinal).
    fn __dlpack_device__(&self) -> PyResult<(i32, i32)> {
        Ok((2, self._device_id as i32))
    }

    /// Exports the buffer as a DLPack capsule, transferring ownership: after
    /// a successful export this object keeps only an empty placeholder.
    #[pyo3(signature = (stream=None, max_version=None, dl_device=None, copy=None))]
    fn __dlpack__<'py>(
        &mut self,
        py: Python<'py>,
        stream: Option<PyObject>,
        max_version: Option<PyObject>,
        dl_device: Option<PyObject>,
        copy: Option<PyObject>,
    ) -> PyResult<PyObject> {
        let (kdl, alloc_dev) = self.__dlpack_device__()?;
        // Reject requests targeting a different device; cross-device copies
        // are not implemented here.
        if let Some(dev_obj) = dl_device.as_ref() {
            if let Ok((dev_ty, dev_id)) = dev_obj.extract::<(i32, i32)>(py) {
                if dev_ty != kdl || dev_id != alloc_dev {
                    let wants_copy = copy
                        .as_ref()
                        .and_then(|c| c.extract::<bool>(py).ok())
                        .unwrap_or(false);
                    if wants_copy {
                        return Err(PyValueError::new_err(
                            "device copy not implemented for __dlpack__",
                        ));
                    } else {
                        return Err(PyValueError::new_err("dl_device mismatch for __dlpack__"));
                    }
                }
            }
        }

        // NOTE(review): the stream argument is accepted but ignored — callers
        // presumably synchronize themselves; confirm against consumers.
        let _ = stream;

        // Swap an empty placeholder into `self` so the real buffer can be
        // moved into the DLPack capsule below.
        let dummy =
            DeviceBuffer::from_slice(&[]).map_err(|e| PyValueError::new_err(e.to_string()))?;
        let inner = std::mem::replace(
            &mut self.inner,
            crate::cuda::moving_averages::DeviceArrayF32 {
                buf: dummy,
                rows: 0,
                cols: 0,
            },
        );

        let rows = inner.rows;
        let cols = inner.cols;
        let buf = inner.buf;

        let max_version_bound = max_version.map(|obj| obj.into_bound(py));

        export_f32_cuda_dlpack_2d(py, buf, rows, cols, alloc_dev, max_version_bound)
    }
}
2311
#[cfg(all(feature = "python", feature = "cuda"))]
impl DeviceArrayF32AvslPy {
    /// Wraps a device buffer with the context guard and device id used by the
    /// DLPack / CUDA-array-interface exports above.
    pub fn new(
        inner: crate::cuda::moving_averages::DeviceArrayF32,
        ctx_guard: Arc<Context>,
        device_id: u32,
    ) -> Self {
        Self {
            inner,
            _ctx_guard: ctx_guard,
            _device_id: device_id,
        }
    }
}
2326
/// Python binding: runs the batch AVSL sweep on the GPU and returns the
/// device-resident f32 result plus a dict of per-row parameter arrays.
#[cfg(all(feature = "python", feature = "cuda"))]
#[pyfunction(name = "avsl_cuda_batch_dev")]
#[pyo3(signature = (close_f32, low_f32, volume_f32, fast_range, slow_range, mult_range, device_id=0))]
pub fn avsl_cuda_batch_dev_py<'py>(
    py: Python<'py>,
    close_f32: numpy::PyReadonlyArray1<'py, f32>,
    low_f32: numpy::PyReadonlyArray1<'py, f32>,
    volume_f32: numpy::PyReadonlyArray1<'py, f32>,
    fast_range: (usize, usize, usize),
    slow_range: (usize, usize, usize),
    mult_range: (f64, f64, f64),
    device_id: usize,
) -> PyResult<(DeviceArrayF32AvslPy, Bound<'py, pyo3::types::PyDict>)> {
    use crate::cuda::cuda_available;
    use numpy::IntoPyArray;
    use pyo3::types::PyDict;
    if !cuda_available() {
        return Err(PyValueError::new_err("CUDA not available"));
    }
    let close = close_f32.as_slice()?;
    let low = low_f32.as_slice()?;
    let vol = volume_f32.as_slice()?;
    let sweep = AvslBatchRange {
        fast_period: fast_range,
        slow_period: slow_range,
        multiplier: mult_range,
    };
    // GIL released for the device setup and kernel launch.
    let (inner, ctx, dev_id, combos) = py.allow_threads(|| {
        let cuda = CudaAvsl::new(device_id).map_err(|e| PyValueError::new_err(e.to_string()))?;
        let ctx = cuda.ctx();
        let dev_id = cuda.device_id();
        let (arr, combos) = cuda
            .avsl_batch_dev(close, low, vol, &sweep)
            .map_err(|e| PyValueError::new_err(e.to_string()))?;
        Ok::<_, pyo3::PyErr>((arr, ctx, dev_id, combos))
    })?;
    // Per-row parameter arrays mirror the row order of the result matrix.
    let dict = PyDict::new(py);
    dict.set_item(
        "fast_periods",
        combos
            .iter()
            .map(|p| p.fast_period.unwrap() as u64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    dict.set_item(
        "slow_periods",
        combos
            .iter()
            .map(|p| p.slow_period.unwrap() as u64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    dict.set_item(
        "multipliers",
        combos
            .iter()
            .map(|p| p.multiplier.unwrap())
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    Ok((DeviceArrayF32AvslPy::new(inner, ctx, dev_id), dict))
}
2390
/// Python binding: one AVSL parameter set applied to many series at once on
/// the GPU. Inputs are flattened time-major buffers of `cols * rows`
/// elements (presumably cols = series count, rows = bars — confirm against
/// the CUDA wrapper); the result stays on the device.
#[cfg(all(feature = "python", feature = "cuda"))]
#[pyfunction(name = "avsl_cuda_many_series_one_param_dev")]
#[pyo3(signature = (close_tm_f32, low_tm_f32, volume_tm_f32, cols, rows, fast_period, slow_period, multiplier, device_id=0))]
pub fn avsl_cuda_many_series_one_param_dev_py(
    py: Python<'_>,
    close_tm_f32: numpy::PyReadonlyArray1<'_, f32>,
    low_tm_f32: numpy::PyReadonlyArray1<'_, f32>,
    volume_tm_f32: numpy::PyReadonlyArray1<'_, f32>,
    cols: usize,
    rows: usize,
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    device_id: usize,
) -> PyResult<DeviceArrayF32AvslPy> {
    use crate::cuda::cuda_available;
    if !cuda_available() {
        return Err(PyValueError::new_err("CUDA not available"));
    }
    let c = close_tm_f32.as_slice()?;
    let l = low_tm_f32.as_slice()?;
    let v = volume_tm_f32.as_slice()?;
    let params = AvslParams {
        fast_period: Some(fast_period),
        slow_period: Some(slow_period),
        multiplier: Some(multiplier),
    };
    // GIL released for the device setup and kernel launch.
    let (inner, ctx, dev_id) = py.allow_threads(|| {
        let cuda = CudaAvsl::new(device_id).map_err(|e| PyValueError::new_err(e.to_string()))?;
        let ctx = cuda.ctx();
        let dev_id = cuda.device_id();
        let arr = cuda
            .avsl_many_series_one_param_time_major_dev(c, l, v, cols, rows, &params)
            .map_err(|e| PyValueError::new_err(e.to_string()))?;
        Ok::<_, pyo3::PyErr>((arr, ctx, dev_id))
    })?;
    Ok(DeviceArrayF32AvslPy::new(inner, ctx, dev_id))
}
2429
/// WASM binding: computes a single AVSL series and returns it to JS.
///
/// Validates lengths, NaN prefix, periods and multiplier, then runs a
/// one-combination batch sweep — rows = 1, so `out.values` is exactly the
/// single series of `close.len()` values.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn avsl_js(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
) -> Result<Vec<f64>, JsValue> {
    let len = close.len();
    if len == 0 {
        return Err(JsValue::from_str("empty input"));
    }
    if close.len() != low.len() || close.len() != volume.len() {
        return Err(JsValue::from_str("data length mismatch"));
    }
    // First index where close, low and volume are all valid; only used to
    // verify enough usable data remains.
    let first = first_valid_max3(close, low, volume)
        .ok_or_else(|| JsValue::from_str("All values are NaN"))?;
    if fast_period == 0 || fast_period > len {
        return Err(JsValue::from_str("Invalid period"));
    }
    if slow_period == 0 || slow_period > len {
        return Err(JsValue::from_str("Invalid period"));
    }
    if !(multiplier.is_finite()) || multiplier <= 0.0 {
        return Err(JsValue::from_str("Invalid multiplier"));
    }
    if len - first < slow_period {
        return Err(JsValue::from_str("Not enough valid data"));
    }

    // Static (single-combo) sweep: step 0 pins each axis.
    let sweep = AvslBatchRange {
        fast_period: (fast_period, fast_period, 0),
        slow_period: (slow_period, slow_period, 0),
        multiplier: (multiplier, multiplier, 0.0),
    };
    let out = avsl_batch_with_kernel(close, low, volume, &sweep, detect_best_batch_kernel())
        .map_err(|e| JsValue::from_str(&e.to_string()))?;
    Ok(out.values)
}
2471
/// Computes AVSL into a caller-provided buffer using the auto-detected kernel
/// (native counterpart of the wasm `avsl_into` export).
#[cfg(not(all(target_arch = "wasm32", feature = "wasm")))]
#[inline]
pub fn avsl_into(input: &AvslInput, out: &mut [f64]) -> Result<(), AvslError> {
    avsl_into_slice(out, input, Kernel::Auto)
}
2477
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
/// Allocates a zero-initialized `f64` buffer of `len` elements in wasm linear
/// memory and returns its raw pointer for JS to fill.
///
/// The buffer MUST be released with `avsl_free` using the same `len`.
///
/// Fix vs. the previous version: the buffer is now zero-initialized and
/// allocated as a boxed slice, so (a) JS can never observe uninitialized
/// memory through the returned pointer, and (b) the allocation's capacity is
/// exactly `len`, matching the `Vec::from_raw_parts(ptr, len, len)`
/// reconstruction performed by `avsl_free`. The old
/// `Vec::with_capacity(len)` only guarantees capacity of *at least* `len`,
/// making that round-trip rely on an unguaranteed exact-capacity assumption
/// (undefined behavior if it ever differs).
pub fn avsl_alloc(len: usize) -> *mut f64 {
    let mut buf = vec![0.0f64; len].into_boxed_slice();
    let ptr = buf.as_mut_ptr();
    // Leak intentionally: ownership transfers to the JS caller until
    // avsl_free is invoked.
    std::mem::forget(buf);
    ptr
}
2486
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
/// Releases a buffer previously returned by `avsl_alloc`.
///
/// # Safety
/// `ptr` must originate from `avsl_alloc(len)` with this exact `len`, and
/// must not be used or freed again afterwards; a mismatched `len` or a
/// double-free is undefined behavior.
pub fn avsl_free(ptr: *mut f64, len: usize) {
    unsafe {
        // Rebuild the Vec so its Drop returns the allocation to the allocator.
        let _ = Vec::from_raw_parts(ptr, len, len);
    }
}
2494
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
/// Pointer-based AVSL computation for JS callers that manage their own wasm
/// memory (see `avsl_alloc` / `avsl_free`). Writes `len` values to `out_ptr`.
/// `out_ptr` may alias one of the input pointers; a scratch buffer is used in
/// that case so inputs are not clobbered mid-computation.
pub fn avsl_into(
    close_ptr: *const f64,
    low_ptr: *const f64,
    vol_ptr: *const f64,
    out_ptr: *mut f64,
    len: usize,
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
) -> Result<(), JsValue> {
    if close_ptr.is_null() || low_ptr.is_null() || vol_ptr.is_null() || out_ptr.is_null() {
        return Err(JsValue::from_str("Null pointer"));
    }
    // SAFETY: all pointers were checked non-null above, and the caller
    // guarantees each one addresses `len` f64 values (`out_ptr` writable).
    unsafe {
        let close = core::slice::from_raw_parts(close_ptr, len);
        let low = core::slice::from_raw_parts(low_ptr, len);
        let vol = core::slice::from_raw_parts(vol_ptr, len);
        let out = core::slice::from_raw_parts_mut(out_ptr, len);

        let n = close.len();
        if n == 0 {
            return Err(JsValue::from_str("empty input"));
        }
        if low.len() != n || vol.len() != n {
            return Err(JsValue::from_str("data length mismatch"));
        }
        let first = first_valid_max3(close, low, vol)
            .ok_or_else(|| JsValue::from_str("All values are NaN"))?;
        if fast_period == 0 || fast_period > n || slow_period == 0 || slow_period > n {
            return Err(JsValue::from_str("Invalid period"));
        }
        if multiplier <= 0.0 || !multiplier.is_finite() {
            return Err(JsValue::from_str("Invalid multiplier"));
        }
        if n - first < slow_period {
            return Err(JsValue::from_str("Not enough valid data"));
        }

        // Single-combo batch run reuses the batch kernel machinery.
        let combos = vec![AvslParams {
            fast_period: Some(fast_period),
            slow_period: Some(slow_period),
            multiplier: Some(multiplier),
        }];
        let kernel = detect_best_batch_kernel();

        if [close_ptr, low_ptr, vol_ptr].contains(&(out_ptr as *const f64)) {
            // Output aliases an input: stage results in a scratch buffer.
            let mut scratch = vec![0.0; len];
            avsl_batch_inner_into(close, low, vol, &combos, kernel, &mut scratch)
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
            out.copy_from_slice(&scratch);
        } else {
            avsl_batch_inner_into(close, low, vol, &combos, kernel, out)
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
        }
        Ok(())
    }
}
2567
/// JS-facing sweep description for `avsl_batch`: each field is a
/// `(start, end, step)` tuple that is expanded into a grid of parameter
/// combinations (expansion semantics live in `expand_grid_avsl`).
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Serialize, Deserialize)]
pub struct AvslBatchConfig {
    // Fast-period sweep as (start, end, step).
    pub fast_range: (usize, usize, usize),
    // Slow-period sweep as (start, end, step).
    pub slow_range: (usize, usize, usize),
    // Multiplier sweep as (start, end, step).
    pub mult_range: (f64, f64, f64),
}
2575
/// Serialized result of a JS batch sweep: a flattened `rows x cols` matrix
/// of AVSL values plus the parameter set behind each row.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Serialize, Deserialize)]
pub struct AvslBatchJsOutput {
    // Flattened values, one row per parameter combo — presumably row-major;
    // confirm against avsl_batch_inner_into's layout.
    pub values: Vec<f64>,
    // Parameter combination corresponding to each row of `values`.
    pub combos: Vec<AvslParams>,
    // Number of parameter combinations (rows).
    pub rows: usize,
    // Series length (columns per row).
    pub cols: usize,
}
2584
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen(js_name = avsl_batch)]
/// JS entry point for AVSL parameter sweeps: `config` is a serialized
/// `AvslBatchConfig`; the result is a serialized `AvslBatchJsOutput`.
pub fn avsl_batch_unified_js(
    close: &[f64],
    low: &[f64],
    volume: &[f64],
    config: JsValue,
) -> Result<JsValue, JsValue> {
    // Destructure the config straight out of the JS value.
    let AvslBatchConfig {
        fast_range,
        slow_range,
        mult_range,
    } = serde_wasm_bindgen::from_value(config)
        .map_err(|e| JsValue::from_str(&format!("Invalid config: {}", e)))?;

    let out = avsl_batch_with_kernel(
        close,
        low,
        volume,
        &AvslBatchRange {
            fast_period: fast_range,
            slow_period: slow_range,
            multiplier: mult_range,
        },
        detect_best_batch_kernel(),
    )
    .map_err(|e| JsValue::from_str(&e.to_string()))?;

    serde_wasm_bindgen::to_value(&AvslBatchJsOutput {
        values: out.values,
        combos: out.combos,
        rows: out.rows,
        cols: out.cols,
    })
    .map_err(|e| JsValue::from_str(&format!("Serialization error: {}", e)))
}
2614
/// Reusable AVSL computation context exposed to JS: parameters validated
/// once in the constructor, then applied per call via `update_into`.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub struct AvslContext {
    fast_period: usize,
    slow_period: usize,
    multiplier: f64,
    // NOTE(review): stored but never read — update_into calls avsl_scalar
    // directly rather than dispatching on this kernel; confirm intent.
    kernel: Kernel,
}
2623
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
impl AvslContext {
    /// Builds a context after validating the three AVSL parameters; errors
    /// are returned as descriptive JS strings.
    #[wasm_bindgen(constructor)]
    pub fn new(
        fast_period: usize,
        slow_period: usize,
        multiplier: f64,
    ) -> Result<AvslContext, JsValue> {
        if fast_period == 0 {
            return Err(JsValue::from_str(&format!(
                "Invalid fast period: {}",
                fast_period
            )));
        }
        if slow_period == 0 {
            return Err(JsValue::from_str(&format!(
                "Invalid slow period: {}",
                slow_period
            )));
        }
        if !multiplier.is_finite() || multiplier <= 0.0 {
            return Err(JsValue::from_str(&format!(
                "Invalid multiplier: {}",
                multiplier
            )));
        }

        Ok(AvslContext {
            fast_period,
            slow_period,
            multiplier,
            kernel: Kernel::Auto,
        })
    }

    /// Computes AVSL over the full `len`-bar window into `out_ptr` using the
    /// scalar kernel. `out_ptr` may alias an input pointer; a scratch buffer
    /// is used in that case.
    pub fn update_into(
        &self,
        close_ptr: *const f64,
        low_ptr: *const f64,
        vol_ptr: *const f64,
        out_ptr: *mut f64,
        len: usize,
    ) -> Result<(), JsValue> {
        if len < self.slow_period {
            return Err(JsValue::from_str("Data length less than slow period"));
        }

        if close_ptr.is_null() || low_ptr.is_null() || vol_ptr.is_null() || out_ptr.is_null() {
            return Err(JsValue::from_str("Null pointer passed"));
        }

        // SAFETY: pointers checked non-null above; the caller guarantees each
        // one addresses `len` f64 values (`out_ptr` writable).
        unsafe {
            let close = std::slice::from_raw_parts(close_ptr, len);
            let low = std::slice::from_raw_parts(low_ptr, len);
            let volume = std::slice::from_raw_parts(vol_ptr, len);
            let out = std::slice::from_raw_parts_mut(out_ptr, len);

            // All-NaN input falls back to index 0 and is left to avsl_scalar
            // to handle.
            let first = first_valid_max3(close, low, volume).unwrap_or(0);

            if [close_ptr, low_ptr, vol_ptr].contains(&(out_ptr as *const f64)) {
                // Output aliases an input: stage results in a scratch buffer.
                let mut scratch = vec![0.0; len];
                avsl_scalar(
                    close,
                    low,
                    volume,
                    self.fast_period,
                    self.slow_period,
                    self.multiplier,
                    first,
                    &mut scratch,
                )
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
                out.copy_from_slice(&scratch);
            } else {
                avsl_scalar(
                    close,
                    low,
                    volume,
                    self.fast_period,
                    self.slow_period,
                    self.multiplier,
                    first,
                    out,
                )
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
            }
        }

        Ok(())
    }

    /// Number of leading warm-up outputs (slow_period - 1).
    pub fn get_warmup_period(&self) -> usize {
        self.slow_period - 1
    }
}
2723
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
/// Pointer-based batch sweep: expands the three (start, end, step) ranges
/// into a parameter grid and writes a `rows x len` matrix into `out_ptr`.
/// Returns the number of rows written.
pub fn avsl_batch_into(
    close_ptr: *const f64,
    low_ptr: *const f64,
    vol_ptr: *const f64,
    out_ptr: *mut f64,
    len: usize,
    fast_start: usize,
    fast_end: usize,
    fast_step: usize,
    slow_start: usize,
    slow_end: usize,
    slow_step: usize,
    mult_start: f64,
    mult_end: f64,
    mult_step: f64,
) -> Result<usize, JsValue> {
    if close_ptr.is_null() || low_ptr.is_null() || vol_ptr.is_null() || out_ptr.is_null() {
        return Err(JsValue::from_str("null pointer passed to avsl_batch_into"));
    }
    let sweep = AvslBatchRange {
        fast_period: (fast_start, fast_end, fast_step),
        slow_period: (slow_start, slow_end, slow_step),
        multiplier: (mult_start, mult_end, mult_step),
    };

    // Both failure modes (empty grid, output-size overflow) report the same
    // InvalidRange payload built from the fast-period range.
    let range_err = || {
        JsValue::from_str(
            &AvslError::InvalidRange {
                start: sweep.fast_period.0,
                end: sweep.fast_period.1,
                step: sweep.fast_period.2,
            }
            .to_string(),
        )
    };

    let combos = expand_grid_avsl(&sweep);
    if combos.is_empty() {
        return Err(range_err());
    }
    let rows = combos.len();
    let total = rows.checked_mul(len).ok_or_else(range_err)?;

    // SAFETY: pointers checked non-null above; the caller guarantees `len`
    // readable f64s per input and `rows * len` writable f64s at `out_ptr`.
    unsafe {
        let close = core::slice::from_raw_parts(close_ptr, len);
        let low = core::slice::from_raw_parts(low_ptr, len);
        let vol = core::slice::from_raw_parts(vol_ptr, len);
        let out = core::slice::from_raw_parts_mut(out_ptr, total);

        avsl_batch_inner_into(close, low, vol, &combos, detect_best_batch_kernel(), out)
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
    }
    Ok(rows)
}
2785
#[cfg(test)]
mod tests {
    //! Tests for the AVSL indicator: per-kernel accuracy against recorded
    //! reference values, input validation, batch sweeps, streaming parity
    //! with the batch API, and the `avsl_into` buffer API.
    use super::*;
    use crate::utilities::data_loader::read_candles_from_csv;
    use paste::paste;
    use std::error::Error;

    // Early-returns Ok(()) when the requested kernel needs AVX but this build
    // lacks nightly-avx/x86_64 support, so kernel-parameterized tests are
    // skipped rather than failed.
    macro_rules! skip_if_unsupported {
        ($kernel:expr, $test_name:expr) => {
            #[cfg(not(all(feature = "nightly-avx", target_arch = "x86_64")))]
            {
                if matches!(
                    $kernel,
                    Kernel::Avx2 | Kernel::Avx512 | Kernel::Avx2Batch | Kernel::Avx512Batch
                ) {
                    eprintln!("Skipping {} - AVX not supported", $test_name);
                    return Ok(());
                }
            }
        };
    }

    /// Checks the last five AVSL values on the reference candle fixture
    /// against recorded expectations, within 1% relative tolerance.
    fn check_avsl_accuracy(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let input = AvslInput::from_candles(&candles, "close", "low", AvslParams::default());
        let result = avsl_with_kernel(&input, kernel)?;

        // Reference values captured from a known-good run on this fixture.
        let expected_last_five = [
            56471.61721191,
            56267.11946706,
            56079.12004921,
            55910.07971214,
            55765.37864229,
        ];

        let start = result.values.len().saturating_sub(5);
        for (i, &val) in result.values[start..].iter().enumerate() {
            let diff = (val - expected_last_five[i]).abs();
            let tolerance = expected_last_five[i].abs() * 0.01;
            assert!(
                diff < tolerance,
                "[{}] AVSL {:?} mismatch at idx {}: got {}, expected {}, diff {}",
                test_name,
                kernel,
                i,
                val,
                expected_last_five[i],
                diff
            );
        }
        Ok(())
    }

    /// Empty input slices must be rejected (kernel is irrelevant here).
    fn check_avsl_empty_input(test_name: &str, _kernel: Kernel) -> Result<(), Box<dyn Error>> {
        let empty: [f64; 0] = [];
        let params = AvslParams::default();
        let input = AvslInput::from_slices(&empty, &empty, &empty, params);
        let res = avsl(&input);
        assert!(
            res.is_err(),
            "[{}] Expected error for empty input",
            test_name
        );
        Ok(())
    }

    /// All-NaN input must be rejected by every kernel.
    fn check_avsl_all_nan(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let nan_data = [f64::NAN, f64::NAN, f64::NAN];
        let params = AvslParams::default();
        let input = AvslInput::from_slices(&nan_data, &nan_data, &nan_data, params);
        let res = avsl_with_kernel(&input, kernel);
        assert!(
            res.is_err(),
            "[{}] Expected error for all NaN input",
            test_name
        );
        Ok(())
    }

    /// Mismatched close/low/volume lengths must be rejected.
    fn check_avsl_mismatched_lengths(
        test_name: &str,
        kernel: Kernel,
    ) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let close = [1.0, 2.0, 3.0];
        let low = [0.9, 1.9];
        let volume = [100.0, 200.0, 300.0];
        let params = AvslParams::default();
        let input = AvslInput::from_slices(&close, &low, &volume, params);
        let res = avsl_with_kernel(&input, kernel);
        assert!(
            res.is_err(),
            "[{}] Expected error for mismatched data lengths",
            test_name
        );
        Ok(())
    }

    /// A non-positive multiplier must be rejected.
    fn check_avsl_invalid_multiplier(
        test_name: &str,
        kernel: Kernel,
    ) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let data = vec![1.0; 100];
        let params = AvslParams {
            fast_period: Some(12),
            slow_period: Some(26),
            multiplier: Some(-1.0),
        };
        let input = AvslInput::from_slices(&data, &data, &data, params);
        let res = avsl_with_kernel(&input, kernel);
        assert!(
            res.is_err(),
            "[{}] Expected error for invalid multiplier",
            test_name
        );
        Ok(())
    }

    // Expands each check_* helper into per-kernel #[test] fns via paste!.
    // NOTE(review): `let _ =` discards the returned Result, so a helper that
    // returns Err (e.g. missing CSV fixture) passes silently — only assert!
    // panics fail the test. Also, the #[cfg] attributes sit before a $(...)*
    // repetition and appear to gate only the first generated fn of that
    // repetition — confirm this is intended.
    macro_rules! generate_all_avsl_tests {
        ($($test_fn:ident),*) => {
            paste::paste! {
                $(
                    #[test]
                    fn [<$test_fn _scalar>]() {
                        let _ = $test_fn(stringify!([<$test_fn _scalar>]), Kernel::Scalar);
                    }
                )*
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                $(
                    #[test]
                    fn [<$test_fn _avx2>]() {
                        let _ = $test_fn(stringify!([<$test_fn _avx2>]), Kernel::Avx2);
                    }
                    #[test]
                    fn [<$test_fn _avx512>]() {
                        let _ = $test_fn(stringify!([<$test_fn _avx512>]), Kernel::Avx512);
                    }
                )*
                #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
                $(
                    #[test]
                    fn [<$test_fn _simd128>]() {
                        let _ = $test_fn(stringify!([<$test_fn _simd128>]), Kernel::Scalar);
                    }
                )*
            }
        }
    }

    generate_all_avsl_tests!(
        check_avsl_accuracy,
        check_avsl_empty_input,
        check_avsl_all_nan,
        check_avsl_mismatched_lengths,
        check_avsl_invalid_multiplier
    );

    /// The batch sweep's default-parameter row must match the single-run
    /// reference values (same fixture and tolerance as check_avsl_accuracy).
    fn check_avsl_batch_default_row(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let output = AvslBatchBuilder::new()
            .kernel(kernel)
            .apply_candles(&candles, "close", "low")?;

        let def = AvslParams::default();
        let row = output.values_for(&def).expect("default row missing");

        assert_eq!(row.len(), candles.close.len());

        let expected_last_five = [
            56471.61721191,
            56267.11946706,
            56079.12004921,
            55910.07971214,
            55765.37864229,
        ];

        let start = row.len().saturating_sub(5);
        for (i, &val) in row[start..].iter().enumerate() {
            let diff = (val - expected_last_five[i]).abs();
            let tolerance = expected_last_five[i].abs() * 0.01;
            assert!(
                diff < tolerance,
                "[{}] AVSL batch default row {:?} mismatch at idx {}: got {}, expected {}",
                test_name,
                kernel,
                i,
                val,
                expected_last_five[i]
            );
        }
        Ok(())
    }

    /// A 2x2x3 range sweep must produce exactly 12 combos/rows, one column
    /// per input bar.
    fn check_avsl_batch_range(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let output = AvslBatchBuilder::new()
            .kernel(kernel)
            .fast_range(10, 15, 5)
            .slow_range(20, 30, 10)
            .mult_range(1.5, 2.5, 0.5)
            .apply_candles(&candles, "close", "low")?;

        // fast {10,15} x slow {20,30} x mult {1.5,2.0,2.5}
        let expected_combos = 2 * 2 * 3;
        assert_eq!(output.combos.len(), expected_combos);
        assert_eq!(output.rows, expected_combos);
        assert_eq!(output.cols, candles.close.len());

        Ok(())
    }

    // Expands a batch check helper into scalar/AVX2/AVX512/auto #[test] fns.
    // NOTE(review): same `let _ =` Result-swallowing caveat as above.
    macro_rules! gen_batch_tests {
        ($fn_name:ident) => {
            paste::paste! {
                #[test]
                fn [<$fn_name _scalar>]() {
                    let _ = $fn_name(stringify!([<$fn_name _scalar>]), Kernel::ScalarBatch);
                }
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                #[test]
                fn [<$fn_name _avx2>]() {
                    let _ = $fn_name(stringify!([<$fn_name _avx2>]), Kernel::Avx2Batch);
                }
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                #[test]
                fn [<$fn_name _avx512>]() {
                    let _ = $fn_name(stringify!([<$fn_name _avx512>]), Kernel::Avx512Batch);
                }
                #[test]
                fn [<$fn_name _auto_detect>]() {
                    let _ = $fn_name(stringify!([<$fn_name _auto_detect>]), Kernel::Auto);
                }
            }
        };
    }

    gen_batch_tests!(check_avsl_batch_default_row);
    gen_batch_tests!(check_avsl_batch_range);

    /// Streaming updates must converge to the batch result: the final
    /// streamed value is compared to the last non-NaN batch value (1% tol).
    #[test]
    fn test_avsl_streaming() -> Result<(), Box<dyn Error>> {
        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let params = AvslParams::default();
        let input = AvslInput::from_candles(&candles, "close", "low", params.clone());
        let batch_result = avsl(&input)?;

        let mut stream = AvslStream::try_new(params)?;

        let mut stream_results = Vec::new();
        for i in 0..candles.close.len() {
            if let Some(value) = stream.update(candles.close[i], candles.low[i], candles.volume[i])
            {
                stream_results.push(value);
            }
        }

        if !stream_results.is_empty() && !batch_result.values.is_empty() {
            let last_stream = stream_results.last().unwrap();
            let last_batch = batch_result
                .values
                .iter()
                .rev()
                .find(|&&v| !v.is_nan())
                .unwrap();

            let diff = (last_stream - last_batch).abs();
            let tolerance = last_batch.abs() * 0.01;
            assert!(
                diff < tolerance,
                "Streaming vs batch mismatch: {} vs {}, diff {}",
                last_stream,
                last_batch,
                diff
            );
        }

        Ok(())
    }

    /// Exercises the batch convenience helpers: default builders, row lookup
    /// by params, and parallel-vs-serial batch shape agreement.
    #[test]
    fn test_avsl_batch_helpers() -> Result<(), Box<dyn Error>> {
        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let output = AvslBatchBuilder::with_default_candles(&candles).map_err(|e| {
            eprintln!("Error: {:?}", e);
            e
        })?;
        assert_eq!(output.cols, candles.close.len());

        let params = AvslParams::default();
        let row_idx = output.row_for_params(&params);
        assert!(row_idx.is_some());
        assert_eq!(row_idx.unwrap(), 0);

        let sweep = AvslBatchRange::default();
        let par_output = avsl_batch_par_slice(
            &candles.close,
            &candles.low,
            &candles.volume,
            &sweep,
            Kernel::Auto,
        )?;
        let ser_output = avsl_batch_slice(
            &candles.close,
            &candles.low,
            &candles.volume,
            &sweep,
            Kernel::ScalarBatch,
        )?;

        assert_eq!(par_output.rows, ser_output.rows);
        assert_eq!(par_output.cols, ser_output.cols);

        // 250 rows is presumably the default sweep's combo count — tied to
        // AvslBatchRange::default(); update together if that changes.
        let default_output = AvslBatchBuilder::with_default_slices(
            &candles.close,
            &candles.low,
            &candles.volume,
            Kernel::Auto,
        )?;
        assert_eq!(default_output.cols, candles.close.len());
        assert_eq!(default_output.rows, 250);

        Ok(())
    }

    /// avsl_into must produce exactly the same values (NaN-for-NaN) as the
    /// allocating avsl() API.
    #[cfg(not(all(target_arch = "wasm32", feature = "wasm")))]
    #[test]
    fn test_avsl_into_matches_api() -> Result<(), Box<dyn Error>> {
        // Treats two NaNs as equal so warm-up prefixes compare cleanly.
        fn eq_or_both_nan(a: f64, b: f64) -> bool {
            (a.is_nan() && b.is_nan()) || (a == b)
        }

        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;
        let input = AvslInput::from_candles(&candles, "close", "low", AvslParams::default());

        let baseline = avsl(&input)?;

        let mut out = vec![0.0f64; candles.close.len()];
        avsl_into(&input, &mut out)?;

        assert_eq!(baseline.values.len(), out.len());
        for i in 0..out.len() {
            assert!(
                eq_or_both_nan(baseline.values[i], out[i]),
                "Mismatch at index {}: api={} into={}",
                i,
                baseline.values[i],
                out[i]
            );
        }
        Ok(())
    }
}