1#[cfg(all(feature = "python", feature = "cuda"))]
2use crate::cuda::cuda_available;
3#[cfg(all(feature = "python", feature = "cuda"))]
4use crate::cuda::oscillators::CudaReverseRsi;
5#[cfg(all(feature = "python", feature = "cuda"))]
6use crate::utilities::dlpack_cuda::DeviceArrayF32Py;
7#[cfg(feature = "python")]
8use numpy::{IntoPyArray, PyArray1, PyArrayMethods, PyReadonlyArray1};
9#[cfg(feature = "python")]
10use pyo3::exceptions::PyValueError;
11#[cfg(feature = "python")]
12use pyo3::prelude::*;
13#[cfg(feature = "python")]
14use pyo3::types::PyDict;
15
16#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
17use serde::{Deserialize, Serialize};
18#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
19use wasm_bindgen::prelude::*;
20
21use crate::indicators::moving_averages::ema::{ema, ema_into_slice, EmaInput, EmaParams};
22use crate::utilities::data_loader::{source_type, Candles};
23use crate::utilities::enums::Kernel;
24use crate::utilities::helpers::{
25 alloc_with_nan_prefix, detect_best_batch_kernel, detect_best_kernel, init_matrix_prefixes,
26 make_uninit_matrix,
27};
28#[cfg(feature = "python")]
29use crate::utilities::kernel_validation::validate_kernel;
30
31#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
32use core::arch::x86_64::*;
33
34#[cfg(not(target_arch = "wasm32"))]
35use rayon::prelude::*;
36
37use std::convert::AsRef;
38use std::error::Error;
39use std::mem::MaybeUninit;
40use thiserror::Error;
41
42impl<'a> AsRef<[f64]> for ReverseRsiInput<'a> {
43 #[inline(always)]
44 fn as_ref(&self) -> &[f64] {
45 match &self.data {
46 ReverseRsiData::Slice(slice) => slice,
47 ReverseRsiData::Candles { candles, source } => source_type(candles, source),
48 }
49 }
50}
51
/// Source of the input price series: either a candle container plus the
/// name of the column to read, or a raw `f64` slice.
#[derive(Debug, Clone)]
pub enum ReverseRsiData<'a> {
    Candles {
        candles: &'a Candles,
        source: &'a str,
    },
    Slice(&'a [f64]),
}
60
/// Result of a single reverse-RSI run; `values` has the same length as the
/// input, with a NaN warm-up prefix before the first computed sample.
#[derive(Debug, Clone)]
pub struct ReverseRsiOutput {
    pub values: Vec<f64>,
}
65
/// Reverse-RSI parameters. `None` fields fall back to the defaults
/// (length 14, level 50.0) at the point of use.
#[derive(Debug, Clone)]
#[cfg_attr(
    all(target_arch = "wasm32", feature = "wasm"),
    derive(Serialize, Deserialize)
)]
pub struct ReverseRsiParams {
    // RSI look-back length (default 14).
    pub rsi_length: Option<usize>,
    // Target RSI level in (0, 100) exclusive (default 50.0).
    pub rsi_level: Option<f64>,
}
75
impl Default for ReverseRsiParams {
    /// Default parameters: 14-period RSI, 50.0 target level.
    fn default() -> Self {
        Self {
            rsi_length: Some(14),
            rsi_level: Some(50.0),
        }
    }
}
84
/// Bundles a price source with its reverse-RSI parameters.
#[derive(Debug, Clone)]
pub struct ReverseRsiInput<'a> {
    pub data: ReverseRsiData<'a>,
    pub params: ReverseRsiParams,
}
90
91impl<'a> ReverseRsiInput<'a> {
92 #[inline]
93 pub fn from_candles(c: &'a Candles, s: &'a str, p: ReverseRsiParams) -> Self {
94 Self {
95 data: ReverseRsiData::Candles {
96 candles: c,
97 source: s,
98 },
99 params: p,
100 }
101 }
102
103 #[inline]
104 pub fn from_slice(sl: &'a [f64], p: ReverseRsiParams) -> Self {
105 Self {
106 data: ReverseRsiData::Slice(sl),
107 params: p,
108 }
109 }
110
111 #[inline]
112 pub fn with_default_candles(c: &'a Candles) -> Self {
113 Self::from_candles(c, "close", ReverseRsiParams::default())
114 }
115
116 #[inline]
117 pub fn get_rsi_length(&self) -> usize {
118 self.params.rsi_length.unwrap_or(14)
119 }
120
121 #[inline]
122 pub fn get_rsi_level(&self) -> f64 {
123 self.params.rsi_level.unwrap_or(50.0)
124 }
125}
126
/// Fluent builder for single-series reverse-RSI runs.
#[derive(Copy, Clone, Debug)]
pub struct ReverseRsiBuilder {
    // Optional override of the RSI look-back length.
    rsi_length: Option<usize>,
    // Optional override of the target RSI level.
    rsi_level: Option<f64>,
    // Compute kernel selection; `Auto` picks the best available.
    kernel: Kernel,
}
133
impl Default for ReverseRsiBuilder {
    /// No parameter overrides; kernel auto-detection.
    fn default() -> Self {
        Self {
            rsi_length: None,
            rsi_level: None,
            kernel: Kernel::Auto,
        }
    }
}
143
144impl ReverseRsiBuilder {
145 #[inline(always)]
146 pub fn new() -> Self {
147 Self::default()
148 }
149
150 #[inline(always)]
151 pub fn rsi_length(mut self, val: usize) -> Self {
152 self.rsi_length = Some(val);
153 self
154 }
155
156 #[inline(always)]
157 pub fn rsi_level(mut self, val: f64) -> Self {
158 self.rsi_level = Some(val);
159 self
160 }
161
162 #[inline(always)]
163 pub fn kernel(mut self, k: Kernel) -> Self {
164 self.kernel = k;
165 self
166 }
167
168 #[inline(always)]
169 pub fn apply(self, c: &Candles) -> Result<ReverseRsiOutput, ReverseRsiError> {
170 let p = ReverseRsiParams {
171 rsi_length: self.rsi_length,
172 rsi_level: self.rsi_level,
173 };
174 let i = ReverseRsiInput::from_candles(c, "close", p);
175 reverse_rsi_with_kernel(&i, self.kernel)
176 }
177
178 #[inline(always)]
179 pub fn apply_slice(self, d: &[f64]) -> Result<ReverseRsiOutput, ReverseRsiError> {
180 let p = ReverseRsiParams {
181 rsi_length: self.rsi_length,
182 rsi_level: self.rsi_level,
183 };
184 let i = ReverseRsiInput::from_slice(d, p);
185 reverse_rsi_with_kernel(&i, self.kernel)
186 }
187
188 #[inline(always)]
189 pub fn into_stream(self) -> Result<ReverseRsiStream, ReverseRsiError> {
190 let p = ReverseRsiParams {
191 rsi_length: self.rsi_length,
192 rsi_level: self.rsi_level,
193 };
194 ReverseRsiStream::try_new(p)
195 }
196}
197
/// Parameter sweep description for batch runs; each axis is an inclusive
/// `(start, end, step)` range.
#[derive(Debug, Clone)]
pub struct ReverseRsiBatchRange {
    pub rsi_length_range: (usize, usize, usize),
    pub rsi_level_range: (f64, f64, f64),
}
203
impl Default for ReverseRsiBatchRange {
    /// Default sweep: lengths 14..=263 step 1, level fixed at 50.0.
    fn default() -> Self {
        Self {
            rsi_length_range: (14, 263, 1),
            rsi_level_range: (50.0, 50.0, 0.0),
        }
    }
}
212
/// Row-major result matrix of a parameter sweep: `rows` parameter combos ×
/// `cols` samples, stored flat in `values` (`combos[r]` describes row `r`).
#[derive(Debug, Clone)]
pub struct ReverseRsiBatchOutput {
    pub values: Vec<f64>,
    pub combos: Vec<ReverseRsiParams>,
    pub rows: usize,
    pub cols: usize,
}
220
221impl ReverseRsiBatchOutput {
222 pub fn row_for_params(&self, p: &ReverseRsiParams) -> Option<usize> {
223 self.combos.iter().position(|c| {
224 c.rsi_length.unwrap_or(14) == p.rsi_length.unwrap_or(14)
225 && (c.rsi_level.unwrap_or(50.0) - p.rsi_level.unwrap_or(50.0)).abs() < 1e-12
226 })
227 }
228 pub fn values_for(&self, p: &ReverseRsiParams) -> Option<&[f64]> {
229 self.row_for_params(p).map(|row| {
230 let start = row * self.cols;
231 &self.values[start..start + self.cols]
232 })
233 }
234}
235
/// Fluent builder for batch (parameter-sweep) reverse-RSI runs.
#[derive(Copy, Clone, Debug)]
pub struct ReverseRsiBatchBuilder {
    // Inclusive (start, end, step) sweep over RSI lengths.
    rsi_length_range: (usize, usize, usize),
    // Inclusive (start, end, step) sweep over RSI levels.
    rsi_level_range: (f64, f64, f64),
    // Batch kernel selection; `Auto` picks the best available.
    kernel: Kernel,
}
242
impl Default for ReverseRsiBatchBuilder {
    /// Degenerate single-combo sweep (length 14, level 50.0), auto kernel.
    fn default() -> Self {
        Self {
            rsi_length_range: (14, 14, 0),
            rsi_level_range: (50.0, 50.0, 0.0),
            kernel: Kernel::Auto,
        }
    }
}
252
253impl ReverseRsiBatchBuilder {
254 #[inline(always)]
255 pub fn new() -> Self {
256 Self::default()
257 }
258
259 #[inline(always)]
260 pub fn rsi_length_range(mut self, start: usize, end: usize, step: usize) -> Self {
261 self.rsi_length_range = (start, end, step);
262 self
263 }
264
265 #[inline(always)]
266 pub fn rsi_level_range(mut self, start: f64, end: f64, step: f64) -> Self {
267 self.rsi_level_range = (start, end, step);
268 self
269 }
270
271 #[inline(always)]
272 pub fn kernel(mut self, k: Kernel) -> Self {
273 self.kernel = k;
274 self
275 }
276
277 #[inline(always)]
278 pub fn apply_candles(
279 self,
280 c: &Candles,
281 source: &str,
282 ) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
283 let sweep = ReverseRsiBatchRange {
284 rsi_length_range: self.rsi_length_range,
285 rsi_level_range: self.rsi_level_range,
286 };
287 let data = source_type(c, source);
288 reverse_rsi_batch_slice(data, &sweep, self.kernel)
289 }
290
291 #[inline(always)]
292 pub fn apply_slice(self, data: &[f64]) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
293 let sweep = ReverseRsiBatchRange {
294 rsi_length_range: self.rsi_length_range,
295 rsi_level_range: self.rsi_level_range,
296 };
297 reverse_rsi_batch_slice(data, &sweep, self.kernel)
298 }
299
300 pub fn with_default_slice(
301 data: &[f64],
302 k: Kernel,
303 ) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
304 ReverseRsiBatchBuilder::new().kernel(k).apply_slice(data)
305 }
306
307 pub fn with_default_candles(c: &Candles) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
308 ReverseRsiBatchBuilder::new()
309 .kernel(Kernel::Auto)
310 .apply_candles(c, "close")
311 }
312}
313
/// Errors produced by the reverse-RSI entry points; messages are rendered
/// via `thiserror`.
#[derive(Debug, Error)]
pub enum ReverseRsiError {
    #[error("reverse_rsi: Input data slice is empty.")]
    EmptyInputData,

    #[error("reverse_rsi: All values are NaN.")]
    AllValuesNaN,

    #[error("reverse_rsi: Invalid period: period = {period}, data length = {data_len}")]
    InvalidPeriod { period: usize, data_len: usize },

    #[error("reverse_rsi: Invalid RSI level: {level} (must be between 0 and 100)")]
    InvalidRsiLevel { level: f64 },

    #[error("reverse_rsi: Not enough valid data: needed = {needed}, valid = {valid}")]
    NotEnoughValidData { needed: usize, valid: usize },

    #[error("reverse_rsi: output slice length mismatch: expected={expected}, got={got}")]
    OutputLengthMismatch { expected: usize, got: usize },

    // Range fields are stringified so the same variant serves the usize and
    // f64 sweep axes.
    #[error("reverse_rsi: invalid range: start={start}, end={end}, step={step}")]
    InvalidRange {
        start: String,
        end: String,
        step: String,
    },

    #[error("reverse_rsi: invalid kernel for batch: {0:?}")]
    InvalidKernelForBatch(Kernel),
}
344
345#[inline]
346fn reverse_rsi_prepare<'a>(
347 input: &'a ReverseRsiInput,
348 _kernel: Kernel,
349) -> Result<(&'a [f64], usize, usize, f64, usize), ReverseRsiError> {
350 let data: &[f64] = input.as_ref();
351 let len = data.len();
352 if len == 0 {
353 return Err(ReverseRsiError::EmptyInputData);
354 }
355 let first = data
356 .iter()
357 .position(|x| !x.is_nan())
358 .ok_or(ReverseRsiError::AllValuesNaN)?;
359 let rsi_len = input.get_rsi_length();
360 let rsi_lvl = input.get_rsi_level();
361 if rsi_len == 0 || rsi_len > len {
362 return Err(ReverseRsiError::InvalidPeriod {
363 period: rsi_len,
364 data_len: len,
365 });
366 }
367 if !(0.0 < rsi_lvl && rsi_lvl < 100.0) || rsi_lvl.is_nan() || rsi_lvl.is_infinite() {
368 return Err(ReverseRsiError::InvalidRsiLevel { level: rsi_lvl });
369 }
370 let ema_len = rsi_len
371 .checked_mul(2)
372 .and_then(|v| v.checked_sub(1))
373 .ok_or(ReverseRsiError::InvalidPeriod {
374 period: rsi_len,
375 data_len: len,
376 })?;
377 if len - first < ema_len {
378 return Err(ReverseRsiError::NotEnoughValidData {
379 needed: ema_len,
380 valid: len - first,
381 });
382 }
383 Ok((data, first, rsi_len, rsi_lvl, ema_len))
384}
385
/// Sequential (bounds-checked) reverse-RSI kernel.
///
/// Writes `out[first + ema_len - 1 ..]`; earlier slots are left untouched
/// (callers pre-fill the warm-up prefix with NaN).
/// NOTE(review): `x` appears to be the price offset required to move RSI to
/// `rsi_level` on the next bar; the formula mirrors the unsafe/AVX kernels —
/// confirm against the reference implementation.
#[inline(always)]
fn reverse_rsi_compute_into_scalar_safe(
    data: &[f64],
    first: usize,
    rsi_length: usize,
    rsi_level: f64,
    out: &mut [f64],
) -> Result<(), ReverseRsiError> {
    let len = data.len();
    // Wilder-equivalent EMA window (callers validated rsi_length >= 1).
    let ema_len = (2 * rsi_length) - 1;

    let l = rsi_level;
    let inv = 100.0 - l;
    let n_minus_1 = (rsi_length - 1) as f64;
    // RS value at which RSI equals the target level.
    let rs_target = l / inv;
    // Scale applied to negative offsets (downward moves).
    let neg_scale = inv / l;
    let rs_coeff = n_minus_1 * rs_target;

    // EMA smoothing coefficients for the (2n-1)-period window.
    let alpha = 2.0 / (ema_len as f64 + 1.0);
    let beta = 1.0 - alpha;

    let warm_end = first + ema_len;
    // Fast path: when everything from `first` on is finite, skip the
    // per-element finiteness guard.
    let all_finite = data[first..].iter().all(|v| v.is_finite());

    // Warm-up: accumulate raw up/down moves over the first window. The first
    // delta is taken against prev = 0.0, so data[first] counts fully as a
    // gain/loss — this matches the unsafe/AVX kernels and the stream.
    let mut sum_up = 0.0f64;
    let mut sum_dn = 0.0f64;
    let mut prev = 0.0f64;
    for i in first..warm_end {
        let cur = data[i];
        let d = if all_finite || (cur.is_finite() && prev.is_finite()) {
            cur - prev
        } else {
            0.0
        };
        sum_up += d.max(0.0);
        sum_dn += (-d).max(0.0);
        prev = cur;
    }

    // Seed the smoothed averages with the simple mean of the warm-up window.
    let mut up_ema = sum_up / (ema_len as f64);
    let mut dn_ema = sum_dn / (ema_len as f64);

    // Emit the seed value at the last warm-up index.
    let warm_idx = warm_end - 1;
    let base = data[warm_idx];
    let x0 = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
    let m0 = (x0 >= 0.0) as i32 as f64;
    // Branchless select: scale0 = 1.0 when x0 >= 0, else neg_scale.
    let scale0 = neg_scale + m0 * (1.0 - neg_scale);
    let v0 = base + x0 * scale0;
    // Guard: a non-finite result on the negative side collapses to 0.0.
    out[warm_idx] = if v0.is_finite() || x0 >= 0.0 { v0 } else { 0.0 };

    // Steady state: EMA-update the up/down averages and emit per sample.
    prev = base;
    for i in warm_end..len {
        let cur = data[i];
        let d = if all_finite || (cur.is_finite() && prev.is_finite()) {
            cur - prev
        } else {
            0.0
        };
        let up = d.max(0.0);
        let dn = (-d).max(0.0);

        up_ema = beta.mul_add(up_ema, alpha * up);
        dn_ema = beta.mul_add(dn_ema, alpha * dn);

        let x = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
        let m = (x >= 0.0) as i32 as f64;
        let scale = neg_scale + m * (1.0 - neg_scale);
        let v = cur + x * scale;
        out[i] = if v.is_finite() || x >= 0.0 { v } else { 0.0 };
        prev = cur;
    }

    Ok(())
}
460
/// Unchecked-indexing variant of the scalar kernel; numerically identical to
/// `reverse_rsi_compute_into_scalar_safe`.
///
/// # Safety
/// Callers must guarantee `first < data.len()`, `first + (2*rsi_length - 1)
/// <= data.len()`, and `out.len() >= data.len()` — all indices accessed via
/// `get_unchecked` stay below `data.len()`. The dispatchers uphold this via
/// `reverse_rsi_prepare`.
#[inline(always)]
unsafe fn reverse_rsi_compute_into_unsafe_fast(
    data: &[f64],
    first: usize,
    rsi_length: usize,
    rsi_level: f64,
    out: &mut [f64],
) -> Result<(), ReverseRsiError> {
    let len = data.len();
    // Wilder-equivalent EMA window.
    let ema_len = (2 * rsi_length) - 1;

    let l = rsi_level;
    let inv = 100.0 - l;
    let rs_target = l / inv;
    let neg_scale = inv / l;
    let n_minus_1 = (rsi_length - 1) as f64;
    let rs_coeff = n_minus_1 * rs_target;

    let alpha = 2.0 / (ema_len as f64 + 1.0);
    let beta = 1.0 - alpha;

    let warm_end = first + ema_len;
    let mut sum_up = 0.0f64;
    let mut sum_dn = 0.0f64;

    // Hoisted finiteness scan selects between the guarded and unguarded
    // loop bodies below.
    let all_finite = data[first..].iter().all(|v| v.is_finite());

    // Warm-up accumulation; the first delta uses prev = 0.0 (data[first]
    // counts fully), matching the safe kernel.
    let mut i = first;
    if all_finite {
        while i < warm_end {
            let cur = *data.get_unchecked(i);
            let prev = if i == first {
                0.0
            } else {
                *data.get_unchecked(i - 1)
            };
            let d = cur - prev;
            sum_up += d.max(0.0);
            sum_dn += (-d).max(0.0);
            i += 1;
        }
    } else {
        while i < warm_end {
            let cur = *data.get_unchecked(i);
            let prev = if i == first {
                0.0
            } else {
                *data.get_unchecked(i - 1)
            };
            // Non-finite neighbors contribute a zero delta.
            if cur.is_finite() & prev.is_finite() {
                let d = cur - prev;
                sum_up += d.max(0.0);
                sum_dn += (-d).max(0.0);
            }
            i += 1;
        }
    }

    // Seed the smoothed averages with the warm-up mean.
    let mut up_ema = sum_up / (ema_len as f64);
    let mut dn_ema = sum_dn / (ema_len as f64);

    // Emit the seed value at the last warm-up index.
    let warm_idx = warm_end - 1;
    let base = *data.get_unchecked(warm_idx);
    let x0 = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
    let m0 = (x0 >= 0.0) as i32 as f64;
    // Branchless select: 1.0 when x0 >= 0, else neg_scale.
    let scale0 = neg_scale + m0 * (1.0 - neg_scale);
    let v0 = base + x0 * scale0;
    *out.get_unchecked_mut(warm_idx) = if v0.is_finite() || x0 >= 0.0 { v0 } else { 0.0 };

    // Steady-state EMA recursion, split by the finiteness fast path.
    i = warm_end;
    if all_finite {
        while i < len {
            let cur = *data.get_unchecked(i);
            let prev = *data.get_unchecked(i - 1);
            let d = cur - prev;
            let up = d.max(0.0);
            let dn = (-d).max(0.0);
            up_ema = beta.mul_add(up_ema, alpha * up);
            dn_ema = beta.mul_add(dn_ema, alpha * dn);
            let x = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
            let m = (x >= 0.0) as i32 as f64;
            let scale = neg_scale + m * (1.0 - neg_scale);
            let v = cur + x * scale;
            *out.get_unchecked_mut(i) = if v.is_finite() || x >= 0.0 { v } else { 0.0 };
            i += 1;
        }
    } else {
        while i < len {
            let cur = *data.get_unchecked(i);
            let prev = *data.get_unchecked(i - 1);
            let valid = cur.is_finite() & prev.is_finite();
            let d = if valid { cur - prev } else { 0.0 };
            let up = d.max(0.0);
            let dn = (-d).max(0.0);
            up_ema = beta.mul_add(up_ema, alpha * up);
            dn_ema = beta.mul_add(dn_ema, alpha * dn);
            let x = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
            let m = (x >= 0.0) as i32 as f64;
            let scale = neg_scale + m * (1.0 - neg_scale);
            let v = cur + x * scale;
            *out.get_unchecked_mut(i) = if v.is_finite() || x >= 0.0 { v } else { 0.0 };
            i += 1;
        }
    }

    Ok(())
}
568
/// AVX2-accelerated variant: vectorizes only the warm-up up/down summation;
/// the EMA recursion stays scalar (it is serially dependent).
///
/// Compiled without `nightly-avx` + x86_64 + avx2, or when the data contains
/// non-finite values, this forwards to the unchecked scalar kernel, so
/// results match the other kernels bit-for-bit on the fallback path.
#[inline(always)]
fn reverse_rsi_compute_into_avx2_stub(
    data: &[f64],
    first: usize,
    rsi_length: usize,
    rsi_level: f64,
    out: &mut [f64],
) -> Result<(), ReverseRsiError> {
    #[cfg(all(
        feature = "nightly-avx",
        target_arch = "x86_64",
        target_feature = "avx2"
    ))]
    unsafe {
        use core::arch::x86_64::*;
        let len = data.len();
        let ema_len = (2 * rsi_length) - 1;

        let l = rsi_level;
        let inv = 100.0 - l;
        let n_minus_1 = (rsi_length - 1) as f64;
        let rs_target = l / inv;
        let neg_scale = inv / l;
        let rs_coeff = n_minus_1 * rs_target;

        let alpha = 2.0 / (ema_len as f64 + 1.0);
        let beta = 1.0 - alpha;

        let warm_end = first + ema_len;
        // SIMD path assumes finite data; bail out to the guarded kernel.
        let all_finite = data[first..].iter().all(|v| v.is_finite());
        if !all_finite {
            return reverse_rsi_compute_into_unsafe_fast(data, first, rsi_length, rsi_level, out);
        }

        let mut sum_up = 0.0f64;
        let mut sum_dn = 0.0f64;

        // First delta uses prev = 0.0, matching the scalar kernels.
        if first < warm_end {
            let c0 = *data.get_unchecked(first);
            let d0 = c0 - 0.0;
            sum_up += if d0 > 0.0 { d0 } else { 0.0 };
            sum_dn += if d0 < 0.0 { -d0 } else { 0.0 };
        }

        // Vectorized warm-up: 4 deltas per iteration via unaligned loads at
        // i and i-1 (overlapping windows).
        let mut i = first + 1;
        let mut v_up = _mm256_setzero_pd();
        let mut v_dn = _mm256_setzero_pd();
        let v_zero = _mm256_setzero_pd();

        while i + 3 < warm_end {
            let v_cur = _mm256_loadu_pd(data.as_ptr().add(i));
            let v_prev = _mm256_loadu_pd(data.as_ptr().add(i - 1));
            let v_d = _mm256_sub_pd(v_cur, v_prev);
            let v_u = _mm256_max_pd(v_d, v_zero);
            let v_n = _mm256_max_pd(_mm256_sub_pd(v_zero, v_d), v_zero);
            v_up = _mm256_add_pd(v_up, v_u);
            v_dn = _mm256_add_pd(v_dn, v_n);
            i += 4;
        }

        // Horizontal reduction of the vector accumulators.
        let mut buf = [0.0f64; 4];
        _mm256_storeu_pd(buf.as_mut_ptr(), v_up);
        sum_up += buf[0] + buf[1] + buf[2] + buf[3];
        _mm256_storeu_pd(buf.as_mut_ptr(), v_dn);
        sum_dn += buf[0] + buf[1] + buf[2] + buf[3];

        // Scalar tail of the warm-up window.
        while i < warm_end {
            let c = *data.get_unchecked(i);
            let p = *data.get_unchecked(i - 1);
            let d = c - p;
            sum_up += if d > 0.0 { d } else { 0.0 };
            sum_dn += if d < 0.0 { -d } else { 0.0 };
            i += 1;
        }

        // Seed the smoothed averages, then emit the seed value.
        let mut up_ema = sum_up / (ema_len as f64);
        let mut dn_ema = sum_dn / (ema_len as f64);

        let warm_idx = warm_end - 1;
        let base = *data.get_unchecked(warm_idx);
        let x0 = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
        let m0 = (x0 >= 0.0) as i32 as f64;
        let scale0 = neg_scale + m0 * (1.0 - neg_scale);
        let v0 = base + x0 * scale0;
        *out.get_unchecked_mut(warm_idx) = if v0.is_finite() || x0 >= 0.0 { v0 } else { 0.0 };

        // Serial EMA recursion over the remainder.
        let mut j = warm_end;
        while j < len {
            let cur = *data.get_unchecked(j);
            let prev = *data.get_unchecked(j - 1);
            let d = cur - prev;
            let up = if d > 0.0 { d } else { 0.0 };
            let dn = if d < 0.0 { -d } else { 0.0 };

            up_ema = beta.mul_add(up_ema, alpha * up);
            dn_ema = beta.mul_add(dn_ema, alpha * dn);

            let x = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
            let m = (x >= 0.0) as i32 as f64;
            let scale = neg_scale + m * (1.0 - neg_scale);
            let val = cur + x * scale;
            *out.get_unchecked_mut(j) = if val.is_finite() || x >= 0.0 {
                val
            } else {
                0.0
            };
            j += 1;
        }

        return Ok(());
    }

    // Fallback when the AVX2 cfg is not active (unreachable otherwise).
    unsafe { reverse_rsi_compute_into_unsafe_fast(data, first, rsi_length, rsi_level, out) }
}
683
/// AVX-512 variant of the AVX2 stub: 8-lane warm-up summation, scalar EMA
/// recursion. Falls back to the AVX2 stub when the avx512f cfg is not
/// active, and to the guarded scalar kernel when data is not all finite.
#[inline(always)]
fn reverse_rsi_compute_into_avx512_stub(
    data: &[f64],
    first: usize,
    rsi_length: usize,
    rsi_level: f64,
    out: &mut [f64],
) -> Result<(), ReverseRsiError> {
    #[cfg(all(
        feature = "nightly-avx",
        target_arch = "x86_64",
        target_feature = "avx512f"
    ))]
    unsafe {
        use core::arch::x86_64::*;
        let len = data.len();
        let ema_len = (2 * rsi_length) - 1;

        let l = rsi_level;
        let inv = 100.0 - l;
        let n_minus_1 = (rsi_length - 1) as f64;
        let rs_target = l / inv;
        let neg_scale = inv / l;
        let rs_coeff = n_minus_1 * rs_target;

        let alpha = 2.0 / (ema_len as f64 + 1.0);
        let beta = 1.0 - alpha;

        let warm_end = first + ema_len;
        // SIMD path assumes finite data; bail out to the guarded kernel.
        let all_finite = data[first..].iter().all(|v| v.is_finite());
        if !all_finite {
            return reverse_rsi_compute_into_unsafe_fast(data, first, rsi_length, rsi_level, out);
        }

        let mut sum_up = 0.0f64;
        let mut sum_dn = 0.0f64;

        // First delta uses prev = 0.0, matching the scalar kernels.
        if first < warm_end {
            let c0 = *data.get_unchecked(first);
            let d0 = c0 - 0.0;
            sum_up += if d0 > 0.0 { d0 } else { 0.0 };
            sum_dn += if d0 < 0.0 { -d0 } else { 0.0 };
        }

        // Vectorized warm-up: 8 deltas per iteration via unaligned loads at
        // i and i-1 (overlapping windows).
        let mut i = first + 1;
        let mut v_up = _mm512_setzero_pd();
        let mut v_dn = _mm512_setzero_pd();
        let v_zero = _mm512_setzero_pd();

        while i + 7 < warm_end {
            let v_cur = _mm512_loadu_pd(data.as_ptr().add(i));
            let v_prev = _mm512_loadu_pd(data.as_ptr().add(i - 1));
            let v_d = _mm512_sub_pd(v_cur, v_prev);
            let v_u = _mm512_max_pd(v_d, v_zero);
            let v_n = _mm512_max_pd(_mm512_sub_pd(v_zero, v_d), v_zero);
            v_up = _mm512_add_pd(v_up, v_u);
            v_dn = _mm512_add_pd(v_dn, v_n);
            i += 8;
        }

        // Horizontal reduction of the vector accumulators.
        let mut buf = [0.0f64; 8];
        _mm512_storeu_pd(buf.as_mut_ptr(), v_up);
        sum_up += buf.iter().sum::<f64>();
        _mm512_storeu_pd(buf.as_mut_ptr(), v_dn);
        sum_dn += buf.iter().sum::<f64>();

        // Scalar tail of the warm-up window.
        while i < warm_end {
            let c = *data.get_unchecked(i);
            let p = *data.get_unchecked(i - 1);
            let d = c - p;
            sum_up += if d > 0.0 { d } else { 0.0 };
            sum_dn += if d < 0.0 { -d } else { 0.0 };
            i += 1;
        }

        // Seed the smoothed averages, then emit the seed value.
        let mut up_ema = sum_up / (ema_len as f64);
        let mut dn_ema = sum_dn / (ema_len as f64);

        let warm_idx = warm_end - 1;
        let base = *data.get_unchecked(warm_idx);
        let x0 = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
        let m0 = (x0 >= 0.0) as i32 as f64;
        let scale0 = neg_scale + m0 * (1.0 - neg_scale);
        let v0 = base + x0 * scale0;
        *out.get_unchecked_mut(warm_idx) = if v0.is_finite() || x0 >= 0.0 { v0 } else { 0.0 };

        // Serial EMA recursion over the remainder.
        let mut j = warm_end;
        while j < len {
            let cur = *data.get_unchecked(j);
            let prev = *data.get_unchecked(j - 1);
            let d = cur - prev;
            let up = if d > 0.0 { d } else { 0.0 };
            let dn = if d < 0.0 { -d } else { 0.0 };

            up_ema = beta.mul_add(up_ema, alpha * up);
            dn_ema = beta.mul_add(dn_ema, alpha * dn);

            let x = rs_coeff.mul_add(dn_ema, -n_minus_1 * up_ema);
            let m = (x >= 0.0) as i32 as f64;
            let scale = neg_scale + m * (1.0 - neg_scale);
            let val = cur + x * scale;
            *out.get_unchecked_mut(j) = if val.is_finite() || x >= 0.0 {
                val
            } else {
                0.0
            };
            j += 1;
        }

        return Ok(());
    }

    // Fallback when the AVX-512 cfg is not active (unreachable otherwise).
    reverse_rsi_compute_into_avx2_stub(data, first, rsi_length, rsi_level, out)
}
798
799#[inline(always)]
800fn reverse_rsi_compute_into(
801 data: &[f64],
802 first: usize,
803 rsi_length: usize,
804 rsi_level: f64,
805 kern: Kernel,
806 out: &mut [f64],
807) -> Result<(), ReverseRsiError> {
808 let k = to_non_batch(match kern {
809 Kernel::Auto => detect_best_kernel(),
810 x => x,
811 });
812 match k {
813 Kernel::Avx512 => {
814 reverse_rsi_compute_into_avx512_stub(data, first, rsi_length, rsi_level, out)
815 }
816 Kernel::Avx2 => reverse_rsi_compute_into_avx2_stub(data, first, rsi_length, rsi_level, out),
817 _ => reverse_rsi_compute_into_scalar_safe(data, first, rsi_length, rsi_level, out),
818 }
819}
820
821#[inline(always)]
822fn to_non_batch(k: Kernel) -> Kernel {
823 match k {
824 Kernel::Auto => detect_best_kernel(),
825 Kernel::ScalarBatch => Kernel::Scalar,
826 Kernel::Avx2Batch => Kernel::Avx2,
827 Kernel::Avx512Batch => Kernel::Avx512,
828 other => other,
829 }
830}
831
/// Runs `ema_into_slice` with a batch-normalized kernel, converting any EMA
/// failure into `ReverseRsiError`.
///
/// NOTE(review): the mapping is lossy — every EMA error (not only
/// insufficient data) becomes `NotEnoughValidData`, and the `needed`/`valid`
/// fields are best-effort approximations here.
#[inline]
fn ema_into_slice_or_wrap(
    dst: &mut [f64],
    inp: &EmaInput,
    kern: Kernel,
) -> Result<(), ReverseRsiError> {
    let k = to_non_batch(kern);
    ema_into_slice(dst, inp, k).map_err(|_| ReverseRsiError::NotEnoughValidData {
        needed: inp.params.period.unwrap_or(1),
        valid: dst.len(),
    })
}
844
/// Computes reverse RSI with automatic kernel selection.
#[inline]
pub fn reverse_rsi(input: &ReverseRsiInput) -> Result<ReverseRsiOutput, ReverseRsiError> {
    reverse_rsi_with_kernel(input, Kernel::Auto)
}
849
/// Computes reverse RSI over `input` using the requested kernel.
///
/// Allocates an output the length of the input with a NaN prefix of
/// `first + ema_len - 1` samples; the kernel fills the remainder.
pub fn reverse_rsi_with_kernel(
    input: &ReverseRsiInput,
    kernel: Kernel,
) -> Result<ReverseRsiOutput, ReverseRsiError> {
    let (data, first, rsi_len, rsi_lvl, ema_len) = reverse_rsi_prepare(input, kernel)?;
    let mut out = alloc_with_nan_prefix(data.len(), first + ema_len - 1);
    reverse_rsi_compute_into(data, first, rsi_len, rsi_lvl, kernel, &mut out)?;
    Ok(ReverseRsiOutput { values: out })
}
859
860#[cfg(not(all(target_arch = "wasm32", feature = "wasm")))]
861#[inline]
862pub fn reverse_rsi_into(input: &ReverseRsiInput, out: &mut [f64]) -> Result<(), ReverseRsiError> {
863 let (data, first, rsi_len, rsi_lvl, ema_len) = reverse_rsi_prepare(input, Kernel::Auto)?;
864 if out.len() != data.len() {
865 return Err(ReverseRsiError::OutputLengthMismatch {
866 expected: data.len(),
867 got: out.len(),
868 });
869 }
870
871 let warm = (first + ema_len - 1).min(out.len());
872 for v in &mut out[..warm] {
873 *v = f64::from_bits(0x7ff8_0000_0000_0000);
874 }
875
876 reverse_rsi_compute_into(data, first, rsi_len, rsi_lvl, Kernel::Auto, out)
877}
878
/// Forces the AVX2 kernel (nightly-avx builds on x86_64 only).
#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
#[inline]
pub fn reverse_rsi_avx2(input: &ReverseRsiInput) -> Result<ReverseRsiOutput, ReverseRsiError> {
    reverse_rsi_with_kernel(input, Kernel::Avx2)
}
884
/// Forces the AVX-512 kernel (nightly-avx builds on x86_64 only).
#[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
#[inline]
pub fn reverse_rsi_avx512(input: &ReverseRsiInput) -> Result<ReverseRsiOutput, ReverseRsiError> {
    reverse_rsi_with_kernel(input, Kernel::Avx512)
}
890
891#[inline]
892pub fn reverse_rsi_into_slice(
893 dst: &mut [f64],
894 input: &ReverseRsiInput,
895 kernel: Kernel,
896) -> Result<(), ReverseRsiError> {
897 let (data, first, rsi_len, rsi_lvl, ema_len) = reverse_rsi_prepare(input, kernel)?;
898 if dst.len() != data.len() {
899 return Err(ReverseRsiError::OutputLengthMismatch {
900 expected: data.len(),
901 got: dst.len(),
902 });
903 }
904 reverse_rsi_compute_into(data, first, rsi_len, rsi_lvl, kernel, dst)?;
905 for v in &mut dst[..first + ema_len - 1] {
906 *v = f64::NAN;
907 }
908 Ok(())
909}
910
/// Incremental (one value at a time) reverse-RSI state.
pub struct ReverseRsiStream {
    // Configured look-back length n.
    rsi_length: usize,
    // Target RSI level in (0, 100).
    rsi_level: f64,
    // Wilder-equivalent EMA window: 2*n - 1.
    ema_length: usize,
    // EMA coefficients: alpha = 2/(ema_length+1), beta = 1 - alpha.
    alpha: f64,
    beta: f64,

    // Precomputed constants mirroring the batch kernels.
    n_minus_1: f64,
    rs_target: f64,
    rs_coeff: f64,
    neg_scale: f64,

    // True once the first finite sample has been consumed.
    seen_first: bool,
    // Number of warm-up samples accumulated so far.
    warm_count: usize,
    // Raw up/down sums over the warm-up window.
    sum_up: f64,
    sum_dn: f64,
    // Smoothed up/down averages after warm-up.
    up_ema: f64,
    down_ema: f64,
    // Previous input value (NaN before any update).
    prev: f64,
}
931
impl ReverseRsiStream {
    /// Validates `params` and builds a fresh stream in the warm-up state.
    ///
    /// # Errors
    /// `InvalidPeriod` when the length is 0; `InvalidRsiLevel` when the
    /// level is outside (0, 100) or non-finite.
    #[inline]
    pub fn try_new(params: ReverseRsiParams) -> Result<Self, ReverseRsiError> {
        let rsi_length = params.rsi_length.unwrap_or(14);
        if rsi_length == 0 {
            return Err(ReverseRsiError::InvalidPeriod {
                period: 0,
                data_len: 0,
            });
        }

        let rsi_level = params.rsi_level.unwrap_or(50.0);
        if !(0.0 < rsi_level && rsi_level < 100.0) || !rsi_level.is_finite() {
            return Err(ReverseRsiError::InvalidRsiLevel { level: rsi_level });
        }

        // Same constants as the batch kernels (see scalar kernel).
        let ema_length = (2 * rsi_length).saturating_sub(1);
        let alpha = 2.0 / (ema_length as f64 + 1.0);
        let beta = 1.0 - alpha;

        let n_minus_1 = (rsi_length - 1) as f64;
        let inv = 100.0 - rsi_level;
        let rs_target = rsi_level / inv;
        let rs_coeff = n_minus_1 * rs_target;
        let neg_scale = inv / rsi_level;

        Ok(Self {
            rsi_length,
            rsi_level,
            ema_length,
            alpha,
            beta,
            n_minus_1,
            rs_target,
            rs_coeff,
            neg_scale,
            seen_first: false,
            warm_count: 0,
            sum_up: 0.0,
            sum_dn: 0.0,
            up_ema: 0.0,
            down_ema: 0.0,
            prev: f64::NAN,
        })
    }

    /// Feeds one value; returns `None` during warm-up, `Some(reverse_rsi)`
    /// once the window is seeded.
    #[inline(always)]
    pub fn update(&mut self, value: f64) -> Option<f64> {
        if !self.seen_first {
            // Non-finite values are skipped until the first finite sample
            // arrives (the batch kernels skip only leading NaN — slight
            // asymmetry for ±inf; NOTE(review): confirm intended).
            if !value.is_finite() {
                self.prev = value;
                return None;
            }

            // First finite sample counts fully as a gain/loss (delta taken
            // against an implicit 0.0), matching the batch kernels.
            let d = value;
            self.sum_up += if d > 0.0 { d } else { 0.0 };
            self.sum_dn += if d < 0.0 { -d } else { 0.0 };
            self.warm_count = 1;
            self.prev = value;
            self.seen_first = true;

            // Degenerate window (rsi_length == 1): seed immediately.
            if self.ema_length == 1 {
                self.up_ema = self.sum_up;
                self.down_ema = self.sum_dn;
                return Some(self.emit_seed(value));
            }
            return None;
        }

        // Deltas involving a non-finite neighbor contribute zero.
        let d = if value.is_finite() && self.prev.is_finite() {
            value - self.prev
        } else {
            0.0
        };
        let up = if d > 0.0 { d } else { 0.0 };
        let dn = if d < 0.0 { -d } else { 0.0 };

        if self.warm_count < self.ema_length {
            // Still warming up: accumulate raw sums.
            self.warm_count += 1;
            self.sum_up += up;
            self.sum_dn += dn;
            self.prev = value;

            if self.warm_count == self.ema_length {
                // Window complete: seed the smoothed averages with the
                // simple mean and emit the first output.
                self.up_ema = self.sum_up / (self.ema_length as f64);
                self.down_ema = self.sum_dn / (self.ema_length as f64);

                return Some(self.emit_seed(value));
            }
            return None;
        }

        // Steady state: EMA update then emit.
        self.up_ema = self.beta.mul_add(self.up_ema, self.alpha * up);
        self.down_ema = self.beta.mul_add(self.down_ema, self.alpha * dn);

        let out = self.emit_from(value);
        self.prev = value;
        Some(out)
    }

    /// `update` variant that yields NaN instead of `None` during warm-up.
    #[inline]
    pub fn next(&mut self, value: f64) -> f64 {
        self.update(value).unwrap_or(f64::NAN)
    }

    /// Emits the seed output at the end of warm-up (same formula as
    /// `emit_from`, applied to the seeding sample).
    #[inline(always)]
    fn emit_seed(&self, base: f64) -> f64 {
        let x0 = self
            .rs_coeff
            .mul_add(self.down_ema, -self.n_minus_1 * self.up_ema);

        // Branchless select: scale0 = 1.0 when x0 >= 0, else neg_scale.
        let m = (x0 >= 0.0) as i32 as f64;
        let scale0 = self.neg_scale + m * (1.0 - self.neg_scale);
        let v0 = base + x0 * scale0;
        // Non-finite results on the negative side collapse to 0.0.
        if v0.is_finite() || x0 >= 0.0 {
            v0
        } else {
            0.0
        }
    }

    /// Emits one steady-state output for the current sample.
    #[inline(always)]
    fn emit_from(&self, cur: f64) -> f64 {
        let x = self
            .rs_coeff
            .mul_add(self.down_ema, -self.n_minus_1 * self.up_ema);
        let m = (x >= 0.0) as i32 as f64;
        let scale = self.neg_scale + m * (1.0 - self.neg_scale);
        let v = cur + x * scale;
        if v.is_finite() || x >= 0.0 {
            v
        } else {
            0.0
        }
    }
}
1068
/// Batch entry point with explicit kernel validation.
///
/// `Kernel::Auto` resolves to the best batch kernel for the platform;
/// passing a non-batch kernel is an error. Always runs the parallel path.
pub fn reverse_rsi_batch_with_kernel(
    data: &[f64],
    sweep: &ReverseRsiBatchRange,
    k: Kernel,
) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
    let kernel = match k {
        Kernel::Auto => detect_best_batch_kernel(),
        other if other.is_batch() => other,
        other => {
            return Err(ReverseRsiError::InvalidKernelForBatch(other));
        }
    };

    reverse_rsi_batch_inner(data, sweep, kernel, true)
}
1084
1085fn axis_usize((start, end, step): (usize, usize, usize)) -> Result<Vec<usize>, ReverseRsiError> {
1086 if step == 0 || start == end {
1087 return Ok(vec![start]);
1088 }
1089
1090 if start < end {
1091 let mut v = Vec::new();
1092 let mut x = start;
1093 let st = step.max(1);
1094 while x <= end {
1095 v.push(x);
1096 x = x
1097 .checked_add(st)
1098 .ok_or_else(|| ReverseRsiError::InvalidRange {
1099 start: start.to_string(),
1100 end: end.to_string(),
1101 step: step.to_string(),
1102 })?;
1103 }
1104 if v.is_empty() {
1105 return Err(ReverseRsiError::InvalidRange {
1106 start: start.to_string(),
1107 end: end.to_string(),
1108 step: step.to_string(),
1109 });
1110 }
1111 return Ok(v);
1112 }
1113
1114 let mut v = Vec::new();
1115 let mut x = start as isize;
1116 let end_i = end as isize;
1117 let st = (step as isize).max(1);
1118 while x >= end_i {
1119 v.push(x as usize);
1120 x -= st;
1121 }
1122 if v.is_empty() {
1123 return Err(ReverseRsiError::InvalidRange {
1124 start: start.to_string(),
1125 end: end.to_string(),
1126 step: step.to_string(),
1127 });
1128 }
1129 Ok(v)
1130}
1131
1132fn axis_f64((start, end, step): (f64, f64, f64)) -> Result<Vec<f64>, ReverseRsiError> {
1133 if step.abs() < 1e-12 || (start - end).abs() < 1e-12 {
1134 return Ok(vec![start]);
1135 }
1136 let mut v = Vec::new();
1137 let mut x = start;
1138 if step > 0.0 {
1139 while x <= end + 1e-12 {
1140 v.push(x);
1141 x += step;
1142 }
1143 } else {
1144 while x >= end - 1e-12 {
1145 v.push(x);
1146 x += step;
1147 }
1148 }
1149 if v.is_empty() {
1150 return Err(ReverseRsiError::InvalidRange {
1151 start: start.to_string(),
1152 end: end.to_string(),
1153 step: step.to_string(),
1154 });
1155 }
1156 Ok(v)
1157}
1158
1159pub(crate) fn expand_grid(
1160 sweep: &ReverseRsiBatchRange,
1161) -> Result<Vec<ReverseRsiParams>, ReverseRsiError> {
1162 let (len_start, len_end, len_step) = sweep.rsi_length_range;
1163 let (lvl_start, lvl_end, lvl_step) = sweep.rsi_level_range;
1164
1165 let lengths = axis_usize((len_start, len_end, len_step))?;
1166 let levels = axis_f64((lvl_start, lvl_end, lvl_step))?;
1167
1168 let cap =
1169 lengths
1170 .len()
1171 .checked_mul(levels.len())
1172 .ok_or_else(|| ReverseRsiError::InvalidRange {
1173 start: lengths.len().to_string(),
1174 end: levels.len().to_string(),
1175 step: "lengths*levels".into(),
1176 })?;
1177
1178 let mut combos = Vec::with_capacity(cap);
1179 for &length in &lengths {
1180 for &level in &levels {
1181 combos.push(ReverseRsiParams {
1182 rsi_length: Some(length),
1183 rsi_level: Some(level),
1184 });
1185 }
1186 }
1187
1188 Ok(combos)
1189}
1190
/// Runs a Reverse RSI parameter sweep over `data`, computing rows sequentially.
///
/// `sweep` gives the `(start, end, step)` ranges for RSI length and level;
/// `kern` selects the compute kernel (`Kernel::Auto` picks the best available).
///
/// # Errors
/// Propagates errors for empty input, invalid sweep ranges, or size overflow.
#[inline(always)]
pub fn reverse_rsi_batch_slice(
    data: &[f64],
    sweep: &ReverseRsiBatchRange,
    kern: Kernel,
) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
    reverse_rsi_batch_inner(data, sweep, kern, false)
}
1199
/// Parallel variant of [`reverse_rsi_batch_slice`]: rows are computed on the
/// rayon thread pool (falls back to sequential on wasm32).
///
/// # Errors
/// Propagates errors for empty input, invalid sweep ranges, or size overflow.
#[inline(always)]
pub fn reverse_rsi_batch_par_slice(
    data: &[f64],
    sweep: &ReverseRsiBatchRange,
    kern: Kernel,
) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
    reverse_rsi_batch_inner(data, sweep, kern, true)
}
1208
1209#[inline(always)]
1210fn reverse_rsi_batch_inner(
1211 data: &[f64],
1212 sweep: &ReverseRsiBatchRange,
1213 kern: Kernel,
1214 parallel: bool,
1215) -> Result<ReverseRsiBatchOutput, ReverseRsiError> {
1216 let combos = expand_grid(sweep)?;
1217 let cols = data.len();
1218 let rows = combos.len();
1219
1220 if cols == 0 {
1221 return Err(ReverseRsiError::EmptyInputData);
1222 }
1223
1224 let total = rows
1225 .checked_mul(cols)
1226 .ok_or_else(|| ReverseRsiError::InvalidRange {
1227 start: rows.to_string(),
1228 end: cols.to_string(),
1229 step: "rows*cols".into(),
1230 })?;
1231
1232 let mut buf_mu = make_uninit_matrix(rows, cols);
1233
1234 let warm: Vec<usize> = combos
1235 .iter()
1236 .map(|c| {
1237 let first = data.iter().position(|x| !x.is_nan()).unwrap_or(0);
1238 let ema_length = (2 * c.rsi_length.unwrap_or(14)) - 1;
1239 first + ema_length
1240 })
1241 .collect();
1242
1243 init_matrix_prefixes(&mut buf_mu, cols, &warm);
1244
1245 let mut buf_guard = core::mem::ManuallyDrop::new(buf_mu);
1246 let out: &mut [f64] = unsafe {
1247 core::slice::from_raw_parts_mut(buf_guard.as_mut_ptr() as *mut f64, buf_guard.len())
1248 };
1249
1250 if buf_guard.len() != total {
1251 return Err(ReverseRsiError::OutputLengthMismatch {
1252 expected: total,
1253 got: buf_guard.len(),
1254 });
1255 }
1256
1257 reverse_rsi_batch_inner_into(data, &combos, kern, parallel, out)?;
1258
1259 let values = unsafe {
1260 Vec::from_raw_parts(
1261 buf_guard.as_mut_ptr() as *mut f64,
1262 buf_guard.len(),
1263 buf_guard.capacity(),
1264 )
1265 };
1266
1267 Ok(ReverseRsiBatchOutput {
1268 values,
1269 combos,
1270 rows,
1271 cols,
1272 })
1273}
1274
/// Core batch kernel: writes one Reverse RSI series per parameter combo into
/// `out`, row-major with shape `combos.len() x data.len()`.
///
/// When the scalar kernel is selected and several combos share an RSI length,
/// a fused path computes the up/down EMA state once per length and reuses it
/// for every level in that group. Otherwise each row is computed independently
/// (optionally in parallel via rayon).
#[inline(always)]
fn reverse_rsi_batch_inner_into(
    data: &[f64],
    combos: &[ReverseRsiParams],
    kern: Kernel,
    parallel: bool,
    out: &mut [f64],
) -> Result<(), ReverseRsiError> {
    let cols = data.len();
    let rows = combos.len();

    // `out` must hold exactly rows*cols values (guard the product too).
    let expected = rows
        .checked_mul(cols)
        .ok_or_else(|| ReverseRsiError::InvalidRange {
            start: rows.to_string(),
            end: cols.to_string(),
            step: "rows*cols".into(),
        })?;
    if out.len() != expected {
        return Err(ReverseRsiError::OutputLengthMismatch {
            expected,
            got: out.len(),
        });
    }
    // Resolve the (possibly batch) kernel choice to its per-row equivalent.
    let row_kern = to_non_batch(match kern {
        Kernel::Auto => detect_best_batch_kernel(),
        k => k,
    });

    // Fused scalar path: share EMA state across combos with equal rsi_length.
    if matches!(kern, Kernel::ScalarBatch | Kernel::Auto) && matches!(row_kern, Kernel::Scalar) {
        let len = data.len();
        if len == 0 {
            return Err(ReverseRsiError::EmptyInputData);
        }
        let first = data.iter().position(|x| !x.is_nan()).unwrap_or(0);

        // Group rows by rsi_length; BTreeMap keeps deterministic order.
        let mut groups: std::collections::BTreeMap<usize, Vec<(usize, f64)>> =
            std::collections::BTreeMap::new();
        for (row, p) in combos.iter().enumerate() {
            let l = p.rsi_length.unwrap_or(14);
            let level = p.rsi_level.unwrap_or(50.0);
            groups.entry(l).or_default().push((row, level));
        }

        // If no length is shared, fusing buys nothing — fall back to the
        // plain per-row kernel (a failed row degrades to all-NaN).
        let all_singletons = groups.values().all(|rows| rows.len() == 1);
        if all_singletons {
            for (r, s) in out.chunks_mut(cols).enumerate() {
                let input = ReverseRsiInput::from_slice(data, combos[r].clone());
                if reverse_rsi_into_slice(s, &input, row_kern).is_err() {
                    for v in s {
                        *v = f64::NAN;
                    }
                }
            }
            return Ok(());
        }

        for (rsi_length, rows) in groups {
            // ema_len = 2n-1 makes alpha = 2/(ema_len+1) = 1/n, i.e.
            // Wilder-style RSI smoothing.
            let ema_len = (2 * rsi_length) - 1;
            // NOTE(review): groups whose warm-up window exceeds the data are
            // skipped entirely — their cells past the pre-filled NaN prefix
            // stay unwritten; confirm the caller's prefix fill covers them.
            if len - first < ema_len {
                continue;
            }
            let warm_end = first + ema_len;
            let warm_idx = warm_end - 1;
            let all_finite = data[first..].iter().all(|v| v.is_finite());

            // Seed both EMAs with the plain average of gains/losses over the
            // warm window. `prev` starts at 0.0, matching the reference
            // implementation's treatment of the first delta.
            let mut sum_up = 0.0f64;
            let mut sum_dn = 0.0f64;
            let mut prev = 0.0f64;
            for i in first..warm_end {
                let cur = data[i];
                // Non-finite neighbors contribute a zero delta.
                let d = if all_finite || (cur.is_finite() && prev.is_finite()) {
                    cur - prev
                } else {
                    0.0
                };
                sum_up += d.max(0.0);
                sum_dn += (-d).max(0.0);
                prev = cur;
            }
            let mut up_ema = sum_up / (ema_len as f64);
            let mut dn_ema = sum_dn / (ema_len as f64);

            let n_minus_1 = (rsi_length - 1) as f64;
            let alpha = 2.0 / (ema_len as f64 + 1.0);
            let beta = 1.0 - alpha;

            let base = data[warm_idx];
            for &(row, rsi_level) in &rows {
                let l = rsi_level;
                // NOTE(review): levels outside (0, 100) are skipped entirely
                // in this fused path, so such rows are never written past the
                // NaN prefix — confirm levels are validated upstream.
                if !(0.0 < l && l < 100.0) || !l.is_finite() {
                    continue;
                }
                let inv = 100.0 - l;
                let neg_scale = inv / l;
                let rs_target = l / inv;
                // x0: raw gain/loss delta needed to reach RS = l/(100-l);
                // scale0 maps it to a price delta (1 for x0 >= 0, (100-l)/l
                // for negative moves).
                let x0 = n_minus_1.mul_add(dn_ema * rs_target, -n_minus_1 * up_ema);
                let m0 = (x0 >= 0.0) as i32 as f64;
                let scale0 = neg_scale + m0 * (1.0 - neg_scale);
                let v0 = base + x0 * scale0;
                // Non-finite negative results are clamped to 0.0.
                out[row * cols + warm_idx] = if v0.is_finite() || x0 >= 0.0 { v0 } else { 0.0 };
            }

            // Main recurrence: advance the shared EMA state once per bar,
            // then emit one value per level in this length group.
            prev = base;
            for i in warm_end..len {
                let cur = data[i];
                let d = if all_finite || (cur.is_finite() && prev.is_finite()) {
                    cur - prev
                } else {
                    0.0
                };
                let up = d.max(0.0);
                let dn = (-d).max(0.0);
                up_ema = beta.mul_add(up_ema, alpha * up);
                dn_ema = beta.mul_add(dn_ema, alpha * dn);

                for &(row, rsi_level) in &rows {
                    let l = rsi_level;
                    let inv = 100.0 - l;
                    let rs_target = l / inv;
                    let neg_scale = inv / l;
                    let x = n_minus_1.mul_add(dn_ema * rs_target, -n_minus_1 * up_ema);
                    let m = (x >= 0.0) as i32 as f64;
                    let scale = neg_scale + m * (1.0 - neg_scale);
                    let v = cur + x * scale;
                    out[row * cols + i] = if v.is_finite() || x >= 0.0 { v } else { 0.0 };
                }
                prev = cur;
            }
        }
        return Ok(());
    }

    // Generic path: each row computed independently with the per-row kernel;
    // a failed row degrades to all-NaN rather than aborting the batch.
    let do_row = |row: usize, dst: &mut [f64]| {
        let input = ReverseRsiInput::from_slice(data, combos[row].clone());
        if reverse_rsi_into_slice(dst, &input, row_kern).is_err() {
            for v in dst {
                *v = f64::NAN;
            }
        }
    };

    if parallel {
        #[cfg(not(target_arch = "wasm32"))]
        out.par_chunks_mut(cols)
            .enumerate()
            .for_each(|(r, s)| do_row(r, s));
        // wasm has no rayon: run sequentially even when parallel is requested.
        #[cfg(target_arch = "wasm32")]
        for (r, s) in out.chunks_mut(cols).enumerate() {
            do_row(r, s);
        }
    } else {
        for (r, s) in out.chunks_mut(cols).enumerate() {
            do_row(r, s);
        }
    }
    Ok(())
}
1433
1434pub fn reverse_rsi_batch(
1435 data_matrix: &[f64],
1436 rows: usize,
1437 cols: usize,
1438 params: &[ReverseRsiParams],
1439) -> Result<Vec<Vec<f64>>, ReverseRsiError> {
1440 let expected = rows
1441 .checked_mul(cols)
1442 .ok_or_else(|| ReverseRsiError::InvalidRange {
1443 start: rows.to_string(),
1444 end: cols.to_string(),
1445 step: "rows*cols".into(),
1446 })?;
1447 if data_matrix.len() != expected {
1448 return Err(ReverseRsiError::InvalidPeriod {
1449 period: data_matrix.len(),
1450 data_len: expected,
1451 });
1452 }
1453
1454 let params_len = params.len();
1455 if params_len != cols && params_len != 1 {
1456 return Err(ReverseRsiError::InvalidPeriod {
1457 period: params_len,
1458 data_len: cols,
1459 });
1460 }
1461
1462 let kernel = detect_best_batch_kernel();
1463 let mut results = Vec::with_capacity(cols);
1464
1465 #[cfg(not(target_arch = "wasm32"))]
1466 {
1467 results = (0..cols)
1468 .into_par_iter()
1469 .map(|col| {
1470 let col_data: Vec<f64> =
1471 (0..rows).map(|row| data_matrix[row * cols + col]).collect();
1472
1473 let param_idx = if params_len == 1 { 0 } else { col };
1474 let input = ReverseRsiInput::from_slice(&col_data, params[param_idx].clone());
1475
1476 match reverse_rsi_with_kernel(&input, kernel) {
1477 Ok(output) => output.values,
1478 Err(_) => vec![f64::NAN; rows],
1479 }
1480 })
1481 .collect();
1482 }
1483
1484 #[cfg(target_arch = "wasm32")]
1485 {
1486 for col in 0..cols {
1487 let col_data: Vec<f64> = (0..rows).map(|row| data_matrix[row * cols + col]).collect();
1488
1489 let param_idx = if params_len == 1 { 0 } else { col };
1490 let input = ReverseRsiInput::from_slice(&col_data, params[param_idx].clone());
1491
1492 let output = match reverse_rsi_with_kernel(&input, kernel) {
1493 Ok(output) => output.values,
1494 Err(_) => vec![f64::NAN; rows],
1495 };
1496
1497 results.push(output);
1498 }
1499 }
1500
1501 Ok(results)
1502}
1503
/// Python binding: computes Reverse RSI for a single (length, level) pair.
///
/// `kernel` optionally names a specific compute kernel; `None` auto-detects.
/// The GIL is released while the indicator runs.
///
/// # Errors
/// Raises `ValueError` for an invalid kernel name or any indicator error.
#[cfg(feature = "python")]
#[pyfunction(name = "reverse_rsi")]
#[pyo3(signature = (data, rsi_length, rsi_level, kernel=None))]
pub fn reverse_rsi_py<'py>(
    py: Python<'py>,
    data: PyReadonlyArray1<'py, f64>,
    rsi_length: usize,
    rsi_level: f64,
    kernel: Option<&str>,
) -> PyResult<Bound<'py, PyArray1<f64>>> {
    let slice_in = data.as_slice()?;
    let kern = validate_kernel(kernel, false)?;
    let params = ReverseRsiParams {
        rsi_length: Some(rsi_length),
        rsi_level: Some(rsi_level),
    };
    let inp = ReverseRsiInput::from_slice(slice_in, params);
    // Drop the GIL for the compute-heavy part.
    let out: Vec<f64> = py
        .allow_threads(|| reverse_rsi_with_kernel(&inp, kern).map(|o| o.values))
        .map_err(|e| PyValueError::new_err(e.to_string()))?;
    Ok(out.into_pyarray(py))
}
1526
/// Python binding: runs a Reverse RSI parameter sweep and returns a dict with:
/// * `"values"` — a `(rows, cols)` ndarray, one row per (length, level) combo;
/// * `"rsi_lengths"` / `"rsi_levels"` — per-row parameter arrays.
///
/// The GIL is released while the (rayon-parallel) batch kernel runs.
///
/// # Errors
/// Raises `ValueError` for an invalid kernel name, a bad sweep range, or
/// `rows * cols` overflow.
#[cfg(feature = "python")]
#[pyfunction(name = "reverse_rsi_batch")]
#[pyo3(signature = (data, rsi_length_range, rsi_level_range, kernel=None))]
pub fn reverse_rsi_batch_py<'py>(
    py: Python<'py>,
    data: PyReadonlyArray1<'py, f64>,
    rsi_length_range: (usize, usize, usize),
    rsi_level_range: (f64, f64, f64),
    kernel: Option<&str>,
) -> PyResult<Bound<'py, PyDict>> {
    // (The numpy traits used below are imported at the top of the file; the
    // previous local `use numpy::{...}` duplicated them.)
    let slice_in = data.as_slice()?;
    let sweep = ReverseRsiBatchRange {
        rsi_length_range,
        rsi_level_range,
    };
    let kern = validate_kernel(kernel, true)?;
    let combos = expand_grid(&sweep).map_err(|e| PyValueError::new_err(e.to_string()))?;
    let rows = combos.len();
    let cols = slice_in.len();
    let total = rows
        .checked_mul(cols)
        .ok_or_else(|| PyValueError::new_err("rows*cols overflow in reverse_rsi_batch_py"))?;
    // Allocate the output uninitialized; the batch kernel fills every cell.
    let out_arr = unsafe { PyArray1::<f64>::new(py, [total], false) };
    let slice_out = unsafe { out_arr.as_slice_mut()? };

    // Release the GIL while the batch kernel runs.
    py.allow_threads(|| {
        reverse_rsi_batch_inner_into(
            slice_in,
            &combos,
            match kern {
                Kernel::Auto => detect_best_batch_kernel(),
                k => k,
            },
            true,
            slice_out,
        )
    })
    .map_err(|e| PyValueError::new_err(e.to_string()))?;

    let dict = PyDict::new(py);
    dict.set_item("values", out_arr.reshape((rows, cols))?)?;
    dict.set_item(
        "rsi_lengths",
        combos
            .iter()
            .map(|p| p.rsi_length.unwrap_or(14) as u64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    dict.set_item(
        "rsi_levels",
        combos
            .iter()
            .map(|p| p.rsi_level.unwrap_or(50.0))
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    Ok(dict.into())
}
1589
/// CUDA Python binding: runs the Reverse RSI sweep on `device_id` and returns
/// the device-resident f32 result together with a dict of per-row parameter
/// axes (`"rsi_lengths"`, `"rsi_levels"`).
///
/// # Errors
/// Raises `ValueError` when CUDA is unavailable or the device kernel fails.
#[cfg(all(feature = "python", feature = "cuda"))]
#[pyfunction(name = "reverse_rsi_cuda_batch_dev")]
#[pyo3(signature = (data_f32, rsi_length_range, rsi_level_range, device_id=0))]
pub fn reverse_rsi_cuda_batch_dev_py<'py>(
    py: Python<'py>,
    data_f32: PyReadonlyArray1<'py, f32>,
    rsi_length_range: (usize, usize, usize),
    rsi_level_range: (f64, f64, f64),
    device_id: usize,
) -> PyResult<(DeviceArrayF32Py, Bound<'py, PyDict>)> {
    if !cuda_available() {
        return Err(PyValueError::new_err("CUDA not available"));
    }
    let slice_in = data_f32.as_slice()?;
    let sweep = ReverseRsiBatchRange {
        rsi_length_range,
        rsi_level_range,
    };
    // Keep the context handle and device id so the returned device buffer
    // outlives the CudaReverseRsi instance.
    let (inner, combos, ctx, dev_id) = py.allow_threads(|| {
        let cuda =
            CudaReverseRsi::new(device_id).map_err(|e| PyValueError::new_err(e.to_string()))?;
        let ctx = cuda.context_arc();
        let dev_id = cuda.device_id();
        cuda.reverse_rsi_batch_dev(slice_in, &sweep)
            .map(|(inner, combos)| (inner, combos, ctx, dev_id))
            .map_err(|e| PyValueError::new_err(e.to_string()))
    })?;
    let dict = PyDict::new(py);
    let lens: Vec<u64> = combos
        .iter()
        .map(|c| c.rsi_length.unwrap_or(14) as u64)
        .collect();
    // `rsi_level` is already f64 — no cast (matches reverse_rsi_batch_py).
    let lvls: Vec<f64> = combos.iter().map(|c| c.rsi_level.unwrap_or(50.0)).collect();
    dict.set_item("rsi_lengths", lens.into_pyarray(py))?;
    dict.set_item("rsi_levels", lvls.into_pyarray(py))?;
    Ok((
        DeviceArrayF32Py {
            inner,
            _ctx: Some(ctx),
            device_id: Some(dev_id),
        },
        dict,
    ))
}
1637
/// CUDA Python binding: Reverse RSI over many series in time-major layout
/// (`cols` series of `rows` bars each) with a single (length, level) pair;
/// returns a device-resident f32 array.
///
/// # Errors
/// Raises `ValueError` when CUDA is unavailable or the device kernel fails.
#[cfg(all(feature = "python", feature = "cuda"))]
#[pyfunction(name = "reverse_rsi_cuda_many_series_one_param_dev")]
#[pyo3(signature = (data_tm_f32, cols, rows, rsi_length, rsi_level, device_id=0))]
pub fn reverse_rsi_cuda_many_series_one_param_dev_py<'py>(
    py: Python<'py>,
    data_tm_f32: PyReadonlyArray1<'py, f32>,
    cols: usize,
    rows: usize,
    rsi_length: usize,
    rsi_level: f64,
    device_id: usize,
) -> PyResult<DeviceArrayF32Py> {
    if !cuda_available() {
        return Err(PyValueError::new_err("CUDA not available"));
    }
    let slice_in = data_tm_f32.as_slice()?;
    let params = ReverseRsiParams {
        rsi_length: Some(rsi_length),
        rsi_level: Some(rsi_level),
    };
    // Keep the context handle and device id so the returned device buffer
    // outlives the CudaReverseRsi instance.
    let (inner, ctx, dev_id) = py.allow_threads(|| {
        let cuda =
            CudaReverseRsi::new(device_id).map_err(|e| PyValueError::new_err(e.to_string()))?;
        let ctx = cuda.context_arc();
        let dev_id = cuda.device_id();
        cuda.reverse_rsi_many_series_one_param_time_major_dev(slice_in, cols, rows, &params)
            .map(|inner| (inner, ctx, dev_id))
            .map_err(|e| PyValueError::new_err(e.to_string()))
    })?;
    Ok(DeviceArrayF32Py {
        inner,
        _ctx: Some(ctx),
        device_id: Some(dev_id),
    })
}
1673
/// Python wrapper around the incremental Reverse RSI stream: feed one price
/// at a time via `update()`.
#[cfg(feature = "python")]
#[pyclass(name = "ReverseRsiStream")]
pub struct ReverseRsiStreamPy {
    // Underlying Rust streaming state machine.
    inner: ReverseRsiStream,
}
1679
#[cfg(feature = "python")]
#[pymethods]
impl ReverseRsiStreamPy {
    /// Creates a streaming Reverse RSI for the given length and level.
    ///
    /// Raises `ValueError` when the parameters are rejected by the core
    /// stream constructor.
    #[new]
    fn new(rsi_length: usize, rsi_level: f64) -> PyResult<Self> {
        let params = ReverseRsiParams {
            rsi_length: Some(rsi_length),
            rsi_level: Some(rsi_level),
        };
        let stream =
            ReverseRsiStream::try_new(params).map_err(|e| PyValueError::new_err(e.to_string()))?;
        Ok(Self { inner: stream })
    }
    /// Feeds one price; returns `None` until the stream has produced a value.
    fn update(&mut self, value: f64) -> Option<f64> {
        self.inner.update(value)
    }
    /// Deprecated alias: like `update()` but returns a plain f64
    /// (presumably NaN during warm-up — see `ReverseRsiStream::next`).
    #[deprecated(note = "use update()")]
    fn next(&mut self, value: f64) -> f64 {
        self.inner.next(value)
    }
}
1701
/// JS-side sweep configuration: one `(start, end, step)` tuple per axis.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Serialize, Deserialize)]
pub struct ReverseRsiBatchConfig {
    pub rsi_length_range: (usize, usize, usize),
    pub rsi_level_range: (f64, f64, f64),
}
1708
/// Serialized batch result for JS: row-major `rows x cols` values plus the
/// parameter combo that produced each row.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Serialize, Deserialize)]
pub struct ReverseRsiBatchJsOutput {
    pub values: Vec<f64>,
    pub combos: Vec<ReverseRsiParams>,
    pub rows: usize,
    pub cols: usize,
}
1717
/// JS entry point for a Reverse RSI sweep: takes a serialized
/// [`ReverseRsiBatchConfig`], returns a serialized [`ReverseRsiBatchJsOutput`].
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen(js_name = reverse_rsi_batch)]
pub fn reverse_rsi_batch_unified_js(data: &[f64], config: JsValue) -> Result<JsValue, JsValue> {
    // Decode the JS config object into the sweep description.
    let ReverseRsiBatchConfig {
        rsi_length_range,
        rsi_level_range,
    } = serde_wasm_bindgen::from_value(config)
        .map_err(|e| JsValue::from_str(&format!("Invalid config: {e}")))?;
    let sweep = ReverseRsiBatchRange {
        rsi_length_range,
        rsi_level_range,
    };
    // Sequential execution (no rayon on wasm) with the auto-detected kernel.
    let batch = reverse_rsi_batch_inner(data, &sweep, detect_best_kernel(), false)
        .map_err(|e| JsValue::from_str(&e.to_string()))?;
    let payload = ReverseRsiBatchJsOutput {
        values: batch.values,
        combos: batch.combos,
        rows: batch.rows,
        cols: batch.cols,
    };
    serde_wasm_bindgen::to_value(&payload)
        .map_err(|e| JsValue::from_str(&format!("Serialization error: {e}")))
}
1738
/// JS entry point for a single Reverse RSI computation; errors surface as
/// JS string values.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_js(
    data: &[f64],
    rsi_length: usize,
    rsi_level: f64,
) -> Result<Vec<f64>, JsValue> {
    // Wrap the inputs and delegate straight to the core implementation.
    let input = ReverseRsiInput::from_slice(
        data,
        ReverseRsiParams {
            rsi_length: Some(rsi_length),
            rsi_level: Some(rsi_level),
        },
    );
    reverse_rsi(&input)
        .map(|out| out.values)
        .map_err(|e| JsValue::from_str(&e.to_string()))
}
1756
/// Allocates an f64 buffer of capacity `len` and hands the raw pointer to the
/// JS side; ownership transfers to the caller and must be returned via
/// `reverse_rsi_free(ptr, len)`.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_alloc(len: usize) -> *mut f64 {
    // ManuallyDrop suppresses the destructor, which is exactly the
    // intentional leak the previous mem::forget expressed.
    let mut buf = core::mem::ManuallyDrop::new(Vec::<f64>::with_capacity(len));
    buf.as_mut_ptr()
}
1765
/// Releases a buffer previously produced by `reverse_rsi_alloc`.
///
/// ABI contract: `ptr` and `len` must come from a matching
/// `reverse_rsi_alloc(len)` call, and the buffer must not be freed twice.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_free(ptr: *mut f64, len: usize) {
    unsafe {
        // SAFETY: caller guarantees ptr/len originate from reverse_rsi_alloc
        // (capacity == len); rebuilding the Vec lets it drop and free.
        let _ = Vec::from_raw_parts(ptr, len, len);
    }
}
1773
/// Computes Reverse RSI from `in_ptr[0..len]` into `out_ptr[0..len]` (wasm
/// ABI). Full aliasing (`in_ptr == out_ptr`) is supported via a temporary.
///
/// ABI contract: both pointers must address `len` valid, properly aligned
/// f64 values.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_into(
    in_ptr: *const f64,
    out_ptr: *mut f64,
    len: usize,
    rsi_length: usize,
    rsi_level: f64,
) -> Result<(), JsValue> {
    if in_ptr.is_null() || out_ptr.is_null() {
        return Err(JsValue::from_str("null pointer passed to reverse_rsi_into"));
    }
    unsafe {
        // SAFETY: caller guarantees both pointers cover `len` valid f64s.
        let data = std::slice::from_raw_parts(in_ptr, len);
        let params = ReverseRsiParams {
            rsi_length: Some(rsi_length),
            rsi_level: Some(rsi_level),
        };
        let input = ReverseRsiInput::from_slice(data, params);
        if in_ptr == out_ptr {
            // In-place call: compute into a temporary so the kernel never
            // observes its own partially-written output.
            let mut temp = vec![0.0; len];
            reverse_rsi_into_slice(&mut temp, &input, detect_best_kernel())
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
            let out = std::slice::from_raw_parts_mut(out_ptr, len);
            out.copy_from_slice(&temp);
        } else {
            // NOTE(review): partially overlapping (but non-equal) ranges
            // would still alias — the ABI assumes callers never do that.
            let out = std::slice::from_raw_parts_mut(out_ptr, len);
            reverse_rsi_into_slice(out, &input, detect_best_kernel())
                .map_err(|e| JsValue::from_str(&e.to_string()))?;
        }
        Ok(())
    }
}
1807
/// Computes Reverse RSI column-by-column over a time-major `rows x cols`
/// matrix at `in_ptr`, writing the result (same layout) to `out_ptr`.
///
/// Returns `0` on success, `-1` on any failure (null pointer, size overflow,
/// or batch error) — this C-style entry point reports errors via return code.
///
/// ABI contract: both pointers must address `rows * cols` valid f64 values.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_batch_columnar_into(
    in_ptr: *const f64,
    out_ptr: *mut f64,
    rows: usize,
    cols: usize,
    rsi_length: usize,
    rsi_level: f64,
) -> i32 {
    // Reject null pointers up front, matching the checks in
    // reverse_rsi_into / reverse_rsi_batch_into (previously missing here).
    if in_ptr.is_null() || out_ptr.is_null() {
        return -1;
    }
    let total_len = match rows.checked_mul(cols) {
        Some(v) => v,
        None => return -1,
    };
    // SAFETY: pointers are non-null and, per the ABI contract, cover
    // `total_len` valid f64 values each.
    let data = unsafe { std::slice::from_raw_parts(in_ptr, total_len) };
    let out = unsafe { std::slice::from_raw_parts_mut(out_ptr, total_len) };

    let params = vec![ReverseRsiParams {
        rsi_length: Some(rsi_length),
        rsi_level: Some(rsi_level),
    }];

    match reverse_rsi_batch(data, rows, cols, &params) {
        Ok(results) => {
            // Scatter each per-column result back into time-major layout.
            for (col, result) in results.iter().enumerate() {
                for (row, &value) in result.iter().enumerate() {
                    out[row * cols + col] = value;
                }
            }
            0
        }
        Err(_) => -1,
    }
}
1842
/// Runs a full Reverse RSI sweep over `in_ptr[0..len]`, writing the row-major
/// `combos x len` result to `out_ptr`; returns the number of rows written.
///
/// ABI contract: `in_ptr` must address `len` valid f64s and `out_ptr` must
/// have room for `expand_grid(sweep).len() * len` f64s.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn reverse_rsi_batch_into(
    in_ptr: *const f64,
    out_ptr: *mut f64,
    len: usize,
    rsi_len_start: usize,
    rsi_len_end: usize,
    rsi_len_step: usize,
    rsi_lvl_start: f64,
    rsi_lvl_end: f64,
    rsi_lvl_step: f64,
) -> Result<usize, JsValue> {
    if in_ptr.is_null() || out_ptr.is_null() {
        return Err(JsValue::from_str(
            "null pointer passed to reverse_rsi_batch_into",
        ));
    }
    unsafe {
        // SAFETY: caller guarantees `in_ptr` covers `len` valid f64s.
        let data = std::slice::from_raw_parts(in_ptr, len);
        let sweep = ReverseRsiBatchRange {
            rsi_length_range: (rsi_len_start, rsi_len_end, rsi_len_step),
            rsi_level_range: (rsi_lvl_start, rsi_lvl_end, rsi_lvl_step),
        };
        let combos = expand_grid(&sweep).map_err(|e| JsValue::from_str(&e.to_string()))?;
        let rows = combos.len();
        let cols = len;

        let total = rows
            .checked_mul(cols)
            .ok_or_else(|| JsValue::from_str("rows*cols overflow in reverse_rsi_batch_into"))?;
        // SAFETY: caller guarantees `out_ptr` has room for `total` f64s.
        let out = std::slice::from_raw_parts_mut(out_ptr, total);
        reverse_rsi_batch_inner_into(data, &combos, detect_best_batch_kernel(), false, out)
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
        Ok(rows)
    }
}
1880
1881#[cfg(test)]
1882mod tests {
1883 use super::*;
1884 use crate::skip_if_unsupported;
1885 use crate::utilities::data_loader::read_candles_from_csv;
1886
1887 fn check_reverse_rsi_partial_params(
1888 test_name: &str,
1889 kernel: Kernel,
1890 ) -> Result<(), Box<dyn Error>> {
1891 skip_if_unsupported!(kernel, test_name);
1892 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
1893 let candles = read_candles_from_csv(file_path)?;
1894
1895 let default_params = ReverseRsiParams {
1896 rsi_length: None,
1897 rsi_level: None,
1898 };
1899 let input = ReverseRsiInput::from_candles(&candles, "close", default_params);
1900 let output = reverse_rsi_with_kernel(&input, kernel)?;
1901 assert_eq!(output.values.len(), candles.close.len());
1902
1903 Ok(())
1904 }
1905
    // Pins the indicator output against previously captured reference values.
    fn check_reverse_rsi_accuracy(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);
        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let params = ReverseRsiParams {
            rsi_length: Some(14),
            rsi_level: Some(50.0),
        };

        let input = ReverseRsiInput::from_candles(&candles, "close", params);
        let result = reverse_rsi_with_kernel(&input, kernel)?;

        assert_eq!(result.values.len(), candles.close.len());

        let expected_last_5 = vec![
            60124.655535277416,
            60064.68013990046,
            60001.56012990757,
            59932.80583491417,
            59877.248275277445,
        ];

        // NOTE(review): this window is values[len-6 .. len-1], i.e. the five
        // values *preceding* the final bar — the very last value is excluded.
        // The expected constants above were presumably captured against this
        // exact slice; confirm whether skipping the final bar is intentional.
        let start = result.values.len().saturating_sub(6);
        let end = result.values.len().saturating_sub(1);

        for (i, &actual) in result.values[start..end].iter().enumerate() {
            let expected = expected_last_5[i];
            assert!(
                (actual - expected).abs() < 0.00001,
                "[{}] Last 5 values mismatch at index {}: expected {}, got {}",
                test_name,
                i,
                expected,
                actual
            );
        }

        Ok(())
    }
1946
1947 fn check_reverse_rsi_default_candles(
1948 test_name: &str,
1949 kernel: Kernel,
1950 ) -> Result<(), Box<dyn Error>> {
1951 skip_if_unsupported!(kernel, test_name);
1952 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
1953 let candles = read_candles_from_csv(file_path)?;
1954
1955 let input = ReverseRsiInput::with_default_candles(&candles);
1956 match input.data {
1957 ReverseRsiData::Candles { source, .. } => assert_eq!(source, "close"),
1958 _ => panic!("Expected ReverseRsiData::Candles"),
1959 }
1960 let output = reverse_rsi_with_kernel(&input, kernel)?;
1961 assert_eq!(output.values.len(), candles.close.len());
1962
1963 Ok(())
1964 }
1965
1966 fn check_reverse_rsi_zero_period(
1967 test_name: &str,
1968 kernel: Kernel,
1969 ) -> Result<(), Box<dyn Error>> {
1970 skip_if_unsupported!(kernel, test_name);
1971 let input_data = [10.0, 20.0, 30.0];
1972 let params = ReverseRsiParams {
1973 rsi_length: Some(0),
1974 rsi_level: None,
1975 };
1976 let input = ReverseRsiInput::from_slice(&input_data, params);
1977 let res = reverse_rsi_with_kernel(&input, kernel);
1978 assert!(
1979 res.is_err(),
1980 "[{}] Reverse RSI should fail with zero period",
1981 test_name
1982 );
1983 Ok(())
1984 }
1985
1986 fn check_reverse_rsi_period_exceeds_length(
1987 test_name: &str,
1988 kernel: Kernel,
1989 ) -> Result<(), Box<dyn Error>> {
1990 skip_if_unsupported!(kernel, test_name);
1991 let data_small = [10.0, 20.0, 30.0];
1992 let params = ReverseRsiParams {
1993 rsi_length: Some(10),
1994 rsi_level: None,
1995 };
1996 let input = ReverseRsiInput::from_slice(&data_small, params);
1997 let res = reverse_rsi_with_kernel(&input, kernel);
1998 assert!(
1999 res.is_err(),
2000 "[{}] Reverse RSI should fail with period exceeding length",
2001 test_name
2002 );
2003 Ok(())
2004 }
2005
2006 fn check_reverse_rsi_very_small_dataset(
2007 test_name: &str,
2008 kernel: Kernel,
2009 ) -> Result<(), Box<dyn Error>> {
2010 skip_if_unsupported!(kernel, test_name);
2011 let single_point = [42.0];
2012 let params = ReverseRsiParams {
2013 rsi_length: Some(14),
2014 rsi_level: None,
2015 };
2016 let input = ReverseRsiInput::from_slice(&single_point, params);
2017 let res = reverse_rsi_with_kernel(&input, kernel);
2018 assert!(
2019 res.is_err(),
2020 "[{}] Reverse RSI should fail with insufficient data",
2021 test_name
2022 );
2023 Ok(())
2024 }
2025
2026 fn check_reverse_rsi_empty_input(
2027 test_name: &str,
2028 kernel: Kernel,
2029 ) -> Result<(), Box<dyn Error>> {
2030 skip_if_unsupported!(kernel, test_name);
2031 let empty: [f64; 0] = [];
2032 let input = ReverseRsiInput::from_slice(&empty, ReverseRsiParams::default());
2033 let res = reverse_rsi_with_kernel(&input, kernel);
2034 assert!(
2035 matches!(res, Err(ReverseRsiError::EmptyInputData)),
2036 "[{}] Reverse RSI should fail with empty input",
2037 test_name
2038 );
2039 Ok(())
2040 }
2041
2042 fn check_reverse_rsi_invalid_level(
2043 test_name: &str,
2044 kernel: Kernel,
2045 ) -> Result<(), Box<dyn Error>> {
2046 skip_if_unsupported!(kernel, test_name);
2047 let data = vec![1.0; 30];
2048
2049 let params = ReverseRsiParams {
2050 rsi_length: Some(14),
2051 rsi_level: Some(150.0),
2052 };
2053 let input = ReverseRsiInput::from_slice(&data, params);
2054 let res = reverse_rsi_with_kernel(&input, kernel);
2055 assert!(
2056 matches!(res, Err(ReverseRsiError::InvalidRsiLevel { .. })),
2057 "[{}] Reverse RSI should fail with invalid level > 100",
2058 test_name
2059 );
2060
2061 let params = ReverseRsiParams {
2062 rsi_length: Some(14),
2063 rsi_level: Some(-10.0),
2064 };
2065 let input = ReverseRsiInput::from_slice(&data, params);
2066 let res = reverse_rsi_with_kernel(&input, kernel);
2067 assert!(
2068 matches!(res, Err(ReverseRsiError::InvalidRsiLevel { .. })),
2069 "[{}] Reverse RSI should fail with negative level",
2070 test_name
2071 );
2072
2073 let params = ReverseRsiParams {
2074 rsi_length: Some(14),
2075 rsi_level: Some(0.0),
2076 };
2077 let input = ReverseRsiInput::from_slice(&data, params);
2078 let res = reverse_rsi_with_kernel(&input, kernel);
2079 assert!(
2080 matches!(res, Err(ReverseRsiError::InvalidRsiLevel { .. })),
2081 "[{}] Reverse RSI should fail with level = 0",
2082 test_name
2083 );
2084
2085 let params = ReverseRsiParams {
2086 rsi_length: Some(14),
2087 rsi_level: Some(100.0),
2088 };
2089 let input = ReverseRsiInput::from_slice(&data, params);
2090 let res = reverse_rsi_with_kernel(&input, kernel);
2091 assert!(
2092 matches!(res, Err(ReverseRsiError::InvalidRsiLevel { .. })),
2093 "[{}] Reverse RSI should fail with level = 100",
2094 test_name
2095 );
2096
2097 Ok(())
2098 }
2099
2100 fn check_reverse_rsi_all_nan(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2101 skip_if_unsupported!(kernel, test_name);
2102 let data = vec![f64::NAN; 20];
2103 let params = ReverseRsiParams::default();
2104 let input = ReverseRsiInput::from_slice(&data, params);
2105 let res = reverse_rsi_with_kernel(&input, kernel);
2106 assert!(
2107 matches!(res, Err(ReverseRsiError::AllValuesNaN)),
2108 "[{}] Reverse RSI should fail with all NaN values",
2109 test_name
2110 );
2111 Ok(())
2112 }
2113
2114 fn check_reverse_rsi_reinput(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2115 skip_if_unsupported!(kernel, test_name);
2116 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2117 let candles = read_candles_from_csv(file_path)?;
2118
2119 let first_params = ReverseRsiParams {
2120 rsi_length: Some(14),
2121 rsi_level: Some(50.0),
2122 };
2123 let first_input = ReverseRsiInput::from_candles(&candles, "close", first_params);
2124 let first_result = reverse_rsi_with_kernel(&first_input, kernel)?;
2125
2126 let second_params = ReverseRsiParams {
2127 rsi_length: Some(14),
2128 rsi_level: Some(50.0),
2129 };
2130 let second_input = ReverseRsiInput::from_slice(&first_result.values, second_params);
2131 let second_result = reverse_rsi_with_kernel(&second_input, kernel)?;
2132
2133 assert_eq!(second_result.values.len(), first_result.values.len());
2134
2135 Ok(())
2136 }
2137
2138 fn check_reverse_rsi_nan_handling(
2139 test_name: &str,
2140 kernel: Kernel,
2141 ) -> Result<(), Box<dyn Error>> {
2142 skip_if_unsupported!(kernel, test_name);
2143 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2144 let candles = read_candles_from_csv(file_path)?;
2145
2146 let input = ReverseRsiInput::from_candles(
2147 &candles,
2148 "close",
2149 ReverseRsiParams {
2150 rsi_length: Some(14),
2151 rsi_level: Some(50.0),
2152 },
2153 );
2154 let res = reverse_rsi_with_kernel(&input, kernel)?;
2155 assert_eq!(res.values.len(), candles.close.len());
2156
2157 Ok(())
2158 }
2159
    // Verifies the incremental stream matches the batch computation bar-by-bar.
    fn check_reverse_rsi_streaming(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
        skip_if_unsupported!(kernel, test_name);

        let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
        let candles = read_candles_from_csv(file_path)?;

        let rsi_length = 14;
        let rsi_level = 50.0;

        // Reference: the batch computation over the whole series.
        let input = ReverseRsiInput::from_candles(
            &candles,
            "close",
            ReverseRsiParams {
                rsi_length: Some(rsi_length),
                rsi_level: Some(rsi_level),
            },
        );
        let batch_output = reverse_rsi_with_kernel(&input, kernel)?.values;

        let mut stream = ReverseRsiStream::try_new(ReverseRsiParams {
            rsi_length: Some(rsi_length),
            rsi_level: Some(rsi_level),
        })?;

        // Feed prices one at a time; warm-up (None) maps to NaN so the two
        // series stay index-aligned.
        let mut stream_values = Vec::with_capacity(candles.close.len());
        for &price in &candles.close {
            match stream.update(price) {
                Some(val) => stream_values.push(val),
                None => stream_values.push(f64::NAN),
            }
        }

        assert_eq!(batch_output.len(), stream_values.len());

        // Compare only positions where both sides are comparable: NaN/NaN
        // pairs are accepted, finite pairs must agree within 1e-9. (Mixed
        // NaN/finite pairs fall through both branches unchecked.)
        for (i, (&b, &s)) in batch_output.iter().zip(stream_values.iter()).enumerate() {
            if b.is_nan() && s.is_nan() {
                continue;
            }
            if b.is_finite() && s.is_finite() {
                let diff = (b - s).abs();
                assert!(
                    diff < 1e-9,
                    "[{}] Reverse RSI streaming mismatch at idx {}: batch={}, stream={}, diff={}",
                    test_name,
                    i,
                    b,
                    s,
                    diff
                );
            }
        }
        Ok(())
    }
2213
2214 fn check_reverse_rsi_warmup_nans(
2215 test_name: &str,
2216 kernel: Kernel,
2217 ) -> Result<(), Box<dyn Error>> {
2218 skip_if_unsupported!(kernel, test_name);
2219 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2220 let candles = read_candles_from_csv(file_path)?;
2221
2222 let params = ReverseRsiParams {
2223 rsi_length: Some(14),
2224 rsi_level: Some(50.0),
2225 };
2226
2227 let input = ReverseRsiInput::from_candles(&candles, "close", params);
2228 let result = reverse_rsi_with_kernel(&input, kernel)?;
2229
2230 let first_valid = candles.close.iter().position(|x| !x.is_nan()).unwrap_or(0);
2231
2232 for i in 0..first_valid {
2233 assert!(
2234 result.values[i].is_nan(),
2235 "[{}] Expected NaN at index {} (before first valid data)",
2236 test_name,
2237 i
2238 );
2239 }
2240
2241 Ok(())
2242 }
2243
2244 #[cfg(debug_assertions)]
2245 fn check_reverse_rsi_no_poison(test_name: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2246 skip_if_unsupported!(kernel, test_name);
2247 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2248 let candles = read_candles_from_csv(file_path)?;
2249
2250 let test_params = vec![
2251 ReverseRsiParams::default(),
2252 ReverseRsiParams {
2253 rsi_length: Some(7),
2254 rsi_level: Some(30.0),
2255 },
2256 ReverseRsiParams {
2257 rsi_length: Some(14),
2258 rsi_level: Some(50.0),
2259 },
2260 ReverseRsiParams {
2261 rsi_length: Some(21),
2262 rsi_level: Some(70.0),
2263 },
2264 ];
2265
2266 for (param_idx, params) in test_params.iter().enumerate() {
2267 let input = ReverseRsiInput::from_candles(&candles, "close", params.clone());
2268 let output = reverse_rsi_with_kernel(&input, kernel)?;
2269
2270 for (i, &val) in output.values.iter().enumerate() {
2271 if val.is_nan() {
2272 continue;
2273 }
2274
2275 let bits = val.to_bits();
2276
2277 if bits == 0x11111111_11111111 {
2278 panic!(
2279 "[{}] Found alloc_with_nan_prefix poison value {} (0x{:016X}) at index {} \
2280 with params: rsi_length={}, rsi_level={}",
2281 test_name,
2282 val,
2283 bits,
2284 i,
2285 params.rsi_length.unwrap_or(14),
2286 params.rsi_level.unwrap_or(50.0)
2287 );
2288 }
2289
2290 if bits == 0x22222222_22222222 {
2291 panic!(
2292 "[{}] Found init_matrix_prefixes poison value {} (0x{:016X}) at index {} \
2293 with params: rsi_length={}, rsi_level={}",
2294 test_name,
2295 val,
2296 bits,
2297 i,
2298 params.rsi_length.unwrap_or(14),
2299 params.rsi_level.unwrap_or(50.0)
2300 );
2301 }
2302
2303 if bits == 0x33333333_33333333 {
2304 panic!(
2305 "[{}] Found make_uninit_matrix poison value {} (0x{:016X}) at index {} \
2306 with params: rsi_length={}, rsi_level={}",
2307 test_name,
2308 val,
2309 bits,
2310 i,
2311 params.rsi_length.unwrap_or(14),
2312 params.rsi_level.unwrap_or(50.0)
2313 );
2314 }
2315 }
2316 }
2317
2318 Ok(())
2319 }
2320
    /// Release-build stand-in for the poison scan.
    ///
    /// The poison bit patterns are only written by the debug-mode allocation
    /// helpers, so release builds have nothing to detect; this keeps the
    /// generated per-kernel tests compiling in both profiles.
    #[cfg(not(debug_assertions))]
    fn check_reverse_rsi_no_poison(
        _test_name: &str,
        _kernel: Kernel,
    ) -> Result<(), Box<dyn Error>> {
        Ok(())
    }
2328
    /// Expands each listed check function into per-kernel `#[test]` wrappers:
    /// a `_scalar` variant unconditionally, `_avx2`/`_avx512` variants on
    /// nightly x86_64 builds, and a `_simd128` variant on wasm32+simd128
    /// targets. Results are discarded (`let _ =`) so a check returning `Err`
    /// (e.g. a missing data file) does not fail the test; internal `assert!`s
    /// still panic.
    // NOTE(review): the `_simd128` wrappers dispatch `Kernel::Scalar` — this
    // looks deliberate (no dedicated simd128 kernel variant is used here), but
    // confirm against the Kernel enum.
    macro_rules! generate_all_reverse_rsi_tests {
        ($($test_fn:ident),*) => {
            paste::paste! {
                $(
                    #[test]
                    fn [<$test_fn _scalar>]() {
                        let _ = $test_fn(stringify!([<$test_fn _scalar>]), Kernel::Scalar);
                    }
                )*
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                $(
                    #[test]
                    fn [<$test_fn _avx2>]() {
                        let _ = $test_fn(stringify!([<$test_fn _avx2>]), Kernel::Avx2);
                    }
                    #[test]
                    fn [<$test_fn _avx512>]() {
                        let _ = $test_fn(stringify!([<$test_fn _avx512>]), Kernel::Avx512);
                    }
                )*
                #[cfg(all(target_arch = "wasm32", target_feature = "simd128"))]
                $(
                    #[test]
                    fn [<$test_fn _simd128>]() {
                        let _ = $test_fn(stringify!([<$test_fn _simd128>]), Kernel::Scalar);
                    }
                )*
            }
        }
    }
2359
    // Instantiate every per-kernel test permutation for the checks above.
    generate_all_reverse_rsi_tests!(
        check_reverse_rsi_partial_params,
        check_reverse_rsi_accuracy,
        check_reverse_rsi_default_candles,
        check_reverse_rsi_zero_period,
        check_reverse_rsi_period_exceeds_length,
        check_reverse_rsi_very_small_dataset,
        check_reverse_rsi_empty_input,
        check_reverse_rsi_invalid_level,
        check_reverse_rsi_all_nan,
        check_reverse_rsi_reinput,
        check_reverse_rsi_nan_handling,
        check_reverse_rsi_streaming,
        check_reverse_rsi_warmup_nans,
        check_reverse_rsi_no_poison
    );
2376
2377 fn check_batch_default_row(test: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2378 skip_if_unsupported!(kernel, test);
2379
2380 let file = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2381 let c = read_candles_from_csv(file)?;
2382
2383 let output = ReverseRsiBatchBuilder::new()
2384 .kernel(kernel)
2385 .apply_candles(&c, "close")?;
2386
2387 let def = ReverseRsiParams::default();
2388 let row = output.values_for(&def).expect("default row missing");
2389
2390 assert_eq!(row.len(), c.close.len());
2391
2392 let valid_count = row.iter().filter(|v| v.is_finite()).count();
2393 assert!(
2394 valid_count > 0,
2395 "[{}] Should have valid values in default row",
2396 test
2397 );
2398
2399 Ok(())
2400 }
2401
2402 fn check_batch_sweep(test: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2403 skip_if_unsupported!(kernel, test);
2404
2405 let file = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2406 let c = read_candles_from_csv(file)?;
2407
2408 let output = ReverseRsiBatchBuilder::new()
2409 .kernel(kernel)
2410 .rsi_length_range(10, 20, 2)
2411 .rsi_level_range(30.0, 70.0, 10.0)
2412 .apply_candles(&c, "close")?;
2413
2414 let expected_combos = 6 * 5;
2415 assert_eq!(output.combos.len(), expected_combos);
2416 assert_eq!(output.rows, expected_combos);
2417 assert_eq!(output.cols, c.close.len());
2418
2419 Ok(())
2420 }
2421
2422 #[cfg(debug_assertions)]
2423 fn check_batch_no_poison(test: &str, kernel: Kernel) -> Result<(), Box<dyn Error>> {
2424 skip_if_unsupported!(kernel, test);
2425
2426 let file = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2427 let c = read_candles_from_csv(file)?;
2428
2429 let test_configs = vec![
2430 (7, 21, 7, 20.0, 80.0, 20.0),
2431 (14, 14, 0, 50.0, 50.0, 0.0),
2432 (10, 20, 5, 30.0, 70.0, 20.0),
2433 ];
2434
2435 for (cfg_idx, &(len_start, len_end, len_step, lvl_start, lvl_end, lvl_step)) in
2436 test_configs.iter().enumerate()
2437 {
2438 let output = ReverseRsiBatchBuilder::new()
2439 .kernel(kernel)
2440 .rsi_length_range(len_start, len_end, len_step)
2441 .rsi_level_range(lvl_start, lvl_end, lvl_step)
2442 .apply_candles(&c, "close")?;
2443
2444 for (idx, &val) in output.values.iter().enumerate() {
2445 if val.is_nan() {
2446 continue;
2447 }
2448
2449 let bits = val.to_bits();
2450 let row = idx / output.cols;
2451 let col = idx % output.cols;
2452 let combo = &output.combos[row];
2453
2454 if bits == 0x11111111_11111111 {
2455 panic!(
2456 "[{}] Config {}: Found alloc_with_nan_prefix poison value {} (0x{:016X}) \
2457 at row {} col {} (flat index {}) with params: rsi_length={}, rsi_level={}",
2458 test,
2459 cfg_idx,
2460 val,
2461 bits,
2462 row,
2463 col,
2464 idx,
2465 combo.rsi_length.unwrap_or(14),
2466 combo.rsi_level.unwrap_or(50.0)
2467 );
2468 }
2469
2470 if bits == 0x22222222_22222222 {
2471 panic!(
2472 "[{}] Config {}: Found init_matrix_prefixes poison value {} (0x{:016X}) \
2473 at row {} col {} (flat index {}) with params: rsi_length={}, rsi_level={}",
2474 test,
2475 cfg_idx,
2476 val,
2477 bits,
2478 row,
2479 col,
2480 idx,
2481 combo.rsi_length.unwrap_or(14),
2482 combo.rsi_level.unwrap_or(50.0)
2483 );
2484 }
2485
2486 if bits == 0x33333333_33333333 {
2487 panic!(
2488 "[{}] Config {}: Found make_uninit_matrix poison value {} (0x{:016X}) \
2489 at row {} col {} (flat index {}) with params: rsi_length={}, rsi_level={}",
2490 test,
2491 cfg_idx,
2492 val,
2493 bits,
2494 row,
2495 col,
2496 idx,
2497 combo.rsi_length.unwrap_or(14),
2498 combo.rsi_level.unwrap_or(50.0)
2499 );
2500 }
2501 }
2502 }
2503
2504 Ok(())
2505 }
2506
    /// Release-build stand-in for the batch poison scan.
    ///
    /// The poison bit patterns are only written by debug-mode allocation
    /// helpers, so release builds have nothing to detect.
    #[cfg(not(debug_assertions))]
    fn check_batch_no_poison(_test: &str, _kernel: Kernel) -> Result<(), Box<dyn Error>> {
        Ok(())
    }
2511
    /// Expands one batch check function into per-kernel `#[test]` wrappers:
    /// `_scalar` (ScalarBatch) and `_auto_detect` (Auto) unconditionally, plus
    /// `_avx2`/`_avx512` batch variants on nightly x86_64 builds. As with the
    /// single-call macro, `let _ =` discards `Err` results; internal
    /// `assert!`s still panic.
    macro_rules! gen_batch_tests {
        ($fn_name:ident) => {
            paste::paste! {
                #[test] fn [<$fn_name _scalar>]() {
                    let _ = $fn_name(stringify!([<$fn_name _scalar>]), Kernel::ScalarBatch);
                }
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                #[test] fn [<$fn_name _avx2>]() {
                    let _ = $fn_name(stringify!([<$fn_name _avx2>]), Kernel::Avx2Batch);
                }
                #[cfg(all(feature = "nightly-avx", target_arch = "x86_64"))]
                #[test] fn [<$fn_name _avx512>]() {
                    let _ = $fn_name(stringify!([<$fn_name _avx512>]), Kernel::Avx512Batch);
                }
                #[test] fn [<$fn_name _auto_detect>]() {
                    let _ = $fn_name(stringify!([<$fn_name _auto_detect>]), Kernel::Auto);
                }
            }
        };
    }
2532
    // Batch-path coverage: default-row lookup, parameter sweep, poison scan.
    gen_batch_tests!(check_batch_default_row);
    gen_batch_tests!(check_batch_sweep);
    gen_batch_tests!(check_batch_no_poison);
2536
2537 fn check_kernel_passthrough(_name: &str, _k: Kernel) -> Result<(), Box<dyn Error>> {
2538 let data = vec![1.0; 64];
2539 for k in [Kernel::Scalar, Kernel::Auto] {
2540 let p = ReverseRsiParams {
2541 rsi_length: Some(14),
2542 rsi_level: Some(50.0),
2543 };
2544 let inp = ReverseRsiInput::from_slice(&data, p);
2545 let _ = reverse_rsi_with_kernel(&inp, k)?;
2546 }
2547 Ok(())
2548 }
2549
    /// Placeholder for batch `_into` signature-parity coverage.
    ///
    /// Intentionally a no-op: it exists so the test harness keeps a uniform
    /// check-function shape; it always succeeds.
    fn check_batch_into_signature_parity(_n: &str, _k: Kernel) -> Result<(), Box<dyn Error>> {
        Ok(())
    }
2553
2554 #[test]
2555 fn test_kernel_passthrough() {
2556 let _ = check_kernel_passthrough("kernel_passthrough", Kernel::Auto);
2557 }
2558
2559 #[test]
2560 fn test_batch_into_signature_parity() {
2561 let _ = check_batch_into_signature_parity("batch_into_signature", Kernel::Auto);
2562 }
2563
2564 #[test]
2565 fn test_reverse_rsi_into_matches_api() {
2566 let file_path = "src/data/2018-09-01-2024-Bitfinex_Spot-4h.csv";
2567 let candles = read_candles_from_csv(file_path).expect("read candles");
2568
2569 let params = ReverseRsiParams::default();
2570 let input = ReverseRsiInput::from_candles(&candles, "close", params);
2571
2572 let baseline = reverse_rsi(&input).expect("baseline").values;
2573
2574 let mut out = vec![0.0; candles.close.len()];
2575 #[cfg(not(all(target_arch = "wasm32", feature = "wasm")))]
2576 {
2577 reverse_rsi_into(&input, &mut out).expect("reverse_rsi_into");
2578 }
2579 #[cfg(all(target_arch = "wasm32", feature = "wasm"))]
2580 {
2581 reverse_rsi_into_slice(&mut out, &input, Kernel::Auto).expect("reverse_rsi_into_slice");
2582 }
2583
2584 assert_eq!(baseline.len(), out.len());
2585 for (i, (&a, &b)) in baseline.iter().zip(out.iter()).enumerate() {
2586 let equal = (a.is_nan() && b.is_nan()) || (a == b);
2587 assert!(
2588 equal,
2589 "parity mismatch at index {}: baseline={:?}, into={:?}",
2590 i, a, b
2591 );
2592 }
2593 }
2594}