//! vector_ta/indicators/leavitt_convolution_acceleration.rs
//!
//! Leavitt Convolution Acceleration indicator: linear-regression projection,
//! slope extraction, rolling normalization, and a discrete trading signal.

1#[cfg(feature = "python")]
2use numpy::{IntoPyArray, PyArray1, PyArrayMethods, PyReadonlyArray1};
3#[cfg(feature = "python")]
4use pyo3::exceptions::PyValueError;
5#[cfg(feature = "python")]
6use pyo3::prelude::*;
7#[cfg(feature = "python")]
8use pyo3::types::PyDict;
9
10#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
11use serde::{Deserialize, Serialize};
12#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
13use wasm_bindgen::prelude::*;
14
15use crate::utilities::data_loader::{source_type, Candles};
16use crate::utilities::enums::Kernel;
17use crate::utilities::helpers::{
18    alloc_with_nan_prefix, detect_best_batch_kernel, detect_best_kernel, init_matrix_prefixes,
19    make_uninit_matrix,
20};
21#[cfg(feature = "python")]
22use crate::utilities::kernel_validation::validate_kernel;
23
24#[cfg(not(target_arch = "wasm32"))]
25use rayon::prelude::*;
26use std::convert::AsRef;
27use std::mem::{ManuallyDrop, MaybeUninit};
28use thiserror::Error;
29
/// Default linear-regression projection window (bars).
const DEFAULT_LENGTH: usize = 70;
/// Default rolling mean/std normalization window (bars).
const DEFAULT_NORM_LENGTH: usize = 150;
32
33impl<'a> AsRef<[f64]> for LeavittConvolutionAccelerationInput<'a> {
34    #[inline(always)]
35    fn as_ref(&self) -> &[f64] {
36        match &self.data {
37            LeavittConvolutionAccelerationData::Slice(slice) => slice,
38            LeavittConvolutionAccelerationData::Candles { candles, source } => {
39                source_type(candles, source)
40            }
41        }
42    }
43}
44
/// Borrowed input series: either a candle container plus the name of the
/// source field to extract, or a raw `f64` slice used directly.
#[derive(Debug, Clone)]
pub enum LeavittConvolutionAccelerationData<'a> {
    /// Resolve the series from `candles` via `source` (e.g. "hlcc4").
    Candles {
        candles: &'a Candles,
        source: &'a str,
    },
    /// Use the slice as-is.
    Slice(&'a [f64]),
}
53
/// Indicator output: one value per input bar; warmup bars are NaN.
#[derive(Debug, Clone)]
pub struct LeavittConvolutionAccelerationOutput {
    /// First difference of the normalized, squashed convolution slope.
    pub conv_acceleration: Vec<f64>,
    /// Discrete signal in {-2, -1, 0, 1, 2}.
    pub signal: Vec<f64>,
}
59
/// Optional parameters; `None` falls back to the defaults
/// (length = 70, norm_length = 150, hyperbolic squashing).
#[derive(Debug, Clone)]
#[cfg_attr(
    all(target_arch = "wasm32", feature = "wasm"),
    derive(Serialize, Deserialize)
)]
pub struct LeavittConvolutionAccelerationParams {
    /// Linear-regression projection window (bars).
    pub length: Option<usize>,
    /// Rolling mean/std window used to z-score the convolution slope.
    pub norm_length: Option<usize>,
    /// `true` → hyperbolic (tanh-style) squashing; `false` → logistic.
    pub use_norm_hyperbolic: Option<bool>,
}
70
71impl Default for LeavittConvolutionAccelerationParams {
72    fn default() -> Self {
73        Self {
74            length: Some(DEFAULT_LENGTH),
75            norm_length: Some(DEFAULT_NORM_LENGTH),
76            use_norm_hyperbolic: Some(true),
77        }
78    }
79}
80
/// Complete indicator input: a data source plus its (optional) parameters.
#[derive(Debug, Clone)]
pub struct LeavittConvolutionAccelerationInput<'a> {
    /// Where the price series comes from.
    pub data: LeavittConvolutionAccelerationData<'a>,
    /// Parameter overrides; `None` fields use defaults.
    pub params: LeavittConvolutionAccelerationParams,
}
86
87impl<'a> LeavittConvolutionAccelerationInput<'a> {
88    #[inline]
89    pub fn from_candles(
90        candles: &'a Candles,
91        source: &'a str,
92        params: LeavittConvolutionAccelerationParams,
93    ) -> Self {
94        Self {
95            data: LeavittConvolutionAccelerationData::Candles { candles, source },
96            params,
97        }
98    }
99
100    #[inline]
101    pub fn from_slice(slice: &'a [f64], params: LeavittConvolutionAccelerationParams) -> Self {
102        Self {
103            data: LeavittConvolutionAccelerationData::Slice(slice),
104            params,
105        }
106    }
107
108    #[inline]
109    pub fn with_default_candles(candles: &'a Candles) -> Self {
110        Self::from_candles(
111            candles,
112            "hlcc4",
113            LeavittConvolutionAccelerationParams::default(),
114        )
115    }
116
117    #[inline]
118    pub fn get_length(&self) -> usize {
119        self.params.length.unwrap_or(DEFAULT_LENGTH)
120    }
121
122    #[inline]
123    pub fn get_norm_length(&self) -> usize {
124        self.params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH)
125    }
126
127    #[inline]
128    pub fn get_use_norm_hyperbolic(&self) -> bool {
129        self.params.use_norm_hyperbolic.unwrap_or(true)
130    }
131}
132
/// Fluent builder for single-run evaluation or a streaming evaluator.
#[derive(Clone, Debug)]
pub struct LeavittConvolutionAccelerationBuilder {
    length: Option<usize>,              // projection window override
    norm_length: Option<usize>,         // normalization window override
    use_norm_hyperbolic: Option<bool>,  // squashing mode override
    source: Option<String>,             // candle source field (default "hlcc4")
    kernel: Kernel,                     // compute-kernel selection
}
141
142impl Default for LeavittConvolutionAccelerationBuilder {
143    fn default() -> Self {
144        Self {
145            length: None,
146            norm_length: None,
147            use_norm_hyperbolic: None,
148            source: None,
149            kernel: Kernel::Auto,
150        }
151    }
152}
153
154impl LeavittConvolutionAccelerationBuilder {
155    #[inline(always)]
156    pub fn new() -> Self {
157        Self::default()
158    }
159
160    #[inline(always)]
161    pub fn length(mut self, value: usize) -> Self {
162        self.length = Some(value);
163        self
164    }
165
166    #[inline(always)]
167    pub fn norm_length(mut self, value: usize) -> Self {
168        self.norm_length = Some(value);
169        self
170    }
171
172    #[inline(always)]
173    pub fn use_norm_hyperbolic(mut self, value: bool) -> Self {
174        self.use_norm_hyperbolic = Some(value);
175        self
176    }
177
178    #[inline(always)]
179    pub fn source<S: Into<String>>(mut self, value: S) -> Self {
180        self.source = Some(value.into());
181        self
182    }
183
184    #[inline(always)]
185    pub fn kernel(mut self, value: Kernel) -> Self {
186        self.kernel = value;
187        self
188    }
189
190    #[inline(always)]
191    pub fn apply(
192        self,
193        candles: &Candles,
194    ) -> Result<LeavittConvolutionAccelerationOutput, LeavittConvolutionAccelerationError> {
195        let input = LeavittConvolutionAccelerationInput::from_candles(
196            candles,
197            self.source.as_deref().unwrap_or("hlcc4"),
198            LeavittConvolutionAccelerationParams {
199                length: self.length,
200                norm_length: self.norm_length,
201                use_norm_hyperbolic: self.use_norm_hyperbolic,
202            },
203        );
204        leavitt_convolution_acceleration_with_kernel(&input, self.kernel)
205    }
206
207    #[inline(always)]
208    pub fn apply_slice(
209        self,
210        data: &[f64],
211    ) -> Result<LeavittConvolutionAccelerationOutput, LeavittConvolutionAccelerationError> {
212        let input = LeavittConvolutionAccelerationInput::from_slice(
213            data,
214            LeavittConvolutionAccelerationParams {
215                length: self.length,
216                norm_length: self.norm_length,
217                use_norm_hyperbolic: self.use_norm_hyperbolic,
218            },
219        );
220        leavitt_convolution_acceleration_with_kernel(&input, self.kernel)
221    }
222
223    #[inline(always)]
224    pub fn into_stream(
225        self,
226    ) -> Result<LeavittConvolutionAccelerationStream, LeavittConvolutionAccelerationError> {
227        LeavittConvolutionAccelerationStream::try_new(LeavittConvolutionAccelerationParams {
228            length: self.length,
229            norm_length: self.norm_length,
230            use_norm_hyperbolic: self.use_norm_hyperbolic,
231        })
232    }
233}
234
/// Errors produced by validation and batch expansion.
#[derive(Debug, Error)]
pub enum LeavittConvolutionAccelerationError {
    /// The input slice has zero length.
    #[error("leavitt_convolution_acceleration: Input data slice is empty.")]
    EmptyInputData,
    /// No finite value exists anywhere in the input.
    #[error("leavitt_convolution_acceleration: All source values are invalid.")]
    AllValuesNaN,
    /// `length` is zero or exceeds the data length.
    #[error(
        "leavitt_convolution_acceleration: Invalid length: length = {length}, data length = {data_len}"
    )]
    InvalidLength { length: usize, data_len: usize },
    /// `norm_length` is zero or exceeds the data length.
    #[error(
        "leavitt_convolution_acceleration: Invalid norm_length: norm_length = {norm_length}, data length = {data_len}"
    )]
    InvalidNormLength { norm_length: usize, data_len: usize },
    /// Too few finite bars after the first valid index to warm up every stage.
    #[error(
        "leavitt_convolution_acceleration: Not enough valid data: needed = {needed}, valid = {valid}"
    )]
    NotEnoughValidData { needed: usize, valid: usize },
    /// Caller-provided output slice length differs from the input length.
    #[error(
        "leavitt_convolution_acceleration: Output length mismatch: expected = {expected}, got = {got}"
    )]
    OutputLengthMismatch { expected: usize, got: usize },
    /// A batch sweep axis (start, end, step) is malformed.
    #[error(
        "leavitt_convolution_acceleration: Invalid range: start={start}, end={end}, step={step}"
    )]
    InvalidRange {
        start: String,
        end: String,
        step: String,
    },
    /// The requested kernel has no batch implementation here.
    #[error("leavitt_convolution_acceleration: Invalid kernel for batch: {0:?}")]
    InvalidKernelForBatch(Kernel),
}
268
/// Index of the first finite (non-NaN, non-infinite) value, if any.
#[inline(always)]
fn first_valid_source(data: &[f64]) -> Option<usize> {
    for (idx, value) in data.iter().enumerate() {
        if value.is_finite() {
            return Some(idx);
        }
    }
    None
}
273
/// Number of finite values at index `start` and beyond.
#[inline(always)]
fn count_valid_from(data: &[f64], start: usize) -> usize {
    let mut total = 0usize;
    for value in &data[start..] {
        if value.is_finite() {
            total += 1;
        }
    }
    total
}
278
/// Floor of the square root of `length`, clamped to at least 1.
#[inline(always)]
fn sqrt_length(length: usize) -> usize {
    let root = (length as f64).sqrt().floor() as usize;
    if root == 0 {
        1
    } else {
        root
    }
}
283
/// Minimum run of finite bars needed before the first output: `length` for
/// the projection, floor(sqrt(length)) for the slope window, `norm_length`
/// for normalization, minus the two bars shared between adjacent stages.
#[inline(always)]
fn required_valid_bars(length: usize, norm_length: usize) -> usize {
    // Inlined sqrt_length: floor(sqrt(length)) clamped to >= 1.
    let root = ((length as f64).sqrt().floor() as usize).max(1);
    length + root + norm_length - 2
}
288
/// Index of the first non-NaN output bar: the first valid input index plus
/// the combined warmup of the projection, slope, and normalization stages.
#[inline(always)]
fn output_warmup(first: usize, length: usize, norm_length: usize) -> usize {
    // Inlined sqrt_length: floor(sqrt(length)) clamped to >= 1.
    let root = ((length as f64).sqrt().floor() as usize).max(1);
    first + length + root + norm_length - 3
}
293
294#[inline(always)]
295fn normalized_kernel(kernel: Kernel) -> Kernel {
296    match kernel {
297        Kernel::Auto => detect_best_kernel(),
298        other if other.is_batch() => other.to_non_batch(),
299        other => other,
300    }
301}
302
/// O(1)-per-sample least-squares linear regression over a fixed ring buffer.
///
/// The x coordinates are the implicit window indices 0..period-1, so every
/// x-only sum is precomputed once in `new`.
#[derive(Clone, Debug)]
struct RollingLinRegState {
    period: usize,     // window size in bars
    buffer: Vec<f64>,  // ring buffer of the last `period` samples
    head: usize,       // next write position in `buffer`
    count: usize,      // samples accumulated during warmup
    filled: bool,      // true once the window holds `period` samples
    n: f64,            // period as f64, clamped to >= 1
    sum_x: f64,        // Σx over x = 0..period-1
    inv_n: f64,        // 1/n (0 when n == 0)
    inv_denom: f64,    // 1/(n·Σx² − (Σx)²); 0 when the denominator degenerates
    mean_x: f64,       // Σx / n
    forecast_x: f64,   // x at which the fit is evaluated (one past the window)
    sum_y: f64,        // Σy over the current window
    sum_xy: f64,       // Σx·y over the current window
}
319
impl RollingLinRegState {
    /// Precomputes the x-side regression sums for a `period`-bar window.
    #[inline(always)]
    fn new(period: usize) -> Self {
        let n = period as f64;
        let m = period.saturating_sub(1) as f64;
        // Closed forms over x = 0..period-1:
        //   Σx = n(n−1)/2,  Σx² = n(n−1)(2n−1)/6.
        let sum_x = 0.5 * m * n;
        let sum_x2 = (m * n) * (2.0 * m + 1.0) / 6.0;
        // Least-squares denominator n·Σx² − (Σx)²; zero when period <= 1.
        let denom = n * sum_x2 - sum_x * sum_x;
        Self {
            period,
            buffer: vec![0.0; period.max(1)],
            head: 0,
            count: 0,
            filled: false,
            n: n.max(1.0),
            sum_x,
            inv_n: if n > 0.0 { 1.0 / n } else { 0.0 },
            inv_denom: if denom.abs() > f64::EPSILON {
                1.0 / denom
            } else {
                0.0
            },
            mean_x: if n > 0.0 { sum_x / n } else { 0.0 },
            // Evaluate the fit one step past the last in-window x (period − 1).
            forecast_x: period as f64,
            sum_y: 0.0,
            sum_xy: 0.0,
        }
    }

    /// Drops all samples; the precomputed x-side constants are kept.
    #[inline(always)]
    fn reset(&mut self) {
        self.buffer.fill(0.0);
        self.head = 0;
        self.count = 0;
        self.filled = false;
        self.sum_y = 0.0;
        self.sum_xy = 0.0;
    }

    /// Pushes one sample. Returns `(forecast, slope)` once the window is
    /// full, `None` while warming up. A non-finite sample resets the window.
    #[inline(always)]
    fn update(&mut self, value: f64) -> Option<(f64, f64)> {
        if !value.is_finite() {
            self.reset();
            return None;
        }

        // Degenerate window: the "fit" is the point itself with zero slope.
        if self.period == 1 {
            self.buffer[0] = value;
            self.count = 1;
            self.filled = true;
            return Some((value, 0.0));
        }

        if !self.filled {
            // Warmup: the sample inserted at fill position j has x = j.
            let j = self.count as f64;
            self.buffer[self.head] = value;
            self.head = (self.head + 1) % self.period;
            self.sum_y += value;
            self.sum_xy += j * value;
            self.count += 1;
            if self.count < self.period {
                return None;
            }
            self.filled = true;
            return Some((self.forecast_next(), self.slope()));
        }

        // Steady state: dropping the oldest sample (x = 0) shifts every
        // remaining x down by one and appends `value` at x = n − 1, giving
        // Σxy' = Σxy − Σy' + n·value where Σy' = Σy + value − y_old.
        let y_old = self.buffer[self.head];
        self.buffer[self.head] = value;
        let new_sum_y = self.sum_y + value - y_old;
        let new_sum_xy = self.n * value + self.sum_xy - new_sum_y;
        self.sum_y = new_sum_y;
        self.sum_xy = new_sum_xy;
        self.head = (self.head + 1) % self.period;
        Some((self.forecast_next(), self.slope()))
    }

    /// Least-squares slope b = (n·Σxy − Σx·Σy) / (n·Σx² − (Σx)²).
    #[inline(always)]
    fn slope(&self) -> f64 {
        if self.period <= 1 {
            return 0.0;
        }
        (self.n.mul_add(self.sum_xy, -self.sum_x * self.sum_y)) * self.inv_denom
    }

    /// Fit evaluated at `forecast_x` (one bar past the window):
    /// ŷ = mean_y + b·(forecast_x − mean_x).
    #[inline(always)]
    fn forecast_next(&self) -> f64 {
        if self.period == 1 {
            // Most recent sample; head is not advanced on the period==1 path.
            return self.buffer[(self.head + self.period - 1) % self.period];
        }
        let slope = self.slope();
        let mean_y = self.sum_y * self.inv_n;
        mean_y + slope * (self.forecast_x - self.mean_x)
    }
}
415
/// Rolling mean and population standard deviation over a fixed ring buffer.
#[derive(Clone, Debug)]
struct RollingMeanStdState {
    period: usize,     // window size in bars (callers validate period > 0)
    buffer: Vec<f64>,  // ring buffer of the last `period` samples
    head: usize,       // next write position in `buffer`
    count: usize,      // samples accumulated during warmup
    filled: bool,      // true once the window holds `period` samples
    sum: f64,          // Σy over the window
    sum_sq: f64,       // Σy² over the window
}
426
427impl RollingMeanStdState {
428    #[inline(always)]
429    fn new(period: usize) -> Self {
430        Self {
431            period,
432            buffer: vec![0.0; period.max(1)],
433            head: 0,
434            count: 0,
435            filled: false,
436            sum: 0.0,
437            sum_sq: 0.0,
438        }
439    }
440
441    #[inline(always)]
442    fn reset(&mut self) {
443        self.buffer.fill(0.0);
444        self.head = 0;
445        self.count = 0;
446        self.filled = false;
447        self.sum = 0.0;
448        self.sum_sq = 0.0;
449    }
450
451    #[inline(always)]
452    fn update(&mut self, value: f64) -> Option<(f64, f64)> {
453        if !value.is_finite() {
454            self.reset();
455            return None;
456        }
457
458        if !self.filled {
459            self.buffer[self.head] = value;
460            self.head = (self.head + 1) % self.period;
461            self.count += 1;
462            self.sum += value;
463            self.sum_sq += value * value;
464            if self.count < self.period {
465                return None;
466            }
467            self.filled = true;
468        } else {
469            let old = self.buffer[self.head];
470            self.buffer[self.head] = value;
471            self.head = (self.head + 1) % self.period;
472            self.sum += value - old;
473            self.sum_sq += value * value - old * old;
474        }
475
476        let n = self.period as f64;
477        let mean = self.sum / n;
478        let variance = (self.sum_sq / n - mean * mean).max(0.0);
479        Some((mean, variance.sqrt()))
480    }
481}
482
/// Streaming evaluator: source linreg forecast → forecast-slope linreg →
/// rolling z-score → squash → first difference → discrete signal.
#[derive(Clone, Debug)]
pub struct LeavittConvolutionAccelerationStream {
    source_projection: RollingLinRegState,  // forecast over `length` source bars
    projection_slope: RollingLinRegState,   // slope over sqrt(length) forecasts
    norm: RollingMeanStdState,              // mean/std window for z-scoring
    use_norm_hyperbolic: bool,              // hyperbolic vs logistic squashing
    prev_scaled: f64,                       // previous squashed slope
    prev_conv_acceleration: f64,            // previous acceleration value
    prev_slo: f64,                          // previous signal-driver value
    prev_src1: f64,                         // raw source one bar back
    prev_src2: f64,                         // raw source two bars back
    have_src1: bool,                        // prev_src1 is populated
    have_src2: bool,                        // prev_src2 is populated
}
497
impl LeavittConvolutionAccelerationStream {
    /// Validates the parameters and builds an empty streaming state.
    ///
    /// # Errors
    /// `InvalidLength` / `InvalidNormLength` when either window is zero
    /// (reported with `data_len: 0` since no data is bound yet).
    pub fn try_new(
        params: LeavittConvolutionAccelerationParams,
    ) -> Result<Self, LeavittConvolutionAccelerationError> {
        let length = params.length.unwrap_or(DEFAULT_LENGTH);
        let norm_length = params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH);
        if length == 0 {
            return Err(LeavittConvolutionAccelerationError::InvalidLength {
                length,
                data_len: 0,
            });
        }
        if norm_length == 0 {
            return Err(LeavittConvolutionAccelerationError::InvalidNormLength {
                norm_length,
                data_len: 0,
            });
        }
        Ok(Self {
            source_projection: RollingLinRegState::new(length),
            // The slope stage runs over a sqrt(length)-sized forecast window.
            projection_slope: RollingLinRegState::new(sqrt_length(length)),
            norm: RollingMeanStdState::new(norm_length),
            use_norm_hyperbolic: params.use_norm_hyperbolic.unwrap_or(true),
            prev_scaled: 0.0,
            prev_conv_acceleration: 0.0,
            prev_slo: 0.0,
            prev_src1: 0.0,
            prev_src2: 0.0,
            have_src1: false,
            have_src2: false,
        })
    }

    /// Logistic squash: maps z into (0, 1).
    #[inline(always)]
    fn logistic(z: f64) -> f64 {
        1.0 / (1.0 + (-z).exp())
    }

    /// Hyperbolic squash (algebraically tanh(z/2)): maps z into (−1, 1).
    #[inline(always)]
    fn hyperbolic(z: f64) -> f64 {
        let e = (-z).exp();
        (1.0 - e) / (1.0 + e)
    }

    /// Clears every stage back to the post-construction state.
    #[inline(always)]
    pub fn reset(&mut self) {
        self.source_projection.reset();
        self.projection_slope.reset();
        self.norm.reset();
        self.prev_scaled = 0.0;
        self.prev_conv_acceleration = 0.0;
        self.prev_slo = 0.0;
        self.prev_src1 = 0.0;
        self.prev_src2 = 0.0;
        self.have_src1 = false;
        self.have_src2 = false;
    }

    /// Feeds one source bar; returns `(conv_acceleration, signal)` once every
    /// stage has warmed up, `None` during warmup. A non-finite input resets
    /// the entire pipeline.
    #[inline(always)]
    pub fn update(&mut self, value: f64) -> Option<(f64, f64)> {
        if !value.is_finite() {
            self.reset();
            return None;
        }

        // Second difference of the raw price decides whether price is
        // "accelerating". During warmup missing history is treated as 0.0.
        let src1 = if self.have_src1 { self.prev_src1 } else { 0.0 };
        let src2 = if self.have_src2 { self.prev_src2 } else { 0.0 };
        let is_accelerated = src2 - 2.0 * src1 + value > 0.0;

        // Stage 1: linreg forecast of the source. A non-finite forecast
        // would poison downstream state, so every later stage is reset.
        let projection = match self.source_projection.update(value) {
            Some((forecast, _)) if forecast.is_finite() => forecast,
            Some(_) => {
                self.projection_slope.reset();
                self.norm.reset();
                self.prev_scaled = 0.0;
                self.prev_conv_acceleration = 0.0;
                self.prev_slo = 0.0;
                self.bump_source_history(value);
                return None;
            }
            None => {
                self.bump_source_history(value);
                return None;
            }
        };

        // Stage 2: slope of the forecast series over a sqrt(length) window.
        let conv_slope = match self.projection_slope.update(projection) {
            Some((_, slope)) if slope.is_finite() => slope,
            Some(_) => {
                self.norm.reset();
                self.prev_scaled = 0.0;
                self.prev_conv_acceleration = 0.0;
                self.prev_slo = 0.0;
                self.bump_source_history(value);
                return None;
            }
            None => {
                self.bump_source_history(value);
                return None;
            }
        };

        // Stage 3: z-score the slope against its rolling mean/stddev
        // (z = 0 when the window has zero deviation), then squash.
        let scaled = match self.norm.update(conv_slope) {
            Some((mean, dev)) => {
                let z = if dev != 0.0 {
                    (conv_slope - mean) / dev
                } else {
                    0.0
                };
                if self.use_norm_hyperbolic {
                    Self::hyperbolic(z)
                } else {
                    Self::logistic(z)
                }
            }
            None => {
                self.bump_source_history(value);
                return None;
            }
        };

        // Stage 4: first difference of the squashed slope is the
        // acceleration; the logistic path differences once more to build
        // the signal driver.
        let conv_acceleration = scaled - self.prev_scaled;
        let slo = if self.use_norm_hyperbolic {
            conv_acceleration
        } else {
            conv_acceleration - self.prev_conv_acceleration
        };
        // Signal: ±2 when the driver strengthens in the direction agreed by
        // the raw-price acceleration, ±1 when it weakens, 0 on disagreement.
        let signal = if slo > 0.0 && is_accelerated {
            if slo > self.prev_slo {
                2.0
            } else {
                1.0
            }
        } else if slo < 0.0 && !is_accelerated {
            if slo < self.prev_slo {
                -2.0
            } else {
                -1.0
            }
        } else {
            0.0
        };

        self.prev_scaled = scaled;
        self.prev_conv_acceleration = conv_acceleration;
        self.prev_slo = slo;
        self.bump_source_history(value);
        Some((conv_acceleration, signal))
    }

    /// Alias kept for API symmetry: `update` already resets on NaN.
    #[inline(always)]
    pub fn update_reset_on_nan(&mut self, value: f64) -> Option<(f64, f64)> {
        self.update(value)
    }

    /// Shifts the raw source history: src1 → src2, `value` → src1.
    #[inline(always)]
    fn bump_source_history(&mut self, value: f64) {
        if self.have_src1 {
            self.prev_src2 = self.prev_src1;
            self.have_src2 = true;
        }
        self.prev_src1 = value;
        self.have_src1 = true;
    }
}
663
664#[inline(always)]
665fn leavitt_convolution_acceleration_prepare<'a>(
666    input: &'a LeavittConvolutionAccelerationInput,
667) -> Result<(&'a [f64], usize, usize, bool, usize), LeavittConvolutionAccelerationError> {
668    let data = input.as_ref();
669    let data_len = data.len();
670    if data_len == 0 {
671        return Err(LeavittConvolutionAccelerationError::EmptyInputData);
672    }
673    let first =
674        first_valid_source(data).ok_or(LeavittConvolutionAccelerationError::AllValuesNaN)?;
675    let length = input.get_length();
676    if length == 0 || length > data_len {
677        return Err(LeavittConvolutionAccelerationError::InvalidLength { length, data_len });
678    }
679    let norm_length = input.get_norm_length();
680    if norm_length == 0 || norm_length > data_len {
681        return Err(LeavittConvolutionAccelerationError::InvalidNormLength {
682            norm_length,
683            data_len,
684        });
685    }
686    let needed = required_valid_bars(length, norm_length);
687    let valid = count_valid_from(data, first);
688    if valid < needed {
689        return Err(LeavittConvolutionAccelerationError::NotEnoughValidData { needed, valid });
690    }
691    Ok((
692        data,
693        length,
694        norm_length,
695        input.get_use_norm_hyperbolic(),
696        first,
697    ))
698}
699
700#[inline(always)]
701fn leavitt_convolution_acceleration_compute_into(
702    data: &[f64],
703    length: usize,
704    norm_length: usize,
705    use_norm_hyperbolic: bool,
706    _kernel: Kernel,
707    out_conv_acceleration: &mut [f64],
708    out_signal: &mut [f64],
709) {
710    let mut stream =
711        LeavittConvolutionAccelerationStream::try_new(LeavittConvolutionAccelerationParams {
712            length: Some(length),
713            norm_length: Some(norm_length),
714            use_norm_hyperbolic: Some(use_norm_hyperbolic),
715        })
716        .expect("validated stream params");
717
718    for i in 0..data.len() {
719        if let Some((conv_acceleration, signal)) = stream.update_reset_on_nan(data[i]) {
720            out_conv_acceleration[i] = conv_acceleration;
721            out_signal[i] = signal;
722        }
723    }
724}
725
/// Computes the indicator with automatic kernel selection.
///
/// # Errors
/// Propagates validation errors from the underlying computation.
#[inline]
pub fn leavitt_convolution_acceleration(
    input: &LeavittConvolutionAccelerationInput,
) -> Result<LeavittConvolutionAccelerationOutput, LeavittConvolutionAccelerationError> {
    leavitt_convolution_acceleration_with_kernel(input, Kernel::Auto)
}
732
733pub fn leavitt_convolution_acceleration_with_kernel(
734    input: &LeavittConvolutionAccelerationInput,
735    kernel: Kernel,
736) -> Result<LeavittConvolutionAccelerationOutput, LeavittConvolutionAccelerationError> {
737    let (data, length, norm_length, use_norm_hyperbolic, first) =
738        leavitt_convolution_acceleration_prepare(input)?;
739    let warmup = output_warmup(first, length, norm_length).min(data.len());
740    let mut conv_acceleration = alloc_with_nan_prefix(data.len(), warmup);
741    let mut signal = alloc_with_nan_prefix(data.len(), warmup);
742    leavitt_convolution_acceleration_compute_into(
743        data,
744        length,
745        norm_length,
746        use_norm_hyperbolic,
747        normalized_kernel(kernel),
748        &mut conv_acceleration,
749        &mut signal,
750    );
751    Ok(LeavittConvolutionAccelerationOutput {
752        conv_acceleration,
753        signal,
754    })
755}
756
/// In-place variant with automatic kernel selection; delegates to
/// `leavitt_convolution_acceleration_into_slice` (note the argument order
/// there puts outputs first).
#[cfg(not(all(target_arch = "wasm32", feature = "wasm")))]
#[inline]
pub fn leavitt_convolution_acceleration_into(
    input: &LeavittConvolutionAccelerationInput,
    out_conv_acceleration: &mut [f64],
    out_signal: &mut [f64],
) -> Result<(), LeavittConvolutionAccelerationError> {
    leavitt_convolution_acceleration_into_slice(
        out_conv_acceleration,
        out_signal,
        input,
        Kernel::Auto,
    )
}
771
772pub fn leavitt_convolution_acceleration_into_slice(
773    out_conv_acceleration: &mut [f64],
774    out_signal: &mut [f64],
775    input: &LeavittConvolutionAccelerationInput,
776    kernel: Kernel,
777) -> Result<(), LeavittConvolutionAccelerationError> {
778    let (data, length, norm_length, use_norm_hyperbolic, _first) =
779        leavitt_convolution_acceleration_prepare(input)?;
780    if out_conv_acceleration.len() != data.len() || out_signal.len() != data.len() {
781        return Err(LeavittConvolutionAccelerationError::OutputLengthMismatch {
782            expected: data.len(),
783            got: out_conv_acceleration.len().max(out_signal.len()),
784        });
785    }
786    out_conv_acceleration.fill(f64::NAN);
787    out_signal.fill(f64::NAN);
788    leavitt_convolution_acceleration_compute_into(
789        data,
790        length,
791        norm_length,
792        use_norm_hyperbolic,
793        normalized_kernel(kernel),
794        out_conv_acceleration,
795        out_signal,
796    );
797    Ok(())
798}
799
/// Parameter sweep: each axis is `(start, end, step)` inclusive; step 0 with
/// start == end pins the axis to a single value.
#[derive(Clone, Debug)]
pub struct LeavittConvolutionAccelerationBatchRange {
    /// Sweep axis for the projection window.
    pub length: (usize, usize, usize),
    /// Sweep axis for the normalization window.
    pub norm_length: (usize, usize, usize),
    /// Squashing mode applied to every combination (defaults to hyperbolic).
    pub use_norm_hyperbolic: Option<bool>,
}
806
807impl Default for LeavittConvolutionAccelerationBatchRange {
808    fn default() -> Self {
809        Self {
810            length: (DEFAULT_LENGTH, DEFAULT_LENGTH, 0),
811            norm_length: (DEFAULT_NORM_LENGTH, DEFAULT_NORM_LENGTH, 0),
812            use_norm_hyperbolic: Some(true),
813        }
814    }
815}
816
/// Builder for parameter-sweep (batch) evaluation.
#[derive(Clone, Debug, Default)]
pub struct LeavittConvolutionAccelerationBatchBuilder {
    range: LeavittConvolutionAccelerationBatchRange,  // sweep axes
    kernel: Kernel,                                   // batch kernel selection
}
822
823impl LeavittConvolutionAccelerationBatchBuilder {
824    pub fn new() -> Self {
825        Self::default()
826    }
827
828    pub fn kernel(mut self, value: Kernel) -> Self {
829        self.kernel = value;
830        self
831    }
832
833    #[inline]
834    pub fn length_range(mut self, start: usize, end: usize, step: usize) -> Self {
835        self.range.length = (start, end, step);
836        self
837    }
838
839    #[inline]
840    pub fn length_static(mut self, value: usize) -> Self {
841        self.range.length = (value, value, 0);
842        self
843    }
844
845    #[inline]
846    pub fn norm_length_range(mut self, start: usize, end: usize, step: usize) -> Self {
847        self.range.norm_length = (start, end, step);
848        self
849    }
850
851    #[inline]
852    pub fn norm_length_static(mut self, value: usize) -> Self {
853        self.range.norm_length = (value, value, 0);
854        self
855    }
856
857    #[inline]
858    pub fn use_norm_hyperbolic(mut self, value: bool) -> Self {
859        self.range.use_norm_hyperbolic = Some(value);
860        self
861    }
862
863    #[inline]
864    pub fn apply_slice(
865        self,
866        data: &[f64],
867    ) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError>
868    {
869        leavitt_convolution_acceleration_batch_with_kernel(data, &self.range, self.kernel)
870    }
871
872    #[inline]
873    pub fn apply_candles(
874        self,
875        candles: &Candles,
876        source: &str,
877    ) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError>
878    {
879        self.apply_slice(source_type(candles, source))
880    }
881}
882
/// Batch output: `rows` parameter combinations over `cols` bars, with the
/// matching params in `combos` (same order as the rows).
// NOTE(review): the flat vectors are presumably row-major (`rows * cols`
// elements, row r at r*cols..(r+1)*cols) — confirm against the batch
// implementation, which is defined further down the file.
#[derive(Debug, Clone)]
pub struct LeavittConvolutionAccelerationBatchOutput {
    /// Acceleration values for all combinations.
    pub conv_acceleration: Vec<f64>,
    /// Signal values for all combinations.
    pub signal: Vec<f64>,
    /// Parameter combination for each row.
    pub combos: Vec<LeavittConvolutionAccelerationParams>,
    /// Number of parameter combinations.
    pub rows: usize,
    /// Number of input bars.
    pub cols: usize,
}
891
892#[inline(always)]
893fn expand_usize_axis(
894    start: usize,
895    end: usize,
896    step: usize,
897) -> Result<Vec<usize>, LeavittConvolutionAccelerationError> {
898    if start == 0 || end == 0 {
899        return Err(LeavittConvolutionAccelerationError::InvalidRange {
900            start: start.to_string(),
901            end: end.to_string(),
902            step: step.to_string(),
903        });
904    }
905    if step == 0 {
906        if start != end {
907            return Err(LeavittConvolutionAccelerationError::InvalidRange {
908                start: start.to_string(),
909                end: end.to_string(),
910                step: step.to_string(),
911            });
912        }
913        return Ok(vec![start]);
914    }
915    if start > end {
916        return Err(LeavittConvolutionAccelerationError::InvalidRange {
917            start: start.to_string(),
918            end: end.to_string(),
919            step: step.to_string(),
920        });
921    }
922    let mut out = Vec::new();
923    let mut value = start;
924    while value <= end {
925        out.push(value);
926        match value.checked_add(step) {
927            Some(next) => value = next,
928            None => break,
929        }
930    }
931    if out.is_empty() {
932        return Err(LeavittConvolutionAccelerationError::InvalidRange {
933            start: start.to_string(),
934            end: end.to_string(),
935            step: step.to_string(),
936        });
937    }
938    Ok(out)
939}
940
941pub fn expand_grid_leavitt_convolution_acceleration(
942    sweep: &LeavittConvolutionAccelerationBatchRange,
943) -> Result<Vec<LeavittConvolutionAccelerationParams>, LeavittConvolutionAccelerationError> {
944    let lengths = expand_usize_axis(sweep.length.0, sweep.length.1, sweep.length.2)?;
945    let norm_lengths = expand_usize_axis(
946        sweep.norm_length.0,
947        sweep.norm_length.1,
948        sweep.norm_length.2,
949    )?;
950    let use_norm_hyperbolic = sweep.use_norm_hyperbolic.unwrap_or(true);
951
952    let total = lengths
953        .len()
954        .checked_mul(norm_lengths.len())
955        .ok_or_else(|| LeavittConvolutionAccelerationError::InvalidRange {
956            start: sweep.length.0.to_string(),
957            end: sweep.norm_length.1.to_string(),
958            step: "overflow".to_string(),
959        })?;
960    let mut combos = Vec::with_capacity(total);
961    for &length in &lengths {
962        for &norm_length in &norm_lengths {
963            combos.push(LeavittConvolutionAccelerationParams {
964                length: Some(length),
965                norm_length: Some(norm_length),
966                use_norm_hyperbolic: Some(use_norm_hyperbolic),
967            });
968        }
969    }
970    Ok(combos)
971}
972
973pub fn leavitt_convolution_acceleration_batch_with_kernel(
974    data: &[f64],
975    sweep: &LeavittConvolutionAccelerationBatchRange,
976    kernel: Kernel,
977) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError> {
978    let chosen = match kernel {
979        Kernel::Auto => detect_best_batch_kernel(),
980        other => other,
981    };
982    match chosen {
983        Kernel::Scalar | Kernel::ScalarBatch => {
984            leavitt_convolution_acceleration_batch_par_slice(data, sweep)
985        }
986        other => Err(LeavittConvolutionAccelerationError::InvalidKernelForBatch(
987            other,
988        )),
989    }
990}
991
/// Single-threaded batch entry point: computes every parameter combination in
/// `sweep` over `data` sequentially on the scalar kernel.
pub fn leavitt_convolution_acceleration_batch_slice(
    data: &[f64],
    sweep: &LeavittConvolutionAccelerationBatchRange,
) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError> {
    leavitt_convolution_acceleration_batch_impl(data, sweep, Kernel::Scalar, false)
}
998
/// Parallel batch entry point: computes every parameter combination in
/// `sweep` over `data`, splitting rows across the rayon thread pool
/// (serial fallback on wasm32, where rayon is unavailable).
pub fn leavitt_convolution_acceleration_batch_par_slice(
    data: &[f64],
    sweep: &LeavittConvolutionAccelerationBatchRange,
) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError> {
    leavitt_convolution_acceleration_batch_impl(data, sweep, Kernel::Scalar, true)
}
1005
/// Core batch implementation shared by the serial and parallel entry points.
///
/// Expands `sweep` into parameter combos, validates that `data` has enough
/// valid bars for every combo, then fills one output row per combo into
/// freshly allocated `rows x cols` matrices (flattened row-major).
///
/// # Errors
/// - `EmptyInputData` when `data` is empty.
/// - `AllValuesNaN` when no finite value exists in `data`.
/// - `NotEnoughValidData` when any combo needs more bars than are available.
fn leavitt_convolution_acceleration_batch_impl(
    data: &[f64],
    sweep: &LeavittConvolutionAccelerationBatchRange,
    kernel: Kernel,
    parallel: bool,
) -> Result<LeavittConvolutionAccelerationBatchOutput, LeavittConvolutionAccelerationError> {
    let combos = expand_grid_leavitt_convolution_acceleration(sweep)?;
    let rows = combos.len();
    let cols = data.len();
    if cols == 0 {
        return Err(LeavittConvolutionAccelerationError::EmptyInputData);
    }
    // Fail fast, before allocating the matrices, if any combo cannot warm up.
    let first =
        first_valid_source(data).ok_or(LeavittConvolutionAccelerationError::AllValuesNaN)?;
    let valid = count_valid_from(data, first);
    for params in &combos {
        let needed = required_valid_bars(
            params.length.unwrap_or(DEFAULT_LENGTH),
            params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
        );
        if valid < needed {
            return Err(LeavittConvolutionAccelerationError::NotEnoughValidData { needed, valid });
        }
    }

    // Per-row warmup lengths: the prefix of each row that stays NaN because
    // the indicator has not produced output yet (clamped to the row width).
    let warmups: Vec<usize> = combos
        .iter()
        .map(|params| {
            output_warmup(
                first,
                params.length.unwrap_or(DEFAULT_LENGTH),
                params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
            )
            .min(cols)
        })
        .collect();

    // Allocate uninitialized rows x cols matrices and pre-write only the NaN
    // warmup prefixes; the compute kernel fills everything past each prefix.
    let mut conv_matrix = make_uninit_matrix(rows, cols);
    init_matrix_prefixes(&mut conv_matrix, cols, &warmups);
    let mut signal_matrix = make_uninit_matrix(rows, cols);
    init_matrix_prefixes(&mut signal_matrix, cols, &warmups);

    // ManuallyDrop: ownership of both buffers is transferred to the final
    // `Vec::from_raw_parts` calls below; dropping here would double-free.
    let mut conv_guard = ManuallyDrop::new(conv_matrix);
    let mut signal_guard = ManuallyDrop::new(signal_matrix);
    // SAFETY: the guards stay alive for the whole function, so these raw
    // slices view valid, exclusively owned memory.
    let conv_mu: &mut [MaybeUninit<f64>] =
        unsafe { std::slice::from_raw_parts_mut(conv_guard.as_mut_ptr(), conv_guard.len()) };
    let signal_mu: &mut [MaybeUninit<f64>] =
        unsafe { std::slice::from_raw_parts_mut(signal_guard.as_mut_ptr(), signal_guard.len()) };

    // Computes one combo's outputs directly into its (disjoint) row slices.
    let do_row = |row: usize,
                  row_conv_mu: &mut [MaybeUninit<f64>],
                  row_signal_mu: &mut [MaybeUninit<f64>]| {
        let params = &combos[row];
        // SAFETY: MaybeUninit<f64> and f64 share layout; the warmup prefix is
        // already initialized to NaN and the kernel writes the remainder.
        let out_conv = unsafe {
            std::slice::from_raw_parts_mut(row_conv_mu.as_mut_ptr() as *mut f64, row_conv_mu.len())
        };
        let out_signal = unsafe {
            std::slice::from_raw_parts_mut(
                row_signal_mu.as_mut_ptr() as *mut f64,
                row_signal_mu.len(),
            )
        };
        leavitt_convolution_acceleration_compute_into(
            data,
            params.length.unwrap_or(DEFAULT_LENGTH),
            params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
            params.use_norm_hyperbolic.unwrap_or(true),
            kernel,
            out_conv,
            out_signal,
        );
    };

    // Rows are independent, so the parallel path chunks both matrices by
    // `cols` and zips the chunks; wasm32 has no rayon and runs serially.
    if parallel {
        #[cfg(not(target_arch = "wasm32"))]
        conv_mu
            .par_chunks_mut(cols)
            .zip(signal_mu.par_chunks_mut(cols))
            .enumerate()
            .for_each(|(row, (row_conv, row_signal))| do_row(row, row_conv, row_signal));
        #[cfg(target_arch = "wasm32")]
        for (row, (row_conv, row_signal)) in conv_mu
            .chunks_mut(cols)
            .zip(signal_mu.chunks_mut(cols))
            .enumerate()
        {
            do_row(row, row_conv, row_signal);
        }
    } else {
        for (row, (row_conv, row_signal)) in conv_mu
            .chunks_mut(cols)
            .zip(signal_mu.chunks_mut(cols))
            .enumerate()
        {
            do_row(row, row_conv, row_signal);
        }
    }

    // SAFETY: every element is now initialized (NaN prefix + kernel output),
    // and the ManuallyDrop guards relinquish ownership exactly once here.
    let conv_acceleration = unsafe {
        Vec::from_raw_parts(
            conv_guard.as_mut_ptr() as *mut f64,
            conv_guard.len(),
            conv_guard.capacity(),
        )
    };
    let signal = unsafe {
        Vec::from_raw_parts(
            signal_guard.as_mut_ptr() as *mut f64,
            signal_guard.len(),
            signal_guard.capacity(),
        )
    };

    Ok(LeavittConvolutionAccelerationBatchOutput {
        conv_acceleration,
        signal,
        combos,
        rows,
        cols,
    })
}
1127
/// Computes the full batch sweep into caller-provided flat buffers
/// (`rows * cols` each), used by the Python and WASM zero-copy paths.
///
/// Both buffers are NaN-filled first, then each combo's row is computed in
/// place. NOTE(review): unlike `leavitt_convolution_acceleration_batch_impl`,
/// this path does not pre-validate warmup/valid-bar requirements — presumably
/// the compute kernel leaves unproducible cells as NaN; confirm against
/// `leavitt_convolution_acceleration_compute_into`.
///
/// # Errors
/// `OutputLengthMismatch` when either buffer is not exactly `rows * cols`.
fn leavitt_convolution_acceleration_batch_inner_into(
    data: &[f64],
    sweep: &LeavittConvolutionAccelerationBatchRange,
    kernel: Kernel,
    parallel: bool,
    out_conv_acceleration: &mut [f64],
    out_signal: &mut [f64],
) -> Result<(), LeavittConvolutionAccelerationError> {
    let combos = expand_grid_leavitt_convolution_acceleration(sweep)?;
    let rows = combos.len();
    let cols = data.len();
    if out_conv_acceleration.len() != rows * cols || out_signal.len() != rows * cols {
        return Err(LeavittConvolutionAccelerationError::OutputLengthMismatch {
            expected: rows * cols,
            got: out_conv_acceleration.len().max(out_signal.len()),
        });
    }

    // Warmup prefixes (and any cells the kernel never writes) read as NaN.
    out_conv_acceleration.fill(f64::NAN);
    out_signal.fill(f64::NAN);

    if parallel {
        #[cfg(not(target_arch = "wasm32"))]
        {
            // Pointers are laundered through usize so the closure is Send;
            // each row writes a disjoint `cols`-wide window, so no aliasing.
            let ptr_conv = out_conv_acceleration.as_mut_ptr() as usize;
            let ptr_signal = out_signal.as_mut_ptr() as usize;
            combos.par_iter().enumerate().for_each(|(row, params)| {
                let start = row * cols;
                // SAFETY: row windows are disjoint and in-bounds — buffer
                // lengths were checked to equal rows * cols above.
                let out_conv = unsafe {
                    std::slice::from_raw_parts_mut((ptr_conv as *mut f64).add(start), cols)
                };
                let out_sig = unsafe {
                    std::slice::from_raw_parts_mut((ptr_signal as *mut f64).add(start), cols)
                };
                leavitt_convolution_acceleration_compute_into(
                    data,
                    params.length.unwrap_or(DEFAULT_LENGTH),
                    params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
                    params.use_norm_hyperbolic.unwrap_or(true),
                    kernel,
                    out_conv,
                    out_sig,
                );
            });
        }
        // wasm32 has no rayon: honor the "parallel" request serially.
        #[cfg(target_arch = "wasm32")]
        for (row, params) in combos.iter().enumerate() {
            let start = row * cols;
            let end = start + cols;
            leavitt_convolution_acceleration_compute_into(
                data,
                params.length.unwrap_or(DEFAULT_LENGTH),
                params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
                params.use_norm_hyperbolic.unwrap_or(true),
                kernel,
                &mut out_conv_acceleration[start..end],
                &mut out_signal[start..end],
            );
        }
    } else {
        for (row, params) in combos.iter().enumerate() {
            let start = row * cols;
            let end = start + cols;
            leavitt_convolution_acceleration_compute_into(
                data,
                params.length.unwrap_or(DEFAULT_LENGTH),
                params.norm_length.unwrap_or(DEFAULT_NORM_LENGTH),
                params.use_norm_hyperbolic.unwrap_or(true),
                kernel,
                &mut out_conv_acceleration[start..end],
                &mut out_signal[start..end],
            );
        }
    }

    Ok(())
}
1205
/// Python binding: computes the indicator over a 1-D float64 array and
/// returns `(conv_acceleration, signal)` as NumPy arrays.
///
/// The GIL is released while the Rust computation runs; indicator errors
/// surface as `ValueError`.
#[cfg(feature = "python")]
#[pyfunction(name = "leavitt_convolution_acceleration")]
#[pyo3(signature = (data, length=DEFAULT_LENGTH, norm_length=DEFAULT_NORM_LENGTH, use_norm_hyperbolic=true, kernel=None))]
pub fn leavitt_convolution_acceleration_py<'py>(
    py: Python<'py>,
    data: PyReadonlyArray1<'py, f64>,
    length: usize,
    norm_length: usize,
    use_norm_hyperbolic: bool,
    kernel: Option<&str>,
) -> PyResult<(Bound<'py, PyArray1<f64>>, Bound<'py, PyArray1<f64>>)> {
    let data = data.as_slice()?;
    // `false`: batch-only kernels are not valid for the single-series path.
    let kernel = validate_kernel(kernel, false)?;
    let input = LeavittConvolutionAccelerationInput::from_slice(
        data,
        LeavittConvolutionAccelerationParams {
            length: Some(length),
            norm_length: Some(norm_length),
            use_norm_hyperbolic: Some(use_norm_hyperbolic),
        },
    );
    let output = py
        .allow_threads(|| leavitt_convolution_acceleration_with_kernel(&input, kernel))
        .map_err(|e| PyValueError::new_err(e.to_string()))?;
    Ok((
        output.conv_acceleration.into_pyarray(py),
        output.signal.into_pyarray(py),
    ))
}
1235
/// Python wrapper around the incremental (bar-by-bar) stream computation.
#[cfg(feature = "python")]
#[pyclass(name = "LeavittConvolutionAccelerationStream")]
pub struct LeavittConvolutionAccelerationStreamPy {
    // Underlying Rust stream state.
    stream: LeavittConvolutionAccelerationStream,
}
1241
#[cfg(feature = "python")]
#[pymethods]
impl LeavittConvolutionAccelerationStreamPy {
    /// Creates a stream with the given parameters; invalid parameters raise
    /// `ValueError`.
    #[new]
    #[pyo3(signature = (length=DEFAULT_LENGTH, norm_length=DEFAULT_NORM_LENGTH, use_norm_hyperbolic=true))]
    fn new(length: usize, norm_length: usize, use_norm_hyperbolic: bool) -> PyResult<Self> {
        let stream =
            LeavittConvolutionAccelerationStream::try_new(LeavittConvolutionAccelerationParams {
                length: Some(length),
                norm_length: Some(norm_length),
                use_norm_hyperbolic: Some(use_norm_hyperbolic),
            })
            .map_err(|e| PyValueError::new_err(e.to_string()))?;
        Ok(Self { stream })
    }

    /// Feeds one value into the stream; returns `(conv_acceleration, signal)`
    /// once warmed up, `None` during the warmup period.
    fn update(&mut self, value: f64) -> Option<(f64, f64)> {
        self.stream.update(value)
    }
}
1262
/// Python binding for the batch sweep: computes every (length, norm_length)
/// combination and returns a dict with 2-D result arrays plus the parameter
/// axes that label the rows.
///
/// Output arrays are created uninitialized on the Python heap and filled in
/// Rust, avoiding an extra allocation/copy.
#[cfg(feature = "python")]
#[pyfunction(name = "leavitt_convolution_acceleration_batch")]
#[pyo3(signature = (data, length_range, norm_length_range, use_norm_hyperbolic=true, kernel=None))]
pub fn leavitt_convolution_acceleration_batch_py<'py>(
    py: Python<'py>,
    data: PyReadonlyArray1<'py, f64>,
    length_range: (usize, usize, usize),
    norm_length_range: (usize, usize, usize),
    use_norm_hyperbolic: bool,
    kernel: Option<&str>,
) -> PyResult<Bound<'py, PyDict>> {
    let data = data.as_slice()?;
    let sweep = LeavittConvolutionAccelerationBatchRange {
        length: length_range,
        norm_length: norm_length_range,
        use_norm_hyperbolic: Some(use_norm_hyperbolic),
    };
    let combos = expand_grid_leavitt_convolution_acceleration(&sweep)
        .map_err(|e| PyValueError::new_err(e.to_string()))?;
    let rows = combos.len();
    let cols = data.len();
    let total = rows
        .checked_mul(cols)
        .ok_or_else(|| PyValueError::new_err("rows*cols overflow"))?;
    // SAFETY: arrays are created uninitialized but fully written below —
    // `..._batch_inner_into` NaN-fills both buffers before computing rows.
    let conv_arr = unsafe { PyArray1::<f64>::new(py, [total], false) };
    let signal_arr = unsafe { PyArray1::<f64>::new(py, [total], false) };
    // SAFETY: we hold the only references to the freshly created arrays.
    let out_conv = unsafe { conv_arr.as_slice_mut()? };
    let out_signal = unsafe { signal_arr.as_slice_mut()? };
    let kernel = validate_kernel(kernel, true)?;

    // Release the GIL for the (potentially parallel) heavy computation.
    py.allow_threads(|| {
        let batch_kernel = match kernel {
            Kernel::Auto => detect_best_batch_kernel(),
            other => other,
        };
        leavitt_convolution_acceleration_batch_inner_into(
            data,
            &sweep,
            batch_kernel.to_non_batch(),
            true,
            out_conv,
            out_signal,
        )
    })
    .map_err(|e| PyValueError::new_err(e.to_string()))?;

    // Per-row parameter axes so callers can label the result rows.
    let lengths: Vec<usize> = combos
        .iter()
        .map(|p| p.length.unwrap_or(DEFAULT_LENGTH))
        .collect();
    let norm_lengths: Vec<usize> = combos
        .iter()
        .map(|p| p.norm_length.unwrap_or(DEFAULT_NORM_LENGTH))
        .collect();
    let dict = PyDict::new(py);
    dict.set_item("conv_acceleration", conv_arr.reshape((rows, cols))?)?;
    dict.set_item("signal", signal_arr.reshape((rows, cols))?)?;
    dict.set_item("rows", rows)?;
    dict.set_item("cols", cols)?;
    dict.set_item("lengths", lengths.into_pyarray(py))?;
    dict.set_item("norm_lengths", norm_lengths.into_pyarray(py))?;
    dict.set_item("use_norm_hyperbolic", use_norm_hyperbolic)?;
    Ok(dict)
}
1327
/// Registers this indicator's Python functions and streaming class on `m`.
#[cfg(feature = "python")]
pub fn register_leavitt_convolution_acceleration_module(m: &Bound<'_, PyModule>) -> PyResult<()> {
    m.add_class::<LeavittConvolutionAccelerationStreamPy>()?;
    m.add_function(wrap_pyfunction!(leavitt_convolution_acceleration_py, m)?)?;
    m.add_function(wrap_pyfunction!(leavitt_convolution_acceleration_batch_py, m)?)?;
    Ok(())
}
1338
/// Result shape serialized to JS by `leavitt_convolution_acceleration_js`.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Debug, Clone, Serialize, Deserialize)]
struct LeavittConvolutionAccelerationJsOutput {
    // Acceleration series; NaN during the warmup prefix.
    conv_acceleration: Vec<f64>,
    // Discrete signal series (values in {-2, -1, 0, 1, 2}).
    signal: Vec<f64>,
}
1345
/// JS-side batch configuration: each range is `[start, end, step]`.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Debug, Clone, Serialize, Deserialize)]
struct LeavittConvolutionAccelerationBatchConfig {
    // Regression length axis as [start, end, step].
    length_range: Vec<usize>,
    // Normalization length axis as [start, end, step].
    norm_length_range: Vec<usize>,
    // Shared flag for all combos; defaults handled downstream when None.
    use_norm_hyperbolic: Option<bool>,
}
1353
/// Batch result serialized to JS: flattened row-major `rows x cols` matrices
/// plus the parameter combo describing each row.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[derive(Debug, Clone, Serialize, Deserialize)]
struct LeavittConvolutionAccelerationBatchJsOutput {
    conv_acceleration: Vec<f64>,
    signal: Vec<f64>,
    rows: usize,
    cols: usize,
    combos: Vec<LeavittConvolutionAccelerationParams>,
}
1363
/// WASM binding: computes the indicator on the scalar kernel and returns a
/// serialized `{ conv_acceleration, signal }` object.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn leavitt_convolution_acceleration_js(
    data: &[f64],
    length: usize,
    norm_length: usize,
    use_norm_hyperbolic: bool,
) -> Result<JsValue, JsValue> {
    let input = LeavittConvolutionAccelerationInput::from_slice(
        data,
        LeavittConvolutionAccelerationParams {
            length: Some(length),
            norm_length: Some(norm_length),
            use_norm_hyperbolic: Some(use_norm_hyperbolic),
        },
    );
    let output = leavitt_convolution_acceleration_with_kernel(&input, Kernel::Scalar)
        .map_err(|e| JsValue::from_str(&e.to_string()))?;
    serde_wasm_bindgen::to_value(&LeavittConvolutionAccelerationJsOutput {
        conv_acceleration: output.conv_acceleration,
        signal: output.signal,
    })
    .map_err(|e| JsValue::from_str(&format!("Serialization error: {e}")))
}
1388
/// WASM batch binding: deserializes a `{ length_range, norm_length_range,
/// use_norm_hyperbolic }` config, runs the serial batch sweep, and returns
/// the serialized matrices plus per-row combos.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen(js_name = "leavitt_convolution_acceleration_batch_js")]
pub fn leavitt_convolution_acceleration_batch_js(
    data: &[f64],
    config: JsValue,
) -> Result<JsValue, JsValue> {
    let config: LeavittConvolutionAccelerationBatchConfig = serde_wasm_bindgen::from_value(config)
        .map_err(|e| JsValue::from_str(&format!("Invalid config: {e}")))?;
    // Ranges arrive as JS arrays; enforce the [start, end, step] shape.
    if config.length_range.len() != 3 || config.norm_length_range.len() != 3 {
        return Err(JsValue::from_str(
            "Invalid config: each range must have exactly 3 elements [start, end, step]",
        ));
    }
    let sweep = LeavittConvolutionAccelerationBatchRange {
        length: (
            config.length_range[0],
            config.length_range[1],
            config.length_range[2],
        ),
        norm_length: (
            config.norm_length_range[0],
            config.norm_length_range[1],
            config.norm_length_range[2],
        ),
        use_norm_hyperbolic: config.use_norm_hyperbolic,
    };
    let batch = leavitt_convolution_acceleration_batch_slice(data, &sweep)
        .map_err(|e| JsValue::from_str(&e.to_string()))?;
    serde_wasm_bindgen::to_value(&LeavittConvolutionAccelerationBatchJsOutput {
        conv_acceleration: batch.conv_acceleration,
        signal: batch.signal,
        rows: batch.rows,
        cols: batch.cols,
        combos: batch.combos,
    })
    .map_err(|e| JsValue::from_str(&format!("Serialization error: {e}")))
}
1426
/// Allocates a zeroed `2 * len` f64 buffer in WASM linear memory and leaks it
/// to the caller; pair with `leavitt_convolution_acceleration_free` using the
/// same `len` to release it.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn leavitt_convolution_acceleration_alloc(len: usize) -> *mut f64 {
    // `len * 2` must not wrap: usize is 32-bit on wasm32, and a wrapped size
    // would hand JS a buffer far smaller than the `2 * len` it believes it
    // owns. Panic (-> JS exception) instead of silently corrupting memory.
    let total = len
        .checked_mul(2)
        .expect("leavitt_convolution_acceleration_alloc: len * 2 overflows usize");
    let mut buf = vec![0.0_f64; total];
    let ptr = buf.as_mut_ptr();
    // Intentionally leak; ownership transfers to the JS caller.
    std::mem::forget(buf);
    ptr
}
1435
/// Frees a buffer previously returned by
/// `leavitt_convolution_acceleration_alloc`; `len` must be the same value
/// passed to the allocator. Null pointers are ignored.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn leavitt_convolution_acceleration_free(ptr: *mut f64, len: usize) {
    if ptr.is_null() {
        return;
    }
    // SAFETY: caller guarantees `ptr` came from the paired alloc with the
    // same `len`, so length/capacity (`len * 2`) match the original Vec.
    unsafe {
        let _ = Vec::from_raw_parts(ptr, len * 2, len * 2);
    }
}
1446
/// WASM zero-copy path: computes directly between raw linear-memory pointers.
///
/// `data_ptr` must point to `len` f64s and `out_ptr` to `2 * len` f64s laid
/// out as `[conv_acceleration | signal]` (e.g. a buffer from
/// `leavitt_convolution_acceleration_alloc`). Pointers must be valid,
/// aligned, and non-overlapping — that contract rests on the JS caller.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn leavitt_convolution_acceleration_into(
    data_ptr: *const f64,
    out_ptr: *mut f64,
    len: usize,
    length: usize,
    norm_length: usize,
    use_norm_hyperbolic: bool,
) -> Result<(), JsValue> {
    if data_ptr.is_null() || out_ptr.is_null() {
        return Err(JsValue::from_str(
            "null pointer passed to leavitt_convolution_acceleration_into",
        ));
    }
    // SAFETY: caller upholds the pointer/size contract documented above.
    unsafe {
        let data = std::slice::from_raw_parts(data_ptr, len);
        let out = std::slice::from_raw_parts_mut(out_ptr, len * 2);
        // First half receives the acceleration, second half the signal.
        let (out_conv, out_signal) = out.split_at_mut(len);
        let input = LeavittConvolutionAccelerationInput::from_slice(
            data,
            LeavittConvolutionAccelerationParams {
                length: Some(length),
                norm_length: Some(norm_length),
                use_norm_hyperbolic: Some(use_norm_hyperbolic),
            },
        );
        leavitt_convolution_acceleration_into_slice(out_conv, out_signal, &input, Kernel::Auto)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
1478
/// Like `leavitt_convolution_acceleration_into`, but the input arrives as a
/// JS-owned slice while only the output is written through a raw pointer to
/// `2 * data.len()` f64s laid out as `[conv_acceleration | signal]`.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen(js_name = "leavitt_convolution_acceleration_into_host")]
pub fn leavitt_convolution_acceleration_into_host(
    data: &[f64],
    out_ptr: *mut f64,
    length: usize,
    norm_length: usize,
    use_norm_hyperbolic: bool,
) -> Result<(), JsValue> {
    if out_ptr.is_null() {
        return Err(JsValue::from_str(
            "null pointer passed to leavitt_convolution_acceleration_into_host",
        ));
    }
    // SAFETY: caller guarantees `out_ptr` addresses 2 * data.len() valid,
    // aligned f64s that do not overlap `data`.
    unsafe {
        let out = std::slice::from_raw_parts_mut(out_ptr, data.len() * 2);
        let (out_conv, out_signal) = out.split_at_mut(data.len());
        let input = LeavittConvolutionAccelerationInput::from_slice(
            data,
            LeavittConvolutionAccelerationParams {
                length: Some(length),
                norm_length: Some(norm_length),
                use_norm_hyperbolic: Some(use_norm_hyperbolic),
            },
        );
        leavitt_convolution_acceleration_into_slice(out_conv, out_signal, &input, Kernel::Auto)
            .map_err(|e| JsValue::from_str(&e.to_string()))
    }
}
1508
/// WASM batch zero-copy path: expands the config's sweep and writes
/// `2 * rows * cols` f64s through `out_ptr`, laid out as the full
/// `[conv_acceleration matrix | signal matrix]` (each row-major).
///
/// The caller must size the buffer from the same config; the combo count is
/// re-derived here to compute (and overflow-check) the expected length.
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
#[wasm_bindgen]
pub fn leavitt_convolution_acceleration_batch_into(
    data: &[f64],
    out_ptr: *mut f64,
    config: JsValue,
) -> Result<(), JsValue> {
    if out_ptr.is_null() {
        return Err(JsValue::from_str(
            "null pointer passed to leavitt_convolution_acceleration_batch_into",
        ));
    }
    let config: LeavittConvolutionAccelerationBatchConfig = serde_wasm_bindgen::from_value(config)
        .map_err(|e| JsValue::from_str(&format!("Invalid config: {e}")))?;
    if config.length_range.len() != 3 || config.norm_length_range.len() != 3 {
        return Err(JsValue::from_str(
            "Invalid config: each range must have exactly 3 elements [start, end, step]",
        ));
    }
    let sweep = LeavittConvolutionAccelerationBatchRange {
        length: (
            config.length_range[0],
            config.length_range[1],
            config.length_range[2],
        ),
        norm_length: (
            config.norm_length_range[0],
            config.norm_length_range[1],
            config.norm_length_range[2],
        ),
        use_norm_hyperbolic: config.use_norm_hyperbolic,
    };
    let combos = expand_grid_leavitt_convolution_acceleration(&sweep)
        .map_err(|e| JsValue::from_str(&e.to_string()))?;
    let rows = combos.len();
    let cols = data.len();
    // Overflow-checked total size: rows * cols * 2 output f64s.
    let expected = rows
        .checked_mul(cols)
        .and_then(|x| x.checked_mul(2))
        .ok_or_else(|| JsValue::from_str("rows*cols overflow"))?;
    // SAFETY: caller guarantees `out_ptr` addresses `expected` valid, aligned
    // f64s sized from this same config.
    let out = unsafe { std::slice::from_raw_parts_mut(out_ptr, expected) };
    let (out_conv, out_signal) = out.split_at_mut(rows * cols);
    leavitt_convolution_acceleration_batch_inner_into(
        data,
        &sweep,
        Kernel::Scalar,
        false,
        out_conv,
        out_signal,
    )
    .map_err(|e| JsValue::from_str(&e.to_string()))
}
1561
1562#[cfg(test)]
1563mod tests {
1564    use super::*;
1565    use crate::indicators::dispatch::{
1566        compute_cpu_batch, IndicatorBatchRequest, IndicatorDataRef, IndicatorParamSet, ParamKV,
1567        ParamValue,
1568    };
1569
1570    fn sample_data(len: usize) -> Vec<f64> {
1571        let mut out = Vec::with_capacity(len);
1572        for i in 0..len {
1573            let x = i as f64;
1574            out.push(100.0 + x * 0.04 + (x * 0.13).sin() * 2.7 + (x * 0.07).cos() * 1.1);
1575        }
1576        out
1577    }
1578
1579    fn assert_close_nan(a: &[f64], b: &[f64]) {
1580        assert_eq!(a.len(), b.len());
1581        for (idx, (&lhs, &rhs)) in a.iter().zip(b.iter()).enumerate() {
1582            if lhs.is_nan() || rhs.is_nan() {
1583                assert!(
1584                    lhs.is_nan() && rhs.is_nan(),
1585                    "nan mismatch at {idx}: {lhs} vs {rhs}"
1586                );
1587            } else {
1588                assert!(
1589                    (lhs - rhs).abs() <= 1e-10,
1590                    "value mismatch at {idx}: {lhs} vs {rhs}"
1591                );
1592            }
1593        }
1594    }
1595
1596    fn linreg_value(window: &[f64], offset: isize) -> f64 {
1597        let n = window.len();
1598        if n == 1 {
1599            return window[0];
1600        }
1601        let nf = n as f64;
1602        let mean_x = (n - 1) as f64 / 2.0;
1603        let mean_y = window.iter().sum::<f64>() / nf;
1604        let mut num = 0.0;
1605        let mut den = 0.0;
1606        for (i, &y) in window.iter().enumerate() {
1607            let x = i as f64;
1608            num += (x - mean_x) * (y - mean_y);
1609            den += (x - mean_x) * (x - mean_x);
1610        }
1611        let slope = if den != 0.0 { num / den } else { 0.0 };
1612        let intercept = mean_y - slope * mean_x;
1613        intercept + slope * ((n - 1) as isize - offset) as f64
1614    }
1615
    /// Straightforward O(n * window) reference implementation used to
    /// validate the optimized kernel. Pipeline:
    /// 1. Leavitt projection: OLS fit over `length` bars evaluated one bar
    ///    ahead (offset -1).
    /// 2. Convolution slope: difference between the fits at the last two bars
    ///    of a `sqrt_length(length)` window over the projection.
    /// 3. Normalization: z-score over `norm_length` bars squashed by a
    ///    tanh-style (hyperbolic) or logistic curve; the acceleration is the
    ///    first difference of the squashed value.
    /// 4. Signal: +-1 / +-2 from slope sign, price curvature, and whether the
    ///    slope is strengthening.
    fn naive_expected(
        data: &[f64],
        length: usize,
        norm_length: usize,
        use_norm_hyperbolic: bool,
    ) -> (Vec<f64>, Vec<f64>) {
        let n = data.len();
        let sqrt_len = sqrt_length(length);
        let mut projection = vec![f64::NAN; n];
        let mut conv_slope = vec![f64::NAN; n];
        let mut conv_acceleration = vec![f64::NAN; n];
        let mut signal = vec![f64::NAN; n];

        // Stage 1: one-bar-ahead regression projection per full window.
        for i in 0..n {
            if i + 1 >= length {
                let start = i + 1 - length;
                projection[i] = linreg_value(&data[start..=i], -1);
            }
        }

        // Stage 2: slope of the projection — fit at the last bar minus fit at
        // the bar before, over a sqrt-length window of finite projections.
        for i in 0..n {
            if i + 1 >= sqrt_len {
                let start = i + 1 - sqrt_len;
                let window = &projection[start..=i];
                if window.iter().all(|v| v.is_finite()) {
                    let curr = linreg_value(window, 0);
                    let prev = linreg_value(window, 1);
                    conv_slope[i] = curr - prev;
                }
            }
        }

        // Stage 3 + 4: running state mirrors the streaming implementation —
        // previous squashed value, previous acceleration, previous slope.
        let mut prev_scaled = 0.0;
        let mut prev_conv = 0.0;
        let mut prev_slo = 0.0;
        for i in 0..n {
            if i + 1 >= norm_length {
                let start = i + 1 - norm_length;
                let window = &conv_slope[start..=i];
                if window.iter().all(|v| v.is_finite()) {
                    // Population mean/stddev over the normalization window.
                    let mean = window.iter().sum::<f64>() / norm_length as f64;
                    let variance = window
                        .iter()
                        .map(|v| {
                            let d = *v - mean;
                            d * d
                        })
                        .sum::<f64>()
                        / norm_length as f64;
                    let dev = variance.sqrt();
                    // Z-score of the latest slope; zero when the window is flat.
                    let z = if dev != 0.0 {
                        (conv_slope[i] - mean) / dev
                    } else {
                        0.0
                    };
                    // Squash to (-1, 1) via tanh form, or (0, 1) via logistic.
                    let scaled = if use_norm_hyperbolic {
                        let e = (-z).exp();
                        (1.0 - e) / (1.0 + e)
                    } else {
                        1.0 / (1.0 + (-z).exp())
                    };
                    // Acceleration = first difference of the squashed value.
                    let accel = scaled - prev_scaled;
                    conv_acceleration[i] = accel;
                    // Logistic mode takes a second difference for the signal.
                    let slo = if use_norm_hyperbolic {
                        accel
                    } else {
                        accel - prev_conv
                    };
                    // Missing lagged prices are treated as 0.0 (matches kernel).
                    let src1 = if i >= 1 { data[i - 1] } else { 0.0 };
                    let src2 = if i >= 2 { data[i - 2] } else { 0.0 };
                    // Positive discrete second difference of price (curvature).
                    let is_accelerated = src2 - 2.0 * src1 + data[i] > 0.0;
                    // +-2 when the move is strengthening, +-1 otherwise,
                    // 0 when slope and curvature disagree.
                    signal[i] = if slo > 0.0 && is_accelerated {
                        if slo > prev_slo {
                            2.0
                        } else {
                            1.0
                        }
                    } else if slo < 0.0 && !is_accelerated {
                        if slo < prev_slo {
                            -2.0
                        } else {
                            -1.0
                        }
                    } else {
                        0.0
                    };
                    prev_scaled = scaled;
                    prev_conv = accel;
                    prev_slo = slo;
                }
            }
        }

        (conv_acceleration, signal)
    }
1711
1712    #[test]
1713    fn leavitt_convolution_acceleration_matches_naive() {
1714        let data = sample_data(320);
1715        let input = LeavittConvolutionAccelerationInput::from_slice(
1716            &data,
1717            LeavittConvolutionAccelerationParams {
1718                length: Some(21),
1719                norm_length: Some(34),
1720                use_norm_hyperbolic: Some(true),
1721            },
1722        );
1723        let out = leavitt_convolution_acceleration(&input).expect("indicator");
1724        let (expected_conv, expected_signal) = naive_expected(&data, 21, 34, true);
1725        assert_close_nan(&out.conv_acceleration, &expected_conv);
1726        assert_close_nan(&out.signal, &expected_signal);
1727    }
1728
1729    #[test]
1730    fn leavitt_convolution_acceleration_into_matches_api() {
1731        let data = sample_data(240);
1732        let input = LeavittConvolutionAccelerationInput::from_slice(
1733            &data,
1734            LeavittConvolutionAccelerationParams {
1735                length: Some(14),
1736                norm_length: Some(28),
1737                use_norm_hyperbolic: Some(false),
1738            },
1739        );
1740        let baseline = leavitt_convolution_acceleration(&input).expect("baseline");
1741        let mut conv = vec![0.0; data.len()];
1742        let mut signal = vec![0.0; data.len()];
1743        leavitt_convolution_acceleration_into(&input, &mut conv, &mut signal).expect("into");
1744        assert_close_nan(&conv, &baseline.conv_acceleration);
1745        assert_close_nan(&signal, &baseline.signal);
1746    }
1747
    /// The incremental stream must reproduce the batch output value-for-value;
    /// updates returning `None` correspond to the batch NaN warmup prefix.
    #[test]
    fn leavitt_convolution_acceleration_stream_matches_batch() {
        let data = sample_data(256);
        let params = LeavittConvolutionAccelerationParams {
            length: Some(20),
            norm_length: Some(25),
            use_norm_hyperbolic: Some(true),
        };
        let batch = leavitt_convolution_acceleration(
            &LeavittConvolutionAccelerationInput::from_slice(&data, params.clone()),
        )
        .expect("batch");
        let mut stream = LeavittConvolutionAccelerationStream::try_new(params).expect("stream");
        // Pre-fill with NaN so un-emitted positions align with batch warmup.
        let mut conv = vec![f64::NAN; data.len()];
        let mut sig = vec![f64::NAN; data.len()];
        for (i, &value) in data.iter().enumerate() {
            if let Some((a, b)) = stream.update_reset_on_nan(value) {
                conv[i] = a;
                sig[i] = b;
            }
        }
        assert_close_nan(&conv, &batch.conv_acceleration);
        assert_close_nan(&sig, &batch.signal);
    }
1772
    /// A degenerate sweep (start == end, step 0 on both axes) must yield
    /// exactly one row that matches the single-shot API byte-for-byte.
    #[test]
    fn leavitt_convolution_acceleration_batch_single_param_matches_single() {
        let data = sample_data(220);
        let batch = leavitt_convolution_acceleration_batch_with_kernel(
            &data,
            &LeavittConvolutionAccelerationBatchRange {
                length: (21, 21, 0),
                norm_length: (34, 34, 0),
                use_norm_hyperbolic: Some(false),
            },
            Kernel::ScalarBatch,
        )
        .expect("batch");
        let direct =
            leavitt_convolution_acceleration(&LeavittConvolutionAccelerationInput::from_slice(
                &data,
                LeavittConvolutionAccelerationParams {
                    length: Some(21),
                    norm_length: Some(34),
                    use_norm_hyperbolic: Some(false),
                },
            ))
            .expect("direct");
        assert_eq!(batch.rows, 1);
        assert_eq!(batch.cols, data.len());
        // Row 0 of the flattened matrix is the whole series.
        assert_close_nan(
            &batch.conv_acceleration[..data.len()],
            &direct.conv_acceleration,
        );
        assert_close_nan(&batch.signal[..data.len()], &direct.signal);
    }
1804
1805    #[test]
1806    fn leavitt_convolution_acceleration_rejects_invalid_norm_length() {
1807        let data = sample_data(32);
1808        let input = LeavittConvolutionAccelerationInput::from_slice(
1809            &data,
1810            LeavittConvolutionAccelerationParams {
1811                length: Some(10),
1812                norm_length: Some(0),
1813                use_norm_hyperbolic: Some(true),
1814            },
1815        );
1816        let err = leavitt_convolution_acceleration(&input).expect_err("invalid");
1817        assert!(matches!(
1818            err,
1819            LeavittConvolutionAccelerationError::InvalidNormLength { .. }
1820        ));
1821    }
1822
1823    #[test]
1824    fn leavitt_convolution_acceleration_dispatch_matches_direct() {
1825        let data = sample_data(280);
1826        let combo = [
1827            ParamKV {
1828                key: "length",
1829                value: ParamValue::Int(21),
1830            },
1831            ParamKV {
1832                key: "norm_length",
1833                value: ParamValue::Int(34),
1834            },
1835            ParamKV {
1836                key: "use_norm_hyperbolic",
1837                value: ParamValue::Bool(true),
1838            },
1839        ];
1840        let combos = [IndicatorParamSet { params: &combo }];
1841        let req = IndicatorBatchRequest {
1842            indicator_id: "leavitt_convolution_acceleration",
1843            output_id: Some("signal"),
1844            data: IndicatorDataRef::Slice { values: &data },
1845            combos: &combos,
1846            kernel: Kernel::ScalarBatch,
1847        };
1848        let out = compute_cpu_batch(req).expect("dispatch");
1849        let direct =
1850            leavitt_convolution_acceleration(&LeavittConvolutionAccelerationInput::from_slice(
1851                &data,
1852                LeavittConvolutionAccelerationParams {
1853                    length: Some(21),
1854                    norm_length: Some(34),
1855                    use_norm_hyperbolic: Some(true),
1856                },
1857            ))
1858            .expect("direct");
1859        assert_eq!(out.rows, 1);
1860        assert_eq!(out.cols, data.len());
1861        assert_close_nan(out.values_f64.as_ref().expect("values"), &direct.signal);
1862    }
1863}