use crate::error::WasmError;
use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
/// Single-pass (streaming) accumulator of count, mean, variance,
/// skewness, min and max using incrementally maintained central moments.
#[wasm_bindgen]
pub struct OnlineStats {
    count: u64, // number of observations folded in so far
    m1: f64,    // running mean
    m2: f64,    // Σ(x − mean)² — central second-moment accumulator
    m3: f64,    // Σ(x − mean)³ — central third-moment accumulator
    min: f64,   // +∞ sentinel until the first update
    max: f64,   // −∞ sentinel until the first update
}
#[wasm_bindgen]
impl OnlineStats {
    /// Creates an empty accumulator (count 0, min/max at their sentinels).
    #[wasm_bindgen(constructor)]
    pub fn new() -> OnlineStats {
        OnlineStats {
            count: 0,
            m1: 0.0,
            m2: 0.0,
            m3: 0.0,
            min: f64::INFINITY,
            max: f64::NEG_INFINITY,
        }
    }

    /// Folds one observation into the running moments using the
    /// numerically stable single-pass update (Welford for mean/M2,
    /// Pébay for M3).
    pub fn update(&mut self, x: f64) {
        self.count += 1;
        let n = self.count as f64;
        let delta = x - self.m1;
        let delta_n = delta / n;
        let term1 = delta * delta_n * (n - 1.0);
        self.m1 += delta_n;
        // BUGFIX: the M3 increment's first term is `term1 * delta_n * (n-2)`;
        // the old code used `delta_n²`, which belongs to the fourth-moment
        // (M4) update and multiplied the term by a spurious extra factor of
        // delta/n, corrupting every skewness result.
        self.m3 += term1 * delta_n * (n - 2.0) - 3.0 * delta_n * self.m2;
        // M2 must be updated after M3 — the M3 formula uses the old M2.
        self.m2 += term1;
        if x < self.min {
            self.min = x;
        }
        if x > self.max {
            self.max = x;
        }
    }

    /// Convenience wrapper: calls `update` once per element of `xs`.
    pub fn update_batch(&mut self, xs: &[f64]) {
        for &x in xs {
            self.update(x);
        }
    }

    /// Number of observations seen so far.
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Arithmetic mean; NaN when no data has been seen.
    pub fn mean(&self) -> f64 {
        if self.count == 0 {
            f64::NAN
        } else {
            self.m1
        }
    }

    /// Population variance (divides by n); NaN for 0 samples, 0 for 1.
    pub fn variance(&self) -> f64 {
        if self.count < 2 {
            return if self.count == 0 { f64::NAN } else { 0.0 };
        }
        self.m2 / self.count as f64
    }

    /// Sample variance (divides by n−1); NaN for 0 samples, 0 for 1.
    pub fn sample_variance(&self) -> f64 {
        if self.count < 2 {
            return if self.count == 0 { f64::NAN } else { 0.0 };
        }
        self.m2 / (self.count - 1) as f64
    }

    /// Population standard deviation (sqrt of `variance`).
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Population skewness g1 = (M3/n) / (M2/n)^(3/2); NaN for fewer
    /// than 3 samples, 0 when the variance is exactly zero.
    pub fn skewness(&self) -> f64 {
        if self.count < 3 {
            return f64::NAN;
        }
        let n = self.count as f64;
        let variance = self.m2 / n;
        if variance == 0.0 {
            return 0.0;
        }
        (self.m3 / n) / variance.powf(1.5)
    }

    /// Smallest observation; NaN when empty — consistent with `mean()`
    /// and with `RollingWindow::min`, instead of leaking the +∞ sentinel.
    pub fn min(&self) -> f64 {
        if self.count == 0 {
            f64::NAN
        } else {
            self.min
        }
    }

    /// Largest observation; NaN when empty instead of the −∞ sentinel.
    pub fn max(&self) -> f64 {
        if self.count == 0 {
            f64::NAN
        } else {
            self.max
        }
    }

    /// Restores the freshly-constructed state.
    pub fn reset(&mut self) {
        self.count = 0;
        self.m1 = 0.0;
        self.m2 = 0.0;
        self.m3 = 0.0;
        self.min = f64::INFINITY;
        self.max = f64::NEG_INFINITY;
    }

    /// Serializes every derived statistic as a JSON object string.
    pub fn snapshot_json(&self) -> Result<String, JsValue> {
        let obj = serde_json::json!({
            "count": self.count,
            "mean": self.mean(),
            "variance": self.variance(),
            "sample_variance": self.sample_variance(),
            "std_dev": self.std_dev(),
            "skewness": self.skewness(),
            "min": self.min(),
            "max": self.max(),
        });
        serde_json::to_string(&obj)
            .map_err(|e| WasmError::SerializationError(e.to_string()).into())
    }
}
/// `Default` mirrors `new()`: an empty accumulator.
impl Default for OnlineStats {
    fn default() -> Self {
        Self::new()
    }
}
/// Fixed-capacity ring buffer over f64 samples with O(1) running
/// mean/variance via incrementally maintained Σx and Σx².
#[wasm_bindgen]
pub struct RollingWindow {
    buffer: Vec<f64>,             // ring storage, length == capacity
    head: usize,                  // next write slot (== oldest sample once full)
    filled: usize,                // number of valid samples (≤ capacity)
    capacity: usize,
    sum: f64,                     // running Σx over the live samples
    sum_sq: f64,                  // running Σx² over the live samples
    updates_since_recompute: u64, // pushes since the last full re-summation
    recompute_interval: u64,      // pushes between re-summations (float-drift control)
}
#[wasm_bindgen]
impl RollingWindow {
    /// Builds a window holding up to `capacity` samples.
    /// Errors when `capacity` is zero.
    #[wasm_bindgen(constructor)]
    pub fn new(capacity: usize) -> Result<RollingWindow, JsValue> {
        if capacity == 0 {
            return Err(
                WasmError::InvalidParameter("RollingWindow: capacity must be > 0".to_string())
                    .into(),
            );
        }
        Ok(RollingWindow {
            buffer: vec![0.0; capacity],
            head: 0,
            filled: 0,
            capacity,
            sum: 0.0,
            sum_sq: 0.0,
            updates_since_recompute: 0,
            recompute_interval: capacity as u64 * 10,
        })
    }

    /// Appends a sample, evicting the oldest one once the window is full.
    pub fn push(&mut self, x: f64) {
        if self.filled < self.capacity {
            self.filled += 1;
        } else {
            // Evicting: remove the outgoing sample's contribution before
            // overwriting its slot.
            let evicted = self.buffer[self.head];
            self.sum -= evicted;
            self.sum_sq -= evicted * evicted;
        }
        self.buffer[self.head] = x;
        self.head = (self.head + 1) % self.capacity;
        self.sum += x;
        self.sum_sq += x * x;
        self.updates_since_recompute += 1;
        // Periodically re-derive the running sums from scratch so that
        // incremental add/subtract rounding error cannot accumulate.
        if self.updates_since_recompute >= self.recompute_interval {
            self.recompute_sums();
        }
    }

    /// Pushes each element of `xs` in order.
    pub fn push_batch(&mut self, xs: &[f64]) {
        for &sample in xs {
            self.push(sample);
        }
    }

    /// Number of live samples currently in the window.
    pub fn len(&self) -> usize {
        self.filled
    }

    /// True when no samples have been pushed yet.
    pub fn is_empty(&self) -> bool {
        self.filled == 0
    }

    /// Maximum number of samples the window retains.
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Mean of the live samples; NaN when empty.
    pub fn mean(&self) -> f64 {
        match self.filled {
            0 => f64::NAN,
            n => self.sum / n as f64,
        }
    }

    /// Population variance of the live samples; NaN when empty, 0 for a
    /// single sample. Clamped at zero to absorb rounding in Σx²/n − mean².
    pub fn variance(&self) -> f64 {
        match self.filled {
            0 => f64::NAN,
            1 => 0.0,
            _ => {
                let n = self.filled as f64;
                let mu = self.sum / n;
                (self.sum_sq / n - mu * mu).max(0.0)
            }
        }
    }

    /// Population standard deviation (sqrt of `variance`).
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Smallest live sample; NaN when empty.
    pub fn min(&self) -> f64 {
        if self.is_empty() {
            return f64::NAN;
        }
        self.current_slice()
            .into_iter()
            .fold(f64::INFINITY, f64::min)
    }

    /// Largest live sample; NaN when empty.
    pub fn max(&self) -> f64 {
        if self.is_empty() {
            return f64::NAN;
        }
        self.current_slice()
            .into_iter()
            .fold(f64::NEG_INFINITY, f64::max)
    }

    /// Copies the live samples out in insertion order (oldest first).
    pub fn to_vec(&self) -> Vec<f64> {
        self.current_slice()
    }

    /// Live samples narrowed to f32, oldest first.
    pub fn window_as_f32(&self) -> Vec<f32> {
        self.current_slice()
            .into_iter()
            .map(|v| v as f32)
            .collect()
    }

    /// Serializes the window's summary statistics as a JSON object string.
    pub fn stats_json(&self) -> Result<String, JsValue> {
        let obj = serde_json::json!({
            "len": self.filled,
            "capacity": self.capacity,
            "mean": self.mean(),
            "variance": self.variance(),
            "std_dev": self.std_dev(),
            "min": self.min(),
            "max": self.max(),
        });
        serde_json::to_string(&obj)
            .map_err(|e| WasmError::SerializationError(e.to_string()).into())
    }

    /// Clears all samples and running sums (capacity is unchanged).
    pub fn reset(&mut self) {
        self.head = 0;
        self.filled = 0;
        self.sum = 0.0;
        self.sum_sq = 0.0;
        self.updates_since_recompute = 0;
        self.buffer.fill(0.0);
    }
}
impl RollingWindow {
    /// Returns the live samples in insertion order (oldest first),
    /// unwrapping the ring layout into a fresh Vec.
    fn current_slice(&self) -> Vec<f64> {
        if self.filled == self.capacity {
            // Full ring: the oldest sample sits at `head`.
            let (newer, older) = self.buffer.split_at(self.head);
            let mut ordered = Vec::with_capacity(self.capacity);
            ordered.extend_from_slice(older);
            ordered.extend_from_slice(newer);
            ordered
        } else {
            // Not yet wrapped: samples occupy the prefix in order.
            self.buffer[..self.filled].to_vec()
        }
    }

    /// Re-derives Σx and Σx² from the raw samples, cancelling any
    /// floating-point drift accumulated by incremental add/subtract.
    fn recompute_sums(&mut self) {
        let live = self.current_slice();
        self.sum = live.iter().sum();
        self.sum_sq = live.iter().map(|&x| x * x).sum();
        self.updates_since_recompute = 0;
    }
}
/// STFT-style streamer: accumulates samples, applies a Hann window, and
/// queues naive-DFT frames (interleaved complex or magnitude spectrum).
#[wasm_bindgen]
pub struct StreamingFFT {
    window_size: usize,           // samples per analysis frame (≥ 2)
    hop_size: usize,              // advance between frames, in [1, window_size]
    input_buf: Vec<f64>,          // pending samples, length == window_size
    buf_fill: usize,              // valid samples currently in input_buf
    output_frames: Vec<Vec<f32>>, // FIFO of frames awaiting pop_frame()
    magnitude_only: bool,         // true → |X[k]| bins; false → interleaved re/im
    window_fn: Vec<f64>,          // precomputed Hann coefficients
}
#[wasm_bindgen]
impl StreamingFFT {
    /// Creates a streamer producing Hann-windowed DFT frames of
    /// `window_size` samples, advancing `hop_size` samples per frame.
    ///
    /// Errors when `window_size < 2` or `hop_size` is outside
    /// `[1, window_size]`.
    #[wasm_bindgen(constructor)]
    pub fn new(
        window_size: usize,
        hop_size: usize,
        magnitude_only: bool,
    ) -> Result<StreamingFFT, JsValue> {
        if window_size < 2 {
            return Err(WasmError::InvalidParameter(
                "StreamingFFT: window_size must be ≥ 2".to_string(),
            )
            .into());
        }
        if hop_size == 0 || hop_size > window_size {
            return Err(WasmError::InvalidParameter(format!(
                "StreamingFFT: hop_size must be in [1, {}], got {}",
                window_size, hop_size
            ))
            .into());
        }
        // Symmetric Hann window (N−1 denominator); window_size ≥ 2
        // guarantees no division by zero.
        let window_fn: Vec<f64> = (0..window_size)
            .map(|i| {
                0.5 * (1.0
                    - (2.0 * std::f64::consts::PI * i as f64 / (window_size - 1) as f64).cos())
            })
            .collect();
        Ok(StreamingFFT {
            window_size,
            hop_size,
            input_buf: vec![0.0; window_size],
            buf_fill: 0,
            output_frames: Vec::new(),
            magnitude_only,
            window_fn,
        })
    }

    /// Appends samples; each time the window fills, emits one frame and
    /// slides the window forward by `hop_size`.
    ///
    /// BUGFIX: the previous version left `buf_fill` pinned at
    /// `window_size` once the buffer first filled, so it emitted a frame
    /// for *every* subsequent sample, and its shift wrote only one new
    /// sample while leaving stale data in the buffer tail. Now the
    /// window keeps its `window_size - hop_size` overlap and waits for
    /// `hop_size` fresh samples before the next frame.
    pub fn push_samples(&mut self, samples: &[f64]) {
        for &s in samples {
            self.input_buf[self.buf_fill] = s;
            self.buf_fill += 1;
            if self.buf_fill == self.window_size {
                self.emit_frame();
                // Slide left by hop_size, retaining the overlapping tail.
                self.input_buf.copy_within(self.hop_size.., 0);
                self.buf_fill = self.window_size - self.hop_size;
            }
        }
    }

    /// True when at least one frame is queued.
    pub fn has_frame(&self) -> bool {
        !self.output_frames.is_empty()
    }

    /// Removes and returns the oldest queued frame; empty Vec when none.
    pub fn pop_frame(&mut self) -> Vec<f32> {
        if self.output_frames.is_empty() {
            Vec::new()
        } else {
            // remove(0) is O(len) but the queue is expected to stay short.
            self.output_frames.remove(0)
        }
    }

    /// Number of frames currently queued.
    pub fn pending_frames(&self) -> usize {
        self.output_frames.len()
    }

    /// Configured samples per frame.
    pub fn window_size(&self) -> usize {
        self.window_size
    }

    /// Configured advance between frames.
    pub fn hop_size(&self) -> usize {
        self.hop_size
    }

    /// Zero-pads any buffered samples to a full window and emits one
    /// final frame. With hop_size < window_size the buffered samples
    /// include the overlap retained from the previous frame.
    pub fn flush(&mut self) {
        if self.buf_fill > 0 {
            for slot in &mut self.input_buf[self.buf_fill..] {
                *slot = 0.0;
            }
            self.emit_frame();
            self.buf_fill = 0;
        }
    }

    /// Discards all buffered samples and queued frames.
    pub fn reset(&mut self) {
        self.buf_fill = 0;
        self.output_frames.clear();
        for v in &mut self.input_buf {
            *v = 0.0;
        }
    }
}
impl StreamingFFT {
    /// Applies the Hann window to the buffered samples and appends the
    /// transformed frame (magnitude or interleaved complex) to the queue.
    fn emit_frame(&mut self) {
        let windowed: Vec<f64> = self
            .window_fn
            .iter()
            .zip(&self.input_buf[..self.window_size])
            .map(|(&w, &s)| s * w)
            .collect();
        let frame = if self.magnitude_only {
            dft_magnitude(&windowed)
        } else {
            dft_interleaved(&windowed)
        };
        self.output_frames.push(frame);
    }
}
/// Naive O(n²) DFT of `x`, returned as interleaved `[re0, im0, re1, im1, …]`
/// narrowed to f32. All n bins are produced (no Hermitian folding).
fn dft_interleaved(x: &[f64]) -> Vec<f32> {
    let n = x.len();
    let step = 2.0 * std::f64::consts::PI / n as f64;
    let mut out = Vec::with_capacity(n * 2);
    for k in 0..n {
        let (mut re, mut im) = (0.0_f64, 0.0_f64);
        for (j, &sample) in x.iter().enumerate() {
            let theta = step * k as f64 * j as f64;
            re += sample * theta.cos();
            im -= sample * theta.sin();
        }
        out.push(re as f32);
        out.push(im as f32);
    }
    out
}
/// Naive O(n²) DFT magnitude spectrum of real input `x`: only the
/// non-redundant n/2 + 1 bins are computed, narrowed to f32.
fn dft_magnitude(x: &[f64]) -> Vec<f32> {
    let n = x.len();
    let step = 2.0 * std::f64::consts::PI / n as f64;
    let bins = n / 2 + 1;
    let mut out = Vec::with_capacity(bins);
    for k in 0..bins {
        let (mut re, mut im) = (0.0_f64, 0.0_f64);
        for (j, &sample) in x.iter().enumerate() {
            let theta = step * k as f64 * j as f64;
            re += sample * theta.cos();
            im -= sample * theta.sin();
        }
        out.push((re * re + im * im).sqrt() as f32);
    }
    out
}
/// Configuration for `BufferedProcessor`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BufferedProcessorConfig {
    pub block_size: usize,    // samples per processed block (≥ 1)
    pub pad_last_block: bool, // flush(): zero-pad and emit a partial block?
    pub downsample: usize,    // decimation factor; must divide block_size
}
/// Summary statistics and downsampled output for one complete block.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProcessedBlock {
    pub block_index: u64,   // 0-based sequence number of the block
    pub input_count: usize, // number of input samples in the block
    pub mean: f64,
    pub std_dev: f64,       // population standard deviation
    pub rms: f64,           // root mean square of the samples
    pub peak: f64,          // largest absolute sample value
    pub output: Vec<f32>,   // chunk-averaged (downsampled) samples
}
/// Accumulates samples into fixed-size blocks and queues a
/// `ProcessedBlock` (stats + downsampled data) per completed block.
#[wasm_bindgen]
pub struct BufferedProcessor {
    config: BufferedProcessorConfig,
    accumulator: Vec<f64>,             // in-progress block, length == block_size
    acc_fill: usize,                   // valid samples currently accumulated
    output_queue: Vec<ProcessedBlock>, // FIFO of finished blocks
    block_index: u64,                  // index assigned to the next finished block
}
#[wasm_bindgen]
impl BufferedProcessor {
    /// Validates the configuration and creates an empty processor.
    ///
    /// Errors when `block_size` or `downsample` is zero, or when
    /// `block_size` is not a multiple of `downsample`.
    #[wasm_bindgen(constructor)]
    pub fn new(
        block_size: usize,
        downsample: usize,
        pad_last_block: bool,
    ) -> Result<BufferedProcessor, JsValue> {
        if block_size == 0 {
            return Err(
                WasmError::InvalidParameter("BufferedProcessor: block_size must be ≥ 1".to_string())
                    .into(),
            );
        }
        if downsample == 0 {
            return Err(
                WasmError::InvalidParameter("BufferedProcessor: downsample must be ≥ 1".to_string())
                    .into(),
            );
        }
        if block_size % downsample != 0 {
            return Err(WasmError::InvalidParameter(format!(
                "BufferedProcessor: block_size {} must be divisible by downsample {}",
                block_size, downsample
            ))
            .into());
        }
        Ok(BufferedProcessor {
            config: BufferedProcessorConfig {
                block_size,
                pad_last_block,
                downsample,
            },
            accumulator: vec![0.0; block_size],
            acc_fill: 0,
            output_queue: Vec::new(),
            block_index: 0,
        })
    }

    /// Accumulates samples; each time a block fills, processes it and
    /// queues the result.
    pub fn push(&mut self, samples: &[f64]) {
        for &sample in samples {
            self.accumulator[self.acc_fill] = sample;
            self.acc_fill += 1;
            if self.acc_fill == self.config.block_size {
                let done = Self::process_block(
                    &self.accumulator[..self.config.block_size],
                    self.block_index,
                    self.config.downsample,
                );
                self.output_queue.push(done);
                self.block_index += 1;
                self.acc_fill = 0;
            }
        }
    }

    /// If configured with `pad_last_block`, zero-pads any partial block
    /// and processes it; otherwise the partial data stays buffered.
    pub fn flush(&mut self) {
        if self.acc_fill == 0 || !self.config.pad_last_block {
            return;
        }
        self.accumulator[self.acc_fill..self.config.block_size].fill(0.0);
        let done = Self::process_block(
            &self.accumulator[..self.config.block_size],
            self.block_index,
            self.config.downsample,
        );
        self.output_queue.push(done);
        self.block_index += 1;
        self.acc_fill = 0;
    }

    /// True when at least one processed block is queued.
    pub fn has_output(&self) -> bool {
        !self.output_queue.is_empty()
    }

    /// Number of processed blocks currently queued.
    pub fn output_count(&self) -> usize {
        self.output_queue.len()
    }

    /// Removes and serializes the oldest queued block as JSON; returns
    /// an empty string when nothing is queued.
    pub fn pop_output_json(&mut self) -> Result<String, JsValue> {
        if self.output_queue.is_empty() {
            return Ok(String::new());
        }
        let oldest = self.output_queue.remove(0);
        serde_json::to_string(&oldest)
            .map_err(|e| WasmError::SerializationError(e.to_string()).into())
    }

    /// Discards buffered samples, queued output, and the block counter.
    pub fn reset(&mut self) {
        self.acc_fill = 0;
        self.output_queue.clear();
        self.block_index = 0;
        self.accumulator.fill(0.0);
    }

    /// Number of samples waiting in the partial block.
    pub fn buffered_samples(&self) -> usize {
        self.acc_fill
    }

    /// Configured block size.
    pub fn block_size(&self) -> usize {
        self.config.block_size
    }

    /// Configured decimation factor.
    pub fn downsample(&self) -> usize {
        self.config.downsample
    }
}
impl BufferedProcessor {
    /// Computes summary statistics and a chunk-averaged (box-filter)
    /// downsampled copy of one complete block.
    fn process_block(block: &[f64], block_index: u64, downsample: usize) -> ProcessedBlock {
        let n = block.len() as f64;
        let mean = block.iter().sum::<f64>() / n;
        let mut centered_sq = 0.0_f64;
        let mut energy = 0.0_f64;
        let mut peak = 0.0_f64;
        for &x in block {
            let d = x - mean;
            centered_sq += d * d;
            energy += x * x;
            peak = peak.max(x.abs());
        }
        // Averaging each `downsample`-wide chunk acts as a crude
        // anti-aliasing filter before decimation. `new()` guarantees
        // block_size is divisible by downsample, so chunks_exact covers
        // the whole block.
        let output: Vec<f32> = block
            .chunks_exact(downsample)
            .map(|chunk| (chunk.iter().sum::<f64>() / downsample as f64) as f32)
            .collect();
        ProcessedBlock {
            block_index,
            input_count: block.len(),
            mean,
            std_dev: (centered_sq / n).sqrt(),
            rms: (energy / n).sqrt(),
            peak,
            output,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_online_stats_basic() {
        let mut stats = OnlineStats::new();
        assert!(stats.mean().is_nan(), "empty mean should be NaN");
        stats.update(2.0);
        stats.update(4.0);
        stats.update(6.0);
        assert!((stats.mean() - 4.0).abs() < 1e-12, "mean = {}", stats.mean());
        assert_eq!(stats.count(), 3);
        assert!((stats.min() - 2.0).abs() < 1e-12);
        assert!((stats.max() - 6.0).abs() < 1e-12);
    }

    #[test]
    fn test_online_stats_variance() {
        let mut stats = OnlineStats::new();
        for x in [2.0_f64, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0] {
            stats.update(x);
        }
        assert!(
            (stats.variance() - 4.0).abs() < 1e-10,
            "var = {}",
            stats.variance()
        );
    }

    #[test]
    fn test_online_stats_batch() {
        let mut stats = OnlineStats::new();
        stats.update_batch(&[1.0, 2.0, 3.0, 4.0, 5.0]);
        assert!((stats.mean() - 3.0).abs() < 1e-12);
    }

    #[test]
    fn test_online_stats_reset() {
        let mut stats = OnlineStats::new();
        stats.update_batch(&[1.0, 2.0, 3.0]);
        stats.reset();
        assert_eq!(stats.count(), 0);
        assert!(stats.mean().is_nan());
    }

    #[test]
    fn test_online_stats_snapshot_json() {
        let mut stats = OnlineStats::new();
        stats.update_batch(&[1.0, 2.0, 3.0]);
        let json = stats.snapshot_json().expect("snapshot ok");
        assert!(json.contains("mean"));
        assert!(json.contains("variance"));
    }

    #[test]
    fn test_rolling_window_basic() {
        let mut win = RollingWindow::new(3).expect("ok");
        win.push(1.0);
        win.push(2.0);
        win.push(3.0);
        assert!((win.mean() - 2.0).abs() < 1e-12, "mean = {}", win.mean());
    }

    #[test]
    fn test_rolling_window_eviction() {
        let mut win = RollingWindow::new(3).expect("ok");
        win.push(1.0);
        win.push(2.0);
        win.push(3.0);
        win.push(4.0);
        assert!((win.mean() - 3.0).abs() < 1e-10, "mean = {}", win.mean());
        assert_eq!(win.len(), 3);
    }

    #[test]
    fn test_rolling_window_min_max() {
        let mut win = RollingWindow::new(4).expect("ok");
        win.push_batch(&[5.0, 2.0, 8.0, 3.0]);
        assert!((win.min() - 2.0).abs() < 1e-12);
        assert!((win.max() - 8.0).abs() < 1e-12);
    }

    #[test]
    fn test_rolling_window_as_f32() {
        let mut win = RollingWindow::new(2).expect("ok");
        win.push(1.5);
        win.push(2.5);
        let converted = win.window_as_f32();
        assert_eq!(converted.len(), 2);
        assert!((converted[0] - 1.5_f32).abs() < 1e-6);
    }

    #[test]
    fn test_rolling_window_zero_capacity() {
        assert!(RollingWindow::new(0).is_err());
    }

    #[test]
    fn test_streaming_fft_no_frames_until_window_full() {
        let mut fft = StreamingFFT::new(8, 4, false).expect("ok");
        fft.push_samples(&[1.0, 2.0, 3.0]);
        assert!(!fft.has_frame());
    }

    #[test]
    fn test_streaming_fft_emits_frame() {
        let mut fft = StreamingFFT::new(8, 8, false).expect("ok");
        let samples: Vec<f64> = (0..8).map(|i| i as f64).collect();
        fft.push_samples(&samples);
        assert!(fft.has_frame(), "should have one frame");
        let frame = fft.pop_frame();
        assert_eq!(frame.len(), 16, "interleaved: 8 complex = 16 floats");
    }

    #[test]
    fn test_streaming_fft_magnitude() {
        let mut fft = StreamingFFT::new(8, 8, true).expect("ok");
        let samples: Vec<f64> = vec![1.0; 8];
        fft.push_samples(&samples);
        assert!(fft.has_frame());
        let frame = fft.pop_frame();
        assert_eq!(frame.len(), 5, "magnitude bins = n/2+1 = 5");
        assert!(frame[0] > 0.0, "DC bin must be positive");
    }

    #[test]
    fn test_streaming_fft_flush() {
        let mut fft = StreamingFFT::new(8, 8, false).expect("ok");
        fft.push_samples(&[1.0, 2.0, 3.0]);
        assert!(!fft.has_frame());
        fft.flush();
        assert!(fft.has_frame(), "flush should emit padded frame");
    }

    #[test]
    fn test_streaming_fft_invalid_args() {
        assert!(StreamingFFT::new(0, 1, false).is_err());
        assert!(StreamingFFT::new(8, 0, false).is_err());
        assert!(StreamingFFT::new(8, 9, false).is_err());
    }

    #[test]
    fn test_buffered_processor_basic() {
        let mut bp = BufferedProcessor::new(4, 2, false).expect("ok");
        bp.push(&[1.0, 2.0, 3.0, 4.0]);
        assert!(bp.has_output());
        let json = bp.pop_output_json().expect("pop ok");
        let block: serde_json::Value = serde_json::from_str(&json).expect("parse ok");
        let mean = block["mean"].as_f64().expect("mean field");
        assert!((mean - 2.5).abs() < 1e-10);
        let out_len = block["output"].as_array().expect("output array").len();
        assert_eq!(out_len, 2);
    }

    #[test]
    fn test_buffered_processor_flush_pad() {
        let mut bp = BufferedProcessor::new(4, 1, true).expect("ok");
        bp.push(&[1.0, 2.0]);
        assert!(!bp.has_output());
        bp.flush();
        assert!(bp.has_output(), "flush should emit padded block");
    }

    #[test]
    fn test_buffered_processor_flush_no_pad() {
        let mut bp = BufferedProcessor::new(4, 1, false).expect("ok");
        bp.push(&[1.0, 2.0]);
        bp.flush();
        assert!(!bp.has_output());
    }

    #[test]
    fn test_buffered_processor_rms() {
        let mut bp = BufferedProcessor::new(4, 1, false).expect("ok");
        bp.push(&[1.0, 1.0, 1.0, 1.0]);
        let json = bp.pop_output_json().expect("ok");
        let block: serde_json::Value = serde_json::from_str(&json).expect("ok");
        let rms = block["rms"].as_f64().expect("rms");
        assert!((rms - 1.0).abs() < 1e-10, "rms = {rms}");
    }

    #[test]
    fn test_buffered_processor_invalid_args() {
        assert!(BufferedProcessor::new(0, 1, false).is_err());
        assert!(BufferedProcessor::new(4, 0, false).is_err());
        assert!(BufferedProcessor::new(4, 3, false).is_err());
    }

    #[test]
    fn test_buffered_processor_reset() {
        let mut bp = BufferedProcessor::new(4, 1, false).expect("ok");
        bp.push(&[1.0, 2.0, 3.0, 4.0]);
        bp.reset();
        assert!(!bp.has_output());
        assert_eq!(bp.buffered_samples(), 0);
    }
}