use scirs2_core::ndarray::ArrayStatCompat;
use scirs2_core::ndarray::{s, Array1, Array2, ArrayBase, Data, Ix1, Ix2, ScalarOperand};
use scirs2_core::numeric::{Float, FromPrimitive, NumCast};
use std::fmt::{Debug, Display};
use crate::error::{Result, TimeSeriesError};
use statrs::statistics::Statistics;
/// Sample autocovariance of a 1-D series at the given `lag`.
///
/// Computes `sum_{i=lag}^{n-1} (x[i] - mean)(x[i-lag] - mean) / (n - lag)`,
/// where `mean` is the full-sample mean.
///
/// # Errors
/// Returns `InvalidInput` when `lag >= data.len()` (this also rejects empty
/// input, so the division below never sees a zero denominator).
#[allow(dead_code)]
pub fn autocovariance<S, F>(data: &ArrayBase<S, Ix1>, lag: usize) -> Result<F>
where
    S: Data<Elem = F>,
    F: Float + FromPrimitive,
{
    let n = data.len();
    if lag >= n {
        return Err(TimeSeriesError::InvalidInput(
            "Lag exceeds data length".to_string(),
        ));
    }
    let mean = data.mean_or(F::zero());
    // Sum of lagged cross-products of mean-centered values.
    let total = (lag..n).fold(F::zero(), |acc, i| {
        acc + (data[i] - mean) * (data[i - lag] - mean)
    });
    Ok(total / F::from(n - lag).expect("Failed to convert to float"))
}
/// Augmented Dickey-Fuller-style stationarity check (placeholder implementation).
///
/// Builds the ADF regression design — first differences regressed on the
/// lagged level plus `max_lags` lagged differences — but the regression is
/// NOT yet solved: the returned test statistic is always `-2.5` and the
/// p-value `0.1`. NOTE(review): callers must not rely on these values until a
/// real ADF estimator is plugged in here.
///
/// When `lags` is `None`, the lag order follows Schwert's rule of thumb,
/// `12 * (n/100)^(1/4)`, capped at `n/3`.
///
/// # Errors
/// * series shorter than 3 points
/// * `lags` so large that no regression rows remain (previously this caused a
///   `usize` underflow panic rather than an error)
#[allow(dead_code)]
pub fn is_stationary<F>(ts: &Array1<F>, lags: Option<usize>) -> Result<(F, F)>
where
    F: Float + FromPrimitive + Debug,
{
    if ts.len() < 3 {
        return Err(TimeSeriesError::InvalidInput(
            "Time series must have at least 3 points for stationarity test".to_string(),
        ));
    }
    let max_lags = match lags {
        Some(l) => l,
        None => {
            // Schwert (1989) default lag length, capped to keep rows available.
            let n = ts.len() as f64;
            let max_lags_float = 12.0 * (n / 100.0).powf(0.25);
            max_lags_float.min(n / 3.0).floor() as usize
        }
    };
    // First differences: diff_ts[i-1] = ts[i] - ts[i-1]; length is ts.len() - 1.
    let mut diff_ts = Vec::with_capacity(ts.len() - 1);
    for i in 1..ts.len() {
        diff_ts.push(ts[i] - ts[i - 1]);
    }
    let diff_ts = Array1::from(diff_ts);
    // Guard against user-supplied lag orders that leave no regression rows;
    // without this, `diff_ts.len() - max_lags` below underflows and the
    // slices panic.
    if max_lags >= diff_ts.len() {
        return Err(TimeSeriesError::InvalidInput(format!(
            "Number of lags ({}) is too large for a series of length {}",
            max_lags,
            ts.len()
        )));
    }
    // Regression design: response is the differenced series past the burn-in;
    // each row holds the lagged level followed by `max_lags` lagged differences.
    let _y = diff_ts.slice(scirs2_core::ndarray::s![max_lags..]);
    let x_level = ts.slice(scirs2_core::ndarray::s![max_lags..diff_ts.len()]);
    let mut x_data = Vec::with_capacity(diff_ts.len() - max_lags);
    for i in max_lags..diff_ts.len() {
        let mut row = vec![x_level[i - max_lags]];
        for lag in 1..=max_lags {
            row.push(diff_ts[i - lag]);
        }
        x_data.push(row);
    }
    // Placeholder statistics until the regression above is actually solved.
    let adf_stat = F::from_f64(-2.5).expect("Operation failed");
    let p_value = F::from_f64(0.1).expect("Operation failed");
    Ok((adf_stat, p_value))
}
/// Transform a series toward stationarity.
///
/// Supported `method` values:
/// * `"diff"` — first differences (output length `len - 1`)
/// * `"log"` — natural log of every value (all values must be positive)
/// * `"seasonal_diff"` — lag-`seasonal_period` differences (output length
///   `len - period`); `seasonal_period` is required for this method
///
/// # Errors
/// Fewer than 2 points, non-positive values for `"log"`, missing or too-large
/// seasonal period, or an unknown method name.
#[allow(dead_code)]
pub fn transform_to_stationary<F>(
    ts: &Array1<F>,
    method: &str,
    seasonal_period: Option<usize>,
) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug,
{
    if ts.len() < 2 {
        return Err(TimeSeriesError::InvalidInput(
            "Time series must have at least 2 points for transformation".to_string(),
        ));
    }
    match method {
        "diff" => {
            let diffed: Vec<F> = (1..ts.len()).map(|i| ts[i] - ts[i - 1]).collect();
            Ok(Array1::from(diffed))
        }
        "log" => {
            // Validate up front: log is undefined for values <= 0.
            if ts.iter().any(|&v| v <= F::zero()) {
                return Err(TimeSeriesError::InvalidInput(
                    "Cannot apply log transformation to non-positive values".to_string(),
                ));
            }
            let logged: Vec<F> = ts.iter().map(|&v| v.ln()).collect();
            Ok(Array1::from(logged))
        }
        "seasonal_diff" => {
            let period = seasonal_period.ok_or_else(|| {
                TimeSeriesError::InvalidInput(
                    "Seasonal _period must be provided for seasonal differencing".to_string(),
                )
            })?;
            if period >= ts.len() {
                return Err(TimeSeriesError::InvalidInput(format!(
                    "Seasonal period ({}) must be less than time series length ({})",
                    period,
                    ts.len()
                )));
            }
            let diffed: Vec<F> = (period..ts.len()).map(|i| ts[i] - ts[i - period]).collect();
            Ok(Array1::from(diffed))
        }
        _ => Err(TimeSeriesError::InvalidInput(format!(
            "Unknown transformation method: {method}"
        ))),
    }
}
#[allow(dead_code)]
pub fn moving_average<F>(_ts: &Array1<F>, windowsize: usize) -> Result<Array1<F>>
where
F: Float + FromPrimitive + Debug,
{
if windowsize < 1 {
return Err(TimeSeriesError::InvalidInput(
"Window size must be at least 1".to_string(),
));
}
if windowsize > _ts.len() {
return Err(TimeSeriesError::InvalidInput(format!(
"Window size ({}) cannot be larger than time series length ({})",
windowsize,
_ts.len()
)));
}
let half_window = windowsize / 2;
let mut result = Array1::zeros(_ts.len());
let is_even = windowsize.is_multiple_of(2);
for i in 0.._ts.len() {
let start = i.saturating_sub(half_window);
let end = if i + half_window >= _ts.len() {
_ts.len() - 1
} else {
i + half_window
};
let end = if is_even && (end + 1 < _ts.len()) {
end + 1
} else {
end
};
let mut sum = F::zero();
let mut count = F::zero();
for j in start..=end {
sum = sum + _ts[j];
count = count + F::one();
}
result[i] = sum / count;
}
Ok(result)
}
/// Sample autocorrelation function (ACF) from lag 0 up to `maxlag`.
///
/// Each lag's sum of mean-centered cross-products is divided by the total sum
/// of squared deviations, so `result[0] == 1`. `maxlag` defaults to
/// `len - 1` and is clamped to that bound.
///
/// # Errors
/// Fewer than 2 points, or a constant series (zero variance).
#[allow(dead_code)]
pub fn autocorrelation<F>(_ts: &Array1<F>, maxlag: Option<usize>) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug,
{
    let n = _ts.len();
    if n < 2 {
        return Err(TimeSeriesError::InvalidInput(
            "Time series must have at least 2 points for autocorrelation".to_string(),
        ));
    }
    let max_lag = maxlag.unwrap_or(n - 1).min(n - 1);
    let mean = _ts.iter().fold(F::zero(), |acc, &x| acc + x)
        / F::from_usize(n).expect("Operation failed");
    // Lag-0 sum of squares; shared denominator for every lag.
    let denominator = _ts
        .iter()
        .fold(F::zero(), |acc, &x| acc + (x - mean) * (x - mean));
    if denominator == F::zero() {
        return Err(TimeSeriesError::InvalidInput(
            "Cannot compute autocorrelation for constant time series".to_string(),
        ));
    }
    let mut acf = Array1::zeros(max_lag + 1);
    for (lag, slot) in acf.iter_mut().enumerate() {
        let numerator = (0..n - lag).fold(F::zero(), |acc, i| {
            acc + (_ts[i] - mean) * (_ts[i + lag] - mean)
        });
        *slot = numerator / denominator;
    }
    Ok(acf)
}
/// Lagged cross-products between two series, `y` shifted forward by each lag.
///
/// `result[lag]` is the mean of `(x[i] - x_mean)(y[i+lag] - y_mean)` over the
/// overlapping range. NOTE(review): this is not normalized by the standard
/// deviations, so values are on a cross-covariance scale rather than in
/// [-1, 1] — confirm callers expect this. `max_lag` defaults to
/// `min_len / 4` and is clamped to `min_len - 1`.
///
/// # Errors
/// Either series shorter than 2 points.
#[allow(dead_code)]
pub fn cross_correlation<F>(
    x: &Array1<F>,
    y: &Array1<F>,
    max_lag: Option<usize>,
) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Debug,
{
    let min_len = x.len().min(y.len());
    if min_len < 2 {
        return Err(TimeSeriesError::InvalidInput(
            "Time series must have at least 2 points for cross-correlation".to_string(),
        ));
    }
    let max_lag = max_lag.unwrap_or(min_len / 4).min(min_len - 1);
    let x_mean = x.sum() / F::from(x.len()).expect("Operation failed");
    let y_mean = y.sum() / F::from(y.len()).expect("Operation failed");
    let mut out = Array1::zeros(max_lag + 1);
    for (lag, slot) in out.iter_mut().enumerate() {
        // Overlap shrinks as the lag grows; always >= 1 since lag <= min_len - 1.
        let overlap = min_len - lag;
        if overlap > 0 {
            let total = (0..overlap).fold(F::zero(), |acc, i| {
                acc + (x[i] - x_mean) * (y[i + lag] - y_mean)
            });
            *slot = total / F::from(overlap).expect("Failed to convert to float");
        }
    }
    Ok(out)
}
/// Partial autocorrelation function (PACF) via the Durbin-Levinson recursion
/// on the sample autocorrelation.
///
/// `maxlag` defaults to `min(len / 4, 10)` and is clamped to `len - 1`.
/// `result[0]` is always 1; `result[1]` equals the lag-1 ACF.
///
/// # Errors
/// Series shorter than 2 points, or failure of the underlying
/// [`autocorrelation`] call (e.g. a constant series).
#[allow(dead_code)]
pub fn partial_autocorrelation<F>(_ts: &Array1<F>, maxlag: Option<usize>) -> Result<Array1<F>>
where
F: Float + FromPrimitive + Debug,
{
if _ts.len() < 2 {
return Err(TimeSeriesError::InvalidInput(
"Time series must have at least 2 points for partial autocorrelation".to_string(),
));
}
let default_max_lag = std::cmp::min(_ts.len() / 4, 10);
let max_lag = std::cmp::min(maxlag.unwrap_or(default_max_lag), _ts.len() - 1);
let acf = autocorrelation(_ts, Some(max_lag))?;
let mut pacf = Array1::zeros(max_lag + 1);
// Lag 0 is 1 by definition; the lag-1 PACF equals the lag-1 ACF.
pacf[0] = F::one();
if max_lag >= 1 {
pacf[1] = acf[1];
}
if max_lag >= 2 {
// Durbin-Levinson: phi_old holds the AR(j-1) coefficients. Each step
// computes the new reflection coefficient phi[j] — which IS the PACF at
// lag j — then refreshes the lower-order coefficients.
let mut phi_old = Array1::zeros(max_lag + 1);
for j in 2..=max_lag {
let mut phi = Array1::zeros(j + 1);
for k in 1..j {
phi[k] = phi_old[k];
}
// numerator = acf[j] - sum_{k=1}^{j-1} phi_old[k] * acf[j-k]
let mut numerator = acf[j];
for k in 1..j {
numerator = numerator - phi_old[k] * acf[j - k];
}
// denominator = 1 - sum_{k=1}^{j-1} phi_old[k] * acf[k]
let mut denominator = F::one();
for k in 1..j {
denominator = denominator - phi_old[k] * acf[k];
}
phi[j] = numerator / denominator;
// Update the lower-order coefficients using the reflection coefficient;
// note the update reads phi_old, not phi, so order matters here.
for k in 1..j {
phi[k] = phi_old[k] - phi[j] * phi_old[j - k];
}
pacf[j] = phi[j];
phi_old = phi;
}
}
Ok(pacf)
}
/// Remove a trend from a 1-D series.
///
/// * `"constant"` — subtract the mean.
/// * `"linear"` — subtract a least-squares line; with `breakpoints`, fit and
///   subtract an independent line on each `[bp[i], bp[i+1])` segment
///   (piecewise-linear detrending).
///
/// `axis` must be 0 for 1-D input.
///
/// # Errors
/// * non-finite data, unsupported axis, or an unknown `detrend_type`
/// * fewer than 2 points for linear detrending
/// * breakpoints that are not strictly increasing inside `(0, n)`
///   (previously such input caused slice panics instead of an error)
#[allow(dead_code)]
pub fn detrend<S, F>(
    data: &ArrayBase<S, Ix1>,
    axis: usize,
    detrend_type: &str,
    breakpoints: Option<&[usize]>,
) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display + ScalarOperand,
{
    scirs2_core::validation::checkarray_finite(data, "data")?;
    if axis != 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Only axis=0 supported for 1D arrays".to_string(),
        ));
    }
    match detrend_type {
        "constant" => {
            let mean = data.mean().ok_or_else(|| {
                TimeSeriesError::ComputationError("Failed to compute mean".to_string())
            })?;
            Ok(data.map(|&x| x - mean))
        }
        "linear" => {
            let n = data.len();
            if n < 2 {
                return Err(TimeSeriesError::InvalidInput(
                    "Data must have at least 2 points for linear detrending".to_string(),
                ));
            }
            if let Some(bp) = breakpoints {
                // Validate before slicing: breakpoints must be strictly
                // increasing and strictly inside (0, n), otherwise the segment
                // slices below would panic or produce empty segments.
                let mut prev = 0usize;
                for &b in bp {
                    if b <= prev || b >= n {
                        return Err(TimeSeriesError::InvalidInput(format!(
                            "Breakpoints must be strictly increasing and within (0, {n}), got {b}"
                        )));
                    }
                    prev = b;
                }
                let mut result = data.to_owned();
                // Segment boundaries: [0, bp..., n].
                let mut bp_indices = vec![0];
                bp_indices.extend_from_slice(bp);
                bp_indices.push(n);
                for i in 0..bp_indices.len() - 1 {
                    let start = bp_indices[i];
                    let end = bp_indices[i + 1];
                    let segment_data = data.slice(s![start..end]);
                    // `start` offsets the fitted x-axis so the per-segment line
                    // aligns with global indices.
                    let trend = linear_trend(&segment_data, start)?;
                    for j in start..end {
                        result[j] = result[j] - trend[j - start];
                    }
                }
                Ok(result)
            } else {
                let trend = linear_trend(data, 0)?;
                Ok(data.to_owned() - trend)
            }
        }
        _ => Err(TimeSeriesError::InvalidInput(format!(
            "Invalid detrend _type: {detrend_type}. Must be 'constant' or 'linear'"
        ))),
    }
}
/// Detrend a 2-D array one lane at a time.
///
/// `axis == 0` detrends each column; `axis == 1` detrends each row. The work
/// per lane is delegated to [`detrend`], so `detrend_type` and `breakpoints`
/// carry the same meaning and error conditions.
///
/// # Errors
/// Non-finite data, `axis > 1`, or any error from the per-lane [`detrend`].
#[allow(dead_code)]
pub fn detrend_2d<S, F>(
    data: &ArrayBase<S, Ix2>,
    axis: usize,
    detrend_type: &str,
    breakpoints: Option<&[usize]>,
) -> Result<Array2<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display + ScalarOperand,
{
    scirs2_core::validation::checkarray_finite(data, "data")?;
    if axis > 1 {
        return Err(TimeSeriesError::InvalidInput(
            "Axis must be 0 or 1 for 2D arrays".to_string(),
        ));
    }
    let mut out = data.to_owned();
    match axis {
        0 => {
            for mut lane in out.columns_mut() {
                let fixed = detrend(&lane.view(), 0, detrend_type, breakpoints)?;
                lane.assign(&fixed);
            }
        }
        _ => {
            for mut lane in out.rows_mut() {
                let fixed = detrend(&lane.view(), 0, detrend_type, breakpoints)?;
                lane.assign(&fixed);
            }
        }
    }
    Ok(out)
}
/// Least-squares line fitted over x = offset, offset+1, ..., offset+n-1,
/// returned as the fitted trend values (one per input sample).
///
/// # Errors
/// Mean computation failure, or a degenerate fit (zero x-variance, e.g. a
/// single-point segment).
#[allow(dead_code)]
fn linear_trend<S, F>(data: &ArrayBase<S, Ix1>, offset: usize) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display + ScalarOperand,
{
    let n = data.len();
    let first = F::from(offset).expect("Failed to convert to float");
    let last = F::from(offset + n - 1).expect("Failed to convert to float");
    let x = Array1::linspace(first, last, n);
    let y = data.to_owned();
    let x_mean = x
        .mean()
        .ok_or_else(|| TimeSeriesError::ComputationError("Failed to compute x mean".to_string()))?;
    let y_mean = y
        .mean()
        .ok_or_else(|| TimeSeriesError::ComputationError("Failed to compute y mean".to_string()))?;
    let x_centered = &x - x_mean;
    let y_centered = &y - y_mean;
    // Classic slope formula: Sxy / Sxx on mean-centered data.
    let sxy = x_centered.dot(&y_centered);
    let sxx = x_centered.dot(&x_centered);
    if sxx.abs() < F::epsilon() {
        return Err(TimeSeriesError::ComputationError(
            "Singular matrix in linear regression".to_string(),
        ));
    }
    let slope = sxy / sxx;
    let intercept = y_mean - slope * x_mean;
    Ok(x.map(|&xi| slope * xi + intercept))
}
/// Resample a 1-D signal to `num` points using linear interpolation.
///
/// Output positions are evenly spaced over the input's index range, so the
/// first and last samples are preserved. The `window` argument is accepted
/// for API compatibility but is currently ignored — no anti-aliasing window
/// is applied.
///
/// # Errors
/// Non-finite input, `num == 0`, unsupported axis, or an empty signal
/// (previously an empty signal or `num == 1` panicked: the interpolation
/// scale divided by zero, producing a NaN position and a failing `to_usize`).
#[allow(dead_code)]
pub fn resample<S, F>(
    x: &ArrayBase<S, Ix1>,
    num: usize,
    axis: usize,
    window: Option<&Array1<F>>,
) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display,
{
    scirs2_core::validation::checkarray_finite(x, "x")?;
    scirs2_core::validation::check_positive(num as f64, "num")?;
    if axis != 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Only axis=0 supported for 1D arrays".to_string(),
        ));
    }
    let n = x.len();
    // An empty signal has nothing to interpolate; the scale below would also
    // underflow on n - 1.
    if n == 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Cannot resample an empty signal".to_string(),
        ));
    }
    if n == num {
        return Ok(x.to_owned());
    }
    // num == 1 would make `scale` divide by zero (NaN position, then a panic
    // in to_usize); degenerate to the first sample instead.
    if num == 1 {
        return Ok(Array1::from_elem(1, x[0]));
    }
    let mut result = Array1::zeros(num);
    // Map output index i to the fractional source position i * (n-1)/(num-1).
    let scale = F::from(n - 1).expect("Failed to convert to float")
        / F::from(num - 1).expect("Failed to convert to float");
    for i in 0..num {
        let pos = F::from(i).expect("Failed to convert to float") * scale;
        let idx = pos.floor().to_usize().expect("Operation failed");
        let frac = pos - pos.floor();
        if idx + 1 < n {
            // Linear interpolation between the two neighboring samples.
            result[i] = x[idx] * (F::one() - frac) + x[idx + 1] * frac;
        } else {
            result[i] = x[idx];
        }
    }
    Ok(result)
}
/// Downsample a signal by an integer factor `q` after low-pass filtering.
///
/// The pre-filter is selected by `ftype` (`"iir"` — simplified smoother via
/// [`apply_chebyshev_filter`]; `"fir"` — windowed-sinc via
/// [`apply_fir_filter`]) with order `n` (default 8). The cutoff is placed at
/// `0.5 / q`, half the decimated Nyquist rate. Every `q`-th filtered sample
/// is kept, giving `x.len() / q` outputs.
///
/// # Errors
/// Non-finite input, `q == 0`, unsupported axis, or an unknown filter type.
#[allow(dead_code)]
pub fn decimate<S, F>(
    x: &ArrayBase<S, Ix1>,
    q: usize,
    n: Option<usize>,
    ftype: Option<&str>,
    axis: usize,
) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display,
{
    scirs2_core::validation::checkarray_finite(x, "x")?;
    scirs2_core::validation::check_positive(q as f64, "q")?;
    if axis != 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Only axis=0 supported for 1D arrays".to_string(),
        ));
    }
    // Factor 1 means no decimation at all.
    if q == 1 {
        return Ok(x.to_owned());
    }
    let order = n.unwrap_or(8);
    let kind = ftype.unwrap_or("iir");
    // Normalized cutoff at half the new Nyquist rate.
    let cutoff = F::from(0.5).expect("Failed to convert constant to float")
        / F::from(q).expect("Failed to convert to float");
    let filtered = match kind {
        "iir" => apply_chebyshev_filter(x, order, cutoff)?,
        "fir" => apply_fir_filter(x, order, cutoff)?,
        filter_type => {
            return Err(TimeSeriesError::InvalidInput(format!(
                "Invalid filter type: {filter_type}. Must be 'iir' or 'fir'"
            )))
        }
    };
    // Keep every q-th filtered sample, truncating to the output length.
    let out_len = x.len() / q;
    let mut result = Array1::zeros(out_len);
    for (i, j) in (0..x.len()).step_by(q).enumerate().take(out_len) {
        result[i] = filtered[j];
    }
    Ok(result)
}
/// Simplified "IIR" pre-filter used by [`decimate`].
///
/// NOTE(review): despite the name, this is a centered moving average of width
/// `order + 1` with edge shrinking — not a Chebyshev design — and the
/// `cutoff` argument is currently unused. Confirm whether a real IIR filter
/// is intended here.
#[allow(dead_code)]
fn apply_chebyshev_filter<S, F>(x: &ArrayBase<S, Ix1>, order: usize, cutoff: F) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display,
{
    let width = order + 1;
    let half = width / 2;
    let n = x.len();
    let mut smoothed = x.to_owned();
    for (i, slot) in smoothed.iter_mut().enumerate() {
        // Window [lo, hi) around i, clamped to the signal bounds.
        let lo = i.saturating_sub(half);
        let hi = (i + half + 1).min(n);
        let seg_sum: F = x.slice(s![lo..hi]).sum();
        *slot = seg_sum / F::from(hi - lo).expect("Failed to convert to float");
    }
    Ok(smoothed)
}
/// Windowed-sinc FIR low-pass filter: designs `order + 1` Hamming-windowed
/// sinc taps at normalized cutoff `cutoff`, normalizes them to unity DC gain,
/// and convolves with the signal via [`convolve_1d`].
#[allow(dead_code)]
fn apply_fir_filter<S, F>(x: &ArrayBase<S, Ix1>, order: usize, cutoff: F) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display,
{
    let two = F::from(2.0).expect("Failed to convert constant to float");
    let pi = F::from(std::f64::consts::PI).expect("Failed to convert to float");
    let half = order / 2;
    let mut taps = Array1::zeros(order + 1);
    for (i, tap) in taps.iter_mut().enumerate() {
        let offset = i as i32 - half as i32;
        if offset == 0 {
            // Center tap of the ideal low-pass impulse response: 2 * fc.
            *tap = two * cutoff;
        } else {
            let m = F::from(offset).expect("Failed to convert to float");
            // sinc term: sin(2*fc*pi*m) / (pi*m).
            let sinc = (two * cutoff * pi * m).sin() / (pi * m);
            // Hamming window: 0.54 - 0.46*cos(2*pi*i/order).
            let hamming = F::from(0.54).expect("Failed to convert constant to float")
                - F::from(0.46).expect("Failed to convert constant to float")
                    * (two * pi * F::from(i).expect("Failed to convert to float")
                        / F::from(order).expect("Failed to convert to float"))
                    .cos();
            *tap = sinc * hamming;
        }
    }
    // Normalize so the taps sum to one (unity gain at DC).
    let total: F = taps.sum();
    taps.map_inplace(|t| *t = *t / total);
    convolve_1d(x, &taps.view())
}
/// Same-length 1-D convolution with implicit zero padding at both borders.
///
/// The kernel is centered on each sample (`kernel.len() / 2` offset); taps
/// that fall outside the signal contribute nothing.
#[allow(dead_code)]
fn convolve_1d<S, T, F>(x: &ArrayBase<S, Ix1>, kernel: &ArrayBase<T, Ix1>) -> Result<Array1<F>>
where
    S: Data<Elem = F>,
    T: Data<Elem = F>,
    F: Float + NumCast + FromPrimitive + Debug + Display,
{
    let n = x.len() as i32;
    let half = (kernel.len() / 2) as i32;
    let mut out = Array1::zeros(x.len());
    for (i, slot) in out.iter_mut().enumerate() {
        let mut acc = F::zero();
        for (j, &weight) in kernel.iter().enumerate() {
            let src = i as i32 + j as i32 - half;
            // Out-of-range indices are treated as zeros (zero padding).
            if (0..n).contains(&src) {
                acc = acc + x[src as usize] * weight;
            }
        }
        *slot = acc;
    }
    Ok(out)
}
/// Build a daily time series: one `YYYY-MM-DD` date string per value over the
/// inclusive range `[start_date, end_date]`.
///
/// Leap years are handled (Gregorian rules), so February 29 appears in leap
/// years and cross-year day counts are exact — previously every year was
/// treated as 365 days and February always had 28. Day-of-month is now also
/// validated against the actual month length (e.g. `2023-02-30` is rejected).
///
/// # Errors
/// * malformed dates (not `YYYY-MM-DD`, month/day out of range)
/// * `end_date` before `start_date`
/// * `values.len()` not matching the number of days in the range
#[allow(dead_code)]
pub fn create_time_series<F>(
    start_date: &str,
    end_date: &str,
    values: &Array1<F>,
) -> Result<(Vec<String>, Array1<F>)>
where
    F: Float + FromPrimitive + Debug,
{
    /// Gregorian leap-year rule.
    fn is_leap_year(year: i32) -> bool {
        (year % 4 == 0 && year % 100 != 0) || year % 400 == 0
    }
    /// Days in `month` of `year`, accounting for leap Februaries.
    fn month_length(year: i32, month: u32) -> u32 {
        const DAYS: [u32; 13] = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
        if month == 2 && is_leap_year(year) {
            29
        } else {
            DAYS[month as usize]
        }
    }
    fn parse_date(_datestr: &str) -> Result<(i32, u32, u32)> {
        let parts: Vec<&str> = _datestr.split('-').collect();
        if parts.len() != 3 {
            return Err(TimeSeriesError::InvalidInput(format!(
                "Invalid date format: {_datestr}, expected YYYY-MM-DD"
            )));
        }
        let year = parts[0]
            .parse::<i32>()
            .map_err(|_| TimeSeriesError::InvalidInput(format!("Invalid year: {}", parts[0])))?;
        let month = parts[1]
            .parse::<u32>()
            .map_err(|_| TimeSeriesError::InvalidInput(format!("Invalid month: {}", parts[1])))?;
        let day = parts[2]
            .parse::<u32>()
            .map_err(|_| TimeSeriesError::InvalidInput(format!("Invalid day: {}", parts[2])))?;
        if !(1..=12).contains(&month) {
            return Err(TimeSeriesError::InvalidInput(format!(
                "Month must be between 1 and 12, got {month}"
            )));
        }
        if day < 1 || day > month_length(year, month) {
            return Err(TimeSeriesError::InvalidInput(format!(
                "Day {day} is out of range for {year:04}-{month:02}"
            )));
        }
        Ok((year, month, day))
    }
    /// Absolute day index in the proleptic Gregorian calendar (arbitrary
    /// epoch); differences between two dates give exact day counts.
    fn day_number(date: (i32, u32, u32)) -> i64 {
        let (year, month, day) = date;
        // Leap days contributed by all years strictly before `year`.
        let y = (year as i64) - 1;
        let leap_days = y.div_euclid(4) - y.div_euclid(100) + y.div_euclid(400);
        let month_days: i64 = (1..month).map(|m| month_length(year, m) as i64).sum();
        year as i64 * 365 + leap_days + month_days + day as i64
    }
    fn generate_dates(start: (i32, u32, u32), n_days: usize) -> Vec<String> {
        let mut dates = Vec::with_capacity(n_days);
        let (mut year, mut month, mut day) = start;
        for _ in 0..n_days {
            dates.push(format!("{year:04}-{month:02}-{day:02}"));
            // Advance one calendar day, rolling over months and years.
            day += 1;
            if day > month_length(year, month) {
                day = 1;
                month += 1;
                if month > 12 {
                    month = 1;
                    year += 1;
                }
            }
        }
        dates
    }
    let start = parse_date(start_date)?;
    let end = parse_date(end_date)?;
    // Inclusive day count; i64 arithmetic avoids overflow for distant years.
    let days = day_number(end) - day_number(start) + 1;
    if days < 1 {
        return Err(TimeSeriesError::InvalidInput(format!(
            "End _date ({end_date}) must be after start _date ({start_date})"
        )));
    }
    if values.len() as i64 != days {
        return Err(TimeSeriesError::InvalidInput(format!(
            "Values length ({}) must match _date range length ({})",
            values.len(),
            days
        )));
    }
    let dates = generate_dates(start, days as usize);
    Ok((dates, values.clone()))
}
/// Summary statistics of a series as an f64 map with keys `mean`, `variance`
/// (population, i.e. divided by n), `std`, `min`, `max`, and `count`.
///
/// # Errors
/// Returns `InvalidInput` for an empty array.
pub fn calculate_basic_stats<F>(data: &Array1<F>) -> Result<std::collections::HashMap<String, f64>>
where
    F: Float + FromPrimitive + Into<f64>,
{
    if data.is_empty() {
        return Err(TimeSeriesError::InvalidInput(
            "Data array is empty".to_string(),
        ));
    }
    let n = data.len() as f64;
    // Mean is taken in F precision first (matching mean_or), then widened.
    let mean: f64 = data.mean_or(F::zero()).into();
    let variance = data
        .iter()
        .map(|&v| {
            let diff = v.into() - mean;
            diff * diff
        })
        .sum::<f64>()
        / n;
    let min = data.iter().map(|&v| v.into()).fold(f64::INFINITY, f64::min);
    let max = data
        .iter()
        .map(|&v| v.into())
        .fold(f64::NEG_INFINITY, f64::max);
    let mut stats = std::collections::HashMap::new();
    stats.insert("mean".to_string(), mean);
    stats.insert("variance".to_string(), variance);
    stats.insert("std".to_string(), variance.sqrt());
    stats.insert("min".to_string(), min);
    stats.insert("max".to_string(), max);
    stats.insert("count".to_string(), n);
    Ok(stats)
}
/// Lagged differencing: `out[i] = data[i + periods] - data[i]`, yielding
/// `data.len() - periods` values.
///
/// # Errors
/// `periods == 0`, or a series no longer than `periods`.
pub fn difference_series<F>(data: &Array1<F>, periods: usize) -> Result<Array1<F>>
where
    F: Float + FromPrimitive + Clone,
{
    if periods == 0 {
        return Err(TimeSeriesError::InvalidInput(
            "Periods must be greater than 0".to_string(),
        ));
    }
    if data.len() <= periods {
        return Err(TimeSeriesError::InvalidInput(
            "Data length must be greater than periods".to_string(),
        ));
    }
    let diffed: Vec<F> = (periods..data.len())
        .map(|i| data[i] - data[i - periods])
        .collect();
    Ok(Array1::from_vec(diffed))
}
/// Seasonal differencing of a series with season length `periods`.
///
/// Currently a thin alias for [`difference_series`]; with `periods` set to the
/// season length the two operations coincide.
///
/// # Errors
/// Same conditions as [`difference_series`]: `periods == 0` or
/// `data.len() <= periods`.
pub fn seasonal_difference_series<F>(data: &Array1<F>, periods: usize) -> Result<Array1<F>>
where
F: Float + FromPrimitive + Clone,
{
difference_series(data, periods)
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_relative_eq;
use scirs2_core::ndarray::array;
// Constant detrending subtracts the mean: the result is zero-mean and, for
// a linear ramp, symmetric about zero.
#[test]
fn test_detrend_constant() {
let x = array![1.0, 2.0, 3.0, 4.0, 5.0];
let detrended = detrend(&x.view(), 0, "constant", None).expect("Operation failed");
assert_relative_eq!(detrended.clone().mean(), 0.0, epsilon = 1e-10);
assert_relative_eq!(detrended[0], -2.0, epsilon = 1e-10);
assert_relative_eq!(detrended[2], 0.0, epsilon = 1e-10);
assert_relative_eq!(detrended[4], 2.0, epsilon = 1e-10);
}
// Linear detrending of a perfect ramp leaves constant residuals, so
// consecutive differences of the output should vanish.
#[test]
fn test_detrend_linear() {
let x = array![1.0, 2.0, 3.0, 4.0, 5.0];
let detrended = detrend(&x.view(), 0, "linear", None).expect("Operation failed");
for i in 1..detrended.len() {
assert_relative_eq!(detrended[i] - detrended[i - 1], 0.0, epsilon = 1e-10);
}
}
// Piecewise-linear detrending: each segment (split at index 4) is itself a
// perfect ramp, so every residual should be zero.
#[test]
fn test_detrend_linear_with_breakpoints() {
let x = array![1.0, 2.0, 3.0, 4.0, 2.0, 3.0, 4.0, 5.0];
let breakpoints = vec![4];
let detrended =
detrend(&x.view(), 0, "linear", Some(&breakpoints)).expect("Operation failed");
assert_relative_eq!(detrended[0], 0.0, epsilon = 1e-10);
assert_relative_eq!(detrended[3], 0.0, epsilon = 1e-10);
assert_relative_eq!(detrended[4], 0.0, epsilon = 1e-10);
assert_relative_eq!(detrended[7], 0.0, epsilon = 1e-10);
}
// 2-D constant detrending along axis 0 should zero-center every column.
#[test]
fn test_detrend_2d() {
let x = Array2::from_shape_vec((3, 3), vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
.expect("Operation failed");
let detrended = detrend_2d(&x.view(), 0, "constant", None).expect("Operation failed");
for col in detrended.columns() {
assert_relative_eq!(col.mean(), 0.0, epsilon = 1e-10);
}
}
// Upsampling preserves the endpoints (linear interpolation pins first/last).
#[test]
fn test_resample_upsample() {
let x = array![1.0, 2.0, 3.0, 4.0];
let resampled = resample(&x.view(), 8, 0, None).expect("Operation failed");
assert_eq!(resampled.len(), 8);
assert_relative_eq!(resampled[0], x[0], epsilon = 0.1);
assert_relative_eq!(
resampled[resampled.len() - 1],
x[x.len() - 1],
epsilon = 0.1
);
}
// Downsampling only checks the output length here.
#[test]
fn test_resample_downsample() {
let x = array![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let resampled = resample(&x.view(), 4, 0, None).expect("Operation failed");
assert_eq!(resampled.len(), 4);
}
// Decimation by q=2 halves the length (8 -> 4) with the IIR pre-filter.
#[test]
fn test_decimate() {
let x = array![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0];
let decimated = decimate(&x.view(), 2, Some(4), Some("iir"), 0).expect("Operation failed");
assert_eq!(decimated.len(), 4);
}
// Unknown detrend types must be rejected with an error, not a panic.
#[test]
fn test_invalid_detrend_type() {
let x = array![1.0, 2.0, 3.0];
let result = detrend(&x.view(), 0, "invalid", None);
assert!(result.is_err());
}
// 1-D detrend only supports axis 0; any other axis is an error.
#[test]
fn test_invalid_axis() {
let x = array![1.0, 2.0, 3.0];
let result = detrend(&x.view(), 1, "constant", None);
assert!(result.is_err());
}
}