// scirs2_core/preprocessing.rs
1//! Data preprocessing utilities for scientific computing
2//!
3//! This module provides common data preprocessing operations used across
4//! the SciRS2 ecosystem, including scaling, encoding, imputation, and
5//! outlier detection.
6//!
7//! # Scalers
8//!
9//! - [`StandardScaler`] - Standardize features by removing the mean and scaling to unit variance
10//! - [`MinMaxScaler`] - Scale features to a given range (default [0, 1])
11//! - [`RobustScaler`] - Scale features using statistics robust to outliers (median, IQR)
12//! - [`MaxAbsScaler`] - Scale each feature by its maximum absolute value
13//!
14//! # Encoders
15//!
16//! - [`LabelEncoder`] - Encode string labels as integers
17//! - [`OneHotEncoder`] - Encode categorical features as one-hot numeric arrays
18//! - [`OrdinalEncoder`] - Encode categorical features as ordinal integers
19//!
20//! # Imputation
21//!
22//! - [`Imputer`] - Fill missing values using various strategies
23//!
24//! # Outlier Detection
25//!
26//! - [`OutlierDetector`] - Detect outliers using Z-score or IQR methods
27
28use crate::error::{CoreError, CoreResult, ErrorContext};
29use ::ndarray::{Array1, Array2, Axis};
30use num_traits::{Float, FromPrimitive, NumCast, Zero};
31use std::collections::HashMap;
32use std::fmt::{Debug, Display};
33use std::hash::Hash;
34
35// ---------------------------------------------------------------------------
36// StandardScaler
37// ---------------------------------------------------------------------------
38
/// Standardize features by removing the mean and scaling to unit variance.
///
/// z = (x - mean) / std
///
/// The standard deviation is the *population* std (variance divided by n,
/// not n - 1). `mean` and `std_dev` stay `None` until `fit` succeeds.
///
/// # Example
///
/// ```
/// use scirs2_core::preprocessing::StandardScaler;
/// use ndarray::array;
///
/// let data = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
/// let mut scaler = StandardScaler::<f64>::new();
/// scaler.fit(&data).expect("fit failed");
/// let transformed = scaler.transform(&data).expect("transform failed");
/// ```
#[derive(Debug, Clone)]
pub struct StandardScaler<F: Float> {
    /// Per-feature mean learned by `fit` (`None` until fitted)
    pub mean: Option<Array1<F>>,
    /// Per-feature population standard deviation learned by `fit` (`None` until fitted)
    pub std_dev: Option<Array1<F>>,
    /// Whether to center the data (subtract mean)
    pub with_mean: bool,
    /// Whether to scale to unit variance
    pub with_std: bool,
}
65
66impl<F: Float + FromPrimitive + Debug + Display + std::iter::Sum> StandardScaler<F> {
67    /// Create a new StandardScaler with default settings
68    #[must_use]
69    pub fn new() -> Self {
70        Self {
71            mean: None,
72            std_dev: None,
73            with_mean: true,
74            with_std: true,
75        }
76    }
77
78    /// Create with explicit centering/scaling options
79    #[must_use]
80    pub fn with_options(with_mean: bool, with_std: bool) -> Self {
81        Self {
82            mean: None,
83            std_dev: None,
84            with_mean,
85            with_std,
86        }
87    }
88
89    /// Fit the scaler by computing mean and std from the data.
90    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
91        let n_samples = data.nrows();
92        if n_samples == 0 {
93            return Err(CoreError::ValueError(ErrorContext::new(
94                "Cannot fit StandardScaler on empty data",
95            )));
96        }
97        let n_f = F::from_usize(n_samples).ok_or_else(|| {
98            CoreError::ValueError(ErrorContext::new("Failed to convert n_samples to float"))
99        })?;
100        let n_cols = data.ncols();
101        let mut mean_arr = Array1::<F>::zeros(n_cols);
102        let mut std_arr = Array1::<F>::zeros(n_cols);
103
104        for j in 0..n_cols {
105            let col = data.column(j);
106            let sum: F = col.iter().copied().sum();
107            let m = sum / n_f;
108            mean_arr[j] = m;
109
110            let var_sum: F = col.iter().map(|&x| (x - m) * (x - m)).sum();
111            let var = var_sum / n_f;
112            std_arr[j] = var.sqrt();
113        }
114
115        self.mean = Some(mean_arr);
116        self.std_dev = Some(std_arr);
117        Ok(())
118    }
119
120    /// Transform the data using fitted parameters.
121    pub fn transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
122        let mean = self.mean.as_ref().ok_or_else(|| {
123            CoreError::InvalidState(ErrorContext::new("StandardScaler not fitted"))
124        })?;
125        let std_dev = self.std_dev.as_ref().ok_or_else(|| {
126            CoreError::InvalidState(ErrorContext::new("StandardScaler not fitted"))
127        })?;
128        if data.ncols() != mean.len() {
129            return Err(CoreError::DimensionError(ErrorContext::new(format!(
130                "Expected {} features, got {}",
131                mean.len(),
132                data.ncols()
133            ))));
134        }
135        let mut result = data.clone();
136        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
137        for j in 0..data.ncols() {
138            for i in 0..data.nrows() {
139                let mut val = result[[i, j]];
140                if self.with_mean {
141                    val = val - mean[j];
142                }
143                if self.with_std {
144                    let s = if std_dev[j] < eps {
145                        F::one()
146                    } else {
147                        std_dev[j]
148                    };
149                    val = val / s;
150                }
151                result[[i, j]] = val;
152            }
153        }
154        Ok(result)
155    }
156
157    /// Fit and transform in one step.
158    pub fn fit_transform(&mut self, data: &Array2<F>) -> CoreResult<Array2<F>> {
159        self.fit(data)?;
160        self.transform(data)
161    }
162
163    /// Inverse-transform scaled data back to original scale.
164    pub fn inverse_transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
165        let mean = self.mean.as_ref().ok_or_else(|| {
166            CoreError::InvalidState(ErrorContext::new("StandardScaler not fitted"))
167        })?;
168        let std_dev = self.std_dev.as_ref().ok_or_else(|| {
169            CoreError::InvalidState(ErrorContext::new("StandardScaler not fitted"))
170        })?;
171        if data.ncols() != mean.len() {
172            return Err(CoreError::DimensionError(ErrorContext::new(format!(
173                "Expected {} features, got {}",
174                mean.len(),
175                data.ncols()
176            ))));
177        }
178        let mut result = data.clone();
179        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
180        for j in 0..data.ncols() {
181            for i in 0..data.nrows() {
182                let mut val = result[[i, j]];
183                if self.with_std {
184                    let s = if std_dev[j] < eps {
185                        F::one()
186                    } else {
187                        std_dev[j]
188                    };
189                    val = val * s;
190                }
191                if self.with_mean {
192                    val = val + mean[j];
193                }
194                result[[i, j]] = val;
195            }
196        }
197        Ok(result)
198    }
199}
200
201impl<F: Float + FromPrimitive + Debug + Display + std::iter::Sum> Default for StandardScaler<F> {
202    fn default() -> Self {
203        Self::new()
204    }
205}
206
207// ---------------------------------------------------------------------------
208// MinMaxScaler
209// ---------------------------------------------------------------------------
210
/// Scale features to a given range [feature_min, feature_max] (default [0, 1]).
///
/// X_scaled = (X - X_min) / (X_max - X_min) * (feature_max - feature_min) + feature_min
///
/// `data_min` / `data_max` stay `None` until `fit` succeeds. Transforming
/// values outside the fitted range is not clamped — results fall outside
/// the target interval.
///
/// # Example
///
/// ```
/// use scirs2_core::preprocessing::MinMaxScaler;
/// use ndarray::array;
///
/// let data = array![[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]];
/// let mut scaler = MinMaxScaler::<f64>::new(0.0, 1.0);
/// scaler.fit(&data).expect("fit failed");
/// let scaled = scaler.transform(&data).expect("transform failed");
/// ```
#[derive(Debug, Clone)]
pub struct MinMaxScaler<F: Float> {
    /// Per-feature minimum from training data (`None` until fitted)
    pub data_min: Option<Array1<F>>,
    /// Per-feature maximum from training data (`None` until fitted)
    pub data_max: Option<Array1<F>>,
    /// Target range minimum
    pub feature_min: F,
    /// Target range maximum
    pub feature_max: F,
}
237
238impl<F: Float + FromPrimitive + Debug + Display> MinMaxScaler<F> {
239    /// Create a new MinMaxScaler with target range [feature_min, feature_max].
240    #[must_use]
241    pub fn new(feature_min: F, feature_max: F) -> Self {
242        Self {
243            data_min: None,
244            data_max: None,
245            feature_min,
246            feature_max,
247        }
248    }
249
250    /// Create a scaler mapping to [0, 1].
251    #[must_use]
252    pub fn unit_range() -> Self {
253        Self::new(F::zero(), F::one())
254    }
255
256    /// Fit the scaler from data.
257    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
258        if data.nrows() == 0 {
259            return Err(CoreError::ValueError(ErrorContext::new(
260                "Cannot fit MinMaxScaler on empty data",
261            )));
262        }
263        let n_cols = data.ncols();
264        let mut mins = Array1::<F>::zeros(n_cols);
265        let mut maxs = Array1::<F>::zeros(n_cols);
266        for j in 0..n_cols {
267            let col = data.column(j);
268            let mut col_min = F::infinity();
269            let mut col_max = F::neg_infinity();
270            for &v in col.iter() {
271                if v < col_min {
272                    col_min = v;
273                }
274                if v > col_max {
275                    col_max = v;
276                }
277            }
278            mins[j] = col_min;
279            maxs[j] = col_max;
280        }
281        self.data_min = Some(mins);
282        self.data_max = Some(maxs);
283        Ok(())
284    }
285
286    /// Transform data.
287    pub fn transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
288        let d_min = self
289            .data_min
290            .as_ref()
291            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MinMaxScaler not fitted")))?;
292        let d_max = self
293            .data_max
294            .as_ref()
295            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MinMaxScaler not fitted")))?;
296        if data.ncols() != d_min.len() {
297            return Err(CoreError::DimensionError(ErrorContext::new(format!(
298                "Expected {} features, got {}",
299                d_min.len(),
300                data.ncols()
301            ))));
302        }
303        let range = self.feature_max - self.feature_min;
304        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
305        let mut result = data.clone();
306        for j in 0..data.ncols() {
307            let data_range = d_max[j] - d_min[j];
308            let scale = if data_range.abs() < eps {
309                F::zero()
310            } else {
311                range / data_range
312            };
313            for i in 0..data.nrows() {
314                result[[i, j]] = (result[[i, j]] - d_min[j]) * scale + self.feature_min;
315            }
316        }
317        Ok(result)
318    }
319
320    /// Fit and transform.
321    pub fn fit_transform(&mut self, data: &Array2<F>) -> CoreResult<Array2<F>> {
322        self.fit(data)?;
323        self.transform(data)
324    }
325
326    /// Inverse transform.
327    pub fn inverse_transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
328        let d_min = self
329            .data_min
330            .as_ref()
331            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MinMaxScaler not fitted")))?;
332        let d_max = self
333            .data_max
334            .as_ref()
335            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MinMaxScaler not fitted")))?;
336        if data.ncols() != d_min.len() {
337            return Err(CoreError::DimensionError(ErrorContext::new(format!(
338                "Expected {} features, got {}",
339                d_min.len(),
340                data.ncols()
341            ))));
342        }
343        let range = self.feature_max - self.feature_min;
344        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
345        let mut result = data.clone();
346        for j in 0..data.ncols() {
347            let data_range = d_max[j] - d_min[j];
348            let scale = if range.abs() < eps {
349                F::zero()
350            } else {
351                data_range / range
352            };
353            for i in 0..data.nrows() {
354                result[[i, j]] = (result[[i, j]] - self.feature_min) * scale + d_min[j];
355            }
356        }
357        Ok(result)
358    }
359}
360
361// ---------------------------------------------------------------------------
362// RobustScaler
363// ---------------------------------------------------------------------------
364
/// Scale features using statistics robust to outliers.
///
/// Uses the median and interquartile range (IQR = Q3 - Q1) so that
/// outliers have less influence than StandardScaler.
///
/// X_scaled = (X - median) / IQR
///
/// `median` and `iqr` stay `None` until `fit` succeeds.
#[derive(Debug, Clone)]
pub struct RobustScaler<F: Float> {
    /// Per-feature median learned by `fit` (`None` until fitted)
    pub median: Option<Array1<F>>,
    /// Per-feature interquartile range learned by `fit` (`None` until fitted)
    pub iqr: Option<Array1<F>>,
    /// Whether to center the data by subtracting the median
    pub with_centering: bool,
    /// Whether to scale the data by dividing by the IQR
    pub with_scaling: bool,
}
382
383impl<F: Float + FromPrimitive + Debug + Display> RobustScaler<F> {
384    /// Create a new RobustScaler.
385    #[must_use]
386    pub fn new() -> Self {
387        Self {
388            median: None,
389            iqr: None,
390            with_centering: true,
391            with_scaling: true,
392        }
393    }
394
395    /// Create with explicit options.
396    #[must_use]
397    pub fn with_options(with_centering: bool, with_scaling: bool) -> Self {
398        Self {
399            median: None,
400            iqr: None,
401            with_centering,
402            with_scaling,
403        }
404    }
405
406    /// Fit the scaler.
407    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
408        if data.nrows() == 0 {
409            return Err(CoreError::ValueError(ErrorContext::new(
410                "Cannot fit RobustScaler on empty data",
411            )));
412        }
413        let n_cols = data.ncols();
414        let mut median_arr = Array1::<F>::zeros(n_cols);
415        let mut iqr_arr = Array1::<F>::zeros(n_cols);
416        for j in 0..n_cols {
417            let mut col_vals: Vec<F> = data.column(j).iter().copied().collect();
418            col_vals.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
419            let n = col_vals.len();
420            median_arr[j] = compute_quantile(&col_vals, F::from_f64(0.5).unwrap_or_else(F::zero));
421            let q1 = compute_quantile(&col_vals, F::from_f64(0.25).unwrap_or_else(F::zero));
422            let q3 = compute_quantile(&col_vals, F::from_f64(0.75).unwrap_or_else(F::zero));
423            iqr_arr[j] = q3 - q1;
424            let _ = n; // suppress unused warning
425        }
426        self.median = Some(median_arr);
427        self.iqr = Some(iqr_arr);
428        Ok(())
429    }
430
431    /// Transform data.
432    pub fn transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
433        let med = self
434            .median
435            .as_ref()
436            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("RobustScaler not fitted")))?;
437        let iqr = self
438            .iqr
439            .as_ref()
440            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("RobustScaler not fitted")))?;
441        if data.ncols() != med.len() {
442            return Err(CoreError::DimensionError(ErrorContext::new(format!(
443                "Expected {} features, got {}",
444                med.len(),
445                data.ncols()
446            ))));
447        }
448        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
449        let mut result = data.clone();
450        for j in 0..data.ncols() {
451            for i in 0..data.nrows() {
452                let mut val = result[[i, j]];
453                if self.with_centering {
454                    val = val - med[j];
455                }
456                if self.with_scaling {
457                    let s = if iqr[j].abs() < eps { F::one() } else { iqr[j] };
458                    val = val / s;
459                }
460                result[[i, j]] = val;
461            }
462        }
463        Ok(result)
464    }
465
466    /// Fit and transform.
467    pub fn fit_transform(&mut self, data: &Array2<F>) -> CoreResult<Array2<F>> {
468        self.fit(data)?;
469        self.transform(data)
470    }
471}
472
473impl<F: Float + FromPrimitive + Debug + Display> Default for RobustScaler<F> {
474    fn default() -> Self {
475        Self::new()
476    }
477}
478
479// ---------------------------------------------------------------------------
480// MaxAbsScaler
481// ---------------------------------------------------------------------------
482
/// Scale each feature by its maximum absolute value so values are in [-1, 1].
///
/// X_scaled = X / max(|X|)
///
/// Only a division is applied (no shift), so zeros stay zero.
/// `max_abs` stays `None` until `fit` succeeds.
#[derive(Debug, Clone)]
pub struct MaxAbsScaler<F: Float> {
    /// Per-feature maximum absolute value learned by `fit` (`None` until fitted)
    pub max_abs: Option<Array1<F>>,
}
491
492impl<F: Float + FromPrimitive + Debug + Display> MaxAbsScaler<F> {
493    /// Create a new MaxAbsScaler.
494    #[must_use]
495    pub fn new() -> Self {
496        Self { max_abs: None }
497    }
498
499    /// Fit the scaler.
500    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
501        if data.nrows() == 0 {
502            return Err(CoreError::ValueError(ErrorContext::new(
503                "Cannot fit MaxAbsScaler on empty data",
504            )));
505        }
506        let n_cols = data.ncols();
507        let mut max_abs_arr = Array1::<F>::zeros(n_cols);
508        for j in 0..n_cols {
509            let mut ma = F::zero();
510            for &v in data.column(j).iter() {
511                let av = v.abs();
512                if av > ma {
513                    ma = av;
514                }
515            }
516            max_abs_arr[j] = ma;
517        }
518        self.max_abs = Some(max_abs_arr);
519        Ok(())
520    }
521
522    /// Transform data.
523    pub fn transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
524        let ma = self
525            .max_abs
526            .as_ref()
527            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MaxAbsScaler not fitted")))?;
528        if data.ncols() != ma.len() {
529            return Err(CoreError::DimensionError(ErrorContext::new(format!(
530                "Expected {} features, got {}",
531                ma.len(),
532                data.ncols()
533            ))));
534        }
535        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
536        let mut result = data.clone();
537        for j in 0..data.ncols() {
538            let s = if ma[j].abs() < eps { F::one() } else { ma[j] };
539            for i in 0..data.nrows() {
540                result[[i, j]] = result[[i, j]] / s;
541            }
542        }
543        Ok(result)
544    }
545
546    /// Fit and transform.
547    pub fn fit_transform(&mut self, data: &Array2<F>) -> CoreResult<Array2<F>> {
548        self.fit(data)?;
549        self.transform(data)
550    }
551
552    /// Inverse transform.
553    pub fn inverse_transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
554        let ma = self
555            .max_abs
556            .as_ref()
557            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("MaxAbsScaler not fitted")))?;
558        if data.ncols() != ma.len() {
559            return Err(CoreError::DimensionError(ErrorContext::new(format!(
560                "Expected {} features, got {}",
561                ma.len(),
562                data.ncols()
563            ))));
564        }
565        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
566        let mut result = data.clone();
567        for j in 0..data.ncols() {
568            let s = if ma[j].abs() < eps { F::one() } else { ma[j] };
569            for i in 0..data.nrows() {
570                result[[i, j]] = result[[i, j]] * s;
571            }
572        }
573        Ok(result)
574    }
575}
576
577impl<F: Float + FromPrimitive + Debug + Display> Default for MaxAbsScaler<F> {
578    fn default() -> Self {
579        Self::new()
580    }
581}
582
583// ---------------------------------------------------------------------------
584// LabelEncoder
585// ---------------------------------------------------------------------------
586
/// Encode string (or any hashable) labels as integer indices.
///
/// Codes are assigned in first-appearance order during `fit`, starting at 0.
/// The two fields are kept in sync: `label_to_int[l] == i` exactly when
/// `int_to_label[i] == l`.
///
/// # Example
///
/// ```
/// use scirs2_core::preprocessing::LabelEncoder;
///
/// let labels = vec!["cat", "dog", "cat", "bird"];
/// let mut enc = LabelEncoder::new();
/// enc.fit(&labels);
/// let encoded = enc.transform(&labels).expect("transform failed");
/// assert_eq!(encoded.len(), 4);
/// ```
#[derive(Debug, Clone)]
pub struct LabelEncoder<L: Eq + Hash + Clone> {
    /// Mapping from label to its integer code (forward direction)
    pub label_to_int: HashMap<L, usize>,
    /// Mapping from integer code back to the label; also serves as the
    /// ordered list of known classes
    pub int_to_label: Vec<L>,
}
607
608impl<L: Eq + Hash + Clone + Debug> LabelEncoder<L> {
609    /// Create a new empty LabelEncoder.
610    #[must_use]
611    pub fn new() -> Self {
612        Self {
613            label_to_int: HashMap::new(),
614            int_to_label: Vec::new(),
615        }
616    }
617
618    /// Fit the encoder by learning the unique labels.
619    /// Labels are assigned indices in the order they first appear.
620    pub fn fit(&mut self, labels: &[L]) {
621        self.label_to_int.clear();
622        self.int_to_label.clear();
623        for label in labels {
624            if !self.label_to_int.contains_key(label) {
625                let idx = self.int_to_label.len();
626                self.label_to_int.insert(label.clone(), idx);
627                self.int_to_label.push(label.clone());
628            }
629        }
630    }
631
632    /// Transform labels to integer indices.
633    pub fn transform(&self, labels: &[L]) -> CoreResult<Vec<usize>> {
634        if self.int_to_label.is_empty() {
635            return Err(CoreError::InvalidState(ErrorContext::new(
636                "LabelEncoder not fitted",
637            )));
638        }
639        let mut result = Vec::with_capacity(labels.len());
640        for label in labels {
641            let idx = self.label_to_int.get(label).ok_or_else(|| {
642                CoreError::ValueError(ErrorContext::new(format!(
643                    "Unknown label encountered: {:?}",
644                    label
645                )))
646            })?;
647            result.push(*idx);
648        }
649        Ok(result)
650    }
651
652    /// Inverse-transform integer indices back to labels.
653    pub fn inverse_transform(&self, indices: &[usize]) -> CoreResult<Vec<L>> {
654        let mut result = Vec::with_capacity(indices.len());
655        for &idx in indices {
656            if idx >= self.int_to_label.len() {
657                return Err(CoreError::IndexError(ErrorContext::new(format!(
658                    "Label index {} out of range (max {})",
659                    idx,
660                    self.int_to_label.len().saturating_sub(1)
661                ))));
662            }
663            result.push(self.int_to_label[idx].clone());
664        }
665        Ok(result)
666    }
667
668    /// Fit and transform in one step.
669    pub fn fit_transform(&mut self, labels: &[L]) -> Vec<usize> {
670        self.fit(labels);
671        // After fit, transform should always succeed for the training data
672        labels.iter().map(|l| self.label_to_int[l]).collect()
673    }
674
675    /// Return the number of unique classes.
676    #[must_use]
677    pub fn n_classes(&self) -> usize {
678        self.int_to_label.len()
679    }
680}
681
682impl<L: Eq + Hash + Clone + Debug> Default for LabelEncoder<L> {
683    fn default() -> Self {
684        Self::new()
685    }
686}
687
688// ---------------------------------------------------------------------------
689// OneHotEncoder
690// ---------------------------------------------------------------------------
691
/// One-hot encode categorical features.
///
/// Each unique category for a feature becomes a binary column. Output
/// columns are grouped per input feature, in the order categories first
/// appeared during fitting.
///
/// # Example
///
/// ```
/// use scirs2_core::preprocessing::OneHotEncoder;
///
/// let data = vec![vec!["red", "small"], vec!["blue", "large"], vec!["red", "large"]];
/// let mut enc = OneHotEncoder::new();
/// enc.fit(&data);
/// let encoded = enc.transform(&data).expect("transform failed");
/// assert_eq!(encoded.ncols(), 4); // red, blue, small, large
/// ```
#[derive(Debug, Clone)]
pub struct OneHotEncoder<L: Eq + Hash + Clone> {
    /// Per-feature category encoders (one [`LabelEncoder`] per input column)
    pub encoders: Vec<LabelEncoder<L>>,
    /// Number of input features learned at fit time
    pub n_features: usize,
}
714
715impl<L: Eq + Hash + Clone + Debug> OneHotEncoder<L> {
716    /// Create a new OneHotEncoder.
717    #[must_use]
718    pub fn new() -> Self {
719        Self {
720            encoders: Vec::new(),
721            n_features: 0,
722        }
723    }
724
725    /// Fit from a 2D vector of labels (rows x features).
726    pub fn fit(&mut self, data: &[Vec<L>]) {
727        if data.is_empty() {
728            self.n_features = 0;
729            self.encoders.clear();
730            return;
731        }
732        self.n_features = data[0].len();
733        self.encoders.clear();
734        for j in 0..self.n_features {
735            let mut enc = LabelEncoder::new();
736            let col: Vec<L> = data.iter().map(|row| row[j].clone()).collect();
737            enc.fit(&col);
738            self.encoders.push(enc);
739        }
740    }
741
742    /// Transform data to a one-hot encoded `Array2<f64>`.
743    pub fn transform(&self, data: &[Vec<L>]) -> CoreResult<Array2<f64>> {
744        if self.encoders.is_empty() {
745            return Err(CoreError::InvalidState(ErrorContext::new(
746                "OneHotEncoder not fitted",
747            )));
748        }
749        let total_cols: usize = self.encoders.iter().map(|e| e.n_classes()).sum();
750        let n_rows = data.len();
751        let mut result = Array2::<f64>::zeros((n_rows, total_cols));
752        let mut col_offset = 0;
753        for (j, enc) in self.encoders.iter().enumerate() {
754            let col_labels: Vec<L> = data.iter().map(|row| row[j].clone()).collect();
755            let indices = enc.transform(&col_labels)?;
756            for (i, idx) in indices.into_iter().enumerate() {
757                result[[i, col_offset + idx]] = 1.0;
758            }
759            col_offset += enc.n_classes();
760        }
761        Ok(result)
762    }
763
764    /// Fit and transform.
765    pub fn fit_transform(&mut self, data: &[Vec<L>]) -> CoreResult<Array2<f64>> {
766        self.fit(data);
767        self.transform(data)
768    }
769
770    /// Total number of output columns.
771    #[must_use]
772    pub fn n_output_features(&self) -> usize {
773        self.encoders.iter().map(|e| e.n_classes()).sum()
774    }
775}
776
777impl<L: Eq + Hash + Clone + Debug> Default for OneHotEncoder<L> {
778    fn default() -> Self {
779        Self::new()
780    }
781}
782
783// ---------------------------------------------------------------------------
784// OrdinalEncoder
785// ---------------------------------------------------------------------------
786
/// Encode categorical features as ordinal integers.
///
/// Each feature's categories are mapped to 0, 1, 2, ... in the order
/// they first appear during fitting (one independent mapping per column).
#[derive(Debug, Clone)]
pub struct OrdinalEncoder<L: Eq + Hash + Clone> {
    /// Per-feature label encoders (one [`LabelEncoder`] per input column)
    pub encoders: Vec<LabelEncoder<L>>,
    /// Number of input features learned at fit time
    pub n_features: usize,
}
798
799impl<L: Eq + Hash + Clone + Debug> OrdinalEncoder<L> {
800    /// Create a new OrdinalEncoder.
801    #[must_use]
802    pub fn new() -> Self {
803        Self {
804            encoders: Vec::new(),
805            n_features: 0,
806        }
807    }
808
809    /// Fit from 2D data.
810    pub fn fit(&mut self, data: &[Vec<L>]) {
811        if data.is_empty() {
812            self.n_features = 0;
813            self.encoders.clear();
814            return;
815        }
816        self.n_features = data[0].len();
817        self.encoders.clear();
818        for j in 0..self.n_features {
819            let mut enc = LabelEncoder::new();
820            let col: Vec<L> = data.iter().map(|row| row[j].clone()).collect();
821            enc.fit(&col);
822            self.encoders.push(enc);
823        }
824    }
825
826    /// Transform to ordinal-encoded `Array2<usize>`.
827    pub fn transform(&self, data: &[Vec<L>]) -> CoreResult<Vec<Vec<usize>>> {
828        if self.encoders.is_empty() {
829            return Err(CoreError::InvalidState(ErrorContext::new(
830                "OrdinalEncoder not fitted",
831            )));
832        }
833        let n_rows = data.len();
834        let mut result = vec![vec![0usize; self.n_features]; n_rows];
835        for (j, enc) in self.encoders.iter().enumerate() {
836            let col_labels: Vec<L> = data.iter().map(|row| row[j].clone()).collect();
837            let indices = enc.transform(&col_labels)?;
838            for (i, idx) in indices.into_iter().enumerate() {
839                result[i][j] = idx;
840            }
841        }
842        Ok(result)
843    }
844
845    /// Inverse transform.
846    pub fn inverse_transform(&self, data: &[Vec<usize>]) -> CoreResult<Vec<Vec<L>>> {
847        if self.encoders.is_empty() {
848            return Err(CoreError::InvalidState(ErrorContext::new(
849                "OrdinalEncoder not fitted",
850            )));
851        }
852        let n_rows = data.len();
853        let mut result: Vec<Vec<L>> = Vec::with_capacity(n_rows);
854        for row in data {
855            let mut out_row = Vec::with_capacity(self.n_features);
856            for (j, enc) in self.encoders.iter().enumerate() {
857                let labels = enc.inverse_transform(&[row[j]])?;
858                out_row.push(labels.into_iter().next().ok_or_else(|| {
859                    CoreError::ValueError(ErrorContext::new("Empty inverse_transform result"))
860                })?);
861            }
862            result.push(out_row);
863        }
864        Ok(result)
865    }
866
867    /// Fit and transform.
868    pub fn fit_transform(&mut self, data: &[Vec<L>]) -> CoreResult<Vec<Vec<usize>>> {
869        self.fit(data);
870        self.transform(data)
871    }
872}
873
874impl<L: Eq + Hash + Clone + Debug> Default for OrdinalEncoder<L> {
875    fn default() -> Self {
876        Self::new()
877    }
878}
879
880// ---------------------------------------------------------------------------
881// Imputer
882// ---------------------------------------------------------------------------
883
/// Strategy for imputing missing values, used by [`Imputer`].
///
/// Missing entries are represented as NaN in the input data.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImputeStrategy {
    /// Replace with the column mean
    Mean,
    /// Replace with the column median
    Median,
    /// Replace with the column mode (most frequent value, discretized)
    Mode,
    /// Replace with a constant value
    Constant,
}
896
897/// Impute missing values in numeric data.
898///
899/// Missing values are represented as NaN. The imputer learns fill values
900/// during `fit` and applies them during `transform`.
901///
902/// # Example
903///
904/// ```
905/// use scirs2_core::preprocessing::{Imputer, ImputeStrategy};
906/// use ndarray::array;
907///
908/// let data = array![[1.0, f64::NAN], [3.0, 4.0], [5.0, 6.0]];
909/// let mut imp = Imputer::<f64>::new(ImputeStrategy::Mean, None);
910/// imp.fit(&data).expect("fit failed");
911/// let filled = imp.transform(&data).expect("transform failed");
912/// assert!(!filled[[0, 1]].is_nan());
913/// ```
#[derive(Debug, Clone)]
pub struct Imputer<F: Float> {
    /// Imputation strategy
    pub strategy: ImputeStrategy,
    /// Fill values per feature (computed during fit; `None` until `fit` succeeds)
    pub fill_values: Option<Array1<F>>,
    /// Constant fill value (used when strategy == Constant, and as the
    /// fallback for any column containing no valid values)
    pub fill_constant: F,
}
923
924impl<F: Float + FromPrimitive + Debug + Display + std::iter::Sum> Imputer<F> {
925    /// Create a new Imputer.
926    ///
927    /// `fill_constant` is only used when `strategy == ImputeStrategy::Constant`.
928    #[must_use]
929    pub fn new(strategy: ImputeStrategy, fill_constant: Option<F>) -> Self {
930        Self {
931            strategy,
932            fill_values: None,
933            fill_constant: fill_constant.unwrap_or_else(F::zero),
934        }
935    }
936
937    /// Fit the imputer.
938    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
939        if data.nrows() == 0 {
940            return Err(CoreError::ValueError(ErrorContext::new(
941                "Cannot fit Imputer on empty data",
942            )));
943        }
944        let n_cols = data.ncols();
945        let mut fill_vals = Array1::<F>::zeros(n_cols);
946        for j in 0..n_cols {
947            let col = data.column(j);
948            let valid: Vec<F> = col.iter().copied().filter(|v| !v.is_nan()).collect();
949            if valid.is_empty() {
950                fill_vals[j] = self.fill_constant;
951                continue;
952            }
953            match self.strategy {
954                ImputeStrategy::Mean => {
955                    let n = F::from_usize(valid.len()).unwrap_or_else(F::one);
956                    let s: F = valid.iter().copied().sum();
957                    fill_vals[j] = s / n;
958                }
959                ImputeStrategy::Median => {
960                    let mut sorted = valid.clone();
961                    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
962                    fill_vals[j] =
963                        compute_quantile(&sorted, F::from_f64(0.5).unwrap_or_else(F::zero));
964                }
965                ImputeStrategy::Mode => {
966                    // Discretize to find mode: bucket by rounding to ~6 decimal places
967                    let factor = F::from_f64(1e6).unwrap_or_else(F::one);
968                    let mut counts: HashMap<i64, (usize, F)> = HashMap::new();
969                    for &v in &valid {
970                        let key = NumCast::from(v * factor)
971                            .map(|x: f64| x.round() as i64)
972                            .unwrap_or(0);
973                        let entry = counts.entry(key).or_insert((0, v));
974                        entry.0 += 1;
975                    }
976                    let mode_val = counts
977                        .values()
978                        .max_by_key(|(count, _)| *count)
979                        .map(|(_, v)| *v)
980                        .unwrap_or_else(F::zero);
981                    fill_vals[j] = mode_val;
982                }
983                ImputeStrategy::Constant => {
984                    fill_vals[j] = self.fill_constant;
985                }
986            }
987        }
988        self.fill_values = Some(fill_vals);
989        Ok(())
990    }
991
992    /// Transform data by replacing NaN with imputed values.
993    pub fn transform(&self, data: &Array2<F>) -> CoreResult<Array2<F>> {
994        let fill = self
995            .fill_values
996            .as_ref()
997            .ok_or_else(|| CoreError::InvalidState(ErrorContext::new("Imputer not fitted")))?;
998        if data.ncols() != fill.len() {
999            return Err(CoreError::DimensionError(ErrorContext::new(format!(
1000                "Expected {} features, got {}",
1001                fill.len(),
1002                data.ncols()
1003            ))));
1004        }
1005        let mut result = data.clone();
1006        for j in 0..data.ncols() {
1007            for i in 0..data.nrows() {
1008                if result[[i, j]].is_nan() {
1009                    result[[i, j]] = fill[j];
1010                }
1011            }
1012        }
1013        Ok(result)
1014    }
1015
1016    /// Fit and transform.
1017    pub fn fit_transform(&mut self, data: &Array2<F>) -> CoreResult<Array2<F>> {
1018        self.fit(data)?;
1019        self.transform(data)
1020    }
1021}
1022
1023// ---------------------------------------------------------------------------
1024// Outlier Detection
1025// ---------------------------------------------------------------------------
1026
/// Method used for outlier detection.
///
/// The `threshold` field of [`OutlierDetector`] is interpreted according to
/// the chosen method.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OutlierMethod {
    /// Z-score method: outliers have |z| > threshold (default 3.0)
    ZScore,
    /// IQR method: outliers are below Q1 - factor*IQR or above Q3 + factor*IQR (default factor 1.5)
    Iqr,
}
1035
1036/// Detect outliers in numeric data.
1037///
1038/// # Example
1039///
1040/// ```
1041/// use scirs2_core::preprocessing::{OutlierDetector, OutlierMethod};
1042/// use ndarray::array;
1043///
1044/// let data = array![[1.0], [2.0], [3.0], [100.0]];
1045/// let mut det = OutlierDetector::<f64>::new(OutlierMethod::ZScore, 2.0);
1046/// det.fit(&data).expect("fit failed");
1047/// let mask = det.detect(&data).expect("detect failed");
1048/// assert!(mask[3]); // 100.0 is an outlier
1049/// ```
#[derive(Debug, Clone)]
pub struct OutlierDetector<F: Float> {
    /// Detection method
    pub method: OutlierMethod,
    /// Threshold / factor parameter (z-score cutoff for ZScore, IQR multiplier for Iqr)
    pub threshold: F,
    /// Fitted parameters for ZScore: (mean, std) per feature; `None` until fitted
    zscore_params: Option<Vec<(F, F)>>,
    /// Fitted parameters for IQR: (Q1, Q3, IQR) per feature; `None` until fitted
    iqr_params: Option<Vec<(F, F, F)>>,
}
1061
1062impl<F: Float + FromPrimitive + Debug + Display + std::iter::Sum> OutlierDetector<F> {
1063    /// Create a new OutlierDetector.
1064    ///
1065    /// For ZScore, `threshold` is the z-score threshold (e.g. 3.0).
1066    /// For IQR, `threshold` is the IQR factor (e.g. 1.5).
1067    #[must_use]
1068    pub fn new(method: OutlierMethod, threshold: F) -> Self {
1069        Self {
1070            method,
1071            threshold,
1072            zscore_params: None,
1073            iqr_params: None,
1074        }
1075    }
1076
1077    /// Fit the detector.
1078    pub fn fit(&mut self, data: &Array2<F>) -> CoreResult<()> {
1079        if data.nrows() == 0 {
1080            return Err(CoreError::ValueError(ErrorContext::new(
1081                "Cannot fit OutlierDetector on empty data",
1082            )));
1083        }
1084        let n_cols = data.ncols();
1085        match self.method {
1086            OutlierMethod::ZScore => {
1087                let mut params = Vec::with_capacity(n_cols);
1088                for j in 0..n_cols {
1089                    let col = data.column(j);
1090                    let n = F::from_usize(col.len()).unwrap_or_else(F::one);
1091                    let sum: F = col.iter().copied().sum();
1092                    let mean = sum / n;
1093                    let var_sum: F = col.iter().map(|&x| (x - mean) * (x - mean)).sum();
1094                    let std_dev = (var_sum / n).sqrt();
1095                    params.push((mean, std_dev));
1096                }
1097                self.zscore_params = Some(params);
1098            }
1099            OutlierMethod::Iqr => {
1100                let mut params = Vec::with_capacity(n_cols);
1101                for j in 0..n_cols {
1102                    let mut sorted: Vec<F> = data.column(j).iter().copied().collect();
1103                    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
1104                    let q1 = compute_quantile(&sorted, F::from_f64(0.25).unwrap_or_else(F::zero));
1105                    let q3 = compute_quantile(&sorted, F::from_f64(0.75).unwrap_or_else(F::zero));
1106                    let iqr = q3 - q1;
1107                    params.push((q1, q3, iqr));
1108                }
1109                self.iqr_params = Some(params);
1110            }
1111        }
1112        Ok(())
1113    }
1114
1115    /// Detect outliers. Returns a boolean mask where `true` means outlier.
1116    ///
1117    /// A sample is considered an outlier if ANY of its features is an outlier.
1118    pub fn detect(&self, data: &Array2<F>) -> CoreResult<Vec<bool>> {
1119        let n_rows = data.nrows();
1120        let mut mask = vec![false; n_rows];
1121        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
1122        match self.method {
1123            OutlierMethod::ZScore => {
1124                let params = self.zscore_params.as_ref().ok_or_else(|| {
1125                    CoreError::InvalidState(ErrorContext::new("OutlierDetector not fitted"))
1126                })?;
1127                for j in 0..data.ncols() {
1128                    let (mean, std_dev) = params[j];
1129                    let s = if std_dev.abs() < eps {
1130                        F::one()
1131                    } else {
1132                        std_dev
1133                    };
1134                    for i in 0..n_rows {
1135                        let z = (data[[i, j]] - mean) / s;
1136                        if z.abs() > self.threshold {
1137                            mask[i] = true;
1138                        }
1139                    }
1140                }
1141            }
1142            OutlierMethod::Iqr => {
1143                let params = self.iqr_params.as_ref().ok_or_else(|| {
1144                    CoreError::InvalidState(ErrorContext::new("OutlierDetector not fitted"))
1145                })?;
1146                for j in 0..data.ncols() {
1147                    let (q1, q3, iqr) = params[j];
1148                    let lower = q1 - self.threshold * iqr;
1149                    let upper = q3 + self.threshold * iqr;
1150                    for i in 0..n_rows {
1151                        let v = data[[i, j]];
1152                        if v < lower || v > upper {
1153                            mask[i] = true;
1154                        }
1155                    }
1156                }
1157            }
1158        }
1159        Ok(mask)
1160    }
1161
1162    /// Detect per-feature outlier masks. Returns `Array2<bool>` with same shape as data.
1163    pub fn detect_per_feature(&self, data: &Array2<F>) -> CoreResult<Array2<bool>> {
1164        let n_rows = data.nrows();
1165        let n_cols = data.ncols();
1166        let mut mask = Array2::<bool>::default((n_rows, n_cols));
1167        let eps = F::from_f64(1e-10).unwrap_or_else(F::epsilon);
1168        match self.method {
1169            OutlierMethod::ZScore => {
1170                let params = self.zscore_params.as_ref().ok_or_else(|| {
1171                    CoreError::InvalidState(ErrorContext::new("OutlierDetector not fitted"))
1172                })?;
1173                for j in 0..n_cols {
1174                    let (mean, std_dev) = params[j];
1175                    let s = if std_dev.abs() < eps {
1176                        F::one()
1177                    } else {
1178                        std_dev
1179                    };
1180                    for i in 0..n_rows {
1181                        let z = (data[[i, j]] - mean) / s;
1182                        mask[[i, j]] = z.abs() > self.threshold;
1183                    }
1184                }
1185            }
1186            OutlierMethod::Iqr => {
1187                let params = self.iqr_params.as_ref().ok_or_else(|| {
1188                    CoreError::InvalidState(ErrorContext::new("OutlierDetector not fitted"))
1189                })?;
1190                for j in 0..n_cols {
1191                    let (q1, q3, iqr) = params[j];
1192                    let lower = q1 - self.threshold * iqr;
1193                    let upper = q3 + self.threshold * iqr;
1194                    for i in 0..n_rows {
1195                        let v = data[[i, j]];
1196                        mask[[i, j]] = v < lower || v > upper;
1197                    }
1198                }
1199            }
1200        }
1201        Ok(mask)
1202    }
1203}
1204
1205// ---------------------------------------------------------------------------
1206// Helper: quantile computation
1207// ---------------------------------------------------------------------------
1208
1209/// Compute a quantile (0..1) from a sorted slice using linear interpolation.
1210fn compute_quantile<F: Float + FromPrimitive>(sorted: &[F], q: F) -> F {
1211    if sorted.is_empty() {
1212        return F::zero();
1213    }
1214    if sorted.len() == 1 {
1215        return sorted[0];
1216    }
1217    let n = sorted.len();
1218    let idx_f = q * F::from_usize(n - 1).unwrap_or_else(F::zero);
1219    let lower = NumCast::from(idx_f.floor()).unwrap_or(0usize);
1220    let upper = NumCast::from(idx_f.ceil()).unwrap_or(n - 1);
1221    let lower = lower.min(n - 1);
1222    let upper = upper.min(n - 1);
1223    if lower == upper {
1224        return sorted[lower];
1225    }
1226    let frac = idx_f - F::from_usize(lower).unwrap_or_else(F::zero);
1227    sorted[lower] * (F::one() - frac) + sorted[upper] * frac
1228}
1229
1230// ---------------------------------------------------------------------------
1231// Tests
1232// ---------------------------------------------------------------------------
1233
#[cfg(test)]
mod tests {
    use super::*;
    use ::ndarray::array;

    const EPS: f64 = 1e-6;

    #[test]
    fn test_standard_scaler_basic() {
        let data = array![[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]];
        let mut scaler = StandardScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let transformed = scaler.transform(&data).expect("transform");
        // Mean of each column should be ~0
        for j in 0..2 {
            let col_mean: f64 = transformed.column(j).iter().sum::<f64>() / 3.0;
            assert!(col_mean.abs() < EPS, "col {} mean = {}", j, col_mean);
        }
    }

    #[test]
    fn test_standard_scaler_inverse() {
        let data = array![[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]];
        let mut scaler = StandardScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        let inv = scaler.inverse_transform(&t).expect("inverse");
        for i in 0..3 {
            for j in 0..2 {
                assert!(
                    (inv[[i, j]] - data[[i, j]]).abs() < EPS,
                    "mismatch at [{}, {}]",
                    i,
                    j
                );
            }
        }
    }

    #[test]
    fn test_standard_scaler_empty_error() {
        let data = Array2::<f64>::zeros((0, 3));
        let mut scaler = StandardScaler::<f64>::new();
        assert!(scaler.fit(&data).is_err());
    }

    #[test]
    fn test_minmax_scaler_basic() {
        let data = array![[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]];
        let mut scaler = MinMaxScaler::<f64>::new(0.0, 1.0);
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        assert!((t[[0, 0]] - 0.0).abs() < EPS);
        assert!((t[[2, 0]] - 1.0).abs() < EPS);
        assert!((t[[0, 1]] - 0.0).abs() < EPS);
        assert!((t[[2, 1]] - 1.0).abs() < EPS);
    }

    #[test]
    fn test_minmax_scaler_custom_range() {
        let data = array![[0.0], [5.0], [10.0]];
        let mut scaler = MinMaxScaler::<f64>::new(-1.0, 1.0);
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        assert!((t[[0, 0]] - (-1.0)).abs() < EPS);
        assert!((t[[1, 0]] - 0.0).abs() < EPS);
        assert!((t[[2, 0]] - 1.0).abs() < EPS);
    }

    #[test]
    fn test_minmax_scaler_inverse() {
        let data = array![[2.0, 4.0], [6.0, 8.0]];
        let mut scaler = MinMaxScaler::<f64>::new(0.0, 1.0);
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        let inv = scaler.inverse_transform(&t).expect("inverse");
        for i in 0..2 {
            for j in 0..2 {
                assert!((inv[[i, j]] - data[[i, j]]).abs() < EPS);
            }
        }
    }

    #[test]
    fn test_robust_scaler_basic() {
        let data = array![[1.0], [2.0], [3.0], [4.0], [100.0]];
        let mut scaler = RobustScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        // The median is 3.0 so the third element should be 0
        assert!((t[[2, 0]]).abs() < EPS);
    }

    #[test]
    fn test_max_abs_scaler_basic() {
        let data = array![[-3.0, 2.0], [1.0, -5.0]];
        let mut scaler = MaxAbsScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        // Max abs of col0 = 3, col1 = 5
        assert!((t[[0, 0]] - (-1.0)).abs() < EPS);
        assert!((t[[1, 1]] - (-1.0)).abs() < EPS);
    }

    #[test]
    fn test_max_abs_scaler_inverse() {
        let data = array![[4.0, -8.0], [-2.0, 6.0]];
        let mut scaler = MaxAbsScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        let inv = scaler.inverse_transform(&t).expect("inverse");
        for i in 0..2 {
            for j in 0..2 {
                assert!((inv[[i, j]] - data[[i, j]]).abs() < EPS);
            }
        }
    }

    #[test]
    fn test_label_encoder() {
        let labels = vec!["cat", "dog", "cat", "bird", "dog"];
        let mut enc = LabelEncoder::new();
        enc.fit(&labels);
        assert_eq!(enc.n_classes(), 3);
        let encoded = enc.transform(&labels).expect("transform");
        assert_eq!(encoded[0], encoded[2]); // cat == cat
        assert_eq!(encoded[1], encoded[4]); // dog == dog
        let decoded = enc.inverse_transform(&encoded).expect("inverse");
        assert_eq!(decoded, labels);
    }

    #[test]
    fn test_label_encoder_unknown() {
        let labels = vec!["a", "b"];
        let mut enc = LabelEncoder::new();
        enc.fit(&labels);
        let result = enc.transform(&["c"]);
        assert!(result.is_err());
    }

    #[test]
    fn test_one_hot_encoder() {
        let data = vec![
            vec!["red", "small"],
            vec!["blue", "large"],
            vec!["red", "large"],
        ];
        let mut enc = OneHotEncoder::new();
        enc.fit(&data);
        let encoded = enc.transform(&data).expect("transform");
        assert_eq!(encoded.nrows(), 3);
        assert_eq!(encoded.ncols(), 4); // 2 colors + 2 sizes
        // Each row should sum to 2 (one active category per feature)
        for i in 0..3 {
            let row_sum: f64 = encoded.row(i).iter().sum();
            assert!((row_sum - 2.0).abs() < EPS);
        }
    }

    #[test]
    fn test_ordinal_encoder() {
        let data = vec![vec!["a", "x"], vec!["b", "y"], vec!["a", "y"]];
        let mut enc = OrdinalEncoder::new();
        let encoded = enc.fit_transform(&data).expect("transform");
        assert_eq!(encoded[0][0], encoded[2][0]); // a == a
        let decoded = enc.inverse_transform(&encoded).expect("inverse");
        assert_eq!(decoded, data);
    }

    #[test]
    fn test_imputer_mean() {
        let data = array![[1.0, f64::NAN], [3.0, 4.0], [5.0, 6.0]];
        let mut imp = Imputer::<f64>::new(ImputeStrategy::Mean, None);
        imp.fit(&data).expect("fit");
        let filled = imp.transform(&data).expect("transform");
        assert!(!filled[[0, 1]].is_nan());
        // Mean of col1 valid values: (4+6)/2 = 5
        assert!((filled[[0, 1]] - 5.0).abs() < EPS);
    }

    #[test]
    fn test_imputer_median() {
        let data = array![[f64::NAN, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0]];
        let mut imp = Imputer::<f64>::new(ImputeStrategy::Median, None);
        imp.fit(&data).expect("fit");
        let filled = imp.transform(&data).expect("transform");
        assert!(!filled[[0, 0]].is_nan());
        // Median of [2,4,6] = 4
        assert!((filled[[0, 0]] - 4.0).abs() < EPS);
    }

    #[test]
    fn test_imputer_constant() {
        let data = array![[1.0, f64::NAN], [f64::NAN, 4.0]];
        let mut imp = Imputer::<f64>::new(ImputeStrategy::Constant, Some(-999.0));
        imp.fit(&data).expect("fit");
        let filled = imp.transform(&data).expect("transform");
        assert!((filled[[0, 1]] - (-999.0)).abs() < EPS);
        assert!((filled[[1, 0]] - (-999.0)).abs() < EPS);
    }

    #[test]
    fn test_outlier_zscore() {
        // Use enough normal-range samples so the outlier has a clear z-score
        let data = array![
            [1.0],
            [2.0],
            [3.0],
            [2.0],
            [1.5],
            [2.5],
            [3.0],
            [2.0],
            [1.0],
            [2.0],
            [3.0],
            [2.5],
            [1.5],
            [2.0],
            [2.5],
            [100.0]
        ];
        let mut det = OutlierDetector::<f64>::new(OutlierMethod::ZScore, 2.0);
        det.fit(&data).expect("fit");
        let mask = det.detect(&data).expect("detect");
        // 100 should be an outlier (last element, index 15)
        assert!(mask[15]);
        // Normal values should not be outliers
        assert!(!mask[0]);
        assert!(!mask[1]);
    }

    #[test]
    fn test_outlier_iqr() {
        let data = array![[1.0], [2.0], [3.0], [4.0], [5.0], [100.0]];
        let mut det = OutlierDetector::<f64>::new(OutlierMethod::Iqr, 1.5);
        det.fit(&data).expect("fit");
        let mask = det.detect(&data).expect("detect");
        assert!(mask[5]); // 100 is outlier
        assert!(!mask[0]);
    }

    #[test]
    fn test_outlier_per_feature() {
        let data = array![[1.0, 10.0], [2.0, 20.0], [3.0, 100.0]];
        let mut det = OutlierDetector::<f64>::new(OutlierMethod::ZScore, 1.0);
        det.fit(&data).expect("fit");
        let mask = det.detect_per_feature(&data).expect("detect");
        assert_eq!(mask.nrows(), 3);
        assert_eq!(mask.ncols(), 2);
        // col1: mean ~43.33, std ~40.28 => z(100) ~ 1.41 > 1 is an outlier,
        // while z(20) ~ -0.58 is not.
        assert!(mask[[2, 1]]);
        assert!(!mask[[1, 1]]);
        // col0: 2.0 is exactly the mean (z = 0), so it is not an outlier.
        assert!(!mask[[1, 0]]);
    }

    #[test]
    fn test_standard_scaler_f32() {
        let data = array![[1.0f32, 2.0], [3.0, 4.0], [5.0, 6.0]];
        let mut scaler = StandardScaler::<f32>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        let col_mean: f32 = t.column(0).iter().sum::<f32>() / 3.0;
        assert!(col_mean.abs() < 1e-4);
    }

    #[test]
    fn test_compute_quantile() {
        let sorted = vec![1.0f64, 2.0, 3.0, 4.0, 5.0];
        assert!((compute_quantile(&sorted, 0.0) - 1.0).abs() < EPS);
        assert!((compute_quantile(&sorted, 0.5) - 3.0).abs() < EPS);
        assert!((compute_quantile(&sorted, 1.0) - 5.0).abs() < EPS);
        assert!((compute_quantile(&sorted, 0.25) - 2.0).abs() < EPS);
    }

    #[test]
    fn test_constant_feature_standard_scaler() {
        // All values the same => std=0, should not produce NaN
        let data = array![[5.0], [5.0], [5.0]];
        let mut scaler = StandardScaler::<f64>::new();
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        assert!(!t[[0, 0]].is_nan());
    }

    #[test]
    fn test_constant_feature_minmax() {
        let data = array![[5.0], [5.0], [5.0]];
        let mut scaler = MinMaxScaler::<f64>::new(0.0, 1.0);
        scaler.fit(&data).expect("fit");
        let t = scaler.transform(&data).expect("transform");
        assert!(!t[[0, 0]].is_nan());
    }

    #[test]
    fn test_fit_transform_shortcut() {
        let data = array![[1.0, 2.0], [3.0, 4.0]];
        let mut scaler = StandardScaler::<f64>::new();
        let t = scaler.fit_transform(&data).expect("fit_transform");
        assert_eq!(t.shape(), &[2, 2]);
    }

    #[test]
    fn test_dimension_mismatch_error() {
        let train = array![[1.0, 2.0], [3.0, 4.0]];
        let test_data = array![[1.0, 2.0, 3.0]];
        let mut scaler = StandardScaler::<f64>::new();
        scaler.fit(&train).expect("fit");
        assert!(scaler.transform(&test_data).is_err());
    }

    #[test]
    fn test_not_fitted_error() {
        let data = array![[1.0]];
        let scaler = StandardScaler::<f64>::new();
        assert!(scaler.transform(&data).is_err());
    }

    #[test]
    fn test_label_encoder_fit_transform() {
        let labels = vec![10, 20, 30, 20, 10];
        let mut enc = LabelEncoder::new();
        let encoded = enc.fit_transform(&labels);
        assert_eq!(encoded[0], encoded[4]);
        assert_eq!(encoded[1], encoded[3]);
        assert_ne!(encoded[0], encoded[1]);
    }

    #[test]
    fn test_imputer_mode() {
        let data = array![[1.0, f64::NAN], [2.0, 3.0], [2.0, 3.0], [3.0, 5.0]];
        let mut imp = Imputer::<f64>::new(ImputeStrategy::Mode, None);
        imp.fit(&data).expect("fit");
        let filled = imp.transform(&data).expect("transform");
        // col0 contains no NaN, so its values pass through unchanged.
        assert!((filled[[0, 0]] - 1.0).abs() < EPS);
        // The NaN at [0, 1] is filled with the mode of col1's valid values
        // (3 appears twice, 5 once).
        assert!((filled[[0, 1]] - 3.0).abs() < EPS);
    }
}
1573}