use crate::error::FdarError;
use crate::matrix::FdMatrix;
use crate::regression::fdata_to_pc_1d;
pub mod cv;
pub mod dd;
pub mod fit;
pub mod kernel;
pub mod knn;
pub mod lda;
pub mod qda;
#[cfg(test)]
mod tests;
/// Outcome of fitting and evaluating a functional classifier.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct ClassifResult {
    /// Predicted class index for each observation (indices remapped to
    /// `0..n_classes`; see `remap_labels`).
    pub predicted: Vec<usize>,
    /// Per-observation class probabilities, when the method produces them.
    pub probabilities: Option<FdMatrix>,
    /// Fraction of observations whose prediction matches the true label.
    pub accuracy: f64,
    /// Confusion matrix: `confusion[t][p]` counts observations of true class
    /// `t` that were predicted as class `p`.
    pub confusion: Vec<Vec<usize>>,
    /// Number of distinct classes in the training labels.
    pub n_classes: usize,
    /// Number of functional principal components used as features.
    pub ncomp: usize,
}
/// Result of cross-validating a functional classifier.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub struct ClassifCvResult {
    /// Overall misclassification rate across folds.
    pub error_rate: f64,
    /// Misclassification rate of each individual fold.
    pub fold_errors: Vec<f64>,
    /// Number of components that achieved the lowest CV error.
    pub best_ncomp: usize,
}
/// Remaps arbitrary class labels to contiguous indices `0..g`.
///
/// Labels are ranked by their sorted order, so the smallest original label
/// becomes `0`, the next distinct label `1`, and so on. Returns the remapped
/// labels together with `g`, the number of distinct classes.
pub(crate) fn remap_labels(y: &[usize]) -> (Vec<usize>, usize) {
    let mut unique: Vec<usize> = y.to_vec();
    unique.sort_unstable();
    unique.dedup();
    let g = unique.len();
    // `unique` is sorted and deduplicated, so binary search finds each label
    // in O(log g) instead of the O(g) linear scan per element.
    let labels: Vec<usize> = y
        .iter()
        .map(|label| unique.binary_search(label).unwrap_or(0))
        .collect();
    (labels, g)
}
/// Builds a `g x g` confusion matrix where entry `[t][p]` counts observations
/// with true class `t` predicted as class `p`. Pairs with an out-of-range
/// label on either side are skipped.
fn confusion_matrix(true_labels: &[usize], pred_labels: &[usize], g: usize) -> Vec<Vec<usize>> {
    let mut matrix = vec![vec![0usize; g]; g];
    true_labels
        .iter()
        .zip(pred_labels)
        .filter(|&(&t, &p)| t < g && p < g)
        .for_each(|(&t, &p)| matrix[t][p] += 1);
    matrix
}
/// Computes per-class feature means, per-class observation counts, and
/// empirical class priors (`count / n`) from a feature matrix and its
/// already-remapped labels in `0..g`.
pub(crate) fn class_means_and_priors(
    features: &FdMatrix,
    labels: &[usize],
    g: usize,
) -> (Vec<Vec<f64>>, Vec<usize>, Vec<f64>) {
    let n = features.nrows();
    let d = features.ncols();
    let mut counts = vec![0usize; g];
    let mut sums = vec![vec![0.0_f64; d]; g];
    // Accumulate per-class column sums and membership counts.
    for i in 0..n {
        let c = labels[i];
        counts[c] += 1;
        let target = &mut sums[c];
        for (j, acc) in target.iter_mut().enumerate() {
            *acc += features[(i, j)];
        }
    }
    // Turn sums into means; empty classes keep their zero rows.
    for (row, &count) in sums.iter_mut().zip(counts.iter()) {
        if count > 0 {
            let denom = count as f64;
            for value in row.iter_mut() {
                *value /= denom;
            }
        }
    }
    let total = n as f64;
    let priors: Vec<f64> = counts.iter().map(|&c| c as f64 / total).collect();
    (sums, counts, priors)
}
/// Returns the fraction of positions where `true_labels` and `pred_labels`
/// agree; an empty label slice yields `0.0` rather than a NaN division.
fn compute_accuracy(true_labels: &[usize], pred_labels: &[usize]) -> f64 {
    if true_labels.is_empty() {
        return 0.0;
    }
    let matches: usize = true_labels
        .iter()
        .zip(pred_labels)
        .map(|(t, p)| usize::from(t == p))
        .sum();
    matches as f64 / true_labels.len() as f64
}
/// Builds the classifier feature matrix: functional principal-component
/// scores of `data` optionally augmented with scalar covariate columns.
///
/// The functional observations are evaluated on an equally spaced grid over
/// `[0, 1]` and reduced to `ncomp` principal components via `fdata_to_pc_1d`.
///
/// Returns `(features, mean, rotation, weights)` where `features` is
/// `n x (ncomp + n_covariates)` and the remaining items are the FPCA mean
/// curve, rotation matrix, and quadrature weights needed to project new data.
///
/// # Errors
/// Propagates any `FdarError` raised by the FPCA decomposition.
pub(crate) fn build_feature_matrix(
    data: &FdMatrix,
    scalar_covariates: Option<&FdMatrix>,
    ncomp: usize,
) -> Result<(FdMatrix, Vec<f64>, FdMatrix, Vec<f64>), FdarError> {
    let m = data.ncols();
    // Grid on [0, 1]; `.max(1)` guards the divisor when m == 1.
    let argvals: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1).max(1) as f64).collect();
    let fpca = fdata_to_pc_1d(data, ncomp, &argvals)?;
    let n = data.nrows();
    let d_pc = fpca.scores.ncols();
    // Use the `FdMatrix` path imported at the top of the module for
    // consistency (was `super::matrix::FdMatrix::ncols`).
    let d_cov = scalar_covariates.map_or(0, FdMatrix::ncols);
    let d = d_pc + d_cov;
    let mut features = FdMatrix::zeros(n, d);
    for i in 0..n {
        // PC scores occupy the leading columns...
        for j in 0..d_pc {
            features[(i, j)] = fpca.scores[(i, j)];
        }
        // ...followed by the scalar covariates, when present.
        if let Some(cov) = scalar_covariates {
            for j in 0..d_cov {
                features[(i, d_pc + j)] = cov[(i, j)];
            }
        }
    }
    Ok((features, fpca.mean, fpca.rotation, fpca.weights))
}
pub use cv::fclassif_cv;
pub use dd::fclassif_dd;
pub(crate) use fit::classif_predict_probs;
pub use fit::{
fclassif_cv_with_config, fclassif_knn_fit, fclassif_lda_fit, fclassif_qda_fit, ClassifCvConfig,
ClassifFit, ClassifMethod,
};
pub use kernel::{fclassif_kernel, kernel_classify_from_distances};
pub use knn::{fclassif_knn, knn_classify_from_distances};
pub use lda::fclassif_lda;
pub use qda::fclassif_qda;