#[allow(deprecated)]
pub mod models {
use crate::column::{Column, ColumnTrait, Float64Column};
use crate::error::{Error, Result};
use crate::optimized::{ColumnView, OptimizedDataFrame};
use std::collections::HashMap;
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::models::SupervisedModel` instead"
)]
pub use crate::ml::models::SupervisedModel;
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::models::linear::LinearRegression` instead"
)]
pub use crate::ml::models::linear::LinearRegression;
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::models::linear::LogisticRegression` instead"
)]
pub use crate::ml::models::linear::LogisticRegression;
pub mod model_selection {
use crate::error::Result;
use crate::optimized::OptimizedDataFrame;
/// Splits `df` into a `(train, test)` pair of frames.
///
/// * `df` — source frame.
/// * `test_size` — fraction of rows assigned to the test set; must lie
///   strictly between 0 and 1.
/// * `random_state` — optional RNG seed; defaults to 42 when `None`.
///
/// # Errors
/// Returns `Error::InvalidInput` when `test_size` is out of range or when the
/// rounded test count would leave either split empty.
///
/// NOTE(review): the two splits come from two *independent* `sample` calls,
/// so they are not guaranteed to be disjoint nor to cover the whole frame —
/// a row may land in both sets. A true index partition needs a row-slicing
/// API not visible here; confirm against `OptimizedDataFrame`'s interface.
#[deprecated(
    since = "0.1.0",
    note = "Use `pandrs::ml::models::train_test_split` instead"
)]
pub fn train_test_split(
    df: &OptimizedDataFrame,
    test_size: f64,
    random_state: Option<u64>,
) -> Result<(OptimizedDataFrame, OptimizedDataFrame)> {
    if test_size <= 0.0 || test_size >= 1.0 {
        return Err(crate::error::Error::InvalidInput(
            "test_size must be between 0 and 1".into(),
        ));
    }
    let n_rows = df.row_count();
    let n_test = (n_rows as f64 * test_size).round() as usize;
    if n_test == 0 || n_test == n_rows {
        return Err(crate::error::Error::InvalidInput(format!(
            "test_size {} would result in empty training or test set",
            test_size
        )));
    }
    let n_train = n_rows - n_test;
    // Fix: honor the caller-supplied `random_state` (it was previously
    // ignored and the seed hard-coded to 42). Also dropped the dead
    // `train_indices`/`test_indices` vectors the old code built but never
    // used — only their lengths mattered.
    let seed = random_state.unwrap_or(42);
    let train_data = df.sample(n_train, false, Some(seed))?;
    let test_data = df.sample(n_test, false, Some(seed + 1))?;
    Ok((train_data, test_data))
}
/// Deprecated stub for k-fold cross-validation scoring.
///
/// This entry point is retained only so old callers keep compiling; it
/// always fails with `Error::InvalidOperation` directing callers to the
/// replacement API. Parameters are underscore-prefixed because they are
/// intentionally unused (silences compiler warnings without changing the
/// positional call interface).
#[deprecated(
    since = "0.1.0",
    note = "Use `pandrs::ml::models::evaluation::cross_val_score` instead"
)]
pub fn cross_val_score<M>(
    _model: &M,
    _df: &OptimizedDataFrame,
    _target: &str,
    _features: &[&str],
    _k_folds: usize,
) -> Result<Vec<f64>>
where
    M: crate::ml::models::SupervisedModel + Clone,
{
    Err(crate::error::Error::InvalidOperation(
        "This function is deprecated. Please use `pandrs::ml::models::evaluation::cross_val_score` with the new API".into()
    ))
}
}
/// Deprecated model-persistence shims kept for backward compatibility.
pub mod model_persistence {
use crate::error::Result;
use std::path::Path;
/// Legacy trait for saving a model to, and loading one from, a filesystem
/// path. Superseded by `pandrs::ml::models::persistence::ModelPersistence`.
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::models::persistence::ModelPersistence` instead"
)]
pub trait ModelPersistence: Sized {
/// Serialize this model to `path`.
fn save_model<P: AsRef<Path>>(&self, path: P) -> Result<()>;
/// Deserialize a model previously saved at `path`.
fn load_model<P: AsRef<Path>>(path: P) -> Result<Self>;
}
}
}
#[allow(deprecated)]
pub mod anomaly_detection {
use crate::error::Result;
use crate::optimized::OptimizedDataFrame;
/// Backward-compatibility wrapper for the old isolation-forest API; all
/// work is delegated to the new `crate::ml::anomaly::IsolationForest`.
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::anomaly::IsolationForest` instead"
)]
pub struct IsolationForest {
// The replacement implementation that actually does the work.
inner: crate::ml::anomaly::IsolationForest,
}
impl IsolationForest {
    /// Builds a legacy-API isolation forest by configuring the replacement
    /// `crate::ml::anomaly::IsolationForest`.
    ///
    /// NOTE(review): `max_features` is accepted for signature compatibility
    /// but is NOT forwarded — no matching field is set on the new type here.
    /// Confirm whether it should be wired through.
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::anomaly::IsolationForest::new` instead"
    )]
    pub fn new(
        n_estimators: usize,
        max_samples: Option<usize>,
        max_features: Option<f64>,
        contamination: f64,
        random_seed: Option<u64>,
    ) -> Self {
        // Explicit discard: documents that the parameter is intentionally
        // dropped and silences the unused-variable warning.
        let _ = max_features;
        let mut forest = crate::ml::anomaly::IsolationForest::new();
        forest.n_estimators = n_estimators;
        forest.max_samples = max_samples;
        forest.contamination = contamination;
        forest.random_seed = random_seed;
        IsolationForest { inner: forest }
    }
    /// Per-row anomaly scores; delegates to the new implementation.
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::anomaly::IsolationForest::anomaly_scores` instead"
    )]
    pub fn anomaly_scores(&self) -> &[f64] {
        self.inner.anomaly_scores()
    }
    /// Per-row labels; delegates to the new implementation.
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::anomaly::IsolationForest::labels` instead"
    )]
    pub fn labels(&self) -> &[i64] {
        self.inner.labels()
    }
}
/// Legacy distance-metric selector; superseded by
/// `pandrs::ml::clustering::DistanceMetric` (a `From` conversion exists).
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::clustering::DistanceMetric` instead"
)]
pub enum DistanceMetric {
/// Euclidean (L2) distance.
Euclidean,
/// Manhattan (L1) distance.
Manhattan,
/// Cosine distance.
Cosine,
}
/// Maps each deprecated metric variant onto its same-named replacement.
impl From<DistanceMetric> for crate::ml::clustering::DistanceMetric {
    fn from(metric: DistanceMetric) -> Self {
        // Alias the target enum once instead of repeating the full path.
        use crate::ml::clustering::DistanceMetric as Current;
        match metric {
            DistanceMetric::Euclidean => Current::Euclidean,
            DistanceMetric::Manhattan => Current::Manhattan,
            DistanceMetric::Cosine => Current::Cosine,
        }
    }
}
/// Backward-compatibility wrapper for the old LOF API; all work is
/// delegated to the new `crate::ml::anomaly::LocalOutlierFactor`.
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::anomaly::LocalOutlierFactor` instead"
)]
pub struct LocalOutlierFactor {
// The replacement implementation that actually does the work.
inner: crate::ml::anomaly::LocalOutlierFactor,
}
impl LocalOutlierFactor {
    /// Builds a legacy-API LOF detector on top of the new implementation,
    /// forwarding `n_neighbors` and `contamination`.
    ///
    /// NOTE(review): `metric` is accepted for signature compatibility but is
    /// NOT forwarded, even though a `From<DistanceMetric>` conversion exists
    /// in this module — confirm whether the new `LocalOutlierFactor` builder
    /// can accept a metric and wire it through.
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::anomaly::LocalOutlierFactor::new` instead"
    )]
    pub fn new(n_neighbors: usize, contamination: f64, metric: DistanceMetric) -> Self {
        // Explicit discard: documents the intentionally dropped parameter
        // and silences the unused-variable warning.
        let _ = metric;
        let lof = crate::ml::anomaly::LocalOutlierFactor::new(n_neighbors)
            .contamination(contamination);
        LocalOutlierFactor { inner: lof }
    }
}
/// Backward-compatibility wrapper for the old one-class SVM API; all work
/// is delegated to the new `crate::ml::anomaly::OneClassSVM`.
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::anomaly::OneClassSVM` instead"
)]
pub struct OneClassSVM {
// The replacement implementation that actually does the work.
inner: crate::ml::anomaly::OneClassSVM,
}
impl OneClassSVM {
    /// Builds a legacy-API one-class SVM on top of the new implementation,
    /// forwarding `nu` and `gamma`.
    ///
    /// NOTE(review): `max_iter` and `tol` are accepted for signature
    /// compatibility but are NOT forwarded — no matching builder calls are
    /// visible on the new type. Confirm whether they should be wired through.
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::anomaly::OneClassSVM::new` instead"
    )]
    pub fn new(nu: f64, gamma: f64, max_iter: usize, tol: f64) -> Self {
        // Explicit discards: document the intentionally dropped parameters
        // and silence the unused-variable warnings.
        let _ = max_iter;
        let _ = tol;
        let svm = crate::ml::anomaly::OneClassSVM::new().nu(nu).gamma(gamma);
        OneClassSVM { inner: svm }
    }
}
}
#[allow(deprecated)]
/// Deprecated pipeline shims kept for backward compatibility.
pub mod pipeline {
use crate::error::Result;
use crate::optimized::OptimizedDataFrame;
/// Legacy transformer interface: fit internal state from a frame, then
/// produce a transformed copy of a frame.
#[deprecated(
since = "0.1.0",
note = "Use `pandrs::ml::pipeline::PipelineTransformer` instead"
)]
pub trait Transformer {
/// Learn any internal state needed from `df`.
fn fit(&mut self, df: &OptimizedDataFrame) -> Result<()>;
/// Apply the (already fitted) transformation, returning a new frame.
fn transform(&self, df: &OptimizedDataFrame) -> Result<OptimizedDataFrame>;
/// Convenience default: `fit` then `transform` on the same frame.
fn fit_transform(&mut self, df: &OptimizedDataFrame) -> Result<OptimizedDataFrame> {
self.fit(df)?;
self.transform(df)
}
}
}
pub mod metrics {
/// Deprecated regression-metric shims; each function delegates unchanged to
/// its replacement in `crate::ml::metrics::regression`.
pub mod regression {
    use crate::error::Result;
    // One alias so every shim below is a short, uniform delegation.
    use crate::ml::metrics::regression as current;

    /// Mean squared error of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::regression::mean_squared_error` instead"
    )]
    pub fn mean_squared_error(y_true: &[f64], y_pred: &[f64]) -> Result<f64> {
        current::mean_squared_error(y_true, y_pred)
    }

    /// Mean absolute error of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::regression::mean_absolute_error` instead"
    )]
    pub fn mean_absolute_error(y_true: &[f64], y_pred: &[f64]) -> Result<f64> {
        current::mean_absolute_error(y_true, y_pred)
    }

    /// Root mean squared error of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::regression::root_mean_squared_error` instead"
    )]
    pub fn root_mean_squared_error(y_true: &[f64], y_pred: &[f64]) -> Result<f64> {
        current::root_mean_squared_error(y_true, y_pred)
    }

    /// R² score of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::regression::r2_score` instead"
    )]
    pub fn r2_score(y_true: &[f64], y_pred: &[f64]) -> Result<f64> {
        current::r2_score(y_true, y_pred)
    }

    /// Explained-variance score of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::regression::explained_variance_score` instead"
    )]
    pub fn explained_variance_score(y_true: &[f64], y_pred: &[f64]) -> Result<f64> {
        current::explained_variance_score(y_true, y_pred)
    }
}
/// Deprecated classification-metric shims; each function delegates unchanged
/// to its replacement in `crate::ml::metrics::classification`.
pub mod classification {
    use crate::error::Result;
    // One alias so every shim below is a short, uniform delegation.
    use crate::ml::metrics::classification as current;

    /// Accuracy of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::classification::accuracy_score` instead"
    )]
    pub fn accuracy_score(y_true: &[bool], y_pred: &[bool]) -> Result<f64> {
        current::accuracy_score(y_true, y_pred)
    }

    /// Precision of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::classification::precision_score` instead"
    )]
    pub fn precision_score(y_true: &[bool], y_pred: &[bool]) -> Result<f64> {
        current::precision_score(y_true, y_pred)
    }

    /// Recall of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::classification::recall_score` instead"
    )]
    pub fn recall_score(y_true: &[bool], y_pred: &[bool]) -> Result<f64> {
        current::recall_score(y_true, y_pred)
    }

    /// F1 score of `y_pred` against `y_true` (delegated).
    #[deprecated(
        since = "0.1.0",
        note = "Use `pandrs::ml::metrics::classification::f1_score` instead"
    )]
    pub fn f1_score(y_true: &[bool], y_pred: &[bool]) -> Result<f64> {
        current::f1_score(y_true, y_pred)
    }
}
}