use super::module::PyModule;
use crate::{error::PyResult, py_result, tensor::PyTensor};
use pyo3::prelude::*;
use std::collections::HashMap;
#[pyclass(name = "Dropout", extends = PyModule)]
/// Element-wise dropout layer (PyTorch-style `nn.Dropout` Python binding).
pub struct PyDropout {
    /// Probability of zeroing each element; validated to lie in [0, 1].
    p: f32,
    /// Stored for API compatibility; `forward` always returns a new tensor.
    inplace: bool,
    /// Training flag; dropout is the identity when this is false.
    training: bool,
}
#[pymethods]
impl PyDropout {
    /// Create a new Dropout module.
    ///
    /// * `p` - probability of zeroing each element (default 0.5); must be in [0, 1].
    /// * `inplace` - accepted for PyTorch API compatibility; this implementation
    ///   always allocates a new output tensor.
    ///
    /// Returns a `PyValueError` when `p` is outside [0, 1].
    #[new]
    fn new(p: Option<f32>, inplace: Option<bool>) -> PyResult<(Self, PyModule)> {
        let p = p.unwrap_or(0.5);
        let inplace = inplace.unwrap_or(false);
        if !(0.0..=1.0).contains(&p) {
            // Bug fix: this was a plain string literal, so "{p}" was emitted
            // verbatim instead of the offending probability value.
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "dropout probability has to be between 0 and 1, but got {p}"
            )));
        }
        Ok((
            Self {
                p,
                inplace,
                training: true,
            },
            PyModule::new(),
        ))
    }

    /// Apply inverted dropout: each element is zeroed with probability `p`
    /// and survivors are scaled by 1 / (1 - p), so the expected value is
    /// preserved. Acts as the identity when in eval mode or `p == 0`.
    fn forward(&mut self, input: &PyTensor) -> PyResult<PyTensor> {
        // Identity: eval mode, or nothing would ever be dropped.
        if !self.training || self.p == 0.0 {
            return Ok(PyTensor {
                tensor: input.tensor.clone(),
            });
        }
        // p == 1 would make the 1/(1-p) scale divide by zero; everything is
        // dropped, so short-circuit to a zero tensor.
        if self.p == 1.0 {
            let zeros = py_result!(torsh_tensor::creation::zeros_like(&input.tensor))?;
            return Ok(PyTensor { tensor: zeros });
        }
        use scirs2_core::random::Distribution;
        use scirs2_core::random::{thread_rng, Uniform};
        let mut rng = thread_rng();
        let dist = Uniform::new(0.0_f32, 1.0_f32).map_err(|e| {
            PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "Failed to create uniform distribution: {}",
                e
            ))
        })?;
        let mut data = py_result!(input.tensor.data())?;
        let scale = 1.0 / (1.0 - self.p);
        for val in data.iter_mut() {
            if dist.sample(&mut rng) < self.p {
                *val = 0.0;
            } else {
                *val *= scale;
            }
        }
        let shape = input.tensor.shape().dims().to_vec();
        let result = py_result!(torsh_tensor::Tensor::from_data(
            data,
            shape,
            input.tensor.device()
        ))?;
        Ok(PyTensor { tensor: result })
    }

    /// Dropout has no learnable parameters.
    fn parameters(&self) -> PyResult<Vec<PyTensor>> {
        Ok(Vec::new())
    }

    /// Dropout has no learnable parameters.
    fn named_parameters(&self) -> PyResult<HashMap<String, PyTensor>> {
        Ok(HashMap::new())
    }

    /// Set the training flag (defaults to true); dropout is only active while training.
    fn train(&mut self, mode: Option<bool>) -> PyResult<()> {
        self.training = mode.unwrap_or(true);
        Ok(())
    }

    /// Switch to evaluation mode; `forward` becomes the identity.
    fn eval(&mut self) -> PyResult<()> {
        self.training = false;
        Ok(())
    }

    fn __repr__(&self) -> String {
        format!("Dropout(p={}, inplace={})", self.p, self.inplace)
    }
}
#[pyclass(name = "Dropout2d", extends = PyModule)]
/// Channel-wise dropout for 2D feature maps (PyTorch-style `nn.Dropout2d` binding):
/// entire channels are zeroed rather than individual elements.
pub struct PyDropout2d {
    /// Probability of zeroing each whole channel; validated to lie in [0, 1].
    p: f32,
    /// Stored for API compatibility; `forward` always returns a new tensor.
    inplace: bool,
    /// Training flag; dropout is the identity when this is false.
    training: bool,
}
#[pymethods]
impl PyDropout2d {
    /// Create a new Dropout2d module.
    ///
    /// * `p` - probability of zeroing each whole channel (default 0.5); must be in [0, 1].
    /// * `inplace` - accepted for PyTorch API compatibility; this implementation
    ///   always allocates a new output tensor.
    ///
    /// Returns a `PyValueError` when `p` is outside [0, 1].
    #[new]
    fn new(p: Option<f32>, inplace: Option<bool>) -> PyResult<(Self, PyModule)> {
        let p = p.unwrap_or(0.5);
        let inplace = inplace.unwrap_or(false);
        if !(0.0..=1.0).contains(&p) {
            // Bug fix: this was a plain string literal, so "{p}" was emitted
            // verbatim instead of the offending probability value.
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "dropout probability has to be between 0 and 1, but got {p}"
            )));
        }
        Ok((
            Self {
                p,
                inplace,
                training: true,
            },
            PyModule::new(),
        ))
    }

    /// Apply channel-wise inverted dropout: each (batch, channel) slice is zeroed
    /// with probability `p`, surviving channels are scaled by 1 / (1 - p).
    /// Acts as the identity when in eval mode or `p == 0`.
    ///
    /// Expects input of shape (N, C, ...) with at least 2 dimensions.
    fn forward(&mut self, input: &PyTensor) -> PyResult<PyTensor> {
        // Identity: eval mode, or nothing would ever be dropped.
        if !self.training || self.p == 0.0 {
            return Ok(PyTensor {
                tensor: input.tensor.clone(),
            });
        }
        // p == 1 would make the 1/(1-p) scale divide by zero; everything is
        // dropped, so short-circuit to a zero tensor.
        if self.p == 1.0 {
            let zeros = py_result!(torsh_tensor::creation::zeros_like(&input.tensor))?;
            return Ok(PyTensor { tensor: zeros });
        }
        use scirs2_core::random::Distribution;
        use scirs2_core::random::{thread_rng, Uniform};
        let shape = input.tensor.shape().dims().to_vec();
        if shape.len() < 2 {
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
                "Dropout2d expects at least 2D input",
            ));
        }
        let mut rng = thread_rng();
        let dist = Uniform::new(0.0_f32, 1.0_f32).map_err(|e| {
            PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "Failed to create uniform distribution: {}",
                e
            ))
        })?;
        let batch_size = shape[0];
        let channels = shape[1];
        // Everything beyond (N, C) is treated as one flat spatial extent.
        let spatial_size: usize = shape[2..].iter().product();
        // NOTE(review): indexing assumes `data()` returns a row-major contiguous
        // (N, C, ...) buffer — confirm against the Tensor::data contract.
        let mut data = py_result!(input.tensor.data())?;
        let scale = 1.0 / (1.0 - self.p);
        for b in 0..batch_size {
            for c in 0..channels {
                // One random draw per (batch, channel): the whole channel slice
                // is either zeroed or rescaled.
                let start = (b * channels + c) * spatial_size;
                let end = start + spatial_size;
                if dist.sample(&mut rng) < self.p {
                    data[start..end].fill(0.0);
                } else {
                    for val in &mut data[start..end] {
                        *val *= scale;
                    }
                }
            }
        }
        let result = py_result!(torsh_tensor::Tensor::from_data(
            data,
            shape,
            input.tensor.device()
        ))?;
        Ok(PyTensor { tensor: result })
    }

    /// Dropout has no learnable parameters.
    fn parameters(&self) -> PyResult<Vec<PyTensor>> {
        Ok(Vec::new())
    }

    /// Dropout has no learnable parameters.
    fn named_parameters(&self) -> PyResult<HashMap<String, PyTensor>> {
        Ok(HashMap::new())
    }

    /// Set the training flag (defaults to true); dropout is only active while training.
    fn train(&mut self, mode: Option<bool>) -> PyResult<()> {
        self.training = mode.unwrap_or(true);
        Ok(())
    }

    /// Switch to evaluation mode; `forward` becomes the identity.
    fn eval(&mut self) -> PyResult<()> {
        self.training = false;
        Ok(())
    }

    fn __repr__(&self) -> String {
        format!("Dropout2d(p={}, inplace={})", self.p, self.inplace)
    }
}
#[pyclass(name = "Dropout3d", extends = PyModule)]
/// Channel-wise dropout for 3D feature maps (PyTorch-style `nn.Dropout3d` binding):
/// entire channels are zeroed rather than individual elements.
pub struct PyDropout3d {
    /// Probability of zeroing each whole channel; validated to lie in [0, 1].
    p: f32,
    /// Stored for API compatibility; `forward` always returns a new tensor.
    inplace: bool,
    /// Training flag; dropout is the identity when this is false.
    training: bool,
}
#[pymethods]
impl PyDropout3d {
    /// Create a new Dropout3d module.
    ///
    /// * `p` - probability of zeroing each whole channel (default 0.5); must be in [0, 1].
    /// * `inplace` - accepted for PyTorch API compatibility; this implementation
    ///   always allocates a new output tensor.
    ///
    /// Returns a `PyValueError` when `p` is outside [0, 1].
    #[new]
    fn new(p: Option<f32>, inplace: Option<bool>) -> PyResult<(Self, PyModule)> {
        let p = p.unwrap_or(0.5);
        let inplace = inplace.unwrap_or(false);
        if !(0.0..=1.0).contains(&p) {
            // Bug fix: this was a plain string literal, so "{p}" was emitted
            // verbatim instead of the offending probability value.
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "dropout probability has to be between 0 and 1, but got {p}"
            )));
        }
        Ok((
            Self {
                p,
                inplace,
                training: true,
            },
            PyModule::new(),
        ))
    }

    /// Apply channel-wise inverted dropout: each (batch, channel) slice is zeroed
    /// with probability `p`, surviving channels are scaled by 1 / (1 - p).
    /// Acts as the identity when in eval mode or `p == 0`.
    ///
    /// Expects input of shape (N, C, ...) with at least 3 dimensions.
    fn forward(&mut self, input: &PyTensor) -> PyResult<PyTensor> {
        // Identity: eval mode, or nothing would ever be dropped.
        if !self.training || self.p == 0.0 {
            return Ok(PyTensor {
                tensor: input.tensor.clone(),
            });
        }
        // p == 1 would make the 1/(1-p) scale divide by zero; everything is
        // dropped, so short-circuit to a zero tensor.
        if self.p == 1.0 {
            let zeros = py_result!(torsh_tensor::creation::zeros_like(&input.tensor))?;
            return Ok(PyTensor { tensor: zeros });
        }
        use scirs2_core::random::Distribution;
        use scirs2_core::random::{thread_rng, Uniform};
        let shape = input.tensor.shape().dims().to_vec();
        if shape.len() < 3 {
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
                "Dropout3d expects at least 3D input",
            ));
        }
        let mut rng = thread_rng();
        let dist = Uniform::new(0.0_f32, 1.0_f32).map_err(|e| {
            PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "Failed to create uniform distribution: {}",
                e
            ))
        })?;
        let batch_size = shape[0];
        let channels = shape[1];
        // Everything beyond (N, C) is treated as one flat spatial extent.
        let spatial_size: usize = shape[2..].iter().product();
        // NOTE(review): indexing assumes `data()` returns a row-major contiguous
        // (N, C, ...) buffer — confirm against the Tensor::data contract.
        let mut data = py_result!(input.tensor.data())?;
        let scale = 1.0 / (1.0 - self.p);
        for b in 0..batch_size {
            for c in 0..channels {
                // One random draw per (batch, channel): the whole channel slice
                // is either zeroed or rescaled.
                let start = (b * channels + c) * spatial_size;
                let end = start + spatial_size;
                if dist.sample(&mut rng) < self.p {
                    data[start..end].fill(0.0);
                } else {
                    for val in &mut data[start..end] {
                        *val *= scale;
                    }
                }
            }
        }
        let result = py_result!(torsh_tensor::Tensor::from_data(
            data,
            shape,
            input.tensor.device()
        ))?;
        Ok(PyTensor { tensor: result })
    }

    /// Dropout has no learnable parameters.
    fn parameters(&self) -> PyResult<Vec<PyTensor>> {
        Ok(Vec::new())
    }

    /// Dropout has no learnable parameters.
    fn named_parameters(&self) -> PyResult<HashMap<String, PyTensor>> {
        Ok(HashMap::new())
    }

    /// Set the training flag (defaults to true); dropout is only active while training.
    fn train(&mut self, mode: Option<bool>) -> PyResult<()> {
        self.training = mode.unwrap_or(true);
        Ok(())
    }

    /// Switch to evaluation mode; `forward` becomes the identity.
    fn eval(&mut self) -> PyResult<()> {
        self.training = false;
        Ok(())
    }

    fn __repr__(&self) -> String {
        format!("Dropout3d(p={}, inplace={})", self.p, self.inplace)
    }
}
#[pyclass(name = "AlphaDropout", extends = PyModule)]
/// Alpha dropout for SELU networks (PyTorch-style `nn.AlphaDropout` binding):
/// dropped units are set to the SELU negative saturation value and the output
/// is affinely corrected to keep zero mean and unit variance.
pub struct PyAlphaDropout {
    /// Probability of dropping each element; validated to lie in [0, 1].
    p: f32,
    /// Stored for API compatibility; `forward` always returns a new tensor.
    inplace: bool,
    /// Training flag; dropout is the identity when this is false.
    training: bool,
}
#[pymethods]
impl PyAlphaDropout {
#[new]
fn new(p: Option<f32>, inplace: Option<bool>) -> PyResult<(Self, PyModule)> {
let p = p.unwrap_or(0.5);
let inplace = inplace.unwrap_or(false);
if !(0.0..=1.0).contains(&p) {
return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
"dropout probability has to be between 0 and 1, but got {p}",
));
}
Ok((
Self {
p,
inplace,
training: true,
},
PyModule::new(),
))
}
fn forward(&mut self, input: &PyTensor) -> PyResult<PyTensor> {
if !self.training || self.p == 0.0 {
return Ok(PyTensor {
tensor: input.tensor.clone(),
});
}
if self.p == 1.0 {
let alpha_prime = -1.7580993408473766_f32;
let mut data = py_result!(input.tensor.data())?;
for val in data.iter_mut() {
*val = alpha_prime;
}
let shape = input.tensor.shape().dims().to_vec();
let result = py_result!(torsh_tensor::Tensor::from_data(
data,
shape,
input.tensor.device()
))?;
return Ok(PyTensor { tensor: result });
}
use scirs2_core::random::Distribution;
use scirs2_core::random::{thread_rng, Uniform};
let _alpha = 1.6732632423543772_f32;
let alpha_prime = -1.7580993408473766_f32;
let a = ((1.0 - self.p) * (1.0 + self.p * alpha_prime * alpha_prime)).sqrt();
let b = -a * alpha_prime * self.p;
let mut rng = thread_rng();
let dist = Uniform::new(0.0_f32, 1.0_f32).map_err(|e| {
PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
"Failed to create uniform distribution: {}",
e
))
})?;
let mut data = py_result!(input.tensor.data())?;
for val in data.iter_mut() {
if dist.sample(&mut rng) < self.p {
*val = (*val * 0.0 + alpha_prime) * a + b;
} else {
*val = *val * a + b;
}
}
let shape = input.tensor.shape().dims().to_vec();
let result = py_result!(torsh_tensor::Tensor::from_data(
data,
shape,
input.tensor.device()
))?;
Ok(PyTensor { tensor: result })
}
fn parameters(&self) -> PyResult<Vec<PyTensor>> {
Ok(Vec::new())
}
fn named_parameters(&self) -> PyResult<HashMap<String, PyTensor>> {
Ok(HashMap::new())
}
fn train(&mut self, mode: Option<bool>) -> PyResult<()> {
self.training = mode.unwrap_or(true);
Ok(())
}
fn eval(&mut self) -> PyResult<()> {
self.training = false;
Ok(())
}
fn __repr__(&self) -> String {
format!("AlphaDropout(p={}, inplace={})", self.p, self.inplace)
}
}