Struct L1ActivityRegularization

Source
pub struct L1ActivityRegularization<F: Float + Debug + Send + Sync> { /* private fields */ }
Expand description

L1 Activity Regularization layer

A convenience layer that applies only L1 regularization to activations.

§Examples

use scirs2_neural::layers::{L1ActivityRegularization, Layer};
use ndarray::{Array, Array2};

// Create an L1 activity regularization layer with factor 0.01
let regularizer = L1ActivityRegularization::new(0.01, Some("l1_reg")).unwrap();

// Forward pass with a batch of 2 samples, 10 features
let batch_size = 2;
let features = 10;
let input = Array2::<f64>::from_elem((batch_size, features), 1.0).into_dyn();

// Forward pass
let output = regularizer.forward(&input).unwrap();

// Output shape should match input shape
assert_eq!(output.shape(), input.shape());

Implementations§

Source§

impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> L1ActivityRegularization<F>

Source

pub fn new(factor: f64, name: Option<&str>) -> Result<Self>

Create a new L1 activity regularization layer

§Arguments
  • factor - L1 regularization factor
  • name - Optional name for the layer
§Returns
  • A new L1 activity regularization layer
Examples found in repository?
examples/new_features_showcase.rs (line 86)
73fn demonstrate_activity_regularization() -> Result<(), Box<dyn std::error::Error>> {
74    println!("🎯 Activity Regularization Demonstration");
75    println!("=======================================\n");
76
77    // Create some activations to regularize
78    let activations =
79        Array::from_shape_vec((2, 4), vec![1.5, -2.0, 0.5, 3.0, -1.0, 0.0, 2.5, -0.5])?.into_dyn();
80
81    println!("Input activations:");
82    println!("{:?}\n", activations);
83
84    // 1. L1 Activity Regularization
85    println!("1. L1 Activity Regularization (factor=0.1):");
86    let l1_reg = L1ActivityRegularization::new(0.1, Some("l1_regularizer"))?;
87    let l1_output = l1_reg.forward(&activations)?;
88    let l1_loss = l1_reg.get_activity_loss()?;
89    println!("   Output (unchanged): {:?}", l1_output.shape());
90    println!("   L1 activity loss: {:.4}", l1_loss);
91    println!("   Layer description: {}", l1_reg.layer_description());
92
93    // 2. L2 Activity Regularization
94    println!("\n2. L2 Activity Regularization (factor=0.05):");
95    let l2_reg = L2ActivityRegularization::new(0.05, Some("l2_regularizer"))?;
96    let l2_output = l2_reg.forward(&activations)?;
97    let l2_loss = l2_reg.get_activity_loss()?;
98    println!("   Output (unchanged): {:?}", l2_output.shape());
99    println!("   L2 activity loss: {:.4}", l2_loss);
100    println!("   Layer description: {}", l2_reg.layer_description());
101
102    // 3. Combined L1 + L2 Activity Regularization
103    println!("\n3. Combined L1+L2 Activity Regularization:");
104    let combined_reg = ActivityRegularization::new(Some(0.1), Some(0.05), Some("combined_reg"))?;
105    let combined_output = combined_reg.forward(&activations)?;
106    let combined_loss = combined_reg.get_activity_loss()?;
107    println!("   Output (unchanged): {:?}", combined_output.shape());
108    println!("   Combined activity loss: {:.4}", combined_loss);
109    println!("   Layer description: {}", combined_reg.layer_description());
110
111    // Demonstrate backward pass
112    println!("\n4. Backward Pass with Gradient Modification:");
113    let grad_output = Array::ones(activations.raw_dim());
114    let grad_input = combined_reg.backward(&activations, &grad_output)?;
115    println!("   Gradient input shape: {:?}", grad_input.shape());
116    println!(
117        "   Sample gradient values: [{:.3}, {:.3}, {:.3}, {:.3}]",
118        grad_input[[0, 0]],
119        grad_input[[0, 1]],
120        grad_input[[0, 2]],
121        grad_input[[0, 3]]
122    );
123
124    println!("✅ Activity regularization demonstration completed!\n");
125    Ok(())
126}
Source

pub fn name(&self) -> Option<&str>

Get the name of the layer

Source

pub fn get_activity_loss(&self) -> Result<F>

Get the current activity loss

Examples found in repository?
examples/new_features_showcase.rs (line 88)
73fn demonstrate_activity_regularization() -> Result<(), Box<dyn std::error::Error>> {
74    println!("🎯 Activity Regularization Demonstration");
75    println!("=======================================\n");
76
77    // Create some activations to regularize
78    let activations =
79        Array::from_shape_vec((2, 4), vec![1.5, -2.0, 0.5, 3.0, -1.0, 0.0, 2.5, -0.5])?.into_dyn();
80
81    println!("Input activations:");
82    println!("{:?}\n", activations);
83
84    // 1. L1 Activity Regularization
85    println!("1. L1 Activity Regularization (factor=0.1):");
86    let l1_reg = L1ActivityRegularization::new(0.1, Some("l1_regularizer"))?;
87    let l1_output = l1_reg.forward(&activations)?;
88    let l1_loss = l1_reg.get_activity_loss()?;
89    println!("   Output (unchanged): {:?}", l1_output.shape());
90    println!("   L1 activity loss: {:.4}", l1_loss);
91    println!("   Layer description: {}", l1_reg.layer_description());
92
93    // 2. L2 Activity Regularization
94    println!("\n2. L2 Activity Regularization (factor=0.05):");
95    let l2_reg = L2ActivityRegularization::new(0.05, Some("l2_regularizer"))?;
96    let l2_output = l2_reg.forward(&activations)?;
97    let l2_loss = l2_reg.get_activity_loss()?;
98    println!("   Output (unchanged): {:?}", l2_output.shape());
99    println!("   L2 activity loss: {:.4}", l2_loss);
100    println!("   Layer description: {}", l2_reg.layer_description());
101
102    // 3. Combined L1 + L2 Activity Regularization
103    println!("\n3. Combined L1+L2 Activity Regularization:");
104    let combined_reg = ActivityRegularization::new(Some(0.1), Some(0.05), Some("combined_reg"))?;
105    let combined_output = combined_reg.forward(&activations)?;
106    let combined_loss = combined_reg.get_activity_loss()?;
107    println!("   Output (unchanged): {:?}", combined_output.shape());
108    println!("   Combined activity loss: {:.4}", combined_loss);
109    println!("   Layer description: {}", combined_reg.layer_description());
110
111    // Demonstrate backward pass
112    println!("\n4. Backward Pass with Gradient Modification:");
113    let grad_output = Array::ones(activations.raw_dim());
114    let grad_input = combined_reg.backward(&activations, &grad_output)?;
115    println!("   Gradient input shape: {:?}", grad_input.shape());
116    println!(
117        "   Sample gradient values: [{:.3}, {:.3}, {:.3}, {:.3}]",
118        grad_input[[0, 0]],
119        grad_input[[0, 1]],
120        grad_input[[0, 2]],
121        grad_input[[0, 3]]
122    );
123
124    println!("✅ Activity regularization demonstration completed!\n");
125    Ok(())
126}

Trait Implementations§

Source§

impl<F: Clone + Float + Debug + Send + Sync> Clone for L1ActivityRegularization<F>

Source§

fn clone(&self) -> L1ActivityRegularization<F>

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl<F: Float + Debug + Send + Sync> Debug for L1ActivityRegularization<F>

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> Layer<F> for L1ActivityRegularization<F>

Source§

fn as_any(&self) -> &dyn Any

Get the layer as a dyn Any for downcasting Read more
Source§

fn as_any_mut(&mut self) -> &mut dyn Any

Get the layer as a mutable dyn Any for downcasting Read more
Source§

fn forward(&self, input: &Array<F, IxDyn>) -> Result<Array<F, IxDyn>>

Forward pass of the layer Read more
Source§

fn backward( &self, input: &Array<F, IxDyn>, grad_output: &Array<F, IxDyn>, ) -> Result<Array<F, IxDyn>>

Backward pass of the layer to compute gradients Read more
Source§

fn update(&mut self, learning_rate: F) -> Result<()>

Update the layer parameters with the given gradients Read more
Source§

fn layer_type(&self) -> &str

Get the type of the layer (e.g., “Dense”, “Conv2D”) Read more
Source§

fn parameter_count(&self) -> usize

Get the number of trainable parameters in this layer Read more
Source§

fn layer_description(&self) -> String

Get a detailed description of this layer Read more
Source§

fn params(&self) -> Vec<Array<F, IxDyn>>

Get the parameters of the layer Read more
Source§

fn gradients(&self) -> Vec<Array<F, IxDyn>>

Get the gradients of the layer parameters Read more
Source§

fn set_gradients(&mut self, _gradients: &[Array<F, IxDyn>]) -> Result<()>

Set the gradients of the layer parameters Read more
Source§

fn set_params(&mut self, _params: &[Array<F, IxDyn>]) -> Result<()>

Set the parameters of the layer Read more
Source§

fn set_training(&mut self, _training: bool)

Set the layer to training mode (true) or evaluation mode (false) Read more
Source§

fn is_training(&self) -> bool

Get the current training mode Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V