pub struct L1ActivityRegularization<F: Float + Debug + Send + Sync> { /* private fields */ }
L1 Activity Regularization layer
A convenience layer that applies only L1 regularization to activations.
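The forward pass returns the activations unchanged; as a side effect it records an L1 penalty on them, which can be retrieved with get_activity_loss and added to the training loss. A minimal sketch of the penalty, assuming the standard sum-of-absolute-values definition (not the crate's internal code):

// loss = factor * Σ|a_i| over all activation elements
fn l1_penalty(factor: f64, activations: &[f64]) -> f64 {
    factor * activations.iter().map(|a| a.abs()).sum::<f64>()
}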
§Examples
use scirs2_neural::layers::{L1ActivityRegularization, Layer};
use ndarray::{Array, Array2};
// Create an L1 activity regularization layer with factor 0.01
let regularizer = L1ActivityRegularization::new(0.01, Some("l1_reg")).unwrap();
// Forward pass with a batch of 2 samples, 10 features
let batch_size = 2;
let features = 10;
let input = Array2::<f64>::from_elem((batch_size, features), 1.0).into_dyn();
// Forward pass
let output = regularizer.forward(&input).unwrap();
// Output shape should match input shape
assert_eq!(output.shape(), input.shape());
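// Every element of this input is 1.0, so under the sum-of-absolute-values
// definition the recorded penalty would be 0.01 * 2 * 10 = 0.2 (hedged:
// the exact reduction is an assumption, not confirmed by this crate's docs).
let loss = regularizer.get_activity_loss().unwrap();
assert!((loss - 0.2).abs() < 1e-10);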
Implementations§
impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> L1ActivityRegularization<F>
pub fn new(factor: f64, name: Option<&str>) -> Result<Self>
Create a new L1 activity regularization layer
§Arguments
- factor - L1 regularization factor
- name - Optional name for the layer
§Returns
- A new L1 activity regularization layer
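Both forms of the name argument are valid; a minimal construction sketch (f64 chosen arbitrarily for the float type):

let named = L1ActivityRegularization::<f64>::new(0.01, Some("l1_reg")).unwrap();
let unnamed = L1ActivityRegularization::<f64>::new(0.01, None).unwrap();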
Examples found in repository
examples/new_features_showcase.rs (line 86)
73 fn demonstrate_activity_regularization() -> Result<(), Box<dyn std::error::Error>> {
74 println!("🎯 Activity Regularization Demonstration");
75 println!("=======================================\n");
76
77 // Create some activations to regularize
78 let activations =
79 Array::from_shape_vec((2, 4), vec![1.5, -2.0, 0.5, 3.0, -1.0, 0.0, 2.5, -0.5])?.into_dyn();
80
81 println!("Input activations:");
82 println!("{:?}\n", activations);
83
84 // 1. L1 Activity Regularization
85 println!("1. L1 Activity Regularization (factor=0.1):");
86 let l1_reg = L1ActivityRegularization::new(0.1, Some("l1_regularizer"))?;
87 let l1_output = l1_reg.forward(&activations)?;
88 let l1_loss = l1_reg.get_activity_loss()?;
89 println!(" Output (unchanged): {:?}", l1_output.shape());
90 println!(" L1 activity loss: {:.4}", l1_loss);
91 println!(" Layer description: {}", l1_reg.layer_description());
92
93 // 2. L2 Activity Regularization
94 println!("\n2. L2 Activity Regularization (factor=0.05):");
95 let l2_reg = L2ActivityRegularization::new(0.05, Some("l2_regularizer"))?;
96 let l2_output = l2_reg.forward(&activations)?;
97 let l2_loss = l2_reg.get_activity_loss()?;
98 println!(" Output (unchanged): {:?}", l2_output.shape());
99 println!(" L2 activity loss: {:.4}", l2_loss);
100 println!(" Layer description: {}", l2_reg.layer_description());
101
102 // 3. Combined L1 + L2 Activity Regularization
103 println!("\n3. Combined L1+L2 Activity Regularization:");
104 let combined_reg = ActivityRegularization::new(Some(0.1), Some(0.05), Some("combined_reg"))?;
105 let combined_output = combined_reg.forward(&activations)?;
106 let combined_loss = combined_reg.get_activity_loss()?;
107 println!(" Output (unchanged): {:?}", combined_output.shape());
108 println!(" Combined activity loss: {:.4}", combined_loss);
109 println!(" Layer description: {}", combined_reg.layer_description());
110
111 // Demonstrate backward pass
112 println!("\n4. Backward Pass with Gradient Modification:");
113 let grad_output = Array::ones(activations.raw_dim());
114 let grad_input = combined_reg.backward(&activations, &grad_output)?;
115 println!(" Gradient input shape: {:?}", grad_input.shape());
116 println!(
117 " Sample gradient values: [{:.3}, {:.3}, {:.3}, {:.3}]",
118 grad_input[[0, 0]],
119 grad_input[[0, 1]],
120 grad_input[[0, 2]],
121 grad_input[[0, 3]]
122 );
123
124 println!("✅ Activity regularization demonstration completed!\n");
125 Ok(())
126 }
pub fn get_activity_loss(&self) -> Result<F>
Get the current activity loss
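The returned value reflects the penalty recorded by the most recent forward pass. Continuing the example under §Examples above, a hedged sketch of folding the penalty into a total training loss (data_loss is a placeholder for whatever data-fit loss the surrounding loop computes, not part of this API):

let _output = regularizer.forward(&input).unwrap(); // records the activity loss
let data_loss = 0.0_f64; // placeholder for the data-fit loss
let total_loss = data_loss + regularizer.get_activity_loss().unwrap();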
Examples found in repository
examples/new_features_showcase.rs (line 88) — see the full listing under new above; line 88 calls get_activity_loss on the L1 regularizer after its forward pass.
Trait Implementations§
impl<F: Clone + Float + Debug + Send + Sync> Clone for L1ActivityRegularization<F>
fn clone(&self) -> L1ActivityRegularization<F>
Returns a duplicate of the value.
1.0.0 · const fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.
impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> Layer<F> for L1ActivityRegularization<F>
fn as_any_mut(&mut self) -> &mut dyn Any
Get the layer as a mutable dyn Any for downcasting
fn forward(&self, input: &Array<F, IxDyn>) -> Result<Array<F, IxDyn>>
Forward pass of the layer
fn backward(
    &self,
    input: &Array<F, IxDyn>,
    grad_output: &Array<F, IxDyn>,
) -> Result<Array<F, IxDyn>>
Backward pass of the layer to compute gradients
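For L1 activity regularization, the backward pass presumably adds the subgradient of the penalty, factor · sign(a), to the incoming gradient for each activation element (the repository example above prints the resulting modified gradients). A per-element sketch under that assumption, not the crate's internal code:

// d/da (factor * |a|) = factor * sign(a); the subgradient 0 is taken at a == 0
fn l1_activity_grad(factor: f64, a: f64, grad_out: f64) -> f64 {
    let sign = if a == 0.0 { 0.0 } else { a.signum() };
    grad_out + factor * sign
}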
fn update(&mut self, learning_rate: F) -> Result<()>
Update the layer parameters with the given gradients
fn layer_type(&self) -> &str
Get the type of the layer (e.g., “Dense”, “Conv2D”)
fn parameter_count(&self) -> usize
Get the number of trainable parameters in this layer
fn layer_description(&self) -> String
Get a detailed description of this layer
fn gradients(&self) -> Vec<Array<F, IxDyn>>
Get the gradients of the layer parameters
fn set_gradients(&mut self, _gradients: &[Array<F, IxDyn>]) -> Result<()>
Set the gradients of the layer parameters
fn set_params(&mut self, _params: &[Array<F, IxDyn>]) -> Result<()>
Set the parameters of the layer
fn set_training(&mut self, _training: bool)
Set the layer to training mode (true) or evaluation mode (false)
fn is_training(&self) -> bool
Get the current training mode
Auto Trait Implementations§
impl<F> Freeze for L1ActivityRegularization<F> where F: Freeze
impl<F> RefUnwindSafe for L1ActivityRegularization<F> where F: RefUnwindSafe
impl<F> Send for L1ActivityRegularization<F>
impl<F> Sync for L1ActivityRegularization<F>
impl<F> Unpin for L1ActivityRegularization<F> where F: Unpin
impl<F> UnwindSafe for L1ActivityRegularization<F> where F: UnwindSafe
Blanket Implementations§
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> CloneToUninit for T where T: Clone
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.