pub struct GroupedAdam<A: Float + Send + Sync, D: Dimension> { /* private fields */ }
Adam optimizer with parameter group support
This optimizer allows different parameter groups to have different hyperparameters (learning rate, weight decay, betas).
§Example
use scirs2_core::ndarray::Array1;
use optirs_core::optimizers::{GroupedAdam, Optimizer};
use optirs_core::parameter_groups::{GroupedOptimizer, ParameterGroupConfig};
// Create grouped optimizer
let mut optimizer = GroupedAdam::new(0.001);
// Add parameter groups with different learning rates
let params_fast = vec![Array1::zeros(5)];
let config_fast = ParameterGroupConfig::new().with_learning_rate(0.01);
let group_fast = optimizer.add_group(params_fast, config_fast).unwrap();
let params_slow = vec![Array1::zeros(3)];
let config_slow = ParameterGroupConfig::new().with_learning_rate(0.0001);
let group_slow = optimizer.add_group(params_slow, config_slow).unwrap();
// Optimize each group separately
let grads_fast = vec![Array1::ones(5)];
let updated_fast = optimizer.step_group(group_fast, &grads_fast).unwrap();
let grads_slow = vec![Array1::ones(3)];
let updated_slow = optimizer.step_group(group_slow, &grads_slow).unwrap();
Implementations§
impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync> GroupedAdam<A, D>
pub fn with_beta1(self, beta1: A) -> Self
Set default beta1
pub fn with_beta2(self, beta2: A) -> Self
Set default beta2
pub fn with_weight_decay(self, weight_decay: A) -> Self
Set default weight decay
pub fn with_amsgrad(self) -> Self
Enable AMSGrad
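A minimal sketch of configuring optimizer-wide defaults with the builder methods above; it assumes (this page does not confirm it) that group-level settings from ParameterGroupConfig override these defaults for their own group:
use scirs2_core::ndarray::Array1;
use optirs_core::optimizers::GroupedAdam;
use optirs_core::parameter_groups::{GroupedOptimizer, ParameterGroupConfig};
// Builder calls set the defaults shared by every group.
let mut optimizer = GroupedAdam::new(0.001).with_beta1(0.9).with_beta2(0.999).with_weight_decay(0.01).with_amsgrad();
// A group created without overrides is assumed to inherit those defaults.
let group = optimizer.add_group(vec![Array1::<f64>::zeros(4)], ParameterGroupConfig::new()).unwrap();
let updated = optimizer.step_group(group, &[Array1::<f64>::ones(4)]).unwrap();
assert_eq!(updated[0].len(), 4);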
Trait Implementations§
impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync> GroupedOptimizer<A, D> for GroupedAdam<A, D>
fn add_group(
    &mut self,
    params: Vec<Array<A, D>>,
    config: ParameterGroupConfig<A>,
) -> Result<usize>
Add a parameter group
fn get_group(&self, groupid: usize) -> Result<&ParameterGroup<A, D>>
Get parameter group by ID
fn get_group_mut(&mut self, groupid: usize) -> Result<&mut ParameterGroup<A, D>>
Get mutable parameter group by ID
fn groups(&self) -> &[ParameterGroup<A, D>]
Get all parameter groups
fn groups_mut(&mut self) -> &mut [ParameterGroup<A, D>]
Get all parameter groups mutably
fn step_group(
    &mut self,
    groupid: usize,
    gradients: &[Array<A, D>],
) -> Result<Vec<Array<A, D>>>
Step for a specific group
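A hedged sketch of how the GroupedOptimizer methods above might be combined in a small training loop; it assumes each group keeps its own parameter state internally, so only per-group gradients have to be supplied on each step:
use scirs2_core::ndarray::Array1;
use optirs_core::optimizers::GroupedAdam;
use optirs_core::parameter_groups::{GroupedOptimizer, ParameterGroupConfig};
let mut optimizer = GroupedAdam::new(0.001);
// Register two groups; add_group returns the ID used for all later lookups.
let encoder = optimizer.add_group(vec![Array1::<f64>::zeros(8)], ParameterGroupConfig::new().with_learning_rate(0.01)).unwrap();
let head = optimizer.add_group(vec![Array1::<f64>::zeros(2)], ParameterGroupConfig::new()).unwrap();
// groups() exposes every registered group; get_group(id) fetches one by ID.
assert_eq!(optimizer.groups().len(), 2);
assert!(optimizer.get_group(head).is_ok());
// Each iteration supplies freshly computed gradients per group (constants here for brevity).
for _epoch in 0..3 {
    let updated_encoder = optimizer.step_group(encoder, &[Array1::<f64>::ones(8)]).unwrap();
    let updated_head = optimizer.step_group(head, &[Array1::<f64>::ones(2)]).unwrap();
    assert_eq!(updated_encoder[0].len(), 8);
    assert_eq!(updated_head[0].len(), 2);
}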
impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync> Optimizer<A, D> for GroupedAdam<A, D>
fn step(
    &mut self,
    params: &Array<A, D>,
    gradients: &Array<A, D>,
) -> Result<Array<A, D>>
Updates parameters using the given gradients.
fn get_learning_rate(&self) -> A
Gets the current learning rate
fn set_learning_rate(&mut self, learning_rate: A)
Sets a new learning rate
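A hedged sketch of driving GroupedAdam through the generic Optimizer trait; it assumes the single-array step uses the optimizer's default hyperparameters rather than any registered group, and that set_learning_rate adjusts that default:
use scirs2_core::ndarray::Array1;
use optirs_core::optimizers::{GroupedAdam, Optimizer};
let mut optimizer = GroupedAdam::new(0.001);
// Read back and then adjust the default learning rate through the trait.
let default_lr = optimizer.get_learning_rate();
assert!((default_lr - 0.001).abs() < 1e-12);
optimizer.set_learning_rate(0.0005);
// A plain, ungrouped update on a single parameter array.
let params = Array1::<f64>::zeros(4);
let grads = Array1::<f64>::ones(4);
let updated = optimizer.step(&params, &grads).unwrap();
assert_eq!(updated.len(), params.len());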
Auto Trait Implementations§
impl<A, D> Freeze for GroupedAdam<A, D>
where
    A: Freeze,
impl<A, D> RefUnwindSafe for GroupedAdam<A, D>
where
    A: RefUnwindSafe,
    D: RefUnwindSafe,
impl<A, D> Send for GroupedAdam<A, D>
impl<A, D> Sync for GroupedAdam<A, D>
impl<A, D> Unpin for GroupedAdam<A, D>
impl<A, D> UnwindSafe for GroupedAdam<A, D>
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.
impl<T> Pointable for T
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct self from the equivalent element of its superset.
fn is_in_subset(&self) -> bool
Checks if self is actually part of its subset T (and can be converted to it).
fn to_subset_unchecked(&self) -> SS
Use with care! Same as self.to_subset but without any property checks. Always succeeds.
fn from_subset(element: &SS) -> SP
The inclusion map: converts self to the equivalent element of its superset.