//! Error types for the `yscv_optim` optimizer crate (`error.rs`).

use thiserror::Error;
use yscv_autograd::AutogradError;
use yscv_tensor::TensorError;

/// Errors returned by optimizer configuration and update steps.
///
/// Hyper-parameter variants carry the rejected value so callers can see
/// exactly what was passed; each `#[error(...)]` string states the range
/// the validating constructor accepts. Errors from the tensor and autograd
/// layers are wrapped transparently at the bottom of the enum.
#[derive(Debug, Clone, PartialEq, Error)]
pub enum OptimError {
    /// Learning rate was NaN, infinite, or negative.
    #[error("invalid learning rate: {lr}; expected finite lr >= 0")]
    InvalidLearningRate { lr: f32 },
    /// Momentum coefficient outside `[0, 1)` or non-finite.
    #[error("invalid momentum: {momentum}; expected finite momentum in [0, 1)")]
    InvalidMomentum { momentum: f32 },
    /// `beta1` coefficient outside `[0, 1)` or non-finite.
    #[error("invalid beta1: {beta1}; expected finite beta1 in [0, 1)")]
    InvalidBeta1 { beta1: f32 },
    /// `beta2` coefficient outside `[0, 1)` or non-finite.
    #[error("invalid beta2: {beta2}; expected finite beta2 in [0, 1)")]
    InvalidBeta2 { beta2: f32 },
    /// Numerical-stability epsilon was not a finite positive value.
    #[error("invalid epsilon: {epsilon}; expected finite epsilon > 0")]
    InvalidEpsilon { epsilon: f32 },
    /// RMSProp `alpha` smoothing factor outside `[0, 1)` or non-finite.
    #[error("invalid rmsprop alpha: {alpha}; expected finite alpha in [0, 1)")]
    InvalidRmsPropAlpha { alpha: f32 },
    /// Step-LR decay factor `gamma` outside `(0, 1]` or non-finite.
    #[error("invalid step scheduler gamma: {gamma}; expected finite gamma in (0, 1]")]
    InvalidStepGamma { gamma: f32 },
    /// Step-LR `step_size` was zero; the scheduler needs at least one step per decay.
    #[error("invalid step scheduler step_size: {step_size}; expected step_size > 0")]
    InvalidStepSize { step_size: usize },
    /// Cosine-annealing period `t_max` was zero.
    #[error("invalid cosine scheduler t_max: {t_max}; expected t_max > 0")]
    InvalidCosineTMax { t_max: usize },
    /// Warmup scheduler was configured with zero warmup steps.
    #[error("invalid warmup scheduler warmup_steps: {warmup_steps}; expected warmup_steps > 0")]
    InvalidWarmupSteps { warmup_steps: usize },
    /// One-cycle schedule was configured with zero total steps.
    #[error("invalid one-cycle total_steps: {total_steps}; expected total_steps > 0")]
    InvalidOneCycleTotalSteps { total_steps: usize },
    /// One-cycle warmup fraction `pct_start` outside `(0, 1]` or non-finite.
    #[error("invalid one-cycle pct_start: {pct_start}; expected finite pct_start in (0, 1]")]
    InvalidOneCyclePctStart { pct_start: f32 },
    /// One-cycle `final_div_factor` must exceed 1 so the final lr is below the base lr.
    #[error(
        "invalid one-cycle final_div_factor: {final_div_factor}; expected finite final_div_factor > 1"
    )]
    InvalidOneCycleFinalDivFactor { final_div_factor: f32 },
    /// Scheduler configured so the warmup/start lr is above the base lr it ramps toward.
    #[error(
        "scheduler start lr exceeds base lr: start_lr={start_lr}, base_lr={base_lr}; expected start_lr <= base_lr"
    )]
    SchedulerStartLrExceedsBase { start_lr: f32, base_lr: f32 },
    /// Scheduler configured with a peak lr below the lr it starts from.
    #[error(
        "scheduler max lr below initial lr: max_lr={max_lr}, initial_lr={initial_lr}; expected max_lr >= initial_lr"
    )]
    SchedulerMaxLrBelowInitial { max_lr: f32, initial_lr: f32 },
    /// Scheduler configured with a floor lr above the base lr it decays from.
    #[error(
        "scheduler min_lr exceeds base lr: min_lr={min_lr}, base_lr={base_lr}; expected min_lr <= base_lr"
    )]
    SchedulerMinLrExceedsBase { min_lr: f32, base_lr: f32 },
    /// Momentum dampening outside `[0, 1]` or non-finite.
    #[error("invalid dampening: {dampening}; expected finite dampening in [0, 1]")]
    InvalidDampening { dampening: f32 },
    /// Weight-decay coefficient was negative or non-finite.
    #[error("invalid weight_decay: {weight_decay}; expected finite weight_decay >= 0")]
    InvalidWeightDecay { weight_decay: f32 },
    /// Nesterov momentum was requested while momentum was configured as zero.
    #[error("nesterov momentum requires momentum > 0")]
    NesterovRequiresMomentum,
    /// A parameter update received a gradient whose shape does not match the weights.
    #[error("optimizer shape mismatch: weights={weights:?}, grad={grad:?}")]
    ShapeMismatch {
        weights: Vec<usize>,
        grad: Vec<usize>,
    },
    /// A tracked parameter node had no gradient when the update step ran,
    /// i.e. the backward pass was not executed (or did not reach this node).
    #[error("missing gradient for node {node}; call backward() first")]
    MissingGradient { node: usize },
    /// Tensor-layer error, forwarded unchanged (`Display` delegates to the source).
    #[error(transparent)]
    Tensor(#[from] TensorError),
    /// Autograd-layer error, forwarded unchanged (`Display` delegates to the source).
    #[error(transparent)]
    Autograd(#[from] AutogradError),
}
65}