//! Optimization utilities for tensor operations
//!
//! This module provides optimization algorithms commonly used in machine
//! learning and numerical optimization, including line search methods,
//! gradient descent variants, quasi-Newton methods, and adaptive
//! algorithm selection.
//!
//! # Organization
//!
//! The optimization module is organized into focused sub-modules:
//!
//! - [`utilities`]: Basic tensor operations used by optimization algorithms
//! - [`line_search`]: Line search methods (backtracking, Wolfe conditions); a generic sketch follows the example below
//! - [`mod@gradient_descent`]: Gradient descent variants (basic, momentum, Adam)
//! - [`quasi_newton`]: Quasi-Newton methods (L-BFGS)
//! - [`adaptive`]: Adaptive algorithm selection based on problem characteristics
//!
//! # Examples
//!
//! ```rust
//! use torsh_functional::optimization::*;
//! use torsh_tensor::Tensor;
//! use torsh_core::Result as TorshResult;
//!
//! fn example() -> Result<(), Box<dyn std::error::Error>> {
//!     // Basic gradient descent on f(x) = x², starting from x = 1.0
//!     let objective = |x: &Tensor| -> TorshResult<f32> {
//!         let data = x.data()?;
//!         Ok(data[0].powi(2)) // f(x) = x²
//!     };
//!
//!     let gradient = |x: &Tensor| -> TorshResult<Tensor> {
//!         let data = x.data()?;
//!         Ok(Tensor::from_vec(vec![2.0 * data[0]], &[1])?) // f'(x) = 2x
//!     };
//!
//!     let x0 = Tensor::from_vec(vec![1.0], &[1])?;
//!     let (_x_opt, _history) = gradient_descent(objective, gradient, &x0, None)?;
//!     Ok(())
//! }
//! ```
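//!
//! For illustration, the sketch below shows a backtracking (Armijo) line
//! search over plain `f64` values, the core technique behind [`line_search`].
//! The `backtracking` function is a generic, self-contained sketch, not this
//! module's API:
//!
//! ```rust
//! // Generic Armijo backtracking sketch (illustrative; not this crate's API):
//! // shrink `step` until f(x + step * dir) shows sufficient decrease.
//! fn backtracking(
//!     f: impl Fn(f64) -> f64,
//!     grad: impl Fn(f64) -> f64,
//!     x: f64,
//!     dir: f64,
//!     mut step: f64,
//! ) -> f64 {
//!     let (c, shrink) = (1e-4, 0.5); // conventional Armijo constants
//!     let fx = f(x);
//!     let slope = grad(x) * dir; // directional derivative; negative for a descent direction
//!     while f(x + step * dir) > fx + c * step * slope {
//!         step *= shrink; // step too long: halve it
//!     }
//!     step
//! }
//!
//! // Minimize f(x) = x² from x = 1.0 along the steepest-descent direction.
//! let f = |x: f64| x * x;
//! let g = |x: f64| 2.0 * x;
//! let dir = -g(1.0);
//! let step = backtracking(f, g, 1.0, dir, 1.0);
//! assert!(f(1.0 + step * dir) < f(1.0));
//! ```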
// Re-export commonly used types and functions from the sub-modules so
// callers can write `use torsh_functional::optimization::*;` as in the
// example above
pub use utilities::*;
pub use line_search::*;
pub use gradient_descent::*;
pub use quasi_newton::*;
pub use adaptive::*;