axonml_autograd/lib.rs

//! Axonml Autograd - Automatic Differentiation Engine
//!
//! Provides reverse-mode automatic differentiation for computing gradients
//! of tensor operations. This is the foundation for training neural networks
//! with gradient-descent optimization.
//!
//! # Key Features
//!
//! - **Dynamic Computational Graph** - Build the graph during the forward pass
//! - **Reverse-mode Autodiff** - Efficient backpropagation
//! - **Gradient Accumulation** - Accumulate gradients across batches (see the example below)
//! - **No-grad Context** - Disable gradient tracking for inference (see the example below)
//! - **Automatic Mixed Precision (AMP)** - F16 autocast for faster training
//! - **Gradient Checkpointing** - Trade compute for memory on large models
//!
//! # Basic Example
//!
//! ```rust,ignore
//! use axonml_autograd::{Variable, no_grad};
//!
//! // Create variables with gradient tracking
//! let x = Variable::new(tensor, true);  // requires_grad = true
//! let w = Variable::new(weights, true);
//!
//! // Forward pass builds the computational graph
//! let y = x.matmul(&w);
//! let loss = y.mse_loss(&target);
//!
//! // Backward pass computes gradients
//! loss.backward();
//!
//! // Access gradients
//! println!("dL/dw = {:?}", w.grad());
//! ```
//!
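//! # Disabling Gradient Tracking
//!
//! For inference, gradient tracking can be switched off so no graph is built.
//! A minimal sketch, assuming `no_grad` takes a closure and `NoGradGuard::new()`
//! exists, mirroring the `autocast`/`AutocastGuard` pattern below:
//!
//! ```rust,ignore
//! use axonml_autograd::{no_grad, NoGradGuard};
//!
//! // Closure form: operations inside are not recorded in the graph
//! let output = no_grad(|| model.forward(&input));
//!
//! // RAII guard form: tracking is disabled until the guard is dropped
//! {
//!     let _guard = NoGradGuard::new(); // constructor name assumed
//!     let output = model.forward(&input);
//! }
//! ```
//!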
//! # Mixed Precision Training
//!
//! ```rust,ignore
//! use axonml_autograd::amp::{autocast, AutocastGuard};
//! use axonml_core::DType;
//!
//! // Enable F16 autocast for the forward pass
//! let output = autocast(DType::F16, || {
//!     model.forward(&input)
//! });
//!
//! // Or use the RAII guard
//! {
//!     let _guard = AutocastGuard::new(DType::F16);
//!     let output = model.forward(&input);
//! }
//! ```
//!
//! # Gradient Checkpointing
//!
//! ```rust,ignore
//! use axonml_autograd::checkpoint::{checkpoint, checkpoint_sequential};
//!
//! // Checkpoint a single function - activations are recomputed during backward
//! let output = checkpoint(|x| heavy_computation(x), &input);
//!
//! // Run 24 sequential layers in 4 checkpointed segments
//! let output = checkpoint_sequential(24, 4, &input, |layer_idx, x| {
//!     layers[layer_idx].forward(x)
//! });
//! ```
//!
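//! # Gradient Accumulation
//!
//! Gradients can be accumulated across several micro-batches before an update,
//! simulating a larger batch size. A minimal sketch, assuming `backward()` adds
//! into each variable's existing gradient; `optimizer.step` and `model.zero_grad`
//! are hypothetical names used only for illustration:
//!
//! ```rust,ignore
//! for (i, (input, target)) in batches.iter().enumerate() {
//!     let loss = model.forward(input).mse_loss(target);
//!     loss.backward(); // accumulates into existing gradients
//!
//!     if (i + 1) % accum_steps == 0 {
//!         optimizer.step(&model); // apply accumulated gradients (hypothetical)
//!         model.zero_grad();      // reset for the next window (hypothetical)
//!     }
//! }
//! ```
//!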
//! @version 0.2.6
//! @author `AutomataNexus` Development Team

#![warn(missing_docs)]
#![warn(clippy::all)]
#![warn(clippy::pedantic)]
// ML/tensor-specific allowances
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_sign_loss)]
#![allow(clippy::cast_precision_loss)]
#![allow(clippy::cast_possible_wrap)]
#![allow(clippy::missing_errors_doc)]
#![allow(clippy::missing_panics_doc)]
#![allow(clippy::must_use_candidate)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::similar_names)]
#![allow(clippy::many_single_char_names)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::doc_markdown)]
#![allow(clippy::cast_lossless)]
#![allow(clippy::needless_pass_by_value)]
#![allow(clippy::redundant_closure_for_method_calls)]
#![allow(clippy::uninlined_format_args)]
#![allow(clippy::ptr_arg)]
#![allow(clippy::return_self_not_must_use)]
#![allow(clippy::not_unsafe_ptr_arg_deref)]
#![allow(clippy::items_after_statements)]
#![allow(clippy::unreadable_literal)]
#![allow(clippy::if_same_then_else)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::trivially_copy_pass_by_ref)]
#![allow(clippy::unnecessary_wraps)]
#![allow(clippy::match_same_arms)]
#![allow(clippy::unused_self)]
#![allow(clippy::too_many_lines)]
#![allow(clippy::single_match_else)]
#![allow(clippy::fn_params_excessive_bools)]
#![allow(clippy::struct_excessive_bools)]
#![allow(clippy::format_push_string)]
#![allow(clippy::erasing_op)]
#![allow(clippy::type_repetition_in_bounds)]
#![allow(clippy::iter_without_into_iter)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::use_debug)]
#![allow(clippy::case_sensitive_file_extension_comparisons)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::panic)]
#![allow(clippy::struct_field_names)]
#![allow(clippy::missing_fields_in_debug)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::assigning_clones)]
#![allow(clippy::option_if_let_else)]
#![allow(clippy::manual_let_else)]
#![allow(clippy::explicit_iter_loop)]
#![allow(clippy::default_trait_access)]
#![allow(clippy::only_used_in_recursion)]
#![allow(clippy::manual_clamp)]
#![allow(clippy::ref_option)]
#![allow(clippy::multiple_bound_locations)]
#![allow(clippy::comparison_chain)]
#![allow(clippy::manual_assert)]
#![allow(clippy::unnecessary_debug_formatting)]

// =============================================================================
// Modules
// =============================================================================

pub mod amp;
pub mod backward;
pub mod checkpoint;
pub mod functions;
pub mod grad_fn;
pub mod graph;
pub mod no_grad;
pub mod variable;

// =============================================================================
// Re-exports
// =============================================================================

pub use amp::{
    autocast, autocast_dtype, disable_autocast, is_autocast_enabled, AutocastGuard, AutocastPolicy,
};
pub use backward::backward;
pub use checkpoint::{checkpoint, checkpoint_sequential};
pub use grad_fn::{GradFn, GradientFunction};
pub use graph::{ComputationGraph, GraphNode};
pub use no_grad::{no_grad, NoGradGuard};
pub use variable::Variable;

// =============================================================================
// Prelude
// =============================================================================

/// Convenient imports for common autograd usage.
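///
/// A sketch of the typical glob import, reusing the placeholder `tensor` and
/// `target` values from the crate-level example:
///
/// ```rust,ignore
/// use axonml_autograd::prelude::*;
///
/// let x = Variable::new(tensor, true);
/// let loss = x.mse_loss(&target);
/// loss.backward();
/// ```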
pub mod prelude {
    pub use crate::backward::backward;
    pub use crate::no_grad::{no_grad, NoGradGuard};
    pub use crate::variable::Variable;
}