// tensorlogic_train/lora/mod.rs

1//! LoRA (Low-Rank Adaptation) for parameter-efficient fine-tuning.
2//!
3//! Implements Hu et al. (2021): weight updates are decomposed as
4//! `dW = B @ A` where `B in R^{d x r}`, `A in R^{r x k}`, and
5//! `r << min(d, k)`, drastically reducing trainable parameter count.
6
7pub mod adapter;
8pub mod config;
9pub mod error;
10pub mod layer;
11
12#[cfg(test)]
13mod tests;
14
15pub use adapter::{LayerStats, LoraAdapter, LoraAdapterSummary};
16pub use config::LoraConfig;
17pub use error::{LoraError, LoraResult};
18pub use layer::LoraLayer;