//! LoRA (Low-Rank Adaptation) for parameter-efficient fine-tuning.
//!
//! Implements Hu et al. (2021): weight updates are decomposed as
//! `dW = B @ A` where `B in R^{d x r}`, `A in R^{r x k}`, and
//! `r << min(d, k)`, drastically reducing the number of trainable parameters.
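//!
//! A minimal sketch of the savings implied by the formula above, using
//! hypothetical dimensions (not tied to this crate's API):
//!
//! ```
//! // Full fine-tuning updates all d * k entries of W; LoRA trains only
//! // B (d * r entries) and A (r * k entries).
//! let (d, k, r) = (4096usize, 4096, 8);
//! let full_params = d * k;       // 16_777_216
//! let lora_params = r * (d + k); // 65_536, a ~256x reduction
//! assert!(lora_params < full_params / 100);
//! ```
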
// NOTE: the re-export paths were garbled in the source; `config` and
// `layer` are assumed module names.
pub use config::LoraConfig;
pub use layer::LoraLayer;