//! # axolotl-rs
//!
//! A YAML-driven, configurable fine-tuning toolkit for LLMs.
//!
//! This crate provides a user-friendly interface for fine-tuning language models,
//! similar to the Python Axolotl project, but implemented in pure Rust.
//!
//! ## Features
//!
//! - **YAML Configuration** - Define entire training runs in simple config files (see the sketch below)
//! - **Multiple Adapters** - Support for `LoRA`, `QLoRA`, and full fine-tuning
//! - **Dataset Handling** - Automatic loading and preprocessing
//! - **Multi-GPU** - Distributed training support (planned)
//!
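//! A hypothetical `config.yaml` sketch is shown below; the keys are assumed to
//! mirror the `AxolotlConfig` fields used in the examples further down, so the
//! exact names and accepted values may differ.
//!
//! ```yaml
//! base_model: meta-llama/Llama-2-7b-hf
//! adapter: lora
//! lora:
//!   r: 64
//!   alpha: 16
//! dataset:
//!   path: ./data/train.jsonl
//! training:
//!   epochs: 3
//!   learning_rate: 2.0e-4
//! output_dir: ./outputs
//! seed: 42
//! ```
//!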
//! ## Quick Start (CLI)
//!
//! ```bash
//! # Validate configuration
//! axolotl validate config.yaml
//!
//! # Start training
//! axolotl train config.yaml
//!
//! # Merge adapters
//! axolotl merge --config config.yaml --output ./merged-model
//! ```
//!
//! ## Quick Start (Library)
//!
//! ```no_run
//! use axolotl_rs::{AxolotlConfig, Trainer};
//!
//! # fn main() -> axolotl_rs::Result<()> {
//! // Load configuration from YAML file
//! let config = AxolotlConfig::from_file("config.yaml")?;
//!
//! // Create trainer and start training
//! let mut trainer = Trainer::new(config)?;
//! trainer.train()?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Using Presets
//!
//! ```rust
//! use axolotl_rs::AxolotlConfig;
//!
//! # fn main() -> axolotl_rs::Result<()> {
//! // Create a mutable config from a preset
//! let mut config = AxolotlConfig::from_preset("llama2-7b")?;
//!
//! // Customize as needed
//! config.training.epochs = 5;
//! config.training.learning_rate = 1e-4;
//! # Ok(())
//! # }
//! ```
//!
//! ## Building Custom Configurations
//!
//! ```rust
//! use axolotl_rs::{AxolotlConfig, TrainingConfig};
//! use axolotl_rs::config::{AdapterType, LoraSettings, DatasetConfig};
//!
//! let config = AxolotlConfig {
//!     base_model: "meta-llama/Llama-2-7b-hf".to_string(),
//!     adapter: AdapterType::Lora,
//!     lora: LoraSettings {
//!         r: 64,
//!         alpha: 16,
//!         ..Default::default()
//!     },
//!     quantization: None,
//!     dataset: DatasetConfig {
//!         path: "./data/train.jsonl".to_string(),
//!         ..Default::default()
//!     },
//!     training: TrainingConfig {
//!         epochs: 3,
//!         learning_rate: 2e-4,
//!         ..Default::default()
//!     },
//!     output_dir: "./outputs".to_string(),
//!     seed: 42,
//! };
//! ```
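//!
//! A configuration built this way is used exactly like one loaded from YAML:
//! pass it to `Trainer::new` and call `train`, as in the Quick Start. A minimal
//! sketch (for brevity, the hand-built struct literal above is stood in for by
//! a config loaded from a file):
//!
//! ```no_run
//! # use axolotl_rs::{AxolotlConfig, Trainer};
//! # fn main() -> axolotl_rs::Result<()> {
//! # let config = AxolotlConfig::from_file("config.yaml")?;
//! // `config` here stands in for the hand-built value from the example above.
//! let mut trainer = Trainer::new(config)?;
//! trainer.train()?;
//! # Ok(())
//! # }
//! ```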

#![warn(missing_docs)]
#![warn(clippy::pedantic)]

pub mod adapters;
pub mod cli;
pub mod config;
pub mod dataset;
pub mod error;
#[cfg(feature = "peft")]
pub mod llama_common;
#[cfg(feature = "peft")]
pub mod lora_llama;
pub mod model;
pub mod normalization;
pub mod optimizer;
#[cfg(all(feature = "peft", feature = "qlora"))]
pub mod qlora_llama;
pub mod scheduler;
pub mod trainer;
#[cfg(feature = "vsa-optim")]
pub mod vsa_accel;

// Mock modules for testing without external dependencies
#[cfg(any(
    feature = "mock-peft",
    feature = "mock-qlora",
    feature = "mock-unsloth"
))]
pub mod mocks;

pub use config::{AxolotlConfig, TrainingConfig};
pub use error::{AxolotlError, Result};
pub use trainer::Trainer;

#[cfg(feature = "vsa-optim")]
pub use vsa_accel::{VSAAccelerator, VSAAcceleratorConfig, VSAStats, VSAStepInfo};