1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
// SPDX-License-Identifier: MIT
// Copyright 2026 Tyler Zervas
//! Unified re-exports from the rust-ai ecosystem crates.
//!
//! This module provides convenient access to all rust-ai ecosystem crates
//! through a single import path. All crates are always available as they
//! are required dependencies of rust-ai-core.
//!
//! ## Available Modules
//!
//! | Module | Crate | Description |
//! |--------|-------|-------------|
//! | [`peft`] | peft-rs | LoRA, DoRA, AdaLoRA adapters |
//! | [`qlora`] | qlora-rs | 4-bit quantized fine-tuning |
//! | [`unsloth`] | unsloth-rs | Optimized transformer blocks |
//! | [`axolotl`] | axolotl-rs | Fine-tuning orchestration |
//! | [`bitnet`] | bitnet-quantize | BitNet 1.58-bit quantization |
//! | [`trit`] | trit-vsa | Ternary VSA operations |
//! | [`vsa_optim`] | vsa-optim-rs | VSA-based optimization |
//! | [`tritter`] | tritter-accel | Ternary GPU acceleration |
//!
//! ## Usage
//!
//! ```rust,ignore
//! use rust_ai_core::ecosystem::peft::{LoraConfig, LoraLinear};
//! use rust_ai_core::ecosystem::qlora::QLoraConfig;
//! use rust_ai_core::ecosystem::bitnet::TernaryLinear;
//! ```
//!
//! Or use the top-level facade for common operations:
//!
//! ```rust,ignore
//! use rust_ai_core::{RustAI, RustAIConfig};
//!
//! let ai = RustAI::new(RustAIConfig::default())?;
//! let config = ai.finetune()
//!     .model("meta-llama/Llama-2-7b")
//!     .rank(64)
//!     .build()?;
//! ```
// =============================================================================
// PEFT (Parameter-Efficient Fine-Tuning)
// =============================================================================
/// LoRA, DoRA, and AdaLoRA adapter implementations.
///
/// Re-exports from `peft-rs` crate.
///
/// ## Key Types
///
/// - `LoraConfig` - Configuration for LoRA adapters
/// - `LoraLinear` - LoRA-wrapped linear layer
/// - `DoraConfig` - Configuration for DoRA (Weight-Decomposed LoRA)
/// - `AdaLoraConfig` - Configuration for AdaLoRA (Adaptive Budget)
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::peft::{LoraConfig, LoraLinear};
///
/// let config = LoraConfig::new(64, 16.0); // rank=64, alpha=16.0
/// let lora_layer = LoraLinear::new(base_linear, &config)?;
/// ```
// =============================================================================
// QLoRA (Quantized LoRA)
// =============================================================================
/// 4-bit quantized LoRA for memory-efficient fine-tuning.
///
/// Re-exports from `qlora-rs` crate.
///
/// ## Key Types
///
/// - `QLoraConfig` - Combined quantization and LoRA configuration
/// - `Nf4Quantizer` - NF4 (Normal Float 4-bit) quantizer
/// - `QuantizedLinear` - Quantized linear layer with LoRA
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::qlora::{QLoraConfig, QuantizedLinear};
///
/// let config = QLoraConfig::default()
///     .with_lora_rank(32)
///     .with_bits(4);
/// let qlora_layer = QuantizedLinear::new(weights, &config)?;
/// ```
// =============================================================================
// Unsloth (Optimized Transformers)
// =============================================================================
/// Optimized transformer building blocks.
///
/// Re-exports from `unsloth-rs` crate.
///
/// ## Key Types
///
/// - `FlashAttention` - Memory-efficient attention implementation
/// - `SwiGLU` - SwiGLU activation (used in Llama models)
/// - `RMSNorm` - Root Mean Square layer normalization
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::unsloth::{FlashAttention, AttentionConfig};
///
/// let attn = FlashAttention::new(&config, device)?;
/// let output = attn.forward(&q, &k, &v, mask)?;
/// ```
// =============================================================================
// Axolotl (Fine-Tuning Orchestration)
// =============================================================================
/// YAML-driven fine-tuning configuration and orchestration.
///
/// Re-exports from `axolotl-rs` crate.
///
/// ## Key Types
///
/// - `AxolotlConfig` - Main configuration struct (loadable from YAML)
/// - `TrainingPipeline` - Orchestrates the training workflow
/// - `DatasetConfig` - Dataset loading and preprocessing configuration
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::axolotl::{AxolotlConfig, TrainingPipeline};
///
/// let config = AxolotlConfig::from_yaml("config.yaml")?;
/// let pipeline = TrainingPipeline::new(config)?;
/// pipeline.run()?;
/// ```
// =============================================================================
// BitNet (1.58-bit Quantization)
// =============================================================================
/// Microsoft BitNet b1.58 quantization and inference.
///
/// Re-exports from `bitnet-quantize` crate.
///
/// ## Key Types
///
/// - `BitNetConfig` - Configuration for BitNet quantization
/// - `TernaryLinear` - Linear layer with ternary weights (-1, 0, +1)
/// - `BitNetQuantizer` - Quantizes weights to 1.58-bit representation
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::bitnet::{BitNetConfig, TernaryLinear};
///
/// let config = BitNetConfig::default();
/// let ternary_layer = TernaryLinear::from_linear(linear, &config)?;
/// ```
// =============================================================================
// Trit-VSA (Ternary Vector Symbolic Architectures)
// =============================================================================
/// Balanced ternary arithmetic with bitsliced storage.
///
/// Re-exports from `trit-vsa` crate.
///
/// ## Key Types
///
/// - `TritVector` - Balanced ternary vector (-1, 0, +1)
/// - `TritSlice` - Bitsliced storage for efficient operations
/// - `HdcEncoder` - Hyperdimensional computing encoder
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::trit::{TritVector, TritOps};
///
/// let a = TritVector::random(10000);
/// let b = TritVector::random(10000);
/// let bound = a.bind(&b); // Multiplication in VSA
/// ```
// =============================================================================
// VSA-Optim (VSA-Based Optimization)
// =============================================================================
/// Deterministic training optimization using VSA compression.
///
/// Re-exports from `vsa-optim-rs` crate.
///
/// ## Key Types
///
/// - `VsaOptimizer` - VSA-based optimizer with gradient prediction
/// - `CompressionConfig` - Configuration for gradient compression
/// - `GradientPredictor` - Closed-form gradient prediction
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::vsa_optim::{VsaOptimizer, VsaConfig};
///
/// let config = VsaConfig::default()
///     .with_dimension(10000)
///     .with_compression_ratio(0.1);
/// let optimizer = VsaOptimizer::new(model.parameters(), config)?;
/// ```
// =============================================================================
// Tritter-Accel (Ternary GPU Acceleration)
// =============================================================================
/// GPU-accelerated ternary operations for BitNet and VSA.
///
/// Re-exports from `tritter-accel` crate.
///
/// ## Key Types
///
/// - `TritterRuntime` - GPU runtime for ternary operations
/// - `TernaryMatmul` - Optimized ternary matrix multiplication
/// - `PackedTernary` - Memory-efficient ternary storage
///
/// ## Example
///
/// ```rust,ignore
/// use rust_ai_core::ecosystem::tritter::{TritterRuntime, TernaryMatmul};
///
/// let runtime = TritterRuntime::new(device)?;
/// let matmul = TernaryMatmul::new(&runtime);
/// let output = matmul.forward(&weights, &input)?;
/// ```
// =============================================================================
// ECOSYSTEM INFO
// =============================================================================
/// Information about the rust-ai ecosystem crates.
///
/// Provides version information and capability detection for all ecosystem crates.