#![warn(clippy::all)]
#![allow(clippy::cast_possible_truncation)]
#![allow(clippy::cast_sign_loss)]
#![allow(clippy::cast_precision_loss)]
#![allow(clippy::cast_possible_wrap)]
#![allow(clippy::missing_errors_doc)]
#![allow(clippy::missing_panics_doc)]
#![allow(clippy::must_use_candidate)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::similar_names)]
#![allow(clippy::doc_markdown)]
#![allow(clippy::uninlined_format_args)]
#[cfg(feature = "core")]
pub use axonml_core as core;
#[cfg(feature = "core")]
pub use axonml_tensor as tensor;
#[cfg(feature = "core")]
pub use axonml_autograd as autograd;
#[cfg(feature = "nn")]
pub use axonml_nn as nn;
#[cfg(feature = "nn")]
pub use axonml_optim as optim;
#[cfg(feature = "data")]
pub use axonml_data as data;
#[cfg(feature = "vision")]
pub use axonml_vision as vision;
#[cfg(feature = "text")]
pub use axonml_text as text;
#[cfg(feature = "audio")]
pub use axonml_audio as audio;
#[cfg(feature = "distributed")]
pub use axonml_distributed as distributed;
#[cfg(feature = "profile")]
pub use axonml_profile as profile;
#[cfg(feature = "llm")]
pub use axonml_llm as llm;
#[cfg(feature = "jit")]
pub use axonml_jit as jit;
#[cfg(feature = "onnx")]
pub use axonml_onnx as onnx;
#[cfg(feature = "serialize")]
pub use axonml_serialize as serialize;
#[cfg(feature = "quant")]
pub use axonml_quant as quant;
#[cfg(feature = "fusion")]
pub use axonml_fusion as fusion;
#[cfg(feature = "hvac")]
pub use axonml_hvac as hvac;
#[cfg(feature = "train")]
pub use axonml_train as train;
pub mod monitor;
pub use monitor::TrainingMonitor;
/// Convenience re-exports of the most commonly used items across the
/// `axonml_*` crates, so downstream code can write `use axonml::prelude::*;`.
/// Each group is gated on the same cargo feature as the corresponding
/// top-level re-export of its crate.
pub mod prelude {
// Core primitives: dtypes, devices, errors, tensors, autograd.
#[cfg(feature = "core")]
pub use axonml_core::{DType, Device, Error, Result};
#[cfg(feature = "core")]
pub use axonml_tensor::Tensor;
#[cfg(feature = "core")]
pub use axonml_autograd::{Variable, no_grad};
// Neural-network layers, losses, and containers.
#[cfg(feature = "nn")]
pub use axonml_nn::{
AvgPool2d, BCELoss, BatchNorm1d, BatchNorm2d, Conv2d, CrossEntropyLoss, Dropout, Embedding,
GELU, GRU, L1Loss, LSTM, LayerNorm, LeakyReLU, Linear, MSELoss, MaxPool2d, Module,
MultiHeadAttention, Parameter, RNN, ReLU, Sequential, SiLU, Sigmoid, Softmax, Tanh,
};
// Optimizers and learning-rate schedulers (shipped under the `nn` feature).
#[cfg(feature = "nn")]
pub use axonml_optim::{
Adam, AdamW, CosineAnnealingLR, ExponentialLR, LRScheduler, Optimizer, RMSprop, SGD, StepLR,
};
// Dataset/dataloader abstractions.
#[cfg(feature = "data")]
pub use axonml_data::{DataLoader, Dataset, RandomSampler, SequentialSampler, Transform};
// Vision transforms, reference models, and synthetic datasets.
#[cfg(feature = "vision")]
pub use axonml_vision::{
CenterCrop, ImageNormalize, LeNet, RandomHorizontalFlip, Resize, SimpleCNN, SyntheticCIFAR,
SyntheticMNIST,
};
// Tokenizers, vocabularies, and text datasets.
#[cfg(feature = "text")]
pub use axonml_text::{
BasicBPETokenizer, CharTokenizer, LanguageModelDataset, SyntheticSentimentDataset,
TextDataset, Tokenizer, Vocab, WhitespaceTokenizer,
};
// Audio feature extraction and synthetic audio datasets.
#[cfg(feature = "audio")]
pub use axonml_audio::{
AddNoise, MFCC, MelSpectrogram, NormalizeAudio, Resample, SyntheticCommandDataset,
SyntheticMusicDataset,
};
// Multi-process training: process groups and collective ops.
#[cfg(feature = "distributed")]
pub use axonml_distributed::{
DDP, DistributedDataParallel, ProcessGroup, World, all_reduce_mean, all_reduce_sum,
barrier, broadcast,
};
// Profiling: compute/memory/timeline profilers and bottleneck analysis.
#[cfg(feature = "profile")]
pub use axonml_profile::{
Bottleneck, BottleneckAnalyzer, ComputeProfiler, MemoryProfiler, ProfileGuard,
ProfileReport, Profiler, TimelineProfiler,
};
// LLM reference architectures (BERT, GPT-2) and text generation.
#[cfg(feature = "llm")]
pub use axonml_llm::{
Bert, BertConfig, BertForMaskedLM, BertForSequenceClassification, GPT2, GPT2Config,
GPT2LMHead, GenerationConfig, TextGenerator,
};
// Training-loop helpers: callbacks, early stopping, history/metrics.
#[cfg(feature = "train")]
pub use axonml_train::{
AdversarialTrainer, Callback, EarlyStopping, ProgressLogger, TrainingConfig,
TrainingHistory, TrainingMetrics,
};
// JIT tracing and compilation; `Optimizer` is aliased to avoid clashing
// with the optimizer trait exported by `axonml_optim` above.
#[cfg(feature = "jit")]
pub use axonml_jit::{
CompiledFunction, Graph, JitCompiler, Optimizer as JitOptimizer, TracedValue, trace,
};
}
/// Returns this crate's version string, baked in at compile time from
/// Cargo's `CARGO_PKG_VERSION` environment variable.
#[must_use]
pub fn version() -> &'static str {
    // `env!` is resolved at build time, so the result is a true `'static` str.
    const VERSION: &str = env!("CARGO_PKG_VERSION");
    VERSION
}
/// Returns a comma-separated list of the cargo features this crate was
/// built with, or `"none"` when no feature is enabled.
#[must_use]
pub fn features() -> String {
    let mut enabled: Vec<&'static str> = Vec::new();
    // Record a feature name iff the matching cargo feature is active;
    // the `#[cfg]` is applied to the expanded statement at compile time.
    macro_rules! track {
        ($name:literal) => {
            #[cfg(feature = $name)]
            enabled.push($name);
        };
    }
    track!("core");
    track!("nn");
    track!("data");
    track!("vision");
    track!("text");
    track!("audio");
    track!("distributed");
    track!("profile");
    track!("llm");
    track!("jit");
    track!("onnx");
    track!("serialize");
    track!("quant");
    track!("fusion");
    track!("hvac");
    track!("train");
    if enabled.is_empty() {
        String::from("none")
    } else {
        enabled.join(", ")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `version()` must always yield a non-empty package version string.
    #[test]
    fn test_version() {
        let v = version();
        assert!(!v.is_empty());
    }

    /// The feature list must report "core" when the `core` feature is on.
    /// Gated on `core` because without it `features()` legitimately omits
    /// the entry (previously this test failed for `--no-default-features`
    /// builds). Matches whole `", "`-separated entries rather than using a
    /// substring check, so a feature name that merely contains "core"
    /// (e.g. a hypothetical "score") cannot satisfy it by accident.
    #[cfg(feature = "core")]
    #[test]
    fn test_features() {
        let f = features();
        assert!(f.split(", ").any(|name| name == "core"));
    }

    /// Smoke test: a 2x2 tensor round-trips its shape.
    #[cfg(feature = "core")]
    #[test]
    fn test_tensor_creation() {
        use tensor::Tensor;
        let t = Tensor::from_vec(vec![1.0, 2.0, 3.0, 4.0], &[2, 2]).unwrap();
        assert_eq!(t.shape(), &[2, 2]);
    }

    /// Smoke test: wrapping a tensor in a `Variable` preserves its shape.
    #[cfg(feature = "core")]
    #[test]
    fn test_variable_creation() {
        use autograd::Variable;
        use tensor::Tensor;
        let t = Tensor::from_vec(vec![1.0, 2.0, 3.0], &[3]).unwrap();
        let v = Variable::new(t, true);
        assert_eq!(v.data().shape(), &[3]);
    }

    /// Smoke test: a 4->2 linear layer maps a [1, 4] input to [1, 2].
    #[cfg(feature = "nn")]
    #[test]
    fn test_linear_layer() {
        use autograd::Variable;
        use nn::{Linear, Module};
        use tensor::Tensor;
        let layer = Linear::new(4, 2);
        let input = Variable::new(Tensor::from_vec(vec![1.0; 4], &[1, 4]).unwrap(), false);
        let output = layer.forward(&input);
        assert_eq!(output.data().shape(), &[1, 2]);
    }
}