quantize_rs/lib.rs
//! Neural network quantization toolkit for ONNX models.
//!
//! `quantize-rs` converts FP32 ONNX model weights to INT8 or INT4,
//! reducing model size by 4--8x with minimal accuracy loss. It supports
//! per-tensor and per-channel quantization, calibration-based range
//! optimization, and writes ONNX-Runtime-compatible QDQ models.
//!
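//! # Quick start
//!
//! A minimal sketch of the intended flow. The method names below
//! (`OnnxModel::load`, `Quantizer::quantize`, `OnnxModel::save`) are
//! illustrative rather than a stable API; see [`onnx_utils`] and
//! [`quantization`] for the exact signatures.
//!
//! ```ignore
//! use quantize_rs::{OnnxModel, QuantConfig, Quantizer};
//!
//! // Load an FP32 ONNX model from disk (hypothetical constructor name).
//! let mut model = OnnxModel::load("model.onnx")?;
//!
//! // Quantize every eligible weight tensor to INT8 (hypothetical API).
//! let quantizer = Quantizer::new(QuantConfig::default());
//! quantizer.quantize(&mut model)?;
//!
//! // Write an ONNX-Runtime-compatible QDQ model back out.
//! model.save("model.int8.onnx")?;
//! ```
//!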
//! # Modules
//!
//! - [`quantization`] -- core quantization logic (INT8/INT4, per-channel, packing)
//! - [`onnx_utils`] -- ONNX model loading, weight extraction, QDQ save, validation
//! - [`onnx_proto`] -- ONNX protobuf message definitions
//! - [`calibration`] -- (feature `calibration`) calibration datasets, activation-based inference, range methods
//! - [`config`] -- YAML/TOML configuration file support
//! - [`errors`] -- typed error enum ([`QuantizeError`]) for all public API functions
//!
//! # Feature flags
//!
//! - **`calibration`** *(default)* -- enables activation-based calibration (adds `tract-onnx`, `ndarray`)
//! - **`python`** -- enables PyO3 bindings (`quantize_rs` Python module)
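//!
//! Calibration is enabled by default. To build without it (and drop the
//! `tract-onnx` and `ndarray` dependencies), disable default features in
//! `Cargo.toml` -- a sketch, with a wildcard version requirement standing
//! in for the one you actually pin:
//!
//! ```toml
//! [dependencies]
//! quantize-rs = { version = "*", default-features = false }
//! ```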

pub mod errors;
pub mod onnx_proto;
pub mod onnx_utils;
pub mod quantization;
pub mod config;
#[cfg(feature = "calibration")]
pub mod calibration;

pub use errors::QuantizeError;
pub use onnx_utils::{ModelInfo, OnnxModel, WeightTensor, QuantizedWeightInfo, ConnectivityReport};
pub use onnx_utils::graph_builder::QdqWeightInput;
pub use quantization::{Quantizer, QuantConfig, QuantParams, pack_int4, unpack_int4};
pub use config::Config;
#[cfg(feature = "calibration")]
pub use calibration::{CalibrationDataset, stats::ActivationStats};
#[cfg(feature = "calibration")]
pub use calibration::inference::ActivationEstimator;

/// Library version string, read from `Cargo.toml` at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version() {
        assert!(!VERSION.is_empty());
    }
}

#[cfg(feature = "python")]
mod python;