// quantize_rs/errors.rs

//! Typed error handling for the quantize-rs library.
//!
//! All public API functions return [`Result<T>`](type@Result), which uses
//! [`QuantizeError`] as the error type. The CLI binary converts these into
//! `anyhow::Error` automatically via the blanket `From<E: std::error::Error>`
//! impl, so callers that prefer `anyhow` can use `?` without `.map_err()`.

use std::fmt;
use std::path::PathBuf;
10
11/// Result type alias used throughout the quantize-rs public API.
12pub type Result<T> = std::result::Result<T, QuantizeError>;
13
/// Errors produced by the quantize-rs library.
///
/// Each variant covers a distinct failure category. The `reason` field
/// carries a human-readable explanation suitable for display.
//
// `Clone`/`PartialEq`/`Eq` are derived in addition to `Debug`: every field is
// a `String` or `PathBuf`, so the derives are cheap, and they let callers and
// tests compare or retain errors without string-matching `Display` output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum QuantizeError {
    /// Empty tensor, shape mismatch, per-channel on a scalar, etc.
    InvalidTensor {
        /// What went wrong.
        reason: String,
    },

    /// Unsupported quantization configuration (e.g. bits != 4 or 8).
    UnsupportedConfig {
        /// What went wrong.
        reason: String,
    },

    /// Failed to load an ONNX model from disk.
    ModelLoad {
        /// Path that was being loaded.
        path: PathBuf,
        /// What went wrong.
        reason: String,
    },

    /// Failed to save a quantized ONNX model to disk.
    ModelSave {
        /// Path that was being written.
        path: PathBuf,
        /// What went wrong.
        reason: String,
    },

    /// Error during QDQ graph transformation (weight not found, size mismatch, etc.).
    GraphTransform {
        /// What went wrong.
        reason: String,
    },

    /// Error during calibration (invalid dataset, inference failure, etc.).
    Calibration {
        /// What went wrong.
        reason: String,
    },

    /// Configuration file parsing or validation error.
    Config {
        /// What went wrong.
        reason: String,
    },
}
66
67impl fmt::Display for QuantizeError {
68 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
69 match self {
70 QuantizeError::InvalidTensor { reason } => {
71 write!(f, "invalid tensor: {reason}")
72 }
73 QuantizeError::UnsupportedConfig { reason } => {
74 write!(f, "unsupported config: {reason}")
75 }
76 QuantizeError::ModelLoad { path, reason } => {
77 write!(f, "failed to load model '{}': {reason}", path.display())
78 }
79 QuantizeError::ModelSave { path, reason } => {
80 write!(f, "failed to save model '{}': {reason}", path.display())
81 }
82 QuantizeError::GraphTransform { reason } => {
83 write!(f, "graph transform error: {reason}")
84 }
85 QuantizeError::Calibration { reason } => {
86 write!(f, "calibration error: {reason}")
87 }
88 QuantizeError::Config { reason } => {
89 write!(f, "config error: {reason}")
90 }
91 }
92 }
93}
94
// Marker impl only: every variant carries its cause as a pre-rendered
// `reason` string rather than a wrapped error value, so there is no
// underlying error to expose via `source()` and the default trait
// methods suffice.
impl std::error::Error for QuantizeError {}