use thiserror::Error;
/// Unified error type for the TurboQuant crate.
///
/// Display strings come from the `thiserror` `#[error(...)]` attributes;
/// variants fall into validation errors (dimensions, bit widths, vector
/// norms), mismatch errors (carrying `expected`/`got` fields), and
/// string-wrapped errors from external subsystems (I/O, model loading,
/// tokenizer, ONNX). Stringly-typed wrappers (e.g. `Io(String)`) keep the
/// enum free of foreign error types; callers convert at the boundary.
#[derive(Error, Debug)]
pub enum TurboQuantError {
/// A dimension parameter was zero (must be >= 1).
#[error("Dimension must be positive, got {0}")]
InvalidDimension(usize),
/// Requested quantization bit width outside the supported 1..=8 range.
#[error("Bit width must be between 1 and 8, got {0}")]
InvalidBitWidth(u8),
/// Two components disagreed on the quantization bit width.
#[error("Bit width mismatch: expected {expected}, got {got}")]
BitWidthMismatch { expected: u8, got: u8 },
/// Input vector was expected to be L2-normalized but was not.
#[error("Vector must be on unit sphere (norm ≈ 1), got norm = {0}")]
NotUnitVector(f64),
/// Normalization requested on a (near-)zero vector; carries the norm.
#[error("Cannot normalize zero vector (norm = {0})")]
ZeroVector(f64),
/// Vector dimensionality disagreed with the configured dimension.
#[error("Dimension mismatch: expected {expected}, got {got}")]
DimensionMismatch { expected: usize, got: usize },
/// Generic length mismatch; `context` names the buffer being checked.
#[error("{context} length mismatch: expected {expected}, got {got}")]
LengthMismatch {
context: String,
expected: usize,
got: usize,
},
/// A quantization index exceeded the maximum representable for the
/// given bit width (max = 2^bit_width - 1).
#[error(
"Invalid quantization index {index}; maximum allowed for {bit_width}-bit quantization is {max}"
)]
InvalidQuantizationIndex { index: u8, max: u8, bit_width: u8 },
/// A numeric input was not usable (e.g. NaN/inf); `context` names it.
#[error("Invalid numeric value for {context}: {value}")]
InvalidValue { context: String, value: f64 },
/// A codebook lookup was attempted before the codebook for this bit
/// width was built.
#[error("Codebook not initialized for bit width {0}")]
CodebookNotInitialized(u8),
/// The named execution backend is not compiled in / supported.
#[error("Unsupported execution backend: {0}")]
UnsupportedBackend(String),
/// Stringified I/O failure (converted at the boundary, not `#[from]`,
/// to keep this enum `Clone`-friendly and dependency-free).
#[error("I/O error: {0}")]
Io(String),
/// A trace file did not match the expected on-disk format.
#[error("Trace format error: {0}")]
TraceFormat(String),
/// Model configuration (e.g. config.json) was missing or invalid.
#[error("Model config error: {0}")]
ModelConfig(String),
/// Model weights/tensors did not match the expected layout.
#[error("Model format error: {0}")]
ModelFormat(String),
/// Tokenizer loading or use failed.
#[error("Tokenizer error: {0}")]
Tokenizer(String),
/// Error surfaced from the ONNX runtime.
#[error("ONNX runtime error: {0}")]
Onnx(String),
/// The model architecture/inputs are not supported by this crate.
#[error("Unsupported model: {0}")]
UnsupportedModel(String),
/// Catch-all for invariant violations; indicates a bug, not user error.
#[error("Internal error: {0}")]
Internal(String),
}
/// Crate-wide result alias pinning the error type to [`TurboQuantError`].
pub type Result<T> = std::result::Result<T, TurboQuantError>;
#[cfg(test)]
mod tests {
    use super::*;

    /// Asserts that the `Display` output of `err` mentions every needle.
    /// Renders the message once instead of once per substring check.
    fn assert_msg_contains(err: &TurboQuantError, needles: &[&str]) {
        let msg = err.to_string();
        for needle in needles {
            assert!(
                msg.contains(needle),
                "expected {:?} in error message {:?}",
                needle,
                msg
            );
        }
    }

    /// Every variant's Display message should surface its payload values.
    #[test]
    fn test_error_display() {
        assert_msg_contains(&TurboQuantError::InvalidDimension(0), &["0"]);
        assert_msg_contains(&TurboQuantError::InvalidBitWidth(9), &["9"]);
        assert_msg_contains(
            &TurboQuantError::BitWidthMismatch {
                expected: 4,
                got: 3,
            },
            &["4", "3"],
        );
        assert_msg_contains(&TurboQuantError::NotUnitVector(2.5), &["2.5"]);
        assert_msg_contains(&TurboQuantError::ZeroVector(0.0), &["0"]);
        assert_msg_contains(
            &TurboQuantError::DimensionMismatch {
                expected: 64,
                got: 128,
            },
            &["64", "128"],
        );
        assert_msg_contains(
            &TurboQuantError::LengthMismatch {
                context: "test payload".into(),
                expected: 64,
                got: 63,
            },
            &["test payload", "64", "63"],
        );
        assert_msg_contains(
            &TurboQuantError::InvalidQuantizationIndex {
                index: 9,
                max: 7,
                bit_width: 3,
            },
            &["9", "7", "3"],
        );
        // NaN has no stable numeric rendering to assert, so only the
        // context label is checked for this variant.
        assert_msg_contains(
            &TurboQuantError::InvalidValue {
                context: "temperature".into(),
                value: f64::NAN,
            },
            &["temperature"],
        );
        assert_msg_contains(&TurboQuantError::CodebookNotInitialized(4), &["4"]);
        assert_msg_contains(
            &TurboQuantError::UnsupportedBackend("wgpu prod path".into()),
            &["wgpu"],
        );
        assert_msg_contains(
            &TurboQuantError::Io("permission denied".into()),
            &["permission denied"],
        );
        assert_msg_contains(
            &TurboQuantError::TraceFormat("missing queries tensor".into()),
            &["queries"],
        );
        assert_msg_contains(
            &TurboQuantError::ModelConfig("missing config.json".into()),
            &["config.json"],
        );
        assert_msg_contains(
            &TurboQuantError::ModelFormat("unexpected cache tensor shape".into()),
            &["cache tensor"],
        );
        assert_msg_contains(
            &TurboQuantError::Tokenizer("invalid tokenizer.json".into()),
            &["tokenizer"],
        );
        assert_msg_contains(
            &TurboQuantError::Onnx("missing logits output".into()),
            &["logits"],
        );
        assert_msg_contains(
            &TurboQuantError::UnsupportedModel("requires unsupported extra input".into()),
            &["unsupported"],
        );
        assert_msg_contains(
            &TurboQuantError::Internal("test message".into()),
            &["test message"],
        );
    }
}