// inference/error.rs
1//! Error types for the inference engine.
2
3use thiserror::Error;
4
/// Errors that can occur during inference operations.
///
/// Most variants carry only a `String` message: the `From` impls elsewhere in
/// this file flatten the underlying third-party errors via `to_string()`, so
/// the original error value (and its source chain) is not retained. The one
/// exception is `IoError`, which keeps the `std::io::Error` itself through
/// `#[from]`.
///
/// NOTE(review): the `InferenceError` variant shares the enum's own name,
/// which trips clippy's `enum_variant_names` lint; renaming it (e.g. to
/// `Forward`) would break existing callers, so it is left as-is here.
#[derive(Error, Debug)]
pub enum InferenceError {
    /// Model not found or failed to download
    #[error("Model not found: {0}")]
    ModelNotFound(String),

    /// Failed to load model weights
    #[error("Failed to load model: {0}")]
    ModelLoadError(String),

    /// Tokenization error (see the `From<tokenizers::Error>` impl)
    #[error("Tokenization failed: {0}")]
    TokenizationError(String),

    /// Inference/forward pass error
    #[error("Inference failed: {0}")]
    InferenceError(String),

    /// Device not available (CUDA/Metal)
    #[error("Device not available: {0}")]
    DeviceNotAvailable(String),

    /// Invalid input supplied by the caller
    #[error("Invalid input: {0}")]
    InvalidInput(String),

    /// IO error; retains the source error (converted automatically via `?`)
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),

    /// Candle tensor-library error (see the `From<candle_core::Error>` impl)
    #[error("Candle error: {0}")]
    CandleError(String),

    /// HuggingFace Hub download/API error (see the `From<ApiError>` impl)
    #[error("HuggingFace Hub error: {0}")]
    HubError(String),
}
44
45impl From<candle_core::Error> for InferenceError {
46    fn from(err: candle_core::Error) -> Self {
47        InferenceError::CandleError(err.to_string())
48    }
49}
50
51impl From<tokenizers::Error> for InferenceError {
52    fn from(err: tokenizers::Error) -> Self {
53        InferenceError::TokenizationError(err.to_string())
54    }
55}
56
57impl From<hf_hub::api::tokio::ApiError> for InferenceError {
58    fn from(err: hf_hub::api::tokio::ApiError) -> Self {
59        InferenceError::HubError(err.to_string())
60    }
61}
62
/// Result type for inference operations.
///
/// Shorthand for `std::result::Result<T, InferenceError>`. Note that
/// importing this alias shadows the prelude `Result` in the importing module.
pub type Result<T> = std::result::Result<T, InferenceError>;