//! dakera-inference 0.6.2
//!
//! Embedded inference engine for Dakera — generates embeddings locally.
//!
//! Error types for the inference engine.

use thiserror::Error;

/// Errors that can occur during inference operations.
#[derive(Error, Debug)]
pub enum InferenceError {
    /// The requested model could not be located locally or downloaded.
    #[error("Model not found: {0}")]
    ModelNotFound(String),

    /// Model weights were found but could not be loaded/deserialized.
    #[error("Failed to load model: {0}")]
    ModelLoadError(String),

    /// Tokenization failed; also produced from `tokenizers::Error` via `From`.
    #[error("Tokenization failed: {0}")]
    TokenizationError(String),

    /// The forward pass itself failed.
    ///
    /// NOTE(review): this variant shares its name with the enum
    /// (`InferenceError::InferenceError`), which reads awkwardly at call
    /// sites; renaming (e.g. to `Forward`) would break callers, so it is
    /// left as-is.
    #[error("Inference failed: {0}")]
    InferenceError(String),

    /// A requested compute device (e.g. CUDA or Metal) is unavailable.
    #[error("Device not available: {0}")]
    DeviceNotAvailable(String),

    /// Caller-supplied input was rejected before inference ran.
    #[error("Invalid input: {0}")]
    InvalidInput(String),

    /// Underlying I/O failure; wraps `std::io::Error` automatically via
    /// thiserror's `#[from]`, preserving the source error in the chain.
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),

    /// Error from the Candle tensor library, stringified by the manual
    /// `From<candle_core::Error>` impl (the source chain is not preserved).
    #[error("Candle error: {0}")]
    CandleError(String),

    /// Error from the HuggingFace Hub client, stringified by the manual
    /// `From<hf_hub::api::tokio::ApiError>` impl.
    #[error("HuggingFace Hub error: {0}")]
    HubError(String),
}

impl From<candle_core::Error> for InferenceError {
    /// Converts a Candle error into [`InferenceError::CandleError`] by
    /// capturing its display message (the original source is not retained).
    fn from(err: candle_core::Error) -> Self {
        let message = err.to_string();
        Self::CandleError(message)
    }
}

impl From<tokenizers::Error> for InferenceError {
    /// Converts a tokenizer error into [`InferenceError::TokenizationError`]
    /// by capturing its display message.
    fn from(err: tokenizers::Error) -> Self {
        let message = err.to_string();
        Self::TokenizationError(message)
    }
}

impl From<hf_hub::api::tokio::ApiError> for InferenceError {
    /// Converts a HuggingFace Hub API error into [`InferenceError::HubError`]
    /// by capturing its display message.
    fn from(err: hf_hub::api::tokio::ApiError) -> Self {
        let message = err.to_string();
        Self::HubError(message)
    }
}

/// Result type for inference operations.
///
/// The error type defaults to [`InferenceError`], so `Result<T>` behaves
/// exactly as before, while `Result<T, E>` is now also accepted for code
/// that needs a different error type (the same pattern `anyhow::Result`
/// uses). Fully backward-compatible for existing callers.
pub type Result<T, E = InferenceError> = std::result::Result<T, E>;