//! dakera-inference 0.9.11
//!
//! Embedded inference engine for Dakera — generates embeddings locally via ONNX Runtime.
//! (See the crate documentation for usage details.)
//! Error types for the inference engine.

use thiserror::Error;

/// Errors that can occur during inference operations.
/// Errors that can occur during inference operations.
///
/// Display messages are generated by `thiserror` from the `#[error(...)]`
/// attributes; each variant carries a human-readable detail string (or, for
/// `IoError`, the underlying `std::io::Error`).
///
/// NOTE(review): the `InferenceError` variant shares the enum's name
/// (clippy `enum_variant_names`); renaming it would break callers, so it
/// is left as-is.
#[derive(Error, Debug)]
pub enum InferenceError {
    /// Model not found or failed to download.
    #[error("Model not found: {0}")]
    ModelNotFound(String),

    /// Failed to load model weights.
    #[error("Failed to load model: {0}")]
    ModelLoadError(String),

    /// Tokenization error; produced via `From<tokenizers::Error>`.
    #[error("Tokenization failed: {0}")]
    TokenizationError(String),

    /// Inference/forward pass error.
    #[error("Inference failed: {0}")]
    InferenceError(String),

    /// Invalid input supplied by the caller.
    #[error("Invalid input: {0}")]
    InvalidInput(String),

    /// IO error; converted automatically from `std::io::Error` via `#[from]`.
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),

    /// ONNX Runtime error; produced via `From<ort::Error>`.
    #[error("ONNX Runtime error: {0}")]
    OrtError(String),

    /// HuggingFace Hub error (e.g. during model download).
    #[error("HuggingFace Hub error: {0}")]
    HubError(String),

    /// External extraction provider error (EXT-1).
    #[error("Extraction failed: {0}")]
    ExtractionFailed(String),
}

/// Converts an ONNX Runtime error into [`InferenceError::OrtError`],
/// preserving only its display message.
impl From<ort::Error> for InferenceError {
    fn from(err: ort::Error) -> Self {
        let message = err.to_string();
        Self::OrtError(message)
    }
}

/// Converts a tokenizer error into [`InferenceError::TokenizationError`],
/// preserving only its display message.
impl From<tokenizers::Error> for InferenceError {
    fn from(err: tokenizers::Error) -> Self {
        let message = err.to_string();
        Self::TokenizationError(message)
    }
}

/// Result type for inference operations.
///
/// Shorthand for `std::result::Result<T, InferenceError>`, used as the
/// return type throughout this crate's fallible APIs.
pub type Result<T> = std::result::Result<T, InferenceError>;