reflex/embedding/
error.rs

use std::path::PathBuf;
use thiserror::Error;

/// Errors returned by embedding generation and model loading.
#[derive(Debug, Error)]
pub enum EmbeddingError {
    /// Model files were not found.
    // `PathBuf` does not implement `Display`, so format via `.display()`.
    #[error("embedding model not found at path: {}", .path.display())]
    ModelNotFound {
        /// Missing model path.
        path: PathBuf,
    },

    /// Model load failed.
    #[error("failed to load embedding model: {reason}")]
    ModelLoadFailed {
        /// Error message.
        reason: String,
    },

    /// Requested compute device is unavailable.
    #[error("{device} device unavailable: {reason}")]
    DeviceUnavailable {
        /// Device name (e.g. "cuda", "metal").
        device: String,
        /// Error message.
        reason: String,
    },

    /// Inference failed.
    #[error("embedding inference failed: {reason}")]
    InferenceFailed {
        /// Error message.
        reason: String,
    },

    /// Tokenization failed.
    #[error("tokenization failed: {reason}")]
    TokenizationFailed {
        /// Error message.
        reason: String,
    },

    /// Configuration is invalid.
    #[error("invalid model configuration: {reason}")]
    InvalidConfig {
        /// Error message.
        reason: String,
    },
}

// Candle errors surface from tensor and model operations, so map them
// to a generic inference failure.
impl From<candle_core::Error> for EmbeddingError {
    fn from(err: candle_core::Error) -> Self {
        EmbeddingError::InferenceFailed {
            reason: err.to_string(),
        }
    }
}

// I/O errors arise while reading model files from disk, so map them
// to a load failure.
impl From<std::io::Error> for EmbeddingError {
    fn from(err: std::io::Error) -> Self {
        EmbeddingError::ModelLoadFailed {
            reason: err.to_string(),
        }
    }
}
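
// --- Usage sketch (illustrative, not part of the original module) ---
// A minimal example of how the `From` impls above let callers bubble
// lower-level errors up with `?`, and how the `#[error]` messages render.
// `read_model_bytes` is a hypothetical helper invented for this sketch;
// only `std` APIs are used.
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::Path;

    // Hypothetical helper: `std::fs::read` returns `std::io::Error`, which
    // `?` converts into `EmbeddingError::ModelLoadFailed` via `From`.
    fn read_model_bytes(path: &Path) -> Result<Vec<u8>, EmbeddingError> {
        Ok(std::fs::read(path)?)
    }

    #[test]
    fn io_error_converts_to_model_load_failed() {
        let err = read_model_bytes(Path::new("/nonexistent/model.safetensors"))
            .unwrap_err();
        assert!(matches!(err, EmbeddingError::ModelLoadFailed { .. }));
    }

    #[test]
    fn display_renders_missing_path() {
        let err = EmbeddingError::ModelNotFound {
            path: PathBuf::from("/models/bge-small"),
        };
        assert_eq!(
            err.to_string(),
            "embedding model not found at path: /models/bge-small"
        );
    }
}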