// autoagents_llamacpp/error.rs
1//! Error handling and conversions for llama.cpp backend.
2
3use autoagents_llm::error::LLMError;
4use std::fmt;
5
/// Internal error type for llama.cpp operations.
///
/// Each variant wraps a human-readable message describing the failure;
/// conversion into the crate-wide `LLMError` is provided via `From` below.
// Clone/PartialEq/Eq are cheap for String payloads and make the error
// comparable in tests and callers; Debug alone was unnecessarily limiting.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum LlamaCppProviderError {
    /// Model loading failed.
    ModelLoad(String),
    /// Context creation failed.
    ContextLoad(String),
    /// Tokenization or detokenization failed.
    Tokenization(String),
    /// Inference failed.
    Inference(String),
    /// Configuration error.
    Config(String),
    /// Prompt or template error.
    Template(String),
    /// Embedding error.
    Embedding(String),
    /// Unsupported feature.
    Unsupported(String),
    /// Generic error.
    Other(String),
}
28
29impl fmt::Display for LlamaCppProviderError {
30    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
31        match self {
32            LlamaCppProviderError::ModelLoad(e) => write!(f, "Model Load Error: {}", e),
33            LlamaCppProviderError::ContextLoad(e) => write!(f, "Context Load Error: {}", e),
34            LlamaCppProviderError::Tokenization(e) => write!(f, "Tokenization Error: {}", e),
35            LlamaCppProviderError::Inference(e) => write!(f, "Inference Error: {}", e),
36            LlamaCppProviderError::Config(e) => write!(f, "Configuration Error: {}", e),
37            LlamaCppProviderError::Template(e) => write!(f, "Template Error: {}", e),
38            LlamaCppProviderError::Embedding(e) => write!(f, "Embedding Error: {}", e),
39            LlamaCppProviderError::Unsupported(e) => write!(f, "Unsupported: {}", e),
40            LlamaCppProviderError::Other(e) => write!(f, "llama.cpp Error: {}", e),
41        }
42    }
43}
44
// Marker impl: the default `source()` (returning `None`) is correct because
// every variant carries only a message `String`, not an underlying error.
impl std::error::Error for LlamaCppProviderError {}
46
47impl From<LlamaCppProviderError> for LLMError {
48    fn from(err: LlamaCppProviderError) -> Self {
49        match err {
50            LlamaCppProviderError::ModelLoad(e) => {
51                LLMError::ProviderError(format!("Failed to load model: {}", e))
52            }
53            LlamaCppProviderError::ContextLoad(e) => {
54                LLMError::ProviderError(format!("Failed to create context: {}", e))
55            }
56            LlamaCppProviderError::Tokenization(e) => {
57                LLMError::ProviderError(format!("Tokenization failed: {}", e))
58            }
59            LlamaCppProviderError::Inference(e) => {
60                LLMError::ProviderError(format!("Inference failed: {}", e))
61            }
62            LlamaCppProviderError::Config(e) => {
63                LLMError::InvalidRequest(format!("Invalid configuration: {}", e))
64            }
65            LlamaCppProviderError::Template(e) => {
66                LLMError::InvalidRequest(format!("Template error: {}", e))
67            }
68            LlamaCppProviderError::Embedding(e) => {
69                LLMError::ProviderError(format!("Embedding failed: {}", e))
70            }
71            LlamaCppProviderError::Unsupported(e) => LLMError::NoToolSupport(e),
72            LlamaCppProviderError::Other(e) => {
73                LLMError::ProviderError(format!("llama.cpp error: {}", e))
74            }
75        }
76    }
77}
78
#[cfg(test)]
mod tests {
    use super::*;

    /// `Display` output follows the `"<label>: <detail>"` shape.
    #[test]
    fn test_error_display() {
        let err = LlamaCppProviderError::ModelLoad(String::from("missing file"));
        assert_eq!(format!("{}", err), "Model Load Error: missing file");
    }

    /// Config errors convert into `LLMError::InvalidRequest` with context.
    #[test]
    fn test_error_to_llm_error() {
        let err = LlamaCppProviderError::Config(String::from("bad config"));
        let llm_err = LLMError::from(err);
        assert!(llm_err.to_string().contains("Invalid configuration"));
    }
}
95}