// agent_io/llm/mod.rs
1//! LLM abstraction layer for multiple providers
2
3mod anthropic;
4mod base;
5mod deepseek;
6mod google;
7mod groq;
8mod mistral;
9mod ollama;
10mod openai;
11mod openai_compatible;
12mod openrouter;
13mod schema;
14mod types;
15
16pub use anthropic::ChatAnthropic;
17pub use base::*;
18pub use deepseek::ChatDeepSeek;
19pub use google::ChatGoogle;
20pub use groq::ChatGroq;
21pub use mistral::ChatMistral;
22pub use ollama::ChatOllama;
23pub use openai::{ChatOpenAI, ReasoningEffort};
24pub use openai_compatible::ChatOpenAICompatible;
25pub use openrouter::ChatOpenRouter;
26pub use schema::SchemaOptimizer;
27pub use types::*;
28
29/// Error types for LLM operations
30#[derive(Debug, thiserror::Error)]
31pub enum LlmError {
32    #[error("API error: {0}")]
33    Api(String),
34
35    #[error("Authentication error: {0}")]
36    Auth(String),
37
38    #[error("Rate limit exceeded")]
39    RateLimit,
40
41    #[error("Model not found: {0}")]
42    ModelNotFound(String),
43
44    #[error("Serialization error: {0}")]
45    Serialization(#[from] serde_json::Error),
46
47    #[error("HTTP error: {0}")]
48    Http(#[from] reqwest::Error),
49
50    #[error("Stream error: {0}")]
51    Stream(String),
52
53    #[error("Configuration error: {0}")]
54    Config(String),
55}