// aagt_core/agent/provider.rs
1//! Provider trait for LLM integrations
2
3use async_trait::async_trait;
4
5use crate::error::Result;
6use crate::agent::message::Message;
7use crate::agent::streaming::StreamingResponse;
8use crate::skills::tool::ToolDefinition;
9
10mod resilient;
11
12pub use resilient::{ResilientProvider, CircuitBreakerConfig};
13
/// Request for a chat completion
///
/// Carries everything a [`Provider`] needs to produce a streamed
/// completion via [`Provider::stream_completion`]. The `Default`
/// impl yields an empty `model`, no messages, no tools, and all
/// optional knobs unset — callers are expected to fill in at least
/// `model` and `messages`.
#[derive(Debug, Clone, Default)]
pub struct ChatRequest {
    /// Model name to use (provider-specific identifier)
    pub model: String,
    /// Optional system prompt, sent separately from `messages`
    /// (some providers take the system prompt out-of-band)
    pub system_prompt: Option<String>,
    /// Conversation history, oldest first
    /// (NOTE(review): ordering assumed from convention — confirm against callers)
    pub messages: Vec<Message>,
    /// Available tools the model may call; empty means no tool use
    pub tools: Vec<ToolDefinition>,
    /// Optional temperature setting; `None` defers to the provider's default
    /// (valid range is provider-specific — typically 0.0..=2.0, verify per provider)
    pub temperature: Option<f64>,
    /// Optional max tokens to generate; `None` defers to the provider's default
    pub max_tokens: Option<u64>,
    /// Optional provider-specific parameters, forwarded opaquely as JSON
    /// so new provider features don't require changes to this struct
    pub extra_params: Option<serde_json::Value>,
    /// Whether to enable explicit context caching (e.g. Anthropic cache_control);
    /// providers without such a feature are free to ignore this flag
    pub enable_cache_control: bool,
}
34
/// Trait for LLM providers
///
/// Implement this trait to add support for a new LLM provider.
///
/// The `Send + Sync` bounds allow a provider to be shared across async
/// tasks (e.g. behind an `Arc`). See [`ResilientProvider`] for a wrapper
/// that adds circuit-breaker behavior around any implementation.
#[async_trait]
pub trait Provider: Send + Sync {
    /// Stream a completion request
    ///
    /// Sends `request` to the underlying LLM and returns a
    /// [`StreamingResponse`] whose items arrive incrementally.
    ///
    /// # Errors
    ///
    /// Returns an error if the request cannot be sent or the provider
    /// rejects it (exact failure modes are implementation-specific).
    async fn stream_completion(
        &self,
        request: ChatRequest,
    ) -> Result<StreamingResponse>;

    /// Get provider name (for logging/debugging)
    ///
    /// `&'static str` keeps this allocation-free; implementations
    /// typically return a string literal.
    fn name(&self) -> &'static str;

    /// Check if provider supports streaming
    ///
    /// Defaults to `true`; override to return `false` for providers
    /// that can only produce whole responses.
    fn supports_streaming(&self) -> bool {
        true
    }

    /// Check if provider supports tool calls
    ///
    /// Defaults to `true`; override to return `false` for providers
    /// that ignore or reject the `tools` field of [`ChatRequest`].
    fn supports_tools(&self) -> bool {
        true
    }
}