// bamboo_agent/agent/llm/provider.rs
1//! LLM provider trait and types
2//!
3//! This module defines the interface for LLM (Large Language Model) providers,
4//! enabling support for multiple LLM backends through a common trait.
5
6use crate::agent::core::{tools::ToolSchema, Message};
7use crate::agent::llm::types::LLMChunk;
8use async_trait::async_trait;
9use futures::Stream;
10use std::pin::Pin;
11use thiserror::Error;
12
/// Errors that can occur when working with LLM providers
///
/// Derives `Display` via `thiserror`; `#[from]` variants allow `?` to
/// convert transport/serialization errors into `LLMError` automatically.
#[derive(Error, Debug)]
pub enum LLMError {
    /// HTTP transport failures (connection, timeout, TLS, ...), converted
    /// automatically from `reqwest::Error`
    #[error("HTTP error: {0}")]
    Http(#[from] reqwest::Error),

    /// JSON serialization/deserialization failures on request or response
    /// bodies, converted automatically from `serde_json::Error`
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),

    /// Streaming response errors — e.g. a stream that ends early or yields
    /// malformed chunks; carries a human-readable description
    #[error("Stream error: {0}")]
    Stream(String),

    /// Errors reported by the LLM API itself (rate limits, invalid
    /// requests, etc.); carries the API's error text
    #[error("API error: {0}")]
    Api(String),

    /// Authentication/authorization errors (missing or rejected credentials)
    #[error("Authentication error: {0}")]
    Auth(String),

    /// Protocol conversion errors, converted automatically from the
    /// crate's `ProtocolError`
    #[error("Protocol conversion error: {0}")]
    Protocol(#[from] crate::agent::llm::protocol::ProtocolError),
}
40
/// Convenient result type for LLM operations; the error is always [`LLMError`]
pub type Result<T> = std::result::Result<T, LLMError>;

/// Type alias for boxed streaming LLM responses
///
/// A pinned, heap-allocated, `Send` stream of [`LLMChunk`] results, as
/// returned by `LLMProvider::chat_stream`. Pinned so it can be polled
/// without being moved.
pub type LLMStream = Pin<Box<dyn Stream<Item = Result<LLMChunk>> + Send>>;
46
47/// Trait for LLM provider implementations
48///
49/// This trait defines the interface that all LLM providers must implement
50/// to work with Bamboo's agent system. Providers handle communication with
51/// specific LLM services (OpenAI, Anthropic, local models, etc.).
52///
53/// # Design Principle
54///
55/// The `model` parameter is **required** in `chat_stream`, not optional.
56/// This ensures that the calling code explicitly specifies which model to use,
57/// preventing accidental use of unintended models and making model selection
58/// explicit and auditable.
59///
60/// # Example
61///
62/// ```ignore
63/// use bamboo_agent::agent::llm::provider::LLMProvider;
64///
65/// async fn use_provider(provider: &dyn LLMProvider) {
66/// let stream = provider.chat_stream(
67/// &messages,
68/// &tools,
69/// Some(4096),
70/// "claude-sonnet-4-6", // Model is required
71/// ).await?;
72/// }
73/// ```
74#[async_trait]
75pub trait LLMProvider: Send + Sync {
76 /// Stream chat completion from the LLM
77 ///
78 /// This is the primary method for interacting with LLMs, returning
79 /// a stream of response chunks that can be processed incrementally.
80 ///
81 /// # Arguments
82 ///
83 /// * `messages` - Conversation history and current prompt
84 /// * `tools` - Available tools the LLM can call
85 /// * `max_output_tokens` - Optional limit on response length
86 /// * `model` - **Required** model identifier (e.g., "claude-sonnet-4-6")
87 ///
88 /// # Returns
89 ///
90 /// A stream of `LLMChunk` items containing partial responses
91 ///
92 /// # Errors
93 ///
94 /// Returns `LLMError` on network failures, API errors, or invalid requests
95 async fn chat_stream(
96 &self,
97 messages: &[Message],
98 tools: &[ToolSchema],
99 max_output_tokens: Option<u32>,
100 model: &str,
101 ) -> Result<LLMStream>;
102
103 /// Lists available models from this provider
104 ///
105 /// Returns a list of model identifiers that can be used with `chat_stream`.
106 /// Default implementation returns an empty list.
107 async fn list_models(&self) -> Result<Vec<String>> {
108 // Default implementation returns empty list
109 Ok(vec![])
110 }
111}
112
#[cfg(test)]
mod tests {
    use super::*;
    use crate::agent::core::{tools::ToolSchema, Message};

    // ========== MODEL REQUIREMENT ARCHITECTURE TESTS ==========
    // Design principle under protection:
    // "Provider chat_stream must require model parameter, not have default model field"
    //
    // The previous versions of these tests only ran `assert!(true, ...)`,
    // which verifies nothing at runtime (clippy::assertions_on_constants).
    // The property is a *compile-time* fact about the trait signature, so it
    // is encoded here as code that stops compiling if the signature regresses.

    /// Compile-time check: `chat_stream` accepts a bare `&str` model.
    ///
    /// If `model: &str` were ever relaxed to `model: Option<&str>`, the
    /// string literal below would no longer coerce and this function would
    /// fail to build, flagging the regression before any test runs.
    #[allow(dead_code)]
    fn chat_stream_requires_model_parameter(
        provider: &dyn LLMProvider,
        messages: &[Message],
        tools: &[ToolSchema],
    ) {
        // Not awaited: merely constructing the call pins the signature.
        let _pinned = provider.chat_stream(messages, tools, Some(4096), "claude-sonnet-4-6");
    }

    /// Compile-time check: the trait is object-safe and `list_models` is
    /// callable on a `&dyn LLMProvider`, so callers can feed any listed
    /// identifier straight into `chat_stream` as the required model.
    #[allow(dead_code)]
    fn list_models_is_object_safe(provider: &dyn LLMProvider) {
        let _pinned = provider.list_models();
    }

    /// Runtime check: string-carrying error variants render their payload
    /// through the `thiserror`-derived `Display` implementation.
    #[test]
    fn error_display_includes_payload() {
        assert_eq!(
            LLMError::Stream("boom".to_string()).to_string(),
            "Stream error: boom"
        );
        assert_eq!(LLMError::Api("429".to_string()).to_string(), "API error: 429");
        assert_eq!(
            LLMError::Auth("bad key".to_string()).to_string(),
            "Authentication error: bad key"
        );
    }
}