// codetether_agent/provider/traits.rs
//! The [`Provider`] trait and [`ModelInfo`] struct.
//!
//! Every AI backend (OpenAI, Anthropic, Bedrock, etc.) implements [`Provider`]
//! so the [`ProviderRegistry`](super::ProviderRegistry) can dispatch requests
//! uniformly.
//!
//! # Examples
//!
//! ```rust
//! use codetether_agent::provider::{FinishReason, ModelInfo};
//!
//! let info = ModelInfo {
//!     id: "gpt-4o".into(),
//!     name: "GPT-4o".into(),
//!     provider: "openai".into(),
//!     context_window: 128_000,
//!     max_output_tokens: Some(16_384),
//!     supports_vision: true,
//!     supports_tools: true,
//!     supports_streaming: true,
//!     input_cost_per_million: Some(2.5),
//!     output_cost_per_million: Some(10.0),
//! };
//! assert!(info.supports_vision);
//! ```

use anyhow::Result;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};

use super::types::{
    CompletionRequest, CompletionResponse, EmbeddingRequest, EmbeddingResponse, StreamChunk,
};

35/// Trait that all AI providers must implement.
36#[async_trait]
37pub trait Provider: Send + Sync {
38    /// Provider identifier (e.g. `"openai"`, `"bedrock"`).
39    ///
40    /// # Examples
41    ///
42    /// ```rust,no_run
43    /// # use codetether_agent::provider::Provider;
44    /// # fn demo(p: &dyn Provider) {
45    /// assert!(!p.name().is_empty());
46    /// # }
47    /// ```
48    fn name(&self) -> &str;
49
50    /// List models available under this provider.
51    ///
52    /// # Examples
53    ///
54    /// ```rust,no_run
55    /// # use codetether_agent::provider::Provider;
56    /// # async fn demo(p: &dyn Provider) {
57    /// let models = p.list_models().await.unwrap();
58    /// assert!(!models.is_empty());
59    /// # }
60    /// ```
61    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
62
63    /// Generate a single completion.
64    ///
65    /// # Examples
66    ///
67    /// ```rust,no_run
68    /// # use codetether_agent::provider::{Provider, CompletionRequest};
69    /// # async fn demo(p: &dyn Provider, req: CompletionRequest) {
70    /// let resp = p.complete(req).await.unwrap();
71    /// # }
72    /// ```
73    async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse>;
74
75    /// Generate a streaming completion.
76    ///
77    /// # Examples
78    ///
79    /// ```rust,no_run
80    /// # use codetether_agent::provider::{Provider, CompletionRequest};
81    /// # async fn demo(p: &dyn Provider, req: CompletionRequest) {
82    /// let stream = p.complete_stream(req).await.unwrap();
83    /// # }
84    /// ```
85    async fn complete_stream(
86        &self,
87        request: CompletionRequest,
88    ) -> Result<futures::stream::BoxStream<'static, StreamChunk>>;
89
90    /// Generate embeddings (optional; default returns an error).
91    async fn embed(&self, _request: EmbeddingRequest) -> Result<EmbeddingResponse> {
92        anyhow::bail!("Provider '{}' does not support embeddings", self.name())
93    }
94}
96/// Metadata about a model offered by a provider.
97///
98/// # Examples
99///
100/// ```rust
101/// use codetether_agent::provider::ModelInfo;
102/// let info = ModelInfo {
103///     id: "gpt-4o".into(),
104///     name: "GPT-4o".into(),
105///     provider: "openai".into(),
106///     context_window: 128_000,
107///     max_output_tokens: Some(16_384),
108///     supports_vision: true,
109///     supports_tools: true,
110///     supports_streaming: true,
111///     input_cost_per_million: Some(2.5),
112///     output_cost_per_million: Some(10.0),
113/// };
114/// assert!(info.supports_vision);
115/// ```
116#[derive(Debug, Clone, Serialize, Deserialize)]
117pub struct ModelInfo {
118    /// Canonical model ID (e.g. `"us.anthropic.claude-sonnet-4-20250514-v1:0"`).
119    pub id: String,
120    /// Human-readable name.
121    pub name: String,
122    /// Provider that owns this model.
123    pub provider: String,
124    /// Input + output token budget.
125    pub context_window: usize,
126    /// Maximum tokens the model can generate in one call.
127    pub max_output_tokens: Option<usize>,
128    /// Whether the model accepts image inputs.
129    pub supports_vision: bool,
130    /// Whether the model supports tool/function calling.
131    pub supports_tools: bool,
132    /// Whether the model supports server-sent streaming.
133    pub supports_streaming: bool,
134    /// Cost per million input tokens (USD), if known.
135    pub input_cost_per_million: Option<f64>,
136    /// Cost per million output tokens (USD), if known.
137    pub output_cost_per_million: Option<f64>,
138}