// codetether_agent/provider/types.rs

//! Core types shared across all AI providers.
//!
//! Defines the message, request, response, and streaming types that every
//! provider implementation must accept or produce.
//!
//! # Examples
//!
//! ```rust
//! use codetether_agent::provider::{Message, Role, ContentPart};
//!
//! let msg = Message {
//!     role: Role::User,
//!     content: vec![ContentPart::Text { text: "hello".into() }],
//! };
//! assert_eq!(msg.role, Role::User);
//! ```
use serde::{Deserialize, Serialize};
/// A message in a conversation.
///
/// A message is a [`Role`] plus an ordered list of [`ContentPart`] blocks,
/// so a single message can mix text, images, tool calls, and tool results.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::{Message, Role, ContentPart};
/// let msg = Message {
///     role: Role::User,
///     content: vec![ContentPart::Text { text: "hello".into() }],
/// };
/// assert_eq!(msg.role, Role::User);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Who sent this message.
    pub role: Role,
    /// Ordered content blocks (text, images, tool calls, etc.).
    pub content: Vec<ContentPart>,
}
/// Participant role in a conversation.
///
/// Serialized in lowercase (e.g. `Role::User` → `"user"`) via
/// `#[serde(rename_all = "lowercase")]`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// System prompt / instructions.
    System,
    /// End-user input.
    User,
    /// Model response.
    Assistant,
    /// Tool result to be fed back to the model.
    Tool,
}
54/// One content block within a [`Message`].
55///
56/// # Examples
57///
58/// ```rust
59/// use codetether_agent::provider::ContentPart;
60/// let text = ContentPart::Text { text: "hi".into() };
61/// let tool_result = ContentPart::ToolResult {
62///     tool_call_id: "call_1".into(),
63///     content: "ok".into(),
64/// };
65/// ```
66#[derive(Debug, Clone, Serialize, Deserialize)]
67#[serde(tag = "type", rename_all = "snake_case")]
68pub enum ContentPart {
69    /// Plain text.
70    Text { text: String },
71    /// Image referenced by URL.
72    Image {
73        url: String,
74        mime_type: Option<String>,
75    },
76    /// File attachment.
77    File {
78        path: String,
79        mime_type: Option<String>,
80    },
81    /// A tool call made by the model.
82    ToolCall {
83        id: String,
84        name: String,
85        arguments: String,
86        /// Thought signature for Gemini 3.x models.
87        #[serde(skip_serializing_if = "Option::is_none")]
88        thought_signature: Option<String>,
89    },
90    /// Tool execution result to return to the model.
91    ToolResult {
92        tool_call_id: String,
93        content: String,
94    },
95    /// Extended thinking / reasoning output.
96    Thinking { text: String },
97}
98
/// Schema-driven tool definition passed to the model.
///
/// The model matches calls against `name` and fills in arguments according
/// to the JSON Schema in `parameters`.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::ToolDefinition;
/// let tool = ToolDefinition {
///     name: "bash".into(),
///     description: "Run a shell command".into(),
///     parameters: serde_json::json!({"type": "object"}),
/// };
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolDefinition {
    /// Tool function name (e.g. `"bash"`).
    pub name: String,
    /// Human-readable description the model uses to decide when to call.
    pub description: String,
    /// JSON Schema for the tool's input parameters.
    pub parameters: serde_json::Value,
}
/// Request to generate a completion.
///
/// Provider-agnostic: each provider implementation translates this into its
/// own wire format (this type itself is not serialized — it derives neither
/// `Serialize` nor `Deserialize`).
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::{CompletionRequest, Message, Role, ContentPart};
/// let req = CompletionRequest {
///     messages: vec![Message {
///         role: Role::User,
///         content: vec![ContentPart::Text { text: "hello".into() }],
///     }],
///     tools: vec![],
///     model: "gpt-4o".into(),
///     temperature: None,
///     top_p: None,
///     max_tokens: None,
///     stop: vec![],
/// };
/// assert_eq!(req.model, "gpt-4o");
/// ```
#[derive(Debug, Clone)]
pub struct CompletionRequest {
    /// Conversation history (system + user + assistant + tool).
    pub messages: Vec<Message>,
    /// Tools the model may invoke.
    pub tools: Vec<ToolDefinition>,
    /// Model identifier (provider-alias or full ID).
    pub model: String,
    /// Sampling temperature (0–2). `None` leaves the provider default.
    pub temperature: Option<f32>,
    /// Nucleus sampling threshold. `None` leaves the provider default.
    pub top_p: Option<f32>,
    /// Maximum tokens the model should generate.
    pub max_tokens: Option<usize>,
    /// Strings that cause the model to stop generating.
    pub stop: Vec<String>,
}
/// Request to generate embeddings.
///
/// Carries a batch of text inputs; the response ([`EmbeddingResponse`])
/// yields one vector per input.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::EmbeddingRequest;
/// let req = EmbeddingRequest {
///     model: "text-embedding-3-small".into(),
///     inputs: vec!["hello".into()],
/// };
/// ```
#[derive(Debug, Clone)]
pub struct EmbeddingRequest {
    /// Embedding model identifier.
    pub model: String,
    /// Text inputs to embed.
    pub inputs: Vec<String>,
}
/// Response from an embedding request.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::{EmbeddingResponse, Usage};
/// let resp = EmbeddingResponse {
///     embeddings: vec![vec![0.1, 0.2]],
///     usage: Usage::default(),
/// };
/// assert_eq!(resp.embeddings.len(), 1);
/// ```
#[derive(Debug, Clone)]
pub struct EmbeddingResponse {
    /// One float vector per input.
    pub embeddings: Vec<Vec<f32>>,
    /// Token usage.
    pub usage: Usage,
}
/// A streaming chunk produced by [`Provider::complete_stream`](super::Provider::complete_stream).
///
/// A well-formed stream ends with [`StreamChunk::Done`]; [`StreamChunk::Error`]
/// reports a recoverable problem without terminating the stream.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::StreamChunk;
/// let chunk = StreamChunk::Text("hello".into());
/// assert!(matches!(chunk, StreamChunk::Text(_)));
/// ```
#[derive(Debug, Clone)]
pub enum StreamChunk {
    /// Incremental text delta.
    Text(String),
    /// Beginning of a tool call.
    ToolCallStart { id: String, name: String },
    /// Partial tool-call arguments, keyed to the call's `id`.
    ToolCallDelta { id: String, arguments_delta: String },
    /// End of a tool call.
    ToolCallEnd { id: String },
    /// Stream finished. `usage` is present only if the provider reported it.
    Done { usage: Option<Usage> },
    /// Recoverable error (stream continues).
    Error(String),
}
223/// Token usage statistics.
224///
225/// # Examples
226///
227/// ```rust
228/// use codetether_agent::provider::Usage;
229/// let usage = Usage { prompt_tokens: 10, completion_tokens: 5, ..Usage::default() };
230/// assert_eq!(usage.total_tokens, 0); // default
231/// ```
232#[derive(Debug, Clone, Default, Serialize, Deserialize)]
233pub struct Usage {
234    /// Tokens in the prompt.
235    pub prompt_tokens: usize,
236    /// Tokens in the completion.
237    pub completion_tokens: usize,
238    /// Total (prompt + completion).
239    pub total_tokens: usize,
240    /// Prompt-cache read hits (Anthropic / Bedrock).
241    pub cache_read_tokens: Option<usize>,
242    /// Prompt-cache write misses.
243    pub cache_write_tokens: Option<usize>,
244}
245
/// Response from a completion request.
///
/// # Examples
///
/// ```rust
/// use codetether_agent::provider::{CompletionResponse, FinishReason, Message, Role, ContentPart, Usage};
/// let resp = CompletionResponse {
///     message: Message { role: Role::Assistant, content: vec![] },
///     usage: Usage::default(),
///     finish_reason: FinishReason::Stop,
/// };
/// assert_eq!(resp.finish_reason, FinishReason::Stop);
/// ```
#[derive(Debug, Clone)]
pub struct CompletionResponse {
    /// The model's reply message.
    pub message: Message,
    /// Token usage for this request.
    pub usage: Usage,
    /// Why the model stopped generating.
    pub finish_reason: FinishReason,
}
/// Reason the model stopped generating.
///
/// Serialized in `snake_case` (e.g. `FinishReason::ToolCalls` → `"tool_calls"`).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    /// Natural stop or stop sequence hit.
    Stop,
    /// Hit the `max_tokens` limit.
    Length,
    /// The model requested tool execution.
    ToolCalls,
    /// Content was filtered by safety systems.
    ContentFilter,
    /// An error occurred.
    Error,
}