// simple_agent_type/response.rs
//! Response types for LLM completions.
//!
//! Provides OpenAI-compatible response structures.

use crate::coercion::CoercionFlag;
use crate::message::Message;
use serde::{Deserialize, Serialize};

/// Metadata about healing/coercion applied to a response.
///
/// When native structured output parsing fails and healing is enabled,
/// this metadata tracks all transformations applied to recover the response.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HealingMetadata {
    /// All coercion flags applied during healing, in application order.
    pub flags: Vec<CoercionFlag>,
    /// Confidence score of the healed response; kept within 0.0..=1.0
    /// by [`HealingMetadata::new`].
    pub confidence: f32,
    /// The original parsing error message that triggered healing.
    pub original_error: String,
}

23impl HealingMetadata {
24    /// Create new healing metadata.
25    pub fn new(flags: Vec<CoercionFlag>, confidence: f32, original_error: String) -> Self {
26        Self {
27            flags,
28            confidence: confidence.clamp(0.0, 1.0),
29            original_error,
30        }
31    }
32
33    /// Check if any major coercions were applied.
34    pub fn has_major_coercions(&self) -> bool {
35        self.flags.iter().any(|f| f.is_major())
36    }
37
38    /// Check if confidence meets a threshold.
39    pub fn is_confident(&self, threshold: f32) -> bool {
40        self.confidence >= threshold
41    }
42}
43
/// A completion response from an LLM provider.
///
/// Mirrors the OpenAI-compatible response shape (id / model / choices /
/// usage), extended with optional provider and healing metadata.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// Unique response identifier.
    pub id: String,
    /// Model used for completion.
    pub model: String,
    /// List of completion choices (may be empty).
    pub choices: Vec<CompletionChoice>,
    /// Token usage statistics.
    pub usage: Usage,
    /// Unix timestamp of creation (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<i64>,
    /// Provider that generated this response (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    /// Healing metadata, present only if the response was healed after a
    /// parse failure (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub healing_metadata: Option<HealingMetadata>,
}

66impl CompletionResponse {
67    /// Get the content of the first choice (convenience method).
68    ///
69    /// # Example
70    /// ```
71    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason};
72    /// use simple_agent_type::message::Message;
73    ///
74    /// let response = CompletionResponse {
75    ///     id: "resp_123".to_string(),
76    ///     model: "gpt-4".to_string(),
77    ///     choices: vec![CompletionChoice {
78    ///         index: 0,
79    ///         message: Message::assistant("Hello!"),
80    ///         finish_reason: FinishReason::Stop,
81    ///         logprobs: None,
82    ///     }],
83    ///     usage: Usage {
84    ///         prompt_tokens: 10,
85    ///         completion_tokens: 5,
86    ///         total_tokens: 15,
87    ///     },
88    ///     created: None,
89    ///     provider: None,
90    ///     healing_metadata: None,
91    /// };
92    ///
93    /// assert_eq!(response.content(), Some("Hello!"));
94    /// ```
95    pub fn content(&self) -> Option<&str> {
96        self.choices
97            .first()
98            .map(|choice| choice.message.content.as_str())
99    }
100
101    /// Get the first choice.
102    pub fn first_choice(&self) -> Option<&CompletionChoice> {
103        self.choices.first()
104    }
105
106    /// Check if this response was healed after a parsing failure.
107    ///
108    /// Returns `true` if healing metadata is present, indicating the response
109    /// required transformation to be parseable.
110    ///
111    /// # Example
112    /// ```
113    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason, HealingMetadata};
114    /// use simple_agent_type::message::Message;
115    /// use simple_agent_type::coercion::CoercionFlag;
116    ///
117    /// let mut response = CompletionResponse {
118    ///     id: "resp_123".to_string(),
119    ///     model: "gpt-4".to_string(),
120    ///     choices: vec![],
121    ///     usage: Usage::new(10, 5),
122    ///     created: None,
123    ///     provider: None,
124    ///     healing_metadata: None,
125    /// };
126    ///
127    /// assert!(!response.was_healed());
128    ///
129    /// response.healing_metadata = Some(HealingMetadata::new(
130    ///     vec![CoercionFlag::StrippedMarkdown],
131    ///     0.9,
132    ///     "Parse error".to_string(),
133    /// ));
134    ///
135    /// assert!(response.was_healed());
136    /// ```
137    pub fn was_healed(&self) -> bool {
138        self.healing_metadata.is_some()
139    }
140
141    /// Get the confidence score of the response.
142    ///
143    /// Returns 1.0 if the response was not healed (perfect confidence),
144    /// otherwise returns the confidence score from healing metadata.
145    ///
146    /// # Example
147    /// ```
148    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason, HealingMetadata};
149    /// use simple_agent_type::message::Message;
150    /// use simple_agent_type::coercion::CoercionFlag;
151    ///
152    /// let mut response = CompletionResponse {
153    ///     id: "resp_123".to_string(),
154    ///     model: "gpt-4".to_string(),
155    ///     choices: vec![],
156    ///     usage: Usage::new(10, 5),
157    ///     created: None,
158    ///     provider: None,
159    ///     healing_metadata: None,
160    /// };
161    ///
162    /// assert_eq!(response.confidence(), 1.0);
163    ///
164    /// response.healing_metadata = Some(HealingMetadata::new(
165    ///     vec![CoercionFlag::StrippedMarkdown],
166    ///     0.8,
167    ///     "Parse error".to_string(),
168    /// ));
169    ///
170    /// assert_eq!(response.confidence(), 0.8);
171    /// ```
172    pub fn confidence(&self) -> f32 {
173        self.healing_metadata
174            .as_ref()
175            .map(|m| m.confidence)
176            .unwrap_or(1.0)
177    }
178}
179
/// A single completion choice.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionChoice {
    /// Index of this choice within the response's `choices` list.
    pub index: u32,
    /// The message content of this choice.
    pub message: Message,
    /// Why the completion finished.
    pub finish_reason: FinishReason,
    /// Log probabilities, if requested (omitted from JSON when `None`).
    /// Kept as raw JSON since the shape is provider-defined.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<serde_json::Value>,
}

/// Reason why a completion finished.
///
/// Serialized in snake_case to match the OpenAI-compatible wire format
/// (e.g. `"content_filter"`, `"tool_calls"`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    /// Natural stop point reached.
    Stop,
    /// Maximum token length reached.
    Length,
    /// Content filtered by provider.
    ContentFilter,
    /// Tool/function calls generated.
    ToolCalls,
}

208impl FinishReason {
209    /// Returns this finish reason as its canonical snake_case string value.
210    pub fn as_str(self) -> &'static str {
211        match self {
212            Self::Stop => "stop",
213            Self::Length => "length",
214            Self::ContentFilter => "content_filter",
215            Self::ToolCalls => "tool_calls",
216        }
217    }
218}
219
/// Token usage statistics.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Usage {
    /// Tokens in the prompt.
    pub prompt_tokens: u32,
    /// Tokens in the completion.
    pub completion_tokens: u32,
    /// Total tokens used (prompt + completion when built via [`Usage::new`]).
    pub total_tokens: u32,
}

231impl Usage {
232    /// Create a new Usage with calculated total.
233    pub fn new(prompt_tokens: u32, completion_tokens: u32) -> Self {
234        Self {
235            prompt_tokens,
236            completion_tokens,
237            total_tokens: prompt_tokens + completion_tokens,
238        }
239    }
240}
241
/// A chunk of a streaming completion response.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionChunk {
    /// Unique response identifier (stable across chunks of one stream).
    pub id: String,
    /// Model used for completion.
    pub model: String,
    /// List of choice deltas carried by this chunk.
    pub choices: Vec<ChoiceDelta>,
    /// Unix timestamp of creation (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<i64>,
    /// Optional token usage for this chunk — typically only on the final
    /// chunk (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<Usage>,
}

/// A delta in a streaming choice.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ChoiceDelta {
    /// Index of this choice within the stream.
    pub index: u32,
    /// The incremental message content for this chunk.
    pub delta: MessageDelta,
    /// Why the completion finished — only present in the final chunk
    /// (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finish_reason: Option<FinishReason>,
}

/// Incremental message content in a stream.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MessageDelta {
    /// Role — only present in the first chunk of a stream
    /// (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role: Option<crate::message::Role>,
    /// Incremental content text (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Optional incremental reasoning/thinking content
    /// (omitted from JSON when `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning_content: Option<String>,
}

#[cfg(test)]
mod tests {
    use super::*;

    // Accessors on a fully-populated, non-healed response.
    #[test]
    fn test_completion_response_content() {
        let response = CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![CompletionChoice {
                index: 0,
                message: Message::assistant("Hello!"),
                finish_reason: FinishReason::Stop,
                logprobs: None,
            }],
            usage: Usage::new(10, 5),
            created: Some(1234567890),
            provider: Some("openai".to_string()),
            healing_metadata: None,
        };

        assert_eq!(response.content(), Some("Hello!"));
        assert_eq!(response.first_choice().unwrap().index, 0);
        assert!(!response.was_healed());
        // Un-healed responses report perfect confidence.
        assert_eq!(response.confidence(), 1.0);
    }

    // Accessors must return None (not panic) when choices is empty.
    #[test]
    fn test_completion_response_empty_choices() {
        let response = CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![],
            usage: Usage::new(10, 0),
            created: None,
            provider: None,
            healing_metadata: None,
        };

        assert_eq!(response.content(), None);
        assert_eq!(response.first_choice(), None);
    }

    // Usage::new derives total_tokens from the two parts.
    #[test]
    fn test_usage_calculation() {
        let usage = Usage::new(100, 50);
        assert_eq!(usage.prompt_tokens, 100);
        assert_eq!(usage.completion_tokens, 50);
        assert_eq!(usage.total_tokens, 150);
    }

    // Pins the snake_case wire format of every FinishReason variant.
    #[test]
    fn test_finish_reason_serialization() {
        let json = serde_json::to_string(&FinishReason::Stop).unwrap();
        assert_eq!(json, "\"stop\"");

        let json = serde_json::to_string(&FinishReason::Length).unwrap();
        assert_eq!(json, "\"length\"");

        let json = serde_json::to_string(&FinishReason::ContentFilter).unwrap();
        assert_eq!(json, "\"content_filter\"");

        let json = serde_json::to_string(&FinishReason::ToolCalls).unwrap();
        assert_eq!(json, "\"tool_calls\"");
    }

    // Round-trip: serialize then deserialize yields an equal response.
    #[test]
    fn test_response_serialization() {
        let response = CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![CompletionChoice {
                index: 0,
                message: Message::assistant("Hello!"),
                finish_reason: FinishReason::Stop,
                logprobs: None,
            }],
            usage: Usage::new(10, 5),
            created: None,
            provider: None,
            healing_metadata: None,
        };

        let json = serde_json::to_string(&response).unwrap();
        let parsed: CompletionResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(response, parsed);
    }

    // Round-trip for a streaming chunk with a role-bearing first delta.
    #[test]
    fn test_streaming_chunk() {
        let chunk = CompletionChunk {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![ChoiceDelta {
                index: 0,
                delta: MessageDelta {
                    role: Some(crate::message::Role::Assistant),
                    content: Some("Hello".to_string()),
                    reasoning_content: None,
                },
                finish_reason: None,
            }],
            created: Some(1234567890),
            usage: None,
        };

        let json = serde_json::to_string(&chunk).unwrap();
        let parsed: CompletionChunk = serde_json::from_str(&json).unwrap();
        assert_eq!(chunk, parsed);
    }

    // MessageDelta serializes role/content as plain JSON strings.
    #[test]
    fn test_message_delta() {
        let delta = MessageDelta {
            role: Some(crate::message::Role::Assistant),
            content: Some("Hi".to_string()),
            reasoning_content: None,
        };

        let json = serde_json::to_value(&delta).unwrap();
        assert_eq!(json.get("role").and_then(|v| v.as_str()), Some("assistant"));
        assert_eq!(json.get("content").and_then(|v| v.as_str()), Some("Hi"));
    }

    // skip_serializing_if must drop None optional fields from the JSON.
    #[test]
    fn test_optional_fields_not_serialized() {
        let response = CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![],
            usage: Usage::new(10, 5),
            created: None,
            provider: None,
            healing_metadata: None,
        };

        let json = serde_json::to_value(&response).unwrap();
        assert!(json.get("created").is_none());
        assert!(json.get("provider").is_none());
        assert!(json.get("healing_metadata").is_none());
    }

    // Threshold checks and major-coercion detection.
    #[test]
    fn test_healing_metadata() {
        use crate::coercion::CoercionFlag;

        let metadata = HealingMetadata::new(
            vec![CoercionFlag::StrippedMarkdown],
            0.9,
            "Parse error".to_string(),
        );

        assert_eq!(metadata.confidence, 0.9);
        assert!(!metadata.has_major_coercions());
        // is_confident is an inclusive >= comparison.
        assert!(metadata.is_confident(0.8));
        assert!(!metadata.is_confident(0.95));

        let major_metadata = HealingMetadata::new(
            vec![CoercionFlag::TruncatedJson],
            0.7,
            "Parse error".to_string(),
        );

        assert!(major_metadata.has_major_coercions());
    }

    // Out-of-range confidence values must be clamped into 0.0..=1.0.
    #[test]
    fn test_healing_metadata_confidence_clamped() {
        let metadata = HealingMetadata::new(vec![], 1.5, "error".to_string());
        assert_eq!(metadata.confidence, 1.0);

        let metadata = HealingMetadata::new(vec![], -0.5, "error".to_string());
        assert_eq!(metadata.confidence, 0.0);
    }

    // Healed response: accessors reflect metadata and it survives round-trip.
    #[test]
    fn test_response_with_healing_metadata() {
        use crate::coercion::CoercionFlag;

        let metadata = HealingMetadata::new(
            vec![
                CoercionFlag::StrippedMarkdown,
                CoercionFlag::FixedTrailingComma,
            ],
            0.85,
            "JSON parse error".to_string(),
        );

        let response = CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![],
            usage: Usage::new(10, 5),
            created: None,
            provider: None,
            healing_metadata: Some(metadata),
        };

        assert!(response.was_healed());
        assert_eq!(response.confidence(), 0.85);

        let json = serde_json::to_string(&response).unwrap();
        let parsed: CompletionResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(response, parsed);
        assert!(parsed.was_healed());
        assert_eq!(parsed.confidence(), 0.85);
    }
}