simple_agent_type/response.rs

1//! Response types for LLM completions.
2//!
3//! Provides OpenAI-compatible response structures.
4
5use crate::coercion::CoercionFlag;
6use crate::message::Message;
7use serde::{Deserialize, Serialize};
8
/// Metadata about healing/coercion applied to a response.
///
/// When native structured output parsing fails and healing is enabled,
/// this metadata tracks all transformations applied to recover the response.
/// Attached to [`CompletionResponse::healing_metadata`] by the healing layer.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HealingMetadata {
    /// All coercion flags applied during healing
    pub flags: Vec<CoercionFlag>,
    /// Confidence score (0.0-1.0) of the healed response.
    /// Clamped into that range by [`HealingMetadata::new`].
    pub confidence: f32,
    /// The original parsing error that triggered healing
    pub original_error: String,
}
22
23impl HealingMetadata {
24    /// Create new healing metadata.
25    pub fn new(flags: Vec<CoercionFlag>, confidence: f32, original_error: String) -> Self {
26        Self {
27            flags,
28            confidence: confidence.clamp(0.0, 1.0),
29            original_error,
30        }
31    }
32
33    /// Check if any major coercions were applied.
34    pub fn has_major_coercions(&self) -> bool {
35        self.flags.iter().any(|f| f.is_major())
36    }
37
38    /// Check if confidence meets a threshold.
39    pub fn is_confident(&self, threshold: f32) -> bool {
40        self.confidence >= threshold
41    }
42}
43
/// A completion response from an LLM provider.
///
/// Mirrors the OpenAI chat-completion response layout. The optional
/// `created`, `provider`, and `healing_metadata` fields are omitted from
/// serialized JSON when they are `None`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionResponse {
    /// Unique response identifier
    pub id: String,
    /// Model used for completion
    pub model: String,
    /// List of completion choices
    pub choices: Vec<CompletionChoice>,
    /// Token usage statistics
    pub usage: Usage,
    /// Unix timestamp of creation
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<i64>,
    /// Provider that generated this response
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    /// Healing metadata (present if response was healed after parse failure)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub healing_metadata: Option<HealingMetadata>,
}
65
66impl CompletionResponse {
67    /// Get the content of the first choice (convenience method).
68    ///
69    /// # Example
70    /// ```
71    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason};
72    /// use simple_agent_type::message::Message;
73    ///
74    /// let response = CompletionResponse {
75    ///     id: "resp_123".to_string(),
76    ///     model: "gpt-4".to_string(),
77    ///     choices: vec![CompletionChoice {
78    ///         index: 0,
79    ///         message: Message::assistant("Hello!"),
80    ///         finish_reason: FinishReason::Stop,
81    ///         logprobs: None,
82    ///     }],
83    ///     usage: Usage {
84    ///         prompt_tokens: 10,
85    ///         completion_tokens: 5,
86    ///         total_tokens: 15,
87    ///     },
88    ///     created: None,
89    ///     provider: None,
90    ///     healing_metadata: None,
91    /// };
92    ///
93    /// assert_eq!(response.content(), Some("Hello!"));
94    /// ```
95    pub fn content(&self) -> Option<&str> {
96        self.choices
97            .first()
98            .map(|choice| choice.message.content.as_str())
99    }
100
101    /// Get the first choice.
102    pub fn first_choice(&self) -> Option<&CompletionChoice> {
103        self.choices.first()
104    }
105
106    /// Check if this response was healed after a parsing failure.
107    ///
108    /// Returns `true` if healing metadata is present, indicating the response
109    /// required transformation to be parseable.
110    ///
111    /// # Example
112    /// ```
113    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason, HealingMetadata};
114    /// use simple_agent_type::message::Message;
115    /// use simple_agent_type::coercion::CoercionFlag;
116    ///
117    /// let mut response = CompletionResponse {
118    ///     id: "resp_123".to_string(),
119    ///     model: "gpt-4".to_string(),
120    ///     choices: vec![],
121    ///     usage: Usage::new(10, 5),
122    ///     created: None,
123    ///     provider: None,
124    ///     healing_metadata: None,
125    /// };
126    ///
127    /// assert!(!response.was_healed());
128    ///
129    /// response.healing_metadata = Some(HealingMetadata::new(
130    ///     vec![CoercionFlag::StrippedMarkdown],
131    ///     0.9,
132    ///     "Parse error".to_string(),
133    /// ));
134    ///
135    /// assert!(response.was_healed());
136    /// ```
137    pub fn was_healed(&self) -> bool {
138        self.healing_metadata.is_some()
139    }
140
141    /// Get the confidence score of the response.
142    ///
143    /// Returns 1.0 if the response was not healed (perfect confidence),
144    /// otherwise returns the confidence score from healing metadata.
145    ///
146    /// # Example
147    /// ```
148    /// use simple_agent_type::response::{CompletionResponse, CompletionChoice, Usage, FinishReason, HealingMetadata};
149    /// use simple_agent_type::message::Message;
150    /// use simple_agent_type::coercion::CoercionFlag;
151    ///
152    /// let mut response = CompletionResponse {
153    ///     id: "resp_123".to_string(),
154    ///     model: "gpt-4".to_string(),
155    ///     choices: vec![],
156    ///     usage: Usage::new(10, 5),
157    ///     created: None,
158    ///     provider: None,
159    ///     healing_metadata: None,
160    /// };
161    ///
162    /// assert_eq!(response.confidence(), 1.0);
163    ///
164    /// response.healing_metadata = Some(HealingMetadata::new(
165    ///     vec![CoercionFlag::StrippedMarkdown],
166    ///     0.8,
167    ///     "Parse error".to_string(),
168    /// ));
169    ///
170    /// assert_eq!(response.confidence(), 0.8);
171    /// ```
172    pub fn confidence(&self) -> f32 {
173        self.healing_metadata
174            .as_ref()
175            .map(|m| m.confidence)
176            .unwrap_or(1.0)
177    }
178}
179
/// A single completion choice.
///
/// A response may carry several alternative completions; each one records
/// its position in `index`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionChoice {
    /// Index of this choice
    pub index: u32,
    /// The message content
    pub message: Message,
    /// Why the completion finished
    pub finish_reason: FinishReason,
    /// Log probabilities (if requested); omitted from JSON when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<serde_json::Value>,
}
193
/// Reason why a completion finished.
///
/// Serialized as snake_case strings: `"stop"`, `"length"`,
/// `"content_filter"`, and `"tool_calls"`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    /// Natural stop point reached
    Stop,
    /// Maximum token length reached
    Length,
    /// Content filtered by provider
    ContentFilter,
    /// Tool/function calls generated
    ToolCalls,
}
207
/// Token usage statistics.
///
/// When built via [`Usage::new`], `total_tokens` is derived from the
/// prompt and completion counts.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Usage {
    /// Tokens in the prompt
    pub prompt_tokens: u32,
    /// Tokens in the completion
    pub completion_tokens: u32,
    /// Total tokens used
    pub total_tokens: u32,
}
218
219impl Usage {
220    /// Create a new Usage with calculated total.
221    pub fn new(prompt_tokens: u32, completion_tokens: u32) -> Self {
222        Self {
223            prompt_tokens,
224            completion_tokens,
225            total_tokens: prompt_tokens + completion_tokens,
226        }
227    }
228}
229
/// A chunk of a streaming completion response.
///
/// Shares `id`/`model` with the overall response; content arrives
/// incrementally through `choices`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CompletionChunk {
    /// Unique response identifier
    pub id: String,
    /// Model used for completion
    pub model: String,
    /// List of choice deltas
    pub choices: Vec<ChoiceDelta>,
    /// Unix timestamp of creation; omitted from JSON when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<i64>,
}
243
/// A delta in a streaming choice.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ChoiceDelta {
    /// Index of this choice
    pub index: u32,
    /// The message delta
    pub delta: MessageDelta,
    /// Why the completion finished (only in final chunk); omitted from
    /// JSON when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub finish_reason: Option<FinishReason>,
}
255
/// Incremental message content in a stream.
///
/// Both fields are optional and skipped during serialization when `None`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MessageDelta {
    /// Role (only in first chunk)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role: Option<crate::message::Role>,
    /// Incremental content
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
}
266
#[cfg(test)]
mod tests {
    use super::*;

    /// Baseline response with the given choices; remaining fields use the
    /// values shared by most tests below.
    fn make_response(choices: Vec<CompletionChoice>) -> CompletionResponse {
        CompletionResponse {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices,
            usage: Usage::new(10, 5),
            created: None,
            provider: None,
            healing_metadata: None,
        }
    }

    /// A single assistant choice saying "Hello!".
    fn hello_choice() -> CompletionChoice {
        CompletionChoice {
            index: 0,
            message: Message::assistant("Hello!"),
            finish_reason: FinishReason::Stop,
            logprobs: None,
        }
    }

    #[test]
    fn test_completion_response_content() {
        let response = CompletionResponse {
            created: Some(1234567890),
            provider: Some("openai".to_string()),
            ..make_response(vec![hello_choice()])
        };

        assert_eq!(response.content(), Some("Hello!"));
        assert_eq!(response.first_choice().unwrap().index, 0);
        assert!(!response.was_healed());
        assert_eq!(response.confidence(), 1.0);
    }

    #[test]
    fn test_completion_response_empty_choices() {
        let response = CompletionResponse {
            usage: Usage::new(10, 0),
            ..make_response(vec![])
        };

        assert_eq!(response.content(), None);
        assert_eq!(response.first_choice(), None);
    }

    #[test]
    fn test_usage_calculation() {
        let usage = Usage::new(100, 50);
        assert_eq!(usage.prompt_tokens, 100);
        assert_eq!(usage.completion_tokens, 50);
        assert_eq!(usage.total_tokens, 150);
    }

    #[test]
    fn test_finish_reason_serialization() {
        // Table-driven: every variant maps to its snake_case JSON string.
        let expectations = [
            (FinishReason::Stop, "\"stop\""),
            (FinishReason::Length, "\"length\""),
            (FinishReason::ContentFilter, "\"content_filter\""),
            (FinishReason::ToolCalls, "\"tool_calls\""),
        ];
        for (reason, expected) in expectations {
            assert_eq!(serde_json::to_string(&reason).unwrap(), expected);
        }
    }

    #[test]
    fn test_response_serialization() {
        let original = make_response(vec![hello_choice()]);

        let json = serde_json::to_string(&original).unwrap();
        let roundtripped: CompletionResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(original, roundtripped);
    }

    #[test]
    fn test_streaming_chunk() {
        let chunk = CompletionChunk {
            id: "resp_123".to_string(),
            model: "gpt-4".to_string(),
            choices: vec![ChoiceDelta {
                index: 0,
                delta: MessageDelta {
                    role: Some(crate::message::Role::Assistant),
                    content: Some("Hello".to_string()),
                },
                finish_reason: None,
            }],
            created: Some(1234567890),
        };

        let encoded = serde_json::to_string(&chunk).unwrap();
        let decoded: CompletionChunk = serde_json::from_str(&encoded).unwrap();
        assert_eq!(chunk, decoded);
    }

    #[test]
    fn test_message_delta() {
        let delta = MessageDelta {
            role: Some(crate::message::Role::Assistant),
            content: Some("Hi".to_string()),
        };

        let value = serde_json::to_value(&delta).unwrap();
        assert_eq!(value.get("role").and_then(|v| v.as_str()), Some("assistant"));
        assert_eq!(value.get("content").and_then(|v| v.as_str()), Some("Hi"));
    }

    #[test]
    fn test_optional_fields_not_serialized() {
        let value = serde_json::to_value(make_response(vec![])).unwrap();

        // All `None` optionals must be dropped from the JSON object.
        for key in ["created", "provider", "healing_metadata"] {
            assert!(value.get(key).is_none(), "{} should be omitted", key);
        }
    }

    #[test]
    fn test_healing_metadata() {
        use crate::coercion::CoercionFlag;

        let minor = HealingMetadata::new(
            vec![CoercionFlag::StrippedMarkdown],
            0.9,
            "Parse error".to_string(),
        );

        assert_eq!(minor.confidence, 0.9);
        assert!(!minor.has_major_coercions());
        assert!(minor.is_confident(0.8));
        assert!(!minor.is_confident(0.95));

        let major = HealingMetadata::new(
            vec![CoercionFlag::TruncatedJson],
            0.7,
            "Parse error".to_string(),
        );

        assert!(major.has_major_coercions());
    }

    #[test]
    fn test_healing_metadata_confidence_clamped() {
        let too_high = HealingMetadata::new(vec![], 1.5, "error".to_string());
        assert_eq!(too_high.confidence, 1.0);

        let too_low = HealingMetadata::new(vec![], -0.5, "error".to_string());
        assert_eq!(too_low.confidence, 0.0);
    }

    #[test]
    fn test_response_with_healing_metadata() {
        use crate::coercion::CoercionFlag;

        let metadata = HealingMetadata::new(
            vec![
                CoercionFlag::StrippedMarkdown,
                CoercionFlag::FixedTrailingComma,
            ],
            0.85,
            "JSON parse error".to_string(),
        );

        let response = CompletionResponse {
            healing_metadata: Some(metadata),
            ..make_response(vec![])
        };

        assert!(response.was_healed());
        assert_eq!(response.confidence(), 0.85);

        // Healing metadata must survive a serialization round-trip.
        let json = serde_json::to_string(&response).unwrap();
        let parsed: CompletionResponse = serde_json::from_str(&json).unwrap();
        assert_eq!(response, parsed);
        assert!(parsed.was_healed());
        assert_eq!(parsed.confidence(), 0.85);
    }
}
471}