//! openai_tools/responses/response.rs — response-side data model for the OpenAI Responses API.

1use crate::common::{structured_output::Schema, tool::Tool, usage::Usage};
2use serde::{Deserialize, Serialize};
3use serde_json::Value;
4use std::collections::HashMap;
5
/// Content within a response output.
///
/// Represents textual content returned by the AI, including any annotations or log probabilities.
/// All fields are optional so that partial or variant API payloads still deserialize.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Content {
    /// The type of content; `Response::output_text` selects entries whose type is "output_text"
    #[serde(rename = "type")]
    pub type_name: Option<String>,
    /// The actual text content
    pub text: Option<String>,
    /// Any annotations associated with the content
    // NOTE(review): the live API returns annotation *objects*; `Vec<String>` may fail to
    // deserialize non-empty annotations — confirm against an actual response payload.
    pub annotations: Option<Vec<String>>,
    /// Log probabilities for the content tokens
    // NOTE(review): logprobs are normally structured (token/logprob pairs), not plain
    // strings — verify this type against the API before relying on it.
    pub logprobs: Option<Vec<String>>,
}
21
/// A single result returned by a file-search tool call.
///
/// Appears in [`Output::results`] for outputs whose type is `file_search_call`.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct FileSearchCallResult {
    /// Set of 16 key-value pairs that can be attached to an object
    pub attributes: Option<HashMap<String, String>>,
    /// The unique ID of the file
    pub file_id: Option<String>,
    /// The name of the file
    pub filename: Option<String>,
    /// The relevance score of the file - a value between 0 and 1
    pub score: Option<f64>,
    /// The text that was retrieved from the file
    pub text: Option<String>,
}
35
/// Individual output item from the AI response.
///
/// This struct is a union over every output variant the API can return: text
/// messages, function calls, file-search calls, web-search calls, computer calls,
/// and reasoning items. Which fields are populated depends on `type_name`;
/// everything is `Option` so any variant deserializes into the same shape.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Output {
    /// Unique identifier for this output
    pub id: Option<String>,
    /// The type of output: "message", "function_call", "file_search_call",
    /// "web_search_call", "computer_call", "reasoning", etc.
    #[serde(rename = "type")]
    pub type_name: Option<String>,
    /// The role (e.g., "assistant") for message outputs
    pub role: Option<String>,
    /// Status of the output
    pub status: Option<String>,
    /// Text content (for "message" outputs; consumed by `Response::output_text`)
    pub content: Option<Vec<Content>>,
    /// Function arguments as a JSON-encoded string (for function_call outputs)
    pub arguments: Option<String>,
    /// Unique identifier for the function call
    pub call_id: Option<String>,
    /// Function name (for function_call outputs)
    pub name: Option<String>,
    /// The queries used to search for files (for file_search_call outputs)
    pub queries: Option<Vec<String>>,
    /// The results of the file search tool call (for file_search_call outputs)
    pub results: Option<Vec<FileSearchCallResult>>,
    // TODO: implement the action structure (kept as raw JSON until then)
    /// An object describing the specific action taken in this web search call (for web_search_call outputs)
    pub action: Option<Value>,
    // TODO: implement the tool_call structure (kept as raw JSON until then)
    /// The pending safety checks for the computer call (for computer_call outputs)
    pub pending_safety_checks: Option<Value>,
    /// Reasoning summary content (for reasoning outputs)
    pub summary: Option<Value>,
    /// The encrypted content of the reasoning item (for reasoning outputs)
    pub encrypted_content: Option<String>,
    // TODO: implement Image generation call
    // TODO: implement Code interpreter tool call
    // TODO: implement Local shell call
    // TODO: implement MCP tool call
    // TODO: implement MCP list tools
    // TODO: implement MCP approval request
    // TODO: implement Custom tool call
}
83
/// Reasoning information from the AI model.
///
/// Provides insight into the AI's reasoning process and effort level.
/// Distinct from per-output reasoning items ([`Output`] with type "reasoning");
/// this is the response-level reasoning configuration/summary.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Reasoning {
    /// The effort level used in reasoning (e.g. "low" / "medium" / "high" — TODO confirm values)
    pub effort: Option<String>,
    /// Summary of the reasoning process
    pub summary: Option<String>,
}
94
/// Text formatting configuration for responses.
///
/// Wraps the structured-output [`Schema`] that the response's text conforms to.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Text {
    /// Format configuration (structured-output schema)
    pub format: Schema,
}
101
/// Complete response from the OpenAI Responses API.
///
/// This struct contains all the information returned by the API, including the AI's outputs,
/// usage statistics, and metadata about the request processing. Every field is `Option`
/// so partially populated payloads deserialize without error.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Response {
    /// Unique identifier for this response
    pub id: Option<String>,
    /// Object type, typically "response"
    pub object: Option<String>,
    /// Unix timestamp when the response was created
    pub created_at: Option<usize>,
    /// Status of the response processing
    pub status: Option<String>,
    /// Whether the response was processed in the background
    pub background: Option<bool>,
    /// Error message if the request failed
    // NOTE(review): the API documents `error` as an object (code + message); a plain
    // String may fail to deserialize real error payloads — verify.
    pub error: Option<String>,
    /// Details about incomplete responses
    // NOTE(review): `incomplete_details` is documented as an object with a `reason`
    // field — same concern as `error` above.
    pub incomplete_details: Option<String>,
    /// Instructions that were used for this response
    pub instructions: Option<String>,
    /// Maximum number of output tokens that were allowed
    pub max_output_tokens: Option<usize>,
    /// Maximum number of tool calls that were allowed
    pub max_tool_calls: Option<usize>,
    /// The model that was used to generate the response
    pub model: Option<String>,
    /// List of outputs from the AI (text, function calls, etc.);
    /// see [`Response::output_text`] for a convenience accessor
    pub output: Option<Vec<Output>>,
    /// Whether parallel tool calls were enabled
    pub parallel_tool_calls: Option<bool>,
    /// ID of the previous response in a conversation chain
    pub previous_response_id: Option<String>,
    /// Reasoning information from the AI
    pub reasoning: Option<Reasoning>,
    /// Service tier used for processing
    pub service_tier: Option<String>,
    /// Whether the response should be stored
    pub store: Option<bool>,
    /// Temperature setting used for generation
    pub temperature: Option<f64>,
    /// Text formatting configuration
    pub text: Option<Text>,
    /// Tool choice configuration that was used
    pub tool_choice: Option<String>,
    /// Tools that were available during generation
    pub tools: Option<Vec<Tool>>,
    /// Number of top log probabilities returned
    pub top_logprobs: Option<usize>,
    /// Top-p (nucleus sampling) parameter used
    pub top_p: Option<f64>,
    /// Truncation strategy that was applied
    pub truncation: Option<String>,
    /// Token usage statistics
    pub usage: Option<Usage>,
    /// User identifier associated with the request
    pub user: Option<String>,
    /// Additional metadata as key-value pairs
    pub metadata: Option<HashMap<String, String>>,
}
163
164impl Response {
165    pub fn output_text(&self) -> Option<String> {
166        let output = if let Some(outputs) = &self.output {
167            let outputs =
168                outputs.iter().filter_map(|o| if o.type_name.as_deref() == Some("message") { Some(o.clone()) } else { None }).collect::<Vec<_>>();
169            if outputs.is_empty() {
170                tracing::warn!("No message outputs found in response");
171                return None;
172            }
173            outputs.first().unwrap().clone()
174        } else {
175            return None;
176        };
177        let content = if let Some(contents) = &output.content {
178            let contents = contents
179                .iter()
180                .filter_map(|c| if c.type_name.as_deref() == Some("output_text") { Some(c.clone()) } else { None })
181                .collect::<Vec<_>>();
182            if contents.is_empty() {
183                tracing::warn!("No output_text contents found in message output");
184                return None;
185            }
186            contents.first().unwrap().clone()
187        } else {
188            return None;
189        };
190        content.text.clone()
191    }
192}