//! `openai_tools/responses/response.rs`
//!
//! Deserialization types for responses returned by the OpenAI Responses API.
1use crate::common::{structured_output::Schema, tool::Tool, usage::Usage};
2use serde::{Deserialize, Serialize};
3use serde_json::Value;
4use std::collections::HashMap;
5
/// Content within a response output.
///
/// Represents textual content returned by the AI, including any annotations or log probabilities.
/// Every field is optional so that partial payloads deserialize without error.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Content {
    /// The content type tag from the API; text payloads use "output_text"
    /// (this is what [`Response::output_text`] filters on)
    #[serde(rename = "type")]
    pub type_name: Option<String>,
    /// The actual text content
    pub text: Option<String>,
    /// Any annotations associated with the content
    // NOTE(review): modeled as plain strings; the API may return structured
    // annotation objects — confirm against real payloads.
    pub annotations: Option<Vec<String>>,
    /// Log probabilities for the content tokens
    // NOTE(review): modeled as plain strings; logprobs are usually structured — confirm.
    pub logprobs: Option<Vec<String>>,
}
21
/// A single result entry from a file search tool call.
///
/// Appears in [`Output::results`] when the output's type is "file_search_call".
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct FileSearchCallResult {
    /// Set of 16 key-value pairs that can be attached to an object
    pub attributes: Option<HashMap<String, String>>,
    /// The unique ID of the file
    pub file_id: Option<String>,
    /// The name of the file
    pub filename: Option<String>,
    /// The relevance score of the file - a value between 0 and 1
    pub score: Option<f64>,
    /// The text that was retrieved from the file
    pub text: Option<String>,
}
35
/// Individual output item from the AI response.
///
/// This can represent different types of outputs:
/// - Text responses from the AI
/// - Function calls that should be executed
/// - Other structured outputs
///
/// All fields are optional because which ones are populated depends on `type_name`;
/// untyped (`Value`) fields are placeholders until dedicated structs exist (see TODOs).
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Output {
    /// Unique identifier for this output
    pub id: Option<String>,
    /// The type of output: "message" (text), "function_call", "file_search_call",
    /// "web_search_call", "computer_call", "reasoning", etc.
    /// ("message" is what [`Response::output_text`] filters on)
    #[serde(rename = "type")]
    pub type_name: Option<String>,
    /// The role (e.g., "assistant") for text outputs
    pub role: Option<String>,
    /// Status of the output
    pub status: Option<String>,
    /// Text content (for text outputs)
    pub content: Option<Vec<Content>>,
    /// Function arguments as JSON string (for function_call outputs)
    pub arguments: Option<String>,
    /// Unique identifier for the function call
    pub call_id: Option<String>,
    /// Function name (for function_call outputs)
    pub name: Option<String>,
    /// The queries used to search for files (for file_search_call outputs)
    pub queries: Option<Vec<String>>,
    /// The results of the file search tool call (for file_search_call outputs)
    pub results: Option<Vec<FileSearchCallResult>>,
    // TODO: implement the action structure
    /// An object describing the specific action taken in this web search call (for web_search_call outputs)
    pub action: Option<Value>,
    // TODO: implement the tool_call structure
    /// The pending safety checks for the computer call (for computer_call outputs)
    pub pending_safety_checks: Option<Value>,
    /// Reasoning summary content (for reasoning outputs)
    pub summary: Option<Value>,
    /// The encrypted content of the reasoning item (for reasoning outputs)
    pub encrypted_content: Option<String>,
    // TODO: implement Image generation call
    // TODO: implement Code interpreter tool call
    // TODO: implement Local shell call
    // TODO: implement MCP tool call
    // TODO: implement MCP list tools
    // TODO: implement MCP approval request
    // TODO: implement Custom tool call
}
83
/// Reasoning information from the AI model.
///
/// Provides insight into the AI's reasoning process and effort level.
/// Distinct from reasoning `Output` items: this is response-level metadata
/// stored in [`Response::reasoning`].
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Reasoning {
    /// The effort level used in reasoning (e.g. "low"/"medium"/"high" — TODO confirm values)
    pub effort: Option<String>,
    /// Summary of the reasoning process
    pub summary: Option<String>,
}
94
/// Text formatting configuration for responses.
///
/// Wraps the output `format` schema echoed back by the API in [`Response::text`].
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Text {
    /// Format configuration (structured-output schema)
    pub format: Schema,
}
101
/// Complete response from the OpenAI Responses API.
///
/// This struct contains all the information returned by the API, including the AI's outputs,
/// usage statistics, and metadata about the request processing. Every field is optional so
/// that partial payloads still deserialize.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Response {
    /// Unique identifier for this response
    pub id: Option<String>,
    /// Object type, typically "response"
    pub object: Option<String>,
    /// Unix timestamp when the response was created
    // NOTE(review): `usize` here vs `u64` for `completed_at` below is inconsistent;
    // both are Unix timestamps — consider unifying on `u64` in a breaking release.
    pub created_at: Option<usize>,
    /// Unix timestamp when the response completed
    pub completed_at: Option<u64>,
    /// Status of the response processing
    pub status: Option<String>,
    /// Whether the response was processed in the background
    pub background: Option<bool>,
    /// Error message if the request failed
    // NOTE(review): the API may return a structured error object rather than a
    // plain string — confirm this deserializes for failed responses.
    pub error: Option<String>,
    /// Details about incomplete responses
    pub incomplete_details: Option<String>,
    /// Instructions that were used for this response
    pub instructions: Option<String>,
    /// Maximum number of output tokens that were allowed
    pub max_output_tokens: Option<usize>,
    /// Maximum number of tool calls that were allowed
    pub max_tool_calls: Option<usize>,
    /// The model that was used to generate the response
    pub model: Option<String>,
    /// List of outputs from the AI (text, function calls, etc.)
    pub output: Option<Vec<Output>>,
    /// Whether parallel tool calls were enabled
    pub parallel_tool_calls: Option<bool>,
    /// ID of the previous response in a conversation chain
    pub previous_response_id: Option<String>,
    /// Reasoning information from the AI
    pub reasoning: Option<Reasoning>,
    /// Service tier used for processing
    pub service_tier: Option<String>,
    /// Whether the response should be stored
    pub store: Option<bool>,
    /// Temperature setting used for generation
    pub temperature: Option<f64>,
    /// Text formatting configuration
    pub text: Option<Text>,
    /// Tool choice configuration that was used
    pub tool_choice: Option<String>,
    /// Tools that were available during generation
    pub tools: Option<Vec<Tool>>,
    /// Number of top log probabilities returned
    pub top_logprobs: Option<usize>,
    /// Top-p (nucleus sampling) parameter used
    pub top_p: Option<f64>,
    /// Truncation strategy that was applied
    pub truncation: Option<String>,
    /// Token usage statistics
    pub usage: Option<Usage>,
    /// User identifier associated with the request
    pub user: Option<String>,
    /// Additional metadata as key-value pairs
    pub metadata: Option<HashMap<String, String>>,
}
165
166impl Response {
167    pub fn output_text(&self) -> Option<String> {
168        let output = if let Some(outputs) = &self.output {
169            let outputs =
170                outputs.iter().filter_map(|o| if o.type_name.as_deref() == Some("message") { Some(o.clone()) } else { None }).collect::<Vec<_>>();
171            if outputs.is_empty() {
172                tracing::warn!("No message outputs found in response");
173                return None;
174            }
175            outputs.first().unwrap().clone()
176        } else {
177            return None;
178        };
179        let content = if let Some(contents) = &output.content {
180            let contents = contents
181                .iter()
182                .filter_map(|c| if c.type_name.as_deref() == Some("output_text") { Some(c.clone()) } else { None })
183                .collect::<Vec<_>>();
184            if contents.is_empty() {
185                tracing::warn!("No output_text contents found in message output");
186                return None;
187            }
188            contents.first().unwrap().clone()
189        } else {
190            return None;
191        };
192        content.text.clone()
193    }
194}
195
/// Response for delete operations
///
/// Returned when a response is successfully deleted via the DELETE endpoint.
/// Unlike [`Response`], these fields are non-optional: the API always returns
/// all three on a successful deletion.
///
/// # API Reference
///
/// <https://platform.openai.com/docs/api-reference/responses/delete>
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct DeleteResponseResult {
    /// The ID of the deleted response
    pub id: String,
    /// Object type, typically "response.deleted"
    pub object: String,
    /// Whether the deletion was successful
    pub deleted: bool,
}
212
/// Input item in a response
///
/// Represents a single input item that was part of the request.
/// Optional fields are omitted from serialized output when `None`
/// (via `skip_serializing_if`).
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct ResponseInputItem {
    /// Unique identifier for this input item
    pub id: String,
    /// The type of input item (e.g., "message", "function_call_output")
    #[serde(rename = "type")]
    pub item_type: String,
    /// The role of the input item (e.g., "user", "assistant")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role: Option<String>,
    /// The content of the input item (kept as raw JSON; shape varies by `item_type`)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<Value>,
    /// The status of the input item
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}
233
/// Response for listing input items
///
/// Returned when listing input items for a response via the GET endpoint.
/// `first_id`/`last_id` bracket the returned page and, together with
/// `has_more`, support cursor-based pagination.
///
/// # API Reference
///
/// <https://platform.openai.com/docs/api-reference/responses/list-input-items>
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct InputItemsListResponse {
    /// Object type, typically "list"
    pub object: String,
    /// The list of input items
    pub data: Vec<ResponseInputItem>,
    /// ID of the first item in the list
    #[serde(skip_serializing_if = "Option::is_none")]
    pub first_id: Option<String>,
    /// ID of the last item in the list
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_id: Option<String>,
    /// Whether there are more items to fetch
    pub has_more: bool,
}
256
/// Response for compact operation
///
/// Returned when a response is compacted to reduce its size.
/// `output` is kept as raw JSON values since the compacted shape is not
/// modeled by a dedicated struct yet.
///
/// # API Reference
///
/// <https://platform.openai.com/docs/api-reference/responses/compact>
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CompactedResponse {
    /// Unique identifier for the compacted response
    pub id: String,
    /// Object type, typically "response"
    pub object: String,
    /// Unix timestamp when the response was created
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<u64>,
    /// The compacted output
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output: Option<Vec<Value>>,
    /// Token usage statistics
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<Usage>,
}
280
/// Response for input token counting
///
/// Returned when counting input tokens for a potential request,
/// without actually generating a response.
///
/// # API Reference
///
/// <https://platform.openai.com/docs/api-reference/responses/input-tokens>
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct InputTokensResponse {
    /// Object type, typically "input_tokens"
    pub object: String,
    /// The number of input tokens
    pub input_tokens: u64,
}
294}