//! Response types for the OpenAI Responses API
//! (`openai_tools/responses/response.rs`).

1use crate::common::{structured_output::Schema, tool::Tool, usage::Usage};
2use serde::{Deserialize, Serialize};
3use std::collections::HashMap;
4
5/// Content within a response output.
6///
7/// Represents textual content returned by the AI, including any annotations or log probabilities.
8#[derive(Debug, Clone, Default, Deserialize, Serialize)]
9pub struct Content {
10    /// The type of content, typically "text"
11    #[serde(rename = "type")]
12    pub type_name: String,
13    /// The actual text content
14    pub text: String,
15    /// Any annotations associated with the content
16    pub annotations: Vec<String>,
17    /// Log probabilities for the content tokens
18    pub logprobs: Vec<String>,
19}
20
/// Individual output item from the AI response.
///
/// This can represent different types of outputs:
/// - Text responses from the AI
/// - Function calls that should be executed
/// - Other structured outputs
///
/// Which `Option` fields are populated depends on `type_name`: text
/// outputs carry `role`/`content`, while function-call outputs carry
/// `arguments`/`call_id`/`name`. All variant-specific fields are
/// `Option`, so serde tolerates their absence for the other variants.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Output {
    /// Unique identifier for this output
    pub id: String,
    /// The type of output: "text", "function_call", etc.
    /// (serialized as `type`, which is a reserved word in Rust)
    #[serde(rename = "type")]
    pub type_name: String,
    /// The role (e.g., "assistant") for text outputs
    pub role: Option<String>,
    /// Status of the output
    pub status: Option<String>,
    /// Text content (for text outputs)
    pub content: Option<Vec<Content>>,
    /// Function arguments as JSON string (for function_call outputs);
    /// callers are expected to parse this JSON themselves
    pub arguments: Option<String>,
    /// Unique identifier for the function call
    pub call_id: Option<String>,
    /// Function name (for function_call outputs)
    pub name: Option<String>,
}
47
/// Reasoning information from the AI model.
///
/// Provides insight into the AI's reasoning process and effort level.
/// Both fields are optional since not all models/requests return
/// reasoning details.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Reasoning {
    /// The effort level used in reasoning (e.g., "low"/"medium"/"high" —
    /// exact values come from the API, not validated here)
    pub effort: Option<String>,
    /// Summary of the reasoning process
    pub summary: Option<String>,
}
58
/// Text formatting configuration for responses.
///
/// NOTE(review): `Schema` comes from `crate::common::structured_output`;
/// its shape (and whether it models the API's `format` object exactly)
/// is not visible from this file — confirm against that module.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Text {
    /// Format configuration
    pub format: Schema,
}
65
66/// Complete response from the OpenAI Responses API.
67///
68/// This struct contains all the information returned by the API, including the AI's outputs,
69/// usage statistics, and metadata about the request processing.
70#[derive(Debug, Clone, Default, Deserialize, Serialize)]
71pub struct Response {
72    /// Unique identifier for this response
73    pub id: String,
74    /// Object type, typically "response"
75    pub object: String,
76    /// Unix timestamp when the response was created
77    pub created_at: usize,
78    /// Status of the response processing
79    pub status: String,
80    /// Whether the response was processed in the background
81    pub background: bool,
82    /// Error message if the request failed
83    pub error: Option<String>,
84    /// Details about incomplete responses
85    pub incomplete_details: Option<String>,
86    /// Instructions that were used for this response
87    pub instructions: Option<String>,
88    /// Maximum number of output tokens that were allowed
89    pub max_output_tokens: Option<usize>,
90    /// Maximum number of tool calls that were allowed
91    pub max_tool_calls: Option<usize>,
92    /// The model that was used to generate the response
93    pub model: String,
94    /// List of outputs from the AI (text, function calls, etc.)
95    pub output: Vec<Output>,
96    /// Whether parallel tool calls were enabled
97    pub parallel_tool_calls: bool,
98    /// ID of the previous response in a conversation chain
99    pub previous_response_id: Option<String>,
100    /// Reasoning information from the AI
101    pub reasoning: Reasoning,
102    /// Service tier used for processing
103    pub service_tier: Option<String>,
104    /// Whether the response should be stored
105    pub store: Option<bool>,
106    /// Temperature setting used for generation
107    pub temperature: Option<f64>,
108    /// Text formatting configuration
109    pub text: Text,
110    /// Tool choice configuration that was used
111    pub tool_choice: Option<String>,
112    /// Tools that were available during generation
113    pub tools: Option<Vec<Tool>>,
114    /// Number of top log probabilities returned
115    pub top_logprobs: Option<usize>,
116    /// Top-p (nucleus sampling) parameter used
117    pub top_p: Option<f64>,
118    /// Truncation strategy that was applied
119    pub truncation: Option<String>,
120    /// Token usage statistics
121    pub usage: Option<Usage>,
122    /// User identifier associated with the request
123    pub user: Option<String>,
124    /// Additional metadata as key-value pairs
125    pub metadata: HashMap<String, String>,
126}