openai_rust_sdk/models/
gpt5.rs

1//! GPT-5 model constants and configuration
2
3use crate::{De, Ser};
4use serde::{self, Deserialize, Serialize};
5
/// GPT-5 model constants
pub mod models {
    /// GPT-5 - Latest flagship reasoning model
    pub const GPT_5: &str = "gpt-5";
    /// GPT-5 Mini - Smaller, faster version of GPT-5
    pub const GPT_5_MINI: &str = "gpt-5-mini";
    /// GPT-5 Nano - Smallest, fastest version of GPT-5
    pub const GPT_5_NANO: &str = "gpt-5-nano";
    /// GPT-5 Chat Latest - Latest chat-optimized GPT-5 model
    pub const GPT_5_CHAT_LATEST: &str = "gpt-5-chat-latest";

    // Dated snapshots pin a specific model version instead of the moving alias.
    /// GPT-5 snapshot from 2025-01-01
    pub const GPT_5_2025_01_01: &str = "gpt-5-2025-01-01";
    /// GPT-5 Mini snapshot from 2025-01-01
    pub const GPT_5_MINI_2025_01_01: &str = "gpt-5-mini-2025-01-01";
    /// GPT-5 Nano snapshot from 2025-01-01
    pub const GPT_5_NANO_2025_01_01: &str = "gpt-5-nano-2025-01-01";

    /// GPT-4.1 - Previous-generation model
    pub const GPT_4_1: &str = "gpt-4.1";
    /// GPT-4.1 Mini - Smaller version of GPT-4.1
    pub const GPT_4_1_MINI: &str = "gpt-4.1-mini";
    /// GPT-4.1 Nano - Smallest version of GPT-4.1
    pub const GPT_4_1_NANO: &str = "gpt-4.1-nano";

    /// GPT-4 - Previous-generation model
    pub const GPT_4: &str = "gpt-4";
    /// GPT-4 Turbo - Previous generation turbo model
    pub const GPT_4_TURBO: &str = "gpt-4-turbo";

    /// GPT-3.5 Turbo - Legacy chat model
    pub const GPT_3_5_TURBO: &str = "gpt-3.5-turbo";

    /// O3 - Legacy O-series reasoning model
    pub const O3: &str = "o3";
    /// O4 Mini - Legacy O-series reasoning model
    pub const O4_MINI: &str = "o4-mini";
}
44
45/// Reasoning effort levels for GPT-5 models
46#[derive(Debug, Clone, Copy, Ser, De, PartialEq, Eq)]
47#[serde(rename_all = "lowercase")]
48pub enum ReasoningEffort {
49    /// Very few reasoning tokens for fastest time-to-first-token
50    Minimal,
51    /// Favors speed and fewer tokens (default for o3-like behavior)
52    Low,
53    /// Balanced reasoning (default)
54    Medium,
55    /// More thorough reasoning for complex tasks
56    High,
57}
58
59impl Default for ReasoningEffort {
60    fn default() -> Self {
61        Self::Medium
62    }
63}
64
65/// Verbosity levels for GPT-5 output
66#[derive(Debug, Clone, Copy, Ser, De, PartialEq, Eq)]
67#[serde(rename_all = "lowercase")]
68pub enum Verbosity {
69    /// Concise answers with minimal commentary
70    Low,
71    /// Balanced output (default)
72    Medium,
73    /// Thorough explanations and detailed responses
74    High,
75}
76
77impl Default for Verbosity {
78    fn default() -> Self {
79        Self::Medium
80    }
81}
82
/// Reasoning configuration for GPT-5 models
#[derive(Debug, Clone, Ser, De, Default)]
pub struct ReasoningConfig {
    /// The effort level for reasoning; `None` omits the field from the
    /// serialized request so the API default applies
    #[serde(skip_serializing_if = "Option::is_none")]
    pub effort: Option<ReasoningEffort>,
}
90
91impl ReasoningConfig {
92    /// Create a new reasoning config with specified effort
93    #[must_use]
94    pub fn new(effort: ReasoningEffort) -> Self {
95        Self {
96            effort: Some(effort),
97        }
98    }
99
100    /// Create minimal reasoning config for fastest responses
101    #[must_use]
102    pub fn minimal() -> Self {
103        Self::new(ReasoningEffort::Minimal)
104    }
105
106    /// Create low reasoning config for speed
107    #[must_use]
108    pub fn low() -> Self {
109        Self::new(ReasoningEffort::Low)
110    }
111
112    /// Create medium reasoning config (default)
113    #[must_use]
114    pub fn medium() -> Self {
115        Self::new(ReasoningEffort::Medium)
116    }
117
118    /// Create high reasoning config for complex tasks
119    #[must_use]
120    pub fn high() -> Self {
121        Self::new(ReasoningEffort::High)
122    }
123}
124
/// Text output configuration for GPT-5 models
#[derive(Debug, Clone, Ser, De, Default)]
pub struct TextConfig {
    /// The verbosity level for output; `None` omits the field from the
    /// serialized request so the API default applies
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<Verbosity>,

    /// Format for the text output (for structured outputs); an opaque JSON
    /// value passed through to the API, omitted when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub format: Option<serde_json::Value>,
}
136
137impl TextConfig {
138    /// Create a new text config with specified verbosity
139    #[must_use]
140    pub fn new(verbosity: Verbosity) -> Self {
141        Self {
142            verbosity: Some(verbosity),
143            format: None,
144        }
145    }
146
147    /// Create low verbosity config for concise responses
148    #[must_use]
149    pub fn low() -> Self {
150        Self::new(Verbosity::Low)
151    }
152
153    /// Create medium verbosity config (default)
154    #[must_use]
155    pub fn medium() -> Self {
156        Self::new(Verbosity::Medium)
157    }
158
159    /// Create high verbosity config for detailed responses
160    #[must_use]
161    pub fn high() -> Self {
162        Self::new(Verbosity::High)
163    }
164
165    /// Set the format for structured outputs
166    #[must_use]
167    pub fn with_format(mut self, format: serde_json::Value) -> Self {
168        self.format = Some(format);
169        self
170    }
171}
172
173/// GPT-5 model selection helper
174pub struct GPT5ModelSelector;
175
176impl GPT5ModelSelector {
177    /// Select the best model for complex reasoning tasks
178    #[must_use]
179    pub fn for_complex_reasoning() -> &'static str {
180        models::GPT_5
181    }
182
183    /// Select the best model for cost-optimized reasoning
184    #[must_use]
185    pub fn for_cost_optimized() -> &'static str {
186        models::GPT_5_MINI
187    }
188
189    /// Select the best model for high-throughput tasks
190    #[must_use]
191    pub fn for_high_throughput() -> &'static str {
192        models::GPT_5_NANO
193    }
194
195    /// Select the best model for coding tasks
196    #[must_use]
197    pub fn for_coding() -> &'static str {
198        models::GPT_5
199    }
200
201    /// Select the best model for chat applications
202    #[must_use]
203    pub fn for_chat() -> &'static str {
204        models::GPT_5_CHAT_LATEST
205    }
206
207    /// Get migration recommendation from an older model
208    #[must_use]
209    pub fn migration_from(old_model: &str) -> &'static str {
210        match old_model {
211            "o3" => models::GPT_5,
212            "gpt-4.1" | "gpt-4" | "gpt-4-turbo" => models::GPT_5,
213            "o4-mini" | "gpt-4.1-mini" => models::GPT_5_MINI,
214            "gpt-4.1-nano" | "gpt-3.5-turbo" => models::GPT_5_NANO,
215            _ => models::GPT_5,
216        }
217    }
218}