//! hanzo_protocol/openai_models.rs — OpenAI model metadata types.
1use std::collections::HashMap;
2
3use schemars::JsonSchema;
4use serde::Deserialize;
5use serde::Serialize;
6use strum::IntoEnumIterator;
7use strum_macros::Display;
8use strum_macros::EnumIter;
9use ts_rs::TS;
10
11use crate::config_types::Verbosity;
12
/// Reasoning effort level for a model.
///
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
///
/// Variants are declared in increasing effort order; `effort_rank` below
/// relies on this ordering. Both serde and strum `Display` render variants
/// lowercased (e.g. `XHigh` -> "xhigh").
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Default,
    Clone,
    Copy,
    PartialEq,
    Eq,
    Display,
    JsonSchema,
    TS,
    EnumIter,
    Hash,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
    /// No reasoning.
    None,
    Minimal,
    Low,
    /// Default effort when none is explicitly configured.
    #[default]
    Medium,
    High,
    /// Serialized/displayed as "xhigh".
    XHigh,
}
40
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq, Eq)]
pub struct ReasoningEffortPreset {
    /// Effort level that the model supports.
    pub effort: ReasoningEffort,
    /// Short human description shown next to the effort in UIs.
    pub description: String,
}
49
/// Recommended upgrade path from one model to another.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq)]
pub struct ModelUpgrade {
    /// Slug of the model to upgrade to.
    pub id: String,
    /// Maps each canonical effort to the closest supported effort
    /// (see `reasoning_effort_mapping_from_presets`).
    pub reasoning_effort_mapping: Option<HashMap<ReasoningEffort, ReasoningEffort>>,
    /// Config key used to track the migration; set to the old model's slug
    /// when built via `From<ModelInfo>`.
    pub migration_config_key: String,
    /// Optional link with more details about the upgrade.
    pub model_link: Option<String>,
    /// Optional copy shown to users when suggesting the upgrade.
    pub upgrade_copy: Option<String>,
}
58
/// Metadata describing a Codex-supported model.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq)]
pub struct ModelPreset {
    /// Stable identifier for the preset.
    pub id: String,
    /// Model slug (e.g., "gpt-5").
    pub model: String,
    /// Display name shown in UIs.
    pub display_name: String,
    /// Short human description shown in UIs.
    pub description: String,
    /// Reasoning effort applied when none is explicitly chosen.
    pub default_reasoning_effort: ReasoningEffort,
    /// Supported reasoning effort options.
    pub supported_reasoning_efforts: Vec<ReasoningEffortPreset>,
    /// Whether this is the default model for new users.
    pub is_default: bool,
    /// Recommended upgrade model, if any.
    pub upgrade: Option<ModelUpgrade>,
    /// Whether this preset should appear in the picker UI.
    pub show_in_picker: bool,
    /// Whether this model is supported in the API.
    pub supported_in_api: bool,
}
83
/// Visibility of a model in the picker or APIs.
#[derive(
    Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema, EnumIter, Display,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ModelVisibility {
    /// Shown in the model picker (maps to `ModelPreset::show_in_picker`).
    List,
    /// NOTE(review): presumably hidden from the picker but still usable —
    /// confirm against consumers of this enum.
    Hide,
    /// NOTE(review): semantics vs `Hide` are not clear from this file — confirm.
    None,
}
95
/// Shell execution capability for a model.
///
/// Serialized in snake_case for both serde and strum `Display`
/// (e.g. `UnifiedExec` -> "unified_exec", `ShellCommand` -> "shell_command").
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Clone,
    Copy,
    PartialEq,
    Eq,
    TS,
    JsonSchema,
    EnumIter,
    Display,
    Hash,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum ConfigShellToolType {
    Default,
    Local,
    UnifiedExec,
    /// Shell tooling disabled for this model.
    Disabled,
    ShellCommand,
}
120
/// Flavor of the apply-patch tool a model uses
/// (see `ModelInfo::apply_patch_tool_type`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
    /// Serialized as "freeform".
    Freeform,
    /// Serialized as "function".
    Function,
}
127
/// Server-provided truncation policy metadata for a model.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum TruncationMode {
    /// The limit is measured in bytes.
    Bytes,
    /// The limit is measured in tokens.
    Tokens,
}
135
/// Truncation policy: a limit together with the unit it is measured in.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct TruncationPolicyConfig {
    /// Unit in which `limit` is measured.
    pub mode: TruncationMode,
    /// Maximum size before truncation, in `mode` units.
    pub limit: i64,
}
141
142impl TruncationPolicyConfig {
143    pub const fn bytes(limit: i64) -> Self {
144        Self {
145            mode: TruncationMode::Bytes,
146            limit,
147        }
148    }
149
150    pub const fn tokens(limit: i64) -> Self {
151        Self {
152            mode: TruncationMode::Tokens,
153            limit,
154        }
155    }
156}
157
/// Model metadata returned by the Codex backend `/models` endpoint.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema)]
pub struct ModelInfo {
    /// Model slug (stable identifier, e.g. "gpt-5").
    pub slug: String,
    /// Display name shown in UIs.
    pub display_name: String,
    /// Optional human description; converted to an empty string when
    /// building a `ModelPreset`.
    pub description: Option<String>,
    /// Reasoning effort applied when none is explicitly chosen.
    pub default_reasoning_level: ReasoningEffort,
    /// Reasoning effort options this model supports.
    pub supported_reasoning_levels: Vec<ReasoningEffortPreset>,
    /// Shell execution capability for this model.
    pub shell_type: ConfigShellToolType,
    /// Picker/API visibility; `List` means shown in the picker.
    pub visibility: ModelVisibility,
    /// Whether this model is supported in the API.
    pub supported_in_api: bool,
    /// Ordering hint; NOTE(review): presumably the highest-priority
    /// available model becomes the default — confirm with the backend.
    pub priority: i32,
    /// Slug of the recommended upgrade model, if any.
    pub upgrade: Option<String>,
    /// Base instructions override for this model, if provided by the server.
    pub base_instructions: Option<String>,
    /// Whether the model emits reasoning summaries.
    pub supports_reasoning_summaries: bool,
    /// Whether the model accepts a verbosity setting.
    pub support_verbosity: bool,
    /// Default verbosity when the model supports it.
    pub default_verbosity: Option<Verbosity>,
    /// Apply-patch tool flavor used by the model, if any.
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    /// Server-provided truncation policy.
    pub truncation_policy: TruncationPolicyConfig,
    /// Whether the model may issue parallel tool calls.
    pub supports_parallel_tool_calls: bool,
    /// Context window size when known — presumably in tokens; confirm.
    pub context_window: Option<i64>,
    /// Names of experimental tools enabled for this model.
    pub experimental_supported_tools: Vec<String>,
}
181
/// Response wrapper for `/models`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema, Default)]
pub struct ModelsResponse {
    /// Models advertised by the backend.
    pub models: Vec<ModelInfo>,
}
187
188// convert ModelInfo to ModelPreset
189impl From<ModelInfo> for ModelPreset {
190    fn from(info: ModelInfo) -> Self {
191        ModelPreset {
192            id: info.slug.clone(),
193            model: info.slug.clone(),
194            display_name: info.display_name,
195            description: info.description.unwrap_or_default(),
196            default_reasoning_effort: info.default_reasoning_level,
197            supported_reasoning_efforts: info.supported_reasoning_levels.clone(),
198            is_default: false, // default is the highest priority available model
199            upgrade: info.upgrade.as_ref().map(|upgrade_slug| ModelUpgrade {
200                id: upgrade_slug.clone(),
201                reasoning_effort_mapping: reasoning_effort_mapping_from_presets(
202                    &info.supported_reasoning_levels,
203                ),
204                migration_config_key: info.slug.clone(),
205                model_link: None,
206                upgrade_copy: None,
207            }),
208            show_in_picker: info.visibility == ModelVisibility::List,
209            supported_in_api: info.supported_in_api,
210        }
211    }
212}
213
214fn reasoning_effort_mapping_from_presets(
215    presets: &[ReasoningEffortPreset],
216) -> Option<HashMap<ReasoningEffort, ReasoningEffort>> {
217    if presets.is_empty() {
218        return None;
219    }
220
221    // Map every canonical effort to the closest supported effort for the new model.
222    let supported: Vec<ReasoningEffort> = presets.iter().map(|p| p.effort).collect();
223    let mut map = HashMap::new();
224    for effort in ReasoningEffort::iter() {
225        let nearest = nearest_effort(effort, &supported);
226        map.insert(effort, nearest);
227    }
228    Some(map)
229}
230
231fn effort_rank(effort: ReasoningEffort) -> i32 {
232    match effort {
233        ReasoningEffort::None => 0,
234        ReasoningEffort::Minimal => 1,
235        ReasoningEffort::Low => 2,
236        ReasoningEffort::Medium => 3,
237        ReasoningEffort::High => 4,
238        ReasoningEffort::XHigh => 5,
239    }
240}
241
242fn nearest_effort(target: ReasoningEffort, supported: &[ReasoningEffort]) -> ReasoningEffort {
243    let target_rank = effort_rank(target);
244    supported
245        .iter()
246        .copied()
247        .min_by_key(|candidate| (effort_rank(*candidate) - target_rank).abs())
248        .unwrap_or(target)
249}