// hanzo_protocol/openai_models.rs
1//! Shared model metadata types exchanged between Codex services and clients.
2//!
3//! These types are serialized across core, TUI, app-server, and SDK boundaries, so field defaults
4//! are used to preserve compatibility when older payloads omit newly introduced attributes.
5
6use std::collections::HashMap;
7
8use schemars::JsonSchema;
9use serde::Deserialize;
10use serde::Serialize;
11use strum::IntoEnumIterator;
12use strum_macros::Display;
13use strum_macros::EnumIter;
14use tracing::warn;
15use ts_rs::TS;
16
17use crate::config_types::Personality;
18use crate::config_types::ReasoningSummary;
19use crate::config_types::Verbosity;
20
/// Placeholder token substituted into `ModelMessages::instructions_template` with the
/// personality-specific message (or the empty string when none applies).
const PERSONALITY_PLACEHOLDER: &str = "{{ personality }}";
22
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
///
/// Variants are declared in escalating order; `effort_rank` below relies on this ordering when
/// mapping efforts across models. Serialized as lowercase strings on the wire.
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Default,
    Clone,
    Copy,
    PartialEq,
    Eq,
    Display,
    JsonSchema,
    TS,
    EnumIter,
    Hash,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
    /// No reasoning effort requested.
    None,
    Minimal,
    Low,
    /// Default effort when a payload omits the field.
    #[default]
    Medium,
    High,
    XHigh,
}
50
/// Canonical user-input modality tags advertised by a model.
///
/// Serialized as lowercase strings (`"text"`, `"image"`).
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Clone,
    Copy,
    PartialEq,
    Eq,
    Display,
    JsonSchema,
    TS,
    EnumIter,
    Hash,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum InputModality {
    /// Plain text turns and tool payloads.
    Text,
    /// Image attachments included in user turns.
    Image,
}
74
75/// Backward-compatible default when `input_modalities` is omitted on the wire.
76///
77/// Legacy payloads predate modality metadata, so we conservatively assume both text and images are
78/// accepted unless a preset explicitly narrows support.
79pub fn default_input_modalities() -> Vec<InputModality> {
80    vec![InputModality::Text, InputModality::Image]
81}
82
/// A reasoning effort option that can be surfaced for a model.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq, Eq)]
pub struct ReasoningEffortPreset {
    /// Effort level that the model supports.
    pub effort: ReasoningEffort,
    /// Short human description shown next to the effort in UIs.
    pub description: String,
}
91
/// Client-facing description of a recommended upgrade from one model to another.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq)]
pub struct ModelUpgrade {
    /// Slug of the model to upgrade to (populated from `ModelInfoUpgrade::model` in the
    /// `From<ModelInfo>` conversion below).
    pub id: String,
    /// Maps each effort on the current model to the nearest supported effort after upgrading.
    pub reasoning_effort_mapping: Option<HashMap<ReasoningEffort, ReasoningEffort>>,
    // Set to the current model's slug in the From<ModelInfo> conversion — presumably used to
    // record that the migration prompt was shown for that model; TODO confirm with consumers.
    pub migration_config_key: String,
    /// Optional link with more details about the upgrade target.
    pub model_link: Option<String>,
    /// Optional short copy shown when advertising the upgrade.
    pub upgrade_copy: Option<String>,
    /// Optional markdown body describing the migration.
    pub migration_markdown: Option<String>,
}
101
/// New-user-experience message shown when a model becomes available.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq, Eq)]
pub struct ModelAvailabilityNux {
    /// Message displayed to the user.
    pub message: String,
}
106
/// Metadata describing a Codex-supported model.
#[derive(Debug, Clone, Deserialize, Serialize, TS, JsonSchema, PartialEq)]
pub struct ModelPreset {
    /// Stable identifier for the preset.
    pub id: String,
    /// Model slug (e.g., "gpt-5").
    pub model: String,
    /// Display name shown in UIs.
    pub display_name: String,
    /// Short human description shown in UIs.
    pub description: String,
    /// Reasoning effort applied when none is explicitly chosen.
    pub default_reasoning_effort: ReasoningEffort,
    /// Supported reasoning effort options.
    pub supported_reasoning_efforts: Vec<ReasoningEffortPreset>,
    /// Whether this model supports personality-specific instructions.
    /// Defaults to `false` for older payloads that predate the field.
    #[serde(default)]
    pub supports_personality: bool,
    /// Whether this is the default model for new users.
    pub is_default: bool,
    /// Recommended upgrade target for this model, when one exists.
    pub upgrade: Option<ModelUpgrade>,
    /// Whether this preset should appear in the picker UI.
    pub show_in_picker: bool,
    /// Availability NUX shown when this preset becomes accessible to the user.
    pub availability_nux: Option<ModelAvailabilityNux>,
    /// Whether this model is supported in the API.
    pub supported_in_api: bool,
    /// Input modalities accepted when composing user turns for this preset.
    /// Defaults to text + image for legacy payloads that omit the field.
    #[serde(default = "default_input_modalities")]
    pub input_modalities: Vec<InputModality>,
}
139
/// Visibility of a model in the picker or APIs.
#[derive(
    Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema, EnumIter, Display,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ModelVisibility {
    /// Shown in the picker (see `ModelPreset::show_in_picker` derivation below).
    List,
    /// Not listed in the picker.
    Hide,
    /// No visibility. NOTE(review): exact distinction from `Hide` is not established by this
    /// file — confirm with the backend contract.
    None,
}
151
/// Shell execution capability for a model.
///
/// Serialized in snake_case (e.g. `"unified_exec"`, `"shell_command"`).
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Clone,
    Copy,
    PartialEq,
    Eq,
    TS,
    JsonSchema,
    EnumIter,
    Display,
    Hash,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum ConfigShellToolType {
    Default,
    Local,
    UnifiedExec,
    /// Shell tooling disabled for this model.
    Disabled,
    ShellCommand,
}
176
/// How the apply-patch tool is exposed to a model.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ApplyPatchToolType {
    /// Patch content is supplied as freeform text.
    Freeform,
    /// Patch tool is exposed as a structured function call.
    Function,
}
183
/// Result modality of the web-search tool for a model.
#[derive(
    Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash, TS, JsonSchema, Default,
)]
#[serde(rename_all = "snake_case")]
pub enum WebSearchToolType {
    /// Text-only results; also the default for payloads that omit the field.
    #[default]
    Text,
    /// Results may include images alongside text.
    TextAndImage,
}
193
/// Server-provided truncation policy metadata for a model.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum TruncationMode {
    /// Limit is measured in bytes.
    Bytes,
    /// Limit is measured in tokens.
    Tokens,
}
201
/// Truncation policy: a unit (`mode`) plus a limit in that unit.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct TruncationPolicyConfig {
    /// Unit the limit is expressed in.
    pub mode: TruncationMode,
    /// Maximum size in `mode` units.
    pub limit: i64,
}
207
208impl TruncationPolicyConfig {
209    pub const fn bytes(limit: i64) -> Self {
210        Self {
211            mode: TruncationMode::Bytes,
212            limit,
213        }
214    }
215
216    pub const fn tokens(limit: i64) -> Self {
217        Self {
218            mode: TruncationMode::Tokens,
219            limit,
220        }
221    }
222}
223
/// Semantic version triple encoded as an array in JSON (e.g. [0, 62, 0]).
///
/// Tuple order is (major, minor, patch) — presumably; TODO confirm against producer.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)]
pub struct ClientVersion(pub i32, pub i32, pub i32);
227
/// Serde default for `ModelInfo::effective_context_window_percent` (95%).
const fn default_effective_context_window_percent() -> i64 {
    95
}
231
/// Model metadata returned by the Codex backend `/models` endpoint.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema)]
pub struct ModelInfo {
    /// Model slug identifying this model.
    pub slug: String,
    /// Display name shown in UIs.
    pub display_name: String,
    /// Optional short human description.
    pub description: Option<String>,
    /// Reasoning effort applied when none is explicitly chosen; omitted on older payloads.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub default_reasoning_level: Option<ReasoningEffort>,
    /// Reasoning effort options this model supports.
    pub supported_reasoning_levels: Vec<ReasoningEffortPreset>,
    /// Shell execution capability for this model.
    pub shell_type: ConfigShellToolType,
    /// Picker/API visibility for this model.
    pub visibility: ModelVisibility,
    /// Whether this model is supported in the API.
    pub supported_in_api: bool,
    // Per the From<ModelInfo> conversion below, the highest-priority available model becomes
    // the default; ordering semantics (higher vs. lower wins) are decided by the caller.
    pub priority: i32,
    /// Availability NUX shown when this model becomes accessible.
    pub availability_nux: Option<ModelAvailabilityNux>,
    /// Recommended upgrade target, when one exists.
    pub upgrade: Option<ModelInfoUpgrade>,
    /// Instructions used when `model_messages` provides no usable template.
    pub base_instructions: String,
    /// Optional instruction template and variables; when present with a template, it overrides
    /// `base_instructions` (see `get_model_instructions`).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_messages: Option<ModelMessages>,
    /// Whether the model emits reasoning summaries.
    pub supports_reasoning_summaries: bool,
    /// Reasoning summary mode applied when none is chosen; defaults for legacy payloads.
    #[serde(default)]
    pub default_reasoning_summary: ReasoningSummary,
    /// Whether the model accepts a verbosity setting.
    pub support_verbosity: bool,
    /// Verbosity applied when none is explicitly chosen.
    pub default_verbosity: Option<Verbosity>,
    /// How the apply-patch tool is exposed, if at all.
    pub apply_patch_tool_type: Option<ApplyPatchToolType>,
    /// Web-search result modality; defaults to text-only for legacy payloads.
    #[serde(default)]
    pub web_search_tool_type: WebSearchToolType,
    /// Truncation policy for this model's tool output.
    pub truncation_policy: TruncationPolicyConfig,
    /// Whether the model can issue parallel tool calls.
    pub supports_parallel_tool_calls: bool,
    /// Whether image inputs may request "original" detail; defaults to false when omitted.
    #[serde(default)]
    pub supports_image_detail_original: bool,
    /// Context window size in tokens, when known.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub context_window: Option<i64>,
    /// Token threshold for automatic compaction. When omitted, core derives it
    /// from `context_window` (90%). When provided, core clamps it to 90% of the
    /// context window when available.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub auto_compact_token_limit: Option<i64>,
    /// Percentage of the context window considered usable for inputs, after
    /// reserving headroom for system prompts, tool overhead, and model output.
    #[serde(default = "default_effective_context_window_percent")]
    pub effective_context_window_percent: i64,
    /// Names of experimental tools this model supports.
    pub experimental_supported_tools: Vec<String>,
    /// Input modalities accepted by the backend for this model.
    #[serde(default = "default_input_modalities")]
    pub input_modalities: Vec<InputModality>,
    /// When true, this model should use websocket transport even when websocket features are off.
    #[serde(default)]
    pub prefer_websockets: bool,
    /// Internal-only marker set by core when a model slug resolved to fallback metadata.
    /// Never serialized, deserialized, or exported to schemas.
    #[serde(default, skip_serializing, skip_deserializing)]
    #[schemars(skip)]
    #[ts(skip)]
    pub used_fallback_model_metadata: bool,
}
286
287impl ModelInfo {
288    pub fn auto_compact_token_limit(&self) -> Option<i64> {
289        let context_limit = self
290            .context_window
291            .map(|context_window| (context_window * 9) / 10);
292        let config_limit = self.auto_compact_token_limit;
293        if let Some(context_limit) = context_limit {
294            return Some(
295                config_limit.map_or(context_limit, |limit| std::cmp::min(limit, context_limit)),
296            );
297        }
298        config_limit
299    }
300
301    pub fn supports_personality(&self) -> bool {
302        self.model_messages
303            .as_ref()
304            .is_some_and(ModelMessages::supports_personality)
305    }
306
307    pub fn get_model_instructions(&self, personality: Option<Personality>) -> String {
308        if let Some(model_messages) = &self.model_messages
309            && let Some(template) = &model_messages.instructions_template
310        {
311            // if we have a template, always use it
312            let personality_message = model_messages
313                .get_personality_message(personality)
314                .unwrap_or_default();
315            template.replace(PERSONALITY_PLACEHOLDER, personality_message.as_str())
316        } else if let Some(personality) = personality {
317            warn!(
318                model = %self.slug,
319                %personality,
320                "Model personality requested but model_messages is missing, falling back to base instructions."
321            );
322            self.base_instructions.clone()
323        } else {
324            self.base_instructions.clone()
325        }
326    }
327}
328
/// A strongly-typed template for assembling model instructions and developer messages. If
/// instructions_* is populated and valid, it will override base_instructions.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema)]
pub struct ModelMessages {
    /// Template text; may contain the `{{ personality }}` placeholder.
    pub instructions_template: Option<String>,
    /// Per-personality message variables substituted into the template.
    pub instructions_variables: Option<ModelInstructionsVariables>,
}
336
337impl ModelMessages {
338    fn has_personality_placeholder(&self) -> bool {
339        self.instructions_template
340            .as_ref()
341            .map(|spec| spec.contains(PERSONALITY_PLACEHOLDER))
342            .unwrap_or(false)
343    }
344
345    fn supports_personality(&self) -> bool {
346        self.has_personality_placeholder()
347            && self
348                .instructions_variables
349                .as_ref()
350                .is_some_and(ModelInstructionsVariables::is_complete)
351    }
352
353    pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
354        self.instructions_variables
355            .as_ref()
356            .and_then(|variables| variables.get_personality_message(personality))
357    }
358}
359
/// Per-personality instruction snippets substituted into the instructions template.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema)]
pub struct ModelInstructionsVariables {
    /// Message used when no personality is requested.
    pub personality_default: Option<String>,
    /// Message for the "friendly" personality.
    pub personality_friendly: Option<String>,
    /// Message for the "pragmatic" personality.
    pub personality_pragmatic: Option<String>,
}
366
367impl ModelInstructionsVariables {
368    pub fn is_complete(&self) -> bool {
369        self.personality_default.is_some()
370            && self.personality_friendly.is_some()
371            && self.personality_pragmatic.is_some()
372    }
373
374    pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
375        if let Some(personality) = personality {
376            match personality {
377                Personality::None => Some(String::new()),
378                Personality::Friendly => self.personality_friendly.clone(),
379                Personality::Pragmatic => self.personality_pragmatic.clone(),
380            }
381        } else {
382            self.personality_default.clone()
383        }
384    }
385}
386
/// Backend-shaped upgrade recommendation attached to a `ModelInfo`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema)]
pub struct ModelInfoUpgrade {
    /// Slug of the model to upgrade to.
    pub model: String,
    /// Markdown body describing the migration.
    pub migration_markdown: String,
}
392
393impl From<&ModelUpgrade> for ModelInfoUpgrade {
394    fn from(upgrade: &ModelUpgrade) -> Self {
395        ModelInfoUpgrade {
396            model: upgrade.id.clone(),
397            migration_markdown: upgrade.migration_markdown.clone().unwrap_or_default(),
398        }
399    }
400}
401
/// Response wrapper for `/models`.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, TS, JsonSchema, Default)]
pub struct ModelsResponse {
    /// Models returned by the backend.
    pub models: Vec<ModelInfo>,
}
407
// convert ModelInfo to ModelPreset
impl From<ModelInfo> for ModelPreset {
    /// Project backend model metadata into the client-facing preset shape.
    ///
    /// `is_default` is intentionally left false here; callers recompute it (see
    /// `mark_default_by_picker_visibility`).
    fn from(info: ModelInfo) -> Self {
        // Computed before `info` fields are moved into the struct below.
        let supports_personality = info.supports_personality();
        ModelPreset {
            // The slug doubles as both the stable preset id and the model slug.
            id: info.slug.clone(),
            model: info.slug.clone(),
            display_name: info.display_name,
            description: info.description.unwrap_or_default(),
            default_reasoning_effort: info
                .default_reasoning_level
                .unwrap_or(ReasoningEffort::None),
            supported_reasoning_efforts: info.supported_reasoning_levels.clone(),
            supports_personality,
            is_default: false, // default is the highest priority available model
            upgrade: info.upgrade.as_ref().map(|upgrade| ModelUpgrade {
                id: upgrade.model.clone(),
                // NOTE(review): the mapping is built from the CURRENT model's supported levels,
                // not the upgrade target's — confirm this is intentional.
                reasoning_effort_mapping: reasoning_effort_mapping_from_presets(
                    &info.supported_reasoning_levels,
                ),
                // Keyed by the model being migrated away from.
                migration_config_key: info.slug.clone(),
                // todo(aibrahim): add the model link here.
                model_link: None,
                upgrade_copy: None,
                migration_markdown: Some(upgrade.migration_markdown.clone()),
            }),
            show_in_picker: info.visibility == ModelVisibility::List,
            availability_nux: info.availability_nux,
            supported_in_api: info.supported_in_api,
            input_modalities: info.input_modalities,
        }
    }
}
441
442impl ModelPreset {
443    /// Filter models based on authentication mode.
444    ///
445    /// In ChatGPT mode, all models are visible. Otherwise, only API-supported models are shown.
446    pub fn filter_by_auth(models: Vec<ModelPreset>, chatgpt_mode: bool) -> Vec<ModelPreset> {
447        models
448            .into_iter()
449            .filter(|model| chatgpt_mode || model.supported_in_api)
450            .collect()
451    }
452
453    /// Recompute the single default preset using picker visibility.
454    ///
455    /// The first picker-visible model wins; if none are picker-visible, the first model wins.
456    pub fn mark_default_by_picker_visibility(models: &mut [ModelPreset]) {
457        for preset in models.iter_mut() {
458            preset.is_default = false;
459        }
460        if let Some(default) = models.iter_mut().find(|preset| preset.show_in_picker) {
461            default.is_default = true;
462        } else if let Some(default) = models.first_mut() {
463            default.is_default = true;
464        }
465    }
466}
467
468fn reasoning_effort_mapping_from_presets(
469    presets: &[ReasoningEffortPreset],
470) -> Option<HashMap<ReasoningEffort, ReasoningEffort>> {
471    if presets.is_empty() {
472        return None;
473    }
474
475    // Map every canonical effort to the closest supported effort for the new model.
476    let supported: Vec<ReasoningEffort> = presets.iter().map(|p| p.effort).collect();
477    let mut map = HashMap::new();
478    for effort in ReasoningEffort::iter() {
479        let nearest = nearest_effort(effort, &supported);
480        map.insert(effort, nearest);
481    }
482    Some(map)
483}
484
/// Numeric rank of an effort level, following the enum's escalation order.
/// Used only to measure distance between efforts in `nearest_effort`.
fn effort_rank(effort: ReasoningEffort) -> i32 {
    match effort {
        ReasoningEffort::None => 0,
        ReasoningEffort::Minimal => 1,
        ReasoningEffort::Low => 2,
        ReasoningEffort::Medium => 3,
        ReasoningEffort::High => 4,
        ReasoningEffort::XHigh => 5,
    }
}
495
496fn nearest_effort(target: ReasoningEffort, supported: &[ReasoningEffort]) -> ReasoningEffort {
497    let target_rank = effort_rank(target);
498    supported
499        .iter()
500        .copied()
501        .min_by_key(|candidate| (effort_rank(*candidate) - target_rank).abs())
502        .unwrap_or(target)
503}
504
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Minimal `ModelInfo` fixture; only `model_messages` varies per test.
    fn test_model(spec: Option<ModelMessages>) -> ModelInfo {
        ModelInfo {
            slug: "test-model".to_string(),
            display_name: "Test Model".to_string(),
            description: None,
            default_reasoning_level: None,
            supported_reasoning_levels: vec![],
            shell_type: ConfigShellToolType::ShellCommand,
            visibility: ModelVisibility::List,
            supported_in_api: true,
            priority: 1,
            availability_nux: None,
            upgrade: None,
            base_instructions: "base".to_string(),
            model_messages: spec,
            supports_reasoning_summaries: false,
            default_reasoning_summary: ReasoningSummary::Auto,
            support_verbosity: false,
            default_verbosity: None,
            apply_patch_tool_type: None,
            web_search_tool_type: WebSearchToolType::Text,
            truncation_policy: TruncationPolicyConfig::bytes(10_000),
            supports_parallel_tool_calls: false,
            supports_image_detail_original: false,
            context_window: None,
            auto_compact_token_limit: None,
            effective_context_window_percent: 95,
            experimental_supported_tools: vec![],
            input_modalities: default_input_modalities(),
            prefer_websockets: false,
            used_fallback_model_metadata: false,
        }
    }

    /// Fully-populated personality variables (all three variants set).
    fn personality_variables() -> ModelInstructionsVariables {
        ModelInstructionsVariables {
            personality_default: Some("default".to_string()),
            personality_friendly: Some("friendly".to_string()),
            personality_pragmatic: Some("pragmatic".to_string()),
        }
    }

    #[test]
    fn auto_compact_token_limit_uses_context_default() {
        let mut model = test_model(None);
        model.context_window = Some(100_000);
        model.auto_compact_token_limit = None;

        // No configured limit: derived as 90% of the context window.
        assert_eq!(model.auto_compact_token_limit(), Some(90_000));
    }

    #[test]
    fn auto_compact_token_limit_clamps_to_context_limit() {
        let mut model = test_model(None);
        model.context_window = Some(200_000);
        model.auto_compact_token_limit = Some(250_000);

        // Configured limit exceeds 90% of the window, so it is clamped down.
        assert_eq!(model.auto_compact_token_limit(), Some(180_000));
    }

    #[test]
    fn get_model_instructions_uses_template_when_placeholder_present() {
        let model = test_model(Some(ModelMessages {
            instructions_template: Some("Hello {{ personality }}".to_string()),
            instructions_variables: Some(personality_variables()),
        }));

        let instructions = model.get_model_instructions(Some(Personality::Friendly));

        assert_eq!(instructions, "Hello friendly");
    }

    #[test]
    fn get_model_instructions_always_strips_placeholder() {
        // Only the friendly variant is configured; all other requests must resolve the
        // placeholder to the empty string rather than leave it in the output.
        let model = test_model(Some(ModelMessages {
            instructions_template: Some("Hello\n{{ personality }}".to_string()),
            instructions_variables: Some(ModelInstructionsVariables {
                personality_default: None,
                personality_friendly: Some("friendly".to_string()),
                personality_pragmatic: None,
            }),
        }));
        assert_eq!(
            model.get_model_instructions(Some(Personality::Friendly)),
            "Hello\nfriendly"
        );
        assert_eq!(
            model.get_model_instructions(Some(Personality::Pragmatic)),
            "Hello\n"
        );
        assert_eq!(
            model.get_model_instructions(Some(Personality::None)),
            "Hello\n"
        );
        assert_eq!(model.get_model_instructions(None), "Hello\n");

        // With no variables configured at all, the template still wins and the
        // placeholder is still stripped.
        let model_no_personality = test_model(Some(ModelMessages {
            instructions_template: Some("Hello\n{{ personality }}".to_string()),
            instructions_variables: Some(ModelInstructionsVariables {
                personality_default: None,
                personality_friendly: None,
                personality_pragmatic: None,
            }),
        }));
        assert_eq!(
            model_no_personality.get_model_instructions(Some(Personality::Friendly)),
            "Hello\n"
        );
        assert_eq!(
            model_no_personality.get_model_instructions(Some(Personality::Pragmatic)),
            "Hello\n"
        );
        assert_eq!(
            model_no_personality.get_model_instructions(Some(Personality::None)),
            "Hello\n"
        );
        assert_eq!(model_no_personality.get_model_instructions(None), "Hello\n");
    }

    #[test]
    fn get_model_instructions_falls_back_when_template_is_missing() {
        let model = test_model(Some(ModelMessages {
            instructions_template: None,
            instructions_variables: Some(ModelInstructionsVariables {
                personality_default: None,
                personality_friendly: None,
                personality_pragmatic: None,
            }),
        }));

        let instructions = model.get_model_instructions(Some(Personality::Friendly));

        // No template => base_instructions, regardless of the personality request.
        assert_eq!(instructions, "base");
    }

    #[test]
    fn get_personality_message_returns_default_when_personality_is_none() {
        let personality_template = personality_variables();
        assert_eq!(
            personality_template.get_personality_message(None),
            Some("default".to_string())
        );
    }

    #[test]
    fn get_personality_message() {
        // Fully populated: every request resolves to its configured message.
        let personality_variables = personality_variables();
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Friendly)),
            Some("friendly".to_string())
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Pragmatic)),
            Some("pragmatic".to_string())
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::None)),
            Some(String::new())
        );
        assert_eq!(
            personality_variables.get_personality_message(None),
            Some("default".to_string())
        );

        // Only the default is configured: explicit personalities without messages yield None,
        // while Personality::None still blanks to an empty string.
        let personality_variables = ModelInstructionsVariables {
            personality_default: Some("default".to_string()),
            personality_friendly: None,
            personality_pragmatic: None,
        };
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Friendly)),
            None
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Pragmatic)),
            None
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::None)),
            Some(String::new())
        );
        assert_eq!(
            personality_variables.get_personality_message(None),
            Some("default".to_string())
        );

        // No default configured: an unqualified request yields None.
        let personality_variables = ModelInstructionsVariables {
            personality_default: None,
            personality_friendly: Some("friendly".to_string()),
            personality_pragmatic: Some("pragmatic".to_string()),
        };
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Friendly)),
            Some("friendly".to_string())
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::Pragmatic)),
            Some("pragmatic".to_string())
        );
        assert_eq!(
            personality_variables.get_personality_message(Some(Personality::None)),
            Some(String::new())
        );
        assert_eq!(personality_variables.get_personality_message(None), None);
    }

    #[test]
    fn model_info_defaults_availability_nux_to_none_when_omitted() {
        // Payload deliberately omits availability_nux and web_search_tool_type to
        // exercise their serde defaults.
        let model: ModelInfo = serde_json::from_value(serde_json::json!({
            "slug": "test-model",
            "display_name": "Test Model",
            "description": null,
            "supported_reasoning_levels": [],
            "shell_type": "shell_command",
            "visibility": "list",
            "supported_in_api": true,
            "priority": 1,
            "upgrade": null,
            "base_instructions": "base",
            "model_messages": null,
            "supports_reasoning_summaries": false,
            "default_reasoning_summary": "auto",
            "support_verbosity": false,
            "default_verbosity": null,
            "apply_patch_tool_type": null,
            "truncation_policy": {
                "mode": "bytes",
                "limit": 10000
            },
            "supports_parallel_tool_calls": false,
            "supports_image_detail_original": false,
            "context_window": null,
            "auto_compact_token_limit": null,
            "effective_context_window_percent": 95,
            "experimental_supported_tools": [],
            "input_modalities": ["text", "image"],
            "prefer_websockets": false
        }))
        .expect("deserialize model info");

        assert_eq!(model.availability_nux, None);
        assert!(!model.supports_image_detail_original);
        assert_eq!(model.web_search_tool_type, WebSearchToolType::Text);
    }

    #[test]
    fn model_preset_preserves_availability_nux() {
        let preset = ModelPreset::from(ModelInfo {
            availability_nux: Some(ModelAvailabilityNux {
                message: "Try Spark.".to_string(),
            }),
            ..test_model(None)
        });

        assert_eq!(
            preset.availability_nux,
            Some(ModelAvailabilityNux {
                message: "Try Spark.".to_string(),
            })
        );
    }
}
771}