use super::{ModelCapabilities, ThinkingSupport};
use crate::Provider;
use crate::model_profile::catalog::ModelTier;
/// Reasoning-effort levels accepted by the recent GPT-5.x chat models
/// (gpt-5.5, gpt-5.5-pro, gpt-5.4, gpt-5.4-mini). Includes "none" to
/// allow disabling reasoning entirely.
const GPT5_RECENT_EFFORT: &[&str] = &["none", "low", "medium", "high", "xhigh"];
/// Effort levels for gpt-5.3-codex. Same ladder as the recent set but
/// without "none" — presumably the codex model cannot run with reasoning
/// fully disabled; confirm against provider docs before extending.
const GPT5_3_CODEX_EFFORT: &[&str] = &["low", "medium", "high", "xhigh"];
/// Realtime models expose no reasoning-effort knob at all.
const REALTIME_NO_EFFORT: &[&str] = &[];
/// Static capability table for the OpenAI model lineup.
///
/// One `ModelCapabilities` entry per model id. Flags and limits are
/// asserted here as data; NOTE(review): the numeric limits (context
/// windows, output caps, call timeouts) mirror externally published
/// provider limits — re-verify them when bumping model versions.
pub const CAPABILITIES: &[ModelCapabilities] = &[
// --- gpt-5.5: current flagship chat model ------------------------------
ModelCapabilities {
id: "gpt-5.5",
provider: Provider::OpenAI,
display_name: "GPT-5.5",
tier: ModelTier::Recommended,
model_family: "gpt-5",
context_window: 1_050_000,
max_output_tokens: 128_000,
context_window_beta: None,
max_output_tokens_beta: None,
vision: true,
image_tool_results: false,
inline_video: false,
realtime: false,
supports_temperature: false,
supports_top_p: false,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: true,
effort_levels: GPT5_RECENT_EFFORT,
supports_web_search: true,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: true,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
// --- gpt-5.5-pro: same limits/flags as gpt-5.5, but with a much longer
// per-call timeout (2 h vs 10 min) — presumably to accommodate long
// high-effort runs; confirm intent if changing.
ModelCapabilities {
id: "gpt-5.5-pro",
provider: Provider::OpenAI,
display_name: "GPT-5.5 Pro",
tier: ModelTier::Recommended,
model_family: "gpt-5",
context_window: 1_050_000,
max_output_tokens: 128_000,
context_window_beta: None,
max_output_tokens_beta: None,
vision: true,
image_tool_results: false,
inline_video: false,
realtime: false,
supports_temperature: false,
supports_top_p: false,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: true,
effort_levels: GPT5_RECENT_EFFORT,
supports_web_search: true,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: true,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(7200), // 2-hour budget; all other entries use 600 s
},
// --- gpt-5.4: previous-generation chat model, kept at Supported tier ----
ModelCapabilities {
id: "gpt-5.4",
provider: Provider::OpenAI,
display_name: "GPT-5.4",
tier: ModelTier::Supported,
model_family: "gpt-5",
context_window: 1_050_000,
max_output_tokens: 128_000,
context_window_beta: None,
max_output_tokens_beta: None,
vision: true,
image_tool_results: false,
inline_video: false,
realtime: false,
supports_temperature: false,
supports_top_p: false,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: true,
effort_levels: GPT5_RECENT_EFFORT,
supports_web_search: true,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: true,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
// --- gpt-5.4-mini: small-context variant (128k/16k). The only entry with
// supports_compaction: true — presumably because the tight window makes
// history compaction necessary; verify before replicating elsewhere.
ModelCapabilities {
id: "gpt-5.4-mini",
provider: Provider::OpenAI,
display_name: "GPT-5.4 Mini",
tier: ModelTier::Supported,
model_family: "gpt-5",
context_window: 128_000,
max_output_tokens: 16_384,
context_window_beta: None,
max_output_tokens_beta: None,
vision: true,
image_tool_results: false,
inline_video: false,
realtime: false,
supports_temperature: false,
supports_top_p: false,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: true,
effort_levels: GPT5_RECENT_EFFORT,
supports_web_search: true,
supports_inference_geo: false,
supports_compaction: true,
supports_structured_output: true,
supports_legacy_penalties: true,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
// --- gpt-5.3-codex: code-focused model in its own "codex" family.
// Uses the codex effort ladder (no "none") and, unlike the chat models
// above, has web search disabled.
ModelCapabilities {
id: "gpt-5.3-codex",
provider: Provider::OpenAI,
display_name: "GPT-5.3 Codex",
tier: ModelTier::Supported,
model_family: "codex",
context_window: 400_000,
max_output_tokens: 128_000,
context_window_beta: None,
max_output_tokens_beta: None,
vision: true,
image_tool_results: false,
inline_video: false,
realtime: false,
supports_temperature: false,
supports_top_p: false,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: true,
effort_levels: GPT5_3_CODEX_EFFORT,
supports_web_search: false,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: true,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
// --- gpt-realtime-1.5: realtime model. Unlike the gpt-5 family it takes
// sampling params (temperature/top_p), has no vision, no reasoning, and
// no effort levels.
ModelCapabilities {
id: "gpt-realtime-1.5",
provider: Provider::OpenAI,
display_name: "GPT Realtime 1.5",
tier: ModelTier::Recommended,
model_family: "gpt-realtime",
context_window: 128_000,
max_output_tokens: 4_096,
context_window_beta: None,
max_output_tokens_beta: None,
vision: false,
image_tool_results: false,
inline_video: false,
realtime: true,
supports_temperature: true,
supports_top_p: true,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: false,
effort_levels: REALTIME_NO_EFFORT,
supports_web_search: false,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: false,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
// --- gpt-realtime: legacy alias. Flags/limits intentionally mirror
// gpt-realtime-1.5 above (only id, display name, and tier differ);
// keep the two entries in sync when editing either.
ModelCapabilities {
id: "gpt-realtime",
provider: Provider::OpenAI,
display_name: "GPT Realtime (legacy alias)",
tier: ModelTier::Supported,
model_family: "gpt-realtime",
context_window: 128_000,
max_output_tokens: 4_096,
context_window_beta: None,
max_output_tokens_beta: None,
vision: false,
image_tool_results: false,
inline_video: false,
realtime: true,
supports_temperature: true,
supports_top_p: true,
supports_top_k: false,
thinking: ThinkingSupport::None,
supports_reasoning: false,
effort_levels: REALTIME_NO_EFFORT,
supports_web_search: false,
supports_inference_geo: false,
supports_compaction: false,
supports_structured_output: true,
supports_legacy_penalties: false,
supports_thinking_budget_legacy: false,
beta_headers: &[],
call_timeout_secs: Some(600),
},
];