1use crate::llm::Usage;
2
/// How trustworthy the data in a capabilities entry is.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SourceStatus {
    /// Values were read directly from the provider's official documentation.
    Official,
    /// Values were derived from official docs (e.g. tier pricing applied to a
    /// specific SKU) rather than read verbatim; entries carry verify-me notes.
    Derived,
    /// The model's presence is confirmed, but some values (typically pricing)
    /// were not extracted from an official source.
    Unverified,
}
9
/// A single price rate, expressed in USD per one million tokens.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct PricePoint {
    // USD charged per 1,000,000 tokens at this rate.
    pub usd_per_million_tokens: f64,
}
15
16impl PricePoint {
17 #[must_use]
18 pub const fn new(usd_per_million_tokens: f64) -> Self {
19 Self {
20 usd_per_million_tokens,
21 }
22 }
23
24 #[must_use]
25 pub fn estimate_cost_usd(self, tokens: u32) -> f64 {
26 (f64::from(tokens) / 1_000_000.0) * self.usd_per_million_tokens
27 }
28}
29
/// Pricing data for one model; any component may be absent when it could not
/// be sourced.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Pricing {
    // Rate for (uncached) input tokens.
    pub input: Option<PricePoint>,
    // Rate for output tokens.
    pub output: Option<PricePoint>,
    // Rate for cached input tokens, where the provider offers a discount.
    pub cached_input: Option<PricePoint>,
    // Free-form caveats about how these rates apply (e.g. tiered pricing).
    pub notes: Option<&'static str>,
}
37
38impl Pricing {
39 #[must_use]
40 pub const fn flat(input: f64, output: f64) -> Self {
41 Self {
42 input: Some(PricePoint::new(input)),
43 output: Some(PricePoint::new(output)),
44 cached_input: None,
45 notes: None,
46 }
47 }
48
49 #[must_use]
50 pub const fn flat_with_cached(input: f64, output: f64, cached_input: f64) -> Self {
51 Self {
52 input: Some(PricePoint::new(input)),
53 output: Some(PricePoint::new(output)),
54 cached_input: Some(PricePoint::new(cached_input)),
55 notes: None,
56 }
57 }
58
59 #[must_use]
60 pub const fn with_notes(mut self, notes: &'static str) -> Self {
61 self.notes = Some(notes);
62 self
63 }
64
65 #[must_use]
66 pub fn estimate_cost_usd(&self, usage: &Usage) -> Option<f64> {
67 let input = self.input.map(|p| p.estimate_cost_usd(usage.input_tokens));
68 let output = self
69 .output
70 .map(|p| p.estimate_cost_usd(usage.output_tokens));
71 match (input, output) {
72 (Some(input), Some(output)) => Some(input + output),
73 (Some(input), None) => Some(input),
74 (None, Some(output)) => Some(output),
75 (None, None) => None,
76 }
77 }
78}
79
/// A static capability/pricing record for one provider model.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ModelCapabilities {
    // Provider key used for lookups, e.g. "anthropic", "openai", "gemini".
    pub provider: &'static str,
    // Provider-facing model identifier used for lookups.
    pub model_id: &'static str,
    // Maximum context window in tokens, when known.
    pub context_window: Option<u32>,
    // Maximum output tokens per response, when known.
    pub max_output_tokens: Option<u32>,
    // Pricing data, when it could be sourced.
    pub pricing: Option<Pricing>,
    // Whether the model supports thinking/reasoning output.
    pub supports_thinking: bool,
    // Whether the model additionally supports adaptive thinking.
    pub supports_adaptive_thinking: bool,
    // Documentation URL this entry was sourced from.
    pub source_url: &'static str,
    // How trustworthy the sourced values are.
    pub source_status: SourceStatus,
    // Free-form caveats about this entry.
    pub notes: Option<&'static str>,
}
93
94impl ModelCapabilities {
95 #[must_use]
96 pub fn estimate_cost_usd(&self, usage: &Usage) -> Option<f64> {
97 self.pricing
98 .as_ref()
99 .and_then(|p| p.estimate_cost_usd(usage))
100 }
101}
102
// Documentation pages cited by registry entries as their `source_url`.
const ANTHROPIC_MODELS_URL: &str =
    "https://docs.anthropic.com/en/docs/about-claude/models/all-models";
const OPENAI_MODELS_URL: &str = "https://developers.openai.com/api/docs/models";
const OPENAI_PRICING_URL: &str = "https://developers.openai.com/api/docs/pricing";
// Model-specific pages, used when an entry was verified against one model's docs.
const OPENAI_GPT54_URL: &str = "https://developers.openai.com/api/docs/models/gpt-5.4";
const OPENAI_GPT53_CODEX_URL: &str = "https://developers.openai.com/api/docs/models/gpt-5.3-codex";
const GOOGLE_MODELS_URL: &str = "https://ai.google.dev/gemini-api/docs/models";
const GOOGLE_PRICING_URL: &str = "https://ai.google.dev/gemini-api/docs/pricing";
111
/// Hand-maintained registry of per-model capability and pricing data, grouped
/// by provider. Each entry records where its data came from (`source_url`)
/// and how trustworthy it is (`source_status`); see the per-entry `notes`
/// and pricing notes for caveats before relying on values for billing.
const MODEL_CAPABILITIES: &[ModelCapabilities] = &[
    // ---- Anthropic ----
    // Pricing here is tier-level (Opus/Sonnet/Haiku) applied to SKUs, hence
    // `SourceStatus::Derived` and the verify-me pricing notes.
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-opus-4-6",
        context_window: Some(200_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat(15.0, 75.0).with_notes("Anthropic Opus tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: true,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: Some("Current Anthropic docs show this model alongside 200K/128K markers."),
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-sonnet-4-6",
        context_window: Some(200_000),
        max_output_tokens: Some(64_000),
        pricing: Some(Pricing::flat(3.0, 15.0).with_notes("Anthropic Sonnet tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: true,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: Some("Anthropic docs list Sonnet 4.6; user confirmed adaptive thinking support."),
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-sonnet-4-5-20250929",
        context_window: Some(200_000),
        max_output_tokens: Some(64_000),
        pricing: Some(Pricing::flat(3.0, 15.0).with_notes("Anthropic Sonnet tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-haiku-4-5-20251001",
        context_window: Some(200_000),
        max_output_tokens: Some(64_000),
        pricing: Some(Pricing::flat(1.0, 5.0).with_notes("Anthropic Haiku tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-sonnet-4-20250514",
        context_window: Some(200_000),
        max_output_tokens: Some(64_000),
        pricing: Some(Pricing::flat(3.0, 15.0).with_notes("Anthropic Sonnet tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-opus-4-20250514",
        context_window: Some(200_000),
        max_output_tokens: Some(32_000),
        pricing: Some(Pricing::flat(15.0, 75.0).with_notes("Anthropic Opus tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-3-5-sonnet-20241022",
        context_window: Some(200_000),
        max_output_tokens: Some(8_192),
        pricing: Some(Pricing::flat(3.0, 15.0).with_notes("Anthropic Sonnet tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    ModelCapabilities {
        provider: "anthropic",
        model_id: "claude-3-5-haiku-20241022",
        context_window: Some(200_000),
        max_output_tokens: Some(8_192),
        pricing: Some(Pricing::flat(1.0, 5.0).with_notes("Anthropic Haiku tier pricing; verify exact current SKU mapping before billing-critical use.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: ANTHROPIC_MODELS_URL,
        source_status: SourceStatus::Derived,
        notes: None,
    },
    // ---- OpenAI ----
    // A mix of fully-verified entries (pricing page / model pages) and
    // `Unverified` rows where the model is listed but pricing was not pulled.
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.4",
        context_window: Some(1_050_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat_with_cached(2.50, 15.0, 0.25)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_GPT54_URL,
        source_status: SourceStatus::Official,
        notes: Some("OpenAI model docs list 1.05M context, 128K max output, and reasoning.effort support."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.3-codex",
        context_window: Some(400_000),
        max_output_tokens: Some(120_000),
        pricing: Some(Pricing::flat_with_cached(1.50, 6.0, 0.375)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_GPT53_CODEX_URL,
        source_status: SourceStatus::Official,
        notes: Some("OpenAI model docs list Chat Completions and Responses API support plus reasoning.effort levels."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat_with_cached(1.25, 10.0, 0.125)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5-mini",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat_with_cached(0.125, 1.0, 0.0125)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5-nano",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat_with_cached(0.025, 0.20, 0.0025)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.2-instant",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: None,
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model exists in OpenAI docs, but pricing was not extracted from the official pricing page in this pass."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.2-thinking",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: None,
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model exists in OpenAI docs, but pricing was not extracted from the official pricing page in this pass."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.2-pro",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: Some(Pricing::flat(10.50, 84.0)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-5.2-codex",
        context_window: Some(400_000),
        max_output_tokens: Some(128_000),
        pricing: None,
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model presence confirmed from OpenAI docs; pricing not yet extracted in this pass."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "o3",
        context_window: Some(200_000),
        max_output_tokens: Some(100_000),
        pricing: Some(Pricing::flat(1.0, 4.0)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "o3-mini",
        context_window: Some(200_000),
        max_output_tokens: Some(100_000),
        pricing: Some(Pricing::flat(0.55, 2.20)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "o4-mini",
        context_window: Some(200_000),
        max_output_tokens: Some(100_000),
        pricing: Some(Pricing::flat(0.55, 2.20)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "o1",
        context_window: Some(200_000),
        max_output_tokens: Some(100_000),
        pricing: Some(Pricing::flat(7.50, 30.0)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "o1-mini",
        context_window: Some(200_000),
        max_output_tokens: Some(100_000),
        pricing: Some(Pricing::flat(0.55, 2.20)),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output still need clean extraction from models docs."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-4.1",
        context_window: Some(1_000_000),
        max_output_tokens: Some(16_384),
        pricing: Some(Pricing::flat(1.0, 4.0)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context window from model family docs/notes."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-4.1-mini",
        context_window: Some(1_000_000),
        max_output_tokens: Some(16_384),
        pricing: Some(Pricing::flat(0.20, 0.80)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context window from model family docs/notes."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-4.1-nano",
        context_window: Some(1_000_000),
        max_output_tokens: Some(16_384),
        pricing: Some(Pricing::flat(0.05, 0.20)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context window from model family docs/notes."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-4o",
        context_window: Some(128_000),
        max_output_tokens: Some(16_384),
        pricing: Some(Pricing::flat(1.25, 5.0)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output from existing runtime assumptions."),
    },
    ModelCapabilities {
        provider: "openai",
        model_id: "gpt-4o-mini",
        context_window: Some(128_000),
        max_output_tokens: Some(16_384),
        pricing: Some(Pricing::flat(0.075, 0.30)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: OPENAI_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Pricing verified from OpenAI pricing page. Context/max output from existing runtime assumptions."),
    },
    // ---- Google Gemini ----
    // Entries sourced from the pricing page are `Official`; rows sourced only
    // from the models page (no pricing pulled) are `Unverified`.
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-3.1-pro",
        context_window: Some(1_048_576),
        max_output_tokens: Some(65_536),
        pricing: Some(Pricing::flat(2.0, 12.0).with_notes("Official pricing for prompts <= 200K tokens. For prompts > 200K, pricing increases to $4 input / $18 output per 1M tokens.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("SDK model ID omits the preview suffix; pricing sourced from Gemini 3.1 Pro Preview docs."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-3.1-flash-lite-preview",
        context_window: Some(1_048_576),
        max_output_tokens: Some(65_536),
        pricing: None,
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model presence confirmed from Google docs, but pricing was not extracted in this pass."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-3.0-flash",
        context_window: Some(1_048_576),
        max_output_tokens: Some(65_536),
        pricing: None,
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model presence confirmed from Google docs, but pricing was not extracted in this pass."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-3.0-pro",
        context_window: Some(1_048_576),
        max_output_tokens: Some(65_536),
        pricing: None,
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model presence confirmed from Google docs, but pricing was not extracted in this pass."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-2.5-flash",
        context_window: Some(1_000_000),
        max_output_tokens: Some(65_536),
        pricing: Some(Pricing::flat(0.30, 2.50).with_notes("Official text/image/video pricing. Audio input is priced separately at $1.00 / 1M tokens.")),
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: Some("Official docs state output pricing includes thinking tokens."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-2.5-pro",
        context_window: Some(1_000_000),
        max_output_tokens: Some(65_536),
        pricing: None,
        supports_thinking: true,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_MODELS_URL,
        source_status: SourceStatus::Unverified,
        notes: Some("Model presence confirmed from Google docs, but pricing was not extracted in this pass."),
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-2.0-flash",
        context_window: Some(1_000_000),
        max_output_tokens: Some(8_192),
        pricing: Some(Pricing::flat(0.10, 0.40).with_notes("Official text/image/video pricing. Audio input is priced separately at $0.70 / 1M tokens.")),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: None,
    },
    ModelCapabilities {
        provider: "gemini",
        model_id: "gemini-2.0-flash-lite",
        context_window: Some(1_000_000),
        max_output_tokens: Some(8_192),
        pricing: Some(Pricing::flat(0.075, 0.30)),
        supports_thinking: false,
        supports_adaptive_thinking: false,
        source_url: GOOGLE_PRICING_URL,
        source_status: SourceStatus::Official,
        notes: None,
    },
];
537
538#[must_use]
539pub fn get_model_capabilities(
540 provider: &str,
541 model_id: &str,
542) -> Option<&'static ModelCapabilities> {
543 MODEL_CAPABILITIES.iter().find(|caps| {
544 caps.provider.eq_ignore_ascii_case(provider) && caps.model_id.eq_ignore_ascii_case(model_id)
545 })
546}
547
548#[must_use]
549pub fn default_max_output_tokens(provider: &str, model_id: &str) -> Option<u32> {
550 get_model_capabilities(provider, model_id).and_then(|caps| caps.max_output_tokens)
551}
552
/// Returns every entry in the built-in capability registry.
#[must_use]
pub const fn supported_model_capabilities() -> &'static [ModelCapabilities] {
    MODEL_CAPABILITIES
}
557
#[cfg(test)]
mod tests {
    use super::*;

    /// Absolute tolerance for comparing computed USD amounts.
    ///
    /// `f64::EPSILON` is the spacing between floats near 1.0, not a general
    /// tolerance; after a division and multiplication the accumulated
    /// rounding can legitimately exceed it, making EPSILON-based asserts
    /// flaky. A fixed 1e-9 bound is far below any meaningful price delta.
    const PRICE_TOLERANCE: f64 = 1e-9;

    #[test]
    fn test_lookup_anthropic_sonnet_46() {
        let caps = get_model_capabilities("anthropic", "claude-sonnet-4-6").unwrap();
        assert_eq!(caps.context_window, Some(200_000));
        assert_eq!(caps.max_output_tokens, Some(64_000));
        assert!(caps.supports_adaptive_thinking);
    }

    #[test]
    fn test_lookup_anthropic_sonnet_45_disables_adaptive_thinking() {
        let caps = get_model_capabilities("anthropic", "claude-sonnet-4-5-20250929").unwrap();
        assert!(!caps.supports_adaptive_thinking);
    }

    #[test]
    fn test_lookup_is_case_insensitive() {
        // Lookup uses `eq_ignore_ascii_case` on both provider and model id.
        assert!(get_model_capabilities("OpenAI", "GPT-4O").is_some());
    }

    #[test]
    fn test_lookup_unknown_model_returns_none() {
        assert!(get_model_capabilities("openai", "no-such-model").is_none());
    }

    #[test]
    fn test_lookup_openai_pricing() {
        let caps = get_model_capabilities("openai", "gpt-4o").unwrap();
        let pricing = caps.pricing.unwrap();
        assert!((pricing.input.unwrap().usd_per_million_tokens - 1.25).abs() < PRICE_TOLERANCE);
        assert!((pricing.output.unwrap().usd_per_million_tokens - 5.0).abs() < PRICE_TOLERANCE);
    }

    #[test]
    fn test_lookup_openai_gpt54() {
        let caps = get_model_capabilities("openai", "gpt-5.4").unwrap();
        assert_eq!(caps.context_window, Some(1_050_000));
        assert_eq!(caps.max_output_tokens, Some(128_000));
        assert!(caps.supports_thinking);
        assert_eq!(caps.source_status, SourceStatus::Official);
    }

    #[test]
    fn test_lookup_openai_gpt53_codex() {
        let caps = get_model_capabilities("openai", "gpt-5.3-codex").unwrap();
        assert_eq!(caps.context_window, Some(400_000));
        assert_eq!(caps.max_output_tokens, Some(120_000));
        assert!(caps.supports_thinking);
        assert_eq!(caps.source_status, SourceStatus::Official);
    }

    #[test]
    fn test_estimate_cost_usd() {
        // gpt-4o is $1.25/M input and $5.00/M output:
        // 2_000 * 1.25e-6 + 1_000 * 5.0e-6 = 0.0025 + 0.005 = 0.0075.
        let caps = get_model_capabilities("openai", "gpt-4o").unwrap();
        let cost = caps
            .estimate_cost_usd(&Usage {
                input_tokens: 2_000,
                output_tokens: 1_000,
            })
            .unwrap();
        assert!((cost - 0.0075).abs() < PRICE_TOLERANCE);
    }
}
613}