// saorsa_ai/models.rs

//! Model registry for known LLM models.
//!
//! Provides a lookup table of known models with context window sizes,
//! capability flags, and provider associations.

use crate::provider::ProviderKind;

/// Information about a known LLM model.
///
/// Entries are small and `Copy`, so lookup functions hand out cheap
/// by-value copies from the static registry.
///
/// `Eq` is intentionally not derived: the cost fields are `f64`, which
/// only supports `PartialEq`.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ModelInfo {
    /// Model identifier (e.g., "gpt-4o", "claude-sonnet-4").
    pub name: &'static str,
    /// Which provider this model belongs to.
    pub provider: ProviderKind,
    /// Context window size in tokens.
    pub context_window: u32,
    /// Whether this model supports tool/function calling.
    pub supports_tools: bool,
    /// Whether this model supports vision/image inputs.
    pub supports_vision: bool,
    /// Cost per million input tokens in USD (None if unknown).
    pub cost_per_million_input: Option<f64>,
    /// Cost per million output tokens in USD (None if unknown).
    pub cost_per_million_output: Option<f64>,
}
26
27impl ModelInfo {
28    /// Create a new model info entry.
29    pub const fn new(
30        name: &'static str,
31        provider: ProviderKind,
32        context_window: u32,
33        supports_tools: bool,
34        supports_vision: bool,
35        cost_per_million_input: Option<f64>,
36        cost_per_million_output: Option<f64>,
37    ) -> Self {
38        Self {
39            name,
40            provider,
41            context_window,
42            supports_tools,
43            supports_vision,
44            cost_per_million_input,
45            cost_per_million_output,
46        }
47    }
48}
49
50/// Known models registry.
51const KNOWN_MODELS: &[ModelInfo] = &[
52    // ── Anthropic ──
53    ModelInfo {
54        name: "claude-opus-4",
55        provider: ProviderKind::Anthropic,
56        context_window: 200_000,
57        supports_tools: true,
58        supports_vision: true,
59        cost_per_million_input: Some(15.0),
60        cost_per_million_output: Some(75.0),
61    },
62    ModelInfo {
63        name: "claude-sonnet-4",
64        provider: ProviderKind::Anthropic,
65        context_window: 200_000,
66        supports_tools: true,
67        supports_vision: true,
68        cost_per_million_input: Some(3.0),
69        cost_per_million_output: Some(15.0),
70    },
71    ModelInfo {
72        name: "claude-haiku-4",
73        provider: ProviderKind::Anthropic,
74        context_window: 200_000,
75        supports_tools: true,
76        supports_vision: true,
77        cost_per_million_input: Some(0.8),
78        cost_per_million_output: Some(4.0),
79    },
80    ModelInfo {
81        name: "claude-3-5-sonnet",
82        provider: ProviderKind::Anthropic,
83        context_window: 200_000,
84        supports_tools: true,
85        supports_vision: true,
86        cost_per_million_input: Some(3.0),
87        cost_per_million_output: Some(15.0),
88    },
89    ModelInfo {
90        name: "claude-3-5-haiku",
91        provider: ProviderKind::Anthropic,
92        context_window: 200_000,
93        supports_tools: true,
94        supports_vision: true,
95        cost_per_million_input: Some(0.8),
96        cost_per_million_output: Some(4.0),
97    },
98    ModelInfo {
99        name: "claude-3-opus",
100        provider: ProviderKind::Anthropic,
101        context_window: 200_000,
102        supports_tools: true,
103        supports_vision: true,
104        cost_per_million_input: Some(15.0),
105        cost_per_million_output: Some(75.0),
106    },
107    // ── OpenAI ──
108    ModelInfo {
109        name: "gpt-4o",
110        provider: ProviderKind::OpenAi,
111        context_window: 128_000,
112        supports_tools: true,
113        supports_vision: true,
114        cost_per_million_input: Some(2.5),
115        cost_per_million_output: Some(10.0),
116    },
117    ModelInfo {
118        name: "gpt-4o-mini",
119        provider: ProviderKind::OpenAi,
120        context_window: 128_000,
121        supports_tools: true,
122        supports_vision: true,
123        cost_per_million_input: Some(0.15),
124        cost_per_million_output: Some(0.6),
125    },
126    ModelInfo {
127        name: "gpt-4-turbo",
128        provider: ProviderKind::OpenAi,
129        context_window: 128_000,
130        supports_tools: true,
131        supports_vision: true,
132        cost_per_million_input: None,
133        cost_per_million_output: None,
134    },
135    ModelInfo {
136        name: "o1",
137        provider: ProviderKind::OpenAi,
138        context_window: 200_000,
139        supports_tools: true,
140        supports_vision: true,
141        cost_per_million_input: Some(15.0),
142        cost_per_million_output: Some(60.0),
143    },
144    ModelInfo {
145        name: "o3-mini",
146        provider: ProviderKind::OpenAi,
147        context_window: 200_000,
148        supports_tools: true,
149        supports_vision: false,
150        cost_per_million_input: Some(1.1),
151        cost_per_million_output: Some(4.4),
152    },
153    // ── Google Gemini ──
154    ModelInfo {
155        name: "gemini-2.0-flash",
156        provider: ProviderKind::Gemini,
157        context_window: 1_048_576,
158        supports_tools: true,
159        supports_vision: true,
160        cost_per_million_input: Some(0.1),
161        cost_per_million_output: Some(0.4),
162    },
163    ModelInfo {
164        name: "gemini-1.5-pro",
165        provider: ProviderKind::Gemini,
166        context_window: 2_097_152,
167        supports_tools: true,
168        supports_vision: true,
169        cost_per_million_input: Some(1.25),
170        cost_per_million_output: Some(5.0),
171    },
172    ModelInfo {
173        name: "gemini-1.5-flash",
174        provider: ProviderKind::Gemini,
175        context_window: 1_048_576,
176        supports_tools: true,
177        supports_vision: true,
178        cost_per_million_input: Some(0.075),
179        cost_per_million_output: Some(0.3),
180    },
181    // ── Ollama ──
182    ModelInfo {
183        name: "llama3",
184        provider: ProviderKind::Ollama,
185        context_window: 8_192,
186        supports_tools: true,
187        supports_vision: false,
188        cost_per_million_input: None,
189        cost_per_million_output: None,
190    },
191    ModelInfo {
192        name: "llama3.1",
193        provider: ProviderKind::Ollama,
194        context_window: 131_072,
195        supports_tools: true,
196        supports_vision: false,
197        cost_per_million_input: None,
198        cost_per_million_output: None,
199    },
200    ModelInfo {
201        name: "codellama",
202        provider: ProviderKind::Ollama,
203        context_window: 16_384,
204        supports_tools: false,
205        supports_vision: false,
206        cost_per_million_input: None,
207        cost_per_million_output: None,
208    },
209    ModelInfo {
210        name: "mistral",
211        provider: ProviderKind::Ollama,
212        context_window: 32_768,
213        supports_tools: true,
214        supports_vision: false,
215        cost_per_million_input: None,
216        cost_per_million_output: None,
217    },
218    ModelInfo {
219        name: "mixtral",
220        provider: ProviderKind::Ollama,
221        context_window: 32_768,
222        supports_tools: true,
223        supports_vision: false,
224        cost_per_million_input: None,
225        cost_per_million_output: None,
226    },
227    ModelInfo {
228        name: "llava",
229        provider: ProviderKind::Ollama,
230        context_window: 4_096,
231        supports_tools: false,
232        supports_vision: true,
233        cost_per_million_input: None,
234        cost_per_million_output: None,
235    },
236    // ── Groq (OpenAI-Compatible) ──
237    ModelInfo {
238        name: "llama-3.3-70b-versatile",
239        provider: ProviderKind::OpenAiCompatible,
240        context_window: 131_072,
241        supports_tools: true,
242        supports_vision: false,
243        cost_per_million_input: None,
244        cost_per_million_output: None,
245    },
246    ModelInfo {
247        name: "llama-3.1-8b-instant",
248        provider: ProviderKind::OpenAiCompatible,
249        context_window: 131_072,
250        supports_tools: true,
251        supports_vision: false,
252        cost_per_million_input: None,
253        cost_per_million_output: None,
254    },
255    ModelInfo {
256        name: "mixtral-8x7b-32768",
257        provider: ProviderKind::OpenAiCompatible,
258        context_window: 32_768,
259        supports_tools: true,
260        supports_vision: false,
261        cost_per_million_input: None,
262        cost_per_million_output: None,
263    },
264    ModelInfo {
265        name: "gemma2-9b-it",
266        provider: ProviderKind::OpenAiCompatible,
267        context_window: 8_192,
268        supports_tools: true,
269        supports_vision: false,
270        cost_per_million_input: None,
271        cost_per_million_output: None,
272    },
273    // ── xAI (OpenAI-Compatible) ──
274    ModelInfo {
275        name: "grok-2",
276        provider: ProviderKind::OpenAiCompatible,
277        context_window: 131_072,
278        supports_tools: true,
279        supports_vision: true,
280        cost_per_million_input: None,
281        cost_per_million_output: None,
282    },
283    ModelInfo {
284        name: "grok-2-mini",
285        provider: ProviderKind::OpenAiCompatible,
286        context_window: 131_072,
287        supports_tools: true,
288        supports_vision: false,
289        cost_per_million_input: None,
290        cost_per_million_output: None,
291    },
292    // ── Cerebras (OpenAI-Compatible) ──
293    ModelInfo {
294        name: "cerebras-llama3.1-8b",
295        provider: ProviderKind::OpenAiCompatible,
296        context_window: 8_192,
297        supports_tools: false,
298        supports_vision: false,
299        cost_per_million_input: None,
300        cost_per_million_output: None,
301    },
302    ModelInfo {
303        name: "cerebras-llama3.1-70b",
304        provider: ProviderKind::OpenAiCompatible,
305        context_window: 8_192,
306        supports_tools: false,
307        supports_vision: false,
308        cost_per_million_input: None,
309        cost_per_million_output: None,
310    },
311    // ── OpenRouter ──
312    ModelInfo {
313        name: "meta-llama/llama-3.1-405b-instruct",
314        provider: ProviderKind::OpenRouter,
315        context_window: 131_072,
316        supports_tools: true,
317        supports_vision: false,
318        cost_per_million_input: None,
319        cost_per_million_output: None,
320    },
321    ModelInfo {
322        name: "meta-llama/llama-3.1-70b-instruct",
323        provider: ProviderKind::OpenRouter,
324        context_window: 131_072,
325        supports_tools: true,
326        supports_vision: false,
327        cost_per_million_input: None,
328        cost_per_million_output: None,
329    },
330    ModelInfo {
331        name: "anthropic/claude-sonnet-4",
332        provider: ProviderKind::OpenRouter,
333        context_window: 200_000,
334        supports_tools: true,
335        supports_vision: true,
336        cost_per_million_input: Some(3.0),
337        cost_per_million_output: Some(15.0),
338    },
339    ModelInfo {
340        name: "openai/gpt-4o",
341        provider: ProviderKind::OpenRouter,
342        context_window: 128_000,
343        supports_tools: true,
344        supports_vision: true,
345        cost_per_million_input: Some(2.5),
346        cost_per_million_output: Some(10.0),
347    },
348    ModelInfo {
349        name: "google/gemini-2.0-flash",
350        provider: ProviderKind::OpenRouter,
351        context_window: 1_048_576,
352        supports_tools: true,
353        supports_vision: true,
354        cost_per_million_input: Some(0.1),
355        cost_per_million_output: Some(0.4),
356    },
357    // ── Mistral (OpenAI-Compatible) ──
358    ModelInfo {
359        name: "mistral-large-latest",
360        provider: ProviderKind::OpenAiCompatible,
361        context_window: 131_072,
362        supports_tools: true,
363        supports_vision: false,
364        cost_per_million_input: None,
365        cost_per_million_output: None,
366    },
367    ModelInfo {
368        name: "mistral-small-latest",
369        provider: ProviderKind::OpenAiCompatible,
370        context_window: 131_072,
371        supports_tools: true,
372        supports_vision: false,
373        cost_per_million_input: None,
374        cost_per_million_output: None,
375    },
376    ModelInfo {
377        name: "codestral-latest",
378        provider: ProviderKind::OpenAiCompatible,
379        context_window: 32_768,
380        supports_tools: false,
381        supports_vision: false,
382        cost_per_million_input: None,
383        cost_per_million_output: None,
384    },
385];
386
387/// Look up a model by exact name.
388pub fn lookup_model(name: &str) -> Option<ModelInfo> {
389    KNOWN_MODELS.iter().find(|m| m.name == name).copied()
390}
391
392/// Look up a model by prefix match.
393///
394/// Useful for versioned model names (e.g., "claude-sonnet-4-5-20250929"
395/// starts with "claude-sonnet-4"). Returns the first match.
396pub fn lookup_model_by_prefix(name: &str) -> Option<ModelInfo> {
397    KNOWN_MODELS
398        .iter()
399        .find(|m| name.starts_with(m.name))
400        .copied()
401}
402
403/// Get the context window size for a model.
404///
405/// Tries exact match first, then prefix match. Returns `None` for unknown models.
406pub fn get_context_window(model: &str) -> Option<u32> {
407    lookup_model(model)
408        .or_else(|| lookup_model_by_prefix(model))
409        .map(|m| m.context_window)
410}
411
412/// Check if a model supports tool/function calling.
413///
414/// Returns `None` for unknown models.
415pub fn supports_tools(model: &str) -> Option<bool> {
416    lookup_model(model)
417        .or_else(|| lookup_model_by_prefix(model))
418        .map(|m| m.supports_tools)
419}
420
421/// Check if a model supports vision/image inputs.
422///
423/// Returns `None` for unknown models.
424pub fn supports_vision(model: &str) -> Option<bool> {
425    lookup_model(model)
426        .or_else(|| lookup_model_by_prefix(model))
427        .map(|m| m.supports_vision)
428}
429
/// Returns all known models.
///
/// Hands out the full static registry slice; callers can filter by
/// provider or capability flags themselves.
pub fn all_models() -> &'static [ModelInfo] {
    KNOWN_MODELS
}
434
435/// Look up a model using `provider/model` format.
436///
437/// Tries the full input first, then strips the provider prefix and looks
438/// up the bare model name.
439pub fn lookup_by_provider_prefix(input: &str) -> Option<ModelInfo> {
440    lookup_model(input)
441        .or_else(|| lookup_model_by_prefix(input))
442        .or_else(|| {
443            let model_part = input.rsplit('/').next()?;
444            lookup_model(model_part).or_else(|| lookup_model_by_prefix(model_part))
445        })
446}
447
#[cfg(test)]
mod tests {
    use super::*;

    // ── Exact-name lookup ──

    #[test]
    fn lookup_exact_match() {
        let model = lookup_model("gpt-4o");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAi);
            assert_eq!(m.context_window, 128_000);
            assert!(m.supports_tools);
            assert!(m.supports_vision);
        }
    }

    #[test]
    fn lookup_claude_exact() {
        let model = lookup_model("claude-sonnet-4");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::Anthropic);
            assert_eq!(m.context_window, 200_000);
        }
    }

    #[test]
    fn lookup_unknown_returns_none() {
        assert!(lookup_model("nonexistent-model").is_none());
    }

    // ── Prefix lookup (versioned model names) ──

    #[test]
    fn lookup_prefix_match() {
        let model = lookup_model_by_prefix("gpt-4o-2024-08-06");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.name, "gpt-4o");
            assert_eq!(m.context_window, 128_000);
        }
    }

    #[test]
    fn lookup_prefix_claude_versioned() {
        let model = lookup_model_by_prefix("claude-sonnet-4-5-20250929");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.context_window, 200_000);
        }
    }

    #[test]
    fn lookup_prefix_no_match() {
        assert!(lookup_model_by_prefix("nonexistent").is_none());
    }

    // ── Capability/context convenience accessors ──

    #[test]
    fn context_window_exact() {
        assert_eq!(get_context_window("gpt-4o"), Some(128_000));
        assert_eq!(get_context_window("gemini-2.0-flash"), Some(1_048_576));
        assert_eq!(get_context_window("llama3"), Some(8_192));
    }

    #[test]
    fn context_window_prefix_fallback() {
        assert_eq!(
            get_context_window("claude-sonnet-4-5-20250929"),
            Some(200_000)
        );
        assert_eq!(get_context_window("claude-opus-4-20250514"), Some(200_000));
    }

    #[test]
    fn context_window_unknown() {
        assert_eq!(get_context_window("totally-unknown"), None);
    }

    #[test]
    fn supports_tools_check() {
        assert_eq!(supports_tools("gpt-4o"), Some(true));
        assert_eq!(supports_tools("codellama"), Some(false));
        assert_eq!(supports_tools("unknown"), None);
    }

    #[test]
    fn supports_vision_check() {
        assert_eq!(supports_vision("gpt-4o"), Some(true));
        assert_eq!(supports_vision("llama3"), Some(false));
        assert_eq!(supports_vision("llava"), Some(true));
        assert_eq!(supports_vision("unknown"), None);
    }

    // ── Registry-wide invariants ──

    #[test]
    fn all_anthropic_models_200k() {
        for model in KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Anthropic)
        {
            assert_eq!(
                model.context_window, 200_000,
                "Anthropic model {} should have 200k context",
                model.name
            );
        }
    }

    #[test]
    fn gemini_models_large_context() {
        for model in KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Gemini)
        {
            assert!(
                model.context_window >= 1_000_000,
                "Gemini model {} should have 1M+ context, got {}",
                model.name,
                model.context_window
            );
        }
    }

    #[test]
    fn all_models_have_positive_context() {
        for model in KNOWN_MODELS {
            assert!(
                model.context_window > 0,
                "Model {} has zero context window",
                model.name
            );
        }
    }

    // Minimum per-provider entry counts; guards against accidental
    // deletions when the registry is edited.
    #[test]
    fn known_model_count() {
        let anthropic = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Anthropic)
            .count();
        let openai = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::OpenAi)
            .count();
        let gemini = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Gemini)
            .count();
        let ollama = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Ollama)
            .count();
        let compat = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::OpenAiCompatible)
            .count();
        let openrouter = KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::OpenRouter)
            .count();
        assert!(anthropic >= 3, "Need at least 3 Anthropic models");
        assert!(openai >= 3, "Need at least 3 OpenAI models");
        assert!(gemini >= 2, "Need at least 2 Gemini models");
        assert!(ollama >= 3, "Need at least 3 Ollama models");
        assert!(compat >= 8, "Need at least 8 OpenAI-Compatible models");
        assert!(openrouter >= 5, "Need at least 5 OpenRouter models");
    }

    #[test]
    fn all_models_returns_full_list() {
        let models = all_models();
        assert!(models.len() >= 35, "Expected at least 35 known models");
        // Verify it returns the same slice
        assert_eq!(models.len(), KNOWN_MODELS.len());
    }

    // ── provider/model format lookup ──

    #[test]
    fn lookup_by_provider_prefix_exact() {
        let model = lookup_by_provider_prefix("gpt-4o");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.name, "gpt-4o");
            assert_eq!(m.provider, ProviderKind::OpenAi);
        }
    }

    #[test]
    fn lookup_by_provider_prefix_with_slash() {
        // "openai/gpt-4o" is a known OpenRouter model, so exact match wins
        let model = lookup_by_provider_prefix("openai/gpt-4o");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.name, "openai/gpt-4o");
            assert_eq!(m.provider, ProviderKind::OpenRouter);
        }
    }

    #[test]
    fn lookup_by_provider_prefix_strips_unknown_provider() {
        // "custom/gpt-4o" - not a known model, but stripping "custom/" gives "gpt-4o"
        let model = lookup_by_provider_prefix("custom/gpt-4o");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.name, "gpt-4o");
            assert_eq!(m.provider, ProviderKind::OpenAi);
        }
    }

    #[test]
    fn lookup_by_provider_prefix_unknown_returns_none() {
        assert!(lookup_by_provider_prefix("totally/unknown-model").is_none());
    }

    // ── Per-provider spot checks ──

    #[test]
    fn lookup_groq_model() {
        let model = lookup_model("llama-3.3-70b-versatile");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAiCompatible);
            assert_eq!(m.context_window, 131_072);
        }
    }

    #[test]
    fn lookup_xai_model() {
        let model = lookup_model("grok-2");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAiCompatible);
            assert!(m.supports_vision);
        }
    }

    #[test]
    fn lookup_openrouter_model() {
        let model = lookup_model("meta-llama/llama-3.1-405b-instruct");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenRouter);
            assert_eq!(m.context_window, 131_072);
        }
    }

    #[test]
    fn lookup_mistral_model() {
        let model = lookup_model("mistral-large-latest");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAiCompatible);
            assert!(m.supports_tools);
        }
    }

    #[test]
    fn cerebras_models_prefixed() {
        // Cerebras models are prefixed to avoid collision with Ollama llama3.1
        let model = lookup_model("cerebras-llama3.1-8b");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAiCompatible);
        }
        let model = lookup_model("cerebras-llama3.1-70b");
        assert!(model.is_some());
        if let Some(m) = model {
            assert_eq!(m.provider, ProviderKind::OpenAiCompatible);
        }
    }

    // ── Cost metadata invariants ──

    #[test]
    fn anthropic_models_have_costs() {
        for model in KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Anthropic)
        {
            assert!(
                model.cost_per_million_input.is_some(),
                "Anthropic model {} should have input cost",
                model.name
            );
            assert!(
                model.cost_per_million_output.is_some(),
                "Anthropic model {} should have output cost",
                model.name
            );
        }
    }

    #[test]
    fn openai_flagship_models_have_costs() {
        let gpt4o = lookup_model("gpt-4o");
        assert!(gpt4o.is_some());
        if let Some(m) = gpt4o {
            assert_eq!(m.cost_per_million_input, Some(2.5));
            assert_eq!(m.cost_per_million_output, Some(10.0));
        }
    }

    #[test]
    fn ollama_models_have_no_costs() {
        for model in KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Ollama)
        {
            assert!(
                model.cost_per_million_input.is_none(),
                "Ollama model {} should have no input cost",
                model.name
            );
            assert!(
                model.cost_per_million_output.is_none(),
                "Ollama model {} should have no output cost",
                model.name
            );
        }
    }

    #[test]
    fn gemini_models_have_costs() {
        for model in KNOWN_MODELS
            .iter()
            .filter(|m| m.provider == ProviderKind::Gemini)
        {
            assert!(
                model.cost_per_million_input.is_some(),
                "Gemini model {} should have input cost",
                model.name
            );
            assert!(
                model.cost_per_million_output.is_some(),
                "Gemini model {} should have output cost",
                model.name
            );
        }
    }
}