// language_barrier_core/model.rs

use std::fmt;

/// Common metadata for a chat model that can be addressed through an API.
pub trait ModelInfo: Send + Sync + fmt::Debug {
    /// Size of the model's context window, in tokens.
    fn context_window(&self) -> usize;

    /// Maximum number of tokens the model may emit in one response.
    ///
    /// NOTE: we may want to do something smart here to have this be
    /// context-dependent.  for example if you set the right headers
    /// for anthropic, 3.7 can output 128k instead of 64k.
    fn max_output_tokens(&self) -> usize;
}
14
/// Distinguishes the two published tags of Sonnet 3.5.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Sonnet35Version {
    /// First published tag.
    V1,
    /// Second published tag.
    V2,
}
21
22/// Represents an Anthropic Claude model
23#[derive(Debug, Clone, Copy, PartialEq, Eq)]
24pub enum Claude {
25    Sonnet35 { version: Sonnet35Version },
26    Sonnet37 { use_extended_thinking: bool },
27    Haiku35,
28    Haiku3,
29    Opus3,
30}
31
32impl ModelInfo for Claude {
33    /// All anthropic models have a 200k token context window.
34    fn context_window(&self) -> usize {
35        200_000
36    }
37
38    fn max_output_tokens(&self) -> usize {
39        match self {
40            Self::Sonnet37 {
41                use_extended_thinking: _,
42            } => 64_000,
43            Self::Sonnet35 { version: _ } | Self::Haiku35 => 8192,
44            Self::Haiku3 | Self::Opus3 => 4096,
45        }
46    }
47}
48
/// A Google Gemini model.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Gemini {
    /// Gemini 1.5 Flash
    Flash15,
    /// Gemini 2.0 Flash
    Flash20,
    /// Gemini 2.0 Flash-Lite
    Flash20Lite,
    /// Gemini 2.5 Flash Preview
    Flash25Preview,
}
61
62impl ModelInfo for Gemini {
63    fn context_window(&self) -> usize {
64        // All Gemini Flash models support 1M token context
65        1_048_576
66    }
67
68    fn max_output_tokens(&self) -> usize {
69        match self {
70            Self::Flash15 | Self::Flash20 | Self::Flash20Lite => 8_192,
71            Self::Flash25Preview => 65_536,
72        }
73    }
74}
75
76// Implement the GeminiModelInfo trait from provider/gemini.rs
77impl crate::provider::gemini::GeminiModelInfo for Gemini {
78    fn gemini_model_id(&self) -> String {
79        match self {
80            Self::Flash15 => "gemini-1.5-flash",
81            Self::Flash20 => "gemini-2.0-flash",
82            Self::Flash20Lite => "gemini-2.0-flash-lite",
83            Self::Flash25Preview => "gemini-2.5-flash-preview-04-17",
84        }.to_string()
85    }
86}
87
/// An `OpenAI` GPT model.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GPT {
    /// GPT-4o model
    GPT4o,
    /// GPT-4o-mini model
    GPT4oMini,
    /// GPT-4 Turbo model
    GPT4Turbo,
    /// GPT-3.5 Turbo model
    GPT35Turbo,
}
100
101impl ModelInfo for GPT {
102    fn context_window(&self) -> usize {
103        match self {
104            Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo => 128_000,
105            Self::GPT35Turbo => 16_000,
106        }
107    }
108
109    fn max_output_tokens(&self) -> usize {
110        match self {
111            Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo | Self::GPT35Turbo => 4_096,
112        }
113    }
114}
115
116// Implement the OpenAIModelInfo trait from provider/openai.rs
117impl crate::provider::openai::OpenAIModelInfo for GPT {
118    fn openai_model_id(&self) -> String {
119        match self {
120            Self::GPT4o => "gpt-4o",
121            Self::GPT4oMini => "gpt-4o-mini",
122            Self::GPT4Turbo => "gpt-4-turbo",
123            Self::GPT35Turbo => "gpt-3.5-turbo",
124        }.to_string()
125    }
126}
127
/// A Mistral AI model.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mistral {
    /// Mistral Large
    Large,
    /// Mistral Small
    Small,
    /// Open Mistral Nemo
    Nemo,
    /// Codestral
    Codestral,
    /// Mistral Embed
    Embed,
}
142
143impl ModelInfo for Mistral {
144    fn context_window(&self) -> usize {
145        match self {
146            Self::Large | Self::Small | Self::Nemo => 131_072,  // 131k
147            Self::Codestral => 262_144, // 256k
148            Self::Embed => 8_192,    // 8k
149        }
150    }
151
152    fn max_output_tokens(&self) -> usize {
153        // All Mistral models have the same max output tokens
154        4_096
155    }
156}
157
158// Implement the MistralModelInfo trait from provider/mistral.rs
159impl crate::provider::mistral::MistralModelInfo for Mistral {
160    fn mistral_model_id(&self) -> String {
161        match self {
162            Self::Large => "mistral-large-latest",
163            Self::Small => "mistral-small-latest",
164            Self::Nemo => "open-mistral-nemo",
165            Self::Codestral => "codestral-latest",
166            Self::Embed => "mistral-embed",
167        }.to_string()
168    }
169}