// language_barrier_core/model.rs

use std::fmt;
2
/// Token-limit metadata shared by every model identifier in this module.
///
/// Implementors are small `Copy` enums, so the `Send + Sync + Copy` bounds
/// keep them cheap to pass by value and safe to share across threads.
pub trait ModelInfo: Send + Sync + fmt::Debug + Clone + Copy {
    /// Context window size in tokens
    fn context_window(&self) -> usize;

    /// Maximum number of output tokens
    /// NOTE: we may want to do something smart here to have this be
    /// context-dependent.  for example if you set the right headers
    /// for anthropic, 3.7 can output 128k instead of 64k.
    fn max_output_tokens(&self) -> usize;
}
14
/// Sonnet 3.5 has two published tags.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Sonnet35Version {
    /// The original Claude 3.5 Sonnet tag.
    V1,
    /// The revised Claude 3.5 Sonnet tag.
    V2,
}
21
/// Represents an Anthropic Claude model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Claude {
    /// Claude 3.5 Sonnet; the published tag is chosen via [`Sonnet35Version`].
    Sonnet35 { version: Sonnet35Version },
    /// Claude 3.7 Sonnet, optionally with extended thinking enabled.
    Sonnet37 { use_extended_thinking: bool },
    /// Claude 3.5 Haiku
    Haiku35,
    /// Claude 3 Haiku
    Haiku3,
    /// Claude 3 Opus
    Opus3,
}
31
/// Represents an Ollama model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Ollama {
    /// Llama 3 models, parameterized by size
    Llama3 { size: OllamaModelSize },
    /// LlaVA multimodal models
    Llava,
    /// Mistral models, parameterized by size
    Mistral { size: OllamaModelSize },
    /// Custom model with specified name (a locally pulled tag, for example)
    Custom { name: &'static str },
}
44
/// Standard model sizes for Ollama models
///
/// Variants carry a leading underscore because Rust identifiers
/// cannot start with a digit.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OllamaModelSize {
    /// 8B parameters
    _8B,
    /// 7B parameters
    _7B,
    /// 3B parameters
    _3B,
    /// 1B parameters
    _1B,
}
57
58impl Default for Claude {
59    fn default() -> Self {
60        Self::Opus3
61    }
62}
63
64impl ModelInfo for Claude {
65    /// All anthropic models have a 200k token context window.
66    fn context_window(&self) -> usize {
67        200_000
68    }
69
70    fn max_output_tokens(&self) -> usize {
71        match self {
72            Self::Sonnet37 {
73                use_extended_thinking: _,
74            } => 64_000,
75            Self::Sonnet35 { version: _ } | Self::Haiku35 => 8192,
76            Self::Haiku3 | Self::Opus3 => 4096,
77        }
78    }
79}
80
/// Represents a Google Gemini model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Gemini {
    /// Gemini 1.5 Flash
    Flash15,
    /// Gemini 2.0 Flash
    Flash20,
    /// Gemini 2.0 Flash-Lite
    Flash20Lite,
    /// Gemini 2.5 Flash Preview (a date-pinned preview tag)
    Flash25Preview,
}
93
94impl ModelInfo for Gemini {
95    fn context_window(&self) -> usize {
96        // All Gemini Flash models support 1M token context
97        1_048_576
98    }
99
100    fn max_output_tokens(&self) -> usize {
101        match self {
102            Self::Flash15 | Self::Flash20 | Self::Flash20Lite => 8_192,
103            Self::Flash25Preview => 65_536,
104        }
105    }
106}
107
108// Implement the GeminiModelInfo trait from provider/gemini.rs
109impl crate::provider::gemini::GeminiModelInfo for Gemini {
110    fn gemini_model_id(&self) -> String {
111        match self {
112            Self::Flash15 => "gemini-1.5-flash",
113            Self::Flash20 => "gemini-2.0-flash",
114            Self::Flash20Lite => "gemini-2.0-flash-lite",
115            Self::Flash25Preview => "gemini-2.5-flash-preview-04-17",
116        }
117        .to_string()
118    }
119}
120
/// Represents an `OpenAI` GPT model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OpenAi {
    /// GPT-4o model
    GPT4o,
    /// GPT-4o-mini model
    GPT4oMini,
    /// GPT-4 Turbo model
    GPT4Turbo,
    /// GPT-3.5 Turbo model
    GPT35Turbo,
    /// o1 reasoning model
    O1,
    /// o1-mini reasoning model
    O1Mini,
    /// o1-pro reasoning model
    O1Pro,
    /// o3 reasoning model
    O3,
    /// o3-mini reasoning model
    O3Mini,
    /// o4-mini reasoning model
    O4Mini,
}
141
142impl ModelInfo for OpenAi {
143    fn context_window(&self) -> usize {
144        match self {
145            Self::O1Mini | Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo => 128_000,
146            Self::GPT35Turbo => 16_000,
147            _ => 200_000,
148        }
149    }
150
151    fn max_output_tokens(&self) -> usize {
152        match self {
153            Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo | Self::GPT35Turbo => 4_096,
154            Self::O1Mini => 65_536,
155            _ => 100_000,
156        }
157    }
158}
159
160// Implement the OpenAIModelInfo trait from provider/openai.rs
161impl crate::provider::openai::OpenAIModelInfo for OpenAi {
162    fn openai_model_id(&self) -> String {
163        match self {
164            Self::GPT4o => "gpt-4o",
165            Self::GPT4oMini => "gpt-4o-mini",
166            Self::GPT4Turbo => "gpt-4-turbo",
167            Self::GPT35Turbo => "gpt-3.5-turbo",
168            Self::O4Mini => "o4-mini-2025-04-16",
169            Self::O3 => "o3-2025-04-16",
170            Self::O3Mini => "o3-mini-2025-01-31",
171            Self::O1 => "o1-2024-12-17",
172            Self::O1Mini => "o1-mini-2024-09-12",
173            Self::O1Pro => "o1-pro-2025-03-19",
174        }
175        .to_string()
176    }
177}
178
/// Represents a Mistral AI model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mistral {
    /// Mistral Large
    Large,
    /// Mistral Small
    Small,
    /// Open Mistral Nemo
    Nemo,
    /// Codestral (code-focused model, per its product name)
    Codestral,
    /// Mistral Embed (embedding model, per its product name)
    Embed,
}
193
194impl ModelInfo for Mistral {
195    fn context_window(&self) -> usize {
196        match self {
197            Self::Large | Self::Small | Self::Nemo => 131_072, // 131k
198            Self::Codestral => 262_144,                        // 256k
199            Self::Embed => 8_192,                              // 8k
200        }
201    }
202
203    fn max_output_tokens(&self) -> usize {
204        // All Mistral models have the same max output tokens
205        4_096
206    }
207}
208
209// Implement the MistralModelInfo trait from provider/mistral.rs
210impl crate::provider::mistral::MistralModelInfo for Mistral {
211    fn mistral_model_id(&self) -> String {
212        match self {
213            Self::Large => "mistral-large-latest",
214            Self::Small => "mistral-small-latest",
215            Self::Nemo => "open-mistral-nemo",
216            Self::Codestral => "codestral-latest",
217            Self::Embed => "mistral-embed",
218        }
219        .to_string()
220    }
221}
222
223impl Default for Ollama {
224    fn default() -> Self {
225        Self::Llama3 { size: OllamaModelSize::_7B }
226    }
227}
228
229impl ModelInfo for Ollama {
230    fn context_window(&self) -> usize {
231        match self {
232            Self::Llama3 { size } => match size {
233                OllamaModelSize::_8B => 32_768,
234                OllamaModelSize::_7B => 32_768,
235                OllamaModelSize::_3B => 16_384,
236                OllamaModelSize::_1B => 8_192,
237            },
238            Self::Llava => 8_192,
239            Self::Mistral { size } => match size {
240                OllamaModelSize::_8B => 32_768,
241                OllamaModelSize::_7B => 16_384,
242                OllamaModelSize::_3B => 8_192,
243                OllamaModelSize::_1B => 4_096,
244            },
245            Self::Custom { .. } => 8_192, // Default for unknown models
246        }
247    }
248
249    fn max_output_tokens(&self) -> usize {
250        match self {
251            Self::Llama3 { .. } => 4_096,
252            Self::Llava => 4_096,
253            Self::Mistral { .. } => 4_096,
254            Self::Custom { .. } => 4_096, // Default for unknown models
255        }
256    }
257}