language_barrier_core/
model.rs1use std::fmt;
2
/// Provider-agnostic metadata shared by every chat model in this crate.
///
/// Implementors are simple `Copy` enums, so the `Send + Sync + Debug`
/// bounds are trivially satisfied and let trait objects flow across
/// threads and into logs.
pub trait ModelInfo: Send + Sync + fmt::Debug {
    /// Total size, in tokens, of the model's context window.
    fn context_window(&self) -> usize;

    /// Maximum number of tokens the model may emit in a single response.
    fn max_output_tokens(&self) -> usize;
}
14
/// Distinguishes the two released revisions of Claude 3.5 Sonnet.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Sonnet35Version {
    /// First Claude 3.5 Sonnet release.
    V1,
    /// Updated Claude 3.5 Sonnet release.
    V2,
}
21
/// Anthropic Claude model family.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Claude {
    /// Claude 3.5 Sonnet; `version` selects the v1 or v2 revision.
    Sonnet35 { version: Sonnet35Version },
    /// Claude 3.7 Sonnet; the flag toggles extended-thinking mode.
    /// NOTE(review): the flag does not affect `ModelInfo` answers below —
    /// presumably it changes request construction elsewhere; confirm.
    Sonnet37 { use_extended_thinking: bool },
    /// Claude 3.5 Haiku.
    Haiku35,
    /// Claude 3 Haiku.
    Haiku3,
    /// Claude 3 Opus.
    Opus3,
}
31
32impl ModelInfo for Claude {
33 fn context_window(&self) -> usize {
35 200_000
36 }
37
38 fn max_output_tokens(&self) -> usize {
39 match self {
40 Self::Sonnet37 {
41 use_extended_thinking: _,
42 } => 64_000,
43 Self::Sonnet35 { version: _ } | Self::Haiku35 => 8192,
44 Self::Haiku3 | Self::Opus3 => 4096,
45 }
46 }
47}
48
/// Google Gemini model family.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Gemini {
    /// Gemini 1.5 Flash.
    Flash15,
    /// Gemini 2.0 Flash.
    Flash20,
    /// Gemini 2.0 Flash-Lite.
    Flash20Lite,
    /// Gemini 2.5 Flash preview (maps to the dated `-04-17` preview id).
    Flash25Preview,
}
61
62impl ModelInfo for Gemini {
63 fn context_window(&self) -> usize {
64 1_048_576
66 }
67
68 fn max_output_tokens(&self) -> usize {
69 match self {
70 Self::Flash15 | Self::Flash20 | Self::Flash20Lite => 8_192,
71 Self::Flash25Preview => 65_536,
72 }
73 }
74}
75
76impl crate::provider::gemini::GeminiModelInfo for Gemini {
78 fn gemini_model_id(&self) -> String {
79 match self {
80 Self::Flash15 => "gemini-1.5-flash",
81 Self::Flash20 => "gemini-2.0-flash",
82 Self::Flash20Lite => "gemini-2.0-flash-lite",
83 Self::Flash25Preview => "gemini-2.5-flash-preview-04-17",
84 }.to_string()
85 }
86}
87
/// OpenAI GPT model family.
///
/// NOTE(review): Rust naming convention would spell this `Gpt`
/// (acronyms as one word), but renaming is a breaking change for
/// callers, so the existing name is kept.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GPT {
    /// GPT-4o.
    GPT4o,
    /// GPT-4o mini.
    GPT4oMini,
    /// GPT-4 Turbo.
    GPT4Turbo,
    /// GPT-3.5 Turbo.
    GPT35Turbo,
}
100
101impl ModelInfo for GPT {
102 fn context_window(&self) -> usize {
103 match self {
104 Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo => 128_000,
105 Self::GPT35Turbo => 16_000,
106 }
107 }
108
109 fn max_output_tokens(&self) -> usize {
110 match self {
111 Self::GPT4o | Self::GPT4oMini | Self::GPT4Turbo | Self::GPT35Turbo => 4_096,
112 }
113 }
114}
115
116impl crate::provider::openai::OpenAIModelInfo for GPT {
118 fn openai_model_id(&self) -> String {
119 match self {
120 Self::GPT4o => "gpt-4o",
121 Self::GPT4oMini => "gpt-4o-mini",
122 Self::GPT4Turbo => "gpt-4-turbo",
123 Self::GPT35Turbo => "gpt-3.5-turbo",
124 }.to_string()
125 }
126}
127
/// Mistral AI model family.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mistral {
    /// Mistral Large (maps to `mistral-large-latest`).
    Large,
    /// Mistral Small (maps to `mistral-small-latest`).
    Small,
    /// Open Mistral Nemo.
    Nemo,
    /// Codestral, the code-generation model.
    Codestral,
    /// `mistral-embed`, the embedding model.
    /// NOTE(review): an embedding model grouped with chat models —
    /// confirm callers don't request completions from it.
    Embed,
}
142
143impl ModelInfo for Mistral {
144 fn context_window(&self) -> usize {
145 match self {
146 Self::Large | Self::Small | Self::Nemo => 131_072, Self::Codestral => 262_144, Self::Embed => 8_192, }
150 }
151
152 fn max_output_tokens(&self) -> usize {
153 4_096
155 }
156}
157
158impl crate::provider::mistral::MistralModelInfo for Mistral {
160 fn mistral_model_id(&self) -> String {
161 match self {
162 Self::Large => "mistral-large-latest",
163 Self::Small => "mistral-small-latest",
164 Self::Nemo => "open-mistral-nemo",
165 Self::Codestral => "codestral-latest",
166 Self::Embed => "mistral-embed",
167 }.to_string()
168 }
169}