use crate::compactor::{ChatHistoryCompactor, DropOldestCompactor};
use crate::message::{Content, Message};
use crate::token::TokenCounter;
use crate::tool::{LlmToolInfo, ToolChoice};
use crate::{Result, ToolDefinition};

/// The main Chat client that users will interact with.
/// All methods return a new instance rather than mutating the existing one,
/// following the immutable builder pattern.
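///
/// # Examples
///
/// A minimal sketch of the immutable builder flow:
///
/// ```
/// use language_barrier_core::Chat;
///
/// // Each call consumes the previous value and returns an updated copy.
/// let chat = Chat::default()
///     .with_system_prompt("You are concise.")
///     .with_max_output_tokens(512);
/// assert_eq!(chat.max_output_tokens, 512);
/// ```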
#[derive(Clone, Debug)]
pub struct Chat {
    // Tunable knobs / state
    pub system_prompt: String,
    pub max_output_tokens: usize,
    // History and token tracking
    pub history: Vec<Message>,
    token_counter: TokenCounter,
    // Registry for type-safe tool definitions (optional)
    pub tools: Option<Vec<LlmToolInfo>>,
    // Tool execution settings
    pub tool_choice: Option<ToolChoice>,
}

impl Default for Chat {
    fn default() -> Self {
        Self {
            system_prompt: String::new(),
            max_output_tokens: 2048,
            history: Vec::new(),
            token_counter: TokenCounter::default(),
            tools: None,
            tool_choice: None,
        }
    }
}

impl Chat {
    /// Sets the system prompt and returns a new instance
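    ///
    /// # Examples
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// let chat = Chat::default().with_system_prompt("You are a helpful assistant.");
    /// assert_eq!(chat.system_prompt, "You are a helpful assistant.");
    /// ```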
    #[must_use]
    pub fn with_system_prompt(mut self, prompt: impl Into<String>) -> Self {
        self.system_prompt = prompt.into();
        // Rebuild token counts from scratch via `with_history` so that tokens
        // from a previously set prompt are not counted twice; `with_history`
        // also trims to the context window.
        let history = std::mem::take(&mut self.history);
        self.with_history(history)
    }

    /// Sets max output tokens and returns a new instance
    #[must_use]
    pub fn with_max_output_tokens(self, n: usize) -> Self {
        Self {
            max_output_tokens: n,
            ..self
        }
    }

    /// Sets the history and returns a new instance
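    ///
    /// # Examples
    ///
    /// A sketch of seeding a conversation; `Message::user` here is a
    /// hypothetical constructor, not necessarily this crate's API:
    ///
    /// ```ignore
    /// let chat = Chat::default()
    ///     .with_system_prompt("You are terse.")
    ///     .with_history(vec![
    ///         Message::user("Hello"), // hypothetical constructor
    ///     ]);
    /// // Token counts are rebuilt from scratch for the new history.
    /// assert!(chat.tokens_used() > 0);
    /// ```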
    #[must_use]
    pub fn with_history(self, history: Vec<Message>) -> Self {
        // Create a new token counter from scratch
        let mut token_counter = TokenCounter::default();
        // Count tokens in the system prompt
        token_counter.observe(&self.system_prompt);
        // Count tokens in the message history
        for msg in &history {
            match msg {
                Message::User { content, .. } => {
                    if let Content::Text(text) = content {
                        token_counter.observe(text);
                    }
                }
                Message::Assistant { content, .. } => {
                    if let Some(Content::Text(text)) = content {
                        token_counter.observe(text);
                    }
                }
                Message::System { content, .. } | Message::Tool { content, .. } => {
                    token_counter.observe(content);
                }
            }
        }
        Self {
            history,
            token_counter,
            ..self
        }
        .trim_to_context_window()
    }

    /// Adds a message to the conversation history and returns a new instance
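    ///
    /// # Examples
    ///
    /// A sketch; `Message::user` is a hypothetical constructor:
    ///
    /// ```ignore
    /// let chat = Chat::default().add_message(Message::user("Hi!")); // hypothetical constructor
    /// assert_eq!(chat.history.len(), 1);
    /// ```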
    #[must_use]
    pub fn add_message(mut self, msg: Message) -> Self {
        // `self` is consumed by value, so updating in place still hands the
        // caller a fresh instance, without cloning the history or counter.
        // Count tokens based on message type
        match &msg {
            Message::User { content, .. } => {
                if let Content::Text(text) = content {
                    self.token_counter.observe(text);
                }
            }
            Message::Assistant { content, .. } => {
                if let Some(Content::Text(text)) = content {
                    self.token_counter.observe(text);
                }
            }
            Message::System { content, .. } | Message::Tool { content, .. } => {
                self.token_counter.observe(content);
            }
        }
        self.history.push(msg);
        self.trim_to_context_window()
    }

    /// Alias for `add_message`, kept for backward compatibility
    #[must_use]
    pub fn push_message(self, msg: Message) -> Self {
        self.add_message(msg)
    }

    /// Trims the conversation history to fit within the token budget and returns a new instance
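    ///
    /// One possible way to avoid hard-coding `DropOldestCompactor` below would
    /// be a `box_clone`-style method on the compactor trait; a sketch under
    /// that assumption, not the current API:
    ///
    /// ```ignore
    /// trait ChatHistoryCompactor {
    ///     fn compact(&self, history: &mut Vec<Message>, counter: &mut TokenCounter, max_tokens: usize);
    ///     // Hypothetical addition so a boxed compactor can be duplicated.
    ///     fn box_clone(&self) -> Box<dyn ChatHistoryCompactor>;
    /// }
    ///
    /// impl Clone for Box<dyn ChatHistoryCompactor> {
    ///     fn clone(&self) -> Self {
    ///         self.box_clone()
    ///     }
    /// }
    /// ```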
    #[must_use]
    fn trim_to_context_window(mut self) -> Self {
        const MAX_TOKENS: usize = 32_768; // could be model-specific
        // Create a fresh compactor of the same default type.
        // Note: in a real implementation, you would want a way to clone the
        // compactor, or to properly reconstruct the specific type in use.
        let compactor = Box::<DropOldestCompactor>::default();
        // Use the compactor to trim the history in place.
        compactor.compact(&mut self.history, &mut self.token_counter, MAX_TOKENS);
        self
    }

    /// Gets the current token count
    pub fn tokens_used(&self) -> usize {
        self.token_counter.total()
    }

    /// Adds a tool and returns a new instance with the tool added
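    ///
    /// # Examples
    ///
    /// A sketch; `WeatherTool` is a hypothetical `ToolDefinition` implementor:
    ///
    /// ```ignore
    /// let chat = Chat::default().with_tool(WeatherTool)?; // hypothetical tool type
    /// ```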
    #[must_use = "This returns a new Chat with the tool added"]
    pub fn with_tool(self, tool: impl ToolDefinition) -> Result<Self> {
        let info = LlmToolInfo {
            name: tool.name(),
            description: tool.description(),
            parameters: tool.schema()?,
        };
        let tools = match self.tools {
            Some(mut tools) => {
                tools.push(info);
                Some(tools)
            }
            None => Some(vec![info]),
        };
        Ok(Self { tools, ..self })
    }

    /// Adds multiple tools at once and returns a new instance with the tools added
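    ///
    /// # Examples
    ///
    /// A sketch, assuming `weather_info` and `search_info` are `LlmToolInfo`
    /// values built elsewhere (hypothetical names):
    ///
    /// ```ignore
    /// let chat = Chat::default().with_tools(vec![weather_info, search_info]);
    /// assert_eq!(chat.tools.as_ref().map(Vec::len), Some(2));
    /// ```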
    #[must_use = "This returns a new Chat with the tools added"]
    pub fn with_tools(self, tools: Vec<LlmToolInfo>) -> Self {
        let new_tools = match self.tools {
            Some(mut existing_tools) => {
                existing_tools.extend(tools);
                Some(existing_tools)
            }
            None => Some(tools),
        };
        Self {
            tools: new_tools,
            ..self
        }
    }

    /// Sets the tool choice strategy and returns a new instance
    ///
    /// This method allows configuring how the model should choose tools:
    /// - `ToolChoice::Auto` - Model can choose whether to use a tool (default)
    /// - `ToolChoice::Any` - Model must use one of the available tools
    /// - `ToolChoice::None` - Model must not use any tools
    /// - `ToolChoice::Specific(name)` - Model must use the specified tool
    ///
    /// Different providers implement this with slightly different terminology:
    /// - OpenAI/Mistral use "auto", "required", "none"
    /// - Anthropic uses "auto", "any", "none"
    /// - Gemini uses `function_calling_config` with modes
    ///
    /// The library transparently handles these differences, providing a
    /// consistent API regardless of which provider you're using.
    ///
    /// # Examples
    ///
    /// ```
    /// use language_barrier_core::{Chat, tool::ToolChoice};
    ///
    /// // Require using a tool
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::Any);
    ///
    /// // Specify a tool by name
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::Specific("weather_tool".to_string()));
    ///
    /// // Disable tools for this conversation
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::None);
    /// ```
    #[must_use]
    pub fn with_tool_choice(self, choice: ToolChoice) -> Self {
        Self {
            tool_choice: Some(choice),
            ..self
        }
    }

    /// Removes tool choice configuration and returns a new instance
    ///
    /// This resets to the default behavior, where the model can choose whether to use tools.
    #[must_use]
    pub fn without_tool_choice(self) -> Self {
        Self {
            tool_choice: None,
            ..self
        }
    }

    /// Returns the most recent message in the chat, if any.
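    ///
    /// # Examples
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// let chat = Chat::default();
    /// assert!(chat.most_recent_message().is_none());
    /// ```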
    pub fn most_recent_message(&self) -> Option<&Message> {
        self.history.last()
    }
}