language_barrier_core/chat.rs

use crate::compactor::{ChatHistoryCompactor, DropOldestCompactor};
use crate::message::{Content, Message};
use crate::token::TokenCounter;
use crate::tool::{LlmToolInfo, ToolChoice};
use crate::{Result, ToolDefinition};

/// The main Chat client that users will interact with.
/// All methods return a new instance rather than mutating the existing one,
/// following the immutable builder pattern.
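///
/// # Examples
///
/// A typical builder chain (a minimal sketch; provider wiring is out of scope
/// for this type):
///
/// ```
/// use language_barrier_core::Chat;
///
/// let chat = Chat::default()
///     .with_system_prompt("You are a helpful assistant.")
///     .with_max_output_tokens(1024);
/// assert_eq!(chat.max_output_tokens, 1024);
/// ```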
#[derive(Clone, Debug)]
pub struct Chat {
    // Tunable knobs / state
    pub system_prompt: String,
    pub max_output_tokens: usize,

    // History and token tracking
    pub history: Vec<Message>,
    token_counter: TokenCounter,

    // Registry for type-safe tool definitions (optional)
    pub tools: Option<Vec<LlmToolInfo>>,

    // Tool execution settings
    pub tool_choice: Option<ToolChoice>,
}

impl Default for Chat {
    fn default() -> Self {
        Self {
            system_prompt: String::new(),
            max_output_tokens: 2048,
            history: Vec::new(),
            token_counter: TokenCounter::default(),
            tools: None,
            tool_choice: None,
        }
    }
}

impl Chat {
    /// Sets the system prompt and returns a new instance
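    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// let chat = Chat::default().with_system_prompt("Answer tersely.");
    /// assert_eq!(chat.system_prompt, "Answer tersely.");
    /// ```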
    #[must_use]
    pub fn with_system_prompt(self, prompt: impl Into<String>) -> Self {
        let p = prompt.into();

        // Rebuild the counter from scratch so tokens from any previously set
        // system prompt are not double-counted.
        let mut token_counter = TokenCounter::default();
        token_counter.observe(&p);
        for msg in &self.history {
            Self::observe_message_tokens(&mut token_counter, msg);
        }

        let new_chat = Self {
            system_prompt: p,
            token_counter,
            ..self
        };

        new_chat.trim_to_context_window()
    }

    /// Sets max output tokens and returns a new instance
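    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// let chat = Chat::default().with_max_output_tokens(512);
    /// assert_eq!(chat.max_output_tokens, 512);
    /// ```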
    #[must_use]
    pub fn with_max_output_tokens(self, n: usize) -> Self {
        Self {
            max_output_tokens: n,
            ..self
        }
    }

    /// Sets the conversation history and returns a new instance
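    ///
    /// # Examples
    ///
    /// A sketch of replacing the history wholesale; `Message::user` is a
    /// hypothetical convenience constructor used for illustration:
    ///
    /// ```ignore
    /// // `Message::user` is hypothetical; construct `Message` values however
    /// // this crate actually provides.
    /// let history = vec![Message::user("Hello!"), Message::user("Still there?")];
    /// let chat = Chat::default().with_history(history);
    /// assert_eq!(chat.history.len(), 2);
    /// ```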
    #[must_use]
    pub fn with_history(self, history: Vec<Message>) -> Self {
        // Rebuild the token counter from scratch.
        let mut token_counter = TokenCounter::default();

        // Count tokens in the system prompt.
        token_counter.observe(&self.system_prompt);

        // Count tokens in the message history.
        for msg in &history {
            Self::observe_message_tokens(&mut token_counter, msg);
        }

        let new_chat = Self {
            history,
            token_counter,
            ..self
        };

        new_chat.trim_to_context_window()
    }

    /// Records the token cost of a single message against the given counter.
    ///
    /// Centralizes the per-variant counting logic shared by `with_system_prompt`,
    /// `with_history`, and `add_message`.
    fn observe_message_tokens(token_counter: &mut TokenCounter, msg: &Message) {
        match msg {
            Message::User { content, .. } => {
                if let Content::Text(text) = content {
                    token_counter.observe(text);
                }
            }
            Message::Assistant { content, .. } => {
                if let Some(Content::Text(text)) = content {
                    token_counter.observe(text);
                }
            }
            Message::System { content, .. } | Message::Tool { content, .. } => {
                token_counter.observe(content);
            }
        }
    }

    /// Adds a message to the conversation history and returns a new instance
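    ///
    /// # Examples
    ///
    /// A sketch; as with `with_history`, `Message::user` is a hypothetical
    /// convenience constructor:
    ///
    /// ```ignore
    /// let chat = Chat::default().add_message(Message::user("Hello!")); // hypothetical constructor
    /// assert_eq!(chat.history.len(), 1);
    /// ```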
    #[must_use]
    pub fn add_message(mut self, msg: Message) -> Self {
        // Count tokens for the new message before storing it.
        Self::observe_message_tokens(&mut self.token_counter, &msg);

        self.history.push(msg);

        self.trim_to_context_window()
    }

    /// Alias for `add_message` for backward compatibility
    #[must_use]
    pub fn push_message(self, msg: Message) -> Self {
        self.add_message(msg)
    }

    /// Trims the conversation history to fit within the token budget and returns a new instance
    #[must_use]
    fn trim_to_context_window(mut self) -> Self {
        const MAX_TOKENS: usize = 32_768; // could be made model-specific

        // Create a fresh compactor of the default type.
        // Note: a real implementation would want a way to clone the compactor,
        // or to reconstruct the specific compactor type that was configured.
        let compactor = Box::<DropOldestCompactor>::default();

        // Use the compactor to trim the history in place.
        compactor.compact(&mut self.history, &mut self.token_counter, MAX_TOKENS);

        self
    }

    /// Gets the current token count
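    ///
    /// # Examples
    ///
    /// A minimal sketch; exact counts depend on the `TokenCounter` implementation:
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// let chat = Chat::default().with_system_prompt("You are terse.");
    /// println!("tokens tracked so far: {}", chat.tokens_used());
    /// ```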
    pub fn tokens_used(&self) -> usize {
        self.token_counter.total()
    }

    /// Adds a tool and returns a new instance with the tool added
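    ///
    /// # Examples
    ///
    /// A sketch assuming some `WeatherTool` type (hypothetical, not part of this
    /// crate) that implements `ToolDefinition`:
    ///
    /// ```ignore
    /// // `WeatherTool` is a hypothetical example type.
    /// let chat = Chat::default().with_tool(WeatherTool::default())?;
    /// ```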
    #[must_use = "This returns a new Chat with the tool added"]
    pub fn with_tool(self, tool: impl ToolDefinition) -> Result<Self> {
        let info = LlmToolInfo {
            name: tool.name(),
            description: tool.description(),
            parameters: tool.schema()?,
        };

        let tools = match self.tools {
            Some(mut tools) => {
                tools.push(info);
                Some(tools)
            }
            None => Some(vec![info]),
        };

        Ok(Self { tools, ..self })
    }

    /// Adds multiple tools at once and returns a new instance with the tools added
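    ///
    /// # Examples
    ///
    /// A sketch; `weather_schema` stands in for a JSON schema value obtained
    /// elsewhere (e.g. from `ToolDefinition::schema`):
    ///
    /// ```ignore
    /// let tools = vec![LlmToolInfo {
    ///     name: "weather".to_string(),
    ///     description: "Look up the current weather".to_string(),
    ///     parameters: weather_schema, // assumed JSON schema value
    /// }];
    /// let chat = Chat::default().with_tools(tools);
    /// ```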
    #[must_use = "This returns a new Chat with the tools added"]
    pub fn with_tools(self, tools: Vec<LlmToolInfo>) -> Self {
        let new_tools = match self.tools {
            Some(mut existing_tools) => {
                existing_tools.extend(tools);
                Some(existing_tools)
            }
            None => Some(tools),
        };

        Self {
            tools: new_tools,
            ..self
        }
    }

    /// Sets the tool choice strategy and returns a new instance
    ///
    /// This method allows configuring how the model should choose tools:
    /// - `ToolChoice::Auto` - Model can choose whether to use a tool (default)
    /// - `ToolChoice::Any` - Model must use one of the available tools
    /// - `ToolChoice::None` - Model must not use any tools
    /// - `ToolChoice::Specific(name)` - Model must use the specified tool
    ///
    /// Different providers implement this with slightly different terminology:
    /// - OpenAI/Mistral use "auto", "required", "none"
    /// - Anthropic uses "auto", "any", "none"
    /// - Gemini uses function_calling_config with modes
    ///
    /// The library transparently handles these differences, providing a
    /// consistent API regardless of which provider you're using.
    ///
    /// # Examples
    ///
    /// ```
    /// use language_barrier_core::{Chat, tool::ToolChoice};
    ///
    /// // Require using a tool
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::Any);
    ///
    /// // Specify a tool by name
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::Specific("weather_tool".to_string()));
    ///
    /// // Disable tools for this conversation
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::None);
    /// ```
    #[must_use]
    pub fn with_tool_choice(self, choice: ToolChoice) -> Self {
        Self {
            tool_choice: Some(choice),
            ..self
        }
    }

    /// Removes tool choice configuration and returns a new instance
    ///
    /// This resets to the default behavior, where the model can choose whether to use tools.
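    ///
    /// # Examples
    ///
    /// Reverting to the default behavior:
    ///
    /// ```
    /// use language_barrier_core::{Chat, tool::ToolChoice};
    ///
    /// let chat = Chat::default()
    ///     .with_tool_choice(ToolChoice::Any)
    ///     .without_tool_choice();
    /// assert!(chat.tool_choice.is_none());
    /// ```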
    #[must_use]
    pub fn without_tool_choice(self) -> Self {
        Self {
            tool_choice: None,
            ..self
        }
    }

    /// Returns the most recent message in the chat, if any.
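    ///
    /// # Examples
    ///
    /// An empty chat has no messages:
    ///
    /// ```
    /// use language_barrier_core::Chat;
    ///
    /// assert!(Chat::default().most_recent_message().is_none());
    /// ```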
    pub fn most_recent_message(&self) -> Option<&Message> {
        self.history.last()
    }
}