1use super::memory::AgentMemoryManager;
4use super::{AgentConfig, ConversationMemory, ConversationMode, ToolExecutor};
5use crate::error::RragResult;
6
7#[cfg(feature = "rexis-llm-client")]
8use rexis_llm::{ChatMessage, ChatResponse, Client};
9
10use tracing::{debug, error, info};
11
/// A tool-using LLM agent that iterates between model calls and tool
/// execution until a final answer is produced.
///
/// NOTE(review): `Client`/`ChatMessage` are imported under
/// `#[cfg(feature = "rexis-llm-client")]` but used unconditionally here —
/// presumably this whole module is gated on that feature; confirm.
pub struct Agent {
    /// LLM client used for chat completions (with tool definitions attached).
    llm_client: Client,

    /// Executes the tool calls requested by the model.
    tool_executor: ToolExecutor,

    /// In-process conversation buffer, seeded with the system prompt at
    /// construction; used when no `memory_manager` is configured.
    legacy_memory: ConversationMemory,

    /// Optional persistent memory backend; when present it takes precedence
    /// over `legacy_memory` for stateful conversations.
    memory_manager: Option<AgentMemoryManager>,

    /// Agent behavior settings (system prompt, iteration budget, mode, …).
    config: AgentConfig,
}
29
30impl Agent {
31 pub fn new(
33 llm_client: Client,
34 tool_executor: ToolExecutor,
35 config: AgentConfig,
36 ) -> RragResult<Self> {
37 let mut legacy_memory = ConversationMemory::with_max_length(config.max_conversation_length);
38
39 legacy_memory.add_message(ChatMessage::system(config.system_prompt.clone()));
41
42 Ok(Self {
43 llm_client,
44 tool_executor,
45 legacy_memory,
46 memory_manager: None,
47 config,
48 })
49 }
50
51 pub fn new_with_memory(
53 llm_client: Client,
54 tool_executor: ToolExecutor,
55 memory_manager: AgentMemoryManager,
56 config: AgentConfig,
57 ) -> RragResult<Self> {
58 let mut legacy_memory = ConversationMemory::with_max_length(config.max_conversation_length);
59
60 legacy_memory.add_message(ChatMessage::system(config.system_prompt.clone()));
62
63 Ok(Self {
64 llm_client,
65 tool_executor,
66 legacy_memory,
67 memory_manager: Some(memory_manager),
68 config,
69 })
70 }
71
72 pub async fn run(&mut self, user_input: impl Into<String>) -> RragResult<String> {
77 let input = user_input.into();
78
79 info!(user_input = %input, "Agent received user input");
80
81 if self.config.verbose {
82 debug!(input = %input, "Processing user query");
83 }
84
85 let mut conversation = match self.config.conversation_mode {
87 ConversationMode::Stateless => {
88 vec![
90 ChatMessage::system(self.config.system_prompt.clone()),
91 ChatMessage::user(input.clone()),
92 ]
93 }
94 ConversationMode::Stateful => {
95 if let Some(ref memory_manager) = self.memory_manager {
97 memory_manager
99 .add_conversation_message(ChatMessage::user(input.clone()))
100 .await?;
101
102 memory_manager.get_conversation_messages().await?
104 } else {
105 self.legacy_memory
107 .add_message(ChatMessage::user(input.clone()));
108 self.legacy_memory.to_messages()
109 }
110 }
111 };
112
113 for iteration in 1..=self.config.max_iterations {
115 debug!(
116 iteration,
117 max_iterations = self.config.max_iterations,
118 "Agent iteration"
119 );
120
121 let response = self.llm_step(&conversation).await?;
123
124 if let Some(tool_calls) = &response.tool_calls {
126 if !tool_calls.is_empty() {
127 info!(
128 tool_count = tool_calls.len(),
129 tools = ?tool_calls.iter().map(|t| &t.function.name).collect::<Vec<_>>(),
130 "Agent requesting tool calls"
131 );
132
133 let mut assistant_msg = ChatMessage::assistant(response.content.clone());
135 assistant_msg.tool_calls = Some(tool_calls.clone());
136 conversation.push(assistant_msg);
137
138 let tool_results = self.tool_executor.execute_tool_calls(tool_calls);
140
141 for result in tool_results {
143 if let rexis_llm::MessageContent::Text(ref content) = result.content {
144 debug!(tool_result = %content, "Tool execution completed");
145 }
146 conversation.push(result);
147 }
148
149 continue;
151 }
152 }
153
154 info!(
156 response = %response.content,
157 iterations = iteration,
158 "Agent generated final answer"
159 );
160
161 if self.config.conversation_mode == ConversationMode::Stateful {
163 if let Some(ref memory_manager) = self.memory_manager {
164 memory_manager
166 .add_conversation_message(ChatMessage::assistant(response.content.clone()))
167 .await?;
168 } else {
169 self.legacy_memory
171 .add_message(ChatMessage::assistant(response.content.clone()));
172 }
173 }
174
175 return Ok(response.content);
176 }
177
178 error!(
180 max_iterations = self.config.max_iterations,
181 "Agent exceeded maximum iterations without reaching final answer"
182 );
183
184 Err(crate::error::RragError::Agent {
185 agent_id: "default".to_string(),
186 message: format!(
187 "Agent exceeded maximum iterations ({})",
188 self.config.max_iterations
189 ),
190 source: None,
191 })
192 }
193
194 async fn llm_step(&self, conversation: &[ChatMessage]) -> RragResult<ChatResponse> {
196 let tools = self.tool_executor.registry().tool_definitions();
198
199 debug!(
200 tool_count = tools.len(),
201 message_count = conversation.len(),
202 "Calling LLM with tools"
203 );
204
205 let response = self
207 .llm_client
208 .chat_completion_with_tools(conversation.to_vec(), tools)
209 .await?;
210
211 debug!(
212 content_length = response.content.len(),
213 has_tool_calls = response.tool_calls.is_some(),
214 tool_call_count = response.tool_calls.as_ref().map(|t| t.len()).unwrap_or(0),
215 "LLM response received"
216 );
217
218 Ok(response)
219 }
220
221 pub async fn reset(&mut self) -> RragResult<()> {
223 if let Some(ref memory_manager) = self.memory_manager {
224 memory_manager.clear_conversation().await?;
225 } else {
226 self.legacy_memory.clear();
227 }
228 Ok(())
229 }
230
231 pub fn get_conversation(&self) -> &[ChatMessage] {
233 self.legacy_memory.get_messages()
234 }
235
236 pub async fn get_conversation_async(&self) -> RragResult<Vec<ChatMessage>> {
238 if let Some(ref memory_manager) = self.memory_manager {
239 memory_manager.get_conversation_messages().await
240 } else {
241 Ok(self.legacy_memory.to_messages())
242 }
243 }
244
245 pub fn config(&self) -> &AgentConfig {
247 &self.config
248 }
249
250 pub fn config_mut(&mut self) -> &mut AgentConfig {
252 &mut self.config
253 }
254
255 pub fn memory(&self) -> Option<&AgentMemoryManager> {
257 self.memory_manager.as_ref()
258 }
259
260 pub fn memory_mut(&mut self) -> Option<&mut AgentMemoryManager> {
262 self.memory_manager.as_mut()
263 }
264}