use crate::client::{LLMClient, LLMConfig, LLMResponse, UniversalLLMClient};
use crate::react::{ReActConfig, ReActEngine, ReActResult};
use crate::types::{Message as LlmMessage, ToolSpec};
use async_trait::async_trait;
use enki_core::action::{ActionInvoker, ActionMetadata, ToolInvoker};
use enki_core::agent::{Agent, AgentContext};
use enki_core::error::Result;
use enki_core::memory::{Memory, MemoryEntry, MemoryQuery};
use enki_core::message::Message as EnkiMessage;
use serde_json::{json, Value};
use std::sync::Arc;

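/// An LLM-backed agent that maintains a running conversation, exposes its
/// registered tools to the model, and optionally persists information through
/// a pluggable [`Memory`] backend.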
pub struct LlmAgent {
    name: String,
    llm_client: UniversalLLMClient,
    llm_config: LLMConfig,
    system_prompt: String,
    conversation_history: Vec<LlmMessage>,
    tool_invoker: ToolInvoker,
    memory: Option<Arc<dyn Memory>>,
    react_config: Option<ReActConfig>,
}

impl LlmAgent {
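    /// Entry point for builder-style construction.
    ///
    /// A minimal usage sketch; the `provider::model` string follows the form
    /// exercised in the tests below:
    ///
    /// ```ignore
    /// let agent = LlmAgent::builder("assistant", "ollama::llama2")
    ///     .with_system_prompt("You are a terse assistant.")
    ///     .with_temperature(0.2)
    ///     .build()?;
    /// ```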
    pub fn builder(name: impl Into<String>, model: impl Into<String>) -> LlmAgentBuilder {
        LlmAgentBuilder::new(name, model)
    }

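    /// Constructs an agent directly from an existing [`LLMConfig`]. When a
    /// memory backend is supplied, the built-in `save_memory` and
    /// `search_memory` tools are registered automatically.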
    pub fn new_with_config(
        name: impl Into<String>,
        config: LLMConfig,
        system_prompt: impl Into<String>,
        memory: Option<Arc<dyn Memory>>,
    ) -> Result<Self> {
        let client = UniversalLLMClient::new_with_config(config.clone())
            .map_err(enki_core::error::Error::MeshError)?;

        let mut agent = Self {
            name: name.into(),
            llm_client: client,
            llm_config: config,
            system_prompt: system_prompt.into(),
            conversation_history: Vec::new(),
            tool_invoker: ToolInvoker::default(),
            memory,
            react_config: None,
        };

        if agent.memory.is_some() {
            agent.register_memory_tools();
        }

        Ok(agent)
    }

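    /// Registers the built-in memory tools so the model can persist and
    /// retrieve information on its own initiative.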
    fn register_memory_tools(&mut self) {
        if let Some(memory) = &self.memory {
            self.tool_invoker
                .register(Box::new(SaveMemoryAction::new(memory.clone())));
            self.tool_invoker
                .register(Box::new(SearchMemoryAction::new(memory.clone())));
        }
    }

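    /// Converts an action's metadata into the tool spec format sent to the LLM.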
    fn action_to_tool_spec(action: &ActionMetadata) -> ToolSpec {
        ToolSpec {
            name: action.name.clone(),
            description: action.description.clone(),
            input_schema: action.input_schema.clone(),
        }
    }

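    /// Enables ReAct-style reasoning; must be called before
    /// [`Self::send_message_react`].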
    pub fn with_react(&mut self, config: ReActConfig) {
        self.react_config = Some(config);
    }

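    /// Runs the message through the ReAct engine and returns the structured
    /// result. Errors if ReAct mode has not been enabled via [`Self::with_react`].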
    pub async fn send_message_react(
        &mut self,
        message: impl Into<String>,
        ctx: &mut AgentContext,
    ) -> Result<ReActResult> {
        let content = message.into();

        let react_config = self.react_config.clone().ok_or_else(|| {
            enki_core::error::Error::MeshError(
                "ReAct mode not enabled. Call with_react() first".to_string(),
            )
        })?;

        let engine = ReActEngine::new(react_config, None);

        let result = engine
            .execute(
                content,
                &self.llm_client,
                &self.llm_config,
                self.memory.as_ref(),
                ctx,
            )
            .await?;

        Ok(result)
    }

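    /// Appends a user message to the conversation and returns the assistant's
    /// final reply, executing any intermediate tool calls along the way.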
    pub async fn send_message_and_get_response(
        &mut self,
        message: impl Into<String>,
        ctx: &mut AgentContext,
    ) -> Result<String> {
        let content = message.into();

        self.conversation_history.push(LlmMessage {
            role: "user".to_string(),
            content,
        });

        self.process_with_llm(ctx).await
    }

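    /// Returns the content of the most recent assistant message, if any.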
    pub fn last_response(&self) -> Option<String> {
        self.conversation_history
            .iter()
            .rev()
            .find(|m| m.role == "assistant")
            .map(|m| m.content.clone())
    }

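    /// Core completion loop: prepends the system prompt on the first turn,
    /// sends the history plus tool specs to the LLM, executes any requested
    /// tool calls, and recurses (via `Box::pin`, since async recursion needs a
    /// boxed future) until the model returns a final answer.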
    async fn process_with_llm(&mut self, ctx: &mut AgentContext) -> Result<String> {
        if self.conversation_history.len() == 1 {
            self.conversation_history.insert(
                0,
                LlmMessage {
                    role: "system".to_string(),
                    content: self.system_prompt.clone(),
                },
            );
        }

        let actions = self.tool_invoker.list_actions();
        let tools: Vec<ToolSpec> = actions.iter().map(Self::action_to_tool_spec).collect();

        let response: LLMResponse<String> = self
            .llm_client
            .complete::<LLMResponse<String>, String>(&self.conversation_history, &tools)
            .await
            .map_err(enki_core::error::Error::MeshError)?;

        if !response.is_complete && !response.tool_calls.is_empty() {
            let mut tool_results = Vec::new();

            for tool_call in response.tool_calls {
                ctx.emit(enki_core::AgentEvent::ToolCalled {
                    agent: self.name.clone(),
                    tool: tool_call.name.clone(),
                    input: tool_call.input.to_string(),
                });

                let result = self
                    .tool_invoker
                    .invoke(&tool_call.name, ctx, tool_call.input.clone())
                    .await?;

                ctx.emit(enki_core::AgentEvent::ToolResult {
                    agent: self.name.clone(),
                    tool: tool_call.name.clone(),
                    result: result.to_string(),
                });

                tool_results.push(format!("Tool {}: {}", tool_call.name, result));
            }

            let tool_result_message = LlmMessage {
                role: "tool".to_string(),
                content: tool_results.join("\n"),
            };
            self.conversation_history.push(tool_result_message);

            return Box::pin(self.process_with_llm(ctx)).await;
        }

        self.conversation_history.push(LlmMessage {
            role: "assistant".to_string(),
            content: response.content.clone(),
        });

        Ok(response.content)
    }
}

#[async_trait]
impl Agent for LlmAgent {
    fn name(&self) -> String {
        self.name.clone()
    }

    async fn on_message(&mut self, msg: EnkiMessage, ctx: &mut AgentContext) -> Result<()> {
        let content = match &msg.content {
            enki_core::message::MessageContent::Text(text) => text.clone(),
            enki_core::message::MessageContent::Binary(bytes) => String::from_utf8(bytes.clone())
                .map_err(|e| {
                    enki_core::error::Error::MeshError(format!(
                        "Invalid UTF-8 in binary message: {}",
                        e
                    ))
                })?,
            enki_core::message::MessageContent::Json(value) => value.to_string(),
            _ => {
                return Err(enki_core::error::Error::MeshError(
                    "Unsupported message content type".to_string(),
                ))
            }
        };

        self.conversation_history.push(LlmMessage {
            role: "user".to_string(),
            content,
        });

        self.process_with_llm(ctx).await?;
        Ok(())
    }

    async fn on_generic_message(
        &mut self,
        msg: enki_core::message::GenericMessage,
        ctx: &mut AgentContext,
    ) -> Result<enki_core::message::GenericResponse> {
        let response_text = self.send_message_and_get_response(msg.content, ctx).await?;
        Ok(enki_core::message::GenericResponse::new(response_text))
    }

    fn tool_invoker(&self) -> Option<&ToolInvoker> {
        Some(&self.tool_invoker)
    }

    fn tool_invoker_mut(&mut self) -> Option<&mut ToolInvoker> {
        Some(&mut self.tool_invoker)
    }
}

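/// Fluent builder for [`LlmAgent`], obtained via [`LlmAgent::builder`].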
pub struct LlmAgentBuilder {
    name: String,
    model: String,
    api_key: Option<String>,
    system_prompt: String,
    temperature: Option<f32>,
    max_tokens: Option<u32>,
    memory: Option<Arc<dyn Memory>>,
}

impl LlmAgentBuilder {
    pub fn new(name: impl Into<String>, model: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            model: model.into(),
            api_key: None,
            system_prompt: "You are a helpful AI assistant.".to_string(),
            temperature: None,
            max_tokens: None,
            memory: None,
        }
    }

    pub fn with_api_key(mut self, api_key: impl Into<String>) -> Self {
        self.api_key = Some(api_key.into());
        self
    }

    pub fn with_system_prompt(mut self, prompt: impl Into<String>) -> Self {
        self.system_prompt = prompt.into();
        self
    }

    pub fn with_temperature(mut self, temperature: f32) -> Self {
        self.temperature = Some(temperature);
        self
    }

    pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
        self.max_tokens = Some(max_tokens);
        self
    }

    pub fn with_memory(mut self, memory: Arc<dyn Memory>) -> Self {
        self.memory = Some(memory);
        self
    }

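    /// Assembles an [`LLMConfig`] from the builder fields and constructs the agent.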
    pub fn build(self) -> Result<LlmAgent> {
        let mut config = LLMConfig::new(self.model);

        if let Some(api_key) = self.api_key {
            config = config.with_api_key(api_key);
        }

        if let Some(temperature) = self.temperature {
            config = config.with_temperature(temperature);
        }

        if let Some(max_tokens) = self.max_tokens {
            config = config.with_max_tokens(max_tokens);
        }

        LlmAgent::new_with_config(self.name, config, self.system_prompt, self.memory)
    }
}

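/// Built-in tool that saves a string to the agent's memory backend.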
struct SaveMemoryAction {
    memory: Arc<dyn Memory>,
    metadata: ActionMetadata,
}

impl SaveMemoryAction {
    fn new(memory: Arc<dyn Memory>) -> Self {
        Self {
            memory,
            metadata: ActionMetadata {
                name: "save_memory".to_string(),
                description: "Save information to memory for later retrieval.".to_string(),
                input_schema: json!({
                    "type": "object",
                    "properties": {
                        "content": {
                            "type": "string",
                            "description": "The information to save."
                        }
                    },
                    "required": ["content"]
                }),
                output_schema: Some(json!({
                    "type": "object",
                    "properties": {
                        "status": { "type": "string" },
                        "id": { "type": "string" }
                    }
                })),
            },
        }
    }
}

#[async_trait]
impl ActionInvoker for SaveMemoryAction {
    async fn execute(&self, _ctx: &mut AgentContext, inputs: Value) -> Result<Value> {
        let content = inputs
            .get("content")
            .and_then(|v| v.as_str())
            .ok_or_else(|| {
                enki_core::error::Error::ActionExecutionError(
                    "Missing 'content' in inputs".to_string(),
                )
            })?;

        let entry = MemoryEntry::new(content);
        let id = self.memory.store(entry).await?;

        Ok(json!({ "status": "success", "id": id }))
    }

    fn metadata(&self) -> &ActionMetadata {
        &self.metadata
    }
}

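/// Built-in tool that runs a semantic search over the agent's memory backend.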
struct SearchMemoryAction {
    memory: Arc<dyn Memory>,
    metadata: ActionMetadata,
}

impl SearchMemoryAction {
    fn new(memory: Arc<dyn Memory>) -> Self {
        Self {
            memory,
            metadata: ActionMetadata {
                name: "search_memory".to_string(),
                description: "Search memory for relevant information.".to_string(),
                input_schema: json!({
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "The query to search for."
                        },
                        "limit": {
                            "type": "integer",
                            "description": "Max number of results (default 5)."
                        }
                    },
                    "required": ["query"]
                }),
                output_schema: Some(json!({
                    "type": "object",
                    "properties": {
                        "results": {
                            "type": "array",
                            "items": { "type": "string" }
                        }
                    }
                })),
            },
        }
    }
}

#[async_trait]
impl ActionInvoker for SearchMemoryAction {
    async fn execute(&self, _ctx: &mut AgentContext, inputs: Value) -> Result<Value> {
        let query_str = inputs
            .get("query")
            .and_then(|v| v.as_str())
            .ok_or_else(|| {
                enki_core::error::Error::ActionExecutionError(
                    "Missing 'query' in inputs".to_string(),
                )
            })?;

        let limit = inputs
            .get("limit")
            .and_then(|v| v.as_u64())
            .map(|v| v as usize)
            .unwrap_or(5);

        let mut query = MemoryQuery::new().with_limit(limit);
        query.semantic_query = Some(query_str.to_string());

        let results = self.memory.search(query).await?;
        let result_strings: Vec<String> = results.into_iter().map(|e| e.content).collect();

        Ok(json!({ "results": result_strings }))
    }

    fn metadata(&self) -> &ActionMetadata {
        &self.metadata
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_llm_agent_builder() {
        let builder = LlmAgent::builder("test", "ollama::llama2")
            .with_system_prompt("Custom prompt")
            .with_temperature(0.7)
            .with_max_tokens(1000);

        assert_eq!(builder.name, "test");
        assert_eq!(builder.model, "ollama::llama2");
        assert_eq!(builder.system_prompt, "Custom prompt");
        assert_eq!(builder.temperature, Some(0.7));
        assert_eq!(builder.max_tokens, Some(1000));
    }
}