ceylon_next/agent.rs
//! Agent module providing the core Agent functionality.
//!
//! This module contains the [`Agent`] struct which is the main entry point for creating
//! and running AI agents with goal-oriented capabilities, memory, and tool integration.
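//!
//! # Quick Start
//!
//! A minimal end-to-end sketch combining [`Agent::new`], [`Agent::with_system_prompt`],
//! and [`Agent::run`] (it assumes an OpenAI API key is available in the environment,
//! as described in [`Agent::new`]):
//!
//! ```rust,no_run
//! use ceylon_next::agent::Agent;
//! use ceylon_next::tasks::TaskRequest;
//!
//! #[tokio::main]
//! async fn main() {
//!     // Create an agent and give it a custom system prompt.
//!     let mut agent = Agent::new("Assistant", "openai::gpt-4");
//!     agent.with_system_prompt("You are a helpful coding assistant specializing in Rust.");
//!
//!     // Run a task and print the result.
//!     let task = TaskRequest::new("Hello, how are you?");
//!     let response = agent.run(task).await;
//!     println!("{:?}", response.result());
//! }
//! ```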

use crate::llm::types::Message;
use crate::llm::{LLMClient, LLMConfig, LLMResponse, ToolCall, UniversalLLMClient};
use crate::memory::{InMemoryStore, Memory, MemoryEntry};
use crate::tasks::{OutputData, TaskRequest, TaskResponse};
use crate::tools::ToolTrait;
use crate::tools::invoker::ToolInvoker;
use crate::types::AgentStatus;
use std::sync::Arc;

// Import goal structures (you'll need to add `mod goal;` to your lib.rs)
use crate::goal::{Goal, GoalAnalysis, GoalAnalyzer, GoalStatus};

// ============================================
// Agent Configuration
// ============================================

/// Configuration for an AI Agent.
///
/// Controls various aspects of agent behavior including retry logic,
/// timeouts, and goal analysis capabilities.
///
/// # Examples
///
/// ```rust
/// use ceylon_next::agent::AgentConfig;
///
/// // Create default configuration
/// let config = AgentConfig::default();
///
/// // Create custom configuration
/// let mut config = AgentConfig::new(5, 120);
/// config.with_goal_analysis(false);
/// ```
#[derive(Clone)]
pub struct AgentConfig {
    max_retries: u64,
    timeout: u64,
    /// Should the agent analyze goals before starting?
    analyze_goals: bool,
}

impl Default for AgentConfig {
    fn default() -> Self {
        Self {
            max_retries: 3,
            timeout: 60,
            analyze_goals: true, // Enable goal analysis by default
        }
    }
}

impl AgentConfig {
    /// Creates a new `AgentConfig` with specified retry and timeout settings.
    ///
    /// # Arguments
    ///
    /// * `max_retries` - Maximum number of retries for failed operations
    /// * `timeout` - Timeout in seconds for agent operations
    ///
    /// # Examples
    ///
    /// ```rust
    /// use ceylon_next::agent::AgentConfig;
    ///
    /// let config = AgentConfig::new(5, 120);
    /// ```
    pub fn new(max_retries: u64, timeout: u64) -> Self {
        Self {
            max_retries,
            timeout,
            analyze_goals: true,
        }
    }

    /// Enables or disables automatic goal analysis for tasks.
    ///
    /// When enabled, the agent will automatically analyze incoming tasks
    /// and break them down into structured goals with sub-goals and success criteria.
    ///
    /// # Arguments
    ///
    /// * `enabled` - Whether to enable goal analysis
    ///
    /// # Examples
    ///
    /// ```rust
    /// use ceylon_next::agent::AgentConfig;
    ///
    /// let mut config = AgentConfig::default();
    /// config.with_goal_analysis(false);
    /// ```
    pub fn with_goal_analysis(&mut self, enabled: bool) -> &mut Self {
        self.analyze_goals = enabled;
        self
    }
}

// ============================================
// Agent
// ============================================

/// An AI Agent with goal-oriented capabilities, memory, and tool integration.
///
/// `Agent` is the core component of the Ceylon framework. It manages:
/// - LLM interactions for generating responses
/// - Goal analysis and tracking
/// - Conversation history and memory
/// - Tool execution and management
///
/// # Examples
///
/// ```rust,no_run
/// use ceylon_next::agent::Agent;
/// use ceylon_next::tasks::TaskRequest;
///
/// #[tokio::main]
/// async fn main() {
///     let mut agent = Agent::new("Assistant", "openai::gpt-4");
///
///     let task = TaskRequest::new("Hello, how are you?");
///     let response = agent.run(task).await;
///
///     println!("{:?}", response.result());
/// }
/// ```
#[derive(Clone)]
pub struct Agent {
    id: String,
    name: String,
    model: String,
    system_prompt: String,
    config: AgentConfig,
    status: AgentStatus,
    llm_client: Box<UniversalLLMClient>,
    tool_invoker: ToolInvoker,
    memory: Arc<dyn Memory>,
    /// Current goal the agent is working on
    current_goal: Option<Goal>,
}

impl Agent {
    /// Creates a new `Agent` with the specified name and LLM model.
    ///
    /// # Arguments
    ///
    /// * `name` - The name of the agent (used in system prompts)
    /// * `model` - The LLM model to use (e.g., "openai::gpt-4", "claude-3-opus", "llama2")
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// let agent = Agent::new("MyAssistant", "openai::gpt-4");
    /// ```
    ///
    /// # Supported Models
    ///
    /// - OpenAI: "openai::gpt-4", "gpt-3.5-turbo"
    /// - Anthropic: "claude-3-opus", "claude-3-sonnet"
    /// - Ollama: "llama2", "mistral"
    ///
    /// # Panics
    ///
    /// Panics if the model format is invalid or if an API key is required but not found
    /// in environment variables. Use [`Agent::new_with_config`] to provide API keys explicitly.
    pub fn new(name: &str, model: &str) -> Self {
        let id = uuid::Uuid::new_v4().to_string();
        let llm_client = Box::new(
            UniversalLLMClient::new(model, None)
                .unwrap_or_else(|e| panic!("Failed to create LLM client: {}", e)),
        );
        let system_prompt = Self::create_default_system_prompt(name);

        Self {
            id,
            name: name.into(),
            model: model.into(),
            system_prompt,
            config: AgentConfig::default(),
            status: AgentStatus::Stopped,
            llm_client,
            tool_invoker: ToolInvoker::new(),
            memory: Arc::new(InMemoryStore::new()),
            current_goal: None,
        }
    }

    /// Creates a new `Agent` with the specified name and LLM configuration.
    ///
    /// This method allows you to provide comprehensive LLM configuration including
    /// API keys, temperature, max tokens, and other provider-specific settings.
    ///
    /// # Arguments
    ///
    /// * `name` - The name of the agent (used in system prompts)
    /// * `llm_config` - LLM configuration with model, API key, and other settings
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::llm::LLMConfig;
    ///
    /// // Create agent with explicit API key
    /// let config = LLMConfig::new("openai::gpt-4")
    ///     .with_api_key("your-api-key")
    ///     .with_temperature(0.7)
    ///     .with_max_tokens(2048);
    ///
    /// let agent = Agent::new_with_config("Assistant", config).unwrap();
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The model format is invalid (should be "provider::model-name")
    /// - An API key is required but not provided or found in environment variables
    /// - The LLM provider fails to initialize
    pub fn new_with_config(name: &str, llm_config: crate::llm::LLMConfig) -> Result<Self, String> {
        let id = uuid::Uuid::new_v4().to_string();
        let llm_client = Box::new(UniversalLLMClient::new_with_config(llm_config.clone())?);
        let system_prompt = Self::create_default_system_prompt(name);

        Ok(Self {
            id,
            name: name.into(),
            model: llm_config.model.clone(),
            system_prompt,
            config: AgentConfig::default(),
            status: AgentStatus::Stopped,
            llm_client,
            tool_invoker: ToolInvoker::new(),
            memory: Arc::new(InMemoryStore::new()),
            current_goal: None,
        })
    }

    /// Sets a custom memory implementation for the agent.
    ///
    /// By default, agents use [`InMemoryStore`]. You can provide a custom
    /// implementation of the [`Memory`] trait for persistent storage.
    ///
    /// # Arguments
    ///
    /// * `memory` - An `Arc`-wrapped implementation of the [`Memory`] trait
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::memory::InMemoryStore;
    /// use std::sync::Arc;
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// // Swap in a fresh in-memory store (the default backend). Any custom
    /// // `Memory` implementation, e.g. a Redis-backed store, is passed the same way:
    /// // agent.with_memory(Arc::new(RedisMemory::new()));
    /// agent.with_memory(Arc::new(InMemoryStore::new()));
    /// ```
    pub fn with_memory(&mut self, memory: Arc<dyn Memory>) -> &mut Self {
        self.memory = memory;
        self
    }

    /// Retrieves conversation history from memory.
    ///
    /// # Arguments
    ///
    /// * `limit` - Optional limit on the number of conversations to retrieve.
    ///   If `None`, returns all history.
    ///
    /// # Returns
    ///
    /// A vector of [`MemoryEntry`] objects sorted by recency (newest first).
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// // Get the last 5 conversations
    /// let recent = agent.get_history(Some(5)).await;
    ///
    /// // Get all history
    /// let all = agent.get_history(None).await;
    /// # }
    /// ```
    pub async fn get_history(&self, limit: Option<usize>) -> Vec<MemoryEntry> {
        match limit {
            Some(n) => self
                .memory
                .get_recent(&self.id, n)
                .await
                .unwrap_or_default(),
            None => self
                .memory
                .get_agent_history(&self.id)
                .await
                .unwrap_or_default(),
        }
    }

    /// Searches conversation history for messages containing the query string.
    ///
    /// Performs a case-insensitive text search across all stored messages.
    ///
    /// # Arguments
    ///
    /// * `query` - The search query string
    ///
    /// # Returns
    ///
    /// A vector of [`MemoryEntry`] objects containing the query.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// let results = agent.search_memory("Python").await;
    /// println!("Found {} conversations about Python", results.len());
    /// # }
    /// ```
    pub async fn search_memory(&self, query: &str) -> Vec<MemoryEntry> {
        self.memory
            .search(&self.id, query)
            .await
            .unwrap_or_default()
    }

    /// Clears all conversation history for this agent.
    ///
    /// # Returns
    ///
    /// `Ok(())` if successful, `Err(String)` with an error message if it failed.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// if let Err(e) = agent.clear_memory().await {
    ///     eprintln!("Failed to clear memory: {}", e);
    /// }
    /// # }
    /// ```
    pub async fn clear_memory(&self) -> Result<(), String> {
        self.memory.clear_agent_memory(&self.id).await
    }

    /// Returns the current goal the agent is working on, if any.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::tasks::TaskRequest;
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// let task = TaskRequest::new("Build a web server");
    /// agent.run(task).await;
    ///
    /// if let Some(goal) = agent.get_current_goal() {
    ///     println!("Current goal: {}", goal.description);
    /// }
    /// # }
    /// ```
    pub fn get_current_goal(&self) -> Option<&Goal> {
        self.current_goal.as_ref()
    }

    /// Manually sets a goal for the agent to work on.
    ///
    /// This is useful when you want to provide a pre-structured goal
    /// instead of having the agent analyze the task automatically.
    ///
    /// # Arguments
    ///
    /// * `goal` - The goal to set
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::goal::Goal;
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// let mut goal = Goal::new("Create a REST API".to_string());
    /// goal.add_sub_goal("Design endpoints".to_string(), 1);
    /// goal.add_sub_goal("Implement handlers".to_string(), 2);
    ///
    /// agent.set_goal(goal);
    /// ```
    pub fn set_goal(&mut self, goal: Goal) {
        self.current_goal = Some(goal);
    }

    fn create_default_system_prompt(agent_name: &str) -> String {
        format!(
            "You are {}, an intelligent AI assistant with goal-oriented capabilities.

Your role:
- Understand the user's ultimate goal, not just their immediate question
- Break down complex tasks into clear sub-goals
- Track progress towards completion
- Know when you've successfully completed a task

When working on tasks:
1. First understand: What is the user really trying to achieve?
2. Define success: How will we know when we're done?
3. Break it down: What are the steps to get there?
4. Execute: Work through each step systematically
5. Verify: Check that success criteria are met before finishing

When to use tools:
- Use tools when you need external information or actions
- Always use the most appropriate tool for the task
- If a tool fails, try an alternative approach
- Explain what you're doing when using tools

HOW TO USE TOOLS:
If your model supports native tool calling, use it. Otherwise, use this format:

USE_TOOL: tool_name
{{\"parameter1\": \"value1\", \"parameter2\": \"value2\"}}
---

Example:
USE_TOOL: file_saver
{{\"filename\": \"hello.py\", \"content\": \"print('Hello, World!')\"}}
---

Your personality:
- Professional but friendly
- Patient and helpful
- Honest about your limitations
- Thorough and goal-focused
- Always double-check your work

Remember: Complete the entire goal, not just part of it. Think step by step and track your progress.",
            agent_name
        )
    }

    /// Sets a custom system prompt for the agent.
    ///
    /// The system prompt defines the agent's behavior, personality, and instructions.
    /// By default, a goal-oriented system prompt is used.
    ///
    /// # Arguments
    ///
    /// * `prompt` - The system prompt text
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    /// agent.with_system_prompt("You are a helpful coding assistant specializing in Rust.");
    /// ```
    pub fn with_system_prompt(&mut self, prompt: &str) -> &mut Self {
        self.system_prompt = prompt.to_string();
        self
    }

    /// Returns the current system prompt.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    ///
    /// let agent = Agent::new("Assistant", "openai::gpt-4");
    /// println!("System prompt: {}", agent.get_system_prompt());
    /// ```
    pub fn get_system_prompt(&self) -> &str {
        &self.system_prompt
    }

    /// Sets the agent configuration.
    ///
    /// # Arguments
    ///
    /// * `config` - The [`AgentConfig`] to use
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::{Agent, AgentConfig};
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// let mut config = AgentConfig::new(5, 120);
    /// config.with_goal_analysis(true);
    ///
    /// agent.with_config(config);
    /// ```
    pub fn with_config(&mut self, config: AgentConfig) -> &mut Self {
        self.config = config;
        self
    }

    /// Configures the agent with comprehensive LLM settings using [`LLMConfig`].
    ///
    /// This allows you to set advanced LLM parameters like temperature, top_p,
    /// reasoning, provider-specific options, and more.
    ///
    /// # Arguments
    ///
    /// * `llm_config` - The [`LLMConfig`] containing comprehensive LLM settings
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::llm::LLMConfig;
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// // Configure with advanced settings
    /// let llm_config = LLMConfig::new("openai::gpt-4")
    ///     .with_api_key("your-api-key")
    ///     .with_temperature(0.7)
    ///     .with_max_tokens(2048)
    ///     .with_top_p(0.9);
    ///
    /// agent.with_llm_config(llm_config).unwrap();
    /// ```
    ///
    /// # Provider-Specific Examples
    ///
    /// ## Azure OpenAI
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::llm::LLMConfig;
    ///
    /// let mut agent = Agent::new("Assistant", "azure::gpt-4");
    ///
    /// let config = LLMConfig::new("azure::gpt-4")
    ///     .with_api_key("your-azure-key")
    ///     .with_deployment_id("your-deployment-id")
    ///     .with_api_version("2024-02-01");
    ///
    /// agent.with_llm_config(config).unwrap();
    /// ```
    ///
    /// ## OpenAI with Web Search
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::llm::LLMConfig;
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// let config = LLMConfig::new("openai::gpt-4")
    ///     .with_api_key("your-api-key")
    ///     .with_openai_web_search(true);
    ///
    /// agent.with_llm_config(config).unwrap();
    /// ```
    ///
    /// ## Anthropic with Reasoning
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::llm::LLMConfig;
    ///
    /// let mut agent = Agent::new("Assistant", "anthropic::claude-3-opus");
    ///
    /// let config = LLMConfig::new("anthropic::claude-3-opus")
    ///     .with_api_key("your-api-key")
    ///     .with_reasoning(true)
    ///     .with_reasoning_effort("high");
    ///
    /// agent.with_llm_config(config).unwrap();
    /// ```
    ///
    /// # Errors
    ///
    /// Returns an error if a new LLM client cannot be created from the given configuration.
    pub fn with_llm_config(&mut self, llm_config: LLMConfig) -> Result<&mut Self, String> {
        // Build a new LLM client from the config and replace the existing one.
        let new_client = UniversalLLMClient::new_with_config(llm_config)?;
        self.llm_client = Box::new(new_client);

        Ok(self)
    }

    /// Adds a tool to the agent's toolset.
    ///
    /// Tools extend the agent's capabilities by allowing it to perform
    /// external actions like web searches, calculations, database queries, etc.
    ///
    /// # Arguments
    ///
    /// * `tool` - Any type implementing [`ToolTrait`]
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::tools::ToolTrait;
    /// use serde_json::{json, Value};
    ///
    /// struct WeatherTool;
    ///
    /// impl ToolTrait for WeatherTool {
    ///     fn name(&self) -> String { "get_weather".to_string() }
    ///     fn description(&self) -> String { "Get weather for a location".to_string() }
    ///     fn input_schema(&self) -> Value { json!({"type": "object"}) }
    ///     fn execute(&self, input: Value) -> Value { json!({"temp": 72}) }
    /// }
    ///
    /// let mut agent = Agent::new("Assistant", "openai::gpt-4");
    /// agent.add_tool(WeatherTool);
    /// ```
    pub fn add_tool<T>(&mut self, tool: T) -> &mut Self
    where
        T: ToolTrait + Send + Sync + 'static,
    {
        self.tool_invoker.add_tool(tool);
        self
    }
    /// Returns a reference to the agent's tool invoker.
    ///
    /// This can be used to inspect registered tools or invoke them manually.
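    ///
    /// # Examples
    ///
    /// A minimal sketch of inspecting and manually invoking tools. It assumes that the
    /// `ToolInvoker` methods used below (`get_tool_specs` and `invoke`) are publicly
    /// accessible, and that a hypothetical `get_weather` tool accepting a `location`
    /// field has already been registered via [`Agent::add_tool`]:
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use serde_json::json;
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    /// // Inspect the specs of every registered tool.
    /// let specs = agent.get_tool_invoker().get_tool_specs();
    /// println!("{} tools registered", specs.len());
    ///
    /// // Invoke a registered tool directly (hypothetical `get_weather` tool).
    /// let tool_name = "get_weather".to_string();
    /// let input = json!({"location": "Colombo"});
    /// match agent.get_tool_invoker().invoke(&tool_name, input).await {
    ///     Ok(result) => println!("Tool result: {:?}", result),
    ///     Err(e) => println!("Tool failed: {}", e),
    /// }
    /// # }
    /// ```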
    pub fn get_tool_invoker(&self) -> &crate::tools::invoker::ToolInvoker {
        &self.tool_invoker
    }

    /// NEW: Analyze the task and create a goal structure
    async fn analyze_task_goal(&mut self, task: &TaskRequest) -> Result<Goal, String> {
        println!("🎯 Analyzing task to understand the goal...");

        // Create a prompt to analyze the goal
        let analysis_prompt = GoalAnalyzer::create_analysis_prompt(task.description());

        let messages = vec![
            Message {
                role: "system".to_string(),
                content: "You are a goal analysis assistant. Analyze tasks and break them into structured goals.".to_string(),
            },
            Message {
                role: "user".to_string(),
                content: analysis_prompt,
            },
        ];

        // Ask LLM to analyze
        let response = self
            .llm_client
            .complete::<LLMResponse<String>, String>(&messages, &[])
            .await
            .map_err(|e| format!("Failed to analyze goal: {}", e))?;

        // Parse the JSON response
        let analysis: GoalAnalysis = serde_json::from_str(&response.content)
            .map_err(|e| format!("Failed to parse goal analysis: {}", e))?;

        let goal = analysis.to_goal();

        println!("\n📋 Goal Analysis:");
        println!("{}", goal.get_summary());

        Ok(goal)
    }

    /// NEW: Check if current sub-goal is complete
    fn check_sub_goal_completion(&self, messages: &[Message]) -> bool {
        // Look at recent assistant messages to see if sub-goal was addressed
        // This is a simple heuristic - you can make it smarter
        let recent_content: String = messages
            .iter()
            .rev()
            .take(3)
            .filter(|m| m.role == "assistant")
            .map(|m| m.content.clone())
            .collect::<Vec<_>>()
            .join(" ");

        // Simple check: did we generate substantial content?
        !recent_content.trim().is_empty() && recent_content.len() > 50
    }

    /// NEW: Update goal progress based on conversation
    fn update_goal_progress(&mut self, messages: &[Message]) {
        // Check completion first (immutable borrow of self)
        let is_complete = self.check_sub_goal_completion(messages);

        // Now borrow goal mutably (no conflict)
        if let Some(goal) = &mut self.current_goal {
            if goal.status == GoalStatus::InProgress && is_complete {
                // Find next incomplete sub-goal
                if let Some(index) = goal.sub_goals.iter().position(|sg| !sg.completed) {
                    println!(
                        "✅ Completed sub-goal: {}",
                        goal.sub_goals[index].description
                    );
                    goal.complete_sub_goal(
                        index,
                        Some("Completed via agent interaction".to_string()),
                    );
                }
            }
        }
    }

    /// NEW: Check if we should continue working
    fn should_continue_working(&self) -> bool {
        if let Some(goal) = &self.current_goal {
            // Continue while the goal is in progress and either some sub-goals
            // or some success criteria remain unmet
            goal.status == GoalStatus::InProgress
                && (!goal.all_sub_goals_complete() || !goal.is_successful())
        } else {
            // No goal, use default behavior
            false
        }
    }

    /// Parse text-based tool calls from an LLM response.
    ///
    /// This is a fallback for models that don't support native tool calling.
    /// It looks for patterns like:
    /// USE_TOOL: tool_name
    /// {"param": "value"}
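    ///
    /// For example, given response text like the following (the `file_saver` example
    /// from the default system prompt):
    ///
    /// ```text
    /// USE_TOOL: file_saver
    /// {"filename": "hello.py", "content": "print('Hello, World!')"}
    /// ---
    /// ```
    ///
    /// this produces a single `ToolCall` with `name == "file_saver"` and the JSON
    /// object as its `input`.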
    fn parse_text_tool_calls(&self, content: &str) -> Vec<ToolCall> {
        use serde_json::Value;

        let mut tool_calls = Vec::new();

        // Look for "USE_TOOL: tool_name" pattern
        for line in content.lines() {
            if let Some(tool_name) = line.strip_prefix("USE_TOOL:").map(|s| s.trim()) {
                println!(" 🔍 Detected text-based tool call: {}", tool_name);

                // Look for JSON in the following lines
                let remaining = content
                    .split_once(&format!("USE_TOOL: {}", tool_name))
                    .and_then(|(_, after)| {
                        after.split_once("---").map(|(json, _)| json.to_string())
                    })
                    .or_else(|| {
                        content
                            .split_once(&format!("USE_TOOL: {}", tool_name))
                            .map(|(_, after)| {
                                // Take lines until we hit another USE_TOOL or end
                                after
                                    .lines()
                                    .take_while(|l| !l.starts_with("USE_TOOL:"))
                                    .collect::<Vec<_>>()
                                    .join("\n")
                            })
                    });

                if let Some(json_str) = remaining {
                    // Try to parse JSON from the text
                    if let Ok(input) = serde_json::from_str::<Value>(json_str.trim()) {
                        println!(" ✅ Parsed tool input: {:?}", input);
                        tool_calls.push(ToolCall {
                            name: tool_name.to_string(),
                            input,
                        });
                    } else {
                        // Try line by line
                        for potential_json in json_str.lines() {
                            let trimmed = potential_json.trim();
                            if trimmed.starts_with('{') {
                                if let Ok(input) = serde_json::from_str::<Value>(trimmed) {
                                    println!(" ✅ Parsed tool input: {:?}", input);
                                    tool_calls.push(ToolCall {
                                        name: tool_name.to_string(),
                                        input,
                                    });
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }

        tool_calls
    }

    /// Executes a task using the agent.
    ///
    /// This is the main method for running the agent. It:
    /// 1. Analyzes the task and creates a goal structure (if enabled)
    /// 2. Loads relevant conversation history from memory
    /// 3. Iteratively processes the task with the LLM
    /// 4. Invokes tools as needed
    /// 5. Tracks progress towards goal completion
    /// 6. Saves the conversation to memory
    ///
    /// # Arguments
    ///
    /// * `task` - A [`TaskRequest`] containing the user's request
    ///
    /// # Returns
    ///
    /// A [`TaskResponse`] containing the agent's final output
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use ceylon_next::agent::Agent;
    /// use ceylon_next::tasks::{TaskRequest, OutputData};
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let mut agent = Agent::new("Assistant", "openai::gpt-4");
    ///
    ///     let task = TaskRequest::new("Write a haiku about Rust");
    ///     let response = agent.run(task).await;
    ///
    ///     match response.result() {
    ///         OutputData::Text(text) => println!("{}", text),
    ///         _ => println!("Unexpected output type"),
    ///     }
    /// }
    /// ```
    pub async fn run(&mut self, task: TaskRequest) -> TaskResponse {
        self.status = AgentStatus::Running;

        // NEW: Analyze the task and create a goal
        if self.config.analyze_goals {
            match self.analyze_task_goal(&task).await {
                Ok(mut goal) => {
                    goal.status = GoalStatus::InProgress;
                    self.current_goal = Some(goal);
                }
                Err(e) => {
                    println!(
                        "⚠️ Could not analyze goal: {}. Continuing without goal tracking.",
                        e
                    );
                }
            }
        }

        // Load recent context from memory
        let recent_memories = self.get_history(Some(3)).await;

        // Start with system prompt
        let mut messages = vec![self.create_system_message()];

        // Add context from recent conversations
        if !recent_memories.is_empty() {
            println!(
                "📚 Loading {} recent conversations from memory",
                recent_memories.len()
            );
            let context_summary = self.create_context_message(&recent_memories);
            messages.push(context_summary);
        }

        // NEW: Add goal context if we have one
        if let Some(goal) = &self.current_goal {
            messages.push(Message {
                role: "system".to_string(),
                content: format!(
                    "CURRENT GOAL:\n{}\n\nWork through each sub-goal systematically. \
                     Don't finish until all success criteria are met.",
                    goal.get_summary()
                ),
            });
        }

        // Add current task
        messages.push(task.message());

        // Get available tools
        let tool_specs = self.tool_invoker.get_tool_specs();

        // The main agent loop
        let max_iterations = 15; // Increased for goal-oriented work
        let mut iteration = 0;

        loop {
            iteration += 1;

            if iteration > max_iterations {
                println!("⚠️ Max iterations reached");
                if let Some(goal) = &mut self.current_goal {
                    goal.status = GoalStatus::Failed;
                }
                break;
            }

            println!("\n🤖 Agent thinking... (iteration {})", iteration);

            // NEW: Show current goal progress
            if let Some(goal) = &self.current_goal {
                if let Some(next_sub_goal) = goal.get_next_sub_goal() {
                    println!("📍 Working on: {}", next_sub_goal.description);
                }
            }

            let response = self
                .llm_client
                .complete::<LLMResponse<String>, String>(&messages, &tool_specs)
                .await;

            if let Err(e) = response {
                println!("❌ LLM error: {}", e);
                break;
            }

            let llm_response = response.unwrap();

            // Handle tool calls (both native and text-based)
            let mut tool_calls = llm_response.tool_calls;

            // FALLBACK: Parse text-based tool calls for models that don't support native tool calling
            if tool_calls.is_empty() {
                tool_calls = self.parse_text_tool_calls(&llm_response.content);
            }

            if !tool_calls.is_empty() {
                for tool_call in tool_calls {
                    println!("🔧 LLM wants to use tool: {}", tool_call.name);

                    let tool_result = self
                        .tool_invoker
                        .invoke(&tool_call.name, tool_call.input)
                        .await;

                    match tool_result {
                        Ok(result) => {
                            println!(" ✅ Tool succeeded: {:?}", result);

                            messages.push(Message {
                                role: "assistant".to_string(),
                                content: format!(
                                    "I used the {} tool and got this result: {}",
                                    tool_call.name,
                                    serde_json::to_string_pretty(&result).unwrap_or_default()
                                ),
                            });
                        }
                        Err(e) => {
                            println!(" ❌ Tool failed: {}", e);

                            messages.push(Message {
                                role: "assistant".to_string(),
                                content: format!("Tool '{}' failed: {}", tool_call.name, e),
                            });
                        }
                    }
                }

                // NEW: Update goal progress after tool use
                self.update_goal_progress(&messages);

                // Continue loop to let LLM think about results
                continue;
            }

            println!("📝 Agent generated response");

            messages.push(Message {
                role: "assistant".to_string(),
                content: llm_response.content.clone(),
            });

            // NEW: Update goal progress
            self.update_goal_progress(&messages);

            // NEW: Check if we should continue based on goal
            if self.should_continue_working() {
                println!("🔄 Goal not complete yet, continuing...");

                // Add a prompt to continue working
                messages.push(Message {
                    role: "user".to_string(),
                    content: "Please continue working on the remaining sub-goals.".to_string(),
                });
                continue;
            }

            // NEW: Mark goal as complete if all criteria met
            if let Some(goal) = &mut self.current_goal {
                if goal.is_successful() && goal.all_sub_goals_complete() {
                    println!("🎉 Goal completed successfully!");
                    goal.status = GoalStatus::Completed;
                } else {
                    println!("⚠️ Goal incomplete - some criteria not met");
                }
            }

            break;
        }

        self.status = AgentStatus::Stopped;

        // Save conversation to memory
        let memory_entry = MemoryEntry::new(self.id.clone(), task.id().into(), messages.clone());

        if let Err(e) = self.memory.store(memory_entry).await {
            println!("⚠️ Failed to save to memory: {}", e);
        } else {
            println!("💾 Conversation saved to memory");
        }

        let final_answer = messages
            .last()
            .map(|m| m.content.clone())
            .unwrap_or_else(|| "No response generated".to_string());

        TaskResponse::new(&task.id(), OutputData::Text(final_answer))
    }

    fn create_context_message(&self, memories: &[MemoryEntry]) -> Message {
        let mut context = String::from("PREVIOUS CONVERSATION HISTORY:\n");
        context.push_str("Use this information to answer questions about the user.\n\n");

        for (i, memory) in memories.iter().enumerate() {
            context.push_str(&format!("=== Past Conversation {} ===\n", i + 1));

            for msg in &memory.messages {
                match msg.role.as_str() {
                    "user" => {
                        context.push_str(&format!("User said: {}\n", msg.content));
                    }
                    "assistant" => {
                        if !msg.content.is_empty() && !msg.content.contains("analyze_text") {
                            context.push_str(&format!("You replied: {}\n", msg.content));
                        }
                    }
                    _ => {}
                }
            }
            context.push_str("\n");
        }

        context.push_str("Remember: Use this history to answer questions about the user.\n");

        Message {
            role: "system".to_string(),
            content: context,
        }
    }

    fn create_system_message(&self) -> Message {
        Message {
            role: "system".to_string(),
            content: self.system_prompt.clone(),
        }
    }
}