// rustchain/cli/interactive.rs

1use crate::core::{RuntimeContext, Result};
2use crate::core::memory::ConversationMemory;
3use std::io::{self, Write};
4use std::sync::Arc;
5use tokio::sync::RwLock;
6use uuid::Uuid;
7
/// Interactive CLI mode providing Claude Code-style conversational experience
pub struct InteractiveMode {
    // Shared runtime services (auditing, feature discovery) used by the handlers.
    context: Arc<RuntimeContext>,
    // Unique tag ("interactive_<uuid>") attached to audit records for this session.
    session_id: String,
    // Rolling transcript of the dialogue, shared behind an async RwLock.
    conversation: Arc<RwLock<ConversationMemory>>,
    // Set by input handling (exit command / EOF) to stop the main REPL loop.
    exit_requested: bool,
}
15
16impl InteractiveMode {
17    pub fn new(context: Arc<RuntimeContext>) -> Self {
18        let session_id = format!("interactive_{}", Uuid::new_v4().simple());
19        let conversation = Arc::new(RwLock::new(ConversationMemory::new(1000))); // 1000 message capacity
20        
21        Self {
22            context,
23            session_id,
24            conversation,
25            exit_requested: false,
26        }
27    }
28
29    /// Start the interactive CLI session
30    pub async fn run(&mut self) -> Result<()> {
31        self.print_welcome().await?;
32        
33        while !self.exit_requested {
34            match self.handle_user_input().await {
35                Ok(_) => continue,
36                Err(e) => {
37                    eprintln!("Error: {}", e);
38                    continue;
39                }
40            }
41        }
42        
43        self.print_goodbye().await?;
44        Ok(())
45    }
46
47    async fn print_welcome(&self) -> Result<()> {
48        println!("🤖 RustChain Interactive Mode");
49        println!("─────────────────────────────");
50        println!("Welcome to RustChain's conversational AI agent interface.");
51        println!("Type your requests naturally, and I'll help you execute missions,");
52        println!("manage configurations, run safety checks, and more.");
53        println!();
54        println!("💡 Examples:");
55        println!("  • \"Run a mission to create a file called hello.txt\"");
56        println!("  • \"Check my system configuration\""); 
57        println!("  • \"What safety policies are currently active?\"");
58        println!("  • \"Show me the audit trail from today\"");
59        println!();
60        println!("Type 'exit', 'quit', or press Ctrl+C to leave.");
61        println!("─────────────────────────────────────────────────");
62        println!();
63        Ok(())
64    }
65
66    async fn print_goodbye(&self) -> Result<()> {
67        println!();
68        println!("👋 Thanks for using RustChain Interactive Mode!");
69        
70        // Save session summary
71        let conversation = self.conversation.read().await;
72        let stats = conversation.stats();
73        
74        if stats.total_messages > 0 {
75            println!("📊 Session Summary:");
76            println!("   • Messages exchanged: {}", stats.total_messages);
77            println!("   • Session ID: {}", self.session_id);
78            
79            // Audit the session
80            self.context.audit_action(
81                &self.session_id,
82                "interactive_session_end",
83                &format!("Completed interactive session with {} messages", stats.total_messages)
84            ).await;
85        }
86        
87        println!("🚀 Session saved. See you next time!");
88        Ok(())
89    }
90
91    async fn handle_user_input(&mut self) -> Result<()> {
92        // Print prompt
93        print!("🤖 rustchain> ");
94        if let Err(e) = io::stdout().flush() {
95            tracing::warn!("Failed to flush stdout: {}", e);
96            // Continue execution even if stdout flush fails
97        }
98        
99        // Read user input
100        let mut input = String::new();
101        let bytes_read = io::stdin().read_line(&mut input).map_err(|e| {
102            crate::core::error::RustChainError::Execution(
103                crate::core::error::ExecutionError::InvalidState {
104                    state: "reading_input".to_string(),
105                    operation: format!("Failed to read input: {}", e)
106                }
107            )
108        })?;
109        
110        tracing::debug!("Read {} bytes: {:?}", bytes_read, input.trim());
111        
112        // Handle EOF (end of input stream)
113        if bytes_read == 0 {
114            println!("\nEOF detected, exiting interactive mode.");
115            self.exit_requested = true;
116            return Ok(());
117        }
118        
119        let input = input.trim();
120        
121        // Handle empty input
122        if input.is_empty() {
123            return Ok(());
124        }
125        
126        // Handle exit commands
127        if matches!(input.to_lowercase().as_str(), "exit" | "quit" | "bye" | "q") {
128            self.exit_requested = true;
129            return Ok(());
130        }
131        
132        // Add user message to conversation
133        self.conversation.write().await.add_message("user", input)?;
134        
135        // Process the input and generate response
136        let response = self.process_natural_language_request(input).await?;
137        
138        // Add assistant response to conversation
139        self.conversation.write().await.add_message("assistant", &response)?;
140        
141        // Print response
142        println!();
143        println!("🤖 {}", response);
144        println!();
145        
146        Ok(())
147    }
148
    /// Route one user utterance to a response string.
    ///
    /// Always audits the request first. With the `llm` feature enabled it
    /// tries the LLM path and only falls back to keyword-based intent
    /// matching if that call fails; without the feature, intent matching
    /// is the only path.
    async fn process_natural_language_request(&self, input: &str) -> Result<String> {
        // Audit the user request
        self.context.audit_action(
            &self.session_id,
            "interactive_request",
            input
        ).await;
        
        // Use LLM for actual AI-powered conversation
        #[cfg(feature = "llm")]
        {
            match self.process_with_llm(input).await {
                Ok(response) => return Ok(response),
                Err(e) => {
                    // Fall back to pattern matching if LLM fails
                    tracing::warn!("LLM processing failed, falling back to pattern matching: {}", e);
                }
            }
        }
        
        // Fallback: Natural language processing and intent recognition using patterns
        let intent = self.analyze_intent(input).await?;
        
        // Dispatch each recognized intent to its dedicated handler;
        // `General` carries its canned reply directly.
        match intent {
            Intent::RunMission { description } => {
                self.handle_mission_request(&description).await
            },
            Intent::CheckConfig => {
                self.handle_config_check().await
            },
            Intent::ShowSafety => {
                self.handle_safety_check().await
            },
            Intent::ShowAudit { timeframe } => {
                self.handle_audit_request(&timeframe).await
            },
            Intent::ShowFeatures => {
                self.handle_features_request().await
            },
            Intent::Help => {
                self.handle_help_request().await
            },
            Intent::General { response } => {
                Ok(response)
            }
        }
    }
196
197    async fn analyze_intent(&self, input: &str) -> Result<Intent> {
198        let input_lower = input.to_lowercase();
199        
200        // Mission execution patterns
201        if input_lower.contains("run") && (input_lower.contains("mission") || input_lower.contains("task")) {
202            return Ok(Intent::RunMission { 
203                description: input.to_string() 
204            });
205        }
206        
207        if input_lower.contains("create") && input_lower.contains("file") {
208            return Ok(Intent::RunMission {
209                description: format!("Create file: {}", input)
210            });
211        }
212        
213        // Configuration patterns
214        if input_lower.contains("config") || input_lower.contains("configuration") {
215            return Ok(Intent::CheckConfig);
216        }
217        
218        // Safety patterns  
219        if input_lower.contains("safety") || input_lower.contains("policy") || input_lower.contains("policies") {
220            return Ok(Intent::ShowSafety);
221        }
222        
223        // Audit patterns
224        if input_lower.contains("audit") || input_lower.contains("log") || input_lower.contains("history") {
225            let timeframe = if input_lower.contains("today") { 
226                "today".to_string() 
227            } else if input_lower.contains("recent") {
228                "recent".to_string()
229            } else {
230                "all".to_string()
231            };
232            return Ok(Intent::ShowAudit { timeframe });
233        }
234        
235        // Features patterns
236        if input_lower.contains("feature") || input_lower.contains("capability") || input_lower.contains("what can") {
237            return Ok(Intent::ShowFeatures);
238        }
239        
240        // Help patterns
241        if input_lower.contains("help") || input_lower.contains("how") || input_lower == "?" {
242            return Ok(Intent::Help);
243        }
244        
245        // Greeting patterns
246        if matches!(input_lower.as_str(), "hi" | "hello" | "hey" | "good morning" | "good afternoon") {
247            return Ok(Intent::General {
248                response: "Hello! I'm your RustChain AI assistant. I can help you run missions, check configurations, manage safety policies, review audit logs, and more. What would you like to do?".to_string()
249            });
250        }
251        
252        // Default: general assistance
253        Ok(Intent::General {
254            response: format!("I understand you want to: '{}'. Let me help you with that.\n\nFor mission execution, try: 'run a mission to [description]'\nFor system info, try: 'check config', 'show safety policies', or 'show audit logs'\nFor help, just type 'help'", input)
255        })
256    }
257
258    async fn handle_mission_request(&self, description: &str) -> Result<String> {
259        Ok(format!(
260            "🚀 Mission Request Received: {}\n\n\
261            I can help you execute this mission! However, I need a properly formatted mission file.\n\
262            \n\
263            To run missions:\n\
264            • Use: `rustchain run path/to/mission.yaml`\n\
265            • Or create a simple mission with: `rustchain mission create`\n\
266            \n\
267            Would you like me to help you create a mission file for this task?",
268            description
269        ))
270    }
271
272    async fn handle_config_check(&self) -> Result<String> {
273        // In a real implementation, this would check actual config
274        Ok("📋 Configuration Status:\n\n\
275            ✅ Runtime Context: Initialized\n\
276            ✅ Audit System: Active\n\
277            ✅ Policy Engine: Loaded with default policies\n\
278            ✅ Safety Validator: Enabled\n\
279            ✅ Tool Registry: Ready\n\
280            \n\
281            💡 To view detailed config: `rustchain config show`\n\
282            💡 To modify config: `rustchain config set <key> <value>`".to_string())
283    }
284
285    async fn handle_safety_check(&self) -> Result<String> {
286        Ok("🛡️ Safety & Policy Status:\n\n\
287            Active Safety Rules:\n\
288            • ✅ Dangerous command detection\n\
289            • ✅ File system access validation\n\
290            • ✅ Network request filtering\n\
291            • ✅ Resource usage limits\n\
292            \n\
293            Policy Engine:\n\
294            • ✅ Default security policies loaded\n\
295            • ✅ Custom rules: 0 configured\n\
296            \n\
297            💡 To run safety validation: `rustchain safety validate <mission>`\n\
298            💡 To view all policies: `rustchain policy list`".to_string())
299    }
300
301    async fn handle_audit_request(&self, timeframe: &str) -> Result<String> {
302        let conversation = self.conversation.read().await;
303        let stats = conversation.stats();
304        
305        Ok(format!(
306            "📊 Audit Trail ({}): \n\n\
307            Current Session:\n\
308            • Messages: {}\n\
309            • Session ID: {}\n\
310            • Started: Just now\n\
311            \n\
312            System Activity:\n\
313            • ✅ Interactive mode started\n\
314            • ✅ User requests processed: {}\n\
315            \n\
316            💡 For detailed audit reports: `rustchain audit report`\n\
317            💡 To export audit data: `rustchain audit export --format json`",
318            timeframe,
319            stats.total_messages,
320            self.session_id,
321            stats.total_messages / 2 // Approximate user requests
322        ))
323    }
324
325    async fn handle_features_request(&self) -> Result<String> {
326        // Check if we're in enterprise mode
327        let enterprise_features = self.context.get_enterprise_features().await;
328        let is_enterprise = !enterprise_features.is_empty();
329        
330        if is_enterprise {
331            Ok("🚀 RustChain Enterprise Features Available:\n\n\
332                Core Features:\n\
333                • ✅ Mission execution with DAG support\n\
334                • ✅ Multi-provider LLM integration\n\
335                • ✅ Extensible tool system\n\
336                • ✅ Memory management\n\
337                • ✅ Safety validation\n\
338                \n\
339                Enterprise Features:\n\
340                • ✅ Authentication & RBAC\n\
341                • ✅ Compliance monitoring\n\
342                • ✅ Performance dashboards\n\
343                • ✅ Multi-tenant support\n\
344                \n\
345                💡 Check detailed features: `rustchain features list`".to_string())
346        } else {
347            Ok("🚀 RustChain Community Features:\n\n\
348                Available:\n\
349                • ✅ Mission execution with DAG support\n\
350                • ✅ Multi-provider LLM integration (OpenAI, Anthropic, Ollama)\n\
351                • ✅ Extensible tool system with 20+ built-in tools\n\
352                • ✅ Memory management with multiple strategies\n\
353                • ✅ RAG document processing\n\
354                • ✅ Safety validation and policy enforcement\n\
355                • ✅ Comprehensive audit trails\n\
356                \n\
357                Enterprise Upgrade Available:\n\
358                • 🔒 Advanced authentication & RBAC\n\
359                • 🔒 Compliance monitoring (GDPR, HIPAA, SOX)\n\
360                • 🔒 Performance dashboards & alerting\n\
361                • 🔒 Multi-tenant support\n\
362                \n\
363                💡 Upgrade info: https://github.com/your-org/rustchain-enterprise".to_string())
364        }
365    }
366
367    async fn handle_help_request(&self) -> Result<String> {
368        Ok("🤖 RustChain Interactive Help\n\
369            ═══════════════════════════════\n\
370            \n\
371            I can help you with:\n\
372            \n\
373            🚀 Mission Management:\n\
374            • \"Run a mission to create a file\"\n\
375            • \"Execute task: backup my data\"\n\
376            • \"Start mission from file.yaml\"\n\
377            \n\
378            ⚙️ System Configuration:\n\
379            • \"Check my configuration\"\n\
380            • \"Show system status\"\n\
381            • \"What are my current settings?\"\n\
382            \n\
383            🛡️ Safety & Policies:\n\
384            • \"Show safety policies\"\n\
385            • \"What security rules are active?\"\n\
386            • \"Check policy status\"\n\
387            \n\
388            📊 Audit & Monitoring:\n\
389            • \"Show audit logs\"\n\
390            • \"What happened today?\"\n\
391            • \"Display recent activity\"\n\
392            \n\
393            🎯 Features & Capabilities:\n\
394            • \"What can you do?\"\n\
395            • \"Show available features\"\n\
396            • \"List capabilities\"\n\
397            \n\
398            💡 Just ask naturally - I'll understand and help!".to_string())
399    }
400    
    /// Process user input using LLM for actual AI conversation
    ///
    /// Builds a chat request from: a system prompt, up to the 10 most
    /// recent transcript entries, and the current input, then returns the
    /// model's reply. Manager-creation and completion failures are wrapped
    /// as `ExecutionError::InvalidState` so the caller can fall back to
    /// pattern matching.
    #[cfg(feature = "llm")]
    async fn process_with_llm(&self, input: &str) -> Result<String> {
        use crate::llm::{create_default_llm_manager, ChatMessage, LLMRequest, MessageRole};
        use std::collections::HashMap;
        
        let manager = create_default_llm_manager().map_err(|e| {
            crate::core::error::RustChainError::Execution(
                crate::core::error::ExecutionError::InvalidState {
                    state: "llm_initialization".to_string(),
                    operation: format!("Failed to create LLM manager: {}", e)
                }
            )
        })?;
        
        // Get conversation history for context; drop the guard before the
        // awaits below so the lock is not held across them.
        let conversation_guard = self.conversation.read().await;
        let history = conversation_guard.get_recent(10)?; // Last 10 messages
        drop(conversation_guard);
        
        // Build conversation context
        let mut messages = Vec::new();
        
        // System message with RustChain context
        messages.push(ChatMessage {
            role: MessageRole::System,
            content: self.sanitize_content(&self.build_system_prompt().await?),
            name: None,
            tool_calls: None,
            tool_call_id: None,
        });
        
        // Add conversation history - format is "role: content".
        // Entries without a ": " separator are silently skipped; any role
        // other than "user" is replayed as Assistant.
        for formatted_msg in history {
            if let Some(colon_pos) = formatted_msg.find(": ") {
                let role = &formatted_msg[..colon_pos];
                let content = &formatted_msg[colon_pos + 2..];
                let message_role = if role == "user" { MessageRole::User } else { MessageRole::Assistant };
                messages.push(ChatMessage {
                    role: message_role,
                    content: self.sanitize_content(content),
                    name: None,
                    tool_calls: None,
                    tool_call_id: None,
                });
            }
        }
        
        // Add current user message
        messages.push(ChatMessage {
            role: MessageRole::User,
            content: self.sanitize_content(input),
            name: None,
            tool_calls: None,
            tool_call_id: None,
        });
        
        let request = LLMRequest {
            messages,
            model: None, // Use default model
            temperature: Some(0.7),
            max_tokens: Some(2000),
            stream: false,
            tools: None,
            metadata: HashMap::new(),
        };
        
        // Send to LLM (no explicit provider — manager picks the default).
        let response = manager.complete(request, None).await.map_err(|e| {
            crate::core::error::RustChainError::Execution(
                crate::core::error::ExecutionError::InvalidState {
                    state: "llm_completion".to_string(),
                    operation: format!("LLM request failed: {}", e)
                }
            )
        })?;
        
        Ok(response.content)
    }
480    
    /// Build system prompt with RustChain capabilities
    ///
    /// Embeds the Debug-formatted feature lists and the session id into a
    /// fixed prompt template.
    /// NOTE(review): the literal below mixes column-0 headers with
    /// 12-space-indented lines; that whitespace is part of the prompt text
    /// sent to the model — confirm it is intentional before reformatting.
    #[cfg(feature = "llm")]
    async fn build_system_prompt(&self) -> Result<String> {
        let features = self.context.get_available_features().await;
        let enterprise_features = self.context.get_enterprise_features().await;
        
        Ok(format!(
            "You are the RustChain AI Assistant, an intelligent agent framework helper.
            
Your capabilities:
            🚀 Mission Execution: You can run, validate, and manage YAML-based mission files
            🛡️ Safety & Security: You enforce safety policies and run security validation
            🔧 Tool Management: You have access to 20+ built-in tools for file ops, HTTP, etc.
            🤖 LLM Integration: You support multiple LLM providers (OpenAI, Anthropic, Ollama)
            📊 Audit & Monitoring: You maintain comprehensive audit trails
            ⚙️ Configuration: You can show and validate system configuration
            
            Available Features: {:?}
            Enterprise Features: {:?}
            
Instructions:
            - Be helpful, concise, and action-oriented
            - When users ask to run missions, explain what's needed (YAML file path)
            - For system info, provide current status from your knowledge
            - Always offer specific CLI commands when relevant
            - If asked about capabilities you don't have, be honest
            - Keep responses under 200 words unless detailed explanation needed
            - Use emojis sparingly for clarity
            
Current Session: {} (Interactive Mode)",
            features,
            enterprise_features,
            self.session_id
        ))
    }
516    
517    /// Sanitize content for LLM input (basic filtering)
518    #[cfg(feature = "llm")]
519    fn sanitize_content(&self, content: &str) -> String {
520        // Basic content sanitization for LLM input
521        content
522            .trim()
523            .chars()
524            .filter(|&c| c.is_ascii() || c.is_whitespace())
525            .collect::<String>()
526            .lines()
527            .take(100) // Limit to 100 lines
528            .collect::<Vec<_>>()
529            .join("\n")
530    }
531    
532}
533
/// Coarse classification of a user's request, produced by
/// `InteractiveMode::analyze_intent` keyword matching.
#[derive(Debug)]
enum Intent {
    // Mission-style request; `description` echoes the raw input.
    RunMission { description: String },
    // Report configuration status.
    CheckConfig,
    // Report safety/policy status.
    ShowSafety,
    // Summarize audit activity; `timeframe` is "today", "recent", or "all".
    ShowAudit { timeframe: String },
    // List available features for the detected edition.
    ShowFeatures,
    // Show usage help.
    Help,
    // Canned conversational reply carried directly in the variant.
    General { response: String },
}