//! Runtime orchestrator for the Mermaid CLI.
//!
//! Source: mermaid_cli/runtime/orchestrator.rs

1use anyhow::Result;
2use std::path::PathBuf;
3
4use crate::{
5    app::{load_config, persist_last_model, Config},
6    cli::{handle_command, Cli},
7    models::ModelFactory,
8    ollama::{ensure_model as ensure_ollama_model, require_any_model},
9    session::{select_conversation, ConversationManager},
10    tui::{run_ui, App},
11    utils::{check_ollama_available, log_error, log_info, log_progress, log_warn},
12};
13
/// Main runtime orchestrator.
///
/// Owns the parsed CLI arguments and the loaded configuration, and drives
/// the full startup sequence (command handling, model resolution, Ollama
/// checks, session restore, TUI launch) via [`Orchestrator::run`].
pub struct Orchestrator {
    // Parsed command-line arguments (subcommand, --model, --continue, etc.).
    cli: Cli,
    // Application configuration; falls back to `Config::default()` if the
    // config file fails to load (see `Orchestrator::new`).
    config: Config,
}
19
20impl Orchestrator {
21    /// Create a new orchestrator from CLI args
22    pub fn new(cli: Cli) -> Result<Self> {
23        // Load configuration (single file + defaults)
24        let config = match load_config() {
25            Ok(cfg) => cfg,
26            Err(e) => {
27                log_warn("CONFIG", format!("Config load failed: {:#}. Using defaults.", e));
28                Config::default()
29            },
30        };
31
32        Ok(Self {
33            cli,
34            config,
35        })
36    }
37
38    /// Run the orchestrator
39    pub async fn run(self) -> Result<()> {
40        // Progress tracking for startup
41        let total_steps = 6; // Total startup steps
42        let mut current_step = 0;
43
44        // Handle subcommands
45        current_step += 1;
46        log_progress(current_step, total_steps, "Processing commands");
47        if let Some(command) = &self.cli.command
48            && handle_command(command).await? {
49                return Ok(()); // Command handled, exit
50            }
51            // Continue to chat for Commands::Chat
52
53        // Determine model to use (CLI arg > last_used > default_model)
54        current_step += 1;
55        log_progress(current_step, total_steps, "Configuring model");
56
57        let cli_model_provided = self.cli.model.is_some();
58        let model_id = if let Some(model) = &self.cli.model {
59            // CLI flag takes precedence
60            model.clone()
61        } else if let Some(last_model) = &self.config.last_used_model {
62            // Use last used model from config
63            last_model.clone()
64        } else if !self.config.default_model.provider.is_empty()
65            && !self.config.default_model.name.is_empty()
66        {
67            // Fall back to default_model if set
68            format!(
69                "{}/{}",
70                self.config.default_model.provider, self.config.default_model.name
71            )
72        } else {
73            // No model configured - check if any models are available
74            let available = require_any_model().await?;
75            // Use first available model
76            format!("ollama/{}", available[0])
77        };
78
79        log_info(
80            "MERMAID",
81            format!("Starting Mermaid with model: {}", model_id),
82        );
83
84        // Check Ollama availability (all models route through Ollama)
85        current_step += 1;
86        log_progress(current_step, total_steps, "Checking Ollama availability");
87        let ollama_check = check_ollama_available(
88            &self.config.ollama.host,
89            self.config.ollama.port,
90        ).await;
91
92        if !ollama_check.available {
93            log_error("OLLAMA", &ollama_check.message);
94            std::process::exit(1);
95        }
96
97        // Validate model exists
98        current_step += 1;
99        log_progress(current_step, total_steps, "Checking model availability");
100        ensure_ollama_model(&model_id).await?;
101
102        // Persist model if CLI flag was used
103        if cli_model_provided
104            && let Err(e) = persist_last_model(&model_id) {
105                log_warn("CONFIG", format!("Failed to persist model choice: {}", e));
106            }
107
108        // Create model instance with config for authentication and optional backend override
109        current_step += 1;
110        log_progress(current_step, total_steps, "Initializing model");
111        // Use config.behavior.backend ("auto" means None)
112        let backend = if self.config.behavior.backend == "auto" {
113            None
114        } else {
115            Some(self.config.behavior.backend.as_str())
116        };
117        let model = match ModelFactory::create_with_backend(
118            &model_id,
119            Some(&self.config),
120            backend,
121        )
122        .await
123        {
124            Ok(m) => m,
125            Err(e) => {
126                log_error("ERROR", format!("Failed to initialize model: {}", e));
127                log_error(
128                    "",
129                    "Make sure the model is available and properly configured.",
130                );
131                std::process::exit(1);
132            },
133        };
134
135        // Set up project path for session management
136        let project_path = self.cli.path.clone().unwrap_or_else(|| PathBuf::from("."));
137
138        // Start UI - LLM explores codebase via tools, no context injection
139        current_step += 1;
140        log_progress(current_step, total_steps, "Starting UI");
141        let mut app = App::new(model, model_id.clone());
142
143        // Wire app config temperature/max_tokens into model state
144        app.model_state.temperature = self.config.default_model.temperature;
145        app.model_state.max_tokens = self.config.default_model.max_tokens;
146
147        // Handle session loading
148        // Default: start fresh (no history)
149        // --continue: resume last conversation
150        // --sessions: show picker to choose a previous conversation
151        if self.cli.continue_session || self.cli.sessions {
152            let conversation_manager = ConversationManager::new(&project_path)?;
153
154            if self.cli.sessions {
155                // Show selection UI for choosing a conversation
156                let conversations = conversation_manager.list_conversations()?;
157                if !conversations.is_empty() {
158                    if let Some(selected) = select_conversation(conversations)? {
159                        log_info(
160                            "RESUME",
161                            format!("Resuming conversation: {}", selected.title),
162                        );
163                        app.load_conversation(selected);
164                    }
165                } else {
166                    log_info("INFO", "No previous conversations found in this directory");
167                }
168            } else {
169                // --continue: resume last conversation
170                if let Some(last_conv) = conversation_manager.load_last_conversation()? {
171                    log_info(
172                        "RESUME",
173                        format!("Resuming: {}", last_conv.title),
174                    );
175                    app.load_conversation(last_conv);
176                } else {
177                    log_info("INFO", "No previous conversation to continue");
178                }
179            }
180        }
181
182        // Run the TUI
183        run_ui(app).await
184    }
185}
186