//! Runtime orchestrator for the Mermaid CLI.
//!
//! File: mermaid_cli/runtime/orchestrator.rs
use anyhow::Result;
use std::path::PathBuf;

use crate::{
    app::{load_config, persist_last_model, Config},
    cli::{handle_command, Cli},
    models::ModelFactory,
    ollama::{ensure_model as ensure_ollama_model, require_any_model},
    session::{select_conversation, ConversationManager},
    tui::{run_ui, App},
    utils::{check_ollama_available, log_error, log_info, log_progress, log_warn},
};

/// Main runtime orchestrator.
///
/// Owns the parsed CLI arguments and the loaded configuration, and drives
/// the whole startup sequence (subcommand handling, model selection, Ollama
/// checks, model initialization, session loading, and the TUI).
pub struct Orchestrator {
    // Parsed command-line arguments.
    cli: Cli,
    // Application configuration; falls back to defaults if loading failed.
    config: Config,
}
20impl Orchestrator {
21    /// Create a new orchestrator from CLI args
22    pub fn new(cli: Cli) -> Result<Self> {
23        // Load configuration (single file + defaults)
24        let config = match load_config() {
25            Ok(cfg) => cfg,
26            Err(e) => {
27                log_warn("CONFIG", format!("Config load failed: {:#}. Using defaults.", e));
28                Config::default()
29            },
30        };
31
32        Ok(Self {
33            cli,
34            config,
35        })
36    }
37
38    /// Run the orchestrator
39    pub async fn run(self) -> Result<()> {
40        // Progress tracking for startup
41        let total_steps = 6; // Total startup steps
42        let mut current_step = 0;
43
44        // Handle subcommands
45        current_step += 1;
46        log_progress(current_step, total_steps, "Processing commands");
47        if let Some(command) = &self.cli.command {
48            if handle_command(command).await? {
49                return Ok(()); // Command handled, exit
50            }
51            // Continue to chat for Commands::Chat
52        }
53
54        // Determine model to use (CLI arg > last_used > default_model)
55        current_step += 1;
56        log_progress(current_step, total_steps, "Configuring model");
57
58        let cli_model_provided = self.cli.model.is_some();
59        let model_id = if let Some(model) = &self.cli.model {
60            // CLI flag takes precedence
61            model.clone()
62        } else if let Some(last_model) = &self.config.last_used_model {
63            // Use last used model from config
64            last_model.clone()
65        } else if !self.config.default_model.provider.is_empty()
66            && !self.config.default_model.name.is_empty()
67        {
68            // Fall back to default_model if set
69            format!(
70                "{}/{}",
71                self.config.default_model.provider, self.config.default_model.name
72            )
73        } else {
74            // No model configured - check if any models are available
75            let available = require_any_model().await?;
76            // Use first available model
77            format!("ollama/{}", available[0])
78        };
79
80        log_info(
81            "MERMAID",
82            format!("Starting Mermaid with model: {}", model_id),
83        );
84
85        // Check Ollama availability for local models
86        current_step += 1;
87        if is_local_model(&model_id) {
88            log_progress(current_step, total_steps, "Checking Ollama availability");
89            let ollama_check = check_ollama_available(
90                &self.config.ollama.host,
91                self.config.ollama.port,
92            ).await;
93
94            if !ollama_check.available {
95                log_error("OLLAMA", &ollama_check.message);
96                std::process::exit(1);
97            }
98        } else {
99            log_progress(current_step, total_steps, "Using API provider");
100        }
101
102        // Validate model exists
103        current_step += 1;
104        log_progress(current_step, total_steps, "Checking model availability");
105        ensure_ollama_model(&model_id, true).await?;
106
107        // Persist model if CLI flag was used
108        if cli_model_provided {
109            if let Err(e) = persist_last_model(&model_id) {
110                log_warn("CONFIG", format!("Failed to persist model choice: {}", e));
111            }
112        }
113
114        // Create model instance with config for authentication and optional backend override
115        current_step += 1;
116        log_progress(current_step, total_steps, "Initializing model");
117        // Use config.behavior.backend ("auto" means None)
118        let backend = if self.config.behavior.backend == "auto" {
119            None
120        } else {
121            Some(self.config.behavior.backend.as_str())
122        };
123        let model = match ModelFactory::create_with_backend(
124            &model_id,
125            Some(&self.config),
126            backend,
127        )
128        .await
129        {
130            Ok(m) => m,
131            Err(e) => {
132                log_error("ERROR", format!("Failed to initialize model: {}", e));
133                log_error(
134                    "",
135                    "Make sure the model is available and properly configured.",
136                );
137                std::process::exit(1);
138            },
139        };
140
141        // Set up project path for session management
142        let project_path = self.cli.path.clone().unwrap_or_else(|| PathBuf::from("."));
143
144        // Start UI - LLM explores codebase via tools, no context injection
145        current_step += 1;
146        log_progress(current_step, total_steps, "Starting UI");
147        let mut app = App::new(model, model_id.clone());
148
149        // Handle session loading
150        // Default: start fresh (no history)
151        // --continue: resume last conversation
152        // --sessions: show picker to choose a previous conversation
153        if self.cli.continue_session || self.cli.sessions {
154            let conversation_manager = ConversationManager::new(&project_path)?;
155
156            if self.cli.sessions {
157                // Show selection UI for choosing a conversation
158                let conversations = conversation_manager.list_conversations()?;
159                if !conversations.is_empty() {
160                    if let Some(selected) = select_conversation(conversations)? {
161                        log_info(
162                            "RESUME",
163                            format!("Resuming conversation: {}", selected.title),
164                        );
165                        app.load_conversation(selected);
166                    }
167                } else {
168                    log_info("INFO", "No previous conversations found in this directory");
169                }
170            } else {
171                // --continue: resume last conversation
172                if let Some(last_conv) = conversation_manager.load_last_conversation()? {
173                    log_info(
174                        "RESUME",
175                        format!("Resuming: {}", last_conv.title),
176                    );
177                    app.load_conversation(last_conv);
178                } else {
179                    log_info("INFO", "No previous conversation to continue");
180                }
181            }
182        }
183
184        // Run the TUI
185        run_ui(app).await
186    }
187}
/// Check if a model uses local Ollama inference.
///
/// Every model id is currently routed through Ollama, whatever its shape:
/// - `ollama/` prefix — explicit Ollama models
/// - `:cloud` suffix — Ollama cloud routing (e.g., kimi-k2:cloud)
/// - bare names — auto-discovered on Ollama
///
/// The (unused) parameter keeps the signature stable for call sites if
/// non-Ollama providers are introduced later.
fn is_local_model(_model_id: &str) -> bool {
    // Unconditionally true: local and cloud routing both go through Ollama.
    true
}