//! mermaid_cli/runtime/orchestrator.rs
//!
//! Main runtime orchestrator for the Mermaid CLI.
1use anyhow::Result;
2use std::path::PathBuf;
3
4use crate::{
5    app::{Config, load_config, persist_last_model},
6    cli::{Cli, handle_command},
7    models::{ModelFactory, OllamaOptions},
8    ollama::ensure_model as ensure_ollama_model,
9    session::{ConversationManager, select_conversation},
10    tui::{App, run_ui},
11    utils::{check_ollama_available, log_error, log_info, log_progress, log_warn},
12};
13
/// Main runtime orchestrator
///
/// Owns the parsed CLI arguments and the loaded configuration; `run`
/// consumes it to drive the whole startup sequence and hand off to the TUI.
pub struct Orchestrator {
    /// Parsed command-line arguments (subcommand, model override, session flags).
    cli: Cli,
    /// Application configuration; falls back to `Config::default()` if loading failed.
    config: Config,
}
19
20impl Orchestrator {
21    /// Create a new orchestrator from CLI args
22    pub fn new(cli: Cli) -> Result<Self> {
23        // Load configuration (single file + defaults)
24        let config = match load_config() {
25            Ok(cfg) => cfg,
26            Err(e) => {
27                log_warn(
28                    "CONFIG",
29                    format!("Config load failed: {:#}. Using defaults.", e),
30                );
31                Config::default()
32            },
33        };
34
35        Ok(Self { cli, config })
36    }
37
38    /// Run the orchestrator
39    pub async fn run(self) -> Result<()> {
40        // Progress tracking for startup
41        let total_steps = 6; // Total startup steps
42        let mut current_step = 0;
43
44        // Handle subcommands
45        current_step += 1;
46        log_progress(current_step, total_steps, "Processing commands");
47        if let Some(command) = &self.cli.command
48            && handle_command(command).await?
49        {
50            return Ok(()); // Command handled, exit
51        }
52        // Continue to chat for Commands::Chat
53
54        // Determine model to use (CLI arg > last_used > default_model)
55        current_step += 1;
56        log_progress(current_step, total_steps, "Configuring model");
57
58        let cli_model_provided = self.cli.model.is_some();
59        let model_id =
60            crate::app::resolve_model_id(self.cli.model.as_deref(), &self.config).await?;
61
62        log_info(
63            "MERMAID",
64            format!("Starting Mermaid with model: {}", model_id),
65        );
66
67        // Check Ollama availability (all models route through Ollama)
68        current_step += 1;
69        log_progress(current_step, total_steps, "Checking Ollama availability");
70        let ollama_check =
71            check_ollama_available(&self.config.ollama.host, self.config.ollama.port).await;
72
73        if !ollama_check.available {
74            log_error("OLLAMA", &ollama_check.message);
75            anyhow::bail!("{}", ollama_check.message);
76        }
77
78        // Validate model exists
79        current_step += 1;
80        log_progress(current_step, total_steps, "Checking model availability");
81        ensure_ollama_model(&model_id).await?;
82
83        // Persist model if CLI flag was used
84        if cli_model_provided && let Err(e) = persist_last_model(&model_id) {
85            log_warn("CONFIG", format!("Failed to persist model choice: {}", e));
86        }
87
88        // Create model instance with config for authentication
89        current_step += 1;
90        log_progress(current_step, total_steps, "Initializing model");
91        let model = ModelFactory::create(
92            &model_id,
93            Some(&self.config),
94        )
95        .await
96        .map_err(|e| {
97            log_error("ERROR", format!("Failed to initialize model: {}", e));
98            anyhow::anyhow!(
99                "Failed to initialize model: {}. Make sure the model is available and properly configured.",
100                e
101            )
102        })?;
103
104        // Set up project path for session management
105        let project_path = self.cli.path.clone().unwrap_or_else(|| PathBuf::from("."));
106
107        // Start UI - LLM explores codebase via tools, no context injection
108        current_step += 1;
109        log_progress(current_step, total_steps, "Starting UI");
110        let mut app = App::new(model, model_id.clone());
111
112        // Wire app config temperature/max_tokens into model state
113        app.model_state.temperature = self.config.default_model.temperature;
114        app.model_state.max_tokens = self.config.default_model.max_tokens;
115
116        // Wire Ollama hardware options from config
117        app.model_state.ollama_options = OllamaOptions {
118            num_gpu: self.config.ollama.num_gpu,
119            num_thread: self.config.ollama.num_thread,
120            num_ctx: self.config.ollama.num_ctx,
121            numa: self.config.ollama.numa,
122        };
123
124        // Handle session loading
125        // Default: start fresh (no history)
126        // --continue: resume last conversation
127        // --sessions: show picker to choose a previous conversation
128        if self.cli.continue_session || self.cli.sessions {
129            let conversation_manager = ConversationManager::new(&project_path)?;
130
131            if self.cli.sessions {
132                // Show selection UI for choosing a conversation
133                let conversations = conversation_manager.list_conversations()?;
134                if !conversations.is_empty() {
135                    if let Some(selected) = select_conversation(conversations)? {
136                        log_info(
137                            "RESUME",
138                            format!("Resuming conversation: {}", selected.title),
139                        );
140                        app.load_conversation(selected);
141                    }
142                } else {
143                    log_info("INFO", "No previous conversations found in this directory");
144                }
145            } else {
146                // --continue: resume last conversation
147                if let Some(last_conv) = conversation_manager.load_last_conversation()? {
148                    log_info("RESUME", format!("Resuming: {}", last_conv.title));
149                    app.load_conversation(last_conv);
150                } else {
151                    log_info("INFO", "No previous conversation to continue");
152                }
153            }
154        }
155
156        // Run the TUI
157        run_ui(app).await
158    }
159}