1use anyhow::Result;
2use colored::Colorize;
3use std::path::PathBuf;
4
5use crate::{
6 app::{load_config, Config},
7 cli::{handle_command, Cli},
8 context::ContextManager,
9 models::ModelFactory,
10 ollama::ensure_model as ensure_ollama_model,
11 proxy::{count_mermaid_processes, ensure_proxy, is_proxy_running, stop_proxy},
12 session::{select_conversation, ConversationManager, SessionState},
13 tui::{run_ui, App},
14 utils::{log_error, log_info, log_progress, log_warn},
15};
16
/// Top-level application driver: owns the parsed CLI arguments, the resolved
/// configuration, persisted session state, and a flag recording whether this
/// process launched the LiteLLM proxy (so cleanup knows to stop it).
pub struct Orchestrator {
    // Parsed command-line arguments.
    cli: Cli,
    // Effective configuration: file/env defaults merged with CLI overrides.
    config: Config,
    // Cross-run state persisted to disk (e.g. the last selected model).
    session: SessionState,
    // True only when this process itself spawned the proxy; used by cleanup()
    // to decide whether stopping it on exit is our responsibility.
    proxy_started_by_us: bool,
}
24
25impl Orchestrator {
26 pub fn new(cli: Cli) -> Result<Self> {
28 let mut config = if let Some(config_path) = &cli.config {
30 let toml_str = std::fs::read_to_string(config_path)?;
31 toml::from_str::<Config>(&toml_str)?
32 } else {
33 match load_config() {
34 Ok(cfg) => cfg,
35 Err(e) => {
36 let err_msg = format!("{:?}", e);
38 if err_msg.contains("MERMAID_") && err_msg.contains("environment variable") {
39 log_warn(
40 "CONFIG",
41 "Ignoring invalid MERMAID_* environment variables. Using config file and defaults.".to_string(),
42 );
43 } else {
44 log_warn(
45 "CONFIG",
46 format!("Config load failed: {}. Using defaults.", e),
47 );
48 }
49 Config::default()
50 },
51 }
52 };
53
54 if cli.num_gpu.is_some() {
56 config.ollama.num_gpu = cli.num_gpu;
57 }
58 if cli.num_thread.is_some() {
59 config.ollama.num_thread = cli.num_thread;
60 }
61 if cli.num_ctx.is_some() {
62 config.ollama.num_ctx = cli.num_ctx;
63 }
64 if cli.numa.is_some() {
65 config.ollama.numa = cli.numa;
66 }
67
68 let session = SessionState::load().unwrap_or_default();
70
71 Ok(Self {
72 cli,
73 config,
74 session,
75 proxy_started_by_us: false,
76 })
77 }
78
79 pub async fn run(mut self) -> Result<()> {
81 let total_steps = 7; let mut current_step = 0;
84
85 current_step += 1;
87 log_progress(current_step, total_steps, "Processing commands");
88 if let Some(command) = &self.cli.command {
89 if handle_command(command).await? {
90 return Ok(()); }
92 }
94
95 current_step += 1;
97 log_progress(current_step, total_steps, "Configuring model");
98 let (model_id, should_save_session) = if let Some(model) = &self.cli.model {
99 (model.clone(), true)
101 } else if let Some(last_model) = self.session.get_model() {
102 (last_model.to_string(), false)
104 } else {
105 (
107 format!(
108 "{}/{}",
109 self.config.default_model.provider, self.config.default_model.name
110 ),
111 true,
112 )
113 };
114
115 if should_save_session {
117 self.session.set_model(model_id.clone());
118 if let Err(e) = self.session.save() {
119 log_warn("WARNING", format!("Failed to save initial session: {}", e));
120 }
121 }
122
123 log_info(
124 "MERMAID",
125 format!("Starting Mermaid with model: {}", model_id.green()),
126 );
127
128 current_step += 1;
130 if requires_proxy(&model_id) {
131 log_progress(current_step, total_steps, "Checking LiteLLM proxy");
132 if !is_proxy_running().await {
133 ensure_proxy(self.cli.no_auto_proxy).await?;
134 self.proxy_started_by_us = !self.cli.no_auto_proxy;
135 }
136 } else {
137 log_progress(current_step, total_steps, "Using direct Ollama connection");
138 }
139
140 current_step += 1;
142 log_progress(current_step, total_steps, "Checking model availability");
143 ensure_ollama_model(&model_id, self.cli.no_auto_install).await?;
144
145 current_step += 1;
147 log_progress(current_step, total_steps, "Initializing model");
148 let model = match ModelFactory::create_with_backend(
149 &model_id,
150 Some(&self.config),
151 self.cli.backend.as_deref(),
152 )
153 .await
154 {
155 Ok(m) => m,
156 Err(e) => {
157 log_error("ERROR", format!("Failed to initialize model: {}", e));
158 log_error(
159 "",
160 "Make sure the model is available and properly configured.",
161 );
162 std::process::exit(1);
163 },
164 };
165
166 let project_path = self.cli.path.clone().unwrap_or_else(|| PathBuf::from("."));
168
169 current_step += 1;
171 log_progress(current_step, total_steps, "Loading project structure");
172 let context_manager = self.load_project_structure(&project_path).await?;
173
174 current_step += 1;
176 log_progress(current_step, total_steps, "Starting UI");
177 let context = context_manager.build_context();
178 let mut app = App::new(model, context, model_id.clone());
179 app.set_context_manager(context_manager);
180
181 if self.cli.resume || self.cli.continue_conversation {
183 let conversation_manager = ConversationManager::new(&project_path)?;
184 let conversations = conversation_manager.list_conversations()?;
185
186 if self.cli.continue_conversation {
187 if let Some(last_conv) = conversation_manager.load_last_conversation()? {
189 log_info(
190 "CONTINUE",
191 format!("Continuing last conversation: {}", last_conv.title.green()),
192 );
193 app.load_conversation(last_conv);
194 } else {
195 log_info("INFO", "No previous conversations found in this directory");
196 }
197 } else if self.cli.resume {
198 if !conversations.is_empty() {
200 if let Some(selected) = select_conversation(conversations)? {
201 log_info(
202 "RESUME",
203 format!("Resuming conversation: {}", selected.title.green()),
204 );
205 app.load_conversation(selected);
206 }
207 } else {
208 log_info("INFO", "No previous conversations found in this directory");
209 }
210 }
211 }
212
213 let result = run_ui(app).await;
215
216 self.cleanup().await?;
221
222 result
223 }
224
225 async fn load_project_structure(
227 &self,
228 project_path: &PathBuf,
229 ) -> Result<ContextManager> {
230 log_info(
231 "FILES",
232 format!("Loading project structure from: {}", project_path.display()),
233 );
234
235 let mut manager = ContextManager::new(project_path);
236 manager.reload().await?;
237
238 log_info(
239 "STATS",
240 format!(
241 "Found {} files in project",
242 manager.total_files()
243 ),
244 );
245
246 Ok(manager)
247 }
248
249 async fn cleanup(&self) -> Result<()> {
251 if self.proxy_started_by_us {
255 let should_stop = if self.cli.stop_proxy_on_exit {
256 true
257 } else {
258 let mermaid_count = count_mermaid_processes();
260 mermaid_count <= 1 };
262
263 if should_stop {
264 log_info(
265 "STOP",
266 "Stopping LiteLLM proxy (no other Mermaid instances running)...",
267 );
268 stop_proxy().await?;
269 }
270 }
271
272 Ok(())
273 }
274}
275
/// Reports whether `model_id` must be served through the LiteLLM proxy.
///
/// Models prefixed `ollama/` talk to Ollama directly; every other provider
/// (including ids with no provider prefix at all) goes through the proxy.
fn requires_proxy(model_id: &str) -> bool {
    let direct_ollama = model_id.starts_with("ollama/");
    !direct_ollama
}