mermaid_cli/proxy/manager.rs

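//! Lifecycle helpers for the LiteLLM proxy container: start it with the
//! detected container runtime, stop it, and make sure it is reachable before
//! the CLI continues.
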
use anyhow::Result;
use tokio::process::Command;

use super::{get_compose_dir, is_container_runtime_available, is_proxy_running};
use crate::constants::{
    PROXY_MAX_STARTUP_ATTEMPTS, PROXY_POLL_INTERVAL_MS, PROXY_STARTUP_WAIT_SECS,
};
use crate::utils::{log_error, log_info, log_warn};

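/// Start the LiteLLM proxy container with the detected runtime (Podman or
/// Docker), creating `.env` from `.env.example` when possible, then poll
/// until the proxy responds or the startup window elapses.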
pub async fn start_proxy() -> Result<()> {
    // Resolve the container runtime; bail with installation hints if none is found.
    let runtime = is_container_runtime_available().ok_or_else(|| {
        anyhow::anyhow!(
            "ERROR: Neither Podman nor Docker found\n \
             Install Podman: sudo apt-get install podman podman-compose\n \
             Or install Docker: https://docs.docker.com/engine/install/"
        )
    })?;

    let compose_dir = get_compose_dir()?;

    // Make sure a .env file exists, copying the bundled template when available.
    let env_path = compose_dir.join(".env");
    if !env_path.exists() {
        let env_example = compose_dir.join(".env.example");
        if env_example.exists() {
            log_warn("WARNING", "No .env file found, creating from template...");
            std::fs::copy(&env_example, &env_path)?;
            log_info("SETUP", format!("Created .env at: {}", env_path.display()));
            log_info(
                "",
                "Edit this file to add your API keys for non-Ollama models",
            );
        } else {
            log_warn(
                "WARNING",
                format!("No .env file found at: {}", env_path.display()),
            );
            log_warn(
                "",
                format!(
                    "Copy .env.example and add your API keys:\n \
                     cp {} {}",
                    env_example.display(),
                    env_path.display()
                ),
            );
        }
    }

    log_info(
        "START",
        format!("Starting LiteLLM proxy with {}...", runtime),
    );

    // `podman`/`docker` take the `compose` subcommand; any other runtime value is
    // assumed to be a standalone compose binary (e.g. podman-compose) that takes
    // `up` directly.
    let output = if runtime == "podman" || runtime == "docker" {
        Command::new(runtime)
            .args(&["compose", "up", "-d", "litellm"])
            .current_dir(&compose_dir)
            .output()
            .await?
    } else {
        Command::new(runtime)
            .args(&["up", "-d", "litellm"])
            .current_dir(&compose_dir)
            .output()
            .await?
    };

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        anyhow::bail!("Failed to start LiteLLM proxy: {}", stderr);
    }

    log_info("WAIT", "Waiting for LiteLLM proxy to be ready...");

    // Poll until the proxy reports ready, or give up once the wait budget is spent.
    let max_wait_time = std::time::Duration::from_secs(
        PROXY_STARTUP_WAIT_SECS + (PROXY_MAX_STARTUP_ATTEMPTS as u64),
    );
    let poll_interval = std::time::Duration::from_millis(PROXY_POLL_INTERVAL_MS);
    let start_time = std::time::Instant::now();

    while start_time.elapsed() < max_wait_time {
        if is_proxy_running().await {
            let elapsed = start_time.elapsed();
            log_info(
                "SUCCESS",
                format!(
                    "LiteLLM proxy started successfully in {:.1}s",
                    elapsed.as_secs_f64()
                ),
            );
            return Ok(());
        }
        tokio::time::sleep(poll_interval).await;
    }

    let compose_path = compose_dir.join("docker-compose.yml");
    let env_path = compose_dir.join(".env");

    anyhow::bail!(
        "LiteLLM proxy failed to start properly.\n\
         \n\
         Troubleshooting:\n\
         1. Check logs: {} -f {} logs litellm\n\
         2. Verify .env file exists: {}\n\
         3. For Ollama-only usage: Use Ollama models (e.g., ollama/qwen3-coder:30b)\n\
         4. Manual start: ./scripts/start_litellm.sh",
        runtime,
        compose_path.display(),
        env_path.display()
    )
}

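/// Stop the LiteLLM proxy container. A failed stop is logged as a warning
/// rather than returned as an error.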
pub async fn stop_proxy() -> Result<()> {
    let runtime = is_container_runtime_available()
        .ok_or_else(|| anyhow::anyhow!("No container runtime found (Podman or Docker)"))?;

    let compose_dir = get_compose_dir()?;

    log_info("STOP", "Stopping LiteLLM proxy...");

    let output = if runtime == "podman" || runtime == "docker" {
        Command::new(runtime)
            .args(&["compose", "stop", "litellm"])
            .current_dir(&compose_dir)
            .output()
            .await?
    } else {
        Command::new(runtime)
            .args(&["stop", "litellm"])
            .current_dir(&compose_dir)
            .output()
            .await?
    };

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        log_warn(
            "WARNING",
            format!("Failed to stop LiteLLM proxy gracefully: {}", stderr),
        );
    } else {
        log_info("SUCCESS", "LiteLLM proxy stopped");
    }

    Ok(())
}

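/// Ensure the LiteLLM proxy is running before the CLI proceeds.
///
/// If the proxy is already up this is a no-op. With `no_auto_proxy` set, the
/// process exits with an error instead of auto-starting; otherwise it falls
/// back to [`start_proxy`].
///
/// A hypothetical call site (the real CLI wiring may differ):
///
/// ```ignore
/// // Assumes `args.no_auto_proxy` comes from the CLI's --no-auto-proxy flag.
/// ensure_proxy(args.no_auto_proxy).await?;
/// ```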
pub async fn ensure_proxy(no_auto_proxy: bool) -> Result<()> {
    if is_proxy_running().await {
        return Ok(());
    }

    // With --no-auto-proxy the user opted out of auto-start, so report and exit.
    if no_auto_proxy {
        log_error("ERROR", "LiteLLM proxy is not running");
        log_error("", "Start it manually with: ./start_litellm.sh");
        log_error("", "Or remove the --no-auto-proxy flag");
        std::process::exit(1);
    }

    start_proxy().await
}