battlecommand_forge/hardware.rs

//! Full hardware monitoring — CPU, RAM, thermal, Ollama VRAM.
//! Ported from battleclaw-v2. Intended to be polled every ~2 seconds by the caller.
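//!
//! A minimal polling sketch (illustrative only; assumes a tokio runtime, and the
//! 2-second cadence is the caller's responsibility, nothing here schedules itself):
//!
//! ```ignore
//! let mut ticker = tokio::time::interval(std::time::Duration::from_secs(2));
//! loop {
//!     ticker.tick().await;
//!     let metrics = hardware::collect_metrics().await;
//!     println!("{metrics}");
//! }
//! ```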

use anyhow::Result;

#[derive(Debug, Default, Clone)]
pub struct HardwareMetrics {
    pub cpu_usage_total: f32,
    pub cpu_name: String,
    pub core_count: usize,
    pub mem_total_gb: f64,
    pub mem_used_gb: f64,
    pub mem_available_gb: f64,
    pub temperatures: Vec<TempReading>,
    pub ollama_models: Vec<OllamaModel>,
    pub ollama_vram_total_gb: f64,
    pub ollama_cpu_pct: f32,
    pub ollama_mem_gb: f64,
}

#[derive(Debug, Clone)]
pub struct TempReading {
    pub label: String,
    pub celsius: f64,
    pub critical: f64,
}

#[derive(Debug, Clone)]
pub struct OllamaModel {
    pub name: String,
    pub size_gb: f64,
    pub vram_gb: f64,
    pub context_length: u64,
}

/// Collect all hardware metrics.
pub async fn collect_metrics() -> HardwareMetrics {
    let mut m = HardwareMetrics::default();

    // CPU info (cross-platform)
    m.core_count = std::thread::available_parallelism()
        .map(|n| n.get())
        .unwrap_or(4);
    m.cpu_name = get_cpu_name();

    // CPU usage + memory — platform-specific
    collect_cpu_memory(&mut m);

    // Ollama running models (cross-platform via HTTP API)
    if let Ok(models) = get_ollama_running_models().await {
        m.ollama_vram_total_gb = models.iter().map(|model| model.vram_gb).sum();
        m.ollama_models = models;
    }

    // Ollama process stats (Unix only — gracefully skipped on Windows)
    collect_ollama_process_stats(&mut m);

    m
}

fn collect_cpu_memory(m: &mut HardwareMetrics) {
    if cfg!(target_os = "macos") {
        // macOS: sysctl + vm_stat. The 1-minute load average, normalized by
        // core count, stands in as a rough CPU-usage figure.
        if let Ok(output) = std::process::Command::new("sysctl")
            .args(["-n", "vm.loadavg"])
            .output()
        {
            if let Ok(s) = String::from_utf8(output.stdout) {
                let nums: Vec<f64> = s
                    .split_whitespace()
                    .filter_map(|w| {
                        w.trim_matches(|c: char| !c.is_numeric() && c != '.')
                            .parse()
                            .ok()
                    })
                    .collect();
                if !nums.is_empty() {
                    m.cpu_usage_total = ((nums[0] / m.core_count as f64) * 100.0).min(100.0) as f32;
                }
            }
        }
        if let Ok(output) = std::process::Command::new("sysctl")
            .args(["-n", "hw.memsize"])
            .output()
        {
            if let Ok(s) = String::from_utf8(output.stdout) {
                if let Ok(bytes) = s.trim().parse::<u64>() {
                    m.mem_total_gb = bytes as f64 / 1_073_741_824.0;
                }
            }
        }
        if let Ok(output) = std::process::Command::new("vm_stat").output() {
            if let Ok(s) = String::from_utf8(output.stdout) {
                // vm_stat's header reports the page size ("(page size of N bytes)");
                // parse it rather than assuming the Apple Silicon default of 16 KiB.
                let page_size: u64 = s
                    .split("page size of ")
                    .nth(1)
                    .and_then(|rest| rest.split_whitespace().next())
                    .and_then(|n| n.parse().ok())
                    .unwrap_or(16384);
                let mut pages_active = 0u64;
                let mut pages_wired = 0u64;
                let mut pages_compressed = 0u64;
                for line in s.lines() {
                    if line.contains("Pages active") {
                        pages_active = extract_vm_stat_value(line);
                    } else if line.contains("Pages wired") {
                        pages_wired = extract_vm_stat_value(line);
                    } else if line.contains("Pages occupied by compressor") {
                        pages_compressed = extract_vm_stat_value(line);
                    }
                }
                m.mem_used_gb = ((pages_active + pages_wired + pages_compressed) * page_size)
                    as f64
                    / 1_073_741_824.0;
                m.mem_available_gb = m.mem_total_gb - m.mem_used_gb;
            }
        }
    } else if cfg!(target_os = "linux") {
        // Linux: /proc/loadavg + /proc/meminfo
        if let Ok(s) = std::fs::read_to_string("/proc/loadavg") {
            if let Some(load1) = s
                .split_whitespace()
                .next()
                .and_then(|w| w.parse::<f64>().ok())
            {
                m.cpu_usage_total = ((load1 / m.core_count as f64) * 100.0).min(100.0) as f32;
            }
        }
        if let Ok(s) = std::fs::read_to_string("/proc/meminfo") {
            for line in s.lines() {
                if line.starts_with("MemTotal:") {
                    if let Some(kb) = extract_meminfo_kb(line) {
                        m.mem_total_gb = kb as f64 / 1_048_576.0;
                    }
                } else if line.starts_with("MemAvailable:") {
                    if let Some(kb) = extract_meminfo_kb(line) {
                        m.mem_available_gb = kb as f64 / 1_048_576.0;
                    }
                }
            }
            m.mem_used_gb = m.mem_total_gb - m.mem_available_gb;
        }
    }
    // Windows: falls through with defaults (0.0) — acceptable for v1
}

fn collect_ollama_process_stats(m: &mut HardwareMetrics) {
    if cfg!(windows) {
        return;
    }
    if let Ok(output) = std::process::Command::new("ps")
        .args(["-eo", "pid,%cpu,rss,comm"])
        .output()
    {
        if let Ok(s) = String::from_utf8(output.stdout) {
            // Sum over all matching processes (e.g. `ollama serve` plus its
            // runner children) instead of keeping only the last match.
            for line in s.lines() {
                if line.contains("ollama") {
                    let parts: Vec<&str> = line.split_whitespace().collect();
                    if parts.len() >= 3 {
                        m.ollama_cpu_pct += parts[1].parse().unwrap_or(0.0);
                        let rss_kb: f64 = parts[2].parse().unwrap_or(0.0);
                        m.ollama_mem_gb += rss_kb / 1_048_576.0;
                    }
                }
            }
        }
    }
}

fn get_cpu_name() -> String {
    if cfg!(target_os = "macos") {
        if let Ok(output) = std::process::Command::new("sysctl")
            .args(["-n", "machdep.cpu.brand_string"])
            .output()
        {
            if let Ok(s) = String::from_utf8(output.stdout) {
                let name = s.trim().to_string();
                if !name.is_empty() {
                    return name;
                }
            }
        }
    } else if cfg!(target_os = "linux") {
        if let Ok(s) = std::fs::read_to_string("/proc/cpuinfo") {
            for line in s.lines() {
                if line.starts_with("model name") {
                    if let Some(name) = line.split(':').nth(1) {
                        return name.trim().to_string();
                    }
                }
            }
        }
    }
    format!(
        "{}-core CPU",
        std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(1)
    )
}

fn extract_meminfo_kb(line: &str) -> Option<u64> {
    line.split_whitespace().nth(1)?.parse().ok()
}

fn extract_vm_stat_value(line: &str) -> u64 {
    line.split(':')
        .nth(1)
        .and_then(|v| v.trim().trim_end_matches('.').parse().ok())
        .unwrap_or(0)
}

async fn get_ollama_running_models() -> Result<Vec<OllamaModel>> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(3))
        .build()?;

    let resp = client
        .get(format!("{}/api/ps", crate::llm::ollama_url()))
        .send()
        .await?;
    let body: serde_json::Value = resp.json().await?;

    let models = body["models"]
        .as_array()
        .map(|arr| {
            arr.iter()
                .map(|m| OllamaModel {
                    name: m["name"].as_str().unwrap_or("").to_string(),
                    size_gb: m["size"].as_u64().unwrap_or(0) as f64 / 1_073_741_824.0,
                    vram_gb: m["size_vram"].as_u64().unwrap_or(0) as f64 / 1_073_741_824.0,
                    // `context_length` is reported by newer Ollama builds in
                    // /api/ps; default to 0 when the field is absent.
                    context_length: m["context_length"].as_u64().unwrap_or(0),
                })
                .collect()
        })
        .unwrap_or_default();

    Ok(models)
}

/// Render hardware metrics as text lines for the TUI.
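///
/// A hypothetical usage sketch; `ui.push_line` stands in for whatever widget
/// API the TUI layer actually exposes:
///
/// ```ignore
/// let metrics = collect_metrics().await;
/// for line in render_for_tui(&metrics) {
///     ui.push_line(line);
/// }
/// ```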
pub fn render_for_tui(m: &HardwareMetrics) -> Vec<String> {
    let mut lines = Vec::new();

    // CPU
    let cpu_bar = progress_bar(m.cpu_usage_total as f64, 100.0, 20);
    lines.push(format!(
        "CPU:  {} {:.0}%  {}",
        cpu_bar, m.cpu_usage_total, m.cpu_name
    ));
    lines.push(format!("      {} cores", m.core_count));

    // Memory
    let mem_pct = if m.mem_total_gb > 0.0 {
        (m.mem_used_gb / m.mem_total_gb) * 100.0
    } else {
        0.0
    };
    let mem_bar = progress_bar(m.mem_used_gb, m.mem_total_gb, 20);
    lines.push(format!(
        "RAM:  {} {:.1}/{:.1} GB ({:.0}%)",
        mem_bar, m.mem_used_gb, m.mem_total_gb, mem_pct
    ));
    lines.push(format!("      {:.1} GB available", m.mem_available_gb));
    lines.push(String::new());

    // Ollama
    if m.ollama_models.is_empty() {
        lines.push("Ollama: no models loaded".to_string());
    } else {
        lines.push(format!(
            "Ollama: {} models loaded ({:.1} GB VRAM)",
            m.ollama_models.len(),
            m.ollama_vram_total_gb
        ));
        for model in &m.ollama_models {
            lines.push(format!(
                "  {} — {:.1} GB (VRAM: {:.1} GB)",
                model.name, model.size_gb, model.vram_gb
            ));
        }
    }
    if m.ollama_cpu_pct > 0.0 || m.ollama_mem_gb > 0.0 {
        lines.push(format!(
            "  Process: {:.0}% CPU, {:.1} GB RAM",
            m.ollama_cpu_pct, m.ollama_mem_gb
        ));
    }

    // Thermal
    if !m.temperatures.is_empty() {
        lines.push(String::new());
        lines.push("Thermal:".to_string());
        for t in &m.temperatures {
            lines.push(format!(
                "  {}: {:.0}°C (critical: {:.0}°C)",
                t.label, t.celsius, t.critical
            ));
        }
    }

    lines
}

fn progress_bar(value: f64, max: f64, width: usize) -> String {
    let ratio = (value / max).clamp(0.0, 1.0);
    let filled = (ratio * width as f64) as usize;
    let empty = width - filled;
    format!("[{}{}]", "█".repeat(filled), "░".repeat(empty))
}

impl std::fmt::Display for HardwareMetrics {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        for line in render_for_tui(self) {
            writeln!(f, "{}", line)?;
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_progress_bar() {
        let bar = progress_bar(50.0, 100.0, 10);
        assert_eq!(bar, "[█████░░░░░]");
    }

    #[test]
    fn test_num_cpus() {
        let n = std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(1);
        assert!(n >= 1);
    }
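
    // Added coverage for the parsing helpers; the sample lines below mirror the
    // general shape of /proc/meminfo and `vm_stat` output (values are made up).
    #[test]
    fn test_extract_meminfo_kb() {
        assert_eq!(
            extract_meminfo_kb("MemTotal:       16384000 kB"),
            Some(16384000)
        );
        assert_eq!(extract_meminfo_kb("MemTotal:"), None);
    }

    #[test]
    fn test_extract_vm_stat_value() {
        assert_eq!(
            extract_vm_stat_value("Pages active:                      123456."),
            123456
        );
        assert_eq!(extract_vm_stat_value("no separator here"), 0);
    }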
}