// batty_cli/team/load.rs
1//! Team load monitoring and historical load graphing.
2
3use std::path::Path;
4
5use anyhow::{Result, bail};
6use tracing::warn;
7
8use super::{config, events, hierarchy, now_unix, status, team_config_path, team_events_path};
9use crate::tmux;
10
/// Default duration window for load graph rendering, in seconds (1 hour).
const LOAD_GRAPH_WINDOW_SECONDS: u64 = 3_600;
/// Number of character cells in the rendered load graph line.
const LOAD_GRAPH_WIDTH: usize = 30;
14
/// A single point-in-time measurement of team load.
#[derive(Debug, Clone, Copy)]
pub struct TeamLoadSnapshot {
    /// Unix timestamp (seconds) when the sample was taken.
    pub timestamp: u64,
    /// Count of non-User team members at sample time.
    pub total_members: usize,
    /// Members whose state counted as active load (working/triaging/reviewing).
    pub working_members: usize,
    /// `working_members / total_members` in `[0.0, 1.0]`; 0.0 for an empty team.
    pub load: f64,
    /// Whether the team's tmux session existed at sample time.
    pub session_running: bool,
}
23
24/// Show an estimated team load value from live state, store it, and show recent load trends.
25pub fn show_load(project_root: &Path) -> Result<()> {
26    let current = capture_team_load(project_root)?;
27    if let Err(error) = log_team_load_snapshot(project_root, &current) {
28        warn!(error = %error, "failed to append load snapshot to team event log");
29    }
30
31    let mut history = read_team_load_history(project_root)?;
32    history.push(current);
33    history.sort_by_key(|snapshot| snapshot.timestamp);
34
35    println!(
36        "Current load: {:.1}% ({} / {} members working)",
37        current.load * 100.0,
38        current.working_members,
39        current.total_members.max(1)
40    );
41    println!(
42        "Session: {}",
43        if current.session_running {
44            "running"
45        } else {
46            "stopped"
47        }
48    );
49
50    if let Some(avg) = average_load(&history, current.timestamp, 10 * 60) {
51        println!("10m avg: {:.1}%", avg * 100.0);
52    } else {
53        println!("10m avg: n/a");
54    }
55    println!(
56        "30m avg: {}",
57        average_load(&history, current.timestamp, 30 * 60)
58            .map(|avg| format!("{:.1}%", avg * 100.0))
59            .unwrap_or_else(|| "n/a".to_string())
60    );
61    println!(
62        "60m avg: {}",
63        average_load(&history, current.timestamp, 60 * 60)
64            .map(|avg| format!("{:.1}%", avg * 100.0))
65            .unwrap_or_else(|| "n/a".to_string())
66    );
67
68    println!("Load graph (1h):");
69    println!("{}", render_load_graph(&history, current.timestamp));
70    Ok(())
71}
72
/// Sample the team's current load from config, tmux runtime state, and task queues.
///
/// Load is the fraction of non-User members whose status row counts as active
/// (see `counts_as_active_load`). Fails if no team config exists or the
/// hierarchy cannot be resolved; a runtime-status read failure degrades to an
/// empty status map with a warning instead of failing the sample.
fn capture_team_load(project_root: &Path) -> Result<TeamLoadSnapshot> {
    let config_path = team_config_path(project_root);
    if !config_path.exists() {
        bail!("no team config found at {}", config_path.display());
    }

    let team_config = config::TeamConfig::load(&config_path)?;
    let members = hierarchy::resolve_hierarchy(&team_config)?;
    // Session naming convention: "batty-<team name>".
    let session = format!("batty-{}", team_config.name);
    let session_running = tmux::session_exists(&session);
    let runtime_statuses = if session_running {
        match status::list_runtime_member_statuses(&session) {
            Ok(statuses) => statuses,
            Err(error) => {
                warn!(session = %session, error = %error, "failed to read runtime statuses for load sampling");
                // Best effort: sample with no runtime info rather than erroring out.
                std::collections::HashMap::new()
            }
        }
    } else {
        std::collections::HashMap::new()
    };

    let triage_backlog_counts = status::triage_backlog_counts(project_root, &members);
    let owned_task_buckets = status::owned_task_buckets(project_root, &members);
    let branch_mismatches = status::branch_mismatch_by_member(project_root, &members);
    let rows = status::build_team_status_rows(
        &members,
        session_running,
        &runtime_statuses,
        &Default::default(),
        &triage_backlog_counts,
        &owned_task_buckets,
        &branch_mismatches,
        &Default::default(),
        &Default::default(),
    );
    let mut total_members = 0usize;
    let mut working_members = 0usize;

    for row in &rows {
        // "User" rows represent humans, not agents; exclude them from load.
        if row.role_type == "User" {
            continue;
        }
        total_members += 1;
        if counts_as_active_load(row) {
            working_members += 1;
        }
    }

    let load = if total_members == 0 {
        0.0
    } else {
        working_members as f64 / total_members as f64
    };

    Ok(TeamLoadSnapshot {
        timestamp: now_unix(),
        total_members,
        // Defensive clamp; the loop above already guarantees working <= total.
        working_members: working_members.min(total_members),
        load,
        session_running,
    })
}
136
137fn counts_as_active_load(row: &status::TeamStatusRow) -> bool {
138    matches!(row.state.as_str(), "working" | "triaging" | "reviewing")
139}
140
/// Append the given snapshot as a `load_snapshot` event to the team event log.
///
/// Returns an error if the event sink cannot be opened or the emit fails.
fn log_team_load_snapshot(project_root: &Path, snapshot: &TeamLoadSnapshot) -> Result<()> {
    let events_path = team_events_path(project_root);
    let mut sink = events::EventSink::new(&events_path)?;
    // Counts are narrowed to u32 for the event payload; team sizes are small.
    let event = events::TeamEvent::load_snapshot(
        snapshot.working_members as u32,
        snapshot.total_members as u32,
        snapshot.session_running,
    );
    sink.emit(event)?;
    Ok(())
}
152
153fn read_team_load_history(project_root: &Path) -> Result<Vec<TeamLoadSnapshot>> {
154    let events_path = team_events_path(project_root);
155    let events = events::read_events(&events_path)?;
156    let mut history = Vec::new();
157    for event in events {
158        if event.event != "load_snapshot" {
159            continue;
160        }
161        let Some(load) = event.load else {
162            continue;
163        };
164        let Some(working_members) = event.working_members else {
165            continue;
166        };
167        let Some(total_members) = event.total_members else {
168            continue;
169        };
170
171        history.push(TeamLoadSnapshot {
172            timestamp: event.ts,
173            total_members: total_members as usize,
174            working_members: working_members as usize,
175            load,
176            session_running: event.session_running.unwrap_or(false),
177        });
178    }
179    Ok(history)
180}
181
182fn average_load(samples: &[TeamLoadSnapshot], now: u64, window_seconds: u64) -> Option<f64> {
183    let cutoff = now.saturating_sub(window_seconds);
184    let mut values = Vec::new();
185    for sample in samples {
186        if sample.timestamp >= cutoff && sample.timestamp <= now {
187            values.push(sample.load);
188        }
189    }
190    if values.is_empty() {
191        return None;
192    }
193    let sum: f64 = values.iter().copied().sum();
194    Some(sum / values.len() as f64)
195}
196
197fn render_load_graph(samples: &[TeamLoadSnapshot], now: u64) -> String {
198    if samples.is_empty() {
199        return "(no historical load data yet)".to_string();
200    }
201
202    let bucket_size = (LOAD_GRAPH_WINDOW_SECONDS / LOAD_GRAPH_WIDTH as u64).max(1);
203    let window_start = now.saturating_sub(LOAD_GRAPH_WINDOW_SECONDS);
204    let mut history = String::new();
205    let mut previous = 0.0;
206    for index in 0..LOAD_GRAPH_WIDTH {
207        let bucket_start = window_start + (index as u64 * bucket_size);
208        let bucket_end = if index + 1 == LOAD_GRAPH_WIDTH {
209            now + 1
210        } else {
211            bucket_start + bucket_size
212        };
213
214        let mut sum = 0.0;
215        let mut count = 0usize;
216        for sample in samples {
217            if sample.timestamp >= bucket_start && sample.timestamp < bucket_end {
218                sum += sample.load;
219                count += 1;
220            }
221        }
222
223        let value = if count == 0 {
224            previous
225        } else {
226            sum / count as f64
227        };
228        previous = value;
229        history.push(load_point_char(value));
230    }
231
232    history
233}
234
/// Map a load value in `[0.0, 1.0]` to one of six intensity characters.
///
/// Values outside the range are clamped first; the five-step quantisation
/// rounds to the nearest cell of the ramp.
fn load_point_char(value: f64) -> char {
    const RAMP: [char; 6] = [' ', '.', ':', '=', '#', '@'];
    let clamped = value.clamp(0.0, 1.0);
    // Clamping bounds the product to [0, 5], so the index is always in range
    // (a NaN input casts to 0 and renders as a blank, matching the original).
    RAMP[(clamped * 5.0).round() as usize]
}
246
#[cfg(test)]
mod tests {
    use super::*;

    /// "triaging" and "reviewing" states count toward load; "idle" does not.
    #[test]
    fn counts_as_active_load_treats_triaging_as_working() {
        // Fully-populated base fixture; the variants below only override the
        // fields relevant to the state under test.
        let triaging = status::TeamStatusRow {
            name: "lead".to_string(),
            role: "lead".to_string(),
            role_type: "Manager".to_string(),
            agent: Some("codex".to_string()),
            reports_to: Some("architect".to_string()),
            state: "triaging".to_string(),
            pending_inbox: 0,
            triage_backlog: 2,
            active_owned_tasks: vec![191],
            review_owned_tasks: vec![193],
            signal: Some("needs triage (2)".to_string()),
            runtime_label: Some("idle".to_string()),
            worktree_staleness: None,
            health: status::AgentHealthSummary::default(),
            health_summary: "-".to_string(),
            eta: "-".to_string(),
        };
        let reviewing = status::TeamStatusRow {
            state: "reviewing".to_string(),
            triage_backlog: 0,
            signal: Some("needs review (1)".to_string()),
            runtime_label: Some("idle".to_string()),
            ..triaging.clone()
        };
        let idle = status::TeamStatusRow {
            state: "idle".to_string(),
            triage_backlog: 0,
            signal: None,
            runtime_label: Some("idle".to_string()),
            ..triaging.clone()
        };

        assert!(counts_as_active_load(&triaging));
        assert!(counts_as_active_load(&reviewing));
        assert!(!counts_as_active_load(&idle));
    }

    /// Only samples inside the requested window feed the average.
    #[test]
    fn average_load_ignores_points_older_than_window() {
        let now = 10_000u64;
        let samples = vec![
            // Outside the 60s window below; must be excluded from that average.
            TeamLoadSnapshot {
                timestamp: now - 3_000,
                total_members: 10,
                working_members: 0,
                load: 0.8,
                session_running: true,
            },
            TeamLoadSnapshot {
                timestamp: now - 10,
                total_members: 10,
                working_members: 0,
                load: 0.4,
                session_running: true,
            },
            TeamLoadSnapshot {
                timestamp: now - 20,
                total_members: 10,
                working_members: 0,
                load: 0.6,
                session_running: true,
            },
        ];

        // (0.4 + 0.6) / 2; the 0.8 sample is 3000s old and out of range.
        let avg_60s = average_load(&samples, now, 60).unwrap();
        assert!((avg_60s - 0.5).abs() < 0.0001);
        // A 5s window contains no samples at all.
        assert!(average_load(&samples, now, 5).is_none());
    }

    /// The graph is always exactly LOAD_GRAPH_WIDTH cells of ramp characters.
    #[test]
    fn render_load_graph_returns_expected_width() {
        let now = 10_000u64;
        let samples = vec![
            TeamLoadSnapshot {
                timestamp: now - 3_600,
                total_members: 10,
                working_members: 2,
                load: 0.2,
                session_running: true,
            },
            TeamLoadSnapshot {
                timestamp: now - 1_800,
                total_members: 10,
                working_members: 5,
                load: 0.5,
                session_running: true,
            },
            TeamLoadSnapshot {
                timestamp: now - 900,
                total_members: 10,
                working_members: 10,
                load: 1.0,
                session_running: true,
            },
            TeamLoadSnapshot {
                timestamp: now - 600,
                total_members: 10,
                working_members: 0,
                load: 0.0,
                session_running: true,
            },
        ];

        let graph = render_load_graph(&samples, now);
        // byte length == char count here because every ramp char is ASCII.
        assert_eq!(graph.len(), LOAD_GRAPH_WIDTH);
        assert!(graph.chars().all(|c| " .:=#@".contains(c)));
    }
}