1use std::collections::{HashMap, HashSet};
4use std::fs::{self, OpenOptions};
5use std::io::{BufRead, BufReader, Write};
6use std::path::{Path, PathBuf};
7
8use anyhow::{Context, Result};
9use serde::{Deserialize, Serialize};
10
11use crate::task::Task;
12
// Directory (under `.batty/`) and filename used to persist JSONL learning records.
const LEARNINGS_DIR: &str = "learnings";
const TASK_LEARNINGS_FILE: &str = "task_learnings.jsonl";
// Caps on each dispatch-context section so the assignment message stays short.
const MAX_RELEVANT_LEARNINGS: usize = 3;
const MAX_RELATED_TASKS: usize = 3;
const MAX_FILE_PREDICTIONS: usize = 5;
const MAX_FAILURE_PATTERNS: usize = 2;
// Overall word budget applied to the assembled context (see `limit_word_count`).
const MAX_CONTEXT_WORDS: usize = 500;
20
/// Aggregated telemetry counters for one task, as loaded by `load_task_metrics`.
/// `Default` (all zeros / `None`) is used when a task has no telemetry rows.
#[derive(Debug, Clone, Default)]
struct TaskMetricsSummary {
    // Retry count recorded for the task.
    retries: i64,
    // Escalation count recorded for the task.
    escalations: i64,
    // How many times the task's context was restarted.
    context_restart_count: i64,
    // Completion marker from telemetry; `None` when not recorded.
    // NOTE(review): presumably an epoch timestamp — confirm against telemetry_db.
    completed_at: Option<i64>,
}
28
/// A completed task judged similar to the one being dispatched, bundled with
/// the evidence used when building dispatch context from it.
#[derive(Debug)]
struct RelatedTaskContext {
    // The completed board task itself.
    task: Task,
    // Similarity score from `related_task_score`; higher means more relevant.
    score: usize,
    // Paths listed under `changed_paths` in the task file's frontmatter.
    changed_paths: Vec<String>,
    // Telemetry counters for the task (default/zeroed when none exist).
    metrics: TaskMetricsSummary,
    // Subject line of the task's commit, when resolvable via `git log`.
    git_summary: Option<String>,
}
37
/// One JSONL record in the task-learnings file, written when a task completes.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct LearningEntry {
    pub task_id: u32,
    pub title: String,
    // Free-text learning summary provided at completion time (trimmed).
    pub summary: String,
    pub tags: Vec<String>,
    // Normalized keywords extracted from the task title and description.
    pub keywords: Vec<String>,
    pub engineer: String,
    // RFC 3339 timestamp; falls back to "now" when the task lacks one.
    pub completed_at: String,
}
48
49pub(crate) fn append_task_completion_learning(
50 project_root: &Path,
51 task: &Task,
52 engineer: &str,
53 summary: &str,
54) -> Result<()> {
55 let path = task_learnings_path(project_root);
56 if let Some(parent) = path.parent() {
57 fs::create_dir_all(parent)
58 .with_context(|| format!("failed to create {}", parent.display()))?;
59 }
60
61 let mut keywords = extract_keywords(&task.title);
62 keywords.extend(extract_keywords(&task.description));
63 keywords.sort();
64 keywords.dedup();
65
66 let entry = LearningEntry {
67 task_id: task.id,
68 title: task.title.clone(),
69 summary: summary.trim().to_string(),
70 tags: task.tags.clone(),
71 keywords,
72 engineer: engineer.to_string(),
73 completed_at: task
74 .completed
75 .clone()
76 .unwrap_or_else(|| chrono::Utc::now().to_rfc3339()),
77 };
78
79 let mut file = OpenOptions::new()
80 .create(true)
81 .append(true)
82 .open(&path)
83 .with_context(|| format!("failed to open {}", path.display()))?;
84 serde_json::to_writer(&mut file, &entry)
85 .with_context(|| format!("failed to serialize learning entry to {}", path.display()))?;
86 writeln!(file).with_context(|| format!("failed to write {}", path.display()))?;
87 Ok(())
88}
89
90fn assignment_packet(task: &Task, ack_recipient: &str) -> String {
91 let allowed_files = crate::team::daemon::verification::parse_scope_fence(&task.description);
92 let ack_token = crate::team::daemon::verification::scope_ack_token(task.id);
93 let ack_required = !allowed_files.is_empty();
94 let mut packet = String::from("Assignment Packet:\n```yaml\n");
95 packet.push_str(&format!("task_id: {}\n", task.id));
96 if allowed_files.is_empty() {
97 packet.push_str("allowed_files: []\n");
98 } else {
99 packet.push_str("allowed_files:\n");
100 for path in allowed_files {
101 packet.push_str(&format!(" - {path}\n"));
102 }
103 }
104 packet.push_str(&format!("scope_ack_required: {ack_required}\n"));
105 packet.push_str(&format!("scope_ack_token: \"{ack_token}\"\n"));
106 packet.push_str(&format!(
107 "scope_ack_command: \"batty send {ack_recipient} \\\"{ack_token}\\\"\"\n"
108 ));
109 packet.push_str("```\n");
110 if ack_required {
111 packet.push_str(&format!(
112 "Before your first file write, run `batty send {ack_recipient} \"{ack_token}\"`.\n"
113 ));
114 }
115 packet
116}
117
118pub(crate) fn augment_assignment_message(
119 project_root: &Path,
120 task: &Task,
121 ack_recipient: &str,
122) -> Result<String> {
123 let mut body = format!("Task #{}: {}\n\n{}", task.id, task.title, task.description);
124 body.push_str("\n\n");
125 body.push_str(&assignment_packet(task, ack_recipient));
126 let context = build_dispatch_context(project_root, task)?;
127 if context.is_empty() {
128 return Ok(body);
129 }
130
131 body.push_str("\n\nDispatch context:\n");
132 body.push_str(&context);
133 Ok(body)
134}
135
136fn build_dispatch_context(project_root: &Path, task: &Task) -> Result<String> {
137 let related = related_completed_tasks(project_root, task, MAX_RELATED_TASKS)?;
138 let learnings = relevant_learnings(project_root, task, MAX_RELEVANT_LEARNINGS)?;
139 let file_predictions = predict_files(&related, task);
140 let failure_patterns = relevant_failure_patterns(project_root, &related, MAX_FAILURE_PATTERNS)?;
141
142 let mut sections = Vec::new();
143
144 if !related.is_empty() {
145 let mut section = String::from("Recent related completions:\n");
146 for entry in &related {
147 let mut notes = Vec::new();
148 if let Some(summary) = entry.git_summary.as_deref() {
149 notes.push(format!("git: {summary}"));
150 }
151 if entry.metrics.retries > 0 {
152 notes.push(format!("retries={}", entry.metrics.retries));
153 }
154 if entry.metrics.escalations > 0 {
155 notes.push(format!("escalations={}", entry.metrics.escalations));
156 }
157 if entry.metrics.context_restart_count > 0 {
158 notes.push(format!(
159 "context_restarts={}",
160 entry.metrics.context_restart_count
161 ));
162 }
163 let suffix = if notes.is_empty() {
164 String::new()
165 } else {
166 format!(" ({})", notes.join(", "))
167 };
168 section.push_str(&format!(
169 "- Task #{}: {}{}\n",
170 entry.task.id, entry.task.title, suffix
171 ));
172 }
173 sections.push(section);
174 }
175
176 if !failure_patterns.is_empty() {
177 let mut section = String::from("Failure history from similar tasks:\n");
178 for pattern in failure_patterns {
179 section.push_str(&format!("- {}\n", pattern.description));
180 }
181 sections.push(section);
182 }
183
184 if !file_predictions.is_empty() {
185 let mut section = String::from("Likely files to inspect:\n");
186 for path in file_predictions {
187 section.push_str(&format!("- {path}\n"));
188 }
189 sections.push(section);
190 }
191
192 if !learnings.is_empty() {
193 let mut section = String::from("Relevant prior learnings:\n");
194 for learning in learnings {
195 let tags = if learning.tags.is_empty() {
196 "untagged".to_string()
197 } else {
198 learning.tags.join(", ")
199 };
200 section.push_str(&format!(
201 "- Task #{} [{}] {}\n",
202 learning.task_id, tags, learning.summary
203 ));
204 }
205 sections.push(section);
206 }
207
208 Ok(limit_word_count(§ions.join("\n"), MAX_CONTEXT_WORDS))
209}
210
211fn task_learnings_path(project_root: &Path) -> PathBuf {
212 project_root
213 .join(".batty")
214 .join(LEARNINGS_DIR)
215 .join(TASK_LEARNINGS_FILE)
216}
217
218fn load_task_learnings(project_root: &Path) -> Result<Vec<LearningEntry>> {
219 let path = task_learnings_path(project_root);
220 if !path.exists() {
221 return Ok(Vec::new());
222 }
223
224 let file =
225 fs::File::open(&path).with_context(|| format!("failed to read {}", path.display()))?;
226 let reader = BufReader::new(file);
227 let mut entries = Vec::new();
228 for line in reader.lines() {
229 let line = line.with_context(|| format!("failed to read {}", path.display()))?;
230 let trimmed = line.trim();
231 if trimmed.is_empty() {
232 continue;
233 }
234 match serde_json::from_str::<LearningEntry>(trimmed) {
235 Ok(entry) => entries.push(entry),
236 Err(error) => {
237 tracing::warn!(path = %path.display(), error = %error, "skipping malformed learning entry");
238 }
239 }
240 }
241 Ok(entries)
242}
243
244fn related_completed_tasks(
245 project_root: &Path,
246 task: &Task,
247 limit: usize,
248) -> Result<Vec<RelatedTaskContext>> {
249 let board_dir = project_root
250 .join(".batty")
251 .join("team_config")
252 .join("board");
253 let tasks_dir = board_dir.join("tasks");
254 if !tasks_dir.exists() {
255 return Ok(Vec::new());
256 }
257 let mut metrics_by_task = load_task_metrics(project_root)?;
258
259 let mut scored = Vec::new();
260 for candidate in crate::task::load_tasks_from_dir(&tasks_dir)? {
261 if candidate.id == task.id || candidate.status != "done" {
262 continue;
263 }
264
265 let changed_paths = load_changed_paths(candidate.source_path.as_path())?;
266 let score = related_task_score(task, &candidate, &changed_paths);
267 if score == 0 {
268 continue;
269 }
270
271 let metrics = metrics_by_task.remove(&candidate.id).unwrap_or_default();
272 let git_summary = candidate
273 .commit
274 .as_deref()
275 .and_then(|commit| git_commit_summary(project_root, commit));
276 scored.push(RelatedTaskContext {
277 metrics,
278 git_summary,
279 changed_paths,
280 score,
281 task: candidate,
282 });
283 }
284
285 scored.sort_by(|left, right| {
286 right
287 .score
288 .cmp(&left.score)
289 .then_with(|| right.metrics.completed_at.cmp(&left.metrics.completed_at))
290 .then_with(|| right.task.completed.cmp(&left.task.completed))
291 .then_with(|| right.task.id.cmp(&left.task.id))
292 });
293 scored.truncate(limit);
294 Ok(scored)
295}
296
297fn relevant_learnings(
298 project_root: &Path,
299 task: &Task,
300 limit: usize,
301) -> Result<Vec<LearningEntry>> {
302 let task_keywords: HashSet<String> =
303 extract_keywords(&format!("{}\n{}", task.title, task.description))
304 .into_iter()
305 .collect();
306 let task_tags: HashSet<String> = task
307 .tags
308 .iter()
309 .map(|tag| tag.to_ascii_lowercase())
310 .collect();
311
312 let mut scored: Vec<(usize, LearningEntry)> = load_task_learnings(project_root)?
313 .into_iter()
314 .filter(|entry| entry.task_id != task.id)
315 .filter_map(|entry| {
316 let tag_matches = entry
317 .tags
318 .iter()
319 .map(|tag| tag.to_ascii_lowercase())
320 .filter(|tag| task_tags.contains(tag))
321 .count();
322 let keyword_matches = entry
323 .keywords
324 .iter()
325 .filter(|keyword| task_keywords.contains(*keyword))
326 .count();
327 let score = tag_matches * 3 + keyword_matches;
328 (score > 0).then_some((score, entry))
329 })
330 .collect();
331
332 scored.sort_by(|left, right| {
333 right
334 .0
335 .cmp(&left.0)
336 .then_with(|| right.1.completed_at.cmp(&left.1.completed_at))
337 });
338 Ok(scored
339 .into_iter()
340 .take(limit)
341 .map(|(_, entry)| entry)
342 .collect())
343}
344
345fn relevant_failure_patterns(
346 project_root: &Path,
347 related: &[RelatedTaskContext],
348 limit: usize,
349) -> Result<Vec<crate::team::failure_patterns::PatternMatch>> {
350 if related.is_empty() {
351 return Ok(Vec::new());
352 }
353
354 let related_ids: HashSet<String> = related
355 .iter()
356 .map(|entry| entry.task.id.to_string())
357 .collect();
358 let events_path = crate::team::team_events_path(project_root);
359 if !events_path.exists() {
360 return Ok(Vec::new());
361 }
362 let events = crate::team::events::read_events(&events_path)?;
363 let mut window = crate::team::failure_patterns::FailureWindow::new(100);
364 for event in events.into_iter().filter(|event| {
365 event
366 .task
367 .as_deref()
368 .is_some_and(|task_id| related_ids.contains(task_id))
369 }) {
370 window.push(&event);
371 }
372
373 let mut patterns = window.detect_failure_patterns();
374 patterns.truncate(limit);
375 Ok(patterns)
376}
377
378fn predict_files(related: &[RelatedTaskContext], task: &Task) -> Vec<String> {
379 let mut counts: HashMap<String, usize> = HashMap::new();
380 for entry in related {
381 for path in &entry.changed_paths {
382 *counts.entry(path.clone()).or_insert(0) += entry.score.max(1);
383 }
384 }
385 for hinted in extract_path_hints(task) {
386 *counts.entry(hinted).or_insert(0) += 2;
387 }
388
389 let mut ranked: Vec<(String, usize)> = counts.into_iter().collect();
390 ranked.sort_by(|left, right| right.1.cmp(&left.1).then_with(|| left.0.cmp(&right.0)));
391 ranked
392 .into_iter()
393 .take(MAX_FILE_PREDICTIONS)
394 .map(|(path, _)| path)
395 .collect()
396}
397
398fn load_task_metrics(project_root: &Path) -> Result<HashMap<u32, TaskMetricsSummary>> {
399 let conn = match crate::team::telemetry_db::open(project_root) {
400 Ok(conn) => conn,
401 Err(_) => return Ok(HashMap::new()),
402 };
403 let mut metrics = HashMap::new();
404 for row in crate::team::telemetry_db::query_task_metrics(&conn)? {
405 let Ok(task_id) = row.task_id.parse::<u32>() else {
406 continue;
407 };
408 metrics.insert(
409 task_id,
410 TaskMetricsSummary {
411 retries: row.retries,
412 escalations: row.escalations,
413 context_restart_count: row.context_restart_count,
414 completed_at: row.completed_at,
415 },
416 );
417 }
418 Ok(metrics)
419}
420
421fn related_task_score(task: &Task, candidate: &Task, changed_paths: &[String]) -> usize {
422 let task_tags: HashSet<String> = task
423 .tags
424 .iter()
425 .map(|tag| tag.to_ascii_lowercase())
426 .collect();
427 let candidate_tags: HashSet<String> = candidate
428 .tags
429 .iter()
430 .map(|tag| tag.to_ascii_lowercase())
431 .collect();
432 let task_keywords: HashSet<String> =
433 extract_keywords(&format!("{}\n{}", task.title, task.description))
434 .into_iter()
435 .collect();
436 let candidate_keywords: HashSet<String> =
437 extract_keywords(&format!("{}\n{}", candidate.title, candidate.description))
438 .into_iter()
439 .collect();
440 let task_dirs: HashSet<String> = extract_path_hints(task)
441 .into_iter()
442 .filter_map(|path| parent_dir(&path))
443 .collect();
444 let candidate_dirs: HashSet<String> = changed_paths
445 .iter()
446 .filter_map(|path| parent_dir(path))
447 .collect();
448
449 let tag_matches = task_tags.intersection(&candidate_tags).count();
450 let keyword_matches = task_keywords.intersection(&candidate_keywords).count();
451 let dir_matches = task_dirs.intersection(&candidate_dirs).count();
452 tag_matches * 4 + dir_matches * 3 + keyword_matches
453}
454
455fn load_changed_paths(path: &Path) -> Result<Vec<String>> {
456 if path.as_os_str().is_empty() || !path.exists() {
457 return Ok(Vec::new());
458 }
459
460 let content = fs::read_to_string(path)?;
461 let Some(frontmatter) = extract_frontmatter(&content) else {
462 return Ok(Vec::new());
463 };
464 let parsed: LearningTaskFrontmatter = serde_yaml::from_str(frontmatter).unwrap_or_default();
465 Ok(parsed.changed_paths)
466}
467
/// Subject line of `commit` via `git log -1 --format=%s`, or `None` when git
/// fails, the commit is unknown, or the subject is empty/non-UTF-8.
fn git_commit_summary(project_root: &Path, commit: &str) -> Option<String> {
    let output = std::process::Command::new("git")
        .args(["log", "-1", "--format=%s", commit])
        .current_dir(project_root)
        .output()
        .ok()?;
    if !output.status.success() {
        return None;
    }
    let stdout = String::from_utf8(output.stdout).ok()?;
    match stdout.trim() {
        "" => None,
        subject => Some(subject.to_string()),
    }
}
481
482fn extract_path_hints(task: &Task) -> HashSet<String> {
483 task.description
484 .split_whitespace()
485 .filter_map(clean_task_path_token)
486 .collect()
487}
488
489fn clean_task_path_token(token: &str) -> Option<String> {
490 let cleaned = token.trim_matches(|ch: char| {
491 matches!(
492 ch,
493 '"' | '\'' | ',' | ':' | ';' | '(' | ')' | '[' | ']' | '`'
494 )
495 });
496 parent_dir(cleaned).map(|_| cleaned.to_string())
497}
498
/// Parent directory of `path` with forward-slash separators, or `None` when
/// the parent is empty or just the current directory (`.`).
fn parent_dir(path: &str) -> Option<String> {
    let parent = PathBuf::from(path)
        .parent()?
        .to_string_lossy()
        .replace('\\', "/");
    if parent.is_empty() || parent == "." {
        None
    } else {
        Some(parent)
    }
}
505
/// Returns the YAML frontmatter body between the opening `---` and the
/// closing `\n---`, or `None` when the document has no frontmatter.
fn extract_frontmatter(content: &str) -> Option<&str> {
    let rest = content.trim_start().strip_prefix("---")?;
    let body = rest.strip_prefix('\n').unwrap_or(rest);
    body.find("\n---").map(|end| &body[..end])
}
515
/// Truncates `text` to at most `max_words` words, keeping whole lines where
/// possible, collapsing runs of blank lines to one, and marking a mid-line
/// cut with a trailing " ...".
fn limit_word_count(text: &str, max_words: usize) -> String {
    let mut used = 0usize;
    let mut kept: Vec<String> = Vec::new();
    for line in text.lines() {
        let count = line.split_whitespace().count();
        if count == 0 {
            // Keep a single blank separator only after a non-empty kept line.
            if kept.last().is_some_and(|last| !last.is_empty()) {
                kept.push(String::new());
            }
            continue;
        }

        if used + count <= max_words {
            used += count;
            kept.push(line.to_string());
            continue;
        }

        // The budget runs out inside this line: keep as many words as fit.
        let budget = max_words.saturating_sub(used);
        if budget > 0 {
            let head: Vec<&str> = line.split_whitespace().take(budget).collect();
            if !head.is_empty() {
                kept.push(format!("{} ...", head.join(" ")));
            }
        }
        break;
    }
    kept.join("\n").trim().to_string()
}
550
/// Minimal view of a task file's YAML frontmatter: only the list of paths the
/// task changed is extracted; other frontmatter keys are not modeled here.
#[derive(Debug, Default, Deserialize)]
struct LearningTaskFrontmatter {
    // Empty when the `changed_paths` key is absent.
    #[serde(default)]
    changed_paths: Vec<String>,
}
556
/// Splits `text` on non-ASCII-alphanumeric characters and returns lowercase
/// words of at least four characters that are not purely numeric, in first-
/// occurrence order without duplicates.
fn extract_keywords(text: &str) -> Vec<String> {
    let mut seen = HashSet::new();
    let mut keywords = Vec::new();
    for word in text.split(|ch: char| !ch.is_ascii_alphanumeric()) {
        let normalized = word.trim().to_ascii_lowercase();
        if normalized.len() < 4 || normalized.chars().all(|ch| ch.is_ascii_digit()) {
            continue;
        }
        // `insert` returns false for repeats, preserving first-seen order.
        if seen.insert(normalized.clone()) {
            keywords.push(normalized);
        }
    }
    keywords
}
569
#[cfg(test)]
mod tests {
    use super::*;
    use crate::team::events::EventSink;
    use crate::team::events::TeamEvent;
    use crate::team::telemetry_db;

    // Writes a board task markdown file under the temp project's tasks dir.
    fn write_task_file(project_root: &Path, filename: &str, content: &str) {
        let tasks_dir = project_root
            .join(".batty")
            .join("team_config")
            .join("board")
            .join("tasks");
        fs::create_dir_all(&tasks_dir).unwrap();
        fs::write(tasks_dir.join(filename), content).unwrap();
    }

    // Creates the `.batty/team_config` tree expected by telemetry/events.
    fn ensure_batty_dirs(project_root: &Path) {
        fs::create_dir_all(project_root.join(".batty").join("team_config")).unwrap();
    }

    // Baseline completed-task fixture; individual tests tweak fields.
    fn sample_task() -> Task {
        Task {
            id: 42,
            title: "Improve dispatch scoring".to_string(),
            status: "todo".to_string(),
            priority: "high".to_string(),
            claimed_by: Some("eng-1".to_string()),
            claimed_at: None,
            claim_ttl_secs: None,
            claim_expires_at: None,
            last_progress_at: None,
            claim_warning_sent_at: None,
            claim_extensions: None,
            last_output_bytes: None,
            blocked: None,
            tags: vec!["dispatch".to_string(), "daemon".to_string()],
            depends_on: Vec::new(),
            review_owner: None,
            blocked_on: None,
            worktree_path: None,
            branch: None,
            commit: None,
            artifacts: Vec::new(),
            next_action: None,
            scheduled_for: None,
            cron_schedule: None,
            cron_last_run: None,
            completed: Some("2026-04-06T08:00:00Z".to_string()),
            description: "Teach dispatch queue scoring to prefer daemon work.".to_string(),
            batty_config: None,
            source_path: PathBuf::from("/tmp/task.md"),
        }
    }

    // A stored learning sharing tags/keywords with the current task should be
    // surfaced in the augmented assignment message.
    #[test]
    fn append_and_match_learnings_by_tag_and_keyword() {
        let tmp = tempfile::tempdir().unwrap();
        let completed = sample_task();
        append_task_completion_learning(
            tmp.path(),
            &completed,
            "eng-1",
            "Prefer prior daemon dispatch patterns over generic scoring.",
        )
        .unwrap();

        let mut current = sample_task();
        current.id = 99;
        current.tags = vec!["dispatch".to_string()];
        current.description = "Refine daemon dispatch prompts using queue history.".to_string();

        // FIX: `¤t` (HTML-entity mojibake of `&current`) restored below.
        let augmented = augment_assignment_message(tmp.path(), &current, "manager").unwrap();
        assert!(augmented.contains("Relevant prior learnings:"));
        assert!(augmented.contains("Prefer prior daemon dispatch patterns"));
        assert!(augmented.contains("[dispatch, daemon]"));
    }

    // Learnings with no tag/keyword overlap must not be injected.
    #[test]
    fn augment_assignment_message_skips_when_no_matches() {
        let tmp = tempfile::tempdir().unwrap();
        let mut prior = sample_task();
        prior.tags = vec!["grafana".to_string()];
        prior.description = "Alerting rules and dashboards.".to_string();
        append_task_completion_learning(
            tmp.path(),
            &prior,
            "eng-1",
            "Keep alert thresholds conservative.",
        )
        .unwrap();

        let mut current = sample_task();
        current.tags = vec!["dispatch".to_string()];
        current.description = "Dispatch prompt and work allocation.".to_string();

        // FIX: `¤t` mojibake restored to `&current`.
        let augmented = augment_assignment_message(tmp.path(), &current, "manager").unwrap();
        assert!(!augmented.contains("Relevant prior learnings:"));
    }

    // With a related done task, telemetry events, and a learning on record,
    // all four dispatch-context sections should appear.
    #[test]
    fn augment_assignment_message_includes_richer_dispatch_context() {
        let tmp = tempfile::tempdir().unwrap();
        ensure_batty_dirs(tmp.path());
        let conn = telemetry_db::open(tmp.path()).unwrap();
        telemetry_db::insert_event(&conn, &TeamEvent::task_assigned("eng-2", "10")).unwrap();
        telemetry_db::insert_event(&conn, &TeamEvent::task_completed("eng-2", Some("10"))).unwrap();
        let mut escalation = TeamEvent::task_escalated("eng-2", "10", Some("needed rework"));
        escalation.task = Some("10".to_string());
        telemetry_db::insert_event(&conn, &escalation).unwrap();
        let mut escalation_repeat = TeamEvent::task_escalated("eng-2", "10", Some("second rework"));
        escalation_repeat.task = Some("10".to_string());
        telemetry_db::insert_event(&conn, &escalation_repeat).unwrap();
        let mut event_sink = EventSink::new(&crate::team::team_events_path(tmp.path())).unwrap();
        event_sink.emit(escalation.clone()).unwrap();
        event_sink.emit(escalation_repeat).unwrap();

        write_task_file(
            tmp.path(),
            "010-related.md",
            "---\nid: 10\ntitle: Prior dispatch context work\nstatus: done\npriority: high\nclaimed_by: eng-2\ncommit: abc123\ntags:\n - dispatch\nchanged_paths:\n - src/team/learnings.rs\n - src/team/dispatch/queue.rs\nclass: standard\ncompleted: 2026-04-06T08:00:00Z\n---\n\nImprove dispatch context for queue prompts.\n",
        );
        append_task_completion_learning(
            tmp.path(),
            &Task::from_file(
                &tmp.path()
                    .join(".batty")
                    .join("team_config")
                    .join("board")
                    .join("tasks")
                    .join("010-related.md"),
            )
            .unwrap(),
            "eng-2",
            "Call out likely queue files before coding.",
        )
        .unwrap();

        let mut current = sample_task();
        current.id = 99;
        current.description =
            "Expand dispatch context in src/team/learnings.rs and src/team/dispatch/queue.rs."
                .to_string();

        // FIX: `¤t` mojibake restored to `&current`.
        let augmented = augment_assignment_message(tmp.path(), &current, "manager").unwrap();
        assert!(augmented.contains("Dispatch context:"));
        assert!(augmented.contains("Recent related completions:"));
        assert!(augmented.contains("Failure history from similar tasks:"));
        assert!(augmented.contains("Likely files to inspect:"));
        assert!(augmented.contains("Relevant prior learnings:"));
        assert!(augmented.contains("src/team/learnings.rs"));
    }

    // The word cap may exceed by at most the " ..." truncation marker.
    #[test]
    fn dispatch_context_is_capped_to_500_words() {
        let repeated = std::iter::repeat_n("context", 700)
            .collect::<Vec<_>>()
            .join(" ");
        let truncated = limit_word_count(&repeated, MAX_CONTEXT_WORDS);
        assert!(truncated.split_whitespace().count() <= MAX_CONTEXT_WORDS + 1);
    }

    // A SCOPE FENCE in the description must produce a full assignment packet.
    #[test]
    fn augment_assignment_message_includes_scope_packet() {
        let tmp = tempfile::tempdir().unwrap();
        let mut current = sample_task();
        current.id = 587;
        current.description =
            "Harden scope validation.\nSCOPE FENCE: src/team/completion.rs, src/team/review.rs\n"
                .to_string();

        // FIX: `¤t` mojibake restored to `&current`.
        let augmented = augment_assignment_message(tmp.path(), &current, "manager").unwrap();
        assert!(augmented.contains("Assignment Packet:"));
        assert!(augmented.contains("allowed_files:"));
        assert!(augmented.contains("src/team/completion.rs"));
        assert!(augmented.contains("src/team/review.rs"));
        assert!(augmented.contains("scope_ack_required: true"));
        assert!(augmented.contains("Scope ACK #587"));
        assert!(augmented.contains("batty send manager"));
    }
}