1use crate::{
2 ai::{ProviderKind, read_api_key, resolve_model, resolve_provider},
3 config::{LoadedConfig, load_config, resolve_config_relative_path},
4 path_glob,
5 tui::{
6 DashboardEvent, DashboardInit, DashboardItem, DashboardItemStatus, DashboardKind,
7 DashboardLogTone, PlainReporter, ResolvedUiMode, RunReporter, SummaryRow, TuiReporter,
8 UiMode, resolve_ui_mode_for_current_terminal,
9 },
10 validation::validate_language_code,
11};
12use async_trait::async_trait;
13use langcodec::{
14 Codec, Entry, FormatType, ReadOptions, Resource, Translation,
15 formats::{AndroidStringsFormat, StringsFormat, XcstringsFormat},
16 infer_format_from_extension, infer_language_from_path,
17 traits::Parser,
18};
19use mentra::{
20 AgentConfig, ContentBlock, ModelInfo, Runtime,
21 agent::{AgentEvent, ToolProfile, WorkspaceConfig},
22 provider::{ProviderRequestOptions, ResponsesRequestOptions},
23 runtime::RunOptions,
24};
25use serde::{Deserialize, Serialize};
26use serde_json::Value;
27use std::{
28 collections::{BTreeMap, HashMap, VecDeque},
29 fs,
30 path::{Path, PathBuf},
31 sync::Arc,
32};
33use tokio::{
34 runtime::Builder,
35 sync::{Mutex as AsyncMutex, broadcast, mpsc},
36 task::JoinSet,
37};
38
/// Default number of concurrent annotation workers when neither the CLI nor
/// the config file specifies one.
const DEFAULT_CONCURRENCY: usize = 4;
/// Upper bound on tool invocations a single annotation agent run may make.
const DEFAULT_TOOL_BUDGET: usize = 16;
/// Marker embedded in stored comments so auto-generated comments can be told
/// apart from hand-written ones (which are never overwritten).
const GENERATED_COMMENT_MARKER: &str = "langcodec:auto-generated";
/// System prompt for the annotation agent. The agent must answer with JSON
/// matching `AnnotationResponse` ({"comment": ..., "confidence": ...}).
const ANNOTATION_SYSTEM_PROMPT: &str = "You write translator-facing comments for application localization entries. Use the files tool or shell tool when needed to inspect source code. Prefer shell commands like rg for fast code search, then read the most relevant files before drafting. Prefer a short, concrete explanation of where or how the text is used so a translator can choose the right wording. If you are uncertain, say what the UI usage appears to be instead of inventing product meaning. Return JSON only with the shape {\"comment\":\"...\",\"confidence\":\"high|medium|low\"}.";
43
/// Raw `annotate` command options as parsed from the CLI, before merging
/// with the config file. `None`/empty fields fall back to config values.
#[derive(Debug, Clone)]
pub struct AnnotateOptions {
    /// Localization file to annotate; falls back to `annotate.input(s)`.
    pub input: Option<String>,
    /// Directories the agent may search for usages of each key.
    pub source_roots: Vec<String>,
    /// Destination file; defaults to annotating `input` in place.
    pub output: Option<String>,
    /// Language of the source-of-truth values; inferred when omitted.
    pub source_lang: Option<String>,
    /// AI provider name override.
    pub provider: Option<String>,
    /// Model name override.
    pub model: Option<String>,
    /// Worker count override; defaults to `DEFAULT_CONCURRENCY`.
    pub concurrency: Option<usize>,
    /// Explicit path to langcodec.toml.
    pub config: Option<String>,
    /// Compute and report changes without writing any file.
    pub dry_run: bool,
    /// Fail (non-zero) when any comment would change; for CI.
    pub check: bool,
    /// Requested UI mode (plain/TUI/auto).
    pub ui_mode: UiMode,
}
58
/// Fully resolved settings for one annotate run: CLI and config merged,
/// paths absolutized and validated, defaults applied. Produced by
/// `resolve_annotate_options`.
#[derive(Debug, Clone)]
struct ResolvedAnnotateOptions {
    // Absolute path of the file being annotated.
    input: String,
    // Absolute destination path (equals `input` for in-place runs).
    output: String,
    // Absolute, existing directories to search for key usages.
    source_roots: Vec<String>,
    // May still be `None`; inferred from the catalog at run time.
    source_lang: Option<String>,
    provider: ProviderKind,
    model: String,
    // Guaranteed > 0.
    concurrency: usize,
    dry_run: bool,
    check: bool,
    // Sandbox root for the agent's file/shell tools.
    workspace_root: PathBuf,
    ui_mode: ResolvedUiMode,
}
73
/// One unit of work for the annotation backend: a single localization key
/// plus the context the agent needs to write a translator comment.
#[derive(Debug, Clone)]
struct AnnotationRequest {
    // Entry id / localization key.
    key: String,
    // Language code of `source_value`.
    source_lang: String,
    // Display text of the entry in the source language (key used as fallback).
    source_value: String,
    // Current comment, if any, shown to the agent for context.
    existing_comment: Option<String>,
    // Source roots as workspace-relative display paths for the prompt.
    source_roots: Vec<String>,
}
82
/// JSON payload the agent must return (see `ANNOTATION_SYSTEM_PROMPT`).
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
struct AnnotationResponse {
    // Translator-facing comment text.
    comment: String,
    // Agent's self-reported confidence: "high" | "medium" | "low".
    confidence: String,
}
88
/// The subset of localization formats the annotate command supports.
/// Comment storage rules differ per format (see `generated_comment_storage`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AnnotationFormat {
    // Apple .xcstrings catalogs (comment + auto-generated flag).
    Xcstrings,
    // Apple .strings files (block/line comments above entries).
    Strings,
    // Android strings.xml (XML comments above entries).
    AndroidStrings,
}
95
96impl AnnotationFormat {
97 fn to_format_type(self) -> FormatType {
98 match self {
99 Self::Xcstrings => FormatType::Xcstrings,
100 Self::Strings => FormatType::Strings(None),
101 Self::AndroidStrings => FormatType::AndroidStrings(None),
102 }
103 }
104}
105
/// A key selected for annotation, carrying its current (normalized) comment
/// so the agent can see what it is replacing.
#[derive(Debug, Clone)]
struct AnnotationTarget {
    key: String,
    // Normalized display form of the existing comment, if any.
    existing_comment: Option<String>,
}
111
/// Progress messages workers send back to the reporting loop over an
/// unbounded mpsc channel.
enum WorkerUpdate {
    /// A worker picked up a key from the queue.
    Started {
        worker_id: usize,
        key: String,
        // NOTE(review): currently always sent as 0 / None by the worker
        // loop; kept for the log-message format.
        candidate_count: usize,
        top_candidate: Option<String>,
    },
    /// A tool-call log line forwarded from the agent's event stream.
    ToolCall {
        tone: DashboardLogTone,
        message: String,
    },
    /// A worker finished a key; `Ok(None)` means the backend declined to
    /// produce a comment, `Err` carries the failure message.
    Finished {
        worker_id: usize,
        key: String,
        result: Result<Option<AnnotationResponse>, String>,
    },
}
129
/// Abstraction over the comment generator so tests can substitute a fake
/// backend for the real Mentra agent.
#[async_trait]
trait AnnotationBackend: Send + Sync {
    /// Produces a comment for one request. `Ok(None)` signals a deliberate
    /// skip; `Err` is a hard failure. `event_tx`, when provided, receives
    /// `WorkerUpdate::ToolCall` progress messages during the run.
    async fn annotate(
        &self,
        request: AnnotationRequest,
        event_tx: Option<mpsc::UnboundedSender<WorkerUpdate>>,
    ) -> Result<Option<AnnotationResponse>, String>;
}
138
/// Production backend that spawns one Mentra agent per annotation request.
struct MentraAnnotatorBackend {
    // Shared runtime; agents are spawned from it per request.
    runtime: Arc<Runtime>,
    model: ModelInfo,
    // Sandbox root handed to each agent's workspace config.
    workspace_root: PathBuf,
}
144
impl MentraAnnotatorBackend {
    /// Builds a backend from resolved options: reads the provider API key,
    /// constructs a Mentra runtime for that provider, and records the model
    /// plus the workspace root agents will be confined to.
    ///
    /// Errors when the API key cannot be read or the runtime fails to build.
    fn new(opts: &ResolvedAnnotateOptions) -> Result<Self, String> {
        let api_key = read_api_key(&opts.provider)?;
        let provider = opts.provider.builtin_provider();
        let runtime = Runtime::builder()
            .with_provider(provider, api_key)
            .build()
            .map_err(|e| format!("Failed to build Mentra runtime: {}", e))?;

        Ok(Self {
            runtime: Arc::new(runtime),
            model: ModelInfo::new(opts.model.clone(), provider),
            workspace_root: opts.workspace_root.clone(),
        })
    }

    /// Test-only constructor that injects a pre-built runtime, bypassing
    /// API-key lookup and provider resolution.
    #[cfg(test)]
    fn from_runtime(runtime: Runtime, model: ModelInfo, workspace_root: PathBuf) -> Self {
        Self {
            runtime: Arc::new(runtime),
            model,
            workspace_root,
        }
    }
}
170
#[async_trait]
impl AnnotationBackend for MentraAnnotatorBackend {
    /// Runs one annotation agent for a single entry.
    ///
    /// A fresh agent is spawned per request so runs never share conversation
    /// state. Tool-call events are forwarded to `event_tx` (when given) by a
    /// logger task. The agent's final text is parsed as `AnnotationResponse`
    /// JSON; parse failures surface as `Err`, never as `Ok(None)`.
    async fn annotate(
        &self,
        request: AnnotationRequest,
        event_tx: Option<mpsc::UnboundedSender<WorkerUpdate>>,
    ) -> Result<Option<AnnotationResponse>, String> {
        let config = build_agent_config(&self.workspace_root);
        let mut agent = self
            .runtime
            .spawn_with_config("annotate", self.model.clone(), config)
            .map_err(|e| format!("Failed to spawn Mentra agent: {}", e))?;
        // Subscribe before running so no early tool events are missed.
        let tool_logger =
            spawn_tool_call_logger(agent.subscribe_events(), request.key.clone(), event_tx);

        let response = agent
            .run(
                vec![ContentBlock::text(build_annotation_prompt(&request))],
                RunOptions {
                    tool_budget: Some(DEFAULT_TOOL_BUDGET),
                    ..RunOptions::default()
                },
            )
            .await;
        // Stop the logger before inspecting the result; awaiting the aborted
        // handle ensures the task has fully wound down (its JoinError is
        // expected and ignored).
        tool_logger.abort();
        let _ = tool_logger.await;

        let response = response.map_err(|e| format!("Annotation agent failed: {}", e))?;

        parse_annotation_response(&response.text()).map(Some)
    }
}
203
204pub fn run_annotate_command(opts: AnnotateOptions) -> Result<(), String> {
205 let config = load_config(opts.config.as_deref())?;
206 let runs = expand_annotate_invocations(&opts, config.as_ref())?;
207
208 for resolved in runs {
209 let backend: Arc<dyn AnnotationBackend> = Arc::new(MentraAnnotatorBackend::new(&resolved)?);
210 run_annotate_with_backend(resolved, backend)?;
211 }
212
213 Ok(())
214}
215
/// Executes one resolved annotate run end to end: read the catalog, build
/// annotation requests, fan them out to the backend, apply the returned
/// comments, then write the file (or just report, in check/dry-run mode).
///
/// `--check` treats any pending change as an error so CI can fail the build;
/// `--dry-run` counts changes but never writes. Progress goes to the
/// reporter (plain or TUI); final outcomes are also printed to stdout after
/// the reporter is finished, so they survive TUI teardown.
fn run_annotate_with_backend(
    opts: ResolvedAnnotateOptions,
    backend: Arc<dyn AnnotationBackend>,
) -> Result<(), String> {
    let annotation_format = annotation_format_from_path(&opts.input)?;
    let mut codec = read_annotation_codec(&opts.input, annotation_format)?;
    // Explicit CLI/config source language wins; otherwise infer it from the
    // catalog metadata.
    let source_lang = opts
        .source_lang
        .clone()
        .or_else(|| default_source_language(&codec))
        .ok_or_else(|| {
            format!(
                "Could not infer source language for '{}'; pass --source-lang",
                opts.input
            )
        })?;
    validate_language_code(&source_lang)?;

    let source_values = source_value_map(&codec.resources, &source_lang);
    let requests = build_annotation_requests(
        &codec,
        annotation_format,
        &source_lang,
        &source_values,
        &opts.source_roots,
        &opts.workspace_root,
    );

    // Nothing to do (all comments are manual or already present).
    if requests.is_empty() {
        println!("No entries require annotation updates.");
        return Ok(());
    }

    let mut reporter = create_annotate_reporter(&opts, &source_lang, &requests)?;
    reporter.emit(DashboardEvent::Log {
        tone: DashboardLogTone::Info,
        message: format!("Annotating {}", opts.input),
    });
    reporter.emit(DashboardEvent::Log {
        tone: DashboardLogTone::Info,
        message: format!(
            "Generating translator comments for {} entr{} with {} worker(s)...",
            requests.len(),
            if requests.len() == 1 { "y" } else { "ies" },
            opts.concurrency
        ),
    });
    let results = annotate_requests(requests.clone(), backend, opts.concurrency, &mut *reporter);
    let results = results?;
    let mut changed = 0usize;
    let mut unmatched = 0usize;

    // Apply results in request order. `Some(None)` means the backend
    // deliberately skipped the key; a missing key should not happen but is
    // tolerated.
    for request in &requests {
        match results.get(&request.key) {
            Some(Some(annotation)) => {
                if apply_annotation(
                    &mut codec,
                    annotation_format,
                    &request.key,
                    &annotation.comment,
                )? {
                    changed += 1;
                }
            }
            Some(None) => unmatched += 1,
            None => {}
        }
    }

    // --check: a pending change is a failure; nothing is written.
    if opts.check && changed > 0 {
        reporter.emit(DashboardEvent::Log {
            tone: DashboardLogTone::Warning,
            message: format!("would change: {}", opts.output),
        });
        reporter.finish()?;
        println!("would change: {}", opts.output);
        return Err(format!("would change: {}", opts.output));
    }

    // --dry-run: report what would have happened, write nothing.
    if opts.dry_run {
        reporter.emit(DashboardEvent::Log {
            tone: DashboardLogTone::Info,
            message: format!(
                "DRY-RUN: would update {} comment(s) in {}",
                changed, opts.output
            ),
        });
        reporter.finish()?;
        println!(
            "DRY-RUN: would update {} comment(s) in {}",
            changed, opts.output
        );
        if unmatched > 0 {
            println!("Skipped {} entry(s) without generated comments", unmatched);
        }
        return Ok(());
    }

    // All generated comments already matched what was stored.
    if changed == 0 {
        reporter.emit(DashboardEvent::Log {
            tone: DashboardLogTone::Success,
            message: "No comment updates were necessary.".to_string(),
        });
        reporter.finish()?;
        println!("No comment updates were necessary.");
        if unmatched > 0 {
            println!("Skipped {} entry(s) without generated comments", unmatched);
        }
        return Ok(());
    }

    reporter.emit(DashboardEvent::Log {
        tone: DashboardLogTone::Info,
        message: format!("Writing {}", opts.output),
    });
    // Surface write failures through the reporter before tearing it down so
    // the error is visible in both the dashboard and the returned Err.
    if let Err(err) = write_annotated_codec(&codec, annotation_format, &opts.output) {
        let err = format!("Failed to write '{}': {}", opts.output, err);
        reporter.emit(DashboardEvent::Log {
            tone: DashboardLogTone::Error,
            message: err.clone(),
        });
        reporter.finish()?;
        return Err(err);
    }
    reporter.emit(DashboardEvent::Log {
        tone: DashboardLogTone::Success,
        message: format!("Updated {} comment(s) in {}", changed, opts.output),
    });
    reporter.finish()?;

    println!("Updated {} comment(s) in {}", changed, opts.output);
    if unmatched > 0 {
        println!("Skipped {} entry(s) without generated comments", unmatched);
    }
    Ok(())
}
352
353fn expand_annotate_invocations(
354 opts: &AnnotateOptions,
355 config: Option<&LoadedConfig>,
356) -> Result<Vec<ResolvedAnnotateOptions>, String> {
357 let cfg = config.map(|item| &item.data.annotate);
358 let config_dir = config.and_then(LoadedConfig::config_dir);
359
360 if cfg
361 .and_then(|item| item.input.as_ref())
362 .is_some_and(|_| cfg.and_then(|item| item.inputs.as_ref()).is_some())
363 {
364 return Err("Config annotate.input and annotate.inputs cannot both be set".to_string());
365 }
366
367 let inputs = resolve_config_inputs(opts, cfg, config_dir)?;
368 if inputs.is_empty() {
369 return Err(
370 "--input is required unless annotate.input or annotate.inputs is set in langcodec.toml"
371 .to_string(),
372 );
373 }
374
375 let output = if let Some(output) = &opts.output {
376 Some(output.clone())
377 } else {
378 cfg.and_then(|item| item.output.clone())
379 .map(|path| resolve_config_relative_path(config_dir, &path))
380 };
381
382 if inputs.len() > 1 && output.is_some() {
383 return Err(
384 "annotate.inputs cannot be combined with annotate.output or CLI --output; use in-place annotation for multiple inputs"
385 .to_string(),
386 );
387 }
388
389 inputs
390 .into_iter()
391 .map(|input| {
392 resolve_annotate_options(
393 &AnnotateOptions {
394 input: Some(input),
395 source_roots: opts.source_roots.clone(),
396 output: output.clone(),
397 source_lang: opts.source_lang.clone(),
398 provider: opts.provider.clone(),
399 model: opts.model.clone(),
400 concurrency: opts.concurrency,
401 config: opts.config.clone(),
402 dry_run: opts.dry_run,
403 check: opts.check,
404 ui_mode: opts.ui_mode,
405 },
406 config,
407 )
408 })
409 .collect()
410}
411
412fn resolve_config_inputs(
413 opts: &AnnotateOptions,
414 cfg: Option<&crate::config::AnnotateConfig>,
415 config_dir: Option<&Path>,
416) -> Result<Vec<String>, String> {
417 fn has_glob_meta(path: &str) -> bool {
418 path.bytes().any(|b| matches!(b, b'*' | b'?' | b'[' | b'{'))
419 }
420
421 if let Some(input) = &opts.input {
422 return Ok(vec![input.clone()]);
423 }
424
425 if let Some(input) = cfg.and_then(|item| item.input.as_ref()) {
426 let resolved = vec![resolve_config_relative_path(config_dir, input)];
427 return if resolved.iter().any(|path| has_glob_meta(path)) {
428 path_glob::expand_input_globs(&resolved)
429 } else {
430 Ok(resolved)
431 };
432 }
433
434 if let Some(inputs) = cfg.and_then(|item| item.inputs.as_ref()) {
435 let resolved = inputs
436 .iter()
437 .map(|input| resolve_config_relative_path(config_dir, input))
438 .collect::<Vec<_>>();
439 return if resolved.iter().any(|path| has_glob_meta(path)) {
440 path_glob::expand_input_globs(&resolved)
441 } else {
442 Ok(resolved)
443 };
444 }
445
446 Ok(Vec::new())
447}
448
/// Resolves one annotate invocation into absolute, validated settings.
///
/// Precedence for every knob is CLI flag, then config value, then default.
/// Config-relative paths are resolved against the config file's directory,
/// then absolutized against the current working directory.
///
/// Errors when no input is available, a source root is missing or not a
/// directory, concurrency is zero, input/output formats differ, or the
/// source language code fails validation.
fn resolve_annotate_options(
    opts: &AnnotateOptions,
    config: Option<&LoadedConfig>,
) -> Result<ResolvedAnnotateOptions, String> {
    let cfg = config.map(|item| &item.data.annotate);
    let config_dir = config.and_then(LoadedConfig::config_dir);
    let cwd = std::env::current_dir()
        .map_err(|e| format!("Failed to determine current directory: {}", e))?;

    let input = if let Some(input) = &opts.input {
        absolutize_path(input, &cwd)
    } else if let Some(input) = cfg.and_then(|item| item.input.as_deref()) {
        absolutize_path(&resolve_config_relative_path(config_dir, input), &cwd)
    } else {
        return Err(
            "--input is required unless annotate.input or annotate.inputs is set in langcodec.toml"
                .to_string(),
        );
    };

    // Source roots are the directories the agent searches for key usages;
    // each must exist up front so the agent never runs against nothing.
    let source_roots = if !opts.source_roots.is_empty() {
        opts.source_roots
            .iter()
            .map(|path| absolutize_path(path, &cwd))
            .collect::<Vec<_>>()
    } else if let Some(roots) = cfg.and_then(|item| item.source_roots.as_ref()) {
        roots
            .iter()
            .map(|path| absolutize_path(&resolve_config_relative_path(config_dir, path), &cwd))
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    if source_roots.is_empty() {
        return Err(
            "--source-root is required unless annotate.source_roots is set in langcodec.toml"
                .to_string(),
        );
    }
    for root in &source_roots {
        let path = Path::new(root);
        if !path.is_dir() {
            return Err(format!(
                "Source root does not exist or is not a directory: {}",
                root
            ));
        }
    }

    // Default to annotating the input file in place.
    let output = if let Some(output) = &opts.output {
        absolutize_path(output, &cwd)
    } else if let Some(output) = cfg.and_then(|item| item.output.as_deref()) {
        absolutize_path(&resolve_config_relative_path(config_dir, output), &cwd)
    } else {
        input.clone()
    };
    validate_annotate_paths(&input, &output)?;

    let concurrency = opts
        .concurrency
        .or_else(|| cfg.and_then(|item| item.concurrency))
        .unwrap_or(DEFAULT_CONCURRENCY);
    if concurrency == 0 {
        return Err("Concurrency must be greater than zero".to_string());
    }

    let provider = resolve_provider(
        opts.provider.as_deref(),
        config.map(|item| &item.data),
        None,
    )?;
    let model = resolve_model(
        opts.model.as_deref(),
        config.map(|item| &item.data),
        &provider,
        None,
    )?;

    // Source language stays optional here; it can still be inferred from
    // the catalog later, but an explicit value must be valid.
    let source_lang = opts
        .source_lang
        .clone()
        .or_else(|| cfg.and_then(|item| item.source_lang.clone()));
    if let Some(lang) = &source_lang {
        validate_language_code(lang)?;
    }
    let ui_mode = resolve_ui_mode_for_current_terminal(opts.ui_mode)?;

    // Agent sandbox root, derived from the input path and source roots.
    let workspace_root = derive_workspace_root(&input, &source_roots, &cwd);

    Ok(ResolvedAnnotateOptions {
        input,
        output,
        source_roots,
        source_lang,
        provider,
        model,
        concurrency,
        dry_run: opts.dry_run,
        check: opts.check,
        workspace_root,
        ui_mode,
    })
}
552
553fn validate_annotate_paths(input: &str, output: &str) -> Result<(), String> {
554 let input_format = annotation_format_from_path(input)?;
555 let output_format = annotation_format_from_path(output)?;
556 if input_format != output_format {
557 return Err(format!(
558 "Annotate output format must match input format (input='{}', output='{}')",
559 input, output
560 ));
561 }
562 Ok(())
563}
564
565fn annotation_format_from_path(path: &str) -> Result<AnnotationFormat, String> {
566 match infer_format_from_extension(path)
567 .ok_or_else(|| format!("Cannot infer annotate format from path: {}", path))?
568 {
569 FormatType::Xcstrings => Ok(AnnotationFormat::Xcstrings),
570 FormatType::Strings(_) => Ok(AnnotationFormat::Strings),
571 FormatType::AndroidStrings(_) => Ok(AnnotationFormat::AndroidStrings),
572 _ => Err(format!(
573 "annotate supports only .xcstrings, .strings, and Android strings.xml files, got '{}'",
574 path
575 )),
576 }
577}
578
579fn read_annotation_codec(path: &str, format: AnnotationFormat) -> Result<Codec, String> {
580 let format_type = format.to_format_type();
581 let language_hint = infer_language_from_path(path, &format_type).ok().flatten();
582 let mut codec = Codec::new();
583 codec
584 .read_file_by_extension_with_options(
585 path,
586 &ReadOptions::new().with_language_hint(language_hint),
587 )
588 .map_err(|e| format!("Failed to read '{}': {}", path, e))?;
589 Ok(codec)
590}
591
592fn default_source_language(codec: &Codec) -> Option<String> {
593 codec
594 .resources
595 .iter()
596 .find_map(|resource| resource.metadata.custom.get("source_language").cloned())
597 .or_else(|| {
598 (codec.resources.len() == 1)
599 .then(|| codec.resources[0].metadata.language.trim().to_string())
600 .filter(|lang| !lang.is_empty())
601 })
602}
603
/// Drives the backend over all requests with a bounded worker pool and
/// streams progress to the reporter.
///
/// The CLI is synchronous, so a dedicated multi-thread Tokio runtime is
/// built here. `concurrency.min(total)` workers pull requests from a shared
/// queue and report via an unbounded channel; the receive loop runs on the
/// current task so `reporter` (a `&mut` borrow) is only touched from one
/// place. The first error seen — from a worker result or a panicked task —
/// fails the call, but only after all workers have been joined.
fn annotate_requests(
    requests: Vec<AnnotationRequest>,
    backend: Arc<dyn AnnotationBackend>,
    concurrency: usize,
    reporter: &mut dyn RunReporter,
) -> Result<BTreeMap<String, Option<AnnotationResponse>>, String> {
    let runtime = Builder::new_multi_thread()
        .enable_all()
        .build()
        .map_err(|e| format!("Failed to start async runtime: {}", e))?;

    let total = requests.len();
    runtime.block_on(async {
        // Never spawn more workers than requests, but always at least one.
        let worker_count = concurrency.min(total).max(1);
        let queue = Arc::new(AsyncMutex::new(VecDeque::from(requests)));
        let (tx, mut rx) = mpsc::unbounded_channel::<WorkerUpdate>();
        let mut set = JoinSet::new();
        for worker_id in 1..=worker_count {
            let backend = Arc::clone(&backend);
            let queue = Arc::clone(&queue);
            let tx = tx.clone();
            set.spawn(async move {
                loop {
                    // Hold the queue lock only long enough to pop one item.
                    let request = {
                        let mut queue = queue.lock().await;
                        queue.pop_front()
                    };

                    let Some(request) = request else {
                        break;
                    };

                    let key = request.key.clone();
                    // Send failures are ignored: the receiver only closes
                    // when the reporting loop is gone.
                    let _ = tx.send(WorkerUpdate::Started {
                        worker_id,
                        key: key.clone(),
                        candidate_count: 0,
                        top_candidate: None,
                    });
                    let result = backend.annotate(request, Some(tx.clone())).await;
                    let _ = tx.send(WorkerUpdate::Finished {
                        worker_id,
                        key,
                        result,
                    });
                }

                Ok::<(), String>(())
            });
        }
        // Drop the original sender so `rx` closes once every worker clone
        // is gone; otherwise the receive loop below would never terminate.
        drop(tx);

        let mut results = BTreeMap::new();
        let mut generated = 0usize;
        let mut unmatched = 0usize;
        let mut first_error = None;

        // Single consumer of worker updates; translates them into reporter
        // events and accumulates per-key results.
        while let Some(update) = rx.recv().await {
            match update {
                WorkerUpdate::Started {
                    worker_id,
                    key,
                    candidate_count,
                    top_candidate,
                } => {
                    reporter.emit(DashboardEvent::Log {
                        tone: DashboardLogTone::Info,
                        message: annotate_worker_started_message(
                            worker_id,
                            &key,
                            candidate_count,
                            top_candidate.as_deref(),
                        ),
                    });
                    reporter.emit(DashboardEvent::UpdateItem {
                        id: key,
                        status: Some(DashboardItemStatus::Running),
                        subtitle: None,
                        source_text: None,
                        output_text: None,
                        note_text: None,
                        error_text: None,
                        extra_rows: None,
                    });
                }
                WorkerUpdate::ToolCall { tone, message } => {
                    reporter.emit(DashboardEvent::Log { tone, message });
                }
                WorkerUpdate::Finished {
                    worker_id,
                    key,
                    result,
                } => {
                    match result {
                        Ok(annotation) => {
                            // `None` is a deliberate skip by the backend,
                            // shown as Skipped rather than Failed.
                            if annotation.is_some() {
                                generated += 1;
                            } else {
                                unmatched += 1;
                            }
                            let status = if annotation.is_some() {
                                DashboardItemStatus::Succeeded
                            } else {
                                DashboardItemStatus::Skipped
                            };
                            reporter.emit(DashboardEvent::Log {
                                tone: if annotation.is_some() {
                                    DashboardLogTone::Success
                                } else {
                                    DashboardLogTone::Warning
                                },
                                message: annotate_worker_finished_message(
                                    worker_id,
                                    &key,
                                    &annotation,
                                ),
                            });
                            reporter.emit(DashboardEvent::UpdateItem {
                                id: key.clone(),
                                status: Some(status),
                                subtitle: None,
                                source_text: None,
                                output_text: annotation.as_ref().map(|item| item.comment.clone()),
                                note_text: None,
                                error_text: None,
                                extra_rows: annotation.as_ref().map(|item| {
                                    vec![SummaryRow::new("Confidence", item.confidence.clone())]
                                }),
                            });
                            results.insert(key, annotation);
                        }
                        Err(err) => {
                            reporter.emit(DashboardEvent::Log {
                                tone: DashboardLogTone::Error,
                                message: format!(
                                    "Worker {} finished key={} result=failed",
                                    worker_id, key
                                ),
                            });
                            reporter.emit(DashboardEvent::UpdateItem {
                                id: key,
                                status: Some(DashboardItemStatus::Failed),
                                subtitle: None,
                                source_text: None,
                                output_text: None,
                                note_text: None,
                                error_text: Some(err.clone()),
                                extra_rows: None,
                            });
                            // Remember only the first failure; remaining
                            // keys keep processing.
                            if first_error.is_none() {
                                first_error = Some(err);
                            }
                        }
                    }
                    reporter.emit(DashboardEvent::SummaryRows {
                        rows: annotate_summary_rows(total, generated, unmatched),
                    });
                }
            }
        }

        // Join every worker so panics surface instead of being lost.
        while let Some(joined) = set.join_next().await {
            match joined {
                Ok(Ok(())) => {}
                Ok(Err(err)) => {
                    if first_error.is_none() {
                        first_error = Some(err);
                    }
                }
                Err(err) => {
                    if first_error.is_none() {
                        first_error = Some(format!("Annotation task failed: {}", err));
                    }
                }
            }
        }

        if let Some(err) = first_error {
            return Err(err);
        }

        Ok(results)
    })
}
788
789fn build_annotation_requests(
790 codec: &Codec,
791 annotation_format: AnnotationFormat,
792 source_lang: &str,
793 source_values: &HashMap<String, String>,
794 source_roots: &[String],
795 workspace_root: &Path,
796) -> Vec<AnnotationRequest> {
797 let mut requests = Vec::new();
798 for target in collect_annotation_targets(codec, annotation_format) {
799 let source_value = source_values
800 .get(&target.key)
801 .cloned()
802 .unwrap_or_else(|| target.key.clone());
803
804 requests.push(AnnotationRequest {
805 key: target.key,
806 source_lang: source_lang.to_string(),
807 source_value,
808 existing_comment: target.existing_comment,
809 source_roots: source_roots
810 .iter()
811 .map(|root| display_path(workspace_root, Path::new(root)))
812 .collect(),
813 });
814 }
815
816 requests
817}
818
819fn collect_annotation_targets(
820 codec: &Codec,
821 annotation_format: AnnotationFormat,
822) -> Vec<AnnotationTarget> {
823 let mut targets = BTreeMap::<String, AnnotationTarget>::new();
824 let mut preserve_manual = BTreeMap::<String, bool>::new();
825
826 for resource in &codec.resources {
827 for entry in &resource.entries {
828 let key = entry.id.clone();
829 let target = targets
830 .entry(key.clone())
831 .or_insert_with(|| AnnotationTarget {
832 key: key.clone(),
833 existing_comment: None,
834 });
835
836 if target.existing_comment.is_none() {
837 target.existing_comment = display_comment(annotation_format, entry);
838 }
839
840 if should_preserve_manual_comment(annotation_format, entry) {
841 preserve_manual.insert(key, true);
842 }
843 }
844 }
845
846 targets
847 .into_iter()
848 .filter_map(|(key, target)| {
849 (!preserve_manual.get(&key).copied().unwrap_or(false)).then_some(target)
850 })
851 .collect()
852}
853
854fn should_preserve_manual_comment(annotation_format: AnnotationFormat, entry: &Entry) -> bool {
855 let Some(raw_comment) = entry.comment.as_deref() else {
856 return false;
857 };
858
859 match annotation_format {
860 AnnotationFormat::Xcstrings => !entry
861 .custom
862 .get("is_comment_auto_generated")
863 .and_then(|value| value.parse::<bool>().ok())
864 .unwrap_or(false),
865 AnnotationFormat::Strings | AnnotationFormat::AndroidStrings => {
866 !is_generated_inline_comment(annotation_format, raw_comment)
867 }
868 }
869}
870
871fn display_comment(annotation_format: AnnotationFormat, entry: &Entry) -> Option<String> {
872 let raw_comment = entry.comment.as_deref()?;
873 let comment = match annotation_format {
874 AnnotationFormat::Xcstrings => raw_comment.trim().to_string(),
875 AnnotationFormat::Strings => normalize_strings_comment(raw_comment),
876 AnnotationFormat::AndroidStrings => normalize_inline_comment(raw_comment),
877 };
878
879 (!comment.is_empty()).then_some(comment)
880}
881
882fn normalize_strings_comment(raw_comment: &str) -> String {
883 let stripped = if raw_comment.starts_with("/*") && raw_comment.ends_with("*/") {
884 raw_comment[2..raw_comment.len() - 2].trim()
885 } else if let Some(comment) = raw_comment.strip_prefix("//") {
886 comment.trim()
887 } else {
888 raw_comment.trim()
889 };
890
891 extract_generated_comment_body(stripped)
892 .unwrap_or(stripped)
893 .trim()
894 .to_string()
895}
896
897fn normalize_inline_comment(raw_comment: &str) -> String {
898 let trimmed = raw_comment.trim();
899 extract_generated_comment_body(trimmed)
900 .unwrap_or(trimmed)
901 .trim()
902 .to_string()
903}
904
905fn extract_generated_comment_body(comment: &str) -> Option<&str> {
906 let trimmed = comment.trim();
907 if trimmed == GENERATED_COMMENT_MARKER {
908 return Some("");
909 }
910
911 trimmed
912 .strip_prefix(GENERATED_COMMENT_MARKER)
913 .map(str::trim_start)
914}
915
916fn is_generated_inline_comment(annotation_format: AnnotationFormat, raw_comment: &str) -> bool {
917 match annotation_format {
918 AnnotationFormat::Xcstrings => false,
919 AnnotationFormat::Strings => {
920 extract_generated_comment_body(&normalize_strings_comment_storage(raw_comment))
921 .is_some()
922 }
923 AnnotationFormat::AndroidStrings => extract_generated_comment_body(raw_comment).is_some(),
924 }
925}
926
/// Strips `.strings` comment delimiters from a stored comment, leaving the
/// trimmed body: `/* body */` -> `body`, `// body` -> `body`, anything else
/// is returned trimmed as-is.
///
/// Uses `strip_prefix`/`strip_suffix` instead of manual slicing: the old
/// `raw_comment[2..len - 2]` panicked on degenerate inputs like `"/*/"`,
/// where the `/*` prefix and `*/` suffix overlap and the range inverts.
fn normalize_strings_comment_storage(raw_comment: &str) -> String {
    if let Some(body) = raw_comment
        .strip_prefix("/*")
        .and_then(|rest| rest.strip_suffix("*/"))
    {
        body.trim().to_string()
    } else if let Some(comment) = raw_comment.strip_prefix("//") {
        comment.trim().to_string()
    } else {
        raw_comment.trim().to_string()
    }
}
936
937fn generated_comment_storage(annotation_format: AnnotationFormat, comment: &str) -> String {
938 match annotation_format {
939 AnnotationFormat::Xcstrings => comment.to_string(),
940 AnnotationFormat::Strings => {
941 let body = comment.replace("*/", "* /").trim().to_string();
942 format!("/* {}\n{} */", GENERATED_COMMENT_MARKER, body)
943 }
944 AnnotationFormat::AndroidStrings => {
945 format!("{}\n{}", GENERATED_COMMENT_MARKER, comment.trim())
946 }
947 }
948}
949
/// Writes `comment` (in format-appropriate storage form) into every entry
/// whose id equals `key`.
///
/// Returns `Ok(true)` when at least one entry's stored state actually
/// changed, `Ok(false)` when all matching entries already carried the same
/// generated comment, and `Err` when no entry matched at all.
fn apply_annotation(
    codec: &mut Codec,
    annotation_format: AnnotationFormat,
    key: &str,
    comment: &str,
) -> Result<bool, String> {
    let stored_comment = generated_comment_storage(annotation_format, comment);
    let mut changed = false;
    let mut matched = false;

    // Deliberately scans all resources: the same key may appear in several
    // per-language resources and every copy receives the comment.
    for resource in &mut codec.resources {
        for entry in &mut resource.entries {
            if entry.id != key {
                continue;
            }

            matched = true;
            match annotation_format {
                AnnotationFormat::Xcstrings => {
                    // xcstrings stores the plain comment plus an explicit
                    // auto-generated flag; flipping the flag alone counts
                    // as a change even if the text is identical.
                    let already_generated = entry
                        .custom
                        .get("is_comment_auto_generated")
                        .and_then(|value| value.parse::<bool>().ok())
                        .unwrap_or(false);
                    if entry.comment.as_deref() != Some(comment) || !already_generated {
                        changed = true;
                    }
                    entry.comment = Some(comment.to_string());
                    entry
                        .custom
                        .insert("is_comment_auto_generated".to_string(), "true".to_string());
                }
                AnnotationFormat::Strings | AnnotationFormat::AndroidStrings => {
                    // Inline formats embed the generated marker in the
                    // stored comment text itself.
                    if entry.comment.as_deref() != Some(stored_comment.as_str()) {
                        changed = true;
                    }
                    entry.comment = Some(stored_comment.clone());
                }
            }
        }
    }

    if !matched {
        return Err(format!(
            "Annotation target '{}' was not found in loaded resources",
            key
        ));
    }

    Ok(changed)
}
1001
/// Serializes the annotated codec back to disk in its original format.
///
/// xcstrings can hold multiple languages in one file; the single-language
/// formats (.strings, strings.xml) require exactly one resource.
fn write_annotated_codec(
    codec: &Codec,
    annotation_format: AnnotationFormat,
    output: &str,
) -> Result<(), String> {
    match annotation_format {
        AnnotationFormat::Xcstrings => XcstringsFormat::try_from(codec.resources.clone())
            .map_err(|e| format!("Failed to build xcstrings output: {}", e))?
            .write_to(output)
            .map_err(|e| e.to_string()),
        AnnotationFormat::Strings => {
            let resource = single_resource_for_annotation(codec, output)?;
            StringsFormat::try_from(resource.clone())
                .map_err(|e| format!("Failed to build .strings output: {}", e))?
                .write_to(output)
                .map_err(|e| e.to_string())
        }
        AnnotationFormat::AndroidStrings => {
            let resource = single_resource_for_annotation(codec, output)?;
            AndroidStringsFormat::from(resource.clone())
                .write_to(output)
                .map_err(|e| e.to_string())
        }
    }
}
1027
1028fn single_resource_for_annotation<'a>(
1029 codec: &'a Codec,
1030 output: &str,
1031) -> Result<&'a Resource, String> {
1032 if codec.resources.len() != 1 {
1033 return Err(format!(
1034 "Expected exactly one resource when writing '{}', found {}",
1035 output,
1036 codec.resources.len()
1037 ));
1038 }
1039
1040 Ok(&codec.resources[0])
1041}
1042
/// Builds the run reporter (plain text or TUI, per the resolved UI mode)
/// pre-populated with one dashboard item per annotation request.
fn create_annotate_reporter(
    opts: &ResolvedAnnotateOptions,
    source_lang: &str,
    requests: &[AnnotationRequest],
) -> Result<Box<dyn RunReporter>, String> {
    let init = DashboardInit {
        kind: DashboardKind::Annotate,
        // Title is the input file name; fall back to the full path when the
        // name is missing or not valid UTF-8.
        title: Path::new(&opts.input)
            .file_name()
            .and_then(|name| name.to_str())
            .unwrap_or(opts.input.as_str())
            .to_string(),
        metadata: annotate_metadata_rows(opts, source_lang),
        summary_rows: annotate_summary_rows(requests.len(), 0, 0),
        items: requests.iter().map(annotate_dashboard_item).collect(),
    };
    match opts.ui_mode {
        ResolvedUiMode::Plain => Ok(Box::new(PlainReporter::new(init))),
        ResolvedUiMode::Tui => Ok(Box::new(TuiReporter::new(init)?)),
    }
}
1064
1065fn annotate_metadata_rows(opts: &ResolvedAnnotateOptions, source_lang: &str) -> Vec<SummaryRow> {
1066 let mut rows = vec![
1067 SummaryRow::new(
1068 "Provider",
1069 format!("{}:{}", opts.provider.display_name(), opts.model),
1070 ),
1071 SummaryRow::new("Input", opts.input.clone()),
1072 SummaryRow::new("Output", opts.output.clone()),
1073 SummaryRow::new("Source language", source_lang.to_string()),
1074 SummaryRow::new("Concurrency", opts.concurrency.to_string()),
1075 ];
1076 if opts.dry_run {
1077 rows.push(SummaryRow::new("Mode", "dry-run"));
1078 }
1079 if opts.check {
1080 rows.push(SummaryRow::new("Check", "enabled"));
1081 }
1082 rows
1083}
1084
1085fn annotate_summary_rows(total: usize, generated: usize, unmatched: usize) -> Vec<SummaryRow> {
1086 vec![
1087 SummaryRow::new("Total", total.to_string()),
1088 SummaryRow::new("Generated", generated.to_string()),
1089 SummaryRow::new("Skipped", unmatched.to_string()),
1090 ]
1091}
1092
/// Builds the initial (queued) dashboard item for one request; the source
/// text and any pre-existing comment are shown for context while it runs.
fn annotate_dashboard_item(request: &AnnotationRequest) -> DashboardItem {
    // The key doubles as the item id and its display label.
    let mut item = DashboardItem::new(
        request.key.clone(),
        request.key.clone(),
        request.source_lang.clone(),
        DashboardItemStatus::Queued,
    );
    item.source_text = Some(request.source_value.clone());
    item.note_text = request.existing_comment.clone();
    item
}
1104
/// Formats the log line emitted when a worker picks up a key; the `top=`
/// suffix appears only when a top candidate path is known.
fn annotate_worker_started_message(
    worker_id: usize,
    key: &str,
    candidate_count: usize,
    top_candidate: Option<&str>,
) -> String {
    match top_candidate {
        Some(path) => format!(
            "Worker {} started key={} shortlist={} top={}",
            worker_id, key, candidate_count, path
        ),
        None => format!(
            "Worker {} started key={} shortlist={}",
            worker_id, key, candidate_count
        ),
    }
}
1121
1122fn annotate_worker_finished_message(
1123 worker_id: usize,
1124 key: &str,
1125 result: &Option<AnnotationResponse>,
1126) -> String {
1127 let status = if result.is_some() {
1128 "generated"
1129 } else {
1130 "skipped"
1131 };
1132 format!(
1133 "Worker {} finished key={} result={}",
1134 worker_id, key, status
1135 )
1136}
1137
1138fn source_value_map(resources: &[Resource], source_lang: &str) -> HashMap<String, String> {
1139 resources
1140 .iter()
1141 .find(|resource| lang_matches(&resource.metadata.language, source_lang))
1142 .map(|resource| {
1143 resource
1144 .entries
1145 .iter()
1146 .map(|entry| {
1147 (
1148 entry.id.clone(),
1149 translation_to_text(&entry.value, &entry.id),
1150 )
1151 })
1152 .collect()
1153 })
1154 .unwrap_or_default()
1155}
1156
1157fn translation_to_text(value: &Translation, fallback_key: &str) -> String {
1158 match value {
1159 Translation::Empty => fallback_key.to_string(),
1160 Translation::Singular(text) => text.clone(),
1161 Translation::Plural(plural) => plural
1162 .forms
1163 .values()
1164 .next()
1165 .cloned()
1166 .unwrap_or_else(|| fallback_key.to_string()),
1167 }
1168}
1169
/// Builds the agent configuration used for one annotation request.
///
/// The agent is limited to the `files` and `shell` tools and is rooted at
/// `workspace_root`, so code inspection stays inside the project tree.
fn build_agent_config(workspace_root: &Path) -> AgentConfig {
    AgentConfig {
        // Fixed instructions that demand JSON-only annotation output.
        system: Some(ANNOTATION_SYSTEM_PROMPT.to_string()),
        // Low temperature keeps generated comments stable across runs.
        temperature: Some(0.2),
        max_output_tokens: Some(512),
        // Only file reads and shell searches — no task spawning or other tools.
        tool_profile: ToolProfile::only(["files", "shell"]),
        provider_request_options: ProviderRequestOptions {
            responses: ResponsesRequestOptions {
                // Serialize tool calls; one at a time.
                parallel_tool_calls: Some(false),
                ..ResponsesRequestOptions::default()
            },
            ..ProviderRequestOptions::default()
        },
        workspace: WorkspaceConfig {
            base_dir: workspace_root.to_path_buf(),
            auto_route_shell: false,
        },
        ..AgentConfig::default()
    }
}
1190
1191fn build_annotation_prompt(request: &AnnotationRequest) -> String {
1192 let mut prompt = format!(
1193 "Write one translator-facing comment for this localization entry.\n\nKey: {}\nSource language: {}\nSource value: {}\n",
1194 request.key, request.source_lang, request.source_value
1195 );
1196
1197 if let Some(existing_comment) = &request.existing_comment {
1198 prompt.push_str("\nExisting auto-generated comment:\n");
1199 prompt.push_str(existing_comment);
1200 prompt.push('\n');
1201 }
1202
1203 prompt.push_str("\nSource roots you may inspect with the files tool:\n");
1204 for root in &request.source_roots {
1205 prompt.push_str("- ");
1206 prompt.push_str(root);
1207 prompt.push('\n');
1208 }
1209
1210 prompt.push_str(
1211 "\nUse the shell tool for fast code search, preferably with rg, within these roots before drafting when the usage is not already obvious. Then use files reads for only the most relevant hits. Avoid broad repeated searches or directory listings.\n",
1212 );
1213
1214 prompt.push_str(
1215 "\nRequirements:\n- Keep the comment concise and useful for translators.\n- Prefer describing UI role or user-facing context.\n- If confidence is low, mention the concrete code usage you found instead of guessing product meaning.\n- Use as few tool calls as practical; usually one rg search plus a small number of targeted file reads is enough.\n- Do not mention internal file paths unless they clarify usage.\n- Return JSON only: {\"comment\":\"...\",\"confidence\":\"high|medium|low\"}.\n",
1216 );
1217 prompt
1218}
1219
/// Spawns a background task that mirrors the agent's tool activity for one
/// annotation key into the dashboard update channel.
///
/// The task runs until the agent's broadcast channel is closed; lagged
/// receives (dropped events) are tolerated and simply skipped.
fn spawn_tool_call_logger(
    mut events: broadcast::Receiver<AgentEvent>,
    key: String,
    event_tx: Option<mpsc::UnboundedSender<WorkerUpdate>>,
) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async move {
        loop {
            match events.recv().await {
                Ok(AgentEvent::ToolExecutionStarted { call }) => {
                    if let Some(tx) = &event_tx {
                        // Send failures are ignored: the UI side may be gone.
                        let _ = tx.send(WorkerUpdate::ToolCall {
                            tone: DashboardLogTone::Info,
                            message: format!(
                                "Tool call key={} tool={} input={}",
                                key,
                                call.name,
                                compact_tool_input(&call.input)
                            ),
                        });
                    }
                }
                Ok(AgentEvent::ToolExecutionFinished { result }) => {
                    // Derive a status label from the tool-result block.
                    let status = match result {
                        ContentBlock::ToolResult { is_error, .. } if is_error => "error",
                        ContentBlock::ToolResult { .. } => "ok",
                        _ => "unknown",
                    };
                    if let Some(tx) = &event_tx {
                        let tone = if status == "error" {
                            DashboardLogTone::Error
                        } else {
                            DashboardLogTone::Success
                        };
                        let _ = tx.send(WorkerUpdate::ToolCall {
                            tone,
                            message: format!("Tool result key={} status={}", key, status),
                        });
                    }
                }
                // Other agent events are not interesting for the tool log.
                Ok(_) => {}
                Err(broadcast::error::RecvError::Closed) => break,
                // Missing a few events is acceptable for log output.
                Err(broadcast::error::RecvError::Lagged(_)) => continue,
            }
        }
    })
}
1266
1267fn compact_tool_input(input: &Value) -> String {
1268 const MAX_TOOL_INPUT_CHARS: usize = 180;
1269
1270 let rendered = serde_json::to_string(input).unwrap_or_else(|_| "<unserializable>".to_string());
1271 let mut preview = rendered
1272 .chars()
1273 .take(MAX_TOOL_INPUT_CHARS)
1274 .collect::<String>();
1275 if rendered.chars().count() > MAX_TOOL_INPUT_CHARS {
1276 preview.push_str("...");
1277 }
1278 preview
1279}
1280
1281fn parse_annotation_response(text: &str) -> Result<AnnotationResponse, String> {
1282 let trimmed = text.trim();
1283 if trimmed.is_empty() {
1284 return Err("Model returned an empty annotation response".to_string());
1285 }
1286
1287 if let Ok(payload) = serde_json::from_str::<AnnotationResponse>(trimmed) {
1288 return validate_annotation_response(payload);
1289 }
1290
1291 if let Some(json_body) = extract_json_body(trimmed)
1292 && let Ok(payload) = serde_json::from_str::<AnnotationResponse>(&json_body)
1293 {
1294 return validate_annotation_response(payload);
1295 }
1296
1297 Err(format!(
1298 "Model response was not valid annotation JSON: {}",
1299 trimmed
1300 ))
1301}
1302
1303fn validate_annotation_response(payload: AnnotationResponse) -> Result<AnnotationResponse, String> {
1304 if payload.comment.trim().is_empty() {
1305 return Err("Model returned an empty annotation comment".to_string());
1306 }
1307 Ok(payload)
1308}
1309
/// Strips a Markdown code fence (```json or bare ```) from `text`, returning
/// the trimmed interior, or `None` when the text is not properly fenced.
fn extract_json_body(text: &str) -> Option<String> {
    let after_open = match text.strip_prefix("```json") {
        Some(rest) => rest,
        None => text.strip_prefix("```")?,
    };
    let body = after_open.trim_start().strip_suffix("```")?;
    Some(body.trim().to_string())
}
1318
/// Converts `path` to an absolute path string, resolving relative paths
/// against `cwd`. No filesystem access; purely lexical.
fn absolutize_path(path: &str, cwd: &Path) -> String {
    let candidate = Path::new(path);
    let resolved = if candidate.is_absolute() {
        candidate.to_path_buf()
    } else {
        cwd.join(candidate)
    };
    resolved.to_string_lossy().into_owned()
}
1327
1328fn derive_workspace_root(input: &str, source_roots: &[String], fallback: &Path) -> PathBuf {
1329 let mut candidates = Vec::new();
1330 candidates.push(path_root_candidate(Path::new(input)));
1331 for root in source_roots {
1332 candidates.push(path_root_candidate(Path::new(root)));
1333 }
1334
1335 common_ancestor(candidates.into_iter().flatten().collect::<Vec<_>>())
1336 .unwrap_or_else(|| fallback.to_path_buf())
1337}
1338
/// Resolves `path` to an absolute directory candidate: canonicalizes when
/// possible, accepts non-existent absolute paths as-is, and maps files to
/// their parent directory. Returns `None` for unresolvable relative paths.
fn path_root_candidate(path: &Path) -> Option<PathBuf> {
    let absolute = match fs::canonicalize(path) {
        Ok(resolved) => resolved,
        Err(_) if path.is_absolute() => path.to_path_buf(),
        Err(_) => return None,
    };

    match absolute.is_dir() {
        true => Some(absolute),
        false => absolute.parent().map(Path::to_path_buf),
    }
}
1354
/// Returns the longest path prefix shared by all `paths`, or `None` when the
/// list is empty or no common prefix exists.
fn common_ancestor(paths: Vec<PathBuf>) -> Option<PathBuf> {
    let mut iter = paths.into_iter();
    // Seed the running prefix with the first path; each later path can only
    // shrink it.
    let mut prefix = iter.next()?;

    for path in iter {
        // Pop components until `prefix` is an ancestor of `path`. The old
        // code cloned the accumulator on every iteration before popping;
        // popping in place is behaviorally identical and avoids the copy.
        while !path.starts_with(&prefix) {
            if !prefix.pop() {
                return None;
            }
        }
    }

    Some(prefix)
}
1372
/// Renders `path` relative to `workspace_root` when it lies inside the
/// workspace; otherwise falls back to the full path.
fn display_path(workspace_root: &Path, path: &Path) -> String {
    match path.strip_prefix(workspace_root) {
        Ok(relative) => relative.to_string_lossy().into_owned(),
        Err(_) => path.to_string_lossy().into_owned(),
    }
}
1378
1379fn lang_matches(left: &str, right: &str) -> bool {
1380 normalize_lang(left) == normalize_lang(right)
1381}
1382
/// Canonicalizes a language tag: trims whitespace, maps `_` to `-`, and
/// lowercases ASCII letters (e.g. " en_US " -> "en-us").
fn normalize_lang(lang: &str) -> String {
    lang.trim()
        .chars()
        .map(|c| if c == '_' { '-' } else { c.to_ascii_lowercase() })
        .collect()
}
1386
1387#[cfg(test)]
1388mod tests {
1389 use super::*;
1390 use mentra::{
1391 BuiltinProvider, ModelInfo, ProviderDescriptor,
1392 provider::{
1393 ContentBlockDelta, ContentBlockStart, Provider, ProviderEvent, ProviderEventStream,
1394 Request, Response, Role, provider_event_stream_from_response,
1395 },
1396 runtime::RunOptions,
1397 };
1398 use std::sync::{Arc, Mutex};
1399 use tempfile::TempDir;
1400
    /// Test double for `AnnotationBackend` that replays canned responses
    /// keyed by entry id instead of calling a real model.
    struct FakeBackend {
        responses: HashMap<String, Option<AnnotationResponse>>,
    }
1404
    #[async_trait]
    impl AnnotationBackend for FakeBackend {
        // Looks up the canned response for the requested key; `Ok(None)`
        // (treated as "skipped" by the pipeline) for unscripted keys.
        async fn annotate(
            &self,
            request: AnnotationRequest,
            _event_tx: Option<mpsc::UnboundedSender<WorkerUpdate>>,
        ) -> Result<Option<AnnotationResponse>, String> {
            Ok(self.responses.get(&request.key).cloned().flatten())
        }
    }
1415
    /// Backend that owns a nested Tokio runtime, used to verify that the
    /// backend can be dropped from within an async context without panicking.
    struct RuntimeHoldingBackend {
        _runtime: Arc<tokio::runtime::Runtime>,
    }
1419
    #[async_trait]
    impl AnnotationBackend for RuntimeHoldingBackend {
        // Always succeeds with a fixed high-confidence comment; the request
        // contents are irrelevant to what this double exercises.
        async fn annotate(
            &self,
            _request: AnnotationRequest,
            _event_tx: Option<mpsc::UnboundedSender<WorkerUpdate>>,
        ) -> Result<Option<AnnotationResponse>, String> {
            Ok(Some(AnnotationResponse {
                comment: "Generated comment".to_string(),
                confidence: "high".to_string(),
            }))
        }
    }
1433
    /// Provider stub that records every request it receives and answers each
    /// one with a fixed single-shot JSON annotation response.
    struct RecordingProvider {
        requests: Arc<Mutex<Vec<Request<'static>>>>,
    }
1437
    /// Provider stub that records requests and replays pre-scripted streaming
    /// event sequences, consuming one script per `stream` call.
    struct ScriptedStreamingProvider {
        requests: Arc<Mutex<Vec<Request<'static>>>>,
        scripts: Arc<Mutex<VecDeque<Vec<ProviderEvent>>>>,
    }
1442
    #[async_trait]
    impl Provider for RecordingProvider {
        fn descriptor(&self) -> ProviderDescriptor {
            ProviderDescriptor::new(BuiltinProvider::OpenAI)
        }

        async fn list_models(&self) -> Result<Vec<ModelInfo>, mentra::provider::ProviderError> {
            Ok(vec![ModelInfo::new("test-model", BuiltinProvider::OpenAI)])
        }

        // Records the incoming request, then answers with one canned JSON
        // annotation so callers always observe a successful completion.
        async fn stream(
            &self,
            request: Request<'_>,
        ) -> Result<ProviderEventStream, mentra::provider::ProviderError> {
            self.requests
                .lock()
                .expect("requests lock")
                .push(request.clone().into_owned());
            Ok(provider_event_stream_from_response(Response {
                id: "resp-1".to_string(),
                model: request.model.to_string(),
                role: Role::Assistant,
                content: vec![ContentBlock::text(
                    r#"{"comment":"A button label that starts the game.","confidence":"high"}"#,
                )],
                stop_reason: Some("end_turn".to_string()),
                usage: None,
            }))
        }
    }
1473
    #[async_trait]
    impl Provider for ScriptedStreamingProvider {
        fn descriptor(&self) -> ProviderDescriptor {
            ProviderDescriptor::new(BuiltinProvider::OpenAI)
        }

        async fn list_models(&self) -> Result<Vec<ModelInfo>, mentra::provider::ProviderError> {
            Ok(vec![ModelInfo::new("test-model", BuiltinProvider::OpenAI)])
        }

        // Records the request, pops the next scripted event sequence, and
        // replays it over a fresh channel; panics if no script remains.
        async fn stream(
            &self,
            request: Request<'_>,
        ) -> Result<ProviderEventStream, mentra::provider::ProviderError> {
            self.requests
                .lock()
                .expect("requests lock")
                .push(request.clone().into_owned());
            let script = self
                .scripts
                .lock()
                .expect("scripts lock")
                .pop_front()
                .expect("missing scripted response");

            let (tx, rx) = mpsc::unbounded_channel();
            for event in script {
                tx.send(Ok(event)).expect("send provider event");
            }
            Ok(rx)
        }
    }
1506
    // The annotation agent may use exactly the files and shell tools, and
    // nothing broader (e.g. no task spawning).
    // NOTE(review): the test name mentions only "files" but shell is also
    // asserted as allowed — consider renaming for accuracy.
    #[test]
    fn build_agent_config_limits_tools_to_files() {
        let config = build_agent_config(Path::new("/tmp/project"));
        assert!(config.tool_profile.allows("files"));
        assert!(config.tool_profile.allows("shell"));
        assert!(!config.tool_profile.allows("task"));
    }
1514
    // A JSON payload wrapped in a Markdown code fence must still parse into
    // the structured annotation response.
    #[test]
    fn parse_annotation_response_accepts_fenced_json() {
        let parsed = parse_annotation_response(
            "```json\n{\"comment\":\"Dialog title for room exit confirmation.\",\"confidence\":\"medium\"}\n```",
        )
        .expect("parse response");
        assert_eq!(
            parsed,
            AnnotationResponse {
                comment: "Dialog title for room exit confirmation.".to_string(),
                confidence: "medium".to_string(),
            }
        );
    }
1529
    // Full pipeline over an .xcstrings file: entries without a comment and
    // entries whose comment was previously auto-generated receive fresh
    // comments, while human-authored comments are preserved verbatim.
    #[test]
    fn run_annotate_updates_missing_and_auto_generated_comments_only() {
        let temp_dir = TempDir::new().expect("temp dir");
        let input = temp_dir.path().join("Localizable.xcstrings");
        let source_root = temp_dir.path().join("Sources");
        fs::create_dir_all(&source_root).expect("create root");
        // Minimal Swift file so the source root is non-empty.
        fs::write(
            source_root.join("GameView.swift"),
            r#"Text("Start", bundle: .module)"#,
        )
        .expect("write swift");
        // Fixture: "start" has no comment, "cancel" has a human comment,
        // and "retry" carries a stale auto-generated comment.
        fs::write(
            &input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {
        "start": {
            "localizations": {
                "en": { "stringUnit": { "state": "translated", "value": "Start" } }
            }
        },
        "cancel": {
            "comment": "Written by a human.",
            "localizations": {
                "en": { "stringUnit": { "state": "translated", "value": "Cancel" } }
            }
        },
        "retry": {
            "comment": "Old auto comment",
            "isCommentAutoGenerated": true,
            "localizations": {
                "en": { "stringUnit": { "state": "translated", "value": "Retry" } }
            }
        }
    }
}"#,
        )
        .expect("write xcstrings");

        // Script responses for the two keys that are eligible for rewriting.
        let mut responses = HashMap::new();
        responses.insert(
            "start".to_string(),
            Some(AnnotationResponse {
                comment: "A button label that starts the game.".to_string(),
                confidence: "high".to_string(),
            }),
        );
        responses.insert(
            "retry".to_string(),
            Some(AnnotationResponse {
                comment: "A button label shown when the user can try the action again.".to_string(),
                confidence: "high".to_string(),
            }),
        );

        let opts = ResolvedAnnotateOptions {
            input: input.to_string_lossy().to_string(),
            output: input.to_string_lossy().to_string(),
            source_roots: vec![source_root.to_string_lossy().to_string()],
            source_lang: Some("en".to_string()),
            provider: ProviderKind::OpenAI,
            model: "test-model".to_string(),
            concurrency: 1,
            dry_run: false,
            check: false,
            workspace_root: temp_dir.path().to_path_buf(),
            ui_mode: ResolvedUiMode::Plain,
        };

        run_annotate_with_backend(opts, Arc::new(FakeBackend { responses }))
            .expect("annotate command");

        let payload = serde_json::from_str::<serde_json::Value>(
            &fs::read_to_string(&input).expect("read output"),
        )
        .expect("parse output");

        // New comments are written and flagged as auto-generated.
        assert_eq!(
            payload["strings"]["start"]["comment"],
            serde_json::Value::String("A button label that starts the game.".to_string())
        );
        assert_eq!(
            payload["strings"]["start"]["isCommentAutoGenerated"],
            serde_json::Value::Bool(true)
        );
        assert_eq!(
            payload["strings"]["retry"]["comment"],
            serde_json::Value::String(
                "A button label shown when the user can try the action again.".to_string()
            )
        );
        // The human-authored comment must survive untouched.
        assert_eq!(
            payload["strings"]["cancel"]["comment"],
            serde_json::Value::String("Written by a human.".to_string())
        );
    }
1627
    // Same update-vs-preserve semantics for Apple .strings files, where the
    // auto-generated marker lives inside the /* */ comment text.
    #[test]
    fn run_annotate_supports_apple_strings_files() {
        let temp_dir = TempDir::new().expect("temp dir");
        let input_dir = temp_dir.path().join("en.lproj");
        let input = input_dir.join("Localizable.strings");
        let source_root = temp_dir.path().join("Sources");
        fs::create_dir_all(&input_dir).expect("create input dir");
        fs::create_dir_all(&source_root).expect("create root");
        // Fixture: human comment on "cancel", none on "start", marker-tagged
        // auto comment on "retry".
        fs::write(
            &input,
            r#"/* Written by a human. */
"cancel" = "Cancel";
"start" = "Start";
/* langcodec:auto-generated
Old auto comment */
"retry" = "Retry";
"#,
        )
        .expect("write strings");

        let mut responses = HashMap::new();
        responses.insert(
            "start".to_string(),
            Some(AnnotationResponse {
                comment: "A button label that starts the game.".to_string(),
                confidence: "high".to_string(),
            }),
        );
        responses.insert(
            "retry".to_string(),
            Some(AnnotationResponse {
                comment: "A button label shown when the user can try the action again.".to_string(),
                confidence: "high".to_string(),
            }),
        );

        let opts = ResolvedAnnotateOptions {
            input: input.to_string_lossy().to_string(),
            output: input.to_string_lossy().to_string(),
            source_roots: vec![source_root.to_string_lossy().to_string()],
            source_lang: Some("en".to_string()),
            provider: ProviderKind::OpenAI,
            model: "test-model".to_string(),
            concurrency: 1,
            dry_run: false,
            check: false,
            workspace_root: temp_dir.path().to_path_buf(),
            ui_mode: ResolvedUiMode::Plain,
        };

        run_annotate_with_backend(opts, Arc::new(FakeBackend { responses }))
            .expect("annotate strings");

        // Re-read the written file and compare normalized comments per key.
        let format = StringsFormat::read_from(&input).expect("read strings output");
        let mut comments = HashMap::new();
        for pair in format.pairs {
            let key = pair.key.clone();
            comments.insert(
                key,
                pair.comment
                    .as_deref()
                    .map(normalize_strings_comment)
                    .unwrap_or_default(),
            );
        }

        assert_eq!(
            comments.get("start").map(String::as_str),
            Some("A button label that starts the game.")
        );
        assert_eq!(
            comments.get("retry").map(String::as_str),
            Some("A button label shown when the user can try the action again.")
        );
        assert_eq!(
            comments.get("cancel").map(String::as_str),
            Some("Written by a human.")
        );

        // The marker must be re-emitted so future runs can tell the
        // generated comments apart from human ones.
        let written = fs::read_to_string(&input).expect("read written strings");
        assert!(written.contains("langcodec:auto-generated"));
    }
1710
    // Same update-vs-preserve semantics for Android strings.xml, including
    // <plurals> entries; the marker lives in the XML comment text.
    #[test]
    fn run_annotate_supports_android_strings_files() {
        let temp_dir = TempDir::new().expect("temp dir");
        let values_dir = temp_dir.path().join("values");
        let input = values_dir.join("strings.xml");
        let source_root = temp_dir.path().join("Sources");
        fs::create_dir_all(&values_dir).expect("create values dir");
        fs::create_dir_all(&source_root).expect("create root");
        fs::write(
            &input,
            r#"<resources>
<!-- Written by a human. -->
<string name="cancel">Cancel</string>
<string name="start">Start</string>
<!-- langcodec:auto-generated
Old auto comment -->
<string name="retry">Retry</string>
<plurals name="apples">
<item quantity="one">One apple</item>
<item quantity="other">%d apples</item>
</plurals>
</resources>
"#,
        )
        .expect("write xml");

        let mut responses = HashMap::new();
        responses.insert(
            "start".to_string(),
            Some(AnnotationResponse {
                comment: "A button label that starts the game.".to_string(),
                confidence: "high".to_string(),
            }),
        );
        responses.insert(
            "retry".to_string(),
            Some(AnnotationResponse {
                comment: "A button label shown when the user can try the action again.".to_string(),
                confidence: "high".to_string(),
            }),
        );
        responses.insert(
            "apples".to_string(),
            Some(AnnotationResponse {
                comment: "Pluralized inventory count for apples.".to_string(),
                confidence: "high".to_string(),
            }),
        );

        let opts = ResolvedAnnotateOptions {
            input: input.to_string_lossy().to_string(),
            output: input.to_string_lossy().to_string(),
            source_roots: vec![source_root.to_string_lossy().to_string()],
            source_lang: Some("en".to_string()),
            provider: ProviderKind::OpenAI,
            model: "test-model".to_string(),
            concurrency: 1,
            dry_run: false,
            check: false,
            workspace_root: temp_dir.path().to_path_buf(),
            ui_mode: ResolvedUiMode::Plain,
        };

        run_annotate_with_backend(opts, Arc::new(FakeBackend { responses }))
            .expect("annotate android");

        // Collect per-key comments for both <string> and <plurals> entries.
        let format = AndroidStringsFormat::read_from(&input).expect("read android output");
        let mut string_comments = HashMap::new();
        for item in format.strings {
            string_comments.insert(item.name, item.comment.unwrap_or_default());
        }
        let mut plural_comments = HashMap::new();
        for item in format.plurals {
            plural_comments.insert(item.name, item.comment.unwrap_or_default());
        }

        assert_eq!(
            normalize_inline_comment(string_comments["start"].as_str()),
            "A button label that starts the game."
        );
        assert_eq!(
            normalize_inline_comment(string_comments["retry"].as_str()),
            "A button label shown when the user can try the action again."
        );
        assert_eq!(
            normalize_inline_comment(string_comments["cancel"].as_str()),
            "Written by a human."
        );
        assert_eq!(
            normalize_inline_comment(plural_comments["apples"].as_str()),
            "Pluralized inventory count for apples."
        );

        let written = fs::read_to_string(&input).expect("read written xml");
        assert!(written.contains("langcodec:auto-generated"));
    }
1807
    // With dry_run enabled, the pipeline must leave the input file
    // byte-for-byte untouched even though a comment would be generated.
    #[test]
    fn run_annotate_dry_run_does_not_write_changes() {
        let temp_dir = TempDir::new().expect("temp dir");
        let input = temp_dir.path().join("Localizable.xcstrings");
        let source_root = temp_dir.path().join("Sources");
        fs::create_dir_all(&source_root).expect("create root");
        fs::write(
            &input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {
        "start": {
            "localizations": {
                "en": { "stringUnit": { "state": "translated", "value": "Start" } }
            }
        }
    }
}"#,
        )
        .expect("write xcstrings");

        let original = fs::read_to_string(&input).expect("read original");
        let mut responses = HashMap::new();
        responses.insert(
            "start".to_string(),
            Some(AnnotationResponse {
                comment: "A button label that starts the game.".to_string(),
                confidence: "high".to_string(),
            }),
        );

        let opts = ResolvedAnnotateOptions {
            input: input.to_string_lossy().to_string(),
            output: input.to_string_lossy().to_string(),
            source_roots: vec![source_root.to_string_lossy().to_string()],
            source_lang: Some("en".to_string()),
            provider: ProviderKind::OpenAI,
            model: "test-model".to_string(),
            concurrency: 1,
            dry_run: true,
            check: false,
            workspace_root: temp_dir.path().to_path_buf(),
            ui_mode: ResolvedUiMode::Plain,
        };

        run_annotate_with_backend(opts, Arc::new(FakeBackend { responses }))
            .expect("annotate command");

        assert_eq!(fs::read_to_string(&input).expect("read output"), original);
    }
1859
    // In check mode the command must fail (rather than write) when a
    // generated comment differs from what is already on disk.
    #[test]
    fn run_annotate_check_fails_when_changes_would_be_written() {
        let temp_dir = TempDir::new().expect("temp dir");
        let input = temp_dir.path().join("Localizable.xcstrings");
        let source_root = temp_dir.path().join("Sources");
        fs::create_dir_all(&source_root).expect("create root");
        fs::write(
            &input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {
        "start": {
            "localizations": {
                "en": { "stringUnit": { "state": "translated", "value": "Start" } }
            }
        }
    }
}"#,
        )
        .expect("write xcstrings");

        let mut responses = HashMap::new();
        responses.insert(
            "start".to_string(),
            Some(AnnotationResponse {
                comment: "A button label that starts the game.".to_string(),
                confidence: "high".to_string(),
            }),
        );

        let opts = ResolvedAnnotateOptions {
            input: input.to_string_lossy().to_string(),
            output: input.to_string_lossy().to_string(),
            source_roots: vec![source_root.to_string_lossy().to_string()],
            source_lang: Some("en".to_string()),
            provider: ProviderKind::OpenAI,
            model: "test-model".to_string(),
            concurrency: 1,
            dry_run: false,
            check: true,
            workspace_root: temp_dir.path().to_path_buf(),
            ui_mode: ResolvedUiMode::Plain,
        };

        let error = run_annotate_with_backend(opts, Arc::new(FakeBackend { responses }))
            .expect_err("check mode should fail");
        assert!(error.contains("would change"));
    }
1909
    // Regression test: dropping a backend that owns its own Tokio runtime
    // from within annotate_requests' async machinery must not panic
    // (Tokio forbids dropping a runtime inside an async context).
    #[test]
    fn annotate_requests_does_not_drop_backend_runtime_inside_async_context() {
        let requests = vec![AnnotationRequest {
            key: "start".to_string(),
            source_lang: "en".to_string(),
            source_value: "Start".to_string(),
            existing_comment: None,
            source_roots: vec!["Sources".to_string()],
        }];
        // Backend deliberately holds a nested runtime to trigger the hazard.
        let backend: Arc<dyn AnnotationBackend> = Arc::new(RuntimeHoldingBackend {
            _runtime: Arc::new(
                tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .expect("build nested runtime"),
            ),
        });
        let init = DashboardInit {
            kind: DashboardKind::Annotate,
            title: "test".to_string(),
            metadata: Vec::new(),
            summary_rows: annotate_summary_rows(1, 0, 0),
            items: requests.iter().map(annotate_dashboard_item).collect(),
        };
        let mut reporter = PlainReporter::new(init);

        // catch_unwind so a panic is reported as a test failure, not an abort.
        let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            annotate_requests(requests, Arc::clone(&backend), 1, &mut reporter)
        }));

        assert!(result.is_ok(), "annotate_requests should not panic");
        let annotations = result.expect("no panic").expect("annotation results");
        assert_eq!(annotations.len(), 1);
        assert!(annotations["start"].is_some());
    }
1945
    // With no CLI overrides, every option must come from the config file's
    // [annotate] and provider sections, with relative paths resolved against
    // the config file's directory.
    #[test]
    fn resolve_annotate_options_uses_provider_section_defaults() {
        let temp_dir = TempDir::new().expect("temp dir");
        let project_dir = temp_dir.path().join("project");
        let sources_dir = project_dir.join("Sources");
        let modules_dir = project_dir.join("Modules");
        fs::create_dir_all(&sources_dir).expect("create Sources");
        fs::create_dir_all(&modules_dir).expect("create Modules");
        let input = project_dir.join("Localizable.xcstrings");
        fs::write(
            &input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {}
}"#,
        )
        .expect("write xcstrings");

        let config_path = project_dir.join("langcodec.toml");
        fs::write(
            &config_path,
            r#"[openai]
model = "gpt-5.4"

[annotate]
input = "Localizable.xcstrings"
source_roots = ["Sources", "Modules"]
output = "Annotated.xcstrings"
source_lang = "en"
concurrency = 2
"#,
        )
        .expect("write config");

        let loaded = load_config(Some(config_path.to_str().expect("config path")))
            .expect("load config")
            .expect("config present");

        // All CLI fields left unset so config defaults must win.
        let resolved = resolve_annotate_options(
            &AnnotateOptions {
                input: None,
                source_roots: Vec::new(),
                output: None,
                source_lang: None,
                provider: None,
                model: None,
                concurrency: None,
                config: Some(config_path.to_string_lossy().to_string()),
                dry_run: false,
                check: false,
                ui_mode: UiMode::Plain,
            },
            Some(&loaded),
        )
        .expect("resolve annotate options");

        assert_eq!(resolved.input, input.to_string_lossy().to_string());
        assert_eq!(
            resolved.output,
            project_dir
                .join("Annotated.xcstrings")
                .to_string_lossy()
                .to_string()
        );
        assert_eq!(
            resolved.source_roots,
            vec![
                sources_dir.to_string_lossy().to_string(),
                modules_dir.to_string_lossy().to_string()
            ]
        );
        assert_eq!(resolved.source_lang.as_deref(), Some("en"));
        assert_eq!(resolved.provider, ProviderKind::OpenAI);
        assert_eq!(resolved.model, "gpt-5.4");
        assert_eq!(resolved.concurrency, 2);
    }
2023
    // Every explicitly passed CLI value must override the corresponding
    // config-file value, including provider/model and the boolean flags.
    #[test]
    fn resolve_annotate_options_prefers_cli_over_config() {
        let temp_dir = TempDir::new().expect("temp dir");
        let project_dir = temp_dir.path().join("project");
        let config_sources_dir = project_dir.join("Sources");
        let cli_sources_dir = project_dir.join("AppSources");
        fs::create_dir_all(&config_sources_dir).expect("create config Sources");
        fs::create_dir_all(&cli_sources_dir).expect("create cli Sources");
        let config_input = project_dir.join("Localizable.xcstrings");
        let cli_input = project_dir.join("Runtime.xcstrings");
        fs::write(
            &config_input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {}
}"#,
        )
        .expect("write config xcstrings");
        fs::write(
            &cli_input,
            r#"{
    "sourceLanguage": "en",
    "version": "1.0",
    "strings": {}
}"#,
        )
        .expect("write cli xcstrings");

        let config_path = project_dir.join("langcodec.toml");
        fs::write(
            &config_path,
            r#"[openai]
model = "gpt-5.4"

[annotate]
input = "Localizable.xcstrings"
source_roots = ["Sources"]
source_lang = "en"
concurrency = 2
"#,
        )
        .expect("write config");

        let loaded = load_config(Some(config_path.to_str().expect("config path")))
            .expect("load config")
            .expect("config present");

        // CLI values set for every field present in the config.
        let resolved = resolve_annotate_options(
            &AnnotateOptions {
                input: Some(cli_input.to_string_lossy().to_string()),
                source_roots: vec![cli_sources_dir.to_string_lossy().to_string()],
                output: Some(
                    project_dir
                        .join("Output.xcstrings")
                        .to_string_lossy()
                        .to_string(),
                ),
                source_lang: Some("fr".to_string()),
                provider: Some("anthropic".to_string()),
                model: Some("claude-sonnet".to_string()),
                concurrency: Some(6),
                config: Some(config_path.to_string_lossy().to_string()),
                dry_run: true,
                check: true,
                ui_mode: UiMode::Plain,
            },
            Some(&loaded),
        )
        .expect("resolve annotate options");

        assert_eq!(resolved.input, cli_input.to_string_lossy().to_string());
        assert_eq!(
            resolved.source_roots,
            vec![cli_sources_dir.to_string_lossy().to_string()]
        );
        assert_eq!(resolved.source_lang.as_deref(), Some("fr"));
        assert_eq!(resolved.provider, ProviderKind::Anthropic);
        assert_eq!(resolved.model, "claude-sonnet");
        assert_eq!(resolved.concurrency, 6);
        assert!(resolved.dry_run);
        assert!(resolved.check);
    }
2107
    // A config [annotate] section listing several `inputs` must fan out into
    // one resolved run per input, each sharing the same source roots.
    #[test]
    fn expand_annotate_invocations_supports_multiple_config_inputs() {
        let temp_dir = TempDir::new().expect("temp dir");
        let project_dir = temp_dir.path().join("project");
        let sources_dir = project_dir.join("Sources");
        fs::create_dir_all(&sources_dir).expect("create Sources");
        let first = project_dir.join("First.xcstrings");
        let second = project_dir.join("Second.xcstrings");
        fs::write(
            &first,
            r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
        )
        .expect("write first");
        fs::write(
            &second,
            r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
        )
        .expect("write second");

        let config_path = project_dir.join("langcodec.toml");
        fs::write(
            &config_path,
            r#"[openai]
model = "gpt-5.4"

[annotate]
inputs = ["First.xcstrings", "Second.xcstrings"]
source_roots = ["Sources"]
source_lang = "en"
concurrency = 2
"#,
        )
        .expect("write config");

        let loaded = load_config(Some(config_path.to_str().expect("config path")))
            .expect("load config")
            .expect("config present");

        let runs = expand_annotate_invocations(
            &AnnotateOptions {
                input: None,
                source_roots: Vec::new(),
                output: None,
                source_lang: None,
                provider: None,
                model: None,
                concurrency: None,
                config: Some(config_path.to_string_lossy().to_string()),
                dry_run: false,
                check: false,
                ui_mode: UiMode::Plain,
            },
            Some(&loaded),
        )
        .expect("expand annotate invocations");

        // One run per configured input, in declaration order.
        assert_eq!(runs.len(), 2);
        assert_eq!(runs[0].input, first.to_string_lossy().to_string());
        assert_eq!(runs[1].input, second.to_string_lossy().to_string());
        assert_eq!(
            runs[0].source_roots,
            vec![sources_dir.to_string_lossy().to_string()]
        );
        assert_eq!(
            runs[1].source_roots,
            vec![sources_dir.to_string_lossy().to_string()]
        );
    }
2176
2177 #[test]
2178 fn expand_annotate_invocations_expands_globbed_config_inputs() {
2179 let temp_dir = TempDir::new().expect("temp dir");
2180 let project_dir = temp_dir.path().join("project");
2181 let sources_dir = project_dir.join("Sources");
2182 let app_dir = project_dir.join("App").join("Resources");
2183 let module_dir = project_dir.join("Modules").join("Feature");
2184 fs::create_dir_all(&sources_dir).expect("create Sources");
2185 fs::create_dir_all(&app_dir).expect("create app dir");
2186 fs::create_dir_all(&module_dir).expect("create module dir");
2187
2188 let first = app_dir.join("Localizable.xcstrings");
2189 let second = module_dir.join("Localizable.xcstrings");
2190 fs::write(
2191 &first,
2192 r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
2193 )
2194 .expect("write first");
2195 fs::write(
2196 &second,
2197 r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
2198 )
2199 .expect("write second");
2200
2201 let config_path = project_dir.join("langcodec.toml");
2202 fs::write(
2203 &config_path,
2204 r#"[openai]
2205model = "gpt-5.4"
2206
2207[annotate]
2208inputs = ["*/**/Localizable.xcstrings"]
2209source_roots = ["Sources"]
2210"#,
2211 )
2212 .expect("write config");
2213
2214 let loaded = load_config(Some(config_path.to_str().expect("config path")))
2215 .expect("load config")
2216 .expect("config present");
2217
2218 let runs = expand_annotate_invocations(
2219 &AnnotateOptions {
2220 input: None,
2221 source_roots: Vec::new(),
2222 output: None,
2223 source_lang: None,
2224 provider: None,
2225 model: None,
2226 concurrency: None,
2227 config: Some(config_path.to_string_lossy().to_string()),
2228 dry_run: false,
2229 check: false,
2230 ui_mode: UiMode::Plain,
2231 },
2232 Some(&loaded),
2233 )
2234 .expect("expand annotate invocations");
2235
2236 let mut inputs = runs.into_iter().map(|run| run.input).collect::<Vec<_>>();
2237 inputs.sort();
2238
2239 let mut expected = vec![
2240 first.to_string_lossy().to_string(),
2241 second.to_string_lossy().to_string(),
2242 ];
2243 expected.sort();
2244
2245 assert_eq!(inputs, expected);
2246 }
2247
2248 #[test]
2249 fn expand_annotate_invocations_rejects_input_and_inputs_together() {
2250 let temp_dir = TempDir::new().expect("temp dir");
2251 let config_path = temp_dir.path().join("langcodec.toml");
2252 fs::write(
2253 &config_path,
2254 r#"[annotate]
2255input = "Localizable.xcstrings"
2256inputs = ["One.xcstrings", "Two.xcstrings"]
2257source_roots = ["Sources"]
2258"#,
2259 )
2260 .expect("write config");
2261
2262 let loaded = load_config(Some(config_path.to_str().expect("config path")))
2263 .expect("load config")
2264 .expect("config present");
2265
2266 let err = expand_annotate_invocations(
2267 &AnnotateOptions {
2268 input: None,
2269 source_roots: Vec::new(),
2270 output: None,
2271 source_lang: None,
2272 provider: None,
2273 model: None,
2274 concurrency: None,
2275 config: Some(config_path.to_string_lossy().to_string()),
2276 dry_run: false,
2277 check: false,
2278 ui_mode: UiMode::Plain,
2279 },
2280 Some(&loaded),
2281 )
2282 .expect_err("expected conflicting config to fail");
2283
2284 assert!(err.contains("annotate.input and annotate.inputs"));
2285 }
2286
2287 #[test]
2288 fn expand_annotate_invocations_rejects_shared_output_for_multiple_inputs() {
2289 let temp_dir = TempDir::new().expect("temp dir");
2290 let project_dir = temp_dir.path().join("project");
2291 let sources_dir = project_dir.join("Sources");
2292 fs::create_dir_all(&sources_dir).expect("create Sources");
2293 fs::write(
2294 project_dir.join("One.xcstrings"),
2295 r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
2296 )
2297 .expect("write One");
2298 fs::write(
2299 project_dir.join("Two.xcstrings"),
2300 r#"{"sourceLanguage":"en","version":"1.0","strings":{}}"#,
2301 )
2302 .expect("write Two");
2303
2304 let config_path = project_dir.join("langcodec.toml");
2305 fs::write(
2306 &config_path,
2307 r#"[openai]
2308model = "gpt-5.4"
2309
2310[annotate]
2311inputs = ["One.xcstrings", "Two.xcstrings"]
2312source_roots = ["Sources"]
2313output = "Annotated.xcstrings"
2314"#,
2315 )
2316 .expect("write config");
2317
2318 let loaded = load_config(Some(config_path.to_str().expect("config path")))
2319 .expect("load config")
2320 .expect("config present");
2321
2322 let err = expand_annotate_invocations(
2323 &AnnotateOptions {
2324 input: None,
2325 source_roots: Vec::new(),
2326 output: None,
2327 source_lang: None,
2328 provider: None,
2329 model: None,
2330 concurrency: None,
2331 config: Some(config_path.to_string_lossy().to_string()),
2332 dry_run: false,
2333 check: false,
2334 ui_mode: UiMode::Plain,
2335 },
2336 Some(&loaded),
2337 )
2338 .expect_err("expected multiple input/output conflict");
2339
2340 assert!(err.contains("annotate.inputs cannot be combined"));
2341 }
2342
2343 #[tokio::test]
2344 async fn mentra_backend_requests_files_tool() {
2345 let requests = Arc::new(Mutex::new(Vec::new()));
2346 let provider = RecordingProvider {
2347 requests: Arc::clone(&requests),
2348 };
2349 let runtime = Runtime::builder()
2350 .with_provider_instance(provider)
2351 .build()
2352 .expect("build runtime");
2353 let backend = MentraAnnotatorBackend::from_runtime(
2354 runtime,
2355 ModelInfo::new("test-model", BuiltinProvider::OpenAI),
2356 PathBuf::from("/tmp/project"),
2357 );
2358
2359 let response = backend
2360 .annotate(
2361 AnnotationRequest {
2362 key: "start".to_string(),
2363 source_lang: "en".to_string(),
2364 source_value: "Start".to_string(),
2365 existing_comment: None,
2366 source_roots: vec!["Sources".to_string()],
2367 },
2368 None,
2369 )
2370 .await
2371 .expect("annotate")
2372 .expect("response");
2373
2374 assert_eq!(response.comment, "A button label that starts the game.");
2375 let recorded = requests.lock().expect("requests lock");
2376 assert_eq!(recorded.len(), 1);
2377 let tool_names = recorded[0]
2378 .tools
2379 .iter()
2380 .map(|tool| tool.name.as_str())
2381 .collect::<Vec<_>>();
2382 assert!(tool_names.contains(&"files"));
2383 assert!(tool_names.contains(&"shell"));
2384 }
2385
    /// Recovery test (named for mentra 0.3.0 behavior): turn 1 streams a
    /// `files` tool call whose JSON arguments are truncated mid-object, and
    /// the agent is expected to report the invalid arguments back to the
    /// provider and still obtain a valid annotation on the follow-up turn.
    #[tokio::test]
    async fn old_tool_enabled_annotate_flow_recovers_from_malformed_tool_json_on_mentra_030() {
        let requests = Arc::new(Mutex::new(Vec::new()));
        // Scripted provider turns, consumed in order by the provider stub.
        let scripts = VecDeque::from([
            // Turn 1: a streamed tool-use block whose input JSON is cut off
            // (no closing quote/braces), i.e. unparseable tool arguments.
            vec![
                ProviderEvent::MessageStarted {
                    id: "msg-1".to_string(),
                    model: "test-model".to_string(),
                    role: Role::Assistant,
                },
                ProviderEvent::ContentBlockStarted {
                    index: 0,
                    kind: ContentBlockStart::ToolUse {
                        id: "tool-1".to_string(),
                        name: "files".to_string(),
                    },
                },
                ProviderEvent::ContentBlockDelta {
                    index: 0,
                    // Deliberately truncated JSON payload.
                    delta: ContentBlockDelta::ToolUseInputJson(
                        r#"{"path":"Sources/GameView.swift"#.to_string(),
                    ),
                },
                ProviderEvent::ContentBlockStopped { index: 0 },
                ProviderEvent::MessageStopped,
            ],
            // Turn 2: after the failure is surfaced, the model answers with a
            // well-formed annotation JSON object.
            Response {
                id: "resp-2".to_string(),
                model: "test-model".to_string(),
                role: Role::Assistant,
                content: vec![ContentBlock::text(
                    r#"{"comment":"A button label that starts the game.","confidence":"high"}"#,
                )],
                stop_reason: Some("end_turn".to_string()),
                usage: None,
            }
            .into_provider_events(),
        ]);
        let provider = ScriptedStreamingProvider {
            requests: Arc::clone(&requests),
            scripts: Arc::new(Mutex::new(scripts)),
        };
        let runtime = Runtime::builder()
            .with_provider_instance(provider)
            .build()
            .expect("build runtime");
        let mut agent = runtime
            .spawn_with_config(
                "annotate",
                ModelInfo::new("test-model", BuiltinProvider::OpenAI),
                build_agent_config(Path::new("/tmp/project")),
            )
            .expect("spawn agent");
        let request = AnnotationRequest {
            key: "start".to_string(),
            source_lang: "en".to_string(),
            source_value: "Start".to_string(),
            existing_comment: None,
            source_roots: vec!["Sources".to_string()],
        };

        let response = agent
            .run(
                vec![ContentBlock::text(build_annotation_prompt(&request))],
                RunOptions {
                    tool_budget: Some(DEFAULT_TOOL_BUDGET),
                    ..RunOptions::default()
                },
            )
            .await
            .expect("run annotate");
        let parsed = parse_annotation_response(&response.text()).expect("parse annotation");

        // The annotation must come through despite the failed tool call.
        assert_eq!(parsed.comment, "A button label that starts the game.");
        let recorded = requests.lock().expect("requests lock");
        // Exactly two provider round-trips: the failed tool turn and the retry.
        assert_eq!(recorded.len(), 2);
        // The retry request must tell the model that its tool-call arguments
        // were invalid JSON, so it can respond without the tool result.
        assert!(
            recorded[1]
                .messages
                .iter()
                .flat_map(|message| message.content.iter())
                .any(|block| matches!(block, ContentBlock::Text { text } if text.contains("One or more tool calls could not be executed because their JSON arguments were invalid.")))
        );
    }
2470}