1use anyhow::Result;
34use std::fmt::Write;
35use tokmd_analysis_types::{
36 AnalysisReceipt, EffortDriverDirection, EffortEstimateReport, FileStatRow,
37};
38use tokmd_types::AnalysisFormat;
39
/// Output produced by [`render`]: either UTF-8 text or raw bytes.
/// Every format here except MIDI renders to text.
pub enum RenderedOutput {
    /// Textual formats (Markdown, JSON, JSON-LD, XML, SVG, Mermaid, OBJ, tree, HTML).
    Text(String),
    /// Binary formats; the caller is expected to write the bytes verbatim.
    Binary(Vec<u8>),
}
44
45pub fn render(receipt: &AnalysisReceipt, format: AnalysisFormat) -> Result<RenderedOutput> {
46 match format {
47 AnalysisFormat::Md => Ok(RenderedOutput::Text(render_md(receipt))),
48 AnalysisFormat::Json => Ok(RenderedOutput::Text(serde_json::to_string_pretty(receipt)?)),
49 AnalysisFormat::Jsonld => Ok(RenderedOutput::Text(render_jsonld(receipt))),
50 AnalysisFormat::Xml => Ok(RenderedOutput::Text(render_xml(receipt))),
51 AnalysisFormat::Svg => Ok(RenderedOutput::Text(render_svg(receipt))),
52 AnalysisFormat::Mermaid => Ok(RenderedOutput::Text(render_mermaid(receipt))),
53 AnalysisFormat::Obj => Ok(RenderedOutput::Text(render_obj(receipt)?)),
54 AnalysisFormat::Midi => Ok(RenderedOutput::Binary(render_midi(receipt)?)),
55 AnalysisFormat::Tree => Ok(RenderedOutput::Text(render_tree(receipt))),
56 AnalysisFormat::Html => Ok(RenderedOutput::Text(render_html(receipt))),
57 }
58}
59
/// Renders the full Markdown report for an analysis receipt.
///
/// Emits a `# tokmd analysis` header followed by one section per payload
/// present on the receipt (archetype, topics, entropy, license, git metrics,
/// duplicates, complexity, API surface, ...). Sections whose data is absent
/// are skipped entirely, so the report only shows what was computed.
/// Most tables are capped via `take(10)` / `take(20)` to keep output short.
/// `writeln!` into a `String` cannot fail, so results are discarded with `let _`.
fn render_md(receipt: &AnalysisReceipt) -> String {
    let mut out = String::new();
    // Header and run parameters.
    out.push_str("# tokmd analysis\n\n");
    let _ = writeln!(out, "Preset: `{}`\n", receipt.args.preset);

    if !receipt.source.inputs.is_empty() {
        out.push_str("## Inputs\n\n");
        for input in &receipt.source.inputs {
            let _ = writeln!(out, "- `{}`", input);
        }
        out.push('\n');
    }

    // Optional classifier sections: emitted only when present on the receipt.
    if let Some(archetype) = &receipt.archetype {
        out.push_str("## Archetype\n\n");
        let _ = writeln!(out, "- Kind: `{}`", archetype.kind);
        if !archetype.evidence.is_empty() {
            let _ = writeln!(out, "- Evidence: `{}`", archetype.evidence.join("`, `"));
        }
        out.push('\n');
    }

    if let Some(topics) = &receipt.topics {
        out.push_str("## Topics\n\n");
        if !topics.overall.is_empty() {
            let _ = writeln!(
                out,
                "- Overall: `{}`",
                topics
                    .overall
                    .iter()
                    .map(|t| t.term.as_str())
                    .collect::<Vec<_>>()
                    .join(", ")
            );
        }
        for (module, terms) in &topics.per_module {
            if terms.is_empty() {
                continue;
            }
            let line = terms
                .iter()
                .map(|t| t.term.as_str())
                .collect::<Vec<_>>()
                .join(", ");
            let _ = writeln!(out, "- `{}`: {}", module, line);
        }
        out.push('\n');
    }

    if let Some(entropy) = &receipt.entropy {
        out.push_str("## Entropy profiling\n\n");
        if entropy.suspects.is_empty() {
            out.push_str("- No entropy outliers detected.\n\n");
        } else {
            out.push_str("|Path|Module|Entropy|Sample bytes|Class|\n");
            out.push_str("|---|---|---:|---:|---|\n");
            for row in entropy.suspects.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|{:?}|",
                    row.path,
                    row.module,
                    fmt_f64(row.entropy_bits_per_byte as f64, 2),
                    row.sample_bytes,
                    row.class
                );
            }
            out.push('\n');
        }
    }

    if let Some(license) = &receipt.license {
        out.push_str("## License radar\n\n");
        if let Some(effective) = &license.effective {
            let _ = writeln!(out, "- Effective: `{}`", effective);
        }
        out.push_str("- Heuristic detection; not legal advice.\n\n");
        if !license.findings.is_empty() {
            out.push_str("|SPDX|Confidence|Source|Kind|\n");
            out.push_str("|---|---:|---|---|\n");
            for row in license.findings.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{:?}|",
                    row.spdx,
                    fmt_f64(row.confidence as f64, 2),
                    row.source_path,
                    row.source_kind
                );
            }
            out.push('\n');
        }
    }

    if let Some(fingerprint) = &receipt.corporate_fingerprint {
        out.push_str("## Corporate fingerprint\n\n");
        if fingerprint.domains.is_empty() {
            out.push_str("- No commit domains detected.\n\n");
        } else {
            out.push_str("|Domain|Commits|Pct|\n");
            out.push_str("|---|---:|---:|\n");
            for row in fingerprint.domains.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|",
                    row.domain,
                    row.commits,
                    fmt_pct(row.pct as f64)
                );
            }
            out.push('\n');
        }
    }

    if let Some(churn) = &receipt.predictive_churn {
        out.push_str("## Predictive churn\n\n");
        // Sort steepest-growing modules first; tie-break on name for stable output.
        let mut rows: Vec<_> = churn.per_module.iter().collect();
        rows.sort_by(|a, b| {
            b.1.slope
                .partial_cmp(&a.1.slope)
                .unwrap_or(std::cmp::Ordering::Equal)
                .then_with(|| a.0.cmp(b.0))
        });
        if rows.is_empty() {
            out.push_str("- No churn signals detected.\n\n");
        } else {
            out.push_str("|Module|Slope|R²|Recent change|Class|\n");
            out.push_str("|---|---:|---:|---:|---|\n");
            for (module, trend) in rows.into_iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|{:?}|",
                    module,
                    fmt_f64(trend.slope, 4),
                    fmt_f64(trend.r2, 2),
                    trend.recent_change,
                    trend.classification
                );
            }
            out.push('\n');
        }
    }

    // Derived metrics drive the bulk of the report.
    if let Some(derived) = &receipt.derived {
        out.push_str("## Totals\n\n");
        out.push_str("|Files|Code|Comments|Blanks|Lines|Bytes|Tokens|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|\n");
        let _ = writeln!(
            out,
            "|{}|{}|{}|{}|{}|{}|{}|\n",
            derived.totals.files,
            derived.totals.code,
            derived.totals.comments,
            derived.totals.blanks,
            derived.totals.lines,
            derived.totals.bytes,
            derived.totals.tokens
        );

        out.push_str("## Ratios\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        let _ = writeln!(
            out,
            "|Doc density|{}|",
            fmt_pct(derived.doc_density.total.ratio)
        );
        let _ = writeln!(
            out,
            "|Whitespace ratio|{}|",
            fmt_pct(derived.whitespace.total.ratio)
        );
        let _ = writeln!(
            out,
            "|Bytes per line|{}|\n",
            fmt_f64(derived.verbosity.total.rate, 2)
        );

        out.push_str("### Doc density by language\n\n");
        out.push_str("|Lang|Doc%|Comments|Code|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.doc_density.by_lang.iter().take(10) {
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                // Code column = denominator minus comment lines; saturate at 0.
                row.denominator.saturating_sub(row.numerator)
            );
        }
        out.push('\n');

        out.push_str("### Whitespace ratio by language\n\n");
        out.push_str("|Lang|Blank%|Blanks|Code+Comments|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.whitespace.by_lang.iter().take(10) {
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                row.denominator
            );
        }
        out.push('\n');

        out.push_str("### Verbosity by language\n\n");
        out.push_str("|Lang|Bytes/Line|Bytes|Lines|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.verbosity.by_lang.iter().take(10) {
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|",
                row.key,
                fmt_f64(row.rate, 2),
                row.numerator,
                row.denominator
            );
        }
        out.push('\n');

        out.push_str("## Distribution\n\n");
        out.push_str("|Count|Min|Max|Mean|Median|P90|P99|Gini|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|---:|\n");
        let _ = writeln!(
            out,
            "|{}|{}|{}|{}|{}|{}|{}|{}|\n",
            derived.distribution.count,
            derived.distribution.min,
            derived.distribution.max,
            fmt_f64(derived.distribution.mean, 2),
            fmt_f64(derived.distribution.median, 2),
            fmt_f64(derived.distribution.p90, 2),
            fmt_f64(derived.distribution.p99, 2),
            fmt_f64(derived.distribution.gini, 4)
        );

        out.push_str("## File size histogram\n\n");
        out.push_str("|Bucket|Min|Max|Files|Pct|\n");
        out.push_str("|---|---:|---:|---:|---:|\n");
        for bucket in &derived.histogram {
            // An open-ended top bucket has no max; render it as infinity.
            let max = bucket
                .max
                .map(|v| v.to_string())
                .unwrap_or_else(|| "∞".to_string());
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|{}|",
                bucket.label,
                bucket.min,
                max,
                bucket.files,
                fmt_pct(bucket.pct)
            );
        }
        out.push('\n');

        out.push_str("## Top offenders\n\n");
        out.push_str("### Largest files by lines\n\n");
        out.push_str(&render_file_table(&derived.top.largest_lines));
        out.push('\n');

        out.push_str("### Largest files by tokens\n\n");
        out.push_str(&render_file_table(&derived.top.largest_tokens));
        out.push('\n');

        out.push_str("### Largest files by bytes\n\n");
        out.push_str(&render_file_table(&derived.top.largest_bytes));
        out.push('\n');

        out.push_str("### Least documented (min LOC)\n\n");
        out.push_str(&render_file_table(&derived.top.least_documented));
        out.push('\n');

        out.push_str("### Most dense (bytes/line)\n\n");
        out.push_str(&render_file_table(&derived.top.most_dense));
        out.push('\n');

        out.push_str("## Structure\n\n");
        let _ = writeln!(
            out,
            "- Max depth: `{}`\n- Avg depth: `{}`\n",
            derived.nesting.max,
            fmt_f64(derived.nesting.avg, 2)
        );

        out.push_str("## Test density\n\n");
        let _ = writeln!(
            out,
            "- Test lines: `{}`\n- Prod lines: `{}`\n- Test ratio: `{}`\n",
            derived.test_density.test_lines,
            derived.test_density.prod_lines,
            fmt_pct(derived.test_density.ratio)
        );

        if let Some(todo) = &derived.todo {
            out.push_str("## TODOs\n\n");
            let _ = writeln!(
                out,
                "- Total: `{}`\n- Density (per KLOC): `{}`\n",
                todo.total,
                fmt_f64(todo.density_per_kloc, 2)
            );
            out.push_str("|Tag|Count|\n");
            out.push_str("|---|---:|\n");
            for tag in &todo.tags {
                let _ = writeln!(out, "|{}|{}|", tag.tag, tag.count);
            }
            out.push('\n');
        }

        out.push_str("## Boilerplate ratio\n\n");
        let _ = writeln!(
            out,
            "- Infra lines: `{}`\n- Logic lines: `{}`\n- Infra ratio: `{}`\n",
            derived.boilerplate.infra_lines,
            derived.boilerplate.logic_lines,
            fmt_pct(derived.boilerplate.ratio)
        );

        out.push_str("## Polyglot\n\n");
        let _ = writeln!(
            out,
            "- Languages: `{}`\n- Dominant: `{}` ({})\n- Entropy: `{}`\n",
            derived.polyglot.lang_count,
            derived.polyglot.dominant_lang,
            fmt_pct(derived.polyglot.dominant_pct),
            fmt_f64(derived.polyglot.entropy, 4)
        );

        out.push_str("## Reading time\n\n");
        let _ = writeln!(
            out,
            "- Minutes: `{}` ({} lines/min)\n",
            fmt_f64(derived.reading_time.minutes, 2),
            derived.reading_time.lines_per_minute
        );

        if let Some(context) = &derived.context_window {
            out.push_str("## Context window\n\n");
            let _ = writeln!(
                out,
                "- Window tokens: `{}`\n- Total tokens: `{}`\n- Utilization: `{}`\n- Fits: `{}`\n",
                context.window_tokens,
                context.total_tokens,
                fmt_pct(context.pct),
                context.fits
            );
        }

        // Prefer the modern effort model; fall back to legacy COCOMO output.
        if let Some(effort) = &receipt.effort {
            render_effort_report(&mut out, effort);
        } else if let Some(cocomo) = &derived.cocomo {
            render_legacy_cocomo_report(&mut out, derived, cocomo);
        }

        out.push_str("## Integrity\n\n");
        let _ = writeln!(
            out,
            "- Hash: `{}` (`{}`)\n- Entries: `{}`\n",
            derived.integrity.hash, derived.integrity.algo, derived.integrity.entries
        );
    }

    if let Some(assets) = &receipt.assets {
        out.push_str("## Assets\n\n");
        let _ = writeln!(
            out,
            "- Total files: `{}`\n- Total bytes: `{}`\n",
            assets.total_files, assets.total_bytes
        );
        if !assets.categories.is_empty() {
            out.push_str("|Category|Files|Bytes|Extensions|\n");
            out.push_str("|---|---:|---:|---|\n");
            for row in &assets.categories {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    row.category,
                    row.files,
                    row.bytes,
                    row.extensions.join(", ")
                );
            }
            out.push('\n');
        }
        if !assets.top_files.is_empty() {
            out.push_str("|File|Bytes|Category|\n");
            out.push_str("|---|---:|---|\n");
            for row in &assets.top_files {
                let _ = writeln!(out, "|{}|{}|{}|", row.path, row.bytes, row.category);
            }
            out.push('\n');
        }
    }

    if let Some(deps) = &receipt.deps {
        out.push_str("## Dependencies\n\n");
        let _ = writeln!(out, "- Total: `{}`\n", deps.total);
        if !deps.lockfiles.is_empty() {
            out.push_str("|Lockfile|Kind|Dependencies|\n");
            out.push_str("|---|---|---:|\n");
            for row in &deps.lockfiles {
                let _ = writeln!(out, "|{}|{}|{}|", row.path, row.kind, row.dependencies);
            }
            out.push('\n');
        }
    }

    if let Some(git) = &receipt.git {
        out.push_str("## Git metrics\n\n");
        let _ = writeln!(
            out,
            "- Commits scanned: `{}`\n- Files seen: `{}`\n",
            git.commits_scanned, git.files_seen
        );
        if !git.hotspots.is_empty() {
            out.push_str("### Hotspots\n\n");
            out.push_str("|File|Commits|Lines|Score|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.hotspots.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    row.path, row.commits, row.lines, row.score
                );
            }
            out.push('\n');
        }
        if !git.bus_factor.is_empty() {
            out.push_str("### Bus factor\n\n");
            out.push_str("|Module|Authors|\n");
            out.push_str("|---|---:|\n");
            for row in git.bus_factor.iter().take(10) {
                let _ = writeln!(out, "|{}|{}|", row.module, row.authors);
            }
            out.push('\n');
        }
        out.push_str("### Freshness\n\n");
        let _ = writeln!(
            out,
            "- Stale threshold (days): `{}`\n- Stale files: `{}` / `{}` ({})\n",
            git.freshness.threshold_days,
            git.freshness.stale_files,
            git.freshness.total_files,
            fmt_pct(git.freshness.stale_pct)
        );
        if !git.freshness.by_module.is_empty() {
            out.push_str("|Module|Avg days|P90 days|Stale%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.freshness.by_module.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    row.module,
                    fmt_f64(row.avg_days, 2),
                    fmt_f64(row.p90_days, 2),
                    fmt_pct(row.stale_pct)
                );
            }
            out.push('\n');
        }
        if let Some(age) = &git.age_distribution {
            out.push_str("### Code age\n\n");
            let _ = writeln!(
                out,
                "- Refresh trend: `{:?}` (recent: `{}`, prior: `{}`)\n",
                age.refresh_trend, age.recent_refreshes, age.prior_refreshes
            );
            if !age.buckets.is_empty() {
                out.push_str("|Bucket|Min days|Max days|Files|Pct|\n");
                out.push_str("|---|---:|---:|---:|---:|\n");
                for bucket in &age.buckets {
                    // Open-ended age bucket renders with an infinity max.
                    let max = bucket
                        .max_days
                        .map(|v| v.to_string())
                        .unwrap_or_else(|| "∞".to_string());
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|{}|",
                        bucket.label,
                        bucket.min_days,
                        max,
                        bucket.files,
                        fmt_pct(bucket.pct)
                    );
                }
                out.push('\n');
            }
        }
        if !git.coupling.is_empty() {
            // Pairs seen fewer than twice are too noisy to report.
            let filtered: Vec<_> = git.coupling.iter().filter(|r| r.count >= 2).collect();
            if !filtered.is_empty() {
                out.push_str("### Coupling\n\n");
                out.push_str("|Left|Right|Count|Jaccard|Lift|\n");
                out.push_str("|---|---|---:|---:|---:|\n");
                for row in filtered.iter().take(10) {
                    let jaccard = row
                        .jaccard
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    let lift = row
                        .lift
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|{}|",
                        row.left, row.right, row.count, jaccard, lift
                    );
                }
                out.push('\n');
            }
        }

        if let Some(intent) = &git.intent {
            out.push_str("### Commit intent\n\n");
            out.push_str("|Type|Count|\n");
            out.push_str("|---|---:|\n");
            let o = &intent.overall;
            let entries = [
                ("feat", o.feat),
                ("fix", o.fix),
                ("refactor", o.refactor),
                ("docs", o.docs),
                ("test", o.test),
                ("chore", o.chore),
                ("ci", o.ci),
                ("build", o.build),
                ("perf", o.perf),
                ("style", o.style),
                ("revert", o.revert),
                ("other", o.other),
            ];
            // Only emit rows for intent types that actually occurred.
            for (name, count) in entries {
                if count > 0 {
                    let _ = writeln!(out, "|{}|{}|", name, count);
                }
            }
            let _ = writeln!(out, "|**total**|{}|", o.total);
            let _ = writeln!(out, "\n- Unknown: `{}`", fmt_pct(intent.unknown_pct));
            if let Some(cr) = intent.corrective_ratio {
                let _ = writeln!(
                    out,
                    "- Corrective ratio (fix+revert/total): `{}`",
                    fmt_pct(cr)
                );
            }
            out.push('\n');

            // Rank modules by the share of corrective (fix+revert) commits.
            let mut maintenance: Vec<_> = intent
                .by_module
                .iter()
                .filter(|m| m.counts.total > 0)
                .map(|m| {
                    let fix_revert = m.counts.fix + m.counts.revert;
                    let share = fix_revert as f64 / m.counts.total as f64;
                    (m, share)
                })
                .filter(|(_, share)| *share > 0.0)
                .collect();
            maintenance.sort_by(|a, b| {
                b.1.partial_cmp(&a.1)
                    .unwrap_or(std::cmp::Ordering::Equal)
                    .then_with(|| a.0.module.cmp(&b.0.module))
            });

            if !maintenance.is_empty() {
                out.push_str("#### Maintenance hotspots\n\n");
                out.push_str("|Module|Fix+Revert|Total|Share|\n");
                out.push_str("|---|---:|---:|---:|\n");
                for (m, share) in maintenance.iter().take(10) {
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|",
                        m.module,
                        m.counts.fix + m.counts.revert,
                        m.counts.total,
                        fmt_pct(*share)
                    );
                }
                out.push('\n');
            }
        }
    }

    if let Some(imports) = &receipt.imports {
        out.push_str("## Imports\n\n");
        let _ = writeln!(out, "- Granularity: `{}`\n", imports.granularity);
        if !imports.edges.is_empty() {
            out.push_str("|From|To|Count|\n");
            out.push_str("|---|---|---:|\n");
            for row in imports.edges.iter().take(20) {
                let _ = writeln!(out, "|{}|{}|{}|", row.from, row.to, row.count);
            }
            out.push('\n');
        }
    }

    if let Some(dup) = &receipt.dup {
        out.push_str("## Duplicates\n\n");
        let _ = writeln!(
            out,
            "- Wasted bytes: `{}`\n- Strategy: `{}`\n",
            dup.wasted_bytes, dup.strategy
        );
        if let Some(density) = &dup.density {
            out.push_str("### Duplication density\n\n");
            let _ = writeln!(
                out,
                "- Duplicate groups: `{}`\n- Duplicate files: `{}`\n- Duplicated bytes: `{}`\n- Waste vs codebase: `{}`\n",
                density.duplicate_groups,
                density.duplicate_files,
                density.duplicated_bytes,
                fmt_pct(density.wasted_pct_of_codebase)
            );
            if !density.by_module.is_empty() {
                out.push_str(
                    "|Module|Dup files|Wasted files|Dup bytes|Wasted bytes|Module bytes|Density|\n",
                );
                out.push_str("|---|---:|---:|---:|---:|---:|---:|\n");
                for row in density.by_module.iter().take(10) {
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|{}|{}|{}|",
                        row.module,
                        row.duplicate_files,
                        row.wasted_files,
                        row.duplicated_bytes,
                        row.wasted_bytes,
                        row.module_bytes,
                        fmt_pct(row.density)
                    );
                }
                out.push('\n');
            }
        }
        if !dup.groups.is_empty() {
            out.push_str("|Hash|Bytes|Files|\n");
            out.push_str("|---|---:|---:|\n");
            for row in dup.groups.iter().take(10) {
                let _ = writeln!(out, "|{}|{}|{}|", row.hash, row.bytes, row.files.len());
            }
            out.push('\n');
        }

        if let Some(near) = &dup.near {
            out.push_str("### Near duplicates\n\n");
            let _ = writeln!(
                out,
                "- Files analyzed: `{}`\n- Files skipped: `{}`\n- Threshold: `{}`\n- Scope: `{:?}`",
                near.files_analyzed,
                near.files_skipped,
                fmt_f64(near.params.threshold, 2),
                near.params.scope
            );
            if let Some(eligible) = near.eligible_files {
                let _ = writeln!(out, "- Eligible files: `{}`", eligible);
            }
            if near.truncated {
                out.push_str("- **Warning**: Pair list truncated by `max_pairs` limit.\n");
            }
            out.push('\n');

            if let Some(clusters) = &near.clusters
                && !clusters.is_empty()
            {
                out.push_str("#### Clusters\n\n");
                out.push_str("|#|Files|Max Similarity|Representative|Pairs|\n");
                out.push_str("|---:|---:|---:|---|---:|\n");
                for (i, cluster) in clusters.iter().enumerate() {
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|{}|",
                        i + 1,
                        cluster.files.len(),
                        fmt_pct(cluster.max_similarity),
                        cluster.representative,
                        cluster.pair_count
                    );
                }
                out.push('\n');
            }

            if near.pairs.is_empty() {
                out.push_str("- No near-duplicate pairs detected.\n\n");
            } else {
                out.push_str("#### Pairs\n\n");
                out.push_str("|Left|Right|Similarity|Shared FPs|\n");
                out.push_str("|---|---|---:|---:|\n");
                for pair in near.pairs.iter().take(20) {
                    let _ = writeln!(
                        out,
                        "|{}|{}|{}|{}|",
                        pair.left,
                        pair.right,
                        fmt_pct(pair.similarity),
                        pair.shared_fingerprints
                    );
                }
                out.push('\n');
            }

            if let Some(stats) = &near.stats {
                let _ = writeln!(
                    out,
                    "> Near-dup stats: fingerprinting {}ms, pairing {}ms, {} bytes processed\n",
                    stats.fingerprinting_ms, stats.pairing_ms, stats.bytes_processed
                );
            }
        }
    }

    if let Some(cx) = &receipt.complexity {
        out.push_str("## Complexity\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        let _ = writeln!(out, "|Total functions|{}|", cx.total_functions);
        let _ = writeln!(
            out,
            "|Avg function length|{}|",
            fmt_f64(cx.avg_function_length, 1)
        );
        let _ = writeln!(out, "|Max function length|{}|", cx.max_function_length);
        let _ = writeln!(out, "|Avg cyclomatic|{}|", fmt_f64(cx.avg_cyclomatic, 2));
        let _ = writeln!(out, "|Max cyclomatic|{}|", cx.max_cyclomatic);
        // Cognitive/nesting metrics are optional and only shown when computed.
        if let Some(cog) = cx.avg_cognitive {
            let _ = writeln!(out, "|Avg cognitive|{}|", fmt_f64(cog, 2));
        }
        if let Some(cog) = cx.max_cognitive {
            let _ = writeln!(out, "|Max cognitive|{}|", cog);
        }
        if let Some(avg_nesting) = cx.avg_nesting_depth {
            let _ = writeln!(out, "|Avg nesting depth|{}|", fmt_f64(avg_nesting, 2));
        }
        if let Some(max_nesting) = cx.max_nesting_depth {
            let _ = writeln!(out, "|Max nesting depth|{}|", max_nesting);
        }
        let _ = writeln!(out, "|High risk files|{}|\n", cx.high_risk_files);

        if !cx.files.is_empty() {
            out.push_str("### Top complex files\n\n");
            out.push_str("|Path|CC|Functions|Max fn length|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for f in cx.files.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    f.path, f.cyclomatic_complexity, f.function_count, f.max_function_length
                );
            }
            out.push('\n');
        }
    }

    if let Some(api) = &receipt.api_surface {
        out.push_str("## API surface\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        let _ = writeln!(out, "|Total items|{}|", api.total_items);
        let _ = writeln!(out, "|Public items|{}|", api.public_items);
        let _ = writeln!(out, "|Internal items|{}|", api.internal_items);
        let _ = writeln!(out, "|Public ratio|{}|", fmt_pct(api.public_ratio));
        let _ = writeln!(
            out,
            "|Documented ratio|{}|\n",
            fmt_pct(api.documented_ratio)
        );

        if !api.by_language.is_empty() {
            out.push_str("### By language\n\n");
            out.push_str("|Language|Total|Public|Internal|Public%|\n");
            out.push_str("|---|---:|---:|---:|---:|\n");
            for (lang, data) in &api.by_language {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|{}|",
                    lang,
                    data.total_items,
                    data.public_items,
                    data.internal_items,
                    fmt_pct(data.public_ratio)
                );
            }
            out.push('\n');
        }

        if !api.by_module.is_empty() {
            out.push_str("### By module\n\n");
            out.push_str("|Module|Total|Public|Public%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in api.by_module.iter().take(20) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    row.module,
                    row.total_items,
                    row.public_items,
                    fmt_pct(row.public_ratio)
                );
            }
            out.push('\n');
        }

        if !api.top_exporters.is_empty() {
            out.push_str("### Top exporters\n\n");
            out.push_str("|Path|Language|Public|Total|\n");
            out.push_str("|---|---|---:|---:|\n");
            for item in api.top_exporters.iter().take(10) {
                let _ = writeln!(
                    out,
                    "|{}|{}|{}|{}|",
                    item.path, item.lang, item.public_items, item.total_items
                );
            }
            out.push('\n');
        }
    }

    if let Some(fun) = &receipt.fun
        && let Some(label) = &fun.eco_label
    {
        out.push_str("## Eco label\n\n");
        let _ = writeln!(
            out,
            "- Label: `{}`\n- Score: `{}`\n- Bytes: `{}`\n- Notes: `{}`\n",
            label.label,
            fmt_f64(label.score, 1),
            label.bytes,
            label.notes
        );
    }

    out
}
906
907fn render_file_table(rows: &[FileStatRow]) -> String {
908 use std::fmt::Write;
909 let mut out = String::with_capacity((rows.len() + 3) * 80);
911 out.push_str("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|\n");
912 out.push_str("|---|---|---:|---:|---:|---:|---:|---:|\n");
913 for row in rows {
914 let _ = writeln!(
915 out,
916 "|{}|{}|{}|{}|{}|{}|{}|{}|",
917 row.path,
918 row.lang,
919 row.lines,
920 row.code,
921 row.bytes,
922 row.tokens,
923 row.doc_pct.map(fmt_pct).unwrap_or_else(|| "-".to_string()),
924 row.bytes_per_line
925 .map(|v| fmt_f64(v, 2))
926 .unwrap_or_else(|| "-".to_string())
927 );
928 }
929 out
930}
931
/// Appends the modern "## Effort estimate" Markdown section to `out`.
///
/// Covers, in order: the size basis (authored vs generated vs vendored
/// lines), per-tag size breakdown, headline p50/low/p80 estimates for
/// effort/schedule/staffing, confidence reasoning, inferred effort drivers,
/// assumptions and overrides, and a delta versus a baseline receipt when one
/// is available.
fn render_effort_report(out: &mut String, effort: &EffortEstimateReport) {
    out.push_str("## Effort estimate\n\n");

    out.push_str("### Size basis\n\n");
    let _ = writeln!(
        out,
        "- Model: `{}`\n- Total LOC lines: `{}`\n- Authored LOC lines: `{}`\n- Generated LOC lines: `{}`\n- Vendored LOC lines: `{}`\n- Authoring KLOC: `{}`\n- Total KLOC: `{}`\n- Generated share: `{}`\n- Vendored share: `{}`\n- Classification confidence: `{}`\n",
        effort.model,
        effort.size_basis.total_lines,
        effort.size_basis.authored_lines,
        effort.size_basis.generated_lines,
        effort.size_basis.vendored_lines,
        fmt_f64(effort.size_basis.kloc_authored, 4),
        fmt_f64(effort.size_basis.kloc_total, 4),
        fmt_pct(effort.size_basis.generated_pct),
        fmt_pct(effort.size_basis.vendored_pct),
        effort.size_basis.classification_confidence
    );

    if !effort.size_basis.by_tag.is_empty() {
        out.push_str("### Size by tag\n\n");
        out.push_str("|Tag|Lines|Authored|Share|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in &effort.size_basis.by_tag {
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|",
                row.tag,
                row.lines,
                row.authored_lines,
                fmt_pct(row.pct_of_total)
            );
        }
        out.push('\n');
    }

    out.push_str("### Headline\n\n");
    let _ = writeln!(
        out,
        "- Effort p50: `{}` person-months (low `{}` / p80 `{}`)\n- Schedule p50: `{}` months (low `{}` / p80 `{}`)\n- Staff p50: `{}` FTE (low `{}` / p80 `{}`)\n",
        fmt_f64(effort.results.effort_pm_p50, 4),
        fmt_f64(effort.results.effort_pm_low, 4),
        fmt_f64(effort.results.effort_pm_p80, 4),
        fmt_f64(effort.results.schedule_months_p50, 4),
        fmt_f64(effort.results.schedule_months_low, 4),
        fmt_f64(effort.results.schedule_months_p80, 4),
        fmt_f64(effort.results.staff_p50, 4),
        fmt_f64(effort.results.staff_low, 4),
        fmt_f64(effort.results.staff_p80, 4),
    );

    out.push_str("### Why\n\n");
    let _ = writeln!(out, "- Confidence level: `{}`", effort.confidence.level);
    if let Some(coverage) = effort.confidence.data_coverage_pct {
        let _ = writeln!(out, "- Data coverage: `{}`", fmt_pct(coverage));
    }
    if !effort.confidence.reasons.is_empty() {
        out.push_str("- Reasons:\n");
        for reason in &effort.confidence.reasons {
            let _ = writeln!(out, "  - {reason}");
        }
    }
    out.push('\n');

    out.push_str("### Drivers\n\n");
    if effort.drivers.is_empty() {
        out.push_str("- No material drivers were inferred.\n\n");
    } else {
        out.push_str("|Driver|Direction|Weight|Evidence|\n");
        out.push_str("|---|---|---:|---|\n");
        for row in effort.drivers.iter().take(35) {
            // Map the enum to the lowercase wording used in the table.
            let direction = match row.direction {
                EffortDriverDirection::Raises => "raises",
                EffortDriverDirection::Lowers => "lowers",
                EffortDriverDirection::Neutral => "neutral",
            };
            let _ = writeln!(
                out,
                "|{}|{}|{}|{}|",
                row.label,
                direction,
                fmt_f64(row.weight, 4),
                row.evidence
            );
        }
        out.push('\n');
    }

    if !effort.assumptions.notes.is_empty() {
        out.push_str("### Assumptions\n\n");
        for note in &effort.assumptions.notes {
            let _ = writeln!(out, "- {note}");
        }
        out.push('\n');
    }

    if !effort.assumptions.overrides.is_empty() {
        out.push_str("### Assumption overrides\n\n");
        out.push_str("|Setting|Value|\n");
        out.push_str("|---|---|\n");
        for (key, value) in &effort.assumptions.overrides {
            let _ = writeln!(out, "|{key}|{value}|");
        }
        out.push('\n');
    }

    // The Delta heading is always emitted; the body says so when no baseline exists.
    out.push_str("### Delta\n\n");
    if let Some(delta) = &effort.delta {
        let _ = writeln!(
            out,
            "- Reference window: `{}`..`{}`\n- Files changed: `{}`\n- Modules changed: `{}`\n- Languages changed: `{}`\n- Hotspots touched: `{}`\n- Coupled neighbors touched: `{}`\n- Blast radius: `{}`\n- Classification: `{}`\n- Effort p50 impact: `{}`\n- Effort p80 impact: `{}`\n",
            delta.base,
            delta.head,
            delta.files_changed,
            delta.modules_changed,
            delta.langs_changed,
            delta.hotspot_files_touched,
            delta.coupled_neighbors_touched,
            fmt_f64(delta.blast_radius, 4),
            delta.classification,
            fmt_f64(delta.effort_pm_est, 4),
            fmt_f64(delta.effort_pm_high, 4)
        );
        let _ = writeln!(
            out,
            "- Effort low bound (delta): `{}`\n",
            fmt_f64(delta.effort_pm_low, 4),
        );
    } else {
        out.push_str("- Baseline comparison is not available for this receipt.\n\n");
    }
}
1077
/// Appends the legacy COCOMO "## Effort estimate" section to `out`.
///
/// Fallback used when the receipt carries a classic COCOMO report but no
/// modern effort estimate. Mirrors the section headings of
/// `render_effort_report` (Size basis / Headline / Why / Delta) so both
/// paths produce a structurally similar document; the Delta section is
/// always a "not available" stub here.
fn render_legacy_cocomo_report(
    out: &mut String,
    derived: &tokmd_analysis_types::DerivedReport,
    cocomo: &tokmd_analysis_types::CocomoReport,
) {
    out.push_str("## Effort estimate\n\n");

    out.push_str("### Size basis\n\n");
    let _ = writeln!(
        out,
        "- Source lines: `{}`\n- Total lines: `{}`\n- KLOC: `{}`\n",
        derived.totals.code,
        derived.totals.lines,
        fmt_f64(cocomo.kloc, 4)
    );

    out.push_str("### Headline\n\n");
    let _ = writeln!(
        out,
        "- Effort: `{}` person-months\n- Duration: `{}` months\n- Staff: `{}`\n",
        fmt_f64(cocomo.effort_pm, 2),
        fmt_f64(cocomo.duration_months, 2),
        fmt_f64(cocomo.staff, 2)
    );

    out.push_str("### Why\n\n");
    let _ = writeln!(
        out,
        "- Model: `COCOMO` (`{}` mode)\n- Formula: `E = a * KLOC^b`\n- Coefficients: `a={}`, `b={}`, `c={}`, `d={}`\n",
        cocomo.mode,
        fmt_f64(cocomo.a, 2),
        fmt_f64(cocomo.b, 2),
        fmt_f64(cocomo.c, 2),
        fmt_f64(cocomo.d, 2)
    );

    out.push_str("### Delta\n\n");
    out.push_str("- Baseline comparison is not available for this receipt.\n\n");
}
1126
/// Formats a 0.0..=1.0 ratio as a percentage with one decimal place,
/// e.g. `0.257` becomes `"25.7%"`.
fn fmt_pct(ratio: f64) -> String {
    let pct = ratio * 100.0;
    format!("{pct:.1}%")
}
1130
/// Formats `value` with exactly `decimals` digits after the decimal point.
fn fmt_f64(value: f64, decimals: usize) -> String {
    // `{:.*}` takes the precision from the preceding argument.
    format!("{:.*}", decimals, value)
}
1134
/// Renders the receipt as a minimal JSON-LD `SoftwareSourceCode` document
/// using the schema.org vocabulary.
///
/// Line/byte totals default to 0 when no derived metrics are present, and
/// the token count is exposed via an `InteractionCounter` node.
fn render_jsonld(receipt: &AnalysisReceipt) -> String {
    // Use the first input path as the document name, falling back to "tokmd".
    let name = receipt
        .source
        .inputs
        .first()
        .cloned()
        .unwrap_or_else(|| "tokmd".to_string());
    let totals = receipt.derived.as_ref().map(|d| &d.totals);
    let payload = serde_json::json!({
        "@context": "https://schema.org",
        "@type": "SoftwareSourceCode",
        "name": name,
        "codeLines": totals.map(|t| t.code).unwrap_or(0),
        "commentCount": totals.map(|t| t.comments).unwrap_or(0),
        "lineCount": totals.map(|t| t.lines).unwrap_or(0),
        "fileSize": totals.map(|t| t.bytes).unwrap_or(0),
        "interactionStatistic": {
            "@type": "InteractionCounter",
            "interactionType": "http://schema.org/ReadAction",
            "userInteractionCount": totals.map(|t| t.tokens).unwrap_or(0)
        }
    });
    // Serializing a json! value should not fail; degrade to an empty object
    // rather than panicking if it somehow does.
    serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string())
}
1159
1160fn render_xml(receipt: &AnalysisReceipt) -> String {
1161 let totals = receipt.derived.as_ref().map(|d| &d.totals);
1162 let mut out = String::new();
1163 out.push_str("<analysis>");
1164 if let Some(totals) = totals {
1165 let _ = write!(
1166 out,
1167 "<totals files=\"{}\" code=\"{}\" comments=\"{}\" blanks=\"{}\" lines=\"{}\" bytes=\"{}\" tokens=\"{}\"/>",
1168 totals.files,
1169 totals.code,
1170 totals.comments,
1171 totals.blanks,
1172 totals.lines,
1173 totals.bytes,
1174 totals.tokens
1175 );
1176 }
1177 out.push_str("</analysis>");
1178 out
1179}
1180
1181fn render_svg(receipt: &AnalysisReceipt) -> String {
1182 let (label, value) = if let Some(derived) = &receipt.derived {
1183 if let Some(ctx) = &derived.context_window {
1184 ("context".to_string(), format!("{:.1}%", ctx.pct * 100.0))
1185 } else {
1186 ("tokens".to_string(), derived.totals.tokens.to_string())
1187 }
1188 } else {
1189 ("tokens".to_string(), "0".to_string())
1190 };
1191
1192 let width = 240;
1193 let height = 32;
1194 let label_width = 80;
1195 let value_width = width - label_width;
1196 format!(
1197 "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{width}\" height=\"{height}\" role=\"img\"><rect width=\"{label_width}\" height=\"{height}\" fill=\"#555\"/><rect x=\"{label_width}\" width=\"{value_width}\" height=\"{height}\" fill=\"#4c9aff\"/><text x=\"{lx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{label}</text><text x=\"{vx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{value}</text></svg>",
1198 width = width,
1199 height = height,
1200 label_width = label_width,
1201 value_width = value_width,
1202 lx = label_width / 2,
1203 vx = label_width + value_width / 2,
1204 ty = 20,
1205 label = label,
1206 value = value
1207 )
1208}
1209
1210fn render_mermaid(receipt: &AnalysisReceipt) -> String {
1211 let mut out = String::from("graph TD\n");
1212 if let Some(imports) = &receipt.imports {
1213 for edge in imports.edges.iter().take(200) {
1214 let from = sanitize_mermaid(&edge.from);
1215 let to = sanitize_mermaid(&edge.to);
1216 let _ = writeln!(out, " {} -->|{}| {}", from, edge.count, to);
1217 }
1218 }
1219 out
1220}
1221
1222fn render_tree(receipt: &AnalysisReceipt) -> String {
1223 receipt
1224 .derived
1225 .as_ref()
1226 .and_then(|d| d.tree.clone())
1227 .unwrap_or_else(|| "(tree unavailable)".to_string())
1228}
1229
#[cfg(feature = "fun")]
/// Builds a Wavefront OBJ "code city" from the largest files: one building
/// per file, laid out on a 5-wide grid with 2-unit spacing, height scaled
/// by line count (1 unit per 10 lines, clamped to at least 0.5).
fn render_obj_fun(receipt: &AnalysisReceipt) -> Result<String> {
    let derived = match &receipt.derived {
        Some(d) => d,
        // Nothing to lay out: emit just the stub header.
        None => return Ok("# tokmd code city\n".to_string()),
    };
    let mut buildings = Vec::with_capacity(derived.top.largest_lines.len());
    for (idx, row) in derived.top.largest_lines.iter().enumerate() {
        buildings.push(tokmd_fun::ObjBuilding {
            name: row.path.clone(),
            x: (idx % 5) as f32 * 2.0,
            y: (idx / 5) as f32 * 2.0,
            w: 1.5,
            d: 1.5,
            h: (row.lines as f32 / 10.0).max(0.5),
        });
    }
    Ok(tokmd_fun::render_obj(&buildings))
}
1257
#[cfg(feature = "fun")]
/// Sonifies the largest files as a MIDI track at 120 BPM: one note per file,
/// pitch derived from directory depth (C4 + depth mod 12), velocity from
/// line count (clamped to 120), notes spaced 240 ticks apart.
fn render_midi_fun(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    let notes: Vec<tokmd_fun::MidiNote> = receipt
        .derived
        .as_ref()
        .map(|derived| {
            derived
                .top
                .largest_lines
                .iter()
                .enumerate()
                .map(|(idx, row)| tokmd_fun::MidiNote {
                    key: 60u8 + (row.depth as u8 % 12),
                    velocity: (40 + (row.lines.min(127) as u8 / 2)).min(120),
                    start: (idx as u32) * 240,
                    duration: 180,
                    channel: 0,
                })
                .collect()
        })
        .unwrap_or_default();
    tokmd_fun::render_midi(&notes, 120)
}
1277
1278#[cfg(not(feature = "fun"))]
1280fn render_obj_disabled(_receipt: &AnalysisReceipt) -> Result<String> {
1281 anyhow::bail!(
1282 "OBJ format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1283 )
1284}
1285
1286#[cfg(not(feature = "fun"))]
1287fn render_midi_disabled(_receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
1288 anyhow::bail!(
1289 "MIDI format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1290 )
1291}
1292
/// Renders the receipt as a Wavefront OBJ "code city".
///
/// Compile-time dispatch: with the `fun` feature this delegates to the real
/// renderer; without it, it returns an error explaining how to enable the
/// feature. Exactly one branch exists in any given build.
fn render_obj(receipt: &AnalysisReceipt) -> Result<String> {
    #[cfg(feature = "fun")]
    {
        render_obj_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_obj_disabled(receipt)
    }
}
1304
/// Renders the receipt as a binary MIDI file.
///
/// Compile-time dispatch: with the `fun` feature this delegates to the real
/// renderer; without it, it returns an error explaining how to enable the
/// feature. Exactly one branch exists in any given build.
fn render_midi(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    #[cfg(feature = "fun")]
    {
        render_midi_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_midi_disabled(receipt)
    }
}
1315
/// Makes a node identifier safe for Mermaid by replacing every character
/// that is not ASCII-alphanumeric with `_`.
fn sanitize_mermaid(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    for c in name.chars() {
        out.push(if c.is_ascii_alphanumeric() { c } else { '_' });
    }
    out
}
1321
/// Renders the receipt as an HTML report, delegating entirely to the
/// `tokmd_analysis_html` crate.
fn render_html(receipt: &AnalysisReceipt) -> String {
    tokmd_analysis_html::render(receipt)
}
1325
1326#[cfg(test)]
1327mod tests {
1328 use super::*;
1329 use std::collections::BTreeMap;
1330 use tokmd_analysis_types::*;
1331
1332 fn minimal_receipt() -> AnalysisReceipt {
1333 AnalysisReceipt {
1334 schema_version: 2,
1335 generated_at_ms: 0,
1336 tool: tokmd_types::ToolInfo {
1337 name: "tokmd".to_string(),
1338 version: "0.0.0".to_string(),
1339 },
1340 mode: "analysis".to_string(),
1341 status: tokmd_types::ScanStatus::Complete,
1342 warnings: vec![],
1343 source: AnalysisSource {
1344 inputs: vec!["test".to_string()],
1345 export_path: None,
1346 base_receipt_path: None,
1347 export_schema_version: None,
1348 export_generated_at_ms: None,
1349 base_signature: None,
1350 module_roots: vec![],
1351 module_depth: 1,
1352 children: "collapse".to_string(),
1353 },
1354 args: AnalysisArgsMeta {
1355 preset: "receipt".to_string(),
1356 format: "md".to_string(),
1357 window_tokens: None,
1358 git: None,
1359 max_files: None,
1360 max_bytes: None,
1361 max_commits: None,
1362 max_commit_files: None,
1363 max_file_bytes: None,
1364 import_granularity: "module".to_string(),
1365 },
1366 archetype: None,
1367 topics: None,
1368 entropy: None,
1369 predictive_churn: None,
1370 corporate_fingerprint: None,
1371 license: None,
1372 derived: None,
1373 assets: None,
1374 deps: None,
1375 git: None,
1376 imports: None,
1377 dup: None,
1378 complexity: None,
1379 api_surface: None,
1380 fun: None,
1381 effort: None,
1382 }
1383 }
1384
1385 fn sample_derived() -> DerivedReport {
1386 DerivedReport {
1387 totals: DerivedTotals {
1388 files: 10,
1389 code: 1000,
1390 comments: 200,
1391 blanks: 100,
1392 lines: 1300,
1393 bytes: 50000,
1394 tokens: 2500,
1395 },
1396 doc_density: RatioReport {
1397 total: RatioRow {
1398 key: "total".to_string(),
1399 numerator: 200,
1400 denominator: 1200,
1401 ratio: 0.1667,
1402 },
1403 by_lang: vec![],
1404 by_module: vec![],
1405 },
1406 whitespace: RatioReport {
1407 total: RatioRow {
1408 key: "total".to_string(),
1409 numerator: 100,
1410 denominator: 1300,
1411 ratio: 0.0769,
1412 },
1413 by_lang: vec![],
1414 by_module: vec![],
1415 },
1416 verbosity: RateReport {
1417 total: RateRow {
1418 key: "total".to_string(),
1419 numerator: 50000,
1420 denominator: 1300,
1421 rate: 38.46,
1422 },
1423 by_lang: vec![],
1424 by_module: vec![],
1425 },
1426 max_file: MaxFileReport {
1427 overall: FileStatRow {
1428 path: "src/lib.rs".to_string(),
1429 module: "src".to_string(),
1430 lang: "Rust".to_string(),
1431 code: 500,
1432 comments: 100,
1433 blanks: 50,
1434 lines: 650,
1435 bytes: 25000,
1436 tokens: 1250,
1437 doc_pct: Some(0.167),
1438 bytes_per_line: Some(38.46),
1439 depth: 1,
1440 },
1441 by_lang: vec![],
1442 by_module: vec![],
1443 },
1444 lang_purity: LangPurityReport { rows: vec![] },
1445 nesting: NestingReport {
1446 max: 3,
1447 avg: 1.5,
1448 by_module: vec![],
1449 },
1450 test_density: TestDensityReport {
1451 test_lines: 200,
1452 prod_lines: 1000,
1453 test_files: 5,
1454 prod_files: 5,
1455 ratio: 0.2,
1456 },
1457 boilerplate: BoilerplateReport {
1458 infra_lines: 100,
1459 logic_lines: 1100,
1460 ratio: 0.083,
1461 infra_langs: vec!["TOML".to_string()],
1462 },
1463 polyglot: PolyglotReport {
1464 lang_count: 2,
1465 entropy: 0.5,
1466 dominant_lang: "Rust".to_string(),
1467 dominant_lines: 1000,
1468 dominant_pct: 0.833,
1469 },
1470 distribution: DistributionReport {
1471 count: 10,
1472 min: 50,
1473 max: 650,
1474 mean: 130.0,
1475 median: 100.0,
1476 p90: 400.0,
1477 p99: 650.0,
1478 gini: 0.3,
1479 },
1480 histogram: vec![HistogramBucket {
1481 label: "Small".to_string(),
1482 min: 0,
1483 max: Some(100),
1484 files: 5,
1485 pct: 0.5,
1486 }],
1487 top: TopOffenders {
1488 largest_lines: vec![FileStatRow {
1489 path: "src/lib.rs".to_string(),
1490 module: "src".to_string(),
1491 lang: "Rust".to_string(),
1492 code: 500,
1493 comments: 100,
1494 blanks: 50,
1495 lines: 650,
1496 bytes: 25000,
1497 tokens: 1250,
1498 doc_pct: Some(0.167),
1499 bytes_per_line: Some(38.46),
1500 depth: 1,
1501 }],
1502 largest_tokens: vec![],
1503 largest_bytes: vec![],
1504 least_documented: vec![],
1505 most_dense: vec![],
1506 },
1507 tree: Some("test-tree".to_string()),
1508 reading_time: ReadingTimeReport {
1509 minutes: 65.0,
1510 lines_per_minute: 20,
1511 basis_lines: 1300,
1512 },
1513 context_window: Some(ContextWindowReport {
1514 window_tokens: 100000,
1515 total_tokens: 2500,
1516 pct: 0.025,
1517 fits: true,
1518 }),
1519 cocomo: Some(CocomoReport {
1520 mode: "organic".to_string(),
1521 kloc: 1.0,
1522 effort_pm: 2.4,
1523 duration_months: 2.5,
1524 staff: 1.0,
1525 a: 2.4,
1526 b: 1.05,
1527 c: 2.5,
1528 d: 0.38,
1529 }),
1530 todo: Some(TodoReport {
1531 total: 5,
1532 density_per_kloc: 5.0,
1533 tags: vec![TodoTagRow {
1534 tag: "TODO".to_string(),
1535 count: 5,
1536 }],
1537 }),
1538 integrity: IntegrityReport {
1539 algo: "blake3".to_string(),
1540 hash: "abc123".to_string(),
1541 entries: 10,
1542 },
1543 }
1544 }
1545
1546 #[test]
1548 fn test_fmt_pct() {
1549 assert_eq!(fmt_pct(0.5), "50.0%");
1550 assert_eq!(fmt_pct(0.0), "0.0%");
1551 assert_eq!(fmt_pct(1.0), "100.0%");
1552 assert_eq!(fmt_pct(0.1234), "12.3%");
1553 }
1554
1555 #[test]
1557 #[allow(clippy::approx_constant)]
1558 fn test_fmt_f64() {
1559 assert_eq!(fmt_f64(3.14159, 2), "3.14");
1560 assert_eq!(fmt_f64(3.14159, 4), "3.1416");
1561 assert_eq!(fmt_f64(0.0, 2), "0.00");
1562 assert_eq!(fmt_f64(100.0, 0), "100");
1563 }
1564
1565 #[test]
1567 fn test_sanitize_mermaid() {
1568 assert_eq!(sanitize_mermaid("hello"), "hello");
1569 assert_eq!(sanitize_mermaid("hello-world"), "hello_world");
1570 assert_eq!(sanitize_mermaid("src/lib.rs"), "src_lib_rs");
1571 assert_eq!(sanitize_mermaid("test123"), "test123");
1572 assert_eq!(sanitize_mermaid("a b c"), "a_b_c");
1573 }
1574
1575 #[test]
1577 fn test_render_file_table() {
1578 let rows = vec![FileStatRow {
1579 path: "src/lib.rs".to_string(),
1580 module: "src".to_string(),
1581 lang: "Rust".to_string(),
1582 code: 100,
1583 comments: 20,
1584 blanks: 10,
1585 lines: 130,
1586 bytes: 5000,
1587 tokens: 250,
1588 doc_pct: Some(0.167),
1589 bytes_per_line: Some(38.46),
1590 depth: 1,
1591 }];
1592 let result = render_file_table(&rows);
1593 assert!(result.contains("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|"));
1594 assert!(result.contains("|src/lib.rs|Rust|130|100|5000|250|16.7%|38.46|"));
1595 }
1596
1597 #[test]
1599 fn test_render_file_table_none_values() {
1600 let rows = vec![FileStatRow {
1601 path: "test.txt".to_string(),
1602 module: "root".to_string(),
1603 lang: "Text".to_string(),
1604 code: 50,
1605 comments: 0,
1606 blanks: 5,
1607 lines: 55,
1608 bytes: 1000,
1609 tokens: 100,
1610 doc_pct: None,
1611 bytes_per_line: None,
1612 depth: 0,
1613 }];
1614 let result = render_file_table(&rows);
1615 assert!(result.contains("|-|-|")); }
1617
1618 #[test]
1620 fn test_render_xml() {
1621 let mut receipt = minimal_receipt();
1622 receipt.derived = Some(sample_derived());
1623 let result = render_xml(&receipt);
1624 assert!(result.starts_with("<analysis>"));
1625 assert!(result.ends_with("</analysis>"));
1626 assert!(result.contains("files=\"10\""));
1627 assert!(result.contains("code=\"1000\""));
1628 }
1629
1630 #[test]
1632 fn test_render_xml_no_derived() {
1633 let receipt = minimal_receipt();
1634 let result = render_xml(&receipt);
1635 assert_eq!(result, "<analysis></analysis>");
1636 }
1637
1638 #[test]
1640 fn test_render_jsonld() {
1641 let mut receipt = minimal_receipt();
1642 receipt.derived = Some(sample_derived());
1643 let result = render_jsonld(&receipt);
1644 assert!(result.contains("\"@context\": \"https://schema.org\""));
1645 assert!(result.contains("\"@type\": \"SoftwareSourceCode\""));
1646 assert!(result.contains("\"name\": \"test\""));
1647 assert!(result.contains("\"codeLines\": 1000"));
1648 }
1649
1650 #[test]
1652 fn test_render_jsonld_empty_inputs() {
1653 let mut receipt = minimal_receipt();
1654 receipt.source.inputs.clear();
1655 let result = render_jsonld(&receipt);
1656 assert!(result.contains("\"name\": \"tokmd\""));
1657 }
1658
1659 #[test]
1661 fn test_render_svg() {
1662 let mut receipt = minimal_receipt();
1663 receipt.derived = Some(sample_derived());
1664 let result = render_svg(&receipt);
1665 assert!(result.contains("<svg"));
1666 assert!(result.contains("</svg>"));
1667 assert!(result.contains("context")); assert!(result.contains("2.5%")); }
1670
1671 #[test]
1673 fn test_render_svg_no_context() {
1674 let mut receipt = minimal_receipt();
1675 let mut derived = sample_derived();
1676 derived.context_window = None;
1677 receipt.derived = Some(derived);
1678 let result = render_svg(&receipt);
1679 assert!(result.contains("tokens"));
1680 assert!(result.contains("2500")); }
1682
1683 #[test]
1685 fn test_render_svg_no_derived() {
1686 let receipt = minimal_receipt();
1687 let result = render_svg(&receipt);
1688 assert!(result.contains("tokens"));
1689 assert!(result.contains(">0<")); }
1691
1692 #[test]
1694 fn test_render_svg_dimensions() {
1695 let receipt = minimal_receipt();
1696 let result = render_svg(&receipt);
1697 assert!(result.contains("width=\"160\"")); }
1700
1701 #[test]
1703 fn test_render_mermaid() {
1704 let mut receipt = minimal_receipt();
1705 receipt.imports = Some(ImportReport {
1706 granularity: "module".to_string(),
1707 edges: vec![ImportEdge {
1708 from: "src/main".to_string(),
1709 to: "src/lib".to_string(),
1710 count: 5,
1711 }],
1712 });
1713 let result = render_mermaid(&receipt);
1714 assert!(result.starts_with("graph TD\n"));
1715 assert!(result.contains("src_main -->|5| src_lib"));
1716 }
1717
1718 #[test]
1720 fn test_render_mermaid_no_imports() {
1721 let receipt = minimal_receipt();
1722 let result = render_mermaid(&receipt);
1723 assert_eq!(result, "graph TD\n");
1724 }
1725
1726 #[test]
1728 fn test_render_tree() {
1729 let mut receipt = minimal_receipt();
1730 receipt.derived = Some(sample_derived());
1731 let result = render_tree(&receipt);
1732 assert_eq!(result, "test-tree");
1733 }
1734
1735 #[test]
1737 fn test_render_tree_no_derived() {
1738 let receipt = minimal_receipt();
1739 let result = render_tree(&receipt);
1740 assert_eq!(result, "(tree unavailable)");
1741 }
1742
1743 #[test]
1745 fn test_render_tree_none() {
1746 let mut receipt = minimal_receipt();
1747 let mut derived = sample_derived();
1748 derived.tree = None;
1749 receipt.derived = Some(derived);
1750 let result = render_tree(&receipt);
1751 assert_eq!(result, "(tree unavailable)");
1752 }
1753
1754 #[cfg(not(feature = "fun"))]
1756 #[test]
1757 fn test_render_obj_no_fun() {
1758 let receipt = minimal_receipt();
1759 let result = render_obj(&receipt);
1760 assert!(result.is_err());
1761 assert!(result.unwrap_err().to_string().contains("fun"));
1762 }
1763
1764 #[cfg(not(feature = "fun"))]
1766 #[test]
1767 fn test_render_midi_no_fun() {
1768 let receipt = minimal_receipt();
1769 let result = render_midi(&receipt);
1770 assert!(result.is_err());
1771 assert!(result.unwrap_err().to_string().contains("fun"));
1772 }
1773
1774 #[cfg(feature = "fun")]
1781 #[test]
1782 fn test_render_obj_coordinate_math() {
1783 let mut receipt = minimal_receipt();
1784 let mut derived = sample_derived();
1785 derived.top.largest_lines = vec![
1795 FileStatRow {
1796 path: "file0.rs".to_string(),
1797 module: "src".to_string(),
1798 lang: "Rust".to_string(),
1799 code: 100,
1800 comments: 10,
1801 blanks: 5,
1802 lines: 100, bytes: 1000,
1804 tokens: 200,
1805 doc_pct: None,
1806 bytes_per_line: None,
1807 depth: 1,
1808 },
1809 FileStatRow {
1810 path: "file1.rs".to_string(),
1811 module: "src".to_string(),
1812 lang: "Rust".to_string(),
1813 code: 50,
1814 comments: 5,
1815 blanks: 2,
1816 lines: 3, bytes: 500,
1818 tokens: 100,
1819 doc_pct: None,
1820 bytes_per_line: None,
1821 depth: 2,
1822 },
1823 FileStatRow {
1824 path: "file2.rs".to_string(),
1825 module: "src".to_string(),
1826 lang: "Rust".to_string(),
1827 code: 200,
1828 comments: 20,
1829 blanks: 10,
1830 lines: 200, bytes: 2000,
1832 tokens: 400,
1833 doc_pct: None,
1834 bytes_per_line: None,
1835 depth: 3,
1836 },
1837 FileStatRow {
1838 path: "file3.rs".to_string(),
1839 module: "src".to_string(),
1840 lang: "Rust".to_string(),
1841 code: 75,
1842 comments: 7,
1843 blanks: 3,
1844 lines: 75, bytes: 750,
1846 tokens: 150,
1847 doc_pct: None,
1848 bytes_per_line: None,
1849 depth: 0,
1850 },
1851 FileStatRow {
1852 path: "file4.rs".to_string(),
1853 module: "src".to_string(),
1854 lang: "Rust".to_string(),
1855 code: 150,
1856 comments: 15,
1857 blanks: 8,
1858 lines: 150, bytes: 1500,
1860 tokens: 300,
1861 doc_pct: None,
1862 bytes_per_line: None,
1863 depth: 1,
1864 },
1865 FileStatRow {
1867 path: "file5.rs".to_string(),
1868 module: "src".to_string(),
1869 lang: "Rust".to_string(),
1870 code: 80,
1871 comments: 8,
1872 blanks: 4,
1873 lines: 80, bytes: 800,
1875 tokens: 160,
1876 doc_pct: None,
1877 bytes_per_line: None,
1878 depth: 2,
1879 },
1880 FileStatRow {
1882 path: "file6.rs".to_string(),
1883 module: "src".to_string(),
1884 lang: "Rust".to_string(),
1885 code: 60,
1886 comments: 6,
1887 blanks: 3,
1888 lines: 60, bytes: 600,
1890 tokens: 120,
1891 doc_pct: None,
1892 bytes_per_line: None,
1893 depth: 1,
1894 },
1895 ];
1896 receipt.derived = Some(derived);
1897 let result = render_obj(&receipt).expect("render_obj should succeed with fun feature");
1898
1899 #[allow(clippy::type_complexity)]
1902 let objects: Vec<(&str, Vec<(f32, f32, f32)>)> = result
1903 .split("o ")
1904 .skip(1)
1905 .map(|section| {
1906 let lines: Vec<&str> = section.lines().collect();
1907 let name = lines[0];
1908 let vertices: Vec<(f32, f32, f32)> = lines[1..]
1909 .iter()
1910 .filter(|l| l.starts_with("v "))
1911 .take(8)
1912 .map(|l| {
1913 let parts: Vec<f32> = l[2..]
1914 .split_whitespace()
1915 .map(|p| p.parse().unwrap())
1916 .collect();
1917 (parts[0], parts[1], parts[2])
1918 })
1919 .collect();
1920 (name, vertices)
1921 })
1922 .collect();
1923
1924 assert_eq!(objects.len(), 7, "expected 7 buildings");
1926
1927 fn base_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1929 obj.1[0]
1930 }
1931 fn top_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1932 obj.1[4] }
1934
1935 assert_eq!(
1937 base_corner(&objects[0]),
1938 (0.0, 0.0, 0.0),
1939 "file0 base position"
1940 );
1941 assert_eq!(
1942 top_corner(&objects[0]).2,
1943 10.0,
1944 "file0 height should be 10.0 (100/10)"
1945 );
1946
1947 assert_eq!(
1950 base_corner(&objects[1]),
1951 (2.0, 0.0, 0.0),
1952 "file1 base position"
1953 );
1954 assert_eq!(
1955 top_corner(&objects[1]).2,
1956 0.5,
1957 "file1 height should be 0.5 (clamped from 3/10=0.3)"
1958 );
1959
1960 assert_eq!(
1962 base_corner(&objects[2]),
1963 (4.0, 0.0, 0.0),
1964 "file2 base position"
1965 );
1966 assert_eq!(
1967 top_corner(&objects[2]).2,
1968 20.0,
1969 "file2 height should be 20.0 (200/10)"
1970 );
1971
1972 assert_eq!(
1974 base_corner(&objects[3]),
1975 (6.0, 0.0, 0.0),
1976 "file3 base position"
1977 );
1978 assert_eq!(
1979 top_corner(&objects[3]).2,
1980 7.5,
1981 "file3 height should be 7.5 (75/10)"
1982 );
1983
1984 assert_eq!(
1987 base_corner(&objects[4]),
1988 (8.0, 0.0, 0.0),
1989 "file4 base position (x = 4*2 = 8)"
1990 );
1991 assert_eq!(
1992 top_corner(&objects[4]).2,
1993 15.0,
1994 "file4 height should be 15.0 (150/10)"
1995 );
1996
1997 assert_eq!(
2001 base_corner(&objects[5]),
2002 (0.0, 2.0, 0.0),
2003 "file5 base position (x=0 from 5%5, y=2 from 5/5*2)"
2004 );
2005 assert_eq!(
2006 top_corner(&objects[5]).2,
2007 8.0,
2008 "file5 height should be 8.0 (80/10)"
2009 );
2010
2011 assert_eq!(
2014 base_corner(&objects[6]),
2015 (2.0, 2.0, 0.0),
2016 "file6 base position (x=2 from 6%5*2, y=2 from 6/5*2)"
2017 );
2018 assert_eq!(
2019 top_corner(&objects[6]).2,
2020 6.0,
2021 "file6 height should be 6.0 (60/10)"
2022 );
2023
2024 assert!(result.contains("f 1 2 3 4"), "missing face definition");
2026 }
2027
2028 #[cfg(feature = "fun")]
2034 #[test]
2035 fn test_render_midi_note_math() {
2036 use midly::{MidiMessage, Smf, TrackEventKind};
2037
2038 let mut receipt = minimal_receipt();
2039 let mut derived = sample_derived();
2040 derived.top.largest_lines = vec![
2046 FileStatRow {
2048 path: "a.rs".to_string(),
2049 module: "src".to_string(),
2050 lang: "Rust".to_string(),
2051 code: 50,
2052 comments: 5,
2053 blanks: 2,
2054 lines: 60,
2055 bytes: 500,
2056 tokens: 100,
2057 doc_pct: None,
2058 bytes_per_line: None,
2059 depth: 5,
2060 },
2061 FileStatRow {
2064 path: "b.rs".to_string(),
2065 module: "src".to_string(),
2066 lang: "Rust".to_string(),
2067 code: 100,
2068 comments: 10,
2069 blanks: 5,
2070 lines: 200, bytes: 1000,
2072 tokens: 200,
2073 doc_pct: None,
2074 bytes_per_line: None,
2075 depth: 15,
2076 },
2077 FileStatRow {
2079 path: "c.rs".to_string(),
2080 module: "src".to_string(),
2081 lang: "Rust".to_string(),
2082 code: 20,
2083 comments: 2,
2084 blanks: 1,
2085 lines: 20,
2086 bytes: 200,
2087 tokens: 40,
2088 doc_pct: None,
2089 bytes_per_line: None,
2090 depth: 0,
2091 },
2092 FileStatRow {
2095 path: "d.rs".to_string(),
2096 module: "src".to_string(),
2097 lang: "Rust".to_string(),
2098 code: 160,
2099 comments: 16,
2100 blanks: 8,
2101 lines: 160,
2102 bytes: 1600,
2103 tokens: 320,
2104 doc_pct: None,
2105 bytes_per_line: None,
2106 depth: 12,
2107 },
2108 ];
2109 receipt.derived = Some(derived);
2110
2111 let result = render_midi(&receipt).unwrap();
2112
2113 let smf = Smf::parse(&result).expect("should parse as valid MIDI");
2115
2116 let mut notes: Vec<(u32, u8, u8)> = Vec::new(); let mut abs_time = 0u32;
2119
2120 for event in &smf.tracks[0] {
2121 abs_time += event.delta.as_int();
2122 if let TrackEventKind::Midi {
2123 message: MidiMessage::NoteOn { key, vel },
2124 ..
2125 } = event.kind
2126 {
2127 notes.push((abs_time, key.as_int(), vel.as_int()));
2128 }
2129 }
2130
2131 assert_eq!(notes.len(), 4, "expected 4 NoteOn events, got {:?}", notes);
2133
2134 assert_eq!(
2137 notes[0],
2138 (0, 65, 70),
2139 "note 0: expected (time=0, key=65=60+5, vel=70=40+60/2), got {:?}",
2140 notes[0]
2141 );
2142
2143 assert_eq!(
2146 notes[1],
2147 (240, 63, 103),
2148 "note 1: expected (time=240=1*240, key=63=60+(15%12), vel=103=40+127/2), got {:?}",
2149 notes[1]
2150 );
2151
2152 assert_eq!(
2154 notes[2],
2155 (480, 60, 50),
2156 "note 2: expected (time=480=2*240, key=60=60+0, vel=50=40+20/2), got {:?}",
2157 notes[2]
2158 );
2159
2160 assert_eq!(
2163 notes[3],
2164 (720, 60, 103),
2165 "note 3: expected (time=720=3*240, key=60=60+(12%12), vel=103=40+127/2), got {:?}",
2166 notes[3]
2167 );
2168
2169 let mut note_offs: Vec<(u32, u8)> = Vec::new(); abs_time = 0;
2172 for event in &smf.tracks[0] {
2173 abs_time += event.delta.as_int();
2174 if let TrackEventKind::Midi {
2175 message: MidiMessage::NoteOff { key, .. },
2176 ..
2177 } = event.kind
2178 {
2179 note_offs.push((abs_time, key.as_int()));
2180 }
2181 }
2182
2183 assert!(
2185 note_offs.iter().any(|&(t, k)| t == 180 && k == 65),
2186 "expected NoteOff for key 65 at time 180, got {:?}",
2187 note_offs
2188 );
2189 assert!(
2190 note_offs.iter().any(|&(t, k)| t == 420 && k == 63),
2191 "expected NoteOff for key 63 at time 420 (240+180), got {:?}",
2192 note_offs
2193 );
2194 assert!(
2195 note_offs.iter().any(|&(t, k)| t == 660 && k == 60),
2196 "expected NoteOff for key 60 at time 660 (480+180), got {:?}",
2197 note_offs
2198 );
2199 assert!(
2200 note_offs.iter().any(|&(t, k)| t == 900 && k == 60),
2201 "expected NoteOff for key 60 at time 900 (720+180), got {:?}",
2202 note_offs
2203 );
2204 }
2205
2206 #[cfg(feature = "fun")]
2208 #[test]
2209 fn test_render_midi_no_derived() {
2210 use midly::Smf;
2211
2212 let receipt = minimal_receipt();
2213 let result = render_midi(&receipt).unwrap();
2214
2215 assert!(!result.is_empty(), "MIDI output should not be empty");
2217 assert!(
2218 result.len() > 14,
2219 "MIDI should have header (14 bytes) + track data"
2220 );
2221
2222 let smf = Smf::parse(&result).expect("should be valid MIDI even with no notes");
2224 assert_eq!(smf.tracks.len(), 1, "should have exactly one track");
2225 }
2226
2227 #[cfg(feature = "fun")]
2229 #[test]
2230 fn test_render_obj_no_derived() {
2231 let receipt = minimal_receipt();
2232 let result = render_obj(&receipt).expect("render_obj should succeed");
2233
2234 assert_eq!(result, "# tokmd code city\n");
2236 }
2237
2238 #[test]
2240 fn test_render_md_basic() {
2241 let receipt = minimal_receipt();
2242 let result = render_md(&receipt);
2243 assert!(result.starts_with("# tokmd analysis\n"));
2244 assert!(result.contains("Preset: `receipt`"));
2245 }
2246
2247 #[test]
2249 fn test_render_md_inputs() {
2250 let mut receipt = minimal_receipt();
2251 receipt.source.inputs = vec!["path1".to_string(), "path2".to_string()];
2252 let result = render_md(&receipt);
2253 assert!(result.contains("## Inputs"));
2254 assert!(result.contains("- `path1`"));
2255 assert!(result.contains("- `path2`"));
2256 }
2257
2258 #[test]
2260 fn test_render_md_empty_inputs() {
2261 let mut receipt = minimal_receipt();
2262 receipt.source.inputs.clear();
2263 let result = render_md(&receipt);
2264 assert!(!result.contains("## Inputs"));
2265 }
2266
2267 #[test]
2269 fn test_render_md_archetype() {
2270 let mut receipt = minimal_receipt();
2271 receipt.archetype = Some(Archetype {
2272 kind: "library".to_string(),
2273 evidence: vec!["Cargo.toml".to_string(), "src/lib.rs".to_string()],
2274 });
2275 let result = render_md(&receipt);
2276 assert!(result.contains("## Archetype"));
2277 assert!(result.contains("- Kind: `library`"));
2278 assert!(result.contains("- Evidence: `Cargo.toml`, `src/lib.rs`"));
2279 }
2280
2281 #[test]
2283 fn test_render_md_archetype_no_evidence() {
2284 let mut receipt = minimal_receipt();
2285 receipt.archetype = Some(Archetype {
2286 kind: "app".to_string(),
2287 evidence: vec![],
2288 });
2289 let result = render_md(&receipt);
2290 assert!(result.contains("## Archetype"));
2291 assert!(result.contains("- Kind: `app`"));
2292 assert!(!result.contains("Evidence"));
2293 }
2294
2295 #[test]
2297 fn test_render_md_topics() {
2298 use std::collections::BTreeMap;
2299 let mut per_module = BTreeMap::new();
2300 per_module.insert(
2301 "src".to_string(),
2302 vec![TopicTerm {
2303 term: "parser".to_string(),
2304 score: 1.5,
2305 tf: 10,
2306 df: 2,
2307 }],
2308 );
2309 let mut receipt = minimal_receipt();
2310 receipt.topics = Some(TopicClouds {
2311 overall: vec![TopicTerm {
2312 term: "code".to_string(),
2313 score: 2.0,
2314 tf: 20,
2315 df: 5,
2316 }],
2317 per_module,
2318 });
2319 let result = render_md(&receipt);
2320 assert!(result.contains("## Topics"));
2321 assert!(result.contains("- Overall: `code`"));
2322 assert!(result.contains("- `src`: parser"));
2323 }
2324
2325 #[test]
2327 fn test_render_md_topics_empty_module() {
2328 use std::collections::BTreeMap;
2329 let mut per_module = BTreeMap::new();
2330 per_module.insert("empty_module".to_string(), vec![]);
2331 let mut receipt = minimal_receipt();
2332 receipt.topics = Some(TopicClouds {
2333 overall: vec![],
2334 per_module,
2335 });
2336 let result = render_md(&receipt);
2337 assert!(!result.contains("empty_module"));
2339 }
2340
2341 #[test]
2343 fn test_render_md_entropy() {
2344 let mut receipt = minimal_receipt();
2345 receipt.entropy = Some(EntropyReport {
2346 suspects: vec![EntropyFinding {
2347 path: "secret.bin".to_string(),
2348 module: "root".to_string(),
2349 entropy_bits_per_byte: 7.5,
2350 sample_bytes: 1024,
2351 class: EntropyClass::High,
2352 }],
2353 });
2354 let result = render_md(&receipt);
2355 assert!(result.contains("## Entropy profiling"));
2356 assert!(result.contains("|secret.bin|root|7.50|1024|High|"));
2357 }
2358
2359 #[test]
2361 fn test_render_md_entropy_no_suspects() {
2362 let mut receipt = minimal_receipt();
2363 receipt.entropy = Some(EntropyReport { suspects: vec![] });
2364 let result = render_md(&receipt);
2365 assert!(result.contains("## Entropy profiling"));
2366 assert!(result.contains("No entropy outliers detected"));
2367 }
2368
2369 #[test]
2371 fn test_render_md_license() {
2372 let mut receipt = minimal_receipt();
2373 receipt.license = Some(LicenseReport {
2374 effective: Some("MIT".to_string()),
2375 findings: vec![LicenseFinding {
2376 spdx: "MIT".to_string(),
2377 confidence: 0.95,
2378 source_path: "LICENSE".to_string(),
2379 source_kind: LicenseSourceKind::Text,
2380 }],
2381 });
2382 let result = render_md(&receipt);
2383 assert!(result.contains("## License radar"));
2384 assert!(result.contains("- Effective: `MIT`"));
2385 assert!(result.contains("|MIT|0.95|LICENSE|Text|"));
2386 }
2387
2388 #[test]
2390 fn test_render_md_license_no_findings() {
2391 let mut receipt = minimal_receipt();
2392 receipt.license = Some(LicenseReport {
2393 effective: None,
2394 findings: vec![],
2395 });
2396 let result = render_md(&receipt);
2397 assert!(result.contains("## License radar"));
2398 assert!(result.contains("Heuristic detection"));
2399 assert!(!result.contains("|SPDX|")); }
2401
2402 #[test]
2404 fn test_render_md_corporate_fingerprint() {
2405 let mut receipt = minimal_receipt();
2406 receipt.corporate_fingerprint = Some(CorporateFingerprint {
2407 domains: vec![DomainStat {
2408 domain: "example.com".to_string(),
2409 commits: 50,
2410 pct: 0.75,
2411 }],
2412 });
2413 let result = render_md(&receipt);
2414 assert!(result.contains("## Corporate fingerprint"));
2415 assert!(result.contains("|example.com|50|75.0%|"));
2416 }
2417
2418 #[test]
2420 fn test_render_md_corporate_fingerprint_no_domains() {
2421 let mut receipt = minimal_receipt();
2422 receipt.corporate_fingerprint = Some(CorporateFingerprint { domains: vec![] });
2423 let result = render_md(&receipt);
2424 assert!(result.contains("## Corporate fingerprint"));
2425 assert!(result.contains("No commit domains detected"));
2426 }
2427
2428 #[test]
2430 fn test_render_md_churn() {
2431 use std::collections::BTreeMap;
2432 let mut per_module = BTreeMap::new();
2433 per_module.insert(
2434 "src".to_string(),
2435 ChurnTrend {
2436 slope: 0.5,
2437 r2: 0.8,
2438 recent_change: 5,
2439 classification: TrendClass::Rising,
2440 },
2441 );
2442 let mut receipt = minimal_receipt();
2443 receipt.predictive_churn = Some(PredictiveChurnReport { per_module });
2444 let result = render_md(&receipt);
2445 assert!(result.contains("## Predictive churn"));
2446 assert!(result.contains("|src|0.5000|0.80|5|Rising|"));
2447 }
2448
2449 #[test]
2451 fn test_render_md_churn_empty() {
2452 use std::collections::BTreeMap;
2453 let mut receipt = minimal_receipt();
2454 receipt.predictive_churn = Some(PredictiveChurnReport {
2455 per_module: BTreeMap::new(),
2456 });
2457 let result = render_md(&receipt);
2458 assert!(result.contains("## Predictive churn"));
2459 assert!(result.contains("No churn signals detected"));
2460 }
2461
2462 #[test]
2464 fn test_render_md_assets() {
2465 let mut receipt = minimal_receipt();
2466 receipt.assets = Some(AssetReport {
2467 total_files: 5,
2468 total_bytes: 1000000,
2469 categories: vec![AssetCategoryRow {
2470 category: "images".to_string(),
2471 files: 3,
2472 bytes: 500000,
2473 extensions: vec!["png".to_string(), "jpg".to_string()],
2474 }],
2475 top_files: vec![AssetFileRow {
2476 path: "logo.png".to_string(),
2477 bytes: 100000,
2478 category: "images".to_string(),
2479 extension: "png".to_string(),
2480 }],
2481 });
2482 let result = render_md(&receipt);
2483 assert!(result.contains("## Assets"));
2484 assert!(result.contains("- Total files: `5`"));
2485 assert!(result.contains("|images|3|500000|png, jpg|"));
2486 assert!(result.contains("|logo.png|100000|images|"));
2487 }
2488
2489 #[test]
2491 fn test_render_md_assets_empty() {
2492 let mut receipt = minimal_receipt();
2493 receipt.assets = Some(AssetReport {
2494 total_files: 0,
2495 total_bytes: 0,
2496 categories: vec![],
2497 top_files: vec![],
2498 });
2499 let result = render_md(&receipt);
2500 assert!(result.contains("## Assets"));
2501 assert!(result.contains("- Total files: `0`"));
2502 assert!(!result.contains("|Category|")); }
2504
2505 #[test]
2507 fn test_render_md_deps() {
2508 let mut receipt = minimal_receipt();
2509 receipt.deps = Some(DependencyReport {
2510 total: 50,
2511 lockfiles: vec![LockfileReport {
2512 path: "Cargo.lock".to_string(),
2513 kind: "cargo".to_string(),
2514 dependencies: 50,
2515 }],
2516 });
2517 let result = render_md(&receipt);
2518 assert!(result.contains("## Dependencies"));
2519 assert!(result.contains("- Total: `50`"));
2520 assert!(result.contains("|Cargo.lock|cargo|50|"));
2521 }
2522
2523 #[test]
2525 fn test_render_md_deps_empty() {
2526 let mut receipt = minimal_receipt();
2527 receipt.deps = Some(DependencyReport {
2528 total: 0,
2529 lockfiles: vec![],
2530 });
2531 let result = render_md(&receipt);
2532 assert!(result.contains("## Dependencies"));
2533 assert!(!result.contains("|Lockfile|"));
2534 }
2535
2536 #[test]
2538 fn test_render_md_git() {
2539 let mut receipt = minimal_receipt();
2540 receipt.git = Some(GitReport {
2541 commits_scanned: 100,
2542 files_seen: 50,
2543 hotspots: vec![HotspotRow {
2544 path: "src/lib.rs".to_string(),
2545 commits: 25,
2546 lines: 500,
2547 score: 12500,
2548 }],
2549 bus_factor: vec![BusFactorRow {
2550 module: "src".to_string(),
2551 authors: 3,
2552 }],
2553 freshness: FreshnessReport {
2554 threshold_days: 90,
2555 stale_files: 5,
2556 total_files: 50,
2557 stale_pct: 0.1,
2558 by_module: vec![ModuleFreshnessRow {
2559 module: "src".to_string(),
2560 avg_days: 30.0,
2561 p90_days: 60.0,
2562 stale_pct: 0.05,
2563 }],
2564 },
2565 coupling: vec![CouplingRow {
2566 left: "src/a.rs".to_string(),
2567 right: "src/b.rs".to_string(),
2568 count: 10,
2569 jaccard: Some(0.5),
2570 lift: Some(1.2),
2571 n_left: Some(15),
2572 n_right: Some(12),
2573 }],
2574 age_distribution: Some(CodeAgeDistributionReport {
2575 buckets: vec![CodeAgeBucket {
2576 label: "0-30d".to_string(),
2577 min_days: 0,
2578 max_days: Some(30),
2579 files: 10,
2580 pct: 0.2,
2581 }],
2582 recent_refreshes: 12,
2583 prior_refreshes: 8,
2584 refresh_trend: TrendClass::Rising,
2585 }),
2586 intent: None,
2587 });
2588 let result = render_md(&receipt);
2589 assert!(result.contains("## Git metrics"));
2590 assert!(result.contains("- Commits scanned: `100`"));
2591 assert!(result.contains("|src/lib.rs|25|500|12500|"));
2592 assert!(result.contains("|src|3|"));
2593 assert!(result.contains("Stale threshold (days): `90`"));
2594 assert!(result.contains("|src|30.00|60.00|5.0%|"));
2595 assert!(result.contains("### Code age"));
2596 assert!(result.contains("Refresh trend: `Rising`"));
2597 assert!(result.contains("|0-30d|0|30|10|20.0%|"));
2598 assert!(result.contains("|src/a.rs|src/b.rs|10|"));
2599 }
2600
2601 #[test]
2603 fn test_render_md_git_empty() {
2604 let mut receipt = minimal_receipt();
2605 receipt.git = Some(GitReport {
2606 commits_scanned: 0,
2607 files_seen: 0,
2608 hotspots: vec![],
2609 bus_factor: vec![],
2610 freshness: FreshnessReport {
2611 threshold_days: 90,
2612 stale_files: 0,
2613 total_files: 0,
2614 stale_pct: 0.0,
2615 by_module: vec![],
2616 },
2617 coupling: vec![],
2618 age_distribution: None,
2619 intent: None,
2620 });
2621 let result = render_md(&receipt);
2622 assert!(result.contains("## Git metrics"));
2623 assert!(!result.contains("### Hotspots"));
2624 assert!(!result.contains("### Bus factor"));
2625 assert!(!result.contains("### Coupling"));
2626 }
2627
2628 #[test]
2630 fn test_render_md_imports() {
2631 let mut receipt = minimal_receipt();
2632 receipt.imports = Some(ImportReport {
2633 granularity: "file".to_string(),
2634 edges: vec![ImportEdge {
2635 from: "src/main.rs".to_string(),
2636 to: "src/lib.rs".to_string(),
2637 count: 5,
2638 }],
2639 });
2640 let result = render_md(&receipt);
2641 assert!(result.contains("## Imports"));
2642 assert!(result.contains("- Granularity: `file`"));
2643 assert!(result.contains("|src/main.rs|src/lib.rs|5|"));
2644 }
2645
2646 #[test]
2648 fn test_render_md_imports_empty() {
2649 let mut receipt = minimal_receipt();
2650 receipt.imports = Some(ImportReport {
2651 granularity: "module".to_string(),
2652 edges: vec![],
2653 });
2654 let result = render_md(&receipt);
2655 assert!(result.contains("## Imports"));
2656 assert!(!result.contains("|From|To|"));
2657 }
2658
2659 #[test]
2661 fn test_render_md_dup() {
2662 let mut receipt = minimal_receipt();
2663 receipt.dup = Some(DuplicateReport {
2664 wasted_bytes: 50000,
2665 strategy: "content".to_string(),
2666 groups: vec![DuplicateGroup {
2667 hash: "abc123".to_string(),
2668 bytes: 1000,
2669 files: vec!["a.txt".to_string(), "b.txt".to_string()],
2670 }],
2671 density: Some(DuplicationDensityReport {
2672 duplicate_groups: 1,
2673 duplicate_files: 2,
2674 duplicated_bytes: 2000,
2675 wasted_bytes: 1000,
2676 wasted_pct_of_codebase: 0.1,
2677 by_module: vec![ModuleDuplicationDensityRow {
2678 module: "src".to_string(),
2679 duplicate_files: 2,
2680 wasted_files: 1,
2681 duplicated_bytes: 2000,
2682 wasted_bytes: 1000,
2683 module_bytes: 10_000,
2684 density: 0.1,
2685 }],
2686 }),
2687 near: None,
2688 });
2689 let result = render_md(&receipt);
2690 assert!(result.contains("## Duplicates"));
2691 assert!(result.contains("- Wasted bytes: `50000`"));
2692 assert!(result.contains("### Duplication density"));
2693 assert!(result.contains("Waste vs codebase: `10.0%`"));
2694 assert!(result.contains("|src|2|1|2000|1000|10000|10.0%|"));
2695 assert!(result.contains("|abc123|1000|2|")); }
2697
2698 #[test]
2700 fn test_render_md_dup_empty() {
2701 let mut receipt = minimal_receipt();
2702 receipt.dup = Some(DuplicateReport {
2703 wasted_bytes: 0,
2704 strategy: "content".to_string(),
2705 groups: vec![],
2706 density: None,
2707 near: None,
2708 });
2709 let result = render_md(&receipt);
2710 assert!(result.contains("## Duplicates"));
2711 assert!(!result.contains("|Hash|Bytes|"));
2712 }
2713
2714 #[test]
2716 fn test_render_md_fun() {
2717 let mut receipt = minimal_receipt();
2718 receipt.fun = Some(FunReport {
2719 eco_label: Some(EcoLabel {
2720 label: "A+".to_string(),
2721 score: 95.5,
2722 bytes: 10000,
2723 notes: "Very efficient".to_string(),
2724 }),
2725 });
2726 let result = render_md(&receipt);
2727 assert!(result.contains("## Eco label"));
2728 assert!(result.contains("- Label: `A+`"));
2729 assert!(result.contains("- Score: `95.5`"));
2730 }
2731
2732 #[test]
2734 fn test_render_md_fun_no_label() {
2735 let mut receipt = minimal_receipt();
2736 receipt.fun = Some(FunReport { eco_label: None });
2737 let result = render_md(&receipt);
2738 assert!(!result.contains("## Eco label"));
2740 }
2741
2742 #[test]
2744 fn test_render_md_derived() {
2745 let mut receipt = minimal_receipt();
2746 receipt.derived = Some(sample_derived());
2747 let result = render_md(&receipt);
2748 assert!(result.contains("## Totals"));
2749 assert!(result.contains("|10|1000|200|100|1300|50000|2500|"));
2750 assert!(result.contains("## Ratios"));
2751 assert!(result.contains("## Distribution"));
2752 assert!(result.contains("## File size histogram"));
2753 assert!(result.contains("## Top offenders"));
2754 assert!(result.contains("## Structure"));
2755 assert!(result.contains("## Test density"));
2756 assert!(result.contains("## TODOs"));
2757 assert!(result.contains("## Boilerplate ratio"));
2758 assert!(result.contains("## Polyglot"));
2759 assert!(result.contains("## Reading time"));
2760 assert!(result.contains("## Context window"));
2761 assert!(result.contains("## Effort estimate"));
2762 assert!(result.contains("### Size basis"));
2763 assert!(result.contains("### Headline"));
2764 assert!(result.contains("### Why"));
2765 assert!(result.contains("### Delta"));
2766 assert!(result.contains("## Integrity"));
2767 }
2768
2769 #[test]
2771 fn test_render_dispatch_md() {
2772 let receipt = minimal_receipt();
2773 let result = render(&receipt, AnalysisFormat::Md).unwrap();
2774 match result {
2775 RenderedOutput::Text(s) => assert!(s.starts_with("# tokmd analysis")),
2776 RenderedOutput::Binary(_) => panic!("expected text"),
2777 }
2778 }
2779
2780 #[test]
2781 fn test_render_dispatch_json() {
2782 let receipt = minimal_receipt();
2783 let result = render(&receipt, AnalysisFormat::Json).unwrap();
2784 match result {
2785 RenderedOutput::Text(s) => assert!(s.contains("\"schema_version\": 2")),
2786 RenderedOutput::Binary(_) => panic!("expected text"),
2787 }
2788 }
2789
2790 #[test]
2791 fn test_render_dispatch_xml() {
2792 let receipt = minimal_receipt();
2793 let result = render(&receipt, AnalysisFormat::Xml).unwrap();
2794 match result {
2795 RenderedOutput::Text(s) => assert!(s.contains("<analysis>")),
2796 RenderedOutput::Binary(_) => panic!("expected text"),
2797 }
2798 }
2799
2800 #[test]
2801 fn test_render_dispatch_tree() {
2802 let receipt = minimal_receipt();
2803 let result = render(&receipt, AnalysisFormat::Tree).unwrap();
2804 match result {
2805 RenderedOutput::Text(s) => assert!(s.contains("(tree unavailable)")),
2806 RenderedOutput::Binary(_) => panic!("expected text"),
2807 }
2808 }
2809
2810 #[test]
2811 fn test_render_dispatch_svg() {
2812 let receipt = minimal_receipt();
2813 let result = render(&receipt, AnalysisFormat::Svg).unwrap();
2814 match result {
2815 RenderedOutput::Text(s) => assert!(s.contains("<svg")),
2816 RenderedOutput::Binary(_) => panic!("expected text"),
2817 }
2818 }
2819
2820 #[test]
2821 fn test_render_dispatch_mermaid() {
2822 let receipt = minimal_receipt();
2823 let result = render(&receipt, AnalysisFormat::Mermaid).unwrap();
2824 match result {
2825 RenderedOutput::Text(s) => assert!(s.starts_with("graph TD")),
2826 RenderedOutput::Binary(_) => panic!("expected text"),
2827 }
2828 }
2829
2830 #[test]
2831 fn test_render_dispatch_jsonld() {
2832 let receipt = minimal_receipt();
2833 let result = render(&receipt, AnalysisFormat::Jsonld).unwrap();
2834 match result {
2835 RenderedOutput::Text(s) => assert!(s.contains("@context")),
2836 RenderedOutput::Binary(_) => panic!("expected text"),
2837 }
2838 }
2839
2840 #[test]
2842 fn test_render_html() {
2843 let mut receipt = minimal_receipt();
2844 receipt.derived = Some(sample_derived());
2845 let result = render_html(&receipt);
2846 assert!(result.contains("<!DOCTYPE html>") || result.contains("<html"));
2847 }
2848
2849 #[test]
2850 fn render_effort_report_renders_full_sections_when_effort_is_present() {
2851 let effort = test_effort_report(true, true);
2852 let mut out = String::new();
2853
2854 render_effort_report(&mut out, &effort);
2855
2856 assert!(out.contains("## Effort estimate"));
2857 assert!(out.contains("### Size basis"));
2858 assert!(out.contains("### Size by tag"));
2859 assert!(out.contains("### Headline"));
2860 assert!(out.contains("### Why"));
2861 assert!(out.contains("### Drivers"));
2862 assert!(out.contains("### Assumptions"));
2863 assert!(out.contains("### Assumption overrides"));
2864 assert!(out.contains("### Delta"));
2865
2866 assert!(out.contains("Authored LOC lines: `8500`"));
2867 assert!(out.contains("Generated LOC lines: `1000`"));
2868 assert!(out.contains("Vendored LOC lines: `500`"));
2869 assert!(out.contains("Authoring KLOC: `8.5000`"));
2870 assert!(out.contains("Effort p50: `18.5000` person-months"));
2871 assert!(out.contains("Schedule p50: `7.0000` months"));
2872 assert!(out.contains("Staff p50: `2.6000` FTE"));
2873
2874 assert!(out.contains("|core|7000|7000|70.0%|"));
2875 assert!(out.contains("|generated|1000|0|10.0%|"));
2876 assert!(out.contains("|vendored|500|0|5.0%|"));
2877
2878 assert!(out.contains("top 5 files drive 61% of churn"));
2879 assert!(out.contains("documented ratio 0.42"));
2880 assert!(out.contains("Reference window: `main`..`HEAD`"));
2881 assert!(out.contains("Files changed: `14`"));
2882 assert!(out.contains("Blast radius: `17.5000`"));
2883 }
2884
2885 #[test]
2886 fn render_effort_report_emits_empty_state_when_drivers_and_delta_are_absent() {
2887 let effort = test_effort_report(false, false);
2888 let mut out = String::new();
2889
2890 render_effort_report(&mut out, &effort);
2891
2892 assert!(out.contains("## Effort estimate"));
2893 assert!(out.contains("### Drivers"));
2894 assert!(out.contains("No material drivers were inferred."));
2895 assert!(out.contains("### Delta"));
2896 assert!(out.contains("Baseline comparison is not available for this receipt."));
2897 assert!(!out.contains("|Driver|Direction|Weight|Evidence|"));
2898 }
2899
2900 #[test]
2901 fn render_legacy_cocomo_report_uses_derived_totals_for_size_basis() {
2902 let derived = test_derived_report_for_effort(2500);
2903 let cocomo = CocomoReport {
2904 mode: "organic".to_string(),
2905 kloc: 2.5,
2906 effort_pm: 11.23,
2907 duration_months: 6.42,
2908 staff: 1.75,
2909 a: 2.4,
2910 b: 1.05,
2911 c: 2.5,
2912 d: 0.38,
2913 };
2914 let mut out = String::new();
2915
2916 render_legacy_cocomo_report(&mut out, &derived, &cocomo);
2917
2918 assert!(out.contains("## Effort estimate"));
2919 assert!(out.contains("### Size basis"));
2920 assert!(out.contains("Source lines: `2500`"));
2921 assert!(out.contains(&format!("Total lines: `{}`", derived.totals.lines)));
2922 assert!(out.contains("KLOC: `2.5000`"));
2923
2924 assert!(out.contains("### Headline"));
2925 assert!(out.contains("Effort: `11.23` person-months"));
2926 assert!(out.contains("Duration: `6.42` months"));
2927 assert!(out.contains("Staff: `1.75`"));
2928
2929 assert!(out.contains("### Why"));
2930 assert!(out.contains("Model: `COCOMO` (`organic` mode)"));
2931 assert!(out.contains("Coefficients: `a=2.40`, `b=1.05`, `c=2.50`, `d=0.38`"));
2932
2933 assert!(out.contains("### Delta"));
2934 assert!(out.contains("Baseline comparison is not available for this receipt."));
2935 }
2936
2937 fn test_effort_report(with_delta: bool, with_drivers: bool) -> EffortEstimateReport {
2940 let delta = with_delta.then(|| EffortDeltaReport {
2941 base: "main".to_string(),
2942 head: "HEAD".to_string(),
2943 files_changed: 14,
2944 modules_changed: 3,
2945 langs_changed: 2,
2946 hotspot_files_touched: 2,
2947 coupled_neighbors_touched: 4,
2948 blast_radius: 17.5,
2949 classification: EffortDeltaClassification::Medium,
2950 effort_pm_low: 1.5,
2951 effort_pm_est: 3.2,
2952 effort_pm_high: 5.8,
2953 });
2954
2955 let drivers = if with_drivers {
2956 vec![
2957 EffortDriver {
2958 key: "hotspots".to_string(),
2959 label: "High hotspot concentration".to_string(),
2960 weight: 0.42,
2961 direction: EffortDriverDirection::Raises,
2962 evidence: "top 5 files drive 61% of churn".to_string(),
2963 },
2964 EffortDriver {
2965 key: "docs".to_string(),
2966 label: "Weak API documentation coverage".to_string(),
2967 weight: 0.22,
2968 direction: EffortDriverDirection::Raises,
2969 evidence: "documented ratio 0.42".to_string(),
2970 },
2971 ]
2972 } else {
2973 Vec::new()
2974 };
2975
2976 EffortEstimateReport {
2977 model: EffortModel::Cocomo81Basic,
2978 size_basis: EffortSizeBasis {
2979 total_lines: 10_000,
2980 authored_lines: 8_500,
2981 generated_lines: 1_000,
2982 vendored_lines: 500,
2983 kloc_total: 10.0,
2984 kloc_authored: 8.5,
2985 generated_pct: 0.10,
2986 vendored_pct: 0.05,
2987 classification_confidence: EffortConfidenceLevel::High,
2988 warnings: vec!["1 path matched generated heuristics".to_string()],
2989 by_tag: vec![
2990 EffortTagSizeRow {
2991 tag: "core".to_string(),
2992 lines: 7_000,
2993 authored_lines: 7_000,
2994 pct_of_total: 0.70,
2995 },
2996 EffortTagSizeRow {
2997 tag: "generated".to_string(),
2998 lines: 1_000,
2999 authored_lines: 0,
3000 pct_of_total: 0.10,
3001 },
3002 EffortTagSizeRow {
3003 tag: "vendored".to_string(),
3004 lines: 500,
3005 authored_lines: 0,
3006 pct_of_total: 0.05,
3007 },
3008 ],
3009 },
3010 results: EffortResults {
3011 effort_pm_low: 12.0,
3012 effort_pm_p50: 18.5,
3013 effort_pm_p80: 27.0,
3014 schedule_months_low: 5.5,
3015 schedule_months_p50: 7.0,
3016 schedule_months_p80: 9.8,
3017 staff_low: 2.0,
3018 staff_p50: 2.6,
3019 staff_p80: 3.8,
3020 },
3021 confidence: EffortConfidence {
3022 level: EffortConfidenceLevel::Medium,
3023 reasons: vec![
3024 "git history available for hotspot and coupling signals".to_string(),
3025 "size basis includes generated/vendored classification".to_string(),
3026 ],
3027 data_coverage_pct: Some(0.83),
3028 },
3029 drivers,
3030 assumptions: EffortAssumptions {
3031 notes: vec![
3032 "uses authored lines as effort size basis".to_string(),
3033 "p80 widens the deterministic baseline using risk signals".to_string(),
3034 ],
3035 overrides: BTreeMap::from([
3036 ("effort_model".to_string(), "cocomo81_basic".to_string()),
3037 (
3038 "classification_mode".to_string(),
3039 "heuristic+paths".to_string(),
3040 ),
3041 ]),
3042 },
3043 delta,
3044 }
3045 }
3046
3047 fn test_derived_report_for_effort(code_lines: usize) -> DerivedReport {
3050 let ratio_zero = RatioReport {
3051 total: RatioRow {
3052 key: "total".into(),
3053 numerator: 0,
3054 denominator: code_lines,
3055 ratio: 0.0,
3056 },
3057 by_lang: vec![],
3058 by_module: vec![],
3059 };
3060
3061 let rate_zero = RateReport {
3062 total: RateRow {
3063 key: "total".into(),
3064 numerator: 0,
3065 denominator: code_lines,
3066 rate: 0.0,
3067 },
3068 by_lang: vec![],
3069 by_module: vec![],
3070 };
3071
3072 DerivedReport {
3073 totals: DerivedTotals {
3074 files: 10,
3075 code: code_lines,
3076 comments: 100,
3077 blanks: 50,
3078 lines: code_lines + 150,
3079 bytes: code_lines * 40,
3080 tokens: code_lines * 3,
3081 },
3082 doc_density: ratio_zero.clone(),
3083 whitespace: ratio_zero,
3084 verbosity: rate_zero,
3085 max_file: MaxFileReport {
3086 overall: FileStatRow {
3087 path: "src/main.rs".into(),
3088 module: "src".into(),
3089 lang: "Rust".into(),
3090 code: code_lines,
3091 comments: 0,
3092 blanks: 0,
3093 lines: code_lines,
3094 bytes: code_lines * 40,
3095 tokens: code_lines * 3,
3096 doc_pct: None,
3097 bytes_per_line: Some(40.0),
3098 depth: 1,
3099 },
3100 by_lang: vec![],
3101 by_module: vec![],
3102 },
3103 lang_purity: LangPurityReport { rows: vec![] },
3104 nesting: NestingReport {
3105 max: 1,
3106 avg: 1.0,
3107 by_module: vec![],
3108 },
3109 test_density: TestDensityReport {
3110 test_lines: 0,
3111 prod_lines: code_lines,
3112 test_files: 0,
3113 prod_files: 10,
3114 ratio: 0.0,
3115 },
3116 boilerplate: BoilerplateReport {
3117 infra_lines: 0,
3118 logic_lines: code_lines,
3119 ratio: 0.0,
3120 infra_langs: vec![],
3121 },
3122 polyglot: PolyglotReport {
3123 lang_count: 1,
3124 entropy: 0.0,
3125 dominant_lang: "Rust".into(),
3126 dominant_lines: code_lines,
3127 dominant_pct: 1.0,
3128 },
3129 distribution: DistributionReport {
3130 count: 10,
3131 min: 10,
3132 max: code_lines,
3133 mean: code_lines as f64 / 10.0,
3134 median: code_lines as f64 / 10.0,
3135 p90: code_lines as f64,
3136 p99: code_lines as f64,
3137 gini: 0.0,
3138 },
3139 histogram: vec![],
3140 top: TopOffenders {
3141 largest_lines: vec![],
3142 largest_tokens: vec![],
3143 largest_bytes: vec![],
3144 least_documented: vec![],
3145 most_dense: vec![],
3146 },
3147 tree: None,
3148 reading_time: ReadingTimeReport {
3149 minutes: 1.0,
3150 lines_per_minute: 200,
3151 basis_lines: code_lines,
3152 },
3153 context_window: None,
3154 cocomo: None,
3155 todo: None,
3156 integrity: IntegrityReport {
3157 algo: "blake3".into(),
3158 hash: "test".into(),
3159 entries: 10,
3160 },
3161 }
3162 }
3163}