1use anyhow::Result;
19use tokmd_analysis_types::{AnalysisReceipt, FileStatRow};
20use tokmd_types::AnalysisFormat;
21
/// Result of [`render`]: either UTF-8 text or raw bytes, depending on format.
pub enum RenderedOutput {
    /// Textual output (Markdown, JSON, JSON-LD, XML, SVG, Mermaid, OBJ, tree, HTML).
    Text(String),
    /// Binary output (currently only MIDI).
    Binary(Vec<u8>),
}
26
27pub fn render(receipt: &AnalysisReceipt, format: AnalysisFormat) -> Result<RenderedOutput> {
28 match format {
29 AnalysisFormat::Md => Ok(RenderedOutput::Text(render_md(receipt))),
30 AnalysisFormat::Json => Ok(RenderedOutput::Text(serde_json::to_string_pretty(receipt)?)),
31 AnalysisFormat::Jsonld => Ok(RenderedOutput::Text(render_jsonld(receipt))),
32 AnalysisFormat::Xml => Ok(RenderedOutput::Text(render_xml(receipt))),
33 AnalysisFormat::Svg => Ok(RenderedOutput::Text(render_svg(receipt))),
34 AnalysisFormat::Mermaid => Ok(RenderedOutput::Text(render_mermaid(receipt))),
35 AnalysisFormat::Obj => Ok(RenderedOutput::Text(render_obj(receipt)?)),
36 AnalysisFormat::Midi => Ok(RenderedOutput::Binary(render_midi(receipt)?)),
37 AnalysisFormat::Tree => Ok(RenderedOutput::Text(render_tree(receipt))),
38 AnalysisFormat::Html => Ok(RenderedOutput::Text(render_html(receipt))),
39 }
40}
41
/// Renders the analysis receipt as a human-readable Markdown report.
///
/// Each section is emitted only when the corresponding receipt field is
/// populated (`archetype`, `topics`, `entropy`, `derived`, `git`, ...), so a
/// minimal receipt yields just the title and preset line. Most tables are
/// truncated to the top 10 rows (imports and near-dup pairs: 20) to keep the
/// report readable.
fn render_md(receipt: &AnalysisReceipt) -> String {
    let mut out = String::new();
    out.push_str("# tokmd analysis\n\n");
    out.push_str(&format!("Preset: `{}`\n\n", receipt.args.preset));

    // Inputs recorded by the source stage (paths/labels the scan covered).
    if !receipt.source.inputs.is_empty() {
        out.push_str("## Inputs\n\n");
        for input in &receipt.source.inputs {
            out.push_str(&format!("- `{}`\n", input));
        }
        out.push('\n');
    }

    if let Some(archetype) = &receipt.archetype {
        out.push_str("## Archetype\n\n");
        out.push_str(&format!("- Kind: `{}`\n", archetype.kind));
        if !archetype.evidence.is_empty() {
            // Join with "`, `" so every evidence item renders as its own code span.
            out.push_str(&format!(
                "- Evidence: `{}`\n",
                archetype.evidence.join("`, `")
            ));
        }
        out.push('\n');
    }

    if let Some(topics) = &receipt.topics {
        out.push_str("## Topics\n\n");
        if !topics.overall.is_empty() {
            out.push_str(&format!(
                "- Overall: `{}`\n",
                topics
                    .overall
                    .iter()
                    .map(|t| t.term.as_str())
                    .collect::<Vec<_>>()
                    .join(", ")
            ));
        }
        // One bullet per module; modules with no terms are skipped entirely.
        for (module, terms) in &topics.per_module {
            if terms.is_empty() {
                continue;
            }
            let line = terms
                .iter()
                .map(|t| t.term.as_str())
                .collect::<Vec<_>>()
                .join(", ");
            out.push_str(&format!("- `{}`: {}\n", module, line));
        }
        out.push('\n');
    }

    if let Some(entropy) = &receipt.entropy {
        out.push_str("## Entropy profiling\n\n");
        if entropy.suspects.is_empty() {
            out.push_str("- No entropy outliers detected.\n\n");
        } else {
            out.push_str("|Path|Module|Entropy|Sample bytes|Class|\n");
            out.push_str("|---|---|---:|---:|---|\n");
            for row in entropy.suspects.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{:?}|\n",
                    row.path,
                    row.module,
                    fmt_f64(row.entropy_bits_per_byte as f64, 2),
                    row.sample_bytes,
                    row.class
                ));
            }
            out.push('\n');
        }
    }

    if let Some(license) = &receipt.license {
        out.push_str("## License radar\n\n");
        if let Some(effective) = &license.effective {
            out.push_str(&format!("- Effective: `{}`\n", effective));
        }
        out.push_str("- Heuristic detection; not legal advice.\n\n");
        if !license.findings.is_empty() {
            out.push_str("|SPDX|Confidence|Source|Kind|\n");
            out.push_str("|---|---:|---|---|\n");
            for row in license.findings.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{:?}|\n",
                    row.spdx,
                    fmt_f64(row.confidence as f64, 2),
                    row.source_path,
                    row.source_kind
                ));
            }
            out.push('\n');
        }
    }

    if let Some(fingerprint) = &receipt.corporate_fingerprint {
        out.push_str("## Corporate fingerprint\n\n");
        if fingerprint.domains.is_empty() {
            out.push_str("- No commit domains detected.\n\n");
        } else {
            out.push_str("|Domain|Commits|Pct|\n");
            out.push_str("|---|---:|---:|\n");
            for row in fingerprint.domains.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.domain,
                    row.commits,
                    fmt_pct(row.pct as f64)
                ));
            }
            out.push('\n');
        }
    }

    if let Some(churn) = &receipt.predictive_churn {
        out.push_str("## Predictive churn\n\n");
        // Sort steepest (most increasing) slope first; tie-break on module name
        // for deterministic output. NaN slopes compare as Equal.
        let mut rows: Vec<_> = churn.per_module.iter().collect();
        rows.sort_by(|a, b| {
            b.1.slope
                .partial_cmp(&a.1.slope)
                .unwrap_or(std::cmp::Ordering::Equal)
                .then_with(|| a.0.cmp(b.0))
        });
        if rows.is_empty() {
            out.push_str("- No churn signals detected.\n\n");
        } else {
            out.push_str("|Module|Slope|R²|Recent change|Class|\n");
            out.push_str("|---|---:|---:|---:|---|\n");
            for (module, trend) in rows.into_iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{:?}|\n",
                    module,
                    fmt_f64(trend.slope, 4),
                    fmt_f64(trend.r2, 2),
                    trend.recent_change,
                    trend.classification
                ));
            }
            out.push('\n');
        }
    }

    // Derived metrics: the bulk of the report (totals, ratios, distribution,
    // histogram, top offenders, structure, estimates, integrity).
    if let Some(derived) = &receipt.derived {
        out.push_str("## Totals\n\n");
        out.push_str("|Files|Code|Comments|Blanks|Lines|Bytes|Tokens|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|\n");
        out.push_str(&format!(
            "|{}|{}|{}|{}|{}|{}|{}|\n\n",
            derived.totals.files,
            derived.totals.code,
            derived.totals.comments,
            derived.totals.blanks,
            derived.totals.lines,
            derived.totals.bytes,
            derived.totals.tokens
        ));

        out.push_str("## Ratios\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!(
            "|Doc density|{}|\n",
            fmt_pct(derived.doc_density.total.ratio)
        ));
        out.push_str(&format!(
            "|Whitespace ratio|{}|\n",
            fmt_pct(derived.whitespace.total.ratio)
        ));
        out.push_str(&format!(
            "|Bytes per line|{}|\n\n",
            fmt_f64(derived.verbosity.total.rate, 2)
        ));

        out.push_str("### Doc density by language\n\n");
        out.push_str("|Lang|Doc%|Comments|Code|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.doc_density.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                // "Code" column: denominator is comments+code, so subtract the
                // comment count; saturating guards against inconsistent rows.
                row.denominator.saturating_sub(row.numerator)
            ));
        }
        out.push('\n');

        out.push_str("### Whitespace ratio by language\n\n");
        out.push_str("|Lang|Blank%|Blanks|Code+Comments|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.whitespace.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                row.denominator
            ));
        }
        out.push('\n');

        out.push_str("### Verbosity by language\n\n");
        out.push_str("|Lang|Bytes/Line|Bytes|Lines|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.verbosity.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_f64(row.rate, 2),
                row.numerator,
                row.denominator
            ));
        }
        out.push('\n');

        out.push_str("## Distribution\n\n");
        out.push_str("|Count|Min|Max|Mean|Median|P90|P99|Gini|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|---:|\n");
        out.push_str(&format!(
            "|{}|{}|{}|{}|{}|{}|{}|{}|\n\n",
            derived.distribution.count,
            derived.distribution.min,
            derived.distribution.max,
            fmt_f64(derived.distribution.mean, 2),
            fmt_f64(derived.distribution.median, 2),
            fmt_f64(derived.distribution.p90, 2),
            fmt_f64(derived.distribution.p99, 2),
            fmt_f64(derived.distribution.gini, 4)
        ));

        out.push_str("## File size histogram\n\n");
        out.push_str("|Bucket|Min|Max|Files|Pct|\n");
        out.push_str("|---|---:|---:|---:|---:|\n");
        for bucket in &derived.histogram {
            // The last bucket is open-ended (no max); render it as infinity.
            let max = bucket
                .max
                .map(|v| v.to_string())
                .unwrap_or_else(|| "∞".to_string());
            out.push_str(&format!(
                "|{}|{}|{}|{}|{}|\n",
                bucket.label,
                bucket.min,
                max,
                bucket.files,
                fmt_pct(bucket.pct)
            ));
        }
        out.push('\n');

        out.push_str("## Top offenders\n\n");
        out.push_str("### Largest files by lines\n\n");
        out.push_str(&render_file_table(&derived.top.largest_lines));
        out.push('\n');

        out.push_str("### Largest files by tokens\n\n");
        out.push_str(&render_file_table(&derived.top.largest_tokens));
        out.push('\n');

        out.push_str("### Largest files by bytes\n\n");
        out.push_str(&render_file_table(&derived.top.largest_bytes));
        out.push('\n');

        out.push_str("### Least documented (min LOC)\n\n");
        out.push_str(&render_file_table(&derived.top.least_documented));
        out.push('\n');

        out.push_str("### Most dense (bytes/line)\n\n");
        out.push_str(&render_file_table(&derived.top.most_dense));
        out.push('\n');

        out.push_str("## Structure\n\n");
        out.push_str(&format!(
            "- Max depth: `{}`\n- Avg depth: `{}`\n\n",
            derived.nesting.max,
            fmt_f64(derived.nesting.avg, 2)
        ));

        out.push_str("## Test density\n\n");
        out.push_str(&format!(
            "- Test lines: `{}`\n- Prod lines: `{}`\n- Test ratio: `{}`\n\n",
            derived.test_density.test_lines,
            derived.test_density.prod_lines,
            fmt_pct(derived.test_density.ratio)
        ));

        if let Some(todo) = &derived.todo {
            out.push_str("## TODOs\n\n");
            out.push_str(&format!(
                "- Total: `{}`\n- Density (per KLOC): `{}`\n\n",
                todo.total,
                fmt_f64(todo.density_per_kloc, 2)
            ));
            out.push_str("|Tag|Count|\n");
            out.push_str("|---|---:|\n");
            for tag in &todo.tags {
                out.push_str(&format!("|{}|{}|\n", tag.tag, tag.count));
            }
            out.push('\n');
        }

        out.push_str("## Boilerplate ratio\n\n");
        out.push_str(&format!(
            "- Infra lines: `{}`\n- Logic lines: `{}`\n- Infra ratio: `{}`\n\n",
            derived.boilerplate.infra_lines,
            derived.boilerplate.logic_lines,
            fmt_pct(derived.boilerplate.ratio)
        ));

        out.push_str("## Polyglot\n\n");
        out.push_str(&format!(
            "- Languages: `{}`\n- Dominant: `{}` ({})\n- Entropy: `{}`\n\n",
            derived.polyglot.lang_count,
            derived.polyglot.dominant_lang,
            fmt_pct(derived.polyglot.dominant_pct),
            fmt_f64(derived.polyglot.entropy, 4)
        ));

        out.push_str("## Reading time\n\n");
        out.push_str(&format!(
            "- Minutes: `{}` ({} lines/min)\n\n",
            fmt_f64(derived.reading_time.minutes, 2),
            derived.reading_time.lines_per_minute
        ));

        if let Some(context) = &derived.context_window {
            out.push_str("## Context window\n\n");
            out.push_str(&format!(
                "- Window tokens: `{}`\n- Total tokens: `{}`\n- Utilization: `{}`\n- Fits: `{}`\n\n",
                context.window_tokens,
                context.total_tokens,
                fmt_pct(context.pct),
                context.fits
            ));
        }

        if let Some(cocomo) = &derived.cocomo {
            out.push_str("## COCOMO estimate\n\n");
            out.push_str(&format!(
                "- Mode: `{}`\n- KLOC: `{}`\n- Effort (PM): `{}`\n- Duration (months): `{}`\n- Staff: `{}`\n\n",
                cocomo.mode,
                fmt_f64(cocomo.kloc, 4),
                fmt_f64(cocomo.effort_pm, 2),
                fmt_f64(cocomo.duration_months, 2),
                fmt_f64(cocomo.staff, 2)
            ));
        }

        out.push_str("## Integrity\n\n");
        out.push_str(&format!(
            "- Hash: `{}` (`{}`)\n- Entries: `{}`\n\n",
            derived.integrity.hash, derived.integrity.algo, derived.integrity.entries
        ));
    }

    if let Some(assets) = &receipt.assets {
        out.push_str("## Assets\n\n");
        out.push_str(&format!(
            "- Total files: `{}`\n- Total bytes: `{}`\n\n",
            assets.total_files, assets.total_bytes
        ));
        if !assets.categories.is_empty() {
            out.push_str("|Category|Files|Bytes|Extensions|\n");
            out.push_str("|---|---:|---:|---|\n");
            for row in &assets.categories {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.category,
                    row.files,
                    row.bytes,
                    row.extensions.join(", ")
                ));
            }
            out.push('\n');
        }
        if !assets.top_files.is_empty() {
            out.push_str("|File|Bytes|Category|\n");
            out.push_str("|---|---:|---|\n");
            for row in &assets.top_files {
                out.push_str(&format!("|{}|{}|{}|\n", row.path, row.bytes, row.category));
            }
            out.push('\n');
        }
    }

    if let Some(deps) = &receipt.deps {
        out.push_str("## Dependencies\n\n");
        out.push_str(&format!("- Total: `{}`\n\n", deps.total));
        if !deps.lockfiles.is_empty() {
            out.push_str("|Lockfile|Kind|Dependencies|\n");
            out.push_str("|---|---|---:|\n");
            for row in &deps.lockfiles {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.path, row.kind, row.dependencies
                ));
            }
            out.push('\n');
        }
    }

    // Git-derived metrics: hotspots, bus factor, freshness, age, coupling,
    // and commit-intent classification.
    if let Some(git) = &receipt.git {
        out.push_str("## Git metrics\n\n");
        out.push_str(&format!(
            "- Commits scanned: `{}`\n- Files seen: `{}`\n\n",
            git.commits_scanned, git.files_seen
        ));
        if !git.hotspots.is_empty() {
            out.push_str("### Hotspots\n\n");
            out.push_str("|File|Commits|Lines|Score|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.hotspots.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.path, row.commits, row.lines, row.score
                ));
            }
            out.push('\n');
        }
        if !git.bus_factor.is_empty() {
            out.push_str("### Bus factor\n\n");
            out.push_str("|Module|Authors|\n");
            out.push_str("|---|---:|\n");
            for row in git.bus_factor.iter().take(10) {
                out.push_str(&format!("|{}|{}|\n", row.module, row.authors));
            }
            out.push('\n');
        }
        out.push_str("### Freshness\n\n");
        out.push_str(&format!(
            "- Stale threshold (days): `{}`\n- Stale files: `{}` / `{}` ({})\n\n",
            git.freshness.threshold_days,
            git.freshness.stale_files,
            git.freshness.total_files,
            fmt_pct(git.freshness.stale_pct)
        ));
        if !git.freshness.by_module.is_empty() {
            out.push_str("|Module|Avg days|P90 days|Stale%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.freshness.by_module.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.module,
                    fmt_f64(row.avg_days, 2),
                    fmt_f64(row.p90_days, 2),
                    fmt_pct(row.stale_pct)
                ));
            }
            out.push('\n');
        }
        if let Some(age) = &git.age_distribution {
            out.push_str("### Code age\n\n");
            out.push_str(&format!(
                "- Refresh trend: `{:?}` (recent: `{}`, prior: `{}`)\n\n",
                age.refresh_trend, age.recent_refreshes, age.prior_refreshes
            ));
            if !age.buckets.is_empty() {
                out.push_str("|Bucket|Min days|Max days|Files|Pct|\n");
                out.push_str("|---|---:|---:|---:|---:|\n");
                for bucket in &age.buckets {
                    // Open-ended oldest bucket rendered as infinity.
                    let max = bucket
                        .max_days
                        .map(|v| v.to_string())
                        .unwrap_or_else(|| "∞".to_string());
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        bucket.label,
                        bucket.min_days,
                        max,
                        bucket.files,
                        fmt_pct(bucket.pct)
                    ));
                }
                out.push('\n');
            }
        }
        if !git.coupling.is_empty() {
            // Hide pairs seen only once: a single co-commit is noise, not coupling.
            let filtered: Vec<_> = git.coupling.iter().filter(|r| r.count >= 2).collect();
            if !filtered.is_empty() {
                out.push_str("### Coupling\n\n");
                out.push_str("|Left|Right|Count|Jaccard|Lift|\n");
                out.push_str("|---|---|---:|---:|---:|\n");
                for row in filtered.iter().take(10) {
                    let jaccard = row
                        .jaccard
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    let lift = row
                        .lift
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        row.left, row.right, row.count, jaccard, lift
                    ));
                }
                out.push('\n');
            }
        }

        if let Some(intent) = &git.intent {
            out.push_str("### Commit intent\n\n");
            out.push_str("|Type|Count|\n");
            out.push_str("|---|---:|\n");
            let o = &intent.overall;
            // Fixed display order for conventional-commit categories.
            let entries = [
                ("feat", o.feat),
                ("fix", o.fix),
                ("refactor", o.refactor),
                ("docs", o.docs),
                ("test", o.test),
                ("chore", o.chore),
                ("ci", o.ci),
                ("build", o.build),
                ("perf", o.perf),
                ("style", o.style),
                ("revert", o.revert),
                ("other", o.other),
            ];
            // Zero-count categories are omitted; the total row is always emitted.
            for (name, count) in entries {
                if count > 0 {
                    out.push_str(&format!("|{}|{}|\n", name, count));
                }
            }
            out.push_str(&format!("|**total**|{}|\n", o.total));
            out.push_str(&format!("\n- Unknown: `{}`\n", fmt_pct(intent.unknown_pct)));
            if let Some(cr) = intent.corrective_ratio {
                out.push_str(&format!(
                    "- Corrective ratio (fix+revert/total): `{}`\n",
                    fmt_pct(cr)
                ));
            }
            out.push('\n');

            // Maintenance hotspots: modules ranked by the share of corrective
            // (fix+revert) commits, descending; ties broken by module name.
            let mut maintenance: Vec<_> = intent
                .by_module
                .iter()
                .filter(|m| m.counts.total > 0)
                .map(|m| {
                    let fix_revert = m.counts.fix + m.counts.revert;
                    let share = fix_revert as f64 / m.counts.total as f64;
                    (m, share)
                })
                .filter(|(_, share)| *share > 0.0)
                .collect();
            maintenance.sort_by(|a, b| {
                b.1.partial_cmp(&a.1)
                    .unwrap_or(std::cmp::Ordering::Equal)
                    .then_with(|| a.0.module.cmp(&b.0.module))
            });

            if !maintenance.is_empty() {
                out.push_str("#### Maintenance hotspots\n\n");
                out.push_str("|Module|Fix+Revert|Total|Share|\n");
                out.push_str("|---|---:|---:|---:|\n");
                for (m, share) in maintenance.iter().take(10) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|\n",
                        m.module,
                        m.counts.fix + m.counts.revert,
                        m.counts.total,
                        fmt_pct(*share)
                    ));
                }
                out.push('\n');
            }
        }
    }

    if let Some(imports) = &receipt.imports {
        out.push_str("## Imports\n\n");
        out.push_str(&format!("- Granularity: `{}`\n\n", imports.granularity));
        if !imports.edges.is_empty() {
            out.push_str("|From|To|Count|\n");
            out.push_str("|---|---|---:|\n");
            for row in imports.edges.iter().take(20) {
                out.push_str(&format!("|{}|{}|{}|\n", row.from, row.to, row.count));
            }
            out.push('\n');
        }
    }

    if let Some(dup) = &receipt.dup {
        out.push_str("## Duplicates\n\n");
        out.push_str(&format!(
            "- Wasted bytes: `{}`\n- Strategy: `{}`\n\n",
            dup.wasted_bytes, dup.strategy
        ));
        if let Some(density) = &dup.density {
            out.push_str("### Duplication density\n\n");
            out.push_str(&format!(
                "- Duplicate groups: `{}`\n- Duplicate files: `{}`\n- Duplicated bytes: `{}`\n- Waste vs codebase: `{}`\n\n",
                density.duplicate_groups,
                density.duplicate_files,
                density.duplicated_bytes,
                fmt_pct(density.wasted_pct_of_codebase)
            ));
            if !density.by_module.is_empty() {
                out.push_str(
                    "|Module|Dup files|Wasted files|Dup bytes|Wasted bytes|Module bytes|Density|\n",
                );
                out.push_str("|---|---:|---:|---:|---:|---:|---:|\n");
                for row in density.by_module.iter().take(10) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|{}|{}|\n",
                        row.module,
                        row.duplicate_files,
                        row.wasted_files,
                        row.duplicated_bytes,
                        row.wasted_bytes,
                        row.module_bytes,
                        fmt_pct(row.density)
                    ));
                }
                out.push('\n');
            }
        }
        if !dup.groups.is_empty() {
            out.push_str("|Hash|Bytes|Files|\n");
            out.push_str("|---|---:|---:|\n");
            for row in dup.groups.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.hash,
                    row.bytes,
                    row.files.len()
                ));
            }
            out.push('\n');
        }

        if let Some(near) = &dup.near {
            out.push_str("### Near duplicates\n\n");
            out.push_str(&format!(
                "- Files analyzed: `{}`\n- Files skipped: `{}`\n- Threshold: `{}`\n- Scope: `{:?}`\n",
                near.files_analyzed,
                near.files_skipped,
                fmt_f64(near.params.threshold, 2),
                near.params.scope
            ));
            if let Some(eligible) = near.eligible_files {
                out.push_str(&format!("- Eligible files: `{}`\n", eligible));
            }
            if near.truncated {
                out.push_str("- **Warning**: Pair list truncated by `max_pairs` limit.\n");
            }
            out.push('\n');

            if let Some(clusters) = &near.clusters
                && !clusters.is_empty()
            {
                out.push_str("#### Clusters\n\n");
                out.push_str("|#|Files|Max Similarity|Representative|Pairs|\n");
                out.push_str("|---:|---:|---:|---|---:|\n");
                for (i, cluster) in clusters.iter().enumerate() {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        i + 1,
                        cluster.files.len(),
                        fmt_pct(cluster.max_similarity),
                        cluster.representative,
                        cluster.pair_count
                    ));
                }
                out.push('\n');
            }

            if near.pairs.is_empty() {
                out.push_str("- No near-duplicate pairs detected.\n\n");
            } else {
                out.push_str("#### Pairs\n\n");
                out.push_str("|Left|Right|Similarity|Shared FPs|\n");
                out.push_str("|---|---|---:|---:|\n");
                for pair in near.pairs.iter().take(20) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|\n",
                        pair.left,
                        pair.right,
                        fmt_pct(pair.similarity),
                        pair.shared_fingerprints
                    ));
                }
                out.push('\n');
            }

            if let Some(stats) = &near.stats {
                out.push_str(&format!(
                    "> Near-dup stats: fingerprinting {}ms, pairing {}ms, {} bytes processed\n\n",
                    stats.fingerprinting_ms, stats.pairing_ms, stats.bytes_processed
                ));
            }
        }
    }

    if let Some(cx) = &receipt.complexity {
        out.push_str("## Complexity\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!("|Total functions|{}|\n", cx.total_functions));
        out.push_str(&format!(
            "|Avg function length|{}|\n",
            fmt_f64(cx.avg_function_length, 1)
        ));
        out.push_str(&format!(
            "|Max function length|{}|\n",
            cx.max_function_length
        ));
        out.push_str(&format!(
            "|Avg cyclomatic|{}|\n",
            fmt_f64(cx.avg_cyclomatic, 2)
        ));
        out.push_str(&format!("|Max cyclomatic|{}|\n", cx.max_cyclomatic));
        // Cognitive/nesting metrics are optional (not all analyzers supply them).
        if let Some(cog) = cx.avg_cognitive {
            out.push_str(&format!("|Avg cognitive|{}|\n", fmt_f64(cog, 2)));
        }
        if let Some(cog) = cx.max_cognitive {
            out.push_str(&format!("|Max cognitive|{}|\n", cog));
        }
        if let Some(avg_nesting) = cx.avg_nesting_depth {
            out.push_str(&format!(
                "|Avg nesting depth|{}|\n",
                fmt_f64(avg_nesting, 2)
            ));
        }
        if let Some(max_nesting) = cx.max_nesting_depth {
            out.push_str(&format!("|Max nesting depth|{}|\n", max_nesting));
        }
        out.push_str(&format!("|High risk files|{}|\n\n", cx.high_risk_files));

        if !cx.files.is_empty() {
            out.push_str("### Top complex files\n\n");
            out.push_str("|Path|CC|Functions|Max fn length|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for f in cx.files.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    f.path, f.cyclomatic_complexity, f.function_count, f.max_function_length
                ));
            }
            out.push('\n');
        }
    }

    if let Some(api) = &receipt.api_surface {
        out.push_str("## API surface\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!("|Total items|{}|\n", api.total_items));
        out.push_str(&format!("|Public items|{}|\n", api.public_items));
        out.push_str(&format!("|Internal items|{}|\n", api.internal_items));
        out.push_str(&format!("|Public ratio|{}|\n", fmt_pct(api.public_ratio)));
        out.push_str(&format!(
            "|Documented ratio|{}|\n\n",
            fmt_pct(api.documented_ratio)
        ));

        if !api.by_language.is_empty() {
            out.push_str("### By language\n\n");
            out.push_str("|Language|Total|Public|Internal|Public%|\n");
            out.push_str("|---|---:|---:|---:|---:|\n");
            for (lang, data) in &api.by_language {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{}|\n",
                    lang,
                    data.total_items,
                    data.public_items,
                    data.internal_items,
                    fmt_pct(data.public_ratio)
                ));
            }
            out.push('\n');
        }

        if !api.by_module.is_empty() {
            out.push_str("### By module\n\n");
            out.push_str("|Module|Total|Public|Public%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in api.by_module.iter().take(20) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.module,
                    row.total_items,
                    row.public_items,
                    fmt_pct(row.public_ratio)
                ));
            }
            out.push('\n');
        }

        if !api.top_exporters.is_empty() {
            out.push_str("### Top exporters\n\n");
            out.push_str("|Path|Language|Public|Total|\n");
            out.push_str("|---|---|---:|---:|\n");
            for item in api.top_exporters.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    item.path, item.lang, item.public_items, item.total_items
                ));
            }
            out.push('\n');
        }
    }

    // "Fun" extras: only the eco label is surfaced in the Markdown report.
    if let Some(fun) = &receipt.fun
        && let Some(label) = &fun.eco_label
    {
        out.push_str("## Eco label\n\n");
        out.push_str(&format!(
            "- Label: `{}`\n- Score: `{}`\n- Bytes: `{}`\n- Notes: `{}`\n\n",
            label.label,
            fmt_f64(label.score, 1),
            label.bytes,
            label.notes
        ));
    }

    out
}
865
866fn render_file_table(rows: &[FileStatRow]) -> String {
867 use std::fmt::Write;
868 let mut out = String::with_capacity((rows.len() + 3) * 80);
870 out.push_str("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|\n");
871 out.push_str("|---|---|---:|---:|---:|---:|---:|---:|\n");
872 for row in rows {
873 let _ = writeln!(
874 out,
875 "|{}|{}|{}|{}|{}|{}|{}|{}|",
876 row.path,
877 row.lang,
878 row.lines,
879 row.code,
880 row.bytes,
881 row.tokens,
882 row.doc_pct.map(fmt_pct).unwrap_or_else(|| "-".to_string()),
883 row.bytes_per_line
884 .map(|v| fmt_f64(v, 2))
885 .unwrap_or_else(|| "-".to_string())
886 );
887 }
888 out
889}
890
/// Formats a 0.0–1.0 ratio as a percentage with one decimal, e.g. `0.5` -> `"50.0%"`.
fn fmt_pct(ratio: f64) -> String {
    let pct = ratio * 100.0;
    format!("{pct:.1}%")
}
894
/// Formats `value` with exactly `decimals` digits after the decimal point.
fn fmt_f64(value: f64, decimals: usize) -> String {
    // `{:.*}` takes the precision as the first positional argument.
    format!("{:.*}", decimals, value)
}
898
899fn render_jsonld(receipt: &AnalysisReceipt) -> String {
900 let name = receipt
901 .source
902 .inputs
903 .first()
904 .cloned()
905 .unwrap_or_else(|| "tokmd".to_string());
906 let totals = receipt.derived.as_ref().map(|d| &d.totals);
907 let payload = serde_json::json!({
908 "@context": "https://schema.org",
909 "@type": "SoftwareSourceCode",
910 "name": name,
911 "codeLines": totals.map(|t| t.code).unwrap_or(0),
912 "commentCount": totals.map(|t| t.comments).unwrap_or(0),
913 "lineCount": totals.map(|t| t.lines).unwrap_or(0),
914 "fileSize": totals.map(|t| t.bytes).unwrap_or(0),
915 "interactionStatistic": {
916 "@type": "InteractionCounter",
917 "interactionType": "http://schema.org/ReadAction",
918 "userInteractionCount": totals.map(|t| t.tokens).unwrap_or(0)
919 }
920 });
921 serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string())
922}
923
924fn render_xml(receipt: &AnalysisReceipt) -> String {
925 let totals = receipt.derived.as_ref().map(|d| &d.totals);
926 let mut out = String::new();
927 out.push_str("<analysis>");
928 if let Some(totals) = totals {
929 out.push_str(&format!(
930 "<totals files=\"{}\" code=\"{}\" comments=\"{}\" blanks=\"{}\" lines=\"{}\" bytes=\"{}\" tokens=\"{}\"/>",
931 totals.files,
932 totals.code,
933 totals.comments,
934 totals.blanks,
935 totals.lines,
936 totals.bytes,
937 totals.tokens
938 ));
939 }
940 out.push_str("</analysis>");
941 out
942}
943
944fn render_svg(receipt: &AnalysisReceipt) -> String {
945 let (label, value) = if let Some(derived) = &receipt.derived {
946 if let Some(ctx) = &derived.context_window {
947 ("context".to_string(), format!("{:.1}%", ctx.pct * 100.0))
948 } else {
949 ("tokens".to_string(), derived.totals.tokens.to_string())
950 }
951 } else {
952 ("tokens".to_string(), "0".to_string())
953 };
954
955 let width = 240;
956 let height = 32;
957 let label_width = 80;
958 let value_width = width - label_width;
959 format!(
960 "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{width}\" height=\"{height}\" role=\"img\"><rect width=\"{label_width}\" height=\"{height}\" fill=\"#555\"/><rect x=\"{label_width}\" width=\"{value_width}\" height=\"{height}\" fill=\"#4c9aff\"/><text x=\"{lx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{label}</text><text x=\"{vx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{value}</text></svg>",
961 width = width,
962 height = height,
963 label_width = label_width,
964 value_width = value_width,
965 lx = label_width / 2,
966 vx = label_width + value_width / 2,
967 ty = 20,
968 label = label,
969 value = value
970 )
971}
972
973fn render_mermaid(receipt: &AnalysisReceipt) -> String {
974 let mut out = String::from("graph TD\n");
975 if let Some(imports) = &receipt.imports {
976 for edge in imports.edges.iter().take(200) {
977 let from = sanitize_mermaid(&edge.from);
978 let to = sanitize_mermaid(&edge.to);
979 out.push_str(&format!(" {} -->|{}| {}\n", from, edge.count, to));
980 }
981 }
982 out
983}
984
985fn render_tree(receipt: &AnalysisReceipt) -> String {
986 receipt
987 .derived
988 .as_ref()
989 .and_then(|d| d.tree.clone())
990 .unwrap_or_else(|| "(tree unavailable)".to_string())
991}
992
#[cfg(feature = "fun")]
/// Renders a "code city" OBJ scene: one building per largest-by-lines file,
/// laid out on a 5-wide grid with height proportional to line count.
fn render_obj_fun(receipt: &AnalysisReceipt) -> Result<String> {
    let Some(derived) = &receipt.derived else {
        // No derived data: emit just a header comment instead of geometry.
        return Ok("# tokmd code city\n".to_string());
    };
    let mut buildings = Vec::with_capacity(derived.top.largest_lines.len());
    for (idx, row) in derived.top.largest_lines.iter().enumerate() {
        buildings.push(tokmd_fun::ObjBuilding {
            name: row.path.clone(),
            // 5 buildings per row, 2.0 units apart.
            x: (idx % 5) as f32 * 2.0,
            y: (idx / 5) as f32 * 2.0,
            w: 1.5,
            d: 1.5,
            // Height scales with line count; clamp to a visible minimum.
            h: (row.lines as f32 / 10.0).max(0.5),
        });
    }
    Ok(tokmd_fun::render_obj(&buildings))
}
1020
#[cfg(feature = "fun")]
/// Sonifies the largest files as a MIDI sequence at 120 BPM: pitch follows
/// directory depth, velocity follows line count, one note per file.
fn render_midi_fun(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    let notes: Vec<tokmd_fun::MidiNote> = match &receipt.derived {
        Some(derived) => derived
            .top
            .largest_lines
            .iter()
            .enumerate()
            .map(|(idx, row)| tokmd_fun::MidiNote {
                // Middle C plus depth-derived offset within one octave.
                key: 60u8 + (row.depth as u8 % 12),
                // Louder for bigger files, clamped to [40, 120].
                velocity: (40 + (row.lines.min(127) as u8 / 2)).min(120),
                // Notes spaced 240 ticks apart.
                start: (idx as u32) * 240,
                duration: 180,
                channel: 0,
            })
            .collect(),
        None => Vec::new(),
    };
    tokmd_fun::render_midi(&notes, 120)
}
1040
1041#[cfg(not(feature = "fun"))]
1043fn render_obj_disabled(_receipt: &AnalysisReceipt) -> Result<String> {
1044 anyhow::bail!(
1045 "OBJ format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1046 )
1047}
1048
1049#[cfg(not(feature = "fun"))]
1050fn render_midi_disabled(_receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
1051 anyhow::bail!(
1052 "MIDI format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1053 )
1054}
1055
/// Renders OBJ output, dispatching on the `fun` feature at compile time:
/// the real renderer when enabled, otherwise an error explaining how to
/// enable it. Exactly one branch exists in any given build.
fn render_obj(receipt: &AnalysisReceipt) -> Result<String> {
    #[cfg(feature = "fun")]
    {
        render_obj_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_obj_disabled(receipt)
    }
}
1067
/// Renders MIDI output, dispatching on the `fun` feature at compile time:
/// the real renderer when enabled, otherwise an error explaining how to
/// enable it. Exactly one branch exists in any given build.
fn render_midi(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    #[cfg(feature = "fun")]
    {
        render_midi_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_midi_disabled(receipt)
    }
}
1078
/// Replaces every character that is not ASCII-alphanumeric with `_`, yielding
/// a string safe to use as a Mermaid node identifier.
fn sanitize_mermaid(name: &str) -> String {
    let mut id = String::with_capacity(name.len());
    for c in name.chars() {
        id.push(if c.is_ascii_alphanumeric() { c } else { '_' });
    }
    id
}
1084
/// Delegates HTML rendering to the `tokmd-analysis-html` crate.
fn render_html(receipt: &AnalysisReceipt) -> String {
    tokmd_analysis_html::render(receipt)
}
1088
1089#[cfg(test)]
1090mod tests {
1091 use super::*;
1092 use tokmd_analysis_types::*;
1093
    /// Builds the smallest valid `AnalysisReceipt`: a single input, empty
    /// warnings, and every optional analysis section set to `None` so each
    /// test can populate only the section it exercises.
    fn minimal_receipt() -> AnalysisReceipt {
        AnalysisReceipt {
            schema_version: 2,
            generated_at_ms: 0,
            tool: tokmd_types::ToolInfo {
                name: "tokmd".to_string(),
                version: "0.0.0".to_string(),
            },
            mode: "analysis".to_string(),
            status: tokmd_types::ScanStatus::Complete,
            warnings: vec![],
            source: AnalysisSource {
                inputs: vec!["test".to_string()],
                export_path: None,
                base_receipt_path: None,
                export_schema_version: None,
                export_generated_at_ms: None,
                base_signature: None,
                module_roots: vec![],
                module_depth: 1,
                children: "collapse".to_string(),
            },
            args: AnalysisArgsMeta {
                preset: "receipt".to_string(),
                format: "md".to_string(),
                window_tokens: None,
                git: None,
                max_files: None,
                max_bytes: None,
                max_commits: None,
                max_commit_files: None,
                max_file_bytes: None,
                import_granularity: "module".to_string(),
            },
            // Every optional section defaults to absent.
            archetype: None,
            topics: None,
            entropy: None,
            predictive_churn: None,
            corporate_fingerprint: None,
            license: None,
            derived: None,
            assets: None,
            deps: None,
            git: None,
            imports: None,
            dup: None,
            complexity: None,
            api_surface: None,
            fun: None,
        }
    }
1145
    /// Builds a fully-populated `DerivedReport` fixture with self-consistent
    /// headline numbers (10 files, 1300 lines, 50000 bytes, 2500 tokens) that
    /// the renderer tests assert against verbatim.
    fn sample_derived() -> DerivedReport {
        DerivedReport {
            totals: DerivedTotals {
                files: 10,
                code: 1000,
                comments: 200,
                blanks: 100,
                lines: 1300,
                bytes: 50000,
                tokens: 2500,
            },
            doc_density: RatioReport {
                total: RatioRow {
                    key: "total".to_string(),
                    numerator: 200,
                    denominator: 1200,
                    ratio: 0.1667,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            whitespace: RatioReport {
                total: RatioRow {
                    key: "total".to_string(),
                    numerator: 100,
                    denominator: 1300,
                    ratio: 0.0769,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            verbosity: RateReport {
                total: RateRow {
                    key: "total".to_string(),
                    numerator: 50000,
                    denominator: 1300,
                    rate: 38.46,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            // The largest file doubles as the sole entry in `top` below.
            max_file: MaxFileReport {
                overall: FileStatRow {
                    path: "src/lib.rs".to_string(),
                    module: "src".to_string(),
                    lang: "Rust".to_string(),
                    code: 500,
                    comments: 100,
                    blanks: 50,
                    lines: 650,
                    bytes: 25000,
                    tokens: 1250,
                    doc_pct: Some(0.167),
                    bytes_per_line: Some(38.46),
                    depth: 1,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            lang_purity: LangPurityReport { rows: vec![] },
            nesting: NestingReport {
                max: 3,
                avg: 1.5,
                by_module: vec![],
            },
            test_density: TestDensityReport {
                test_lines: 200,
                prod_lines: 1000,
                test_files: 5,
                prod_files: 5,
                ratio: 0.2,
            },
            boilerplate: BoilerplateReport {
                infra_lines: 100,
                logic_lines: 1100,
                ratio: 0.083,
                infra_langs: vec!["TOML".to_string()],
            },
            polyglot: PolyglotReport {
                lang_count: 2,
                entropy: 0.5,
                dominant_lang: "Rust".to_string(),
                dominant_lines: 1000,
                dominant_pct: 0.833,
            },
            distribution: DistributionReport {
                count: 10,
                min: 50,
                max: 650,
                mean: 130.0,
                median: 100.0,
                p90: 400.0,
                p99: 650.0,
                gini: 0.3,
            },
            histogram: vec![HistogramBucket {
                label: "Small".to_string(),
                min: 0,
                max: Some(100),
                files: 5,
                pct: 0.5,
            }],
            top: TopOffenders {
                largest_lines: vec![FileStatRow {
                    path: "src/lib.rs".to_string(),
                    module: "src".to_string(),
                    lang: "Rust".to_string(),
                    code: 500,
                    comments: 100,
                    blanks: 50,
                    lines: 650,
                    bytes: 25000,
                    tokens: 1250,
                    doc_pct: Some(0.167),
                    bytes_per_line: Some(38.46),
                    depth: 1,
                }],
                largest_tokens: vec![],
                largest_bytes: vec![],
                least_documented: vec![],
                most_dense: vec![],
            },
            tree: Some("test-tree".to_string()),
            reading_time: ReadingTimeReport {
                minutes: 65.0,
                lines_per_minute: 20,
                basis_lines: 1300,
            },
            // 2500 tokens against a 100k window -> 2.5% utilisation.
            context_window: Some(ContextWindowReport {
                window_tokens: 100000,
                total_tokens: 2500,
                pct: 0.025,
                fits: true,
            }),
            cocomo: Some(CocomoReport {
                mode: "organic".to_string(),
                kloc: 1.0,
                effort_pm: 2.4,
                duration_months: 2.5,
                staff: 1.0,
                a: 2.4,
                b: 1.05,
                c: 2.5,
                d: 0.38,
            }),
            todo: Some(TodoReport {
                total: 5,
                density_per_kloc: 5.0,
                tags: vec![TodoTagRow {
                    tag: "TODO".to_string(),
                    count: 5,
                }],
            }),
            integrity: IntegrityReport {
                algo: "blake3".to_string(),
                hash: "abc123".to_string(),
                entries: 10,
            },
        }
    }
1306
    // Percent formatting: one decimal place with a trailing '%'.
    #[test]
    fn test_fmt_pct() {
        assert_eq!(fmt_pct(0.5), "50.0%");
        assert_eq!(fmt_pct(0.0), "0.0%");
        assert_eq!(fmt_pct(1.0), "100.0%");
        assert_eq!(fmt_pct(0.1234), "12.3%");
    }
1315
    // Float formatting honours the requested number of decimal places.
    #[test]
    #[allow(clippy::approx_constant)]
    fn test_fmt_f64() {
        assert_eq!(fmt_f64(3.14159, 2), "3.14");
        assert_eq!(fmt_f64(3.14159, 4), "3.1416");
        assert_eq!(fmt_f64(0.0, 2), "0.00");
        assert_eq!(fmt_f64(100.0, 0), "100");
    }
1325
    // Non-alphanumeric characters become underscores; alphanumerics pass through.
    #[test]
    fn test_sanitize_mermaid() {
        assert_eq!(sanitize_mermaid("hello"), "hello");
        assert_eq!(sanitize_mermaid("hello-world"), "hello_world");
        assert_eq!(sanitize_mermaid("src/lib.rs"), "src_lib_rs");
        assert_eq!(sanitize_mermaid("test123"), "test123");
        assert_eq!(sanitize_mermaid("a b c"), "a_b_c");
    }
1335
    // A populated row renders as a pipe-delimited Markdown table line.
    #[test]
    fn test_render_file_table() {
        let rows = vec![FileStatRow {
            path: "src/lib.rs".to_string(),
            module: "src".to_string(),
            lang: "Rust".to_string(),
            code: 100,
            comments: 20,
            blanks: 10,
            lines: 130,
            bytes: 5000,
            tokens: 250,
            doc_pct: Some(0.167),
            bytes_per_line: Some(38.46),
            depth: 1,
        }];
        let result = render_file_table(&rows);
        assert!(result.contains("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|"));
        assert!(result.contains("|src/lib.rs|Rust|130|100|5000|250|16.7%|38.46|"));
    }
1357
1358 #[test]
1360 fn test_render_file_table_none_values() {
1361 let rows = vec![FileStatRow {
1362 path: "test.txt".to_string(),
1363 module: "root".to_string(),
1364 lang: "Text".to_string(),
1365 code: 50,
1366 comments: 0,
1367 blanks: 5,
1368 lines: 55,
1369 bytes: 1000,
1370 tokens: 100,
1371 doc_pct: None,
1372 bytes_per_line: None,
1373 depth: 0,
1374 }];
1375 let result = render_file_table(&rows);
1376 assert!(result.contains("|-|-|")); }
1378
    // XML output wraps derived totals as attributes inside <analysis>.
    #[test]
    fn test_render_xml() {
        let mut receipt = minimal_receipt();
        receipt.derived = Some(sample_derived());
        let result = render_xml(&receipt);
        assert!(result.starts_with("<analysis>"));
        assert!(result.ends_with("</analysis>"));
        assert!(result.contains("files=\"10\""));
        assert!(result.contains("code=\"1000\""));
    }
1390
    // Without derived data the XML document is an empty <analysis> element.
    #[test]
    fn test_render_xml_no_derived() {
        let receipt = minimal_receipt();
        let result = render_xml(&receipt);
        assert_eq!(result, "<analysis></analysis>");
    }
1398
    // JSON-LD output carries schema.org context/type and the derived code lines.
    #[test]
    fn test_render_jsonld() {
        let mut receipt = minimal_receipt();
        receipt.derived = Some(sample_derived());
        let result = render_jsonld(&receipt);
        assert!(result.contains("\"@context\": \"https://schema.org\""));
        assert!(result.contains("\"@type\": \"SoftwareSourceCode\""));
        assert!(result.contains("\"name\": \"test\""));
        assert!(result.contains("\"codeLines\": 1000"));
    }
1410
    // With no inputs, the JSON-LD name falls back to the tool name.
    #[test]
    fn test_render_jsonld_empty_inputs() {
        let mut receipt = minimal_receipt();
        receipt.source.inputs.clear();
        let result = render_jsonld(&receipt);
        assert!(result.contains("\"name\": \"tokmd\""));
    }
1419
1420 #[test]
1422 fn test_render_svg() {
1423 let mut receipt = minimal_receipt();
1424 receipt.derived = Some(sample_derived());
1425 let result = render_svg(&receipt);
1426 assert!(result.contains("<svg"));
1427 assert!(result.contains("</svg>"));
1428 assert!(result.contains("context")); assert!(result.contains("2.5%")); }
1431
1432 #[test]
1434 fn test_render_svg_no_context() {
1435 let mut receipt = minimal_receipt();
1436 let mut derived = sample_derived();
1437 derived.context_window = None;
1438 receipt.derived = Some(derived);
1439 let result = render_svg(&receipt);
1440 assert!(result.contains("tokens"));
1441 assert!(result.contains("2500")); }
1443
1444 #[test]
1446 fn test_render_svg_no_derived() {
1447 let receipt = minimal_receipt();
1448 let result = render_svg(&receipt);
1449 assert!(result.contains("tokens"));
1450 assert!(result.contains(">0<")); }
1452
1453 #[test]
1455 fn test_render_svg_dimensions() {
1456 let receipt = minimal_receipt();
1457 let result = render_svg(&receipt);
1458 assert!(result.contains("width=\"160\"")); }
1461
    // Import edges render as sanitized Mermaid nodes with edge counts.
    #[test]
    fn test_render_mermaid() {
        let mut receipt = minimal_receipt();
        receipt.imports = Some(ImportReport {
            granularity: "module".to_string(),
            edges: vec![ImportEdge {
                from: "src/main".to_string(),
                to: "src/lib".to_string(),
                count: 5,
            }],
        });
        let result = render_mermaid(&receipt);
        assert!(result.starts_with("graph TD\n"));
        assert!(result.contains("src_main -->|5| src_lib"));
    }
1478
    // Without an import report the Mermaid output is just the graph header.
    #[test]
    fn test_render_mermaid_no_imports() {
        let receipt = minimal_receipt();
        let result = render_mermaid(&receipt);
        assert_eq!(result, "graph TD\n");
    }
1486
    // Tree output passes through the pre-rendered tree string verbatim.
    #[test]
    fn test_render_tree() {
        let mut receipt = minimal_receipt();
        receipt.derived = Some(sample_derived());
        let result = render_tree(&receipt);
        assert_eq!(result, "test-tree");
    }
1495
    // Missing derived data yields the unavailable placeholder.
    #[test]
    fn test_render_tree_no_derived() {
        let receipt = minimal_receipt();
        let result = render_tree(&receipt);
        assert_eq!(result, "(tree unavailable)");
    }
1503
    // Derived data present but `tree: None` also yields the placeholder.
    #[test]
    fn test_render_tree_none() {
        let mut receipt = minimal_receipt();
        let mut derived = sample_derived();
        derived.tree = None;
        receipt.derived = Some(derived);
        let result = render_tree(&receipt);
        assert_eq!(result, "(tree unavailable)");
    }
1514
    // Without the `fun` feature, OBJ rendering fails with a feature-hint error.
    #[cfg(not(feature = "fun"))]
    #[test]
    fn test_render_obj_no_fun() {
        let receipt = minimal_receipt();
        let result = render_obj(&receipt);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("fun"));
    }
1524
    // Without the `fun` feature, MIDI rendering fails with a feature-hint error.
    #[cfg(not(feature = "fun"))]
    #[test]
    fn test_render_midi_no_fun() {
        let receipt = minimal_receipt();
        let result = render_midi(&receipt);
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("fun"));
    }
1534
1535 #[cfg(feature = "fun")]
1542 #[test]
1543 fn test_render_obj_coordinate_math() {
1544 let mut receipt = minimal_receipt();
1545 let mut derived = sample_derived();
1546 derived.top.largest_lines = vec![
1556 FileStatRow {
1557 path: "file0.rs".to_string(),
1558 module: "src".to_string(),
1559 lang: "Rust".to_string(),
1560 code: 100,
1561 comments: 10,
1562 blanks: 5,
1563 lines: 100, bytes: 1000,
1565 tokens: 200,
1566 doc_pct: None,
1567 bytes_per_line: None,
1568 depth: 1,
1569 },
1570 FileStatRow {
1571 path: "file1.rs".to_string(),
1572 module: "src".to_string(),
1573 lang: "Rust".to_string(),
1574 code: 50,
1575 comments: 5,
1576 blanks: 2,
1577 lines: 3, bytes: 500,
1579 tokens: 100,
1580 doc_pct: None,
1581 bytes_per_line: None,
1582 depth: 2,
1583 },
1584 FileStatRow {
1585 path: "file2.rs".to_string(),
1586 module: "src".to_string(),
1587 lang: "Rust".to_string(),
1588 code: 200,
1589 comments: 20,
1590 blanks: 10,
1591 lines: 200, bytes: 2000,
1593 tokens: 400,
1594 doc_pct: None,
1595 bytes_per_line: None,
1596 depth: 3,
1597 },
1598 FileStatRow {
1599 path: "file3.rs".to_string(),
1600 module: "src".to_string(),
1601 lang: "Rust".to_string(),
1602 code: 75,
1603 comments: 7,
1604 blanks: 3,
1605 lines: 75, bytes: 750,
1607 tokens: 150,
1608 doc_pct: None,
1609 bytes_per_line: None,
1610 depth: 0,
1611 },
1612 FileStatRow {
1613 path: "file4.rs".to_string(),
1614 module: "src".to_string(),
1615 lang: "Rust".to_string(),
1616 code: 150,
1617 comments: 15,
1618 blanks: 8,
1619 lines: 150, bytes: 1500,
1621 tokens: 300,
1622 doc_pct: None,
1623 bytes_per_line: None,
1624 depth: 1,
1625 },
1626 FileStatRow {
1628 path: "file5.rs".to_string(),
1629 module: "src".to_string(),
1630 lang: "Rust".to_string(),
1631 code: 80,
1632 comments: 8,
1633 blanks: 4,
1634 lines: 80, bytes: 800,
1636 tokens: 160,
1637 doc_pct: None,
1638 bytes_per_line: None,
1639 depth: 2,
1640 },
1641 FileStatRow {
1643 path: "file6.rs".to_string(),
1644 module: "src".to_string(),
1645 lang: "Rust".to_string(),
1646 code: 60,
1647 comments: 6,
1648 blanks: 3,
1649 lines: 60, bytes: 600,
1651 tokens: 120,
1652 doc_pct: None,
1653 bytes_per_line: None,
1654 depth: 1,
1655 },
1656 ];
1657 receipt.derived = Some(derived);
1658 let result = render_obj(&receipt).expect("render_obj should succeed with fun feature");
1659
1660 #[allow(clippy::type_complexity)]
1663 let objects: Vec<(&str, Vec<(f32, f32, f32)>)> = result
1664 .split("o ")
1665 .skip(1)
1666 .map(|section| {
1667 let lines: Vec<&str> = section.lines().collect();
1668 let name = lines[0];
1669 let vertices: Vec<(f32, f32, f32)> = lines[1..]
1670 .iter()
1671 .filter(|l| l.starts_with("v "))
1672 .take(8)
1673 .map(|l| {
1674 let parts: Vec<f32> = l[2..]
1675 .split_whitespace()
1676 .map(|p| p.parse().unwrap())
1677 .collect();
1678 (parts[0], parts[1], parts[2])
1679 })
1680 .collect();
1681 (name, vertices)
1682 })
1683 .collect();
1684
1685 assert_eq!(objects.len(), 7, "expected 7 buildings");
1687
1688 fn base_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1690 obj.1[0]
1691 }
1692 fn top_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1693 obj.1[4] }
1695
1696 assert_eq!(
1698 base_corner(&objects[0]),
1699 (0.0, 0.0, 0.0),
1700 "file0 base position"
1701 );
1702 assert_eq!(
1703 top_corner(&objects[0]).2,
1704 10.0,
1705 "file0 height should be 10.0 (100/10)"
1706 );
1707
1708 assert_eq!(
1711 base_corner(&objects[1]),
1712 (2.0, 0.0, 0.0),
1713 "file1 base position"
1714 );
1715 assert_eq!(
1716 top_corner(&objects[1]).2,
1717 0.5,
1718 "file1 height should be 0.5 (clamped from 3/10=0.3)"
1719 );
1720
1721 assert_eq!(
1723 base_corner(&objects[2]),
1724 (4.0, 0.0, 0.0),
1725 "file2 base position"
1726 );
1727 assert_eq!(
1728 top_corner(&objects[2]).2,
1729 20.0,
1730 "file2 height should be 20.0 (200/10)"
1731 );
1732
1733 assert_eq!(
1735 base_corner(&objects[3]),
1736 (6.0, 0.0, 0.0),
1737 "file3 base position"
1738 );
1739 assert_eq!(
1740 top_corner(&objects[3]).2,
1741 7.5,
1742 "file3 height should be 7.5 (75/10)"
1743 );
1744
1745 assert_eq!(
1748 base_corner(&objects[4]),
1749 (8.0, 0.0, 0.0),
1750 "file4 base position (x = 4*2 = 8)"
1751 );
1752 assert_eq!(
1753 top_corner(&objects[4]).2,
1754 15.0,
1755 "file4 height should be 15.0 (150/10)"
1756 );
1757
1758 assert_eq!(
1762 base_corner(&objects[5]),
1763 (0.0, 2.0, 0.0),
1764 "file5 base position (x=0 from 5%5, y=2 from 5/5*2)"
1765 );
1766 assert_eq!(
1767 top_corner(&objects[5]).2,
1768 8.0,
1769 "file5 height should be 8.0 (80/10)"
1770 );
1771
1772 assert_eq!(
1775 base_corner(&objects[6]),
1776 (2.0, 2.0, 0.0),
1777 "file6 base position (x=2 from 6%5*2, y=2 from 6/5*2)"
1778 );
1779 assert_eq!(
1780 top_corner(&objects[6]).2,
1781 6.0,
1782 "file6 height should be 6.0 (60/10)"
1783 );
1784
1785 assert!(result.contains("f 1 2 3 4"), "missing face definition");
1787 }
1788
1789 #[cfg(feature = "fun")]
1795 #[test]
1796 fn test_render_midi_note_math() {
1797 use midly::{MidiMessage, Smf, TrackEventKind};
1798
1799 let mut receipt = minimal_receipt();
1800 let mut derived = sample_derived();
1801 derived.top.largest_lines = vec![
1807 FileStatRow {
1809 path: "a.rs".to_string(),
1810 module: "src".to_string(),
1811 lang: "Rust".to_string(),
1812 code: 50,
1813 comments: 5,
1814 blanks: 2,
1815 lines: 60,
1816 bytes: 500,
1817 tokens: 100,
1818 doc_pct: None,
1819 bytes_per_line: None,
1820 depth: 5,
1821 },
1822 FileStatRow {
1825 path: "b.rs".to_string(),
1826 module: "src".to_string(),
1827 lang: "Rust".to_string(),
1828 code: 100,
1829 comments: 10,
1830 blanks: 5,
1831 lines: 200, bytes: 1000,
1833 tokens: 200,
1834 doc_pct: None,
1835 bytes_per_line: None,
1836 depth: 15,
1837 },
1838 FileStatRow {
1840 path: "c.rs".to_string(),
1841 module: "src".to_string(),
1842 lang: "Rust".to_string(),
1843 code: 20,
1844 comments: 2,
1845 blanks: 1,
1846 lines: 20,
1847 bytes: 200,
1848 tokens: 40,
1849 doc_pct: None,
1850 bytes_per_line: None,
1851 depth: 0,
1852 },
1853 FileStatRow {
1856 path: "d.rs".to_string(),
1857 module: "src".to_string(),
1858 lang: "Rust".to_string(),
1859 code: 160,
1860 comments: 16,
1861 blanks: 8,
1862 lines: 160,
1863 bytes: 1600,
1864 tokens: 320,
1865 doc_pct: None,
1866 bytes_per_line: None,
1867 depth: 12,
1868 },
1869 ];
1870 receipt.derived = Some(derived);
1871
1872 let result = render_midi(&receipt).unwrap();
1873
1874 let smf = Smf::parse(&result).expect("should parse as valid MIDI");
1876
1877 let mut notes: Vec<(u32, u8, u8)> = Vec::new(); let mut abs_time = 0u32;
1880
1881 for event in &smf.tracks[0] {
1882 abs_time += event.delta.as_int();
1883 if let TrackEventKind::Midi {
1884 message: MidiMessage::NoteOn { key, vel },
1885 ..
1886 } = event.kind
1887 {
1888 notes.push((abs_time, key.as_int(), vel.as_int()));
1889 }
1890 }
1891
1892 assert_eq!(notes.len(), 4, "expected 4 NoteOn events, got {:?}", notes);
1894
1895 assert_eq!(
1898 notes[0],
1899 (0, 65, 70),
1900 "note 0: expected (time=0, key=65=60+5, vel=70=40+60/2), got {:?}",
1901 notes[0]
1902 );
1903
1904 assert_eq!(
1907 notes[1],
1908 (240, 63, 103),
1909 "note 1: expected (time=240=1*240, key=63=60+(15%12), vel=103=40+127/2), got {:?}",
1910 notes[1]
1911 );
1912
1913 assert_eq!(
1915 notes[2],
1916 (480, 60, 50),
1917 "note 2: expected (time=480=2*240, key=60=60+0, vel=50=40+20/2), got {:?}",
1918 notes[2]
1919 );
1920
1921 assert_eq!(
1924 notes[3],
1925 (720, 60, 103),
1926 "note 3: expected (time=720=3*240, key=60=60+(12%12), vel=103=40+127/2), got {:?}",
1927 notes[3]
1928 );
1929
1930 let mut note_offs: Vec<(u32, u8)> = Vec::new(); abs_time = 0;
1933 for event in &smf.tracks[0] {
1934 abs_time += event.delta.as_int();
1935 if let TrackEventKind::Midi {
1936 message: MidiMessage::NoteOff { key, .. },
1937 ..
1938 } = event.kind
1939 {
1940 note_offs.push((abs_time, key.as_int()));
1941 }
1942 }
1943
1944 assert!(
1946 note_offs.iter().any(|&(t, k)| t == 180 && k == 65),
1947 "expected NoteOff for key 65 at time 180, got {:?}",
1948 note_offs
1949 );
1950 assert!(
1951 note_offs.iter().any(|&(t, k)| t == 420 && k == 63),
1952 "expected NoteOff for key 63 at time 420 (240+180), got {:?}",
1953 note_offs
1954 );
1955 assert!(
1956 note_offs.iter().any(|&(t, k)| t == 660 && k == 60),
1957 "expected NoteOff for key 60 at time 660 (480+180), got {:?}",
1958 note_offs
1959 );
1960 assert!(
1961 note_offs.iter().any(|&(t, k)| t == 900 && k == 60),
1962 "expected NoteOff for key 60 at time 900 (720+180), got {:?}",
1963 note_offs
1964 );
1965 }
1966
    // Even with no derived data the output is a valid, single-track MIDI file.
    #[cfg(feature = "fun")]
    #[test]
    fn test_render_midi_no_derived() {
        use midly::Smf;

        let receipt = minimal_receipt();
        let result = render_midi(&receipt).unwrap();

        assert!(!result.is_empty(), "MIDI output should not be empty");
        assert!(
            result.len() > 14,
            "MIDI should have header (14 bytes) + track data"
        );

        let smf = Smf::parse(&result).expect("should be valid MIDI even with no notes");
        assert_eq!(smf.tracks.len(), 1, "should have exactly one track");
    }
1987
    // With no derived data the OBJ output is just the header comment line.
    #[cfg(feature = "fun")]
    #[test]
    fn test_render_obj_no_derived() {
        let receipt = minimal_receipt();
        let result = render_obj(&receipt).expect("render_obj should succeed");

        assert_eq!(result, "# tokmd code city\n");
    }
1998
    // Markdown output always starts with the title and echoes the preset.
    #[test]
    fn test_render_md_basic() {
        let receipt = minimal_receipt();
        let result = render_md(&receipt);
        assert!(result.starts_with("# tokmd analysis\n"));
        assert!(result.contains("Preset: `receipt`"));
    }
2007
    // Each input path is listed as a backticked bullet under "## Inputs".
    #[test]
    fn test_render_md_inputs() {
        let mut receipt = minimal_receipt();
        receipt.source.inputs = vec!["path1".to_string(), "path2".to_string()];
        let result = render_md(&receipt);
        assert!(result.contains("## Inputs"));
        assert!(result.contains("- `path1`"));
        assert!(result.contains("- `path2`"));
    }
2018
    // An empty input list suppresses the "## Inputs" section entirely.
    #[test]
    fn test_render_md_empty_inputs() {
        let mut receipt = minimal_receipt();
        receipt.source.inputs.clear();
        let result = render_md(&receipt);
        assert!(!result.contains("## Inputs"));
    }
2027
    // Archetype kind and joined evidence list render under "## Archetype".
    #[test]
    fn test_render_md_archetype() {
        let mut receipt = minimal_receipt();
        receipt.archetype = Some(Archetype {
            kind: "library".to_string(),
            evidence: vec!["Cargo.toml".to_string(), "src/lib.rs".to_string()],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Archetype"));
        assert!(result.contains("- Kind: `library`"));
        assert!(result.contains("- Evidence: `Cargo.toml`, `src/lib.rs`"));
    }
2041
    // An empty evidence list suppresses the Evidence bullet.
    #[test]
    fn test_render_md_archetype_no_evidence() {
        let mut receipt = minimal_receipt();
        receipt.archetype = Some(Archetype {
            kind: "app".to_string(),
            evidence: vec![],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Archetype"));
        assert!(result.contains("- Kind: `app`"));
        assert!(!result.contains("Evidence"));
    }
2055
    // Overall and per-module topic terms render under "## Topics".
    #[test]
    fn test_render_md_topics() {
        use std::collections::BTreeMap;
        let mut per_module = BTreeMap::new();
        per_module.insert(
            "src".to_string(),
            vec![TopicTerm {
                term: "parser".to_string(),
                score: 1.5,
                tf: 10,
                df: 2,
            }],
        );
        let mut receipt = minimal_receipt();
        receipt.topics = Some(TopicClouds {
            overall: vec![TopicTerm {
                term: "code".to_string(),
                score: 2.0,
                tf: 20,
                df: 5,
            }],
            per_module,
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Topics"));
        assert!(result.contains("- Overall: `code`"));
        assert!(result.contains("- `src`: parser"));
    }
2085
    // Modules with no topic terms are omitted from the output.
    #[test]
    fn test_render_md_topics_empty_module() {
        use std::collections::BTreeMap;
        let mut per_module = BTreeMap::new();
        per_module.insert("empty_module".to_string(), vec![]);
        let mut receipt = minimal_receipt();
        receipt.topics = Some(TopicClouds {
            overall: vec![],
            per_module,
        });
        let result = render_md(&receipt);
        assert!(!result.contains("empty_module"));
    }
2101
    // Entropy suspects render as a table row under "## Entropy profiling".
    #[test]
    fn test_render_md_entropy() {
        let mut receipt = minimal_receipt();
        receipt.entropy = Some(EntropyReport {
            suspects: vec![EntropyFinding {
                path: "secret.bin".to_string(),
                module: "root".to_string(),
                entropy_bits_per_byte: 7.5,
                sample_bytes: 1024,
                class: EntropyClass::High,
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Entropy profiling"));
        assert!(result.contains("|secret.bin|root|7.50|1024|High|"));
    }
2119
    // No suspects: the section still appears with a "no outliers" note.
    #[test]
    fn test_render_md_entropy_no_suspects() {
        let mut receipt = minimal_receipt();
        receipt.entropy = Some(EntropyReport { suspects: vec![] });
        let result = render_md(&receipt);
        assert!(result.contains("## Entropy profiling"));
        assert!(result.contains("No entropy outliers detected"));
    }
2129
    // Effective license and findings table render under "## License radar".
    #[test]
    fn test_render_md_license() {
        let mut receipt = minimal_receipt();
        receipt.license = Some(LicenseReport {
            effective: Some("MIT".to_string()),
            findings: vec![LicenseFinding {
                spdx: "MIT".to_string(),
                confidence: 0.95,
                source_path: "LICENSE".to_string(),
                source_kind: LicenseSourceKind::Text,
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## License radar"));
        assert!(result.contains("- Effective: `MIT`"));
        assert!(result.contains("|MIT|0.95|LICENSE|Text|"));
    }
2148
2149 #[test]
2151 fn test_render_md_license_no_findings() {
2152 let mut receipt = minimal_receipt();
2153 receipt.license = Some(LicenseReport {
2154 effective: None,
2155 findings: vec![],
2156 });
2157 let result = render_md(&receipt);
2158 assert!(result.contains("## License radar"));
2159 assert!(result.contains("Heuristic detection"));
2160 assert!(!result.contains("|SPDX|")); }
2162
    // Domain stats render as a table row under "## Corporate fingerprint".
    #[test]
    fn test_render_md_corporate_fingerprint() {
        let mut receipt = minimal_receipt();
        receipt.corporate_fingerprint = Some(CorporateFingerprint {
            domains: vec![DomainStat {
                domain: "example.com".to_string(),
                commits: 50,
                pct: 0.75,
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Corporate fingerprint"));
        assert!(result.contains("|example.com|50|75.0%|"));
    }
2178
    // No domains: the section appears with a "none detected" note instead.
    #[test]
    fn test_render_md_corporate_fingerprint_no_domains() {
        let mut receipt = minimal_receipt();
        receipt.corporate_fingerprint = Some(CorporateFingerprint { domains: vec![] });
        let result = render_md(&receipt);
        assert!(result.contains("## Corporate fingerprint"));
        assert!(result.contains("No commit domains detected"));
    }
2188
    // Per-module churn trends render as a table under "## Predictive churn".
    #[test]
    fn test_render_md_churn() {
        use std::collections::BTreeMap;
        let mut per_module = BTreeMap::new();
        per_module.insert(
            "src".to_string(),
            ChurnTrend {
                slope: 0.5,
                r2: 0.8,
                recent_change: 5,
                classification: TrendClass::Rising,
            },
        );
        let mut receipt = minimal_receipt();
        receipt.predictive_churn = Some(PredictiveChurnReport { per_module });
        let result = render_md(&receipt);
        assert!(result.contains("## Predictive churn"));
        assert!(result.contains("|src|0.5000|0.80|5|Rising|"));
    }
2209
    // An empty per-module map yields the "no signals" note instead of a table.
    #[test]
    fn test_render_md_churn_empty() {
        use std::collections::BTreeMap;
        let mut receipt = minimal_receipt();
        receipt.predictive_churn = Some(PredictiveChurnReport {
            per_module: BTreeMap::new(),
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Predictive churn"));
        assert!(result.contains("No churn signals detected"));
    }
2222
    // Totals, category rows, and top files render under "## Assets".
    #[test]
    fn test_render_md_assets() {
        let mut receipt = minimal_receipt();
        receipt.assets = Some(AssetReport {
            total_files: 5,
            total_bytes: 1000000,
            categories: vec![AssetCategoryRow {
                category: "images".to_string(),
                files: 3,
                bytes: 500000,
                extensions: vec!["png".to_string(), "jpg".to_string()],
            }],
            top_files: vec![AssetFileRow {
                path: "logo.png".to_string(),
                bytes: 100000,
                category: "images".to_string(),
                extension: "png".to_string(),
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Assets"));
        assert!(result.contains("- Total files: `5`"));
        assert!(result.contains("|images|3|500000|png, jpg|"));
        assert!(result.contains("|logo.png|100000|images|"));
    }
2249
2250 #[test]
2252 fn test_render_md_assets_empty() {
2253 let mut receipt = minimal_receipt();
2254 receipt.assets = Some(AssetReport {
2255 total_files: 0,
2256 total_bytes: 0,
2257 categories: vec![],
2258 top_files: vec![],
2259 });
2260 let result = render_md(&receipt);
2261 assert!(result.contains("## Assets"));
2262 assert!(result.contains("- Total files: `0`"));
2263 assert!(!result.contains("|Category|")); }
2265
    // Dependency totals and lockfile rows render under "## Dependencies".
    #[test]
    fn test_render_md_deps() {
        let mut receipt = minimal_receipt();
        receipt.deps = Some(DependencyReport {
            total: 50,
            lockfiles: vec![LockfileReport {
                path: "Cargo.lock".to_string(),
                kind: "cargo".to_string(),
                dependencies: 50,
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Dependencies"));
        assert!(result.contains("- Total: `50`"));
        assert!(result.contains("|Cargo.lock|cargo|50|"));
    }
2283
    // No lockfiles: the section appears but the lockfile table is suppressed.
    #[test]
    fn test_render_md_deps_empty() {
        let mut receipt = minimal_receipt();
        receipt.deps = Some(DependencyReport {
            total: 0,
            lockfiles: vec![],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Dependencies"));
        assert!(!result.contains("|Lockfile|"));
    }
2296
    // A fully-populated git report renders every subsection: hotspots, bus
    // factor, freshness, code age, and coupling.
    #[test]
    fn test_render_md_git() {
        let mut receipt = minimal_receipt();
        receipt.git = Some(GitReport {
            commits_scanned: 100,
            files_seen: 50,
            hotspots: vec![HotspotRow {
                path: "src/lib.rs".to_string(),
                commits: 25,
                lines: 500,
                score: 12500,
            }],
            bus_factor: vec![BusFactorRow {
                module: "src".to_string(),
                authors: 3,
            }],
            freshness: FreshnessReport {
                threshold_days: 90,
                stale_files: 5,
                total_files: 50,
                stale_pct: 0.1,
                by_module: vec![ModuleFreshnessRow {
                    module: "src".to_string(),
                    avg_days: 30.0,
                    p90_days: 60.0,
                    stale_pct: 0.05,
                }],
            },
            coupling: vec![CouplingRow {
                left: "src/a.rs".to_string(),
                right: "src/b.rs".to_string(),
                count: 10,
                jaccard: Some(0.5),
                lift: Some(1.2),
                n_left: Some(15),
                n_right: Some(12),
            }],
            age_distribution: Some(CodeAgeDistributionReport {
                buckets: vec![CodeAgeBucket {
                    label: "0-30d".to_string(),
                    min_days: 0,
                    max_days: Some(30),
                    files: 10,
                    pct: 0.2,
                }],
                recent_refreshes: 12,
                prior_refreshes: 8,
                refresh_trend: TrendClass::Rising,
            }),
            intent: None,
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Git metrics"));
        assert!(result.contains("- Commits scanned: `100`"));
        assert!(result.contains("|src/lib.rs|25|500|12500|"));
        assert!(result.contains("|src|3|"));
        assert!(result.contains("Stale threshold (days): `90`"));
        assert!(result.contains("|src|30.00|60.00|5.0%|"));
        assert!(result.contains("### Code age"));
        assert!(result.contains("Refresh trend: `Rising`"));
        assert!(result.contains("|0-30d|0|30|10|20.0%|"));
        assert!(result.contains("|src/a.rs|src/b.rs|10|"));
    }
2361
    // An empty git report suppresses the hotspot/bus-factor/coupling subsections.
    #[test]
    fn test_render_md_git_empty() {
        let mut receipt = minimal_receipt();
        receipt.git = Some(GitReport {
            commits_scanned: 0,
            files_seen: 0,
            hotspots: vec![],
            bus_factor: vec![],
            freshness: FreshnessReport {
                threshold_days: 90,
                stale_files: 0,
                total_files: 0,
                stale_pct: 0.0,
                by_module: vec![],
            },
            coupling: vec![],
            age_distribution: None,
            intent: None,
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Git metrics"));
        assert!(!result.contains("### Hotspots"));
        assert!(!result.contains("### Bus factor"));
        assert!(!result.contains("### Coupling"));
    }
2388
    // Import granularity and edge table render under "## Imports".
    #[test]
    fn test_render_md_imports() {
        let mut receipt = minimal_receipt();
        receipt.imports = Some(ImportReport {
            granularity: "file".to_string(),
            edges: vec![ImportEdge {
                from: "src/main.rs".to_string(),
                to: "src/lib.rs".to_string(),
                count: 5,
            }],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Imports"));
        assert!(result.contains("- Granularity: `file`"));
        assert!(result.contains("|src/main.rs|src/lib.rs|5|"));
    }
2406
    // No edges: the section appears but the edge table is suppressed.
    #[test]
    fn test_render_md_imports_empty() {
        let mut receipt = minimal_receipt();
        receipt.imports = Some(ImportReport {
            granularity: "module".to_string(),
            edges: vec![],
        });
        let result = render_md(&receipt);
        assert!(result.contains("## Imports"));
        assert!(!result.contains("|From|To|"));
    }
2419
2420 #[test]
2422 fn test_render_md_dup() {
2423 let mut receipt = minimal_receipt();
2424 receipt.dup = Some(DuplicateReport {
2425 wasted_bytes: 50000,
2426 strategy: "content".to_string(),
2427 groups: vec![DuplicateGroup {
2428 hash: "abc123".to_string(),
2429 bytes: 1000,
2430 files: vec!["a.txt".to_string(), "b.txt".to_string()],
2431 }],
2432 density: Some(DuplicationDensityReport {
2433 duplicate_groups: 1,
2434 duplicate_files: 2,
2435 duplicated_bytes: 2000,
2436 wasted_bytes: 1000,
2437 wasted_pct_of_codebase: 0.1,
2438 by_module: vec![ModuleDuplicationDensityRow {
2439 module: "src".to_string(),
2440 duplicate_files: 2,
2441 wasted_files: 1,
2442 duplicated_bytes: 2000,
2443 wasted_bytes: 1000,
2444 module_bytes: 10_000,
2445 density: 0.1,
2446 }],
2447 }),
2448 near: None,
2449 });
2450 let result = render_md(&receipt);
2451 assert!(result.contains("## Duplicates"));
2452 assert!(result.contains("- Wasted bytes: `50000`"));
2453 assert!(result.contains("### Duplication density"));
2454 assert!(result.contains("Waste vs codebase: `10.0%`"));
2455 assert!(result.contains("|src|2|1|2000|1000|10000|10.0%|"));
2456 assert!(result.contains("|abc123|1000|2|")); }
2458
2459 #[test]
2461 fn test_render_md_dup_empty() {
2462 let mut receipt = minimal_receipt();
2463 receipt.dup = Some(DuplicateReport {
2464 wasted_bytes: 0,
2465 strategy: "content".to_string(),
2466 groups: vec![],
2467 density: None,
2468 near: None,
2469 });
2470 let result = render_md(&receipt);
2471 assert!(result.contains("## Duplicates"));
2472 assert!(!result.contains("|Hash|Bytes|"));
2473 }
2474
2475 #[test]
2477 fn test_render_md_fun() {
2478 let mut receipt = minimal_receipt();
2479 receipt.fun = Some(FunReport {
2480 eco_label: Some(EcoLabel {
2481 label: "A+".to_string(),
2482 score: 95.5,
2483 bytes: 10000,
2484 notes: "Very efficient".to_string(),
2485 }),
2486 });
2487 let result = render_md(&receipt);
2488 assert!(result.contains("## Eco label"));
2489 assert!(result.contains("- Label: `A+`"));
2490 assert!(result.contains("- Score: `95.5`"));
2491 }
2492
2493 #[test]
2495 fn test_render_md_fun_no_label() {
2496 let mut receipt = minimal_receipt();
2497 receipt.fun = Some(FunReport { eco_label: None });
2498 let result = render_md(&receipt);
2499 assert!(!result.contains("## Eco label"));
2501 }
2502
2503 #[test]
2505 fn test_render_md_derived() {
2506 let mut receipt = minimal_receipt();
2507 receipt.derived = Some(sample_derived());
2508 let result = render_md(&receipt);
2509 assert!(result.contains("## Totals"));
2510 assert!(result.contains("|10|1000|200|100|1300|50000|2500|"));
2511 assert!(result.contains("## Ratios"));
2512 assert!(result.contains("## Distribution"));
2513 assert!(result.contains("## File size histogram"));
2514 assert!(result.contains("## Top offenders"));
2515 assert!(result.contains("## Structure"));
2516 assert!(result.contains("## Test density"));
2517 assert!(result.contains("## TODOs"));
2518 assert!(result.contains("## Boilerplate ratio"));
2519 assert!(result.contains("## Polyglot"));
2520 assert!(result.contains("## Reading time"));
2521 assert!(result.contains("## Context window"));
2522 assert!(result.contains("## COCOMO estimate"));
2523 assert!(result.contains("## Integrity"));
2524 }
2525
2526 #[test]
2528 fn test_render_dispatch_md() {
2529 let receipt = minimal_receipt();
2530 let result = render(&receipt, AnalysisFormat::Md).unwrap();
2531 match result {
2532 RenderedOutput::Text(s) => assert!(s.starts_with("# tokmd analysis")),
2533 RenderedOutput::Binary(_) => panic!("expected text"),
2534 }
2535 }
2536
2537 #[test]
2538 fn test_render_dispatch_json() {
2539 let receipt = minimal_receipt();
2540 let result = render(&receipt, AnalysisFormat::Json).unwrap();
2541 match result {
2542 RenderedOutput::Text(s) => assert!(s.contains("\"schema_version\": 2")),
2543 RenderedOutput::Binary(_) => panic!("expected text"),
2544 }
2545 }
2546
2547 #[test]
2548 fn test_render_dispatch_xml() {
2549 let receipt = minimal_receipt();
2550 let result = render(&receipt, AnalysisFormat::Xml).unwrap();
2551 match result {
2552 RenderedOutput::Text(s) => assert!(s.contains("<analysis>")),
2553 RenderedOutput::Binary(_) => panic!("expected text"),
2554 }
2555 }
2556
2557 #[test]
2558 fn test_render_dispatch_tree() {
2559 let receipt = minimal_receipt();
2560 let result = render(&receipt, AnalysisFormat::Tree).unwrap();
2561 match result {
2562 RenderedOutput::Text(s) => assert!(s.contains("(tree unavailable)")),
2563 RenderedOutput::Binary(_) => panic!("expected text"),
2564 }
2565 }
2566
2567 #[test]
2568 fn test_render_dispatch_svg() {
2569 let receipt = minimal_receipt();
2570 let result = render(&receipt, AnalysisFormat::Svg).unwrap();
2571 match result {
2572 RenderedOutput::Text(s) => assert!(s.contains("<svg")),
2573 RenderedOutput::Binary(_) => panic!("expected text"),
2574 }
2575 }
2576
2577 #[test]
2578 fn test_render_dispatch_mermaid() {
2579 let receipt = minimal_receipt();
2580 let result = render(&receipt, AnalysisFormat::Mermaid).unwrap();
2581 match result {
2582 RenderedOutput::Text(s) => assert!(s.starts_with("graph TD")),
2583 RenderedOutput::Binary(_) => panic!("expected text"),
2584 }
2585 }
2586
2587 #[test]
2588 fn test_render_dispatch_jsonld() {
2589 let receipt = minimal_receipt();
2590 let result = render(&receipt, AnalysisFormat::Jsonld).unwrap();
2591 match result {
2592 RenderedOutput::Text(s) => assert!(s.contains("@context")),
2593 RenderedOutput::Binary(_) => panic!("expected text"),
2594 }
2595 }
2596
2597 #[test]
2599 fn test_render_html() {
2600 let mut receipt = minimal_receipt();
2601 receipt.derived = Some(sample_derived());
2602 let result = render_html(&receipt);
2603 assert!(result.contains("<!DOCTYPE html>") || result.contains("<html"));
2604 }
2605}