1use anyhow::Result;
19use tokmd_analysis_types::{AnalysisReceipt, FileStatRow};
20use tokmd_types::AnalysisFormat;
21
/// Result of [`render`]: either UTF-8 text or raw bytes, depending on format.
pub enum RenderedOutput {
    /// Text-producing formats (Markdown, JSON, JSON-LD, XML, SVG, Mermaid, OBJ, tree, HTML).
    Text(String),
    /// Byte-producing formats; currently only MIDI.
    Binary(Vec<u8>),
}
26
27pub fn render(receipt: &AnalysisReceipt, format: AnalysisFormat) -> Result<RenderedOutput> {
28 match format {
29 AnalysisFormat::Md => Ok(RenderedOutput::Text(render_md(receipt))),
30 AnalysisFormat::Json => Ok(RenderedOutput::Text(serde_json::to_string_pretty(receipt)?)),
31 AnalysisFormat::Jsonld => Ok(RenderedOutput::Text(render_jsonld(receipt))),
32 AnalysisFormat::Xml => Ok(RenderedOutput::Text(render_xml(receipt))),
33 AnalysisFormat::Svg => Ok(RenderedOutput::Text(render_svg(receipt))),
34 AnalysisFormat::Mermaid => Ok(RenderedOutput::Text(render_mermaid(receipt))),
35 AnalysisFormat::Obj => Ok(RenderedOutput::Text(render_obj(receipt)?)),
36 AnalysisFormat::Midi => Ok(RenderedOutput::Binary(render_midi(receipt)?)),
37 AnalysisFormat::Tree => Ok(RenderedOutput::Text(render_tree(receipt))),
38 AnalysisFormat::Html => Ok(RenderedOutput::Text(render_html(receipt))),
39 }
40}
41
/// Renders the receipt as the full Markdown report.
///
/// Sections are emitted in a fixed order; each optional receipt field
/// (`archetype`, `topics`, `entropy`, `license`, …, `fun`) contributes a
/// section only when present (and usually only when non-empty). Most tables
/// are capped with `take(10)` / `take(20)` to keep the report skimmable.
fn render_md(receipt: &AnalysisReceipt) -> String {
    let mut out = String::new();
    out.push_str("# tokmd analysis\n\n");
    out.push_str(&format!("Preset: `{}`\n\n", receipt.args.preset));

    if !receipt.source.inputs.is_empty() {
        out.push_str("## Inputs\n\n");
        for input in &receipt.source.inputs {
            out.push_str(&format!("- `{}`\n", input));
        }
        out.push('\n');
    }

    if let Some(archetype) = &receipt.archetype {
        out.push_str("## Archetype\n\n");
        out.push_str(&format!("- Kind: `{}`\n", archetype.kind));
        if !archetype.evidence.is_empty() {
            // join with "`, `" so each evidence item gets its own code span.
            out.push_str(&format!(
                "- Evidence: `{}`\n",
                archetype.evidence.join("`, `")
            ));
        }
        out.push('\n');
    }

    if let Some(topics) = &receipt.topics {
        out.push_str("## Topics\n\n");
        if !topics.overall.is_empty() {
            out.push_str(&format!(
                "- Overall: `{}`\n",
                topics
                    .overall
                    .iter()
                    .map(|t| t.term.as_str())
                    .collect::<Vec<_>>()
                    .join(", ")
            ));
        }
        for (module, terms) in &topics.per_module {
            if terms.is_empty() {
                continue;
            }
            let line = terms
                .iter()
                .map(|t| t.term.as_str())
                .collect::<Vec<_>>()
                .join(", ");
            out.push_str(&format!("- `{}`: {}\n", module, line));
        }
        out.push('\n');
    }

    if let Some(entropy) = &receipt.entropy {
        out.push_str("## Entropy profiling\n\n");
        if entropy.suspects.is_empty() {
            out.push_str("- No entropy outliers detected.\n\n");
        } else {
            out.push_str("|Path|Module|Entropy|Sample bytes|Class|\n");
            out.push_str("|---|---|---:|---:|---|\n");
            for row in entropy.suspects.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{:?}|\n",
                    row.path,
                    row.module,
                    fmt_f64(row.entropy_bits_per_byte as f64, 2),
                    row.sample_bytes,
                    row.class
                ));
            }
            out.push('\n');
        }
    }

    if let Some(license) = &receipt.license {
        out.push_str("## License radar\n\n");
        if let Some(effective) = &license.effective {
            out.push_str(&format!("- Effective: `{}`\n", effective));
        }
        out.push_str("- Heuristic detection; not legal advice.\n\n");
        if !license.findings.is_empty() {
            out.push_str("|SPDX|Confidence|Source|Kind|\n");
            out.push_str("|---|---:|---|---|\n");
            for row in license.findings.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{:?}|\n",
                    row.spdx,
                    fmt_f64(row.confidence as f64, 2),
                    row.source_path,
                    row.source_kind
                ));
            }
            out.push('\n');
        }
    }

    if let Some(fingerprint) = &receipt.corporate_fingerprint {
        out.push_str("## Corporate fingerprint\n\n");
        if fingerprint.domains.is_empty() {
            out.push_str("- No commit domains detected.\n\n");
        } else {
            out.push_str("|Domain|Commits|Pct|\n");
            out.push_str("|---|---:|---:|\n");
            for row in fingerprint.domains.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.domain,
                    row.commits,
                    fmt_pct(row.pct as f64)
                ));
            }
            out.push('\n');
        }
    }

    if let Some(churn) = &receipt.predictive_churn {
        out.push_str("## Predictive churn\n\n");
        // Sort steepest upward slope first; module name breaks ties so the
        // ordering is deterministic (partial_cmp handles NaN defensively).
        let mut rows: Vec<_> = churn.per_module.iter().collect();
        rows.sort_by(|a, b| {
            b.1.slope
                .partial_cmp(&a.1.slope)
                .unwrap_or(std::cmp::Ordering::Equal)
                .then_with(|| a.0.cmp(b.0))
        });
        if rows.is_empty() {
            out.push_str("- No churn signals detected.\n\n");
        } else {
            out.push_str("|Module|Slope|R²|Recent change|Class|\n");
            out.push_str("|---|---:|---:|---:|---|\n");
            for (module, trend) in rows.into_iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{:?}|\n",
                    module,
                    fmt_f64(trend.slope, 4),
                    fmt_f64(trend.r2, 2),
                    trend.recent_change,
                    trend.classification
                ));
            }
            out.push('\n');
        }
    }

    // Derived metrics (totals, ratios, distribution, top offenders, structure,
    // estimates, integrity) make up the bulk of the report.
    if let Some(derived) = &receipt.derived {
        out.push_str("## Totals\n\n");
        out.push_str("|Files|Code|Comments|Blanks|Lines|Bytes|Tokens|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|\n");
        out.push_str(&format!(
            "|{}|{}|{}|{}|{}|{}|{}|\n\n",
            derived.totals.files,
            derived.totals.code,
            derived.totals.comments,
            derived.totals.blanks,
            derived.totals.lines,
            derived.totals.bytes,
            derived.totals.tokens
        ));

        out.push_str("## Ratios\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!(
            "|Doc density|{}|\n",
            fmt_pct(derived.doc_density.total.ratio)
        ));
        out.push_str(&format!(
            "|Whitespace ratio|{}|\n",
            fmt_pct(derived.whitespace.total.ratio)
        ));
        out.push_str(&format!(
            "|Bytes per line|{}|\n\n",
            fmt_f64(derived.verbosity.total.rate, 2)
        ));

        out.push_str("### Doc density by language\n\n");
        out.push_str("|Lang|Doc%|Comments|Code|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.doc_density.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                // Code column: denominator minus comments. Assumes
                // denominator = comments + code — TODO confirm in
                // tokmd-analysis-types.
                row.denominator.saturating_sub(row.numerator)
            ));
        }
        out.push('\n');

        out.push_str("### Whitespace ratio by language\n\n");
        out.push_str("|Lang|Blank%|Blanks|Code+Comments|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.whitespace.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_pct(row.ratio),
                row.numerator,
                row.denominator
            ));
        }
        out.push('\n');

        out.push_str("### Verbosity by language\n\n");
        out.push_str("|Lang|Bytes/Line|Bytes|Lines|\n");
        out.push_str("|---|---:|---:|---:|\n");
        for row in derived.verbosity.by_lang.iter().take(10) {
            out.push_str(&format!(
                "|{}|{}|{}|{}|\n",
                row.key,
                fmt_f64(row.rate, 2),
                row.numerator,
                row.denominator
            ));
        }
        out.push('\n');

        out.push_str("## Distribution\n\n");
        out.push_str("|Count|Min|Max|Mean|Median|P90|P99|Gini|\n");
        out.push_str("|---:|---:|---:|---:|---:|---:|---:|---:|\n");
        out.push_str(&format!(
            "|{}|{}|{}|{}|{}|{}|{}|{}|\n\n",
            derived.distribution.count,
            derived.distribution.min,
            derived.distribution.max,
            fmt_f64(derived.distribution.mean, 2),
            fmt_f64(derived.distribution.median, 2),
            fmt_f64(derived.distribution.p90, 2),
            fmt_f64(derived.distribution.p99, 2),
            fmt_f64(derived.distribution.gini, 4)
        ));

        out.push_str("## File size histogram\n\n");
        out.push_str("|Bucket|Min|Max|Files|Pct|\n");
        out.push_str("|---|---:|---:|---:|---:|\n");
        for bucket in &derived.histogram {
            // The open-ended top bucket has no max; render it as "∞".
            let max = bucket
                .max
                .map(|v| v.to_string())
                .unwrap_or_else(|| "∞".to_string());
            out.push_str(&format!(
                "|{}|{}|{}|{}|{}|\n",
                bucket.label,
                bucket.min,
                max,
                bucket.files,
                fmt_pct(bucket.pct)
            ));
        }
        out.push('\n');

        out.push_str("## Top offenders\n\n");
        out.push_str("### Largest files by lines\n\n");
        out.push_str(&render_file_table(&derived.top.largest_lines));
        out.push('\n');

        out.push_str("### Largest files by tokens\n\n");
        out.push_str(&render_file_table(&derived.top.largest_tokens));
        out.push('\n');

        out.push_str("### Largest files by bytes\n\n");
        out.push_str(&render_file_table(&derived.top.largest_bytes));
        out.push('\n');

        out.push_str("### Least documented (min LOC)\n\n");
        out.push_str(&render_file_table(&derived.top.least_documented));
        out.push('\n');

        out.push_str("### Most dense (bytes/line)\n\n");
        out.push_str(&render_file_table(&derived.top.most_dense));
        out.push('\n');

        out.push_str("## Structure\n\n");
        out.push_str(&format!(
            "- Max depth: `{}`\n- Avg depth: `{}`\n\n",
            derived.nesting.max,
            fmt_f64(derived.nesting.avg, 2)
        ));

        out.push_str("## Test density\n\n");
        out.push_str(&format!(
            "- Test lines: `{}`\n- Prod lines: `{}`\n- Test ratio: `{}`\n\n",
            derived.test_density.test_lines,
            derived.test_density.prod_lines,
            fmt_pct(derived.test_density.ratio)
        ));

        if let Some(todo) = &derived.todo {
            out.push_str("## TODOs\n\n");
            out.push_str(&format!(
                "- Total: `{}`\n- Density (per KLOC): `{}`\n\n",
                todo.total,
                fmt_f64(todo.density_per_kloc, 2)
            ));
            out.push_str("|Tag|Count|\n");
            out.push_str("|---|---:|\n");
            for tag in &todo.tags {
                out.push_str(&format!("|{}|{}|\n", tag.tag, tag.count));
            }
            out.push('\n');
        }

        out.push_str("## Boilerplate ratio\n\n");
        out.push_str(&format!(
            "- Infra lines: `{}`\n- Logic lines: `{}`\n- Infra ratio: `{}`\n\n",
            derived.boilerplate.infra_lines,
            derived.boilerplate.logic_lines,
            fmt_pct(derived.boilerplate.ratio)
        ));

        out.push_str("## Polyglot\n\n");
        out.push_str(&format!(
            "- Languages: `{}`\n- Dominant: `{}` ({})\n- Entropy: `{}`\n\n",
            derived.polyglot.lang_count,
            derived.polyglot.dominant_lang,
            fmt_pct(derived.polyglot.dominant_pct),
            fmt_f64(derived.polyglot.entropy, 4)
        ));

        out.push_str("## Reading time\n\n");
        out.push_str(&format!(
            "- Minutes: `{}` ({} lines/min)\n\n",
            fmt_f64(derived.reading_time.minutes, 2),
            derived.reading_time.lines_per_minute
        ));

        if let Some(context) = &derived.context_window {
            out.push_str("## Context window\n\n");
            out.push_str(&format!(
                "- Window tokens: `{}`\n- Total tokens: `{}`\n- Utilization: `{}`\n- Fits: `{}`\n\n",
                context.window_tokens,
                context.total_tokens,
                fmt_pct(context.pct),
                context.fits
            ));
        }

        if let Some(cocomo) = &derived.cocomo {
            out.push_str("## COCOMO estimate\n\n");
            out.push_str(&format!(
                "- Mode: `{}`\n- KLOC: `{}`\n- Effort (PM): `{}`\n- Duration (months): `{}`\n- Staff: `{}`\n\n",
                cocomo.mode,
                fmt_f64(cocomo.kloc, 4),
                fmt_f64(cocomo.effort_pm, 2),
                fmt_f64(cocomo.duration_months, 2),
                fmt_f64(cocomo.staff, 2)
            ));
        }

        out.push_str("## Integrity\n\n");
        out.push_str(&format!(
            "- Hash: `{}` (`{}`)\n- Entries: `{}`\n\n",
            derived.integrity.hash, derived.integrity.algo, derived.integrity.entries
        ));
    }

    if let Some(assets) = &receipt.assets {
        out.push_str("## Assets\n\n");
        out.push_str(&format!(
            "- Total files: `{}`\n- Total bytes: `{}`\n\n",
            assets.total_files, assets.total_bytes
        ));
        if !assets.categories.is_empty() {
            out.push_str("|Category|Files|Bytes|Extensions|\n");
            out.push_str("|---|---:|---:|---|\n");
            for row in &assets.categories {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.category,
                    row.files,
                    row.bytes,
                    row.extensions.join(", ")
                ));
            }
            out.push('\n');
        }
        if !assets.top_files.is_empty() {
            out.push_str("|File|Bytes|Category|\n");
            out.push_str("|---|---:|---|\n");
            for row in &assets.top_files {
                out.push_str(&format!("|{}|{}|{}|\n", row.path, row.bytes, row.category));
            }
            out.push('\n');
        }
    }

    if let Some(deps) = &receipt.deps {
        out.push_str("## Dependencies\n\n");
        out.push_str(&format!("- Total: `{}`\n\n", deps.total));
        if !deps.lockfiles.is_empty() {
            out.push_str("|Lockfile|Kind|Dependencies|\n");
            out.push_str("|---|---|---:|\n");
            for row in &deps.lockfiles {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.path, row.kind, row.dependencies
                ));
            }
            out.push('\n');
        }
    }

    if let Some(git) = &receipt.git {
        out.push_str("## Git metrics\n\n");
        out.push_str(&format!(
            "- Commits scanned: `{}`\n- Files seen: `{}`\n\n",
            git.commits_scanned, git.files_seen
        ));
        if !git.hotspots.is_empty() {
            out.push_str("### Hotspots\n\n");
            out.push_str("|File|Commits|Lines|Score|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.hotspots.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.path, row.commits, row.lines, row.score
                ));
            }
            out.push('\n');
        }
        if !git.bus_factor.is_empty() {
            out.push_str("### Bus factor\n\n");
            out.push_str("|Module|Authors|\n");
            out.push_str("|---|---:|\n");
            for row in git.bus_factor.iter().take(10) {
                out.push_str(&format!("|{}|{}|\n", row.module, row.authors));
            }
            out.push('\n');
        }
        out.push_str("### Freshness\n\n");
        out.push_str(&format!(
            "- Stale threshold (days): `{}`\n- Stale files: `{}` / `{}` ({})\n\n",
            git.freshness.threshold_days,
            git.freshness.stale_files,
            git.freshness.total_files,
            fmt_pct(git.freshness.stale_pct)
        ));
        if !git.freshness.by_module.is_empty() {
            out.push_str("|Module|Avg days|P90 days|Stale%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in git.freshness.by_module.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.module,
                    fmt_f64(row.avg_days, 2),
                    fmt_f64(row.p90_days, 2),
                    fmt_pct(row.stale_pct)
                ));
            }
            out.push('\n');
        }
        if let Some(age) = &git.age_distribution {
            out.push_str("### Code age\n\n");
            out.push_str(&format!(
                "- Refresh trend: `{:?}` (recent: `{}`, prior: `{}`)\n\n",
                age.refresh_trend, age.recent_refreshes, age.prior_refreshes
            ));
            if !age.buckets.is_empty() {
                out.push_str("|Bucket|Min days|Max days|Files|Pct|\n");
                out.push_str("|---|---:|---:|---:|---:|\n");
                for bucket in &age.buckets {
                    // Open-ended oldest bucket renders as "∞".
                    let max = bucket
                        .max_days
                        .map(|v| v.to_string())
                        .unwrap_or_else(|| "∞".to_string());
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        bucket.label,
                        bucket.min_days,
                        max,
                        bucket.files,
                        fmt_pct(bucket.pct)
                    ));
                }
                out.push('\n');
            }
        }
        if !git.coupling.is_empty() {
            // Pairs co-changed fewer than twice are treated as noise.
            let filtered: Vec<_> = git.coupling.iter().filter(|r| r.count >= 2).collect();
            if !filtered.is_empty() {
                out.push_str("### Coupling\n\n");
                out.push_str("|Left|Right|Count|Jaccard|Lift|\n");
                out.push_str("|---|---|---:|---:|---:|\n");
                for row in filtered.iter().take(10) {
                    let jaccard = row
                        .jaccard
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    let lift = row
                        .lift
                        .map(|v| fmt_f64(v, 4))
                        .unwrap_or_else(|| "-".to_string());
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        row.left, row.right, row.count, jaccard, lift
                    ));
                }
                out.push('\n');
            }
        }

        if let Some(intent) = &git.intent {
            out.push_str("### Commit intent\n\n");
            out.push_str("|Type|Count|\n");
            out.push_str("|---|---:|\n");
            let o = &intent.overall;
            let entries = [
                ("feat", o.feat),
                ("fix", o.fix),
                ("refactor", o.refactor),
                ("docs", o.docs),
                ("test", o.test),
                ("chore", o.chore),
                ("ci", o.ci),
                ("build", o.build),
                ("perf", o.perf),
                ("style", o.style),
                ("revert", o.revert),
                ("other", o.other),
            ];
            // Only commit types that actually occurred get a row.
            for (name, count) in entries {
                if count > 0 {
                    out.push_str(&format!("|{}|{}|\n", name, count));
                }
            }
            out.push_str(&format!("|**total**|{}|\n", o.total));
            out.push_str(&format!("\n- Unknown: `{}`\n", fmt_pct(intent.unknown_pct)));
            if let Some(cr) = intent.corrective_ratio {
                out.push_str(&format!(
                    "- Corrective ratio (fix+revert/total): `{}`\n",
                    fmt_pct(cr)
                ));
            }
            out.push('\n');

            // Rank modules by their share of fix/revert commits (maintenance
            // load); zero-share modules are dropped before sorting.
            let mut maintenance: Vec<_> = intent
                .by_module
                .iter()
                .filter(|m| m.counts.total > 0)
                .map(|m| {
                    let fix_revert = m.counts.fix + m.counts.revert;
                    let share = fix_revert as f64 / m.counts.total as f64;
                    (m, share)
                })
                .filter(|(_, share)| *share > 0.0)
                .collect();
            maintenance.sort_by(|a, b| {
                b.1.partial_cmp(&a.1)
                    .unwrap_or(std::cmp::Ordering::Equal)
                    .then_with(|| a.0.module.cmp(&b.0.module))
            });

            if !maintenance.is_empty() {
                out.push_str("#### Maintenance hotspots\n\n");
                out.push_str("|Module|Fix+Revert|Total|Share|\n");
                out.push_str("|---|---:|---:|---:|\n");
                for (m, share) in maintenance.iter().take(10) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|\n",
                        m.module,
                        m.counts.fix + m.counts.revert,
                        m.counts.total,
                        fmt_pct(*share)
                    ));
                }
                out.push('\n');
            }
        }
    }

    if let Some(imports) = &receipt.imports {
        out.push_str("## Imports\n\n");
        out.push_str(&format!("- Granularity: `{}`\n\n", imports.granularity));
        if !imports.edges.is_empty() {
            out.push_str("|From|To|Count|\n");
            out.push_str("|---|---|---:|\n");
            for row in imports.edges.iter().take(20) {
                out.push_str(&format!("|{}|{}|{}|\n", row.from, row.to, row.count));
            }
            out.push('\n');
        }
    }

    if let Some(dup) = &receipt.dup {
        out.push_str("## Duplicates\n\n");
        out.push_str(&format!(
            "- Wasted bytes: `{}`\n- Strategy: `{}`\n\n",
            dup.wasted_bytes, dup.strategy
        ));
        if let Some(density) = &dup.density {
            out.push_str("### Duplication density\n\n");
            out.push_str(&format!(
                "- Duplicate groups: `{}`\n- Duplicate files: `{}`\n- Duplicated bytes: `{}`\n- Waste vs codebase: `{}`\n\n",
                density.duplicate_groups,
                density.duplicate_files,
                density.duplicated_bytes,
                fmt_pct(density.wasted_pct_of_codebase)
            ));
            if !density.by_module.is_empty() {
                out.push_str(
                    "|Module|Dup files|Wasted files|Dup bytes|Wasted bytes|Module bytes|Density|\n",
                );
                out.push_str("|---|---:|---:|---:|---:|---:|---:|\n");
                for row in density.by_module.iter().take(10) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|{}|{}|\n",
                        row.module,
                        row.duplicate_files,
                        row.wasted_files,
                        row.duplicated_bytes,
                        row.wasted_bytes,
                        row.module_bytes,
                        fmt_pct(row.density)
                    ));
                }
                out.push('\n');
            }
        }
        if !dup.groups.is_empty() {
            out.push_str("|Hash|Bytes|Files|\n");
            out.push_str("|---|---:|---:|\n");
            for row in dup.groups.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|\n",
                    row.hash,
                    row.bytes,
                    row.files.len()
                ));
            }
            out.push('\n');
        }

        if let Some(near) = &dup.near {
            out.push_str("### Near duplicates\n\n");
            out.push_str(&format!(
                "- Files analyzed: `{}`\n- Files skipped: `{}`\n- Threshold: `{}`\n- Scope: `{:?}`\n",
                near.files_analyzed,
                near.files_skipped,
                fmt_f64(near.params.threshold, 2),
                near.params.scope
            ));
            if let Some(eligible) = near.eligible_files {
                out.push_str(&format!("- Eligible files: `{}`\n", eligible));
            }
            if near.truncated {
                out.push_str("- **Warning**: Pair list truncated by `max_pairs` limit.\n");
            }
            out.push('\n');

            if let Some(clusters) = &near.clusters
                && !clusters.is_empty()
            {
                out.push_str("#### Clusters\n\n");
                out.push_str("|#|Files|Max Similarity|Representative|Pairs|\n");
                out.push_str("|---:|---:|---:|---|---:|\n");
                for (i, cluster) in clusters.iter().enumerate() {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|{}|\n",
                        i + 1,
                        cluster.files.len(),
                        fmt_pct(cluster.max_similarity),
                        cluster.representative,
                        cluster.pair_count
                    ));
                }
                out.push('\n');
            }

            if near.pairs.is_empty() {
                out.push_str("- No near-duplicate pairs detected.\n\n");
            } else {
                out.push_str("#### Pairs\n\n");
                out.push_str("|Left|Right|Similarity|Shared FPs|\n");
                out.push_str("|---|---|---:|---:|\n");
                for pair in near.pairs.iter().take(20) {
                    out.push_str(&format!(
                        "|{}|{}|{}|{}|\n",
                        pair.left,
                        pair.right,
                        fmt_pct(pair.similarity),
                        pair.shared_fingerprints
                    ));
                }
                out.push('\n');
            }

            if let Some(stats) = &near.stats {
                out.push_str(&format!(
                    "> Near-dup stats: fingerprinting {}ms, pairing {}ms, {} bytes processed\n\n",
                    stats.fingerprinting_ms, stats.pairing_ms, stats.bytes_processed
                ));
            }
        }
    }

    if let Some(cx) = &receipt.complexity {
        out.push_str("## Complexity\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!("|Total functions|{}|\n", cx.total_functions));
        out.push_str(&format!(
            "|Avg function length|{}|\n",
            fmt_f64(cx.avg_function_length, 1)
        ));
        out.push_str(&format!(
            "|Max function length|{}|\n",
            cx.max_function_length
        ));
        out.push_str(&format!(
            "|Avg cyclomatic|{}|\n",
            fmt_f64(cx.avg_cyclomatic, 2)
        ));
        out.push_str(&format!("|Max cyclomatic|{}|\n", cx.max_cyclomatic));
        // Cognitive/nesting metrics are optional; rows appear only when present.
        if let Some(cog) = cx.avg_cognitive {
            out.push_str(&format!("|Avg cognitive|{}|\n", fmt_f64(cog, 2)));
        }
        if let Some(cog) = cx.max_cognitive {
            out.push_str(&format!("|Max cognitive|{}|\n", cog));
        }
        if let Some(avg_nesting) = cx.avg_nesting_depth {
            out.push_str(&format!(
                "|Avg nesting depth|{}|\n",
                fmt_f64(avg_nesting, 2)
            ));
        }
        if let Some(max_nesting) = cx.max_nesting_depth {
            out.push_str(&format!("|Max nesting depth|{}|\n", max_nesting));
        }
        out.push_str(&format!("|High risk files|{}|\n\n", cx.high_risk_files));

        if !cx.files.is_empty() {
            out.push_str("### Top complex files\n\n");
            out.push_str("|Path|CC|Functions|Max fn length|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for f in cx.files.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    f.path, f.cyclomatic_complexity, f.function_count, f.max_function_length
                ));
            }
            out.push('\n');
        }
    }

    if let Some(api) = &receipt.api_surface {
        out.push_str("## API surface\n\n");
        out.push_str("|Metric|Value|\n");
        out.push_str("|---|---:|\n");
        out.push_str(&format!("|Total items|{}|\n", api.total_items));
        out.push_str(&format!("|Public items|{}|\n", api.public_items));
        out.push_str(&format!("|Internal items|{}|\n", api.internal_items));
        out.push_str(&format!("|Public ratio|{}|\n", fmt_pct(api.public_ratio)));
        out.push_str(&format!(
            "|Documented ratio|{}|\n\n",
            fmt_pct(api.documented_ratio)
        ));

        if !api.by_language.is_empty() {
            out.push_str("### By language\n\n");
            out.push_str("|Language|Total|Public|Internal|Public%|\n");
            out.push_str("|---|---:|---:|---:|---:|\n");
            for (lang, data) in &api.by_language {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|{}|\n",
                    lang,
                    data.total_items,
                    data.public_items,
                    data.internal_items,
                    fmt_pct(data.public_ratio)
                ));
            }
            out.push('\n');
        }

        if !api.by_module.is_empty() {
            out.push_str("### By module\n\n");
            out.push_str("|Module|Total|Public|Public%|\n");
            out.push_str("|---|---:|---:|---:|\n");
            for row in api.by_module.iter().take(20) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    row.module,
                    row.total_items,
                    row.public_items,
                    fmt_pct(row.public_ratio)
                ));
            }
            out.push('\n');
        }

        if !api.top_exporters.is_empty() {
            out.push_str("### Top exporters\n\n");
            out.push_str("|Path|Language|Public|Total|\n");
            out.push_str("|---|---|---:|---:|\n");
            for item in api.top_exporters.iter().take(10) {
                out.push_str(&format!(
                    "|{}|{}|{}|{}|\n",
                    item.path, item.lang, item.public_items, item.total_items
                ));
            }
            out.push('\n');
        }
    }

    if let Some(fun) = &receipt.fun
        && let Some(label) = &fun.eco_label
    {
        out.push_str("## Eco label\n\n");
        out.push_str(&format!(
            "- Label: `{}`\n- Score: `{}`\n- Bytes: `{}`\n- Notes: `{}`\n\n",
            label.label,
            fmt_f64(label.score, 1),
            label.bytes,
            label.notes
        ));
    }

    out
}
865
866fn render_file_table(rows: &[FileStatRow]) -> String {
867 let mut out = String::new();
868 out.push_str("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|\n");
869 out.push_str("|---|---|---:|---:|---:|---:|---:|---:|\n");
870 for row in rows {
871 out.push_str(&format!(
872 "|{}|{}|{}|{}|{}|{}|{}|{}|\n",
873 row.path,
874 row.lang,
875 row.lines,
876 row.code,
877 row.bytes,
878 row.tokens,
879 row.doc_pct.map(fmt_pct).unwrap_or_else(|| "-".to_string()),
880 row.bytes_per_line
881 .map(|v| fmt_f64(v, 2))
882 .unwrap_or_else(|| "-".to_string())
883 ));
884 }
885 out
886}
887
/// Formats a 0.0..=1.0 ratio as a percentage with one decimal place,
/// e.g. `0.1667` -> `"16.7%"`.
fn fmt_pct(ratio: f64) -> String {
    let percent = ratio * 100.0;
    format!("{percent:.1}%")
}
891
/// Formats `value` with exactly `decimals` digits after the decimal point.
fn fmt_f64(value: f64, decimals: usize) -> String {
    // Precision taken from the second positional argument.
    format!("{:.1$}", value, decimals)
}
895
896fn render_jsonld(receipt: &AnalysisReceipt) -> String {
897 let name = receipt
898 .source
899 .inputs
900 .first()
901 .cloned()
902 .unwrap_or_else(|| "tokmd".to_string());
903 let totals = receipt.derived.as_ref().map(|d| &d.totals);
904 let payload = serde_json::json!({
905 "@context": "https://schema.org",
906 "@type": "SoftwareSourceCode",
907 "name": name,
908 "codeLines": totals.map(|t| t.code).unwrap_or(0),
909 "commentCount": totals.map(|t| t.comments).unwrap_or(0),
910 "lineCount": totals.map(|t| t.lines).unwrap_or(0),
911 "fileSize": totals.map(|t| t.bytes).unwrap_or(0),
912 "interactionStatistic": {
913 "@type": "InteractionCounter",
914 "interactionType": "http://schema.org/ReadAction",
915 "userInteractionCount": totals.map(|t| t.tokens).unwrap_or(0)
916 }
917 });
918 serde_json::to_string_pretty(&payload).unwrap_or_else(|_| "{}".to_string())
919}
920
921fn render_xml(receipt: &AnalysisReceipt) -> String {
922 let totals = receipt.derived.as_ref().map(|d| &d.totals);
923 let mut out = String::new();
924 out.push_str("<analysis>");
925 if let Some(totals) = totals {
926 out.push_str(&format!(
927 "<totals files=\"{}\" code=\"{}\" comments=\"{}\" blanks=\"{}\" lines=\"{}\" bytes=\"{}\" tokens=\"{}\"/>",
928 totals.files,
929 totals.code,
930 totals.comments,
931 totals.blanks,
932 totals.lines,
933 totals.bytes,
934 totals.tokens
935 ));
936 }
937 out.push_str("</analysis>");
938 out
939}
940
941fn render_svg(receipt: &AnalysisReceipt) -> String {
942 let (label, value) = if let Some(derived) = &receipt.derived {
943 if let Some(ctx) = &derived.context_window {
944 ("context".to_string(), format!("{:.1}%", ctx.pct * 100.0))
945 } else {
946 ("tokens".to_string(), derived.totals.tokens.to_string())
947 }
948 } else {
949 ("tokens".to_string(), "0".to_string())
950 };
951
952 let width = 240;
953 let height = 32;
954 let label_width = 80;
955 let value_width = width - label_width;
956 format!(
957 "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"{width}\" height=\"{height}\" role=\"img\"><rect width=\"{label_width}\" height=\"{height}\" fill=\"#555\"/><rect x=\"{label_width}\" width=\"{value_width}\" height=\"{height}\" fill=\"#4c9aff\"/><text x=\"{lx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{label}</text><text x=\"{vx}\" y=\"{ty}\" fill=\"#fff\" font-family=\"Verdana\" font-size=\"12\" text-anchor=\"middle\">{value}</text></svg>",
958 width = width,
959 height = height,
960 label_width = label_width,
961 value_width = value_width,
962 lx = label_width / 2,
963 vx = label_width + value_width / 2,
964 ty = 20,
965 label = label,
966 value = value
967 )
968}
969
970fn render_mermaid(receipt: &AnalysisReceipt) -> String {
971 let mut out = String::from("graph TD\n");
972 if let Some(imports) = &receipt.imports {
973 for edge in imports.edges.iter().take(200) {
974 let from = sanitize_mermaid(&edge.from);
975 let to = sanitize_mermaid(&edge.to);
976 out.push_str(&format!(" {} -->|{}| {}\n", from, edge.count, to));
977 }
978 }
979 out
980}
981
982fn render_tree(receipt: &AnalysisReceipt) -> String {
983 receipt
984 .derived
985 .as_ref()
986 .and_then(|d| d.tree.clone())
987 .unwrap_or_else(|| "(tree unavailable)".to_string())
988}
989
/// Builds a "code city" OBJ scene: one building per largest-by-lines file,
/// laid out on a 5-wide grid with height scaled from the line count
/// (minimum 0.5). Without a derived report, emits a placeholder header.
#[cfg(feature = "fun")]
fn render_obj_fun(receipt: &AnalysisReceipt) -> Result<String> {
    let Some(derived) = &receipt.derived else {
        return Ok("# tokmd code city\n".to_string());
    };
    let mut buildings = Vec::new();
    for (idx, row) in derived.top.largest_lines.iter().enumerate() {
        buildings.push(tokmd_fun::ObjBuilding {
            name: row.path.clone(),
            x: (idx % 5) as f32 * 2.0,
            y: (idx / 5) as f32 * 2.0,
            w: 1.5,
            d: 1.5,
            h: (row.lines as f32 / 10.0).max(0.5),
        });
    }
    Ok(tokmd_fun::render_obj(&buildings))
}
1017
/// Maps the largest files to a single MIDI track: pitch from directory depth
/// (C4 + depth mod 12), velocity from line count (clamped to 120), one note
/// per file at 240-tick spacing, rendered at 120 BPM.
#[cfg(feature = "fun")]
fn render_midi_fun(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    let notes: Vec<tokmd_fun::MidiNote> = receipt
        .derived
        .as_ref()
        .map(|derived| {
            derived
                .top
                .largest_lines
                .iter()
                .enumerate()
                .map(|(idx, row)| tokmd_fun::MidiNote {
                    key: 60u8 + (row.depth as u8 % 12),
                    velocity: (40 + (row.lines.min(127) as u8 / 2)).min(120),
                    start: (idx as u32) * 240,
                    duration: 180,
                    channel: 0,
                })
                .collect()
        })
        .unwrap_or_default();
    tokmd_fun::render_midi(&notes, 120)
}
1037
1038#[cfg(not(feature = "fun"))]
1040fn render_obj_disabled(_receipt: &AnalysisReceipt) -> Result<String> {
1041 anyhow::bail!(
1042 "OBJ format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1043 )
1044}
1045
1046#[cfg(not(feature = "fun"))]
1047fn render_midi_disabled(_receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
1048 anyhow::bail!(
1049 "MIDI format requires the `fun` feature: tokmd-analysis-format = {{ version = \"1.3\", features = [\"fun\"] }}"
1050 )
1051}
1052
/// Renders the OBJ "code city", delegating to the feature-gated
/// implementation; with `fun` disabled this always errors.
fn render_obj(receipt: &AnalysisReceipt) -> Result<String> {
    // Exactly one of these cfg blocks is compiled in.
    #[cfg(feature = "fun")]
    {
        render_obj_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_obj_disabled(receipt)
    }
}
1064
/// Renders the MIDI bytes, delegating to the feature-gated implementation;
/// with `fun` disabled this always errors.
fn render_midi(receipt: &AnalysisReceipt) -> Result<Vec<u8>> {
    // Exactly one of these cfg blocks is compiled in.
    #[cfg(feature = "fun")]
    {
        render_midi_fun(receipt)
    }
    #[cfg(not(feature = "fun"))]
    {
        render_midi_disabled(receipt)
    }
}
1075
/// Replaces every character that is not ASCII-alphanumeric with `_` so the
/// name can be used as a Mermaid node identifier.
fn sanitize_mermaid(name: &str) -> String {
    let mut safe = String::with_capacity(name.len());
    for ch in name.chars() {
        if ch.is_ascii_alphanumeric() {
            safe.push(ch);
        } else {
            safe.push('_');
        }
    }
    safe
}
1081
/// Renders the receipt as an HTML report (delegated entirely to the
/// `tokmd_analysis_html` crate).
fn render_html(receipt: &AnalysisReceipt) -> String {
    tokmd_analysis_html::render(receipt)
}
1085
1086#[cfg(test)]
1087mod tests {
1088 use super::*;
1089 use tokmd_analysis_types::*;
1090
    /// Test fixture: the smallest well-formed receipt — one input, "md"
    /// format, and every optional analysis section set to `None` so the
    /// renderers exercise their fallback paths.
    fn minimal_receipt() -> AnalysisReceipt {
        AnalysisReceipt {
            schema_version: 2,
            generated_at_ms: 0,
            tool: tokmd_types::ToolInfo {
                name: "tokmd".to_string(),
                version: "0.0.0".to_string(),
            },
            mode: "analysis".to_string(),
            status: tokmd_types::ScanStatus::Complete,
            warnings: vec![],
            source: AnalysisSource {
                inputs: vec!["test".to_string()],
                export_path: None,
                base_receipt_path: None,
                export_schema_version: None,
                export_generated_at_ms: None,
                base_signature: None,
                module_roots: vec![],
                module_depth: 1,
                children: "collapse".to_string(),
            },
            args: AnalysisArgsMeta {
                preset: "receipt".to_string(),
                format: "md".to_string(),
                window_tokens: None,
                git: None,
                max_files: None,
                max_bytes: None,
                max_commits: None,
                max_commit_files: None,
                max_file_bytes: None,
                import_granularity: "module".to_string(),
            },
            archetype: None,
            topics: None,
            entropy: None,
            predictive_churn: None,
            corporate_fingerprint: None,
            license: None,
            derived: None,
            assets: None,
            deps: None,
            git: None,
            imports: None,
            dup: None,
            complexity: None,
            api_surface: None,
            fun: None,
        }
    }
1142
    /// Fully populated `DerivedReport` fixture (10 files, 1300 lines, 2500
    /// tokens) used by the md/xml/svg/tree rendering tests. Specific values
    /// are asserted verbatim by those tests, so keep them in sync.
    fn sample_derived() -> DerivedReport {
        DerivedReport {
            totals: DerivedTotals {
                files: 10,
                code: 1000,
                comments: 200,
                blanks: 100,
                lines: 1300,
                bytes: 50000,
                tokens: 2500,
            },
            doc_density: RatioReport {
                total: RatioRow {
                    key: "total".to_string(),
                    numerator: 200,
                    denominator: 1200,
                    ratio: 0.1667,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            whitespace: RatioReport {
                total: RatioRow {
                    key: "total".to_string(),
                    numerator: 100,
                    denominator: 1300,
                    ratio: 0.0769,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            verbosity: RateReport {
                total: RateRow {
                    key: "total".to_string(),
                    numerator: 50000,
                    denominator: 1300,
                    rate: 38.46,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            max_file: MaxFileReport {
                overall: FileStatRow {
                    path: "src/lib.rs".to_string(),
                    module: "src".to_string(),
                    lang: "Rust".to_string(),
                    code: 500,
                    comments: 100,
                    blanks: 50,
                    lines: 650,
                    bytes: 25000,
                    tokens: 1250,
                    doc_pct: Some(0.167),
                    bytes_per_line: Some(38.46),
                    depth: 1,
                },
                by_lang: vec![],
                by_module: vec![],
            },
            lang_purity: LangPurityReport { rows: vec![] },
            nesting: NestingReport {
                max: 3,
                avg: 1.5,
                by_module: vec![],
            },
            test_density: TestDensityReport {
                test_lines: 200,
                prod_lines: 1000,
                test_files: 5,
                prod_files: 5,
                ratio: 0.2,
            },
            boilerplate: BoilerplateReport {
                infra_lines: 100,
                logic_lines: 1100,
                ratio: 0.083,
                infra_langs: vec!["TOML".to_string()],
            },
            polyglot: PolyglotReport {
                lang_count: 2,
                entropy: 0.5,
                dominant_lang: "Rust".to_string(),
                dominant_lines: 1000,
                dominant_pct: 0.833,
            },
            distribution: DistributionReport {
                count: 10,
                min: 50,
                max: 650,
                mean: 130.0,
                median: 100.0,
                p90: 400.0,
                p99: 650.0,
                gini: 0.3,
            },
            histogram: vec![HistogramBucket {
                label: "Small".to_string(),
                min: 0,
                max: Some(100),
                files: 5,
                pct: 0.5,
            }],
            top: TopOffenders {
                // Mirrors the `max_file.overall` row above.
                largest_lines: vec![FileStatRow {
                    path: "src/lib.rs".to_string(),
                    module: "src".to_string(),
                    lang: "Rust".to_string(),
                    code: 500,
                    comments: 100,
                    blanks: 50,
                    lines: 650,
                    bytes: 25000,
                    tokens: 1250,
                    doc_pct: Some(0.167),
                    bytes_per_line: Some(38.46),
                    depth: 1,
                }],
                largest_tokens: vec![],
                largest_bytes: vec![],
                least_documented: vec![],
                most_dense: vec![],
            },
            tree: Some("test-tree".to_string()),
            reading_time: ReadingTimeReport {
                minutes: 65.0,
                lines_per_minute: 20,
                basis_lines: 1300,
            },
            context_window: Some(ContextWindowReport {
                window_tokens: 100000,
                total_tokens: 2500,
                pct: 0.025,
                fits: true,
            }),
            cocomo: Some(CocomoReport {
                mode: "organic".to_string(),
                kloc: 1.0,
                effort_pm: 2.4,
                duration_months: 2.5,
                staff: 1.0,
                a: 2.4,
                b: 1.05,
                c: 2.5,
                d: 0.38,
            }),
            todo: Some(TodoReport {
                total: 5,
                density_per_kloc: 5.0,
                tags: vec![TodoTagRow {
                    tag: "TODO".to_string(),
                    count: 5,
                }],
            }),
            integrity: IntegrityReport {
                algo: "blake3".to_string(),
                hash: "abc123".to_string(),
                entries: 10,
            },
        }
    }
1303
1304 #[test]
1306 fn test_fmt_pct() {
1307 assert_eq!(fmt_pct(0.5), "50.0%");
1308 assert_eq!(fmt_pct(0.0), "0.0%");
1309 assert_eq!(fmt_pct(1.0), "100.0%");
1310 assert_eq!(fmt_pct(0.1234), "12.3%");
1311 }
1312
1313 #[test]
1315 #[allow(clippy::approx_constant)]
1316 fn test_fmt_f64() {
1317 assert_eq!(fmt_f64(3.14159, 2), "3.14");
1318 assert_eq!(fmt_f64(3.14159, 4), "3.1416");
1319 assert_eq!(fmt_f64(0.0, 2), "0.00");
1320 assert_eq!(fmt_f64(100.0, 0), "100");
1321 }
1322
1323 #[test]
1325 fn test_sanitize_mermaid() {
1326 assert_eq!(sanitize_mermaid("hello"), "hello");
1327 assert_eq!(sanitize_mermaid("hello-world"), "hello_world");
1328 assert_eq!(sanitize_mermaid("src/lib.rs"), "src_lib_rs");
1329 assert_eq!(sanitize_mermaid("test123"), "test123");
1330 assert_eq!(sanitize_mermaid("a b c"), "a_b_c");
1331 }
1332
1333 #[test]
1335 fn test_render_file_table() {
1336 let rows = vec![FileStatRow {
1337 path: "src/lib.rs".to_string(),
1338 module: "src".to_string(),
1339 lang: "Rust".to_string(),
1340 code: 100,
1341 comments: 20,
1342 blanks: 10,
1343 lines: 130,
1344 bytes: 5000,
1345 tokens: 250,
1346 doc_pct: Some(0.167),
1347 bytes_per_line: Some(38.46),
1348 depth: 1,
1349 }];
1350 let result = render_file_table(&rows);
1351 assert!(result.contains("|Path|Lang|Lines|Code|Bytes|Tokens|Doc%|B/Line|"));
1352 assert!(result.contains("|src/lib.rs|Rust|130|100|5000|250|16.7%|38.46|"));
1353 }
1354
1355 #[test]
1357 fn test_render_file_table_none_values() {
1358 let rows = vec![FileStatRow {
1359 path: "test.txt".to_string(),
1360 module: "root".to_string(),
1361 lang: "Text".to_string(),
1362 code: 50,
1363 comments: 0,
1364 blanks: 5,
1365 lines: 55,
1366 bytes: 1000,
1367 tokens: 100,
1368 doc_pct: None,
1369 bytes_per_line: None,
1370 depth: 0,
1371 }];
1372 let result = render_file_table(&rows);
1373 assert!(result.contains("|-|-|")); }
1375
1376 #[test]
1378 fn test_render_xml() {
1379 let mut receipt = minimal_receipt();
1380 receipt.derived = Some(sample_derived());
1381 let result = render_xml(&receipt);
1382 assert!(result.starts_with("<analysis>"));
1383 assert!(result.ends_with("</analysis>"));
1384 assert!(result.contains("files=\"10\""));
1385 assert!(result.contains("code=\"1000\""));
1386 }
1387
1388 #[test]
1390 fn test_render_xml_no_derived() {
1391 let receipt = minimal_receipt();
1392 let result = render_xml(&receipt);
1393 assert_eq!(result, "<analysis></analysis>");
1394 }
1395
1396 #[test]
1398 fn test_render_jsonld() {
1399 let mut receipt = minimal_receipt();
1400 receipt.derived = Some(sample_derived());
1401 let result = render_jsonld(&receipt);
1402 assert!(result.contains("\"@context\": \"https://schema.org\""));
1403 assert!(result.contains("\"@type\": \"SoftwareSourceCode\""));
1404 assert!(result.contains("\"name\": \"test\""));
1405 assert!(result.contains("\"codeLines\": 1000"));
1406 }
1407
1408 #[test]
1410 fn test_render_jsonld_empty_inputs() {
1411 let mut receipt = minimal_receipt();
1412 receipt.source.inputs.clear();
1413 let result = render_jsonld(&receipt);
1414 assert!(result.contains("\"name\": \"tokmd\""));
1415 }
1416
1417 #[test]
1419 fn test_render_svg() {
1420 let mut receipt = minimal_receipt();
1421 receipt.derived = Some(sample_derived());
1422 let result = render_svg(&receipt);
1423 assert!(result.contains("<svg"));
1424 assert!(result.contains("</svg>"));
1425 assert!(result.contains("context")); assert!(result.contains("2.5%")); }
1428
1429 #[test]
1431 fn test_render_svg_no_context() {
1432 let mut receipt = minimal_receipt();
1433 let mut derived = sample_derived();
1434 derived.context_window = None;
1435 receipt.derived = Some(derived);
1436 let result = render_svg(&receipt);
1437 assert!(result.contains("tokens"));
1438 assert!(result.contains("2500")); }
1440
1441 #[test]
1443 fn test_render_svg_no_derived() {
1444 let receipt = minimal_receipt();
1445 let result = render_svg(&receipt);
1446 assert!(result.contains("tokens"));
1447 assert!(result.contains(">0<")); }
1449
1450 #[test]
1452 fn test_render_svg_dimensions() {
1453 let receipt = minimal_receipt();
1454 let result = render_svg(&receipt);
1455 assert!(result.contains("width=\"160\"")); }
1458
1459 #[test]
1461 fn test_render_mermaid() {
1462 let mut receipt = minimal_receipt();
1463 receipt.imports = Some(ImportReport {
1464 granularity: "module".to_string(),
1465 edges: vec![ImportEdge {
1466 from: "src/main".to_string(),
1467 to: "src/lib".to_string(),
1468 count: 5,
1469 }],
1470 });
1471 let result = render_mermaid(&receipt);
1472 assert!(result.starts_with("graph TD\n"));
1473 assert!(result.contains("src_main -->|5| src_lib"));
1474 }
1475
1476 #[test]
1478 fn test_render_mermaid_no_imports() {
1479 let receipt = minimal_receipt();
1480 let result = render_mermaid(&receipt);
1481 assert_eq!(result, "graph TD\n");
1482 }
1483
1484 #[test]
1486 fn test_render_tree() {
1487 let mut receipt = minimal_receipt();
1488 receipt.derived = Some(sample_derived());
1489 let result = render_tree(&receipt);
1490 assert_eq!(result, "test-tree");
1491 }
1492
1493 #[test]
1495 fn test_render_tree_no_derived() {
1496 let receipt = minimal_receipt();
1497 let result = render_tree(&receipt);
1498 assert_eq!(result, "(tree unavailable)");
1499 }
1500
1501 #[test]
1503 fn test_render_tree_none() {
1504 let mut receipt = minimal_receipt();
1505 let mut derived = sample_derived();
1506 derived.tree = None;
1507 receipt.derived = Some(derived);
1508 let result = render_tree(&receipt);
1509 assert_eq!(result, "(tree unavailable)");
1510 }
1511
1512 #[cfg(not(feature = "fun"))]
1514 #[test]
1515 fn test_render_obj_no_fun() {
1516 let receipt = minimal_receipt();
1517 let result = render_obj(&receipt);
1518 assert!(result.is_err());
1519 assert!(result.unwrap_err().to_string().contains("fun"));
1520 }
1521
1522 #[cfg(not(feature = "fun"))]
1524 #[test]
1525 fn test_render_midi_no_fun() {
1526 let receipt = minimal_receipt();
1527 let result = render_midi(&receipt);
1528 assert!(result.is_err());
1529 assert!(result.unwrap_err().to_string().contains("fun"));
1530 }
1531
1532 #[cfg(feature = "fun")]
1539 #[test]
1540 fn test_render_obj_coordinate_math() {
1541 let mut receipt = minimal_receipt();
1542 let mut derived = sample_derived();
1543 derived.top.largest_lines = vec![
1553 FileStatRow {
1554 path: "file0.rs".to_string(),
1555 module: "src".to_string(),
1556 lang: "Rust".to_string(),
1557 code: 100,
1558 comments: 10,
1559 blanks: 5,
1560 lines: 100, bytes: 1000,
1562 tokens: 200,
1563 doc_pct: None,
1564 bytes_per_line: None,
1565 depth: 1,
1566 },
1567 FileStatRow {
1568 path: "file1.rs".to_string(),
1569 module: "src".to_string(),
1570 lang: "Rust".to_string(),
1571 code: 50,
1572 comments: 5,
1573 blanks: 2,
1574 lines: 3, bytes: 500,
1576 tokens: 100,
1577 doc_pct: None,
1578 bytes_per_line: None,
1579 depth: 2,
1580 },
1581 FileStatRow {
1582 path: "file2.rs".to_string(),
1583 module: "src".to_string(),
1584 lang: "Rust".to_string(),
1585 code: 200,
1586 comments: 20,
1587 blanks: 10,
1588 lines: 200, bytes: 2000,
1590 tokens: 400,
1591 doc_pct: None,
1592 bytes_per_line: None,
1593 depth: 3,
1594 },
1595 FileStatRow {
1596 path: "file3.rs".to_string(),
1597 module: "src".to_string(),
1598 lang: "Rust".to_string(),
1599 code: 75,
1600 comments: 7,
1601 blanks: 3,
1602 lines: 75, bytes: 750,
1604 tokens: 150,
1605 doc_pct: None,
1606 bytes_per_line: None,
1607 depth: 0,
1608 },
1609 FileStatRow {
1610 path: "file4.rs".to_string(),
1611 module: "src".to_string(),
1612 lang: "Rust".to_string(),
1613 code: 150,
1614 comments: 15,
1615 blanks: 8,
1616 lines: 150, bytes: 1500,
1618 tokens: 300,
1619 doc_pct: None,
1620 bytes_per_line: None,
1621 depth: 1,
1622 },
1623 FileStatRow {
1625 path: "file5.rs".to_string(),
1626 module: "src".to_string(),
1627 lang: "Rust".to_string(),
1628 code: 80,
1629 comments: 8,
1630 blanks: 4,
1631 lines: 80, bytes: 800,
1633 tokens: 160,
1634 doc_pct: None,
1635 bytes_per_line: None,
1636 depth: 2,
1637 },
1638 FileStatRow {
1640 path: "file6.rs".to_string(),
1641 module: "src".to_string(),
1642 lang: "Rust".to_string(),
1643 code: 60,
1644 comments: 6,
1645 blanks: 3,
1646 lines: 60, bytes: 600,
1648 tokens: 120,
1649 doc_pct: None,
1650 bytes_per_line: None,
1651 depth: 1,
1652 },
1653 ];
1654 receipt.derived = Some(derived);
1655 let result = render_obj(&receipt).expect("render_obj should succeed with fun feature");
1656
1657 #[allow(clippy::type_complexity)]
1660 let objects: Vec<(&str, Vec<(f32, f32, f32)>)> = result
1661 .split("o ")
1662 .skip(1)
1663 .map(|section| {
1664 let lines: Vec<&str> = section.lines().collect();
1665 let name = lines[0];
1666 let vertices: Vec<(f32, f32, f32)> = lines[1..]
1667 .iter()
1668 .filter(|l| l.starts_with("v "))
1669 .take(8)
1670 .map(|l| {
1671 let parts: Vec<f32> = l[2..]
1672 .split_whitespace()
1673 .map(|p| p.parse().unwrap())
1674 .collect();
1675 (parts[0], parts[1], parts[2])
1676 })
1677 .collect();
1678 (name, vertices)
1679 })
1680 .collect();
1681
1682 assert_eq!(objects.len(), 7, "expected 7 buildings");
1684
1685 fn base_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1687 obj.1[0]
1688 }
1689 fn top_corner(obj: &(&str, Vec<(f32, f32, f32)>)) -> (f32, f32, f32) {
1690 obj.1[4] }
1692
1693 assert_eq!(
1695 base_corner(&objects[0]),
1696 (0.0, 0.0, 0.0),
1697 "file0 base position"
1698 );
1699 assert_eq!(
1700 top_corner(&objects[0]).2,
1701 10.0,
1702 "file0 height should be 10.0 (100/10)"
1703 );
1704
1705 assert_eq!(
1708 base_corner(&objects[1]),
1709 (2.0, 0.0, 0.0),
1710 "file1 base position"
1711 );
1712 assert_eq!(
1713 top_corner(&objects[1]).2,
1714 0.5,
1715 "file1 height should be 0.5 (clamped from 3/10=0.3)"
1716 );
1717
1718 assert_eq!(
1720 base_corner(&objects[2]),
1721 (4.0, 0.0, 0.0),
1722 "file2 base position"
1723 );
1724 assert_eq!(
1725 top_corner(&objects[2]).2,
1726 20.0,
1727 "file2 height should be 20.0 (200/10)"
1728 );
1729
1730 assert_eq!(
1732 base_corner(&objects[3]),
1733 (6.0, 0.0, 0.0),
1734 "file3 base position"
1735 );
1736 assert_eq!(
1737 top_corner(&objects[3]).2,
1738 7.5,
1739 "file3 height should be 7.5 (75/10)"
1740 );
1741
1742 assert_eq!(
1745 base_corner(&objects[4]),
1746 (8.0, 0.0, 0.0),
1747 "file4 base position (x = 4*2 = 8)"
1748 );
1749 assert_eq!(
1750 top_corner(&objects[4]).2,
1751 15.0,
1752 "file4 height should be 15.0 (150/10)"
1753 );
1754
1755 assert_eq!(
1759 base_corner(&objects[5]),
1760 (0.0, 2.0, 0.0),
1761 "file5 base position (x=0 from 5%5, y=2 from 5/5*2)"
1762 );
1763 assert_eq!(
1764 top_corner(&objects[5]).2,
1765 8.0,
1766 "file5 height should be 8.0 (80/10)"
1767 );
1768
1769 assert_eq!(
1772 base_corner(&objects[6]),
1773 (2.0, 2.0, 0.0),
1774 "file6 base position (x=2 from 6%5*2, y=2 from 6/5*2)"
1775 );
1776 assert_eq!(
1777 top_corner(&objects[6]).2,
1778 6.0,
1779 "file6 height should be 6.0 (60/10)"
1780 );
1781
1782 assert!(result.contains("f 1 2 3 4"), "missing face definition");
1784 }
1785
1786 #[cfg(feature = "fun")]
1792 #[test]
1793 fn test_render_midi_note_math() {
1794 use midly::{MidiMessage, Smf, TrackEventKind};
1795
1796 let mut receipt = minimal_receipt();
1797 let mut derived = sample_derived();
1798 derived.top.largest_lines = vec![
1804 FileStatRow {
1806 path: "a.rs".to_string(),
1807 module: "src".to_string(),
1808 lang: "Rust".to_string(),
1809 code: 50,
1810 comments: 5,
1811 blanks: 2,
1812 lines: 60,
1813 bytes: 500,
1814 tokens: 100,
1815 doc_pct: None,
1816 bytes_per_line: None,
1817 depth: 5,
1818 },
1819 FileStatRow {
1822 path: "b.rs".to_string(),
1823 module: "src".to_string(),
1824 lang: "Rust".to_string(),
1825 code: 100,
1826 comments: 10,
1827 blanks: 5,
1828 lines: 200, bytes: 1000,
1830 tokens: 200,
1831 doc_pct: None,
1832 bytes_per_line: None,
1833 depth: 15,
1834 },
1835 FileStatRow {
1837 path: "c.rs".to_string(),
1838 module: "src".to_string(),
1839 lang: "Rust".to_string(),
1840 code: 20,
1841 comments: 2,
1842 blanks: 1,
1843 lines: 20,
1844 bytes: 200,
1845 tokens: 40,
1846 doc_pct: None,
1847 bytes_per_line: None,
1848 depth: 0,
1849 },
1850 FileStatRow {
1853 path: "d.rs".to_string(),
1854 module: "src".to_string(),
1855 lang: "Rust".to_string(),
1856 code: 160,
1857 comments: 16,
1858 blanks: 8,
1859 lines: 160,
1860 bytes: 1600,
1861 tokens: 320,
1862 doc_pct: None,
1863 bytes_per_line: None,
1864 depth: 12,
1865 },
1866 ];
1867 receipt.derived = Some(derived);
1868
1869 let result = render_midi(&receipt).unwrap();
1870
1871 let smf = Smf::parse(&result).expect("should parse as valid MIDI");
1873
1874 let mut notes: Vec<(u32, u8, u8)> = Vec::new(); let mut abs_time = 0u32;
1877
1878 for event in &smf.tracks[0] {
1879 abs_time += event.delta.as_int();
1880 if let TrackEventKind::Midi {
1881 message: MidiMessage::NoteOn { key, vel },
1882 ..
1883 } = event.kind
1884 {
1885 notes.push((abs_time, key.as_int(), vel.as_int()));
1886 }
1887 }
1888
1889 assert_eq!(notes.len(), 4, "expected 4 NoteOn events, got {:?}", notes);
1891
1892 assert_eq!(
1895 notes[0],
1896 (0, 65, 70),
1897 "note 0: expected (time=0, key=65=60+5, vel=70=40+60/2), got {:?}",
1898 notes[0]
1899 );
1900
1901 assert_eq!(
1904 notes[1],
1905 (240, 63, 103),
1906 "note 1: expected (time=240=1*240, key=63=60+(15%12), vel=103=40+127/2), got {:?}",
1907 notes[1]
1908 );
1909
1910 assert_eq!(
1912 notes[2],
1913 (480, 60, 50),
1914 "note 2: expected (time=480=2*240, key=60=60+0, vel=50=40+20/2), got {:?}",
1915 notes[2]
1916 );
1917
1918 assert_eq!(
1921 notes[3],
1922 (720, 60, 103),
1923 "note 3: expected (time=720=3*240, key=60=60+(12%12), vel=103=40+127/2), got {:?}",
1924 notes[3]
1925 );
1926
1927 let mut note_offs: Vec<(u32, u8)> = Vec::new(); abs_time = 0;
1930 for event in &smf.tracks[0] {
1931 abs_time += event.delta.as_int();
1932 if let TrackEventKind::Midi {
1933 message: MidiMessage::NoteOff { key, .. },
1934 ..
1935 } = event.kind
1936 {
1937 note_offs.push((abs_time, key.as_int()));
1938 }
1939 }
1940
1941 assert!(
1943 note_offs.iter().any(|&(t, k)| t == 180 && k == 65),
1944 "expected NoteOff for key 65 at time 180, got {:?}",
1945 note_offs
1946 );
1947 assert!(
1948 note_offs.iter().any(|&(t, k)| t == 420 && k == 63),
1949 "expected NoteOff for key 63 at time 420 (240+180), got {:?}",
1950 note_offs
1951 );
1952 assert!(
1953 note_offs.iter().any(|&(t, k)| t == 660 && k == 60),
1954 "expected NoteOff for key 60 at time 660 (480+180), got {:?}",
1955 note_offs
1956 );
1957 assert!(
1958 note_offs.iter().any(|&(t, k)| t == 900 && k == 60),
1959 "expected NoteOff for key 60 at time 900 (720+180), got {:?}",
1960 note_offs
1961 );
1962 }
1963
1964 #[cfg(feature = "fun")]
1966 #[test]
1967 fn test_render_midi_no_derived() {
1968 use midly::Smf;
1969
1970 let receipt = minimal_receipt();
1971 let result = render_midi(&receipt).unwrap();
1972
1973 assert!(!result.is_empty(), "MIDI output should not be empty");
1975 assert!(
1976 result.len() > 14,
1977 "MIDI should have header (14 bytes) + track data"
1978 );
1979
1980 let smf = Smf::parse(&result).expect("should be valid MIDI even with no notes");
1982 assert_eq!(smf.tracks.len(), 1, "should have exactly one track");
1983 }
1984
1985 #[cfg(feature = "fun")]
1987 #[test]
1988 fn test_render_obj_no_derived() {
1989 let receipt = minimal_receipt();
1990 let result = render_obj(&receipt).expect("render_obj should succeed");
1991
1992 assert_eq!(result, "# tokmd code city\n");
1994 }
1995
1996 #[test]
1998 fn test_render_md_basic() {
1999 let receipt = minimal_receipt();
2000 let result = render_md(&receipt);
2001 assert!(result.starts_with("# tokmd analysis\n"));
2002 assert!(result.contains("Preset: `receipt`"));
2003 }
2004
2005 #[test]
2007 fn test_render_md_inputs() {
2008 let mut receipt = minimal_receipt();
2009 receipt.source.inputs = vec!["path1".to_string(), "path2".to_string()];
2010 let result = render_md(&receipt);
2011 assert!(result.contains("## Inputs"));
2012 assert!(result.contains("- `path1`"));
2013 assert!(result.contains("- `path2`"));
2014 }
2015
2016 #[test]
2018 fn test_render_md_empty_inputs() {
2019 let mut receipt = minimal_receipt();
2020 receipt.source.inputs.clear();
2021 let result = render_md(&receipt);
2022 assert!(!result.contains("## Inputs"));
2023 }
2024
2025 #[test]
2027 fn test_render_md_archetype() {
2028 let mut receipt = minimal_receipt();
2029 receipt.archetype = Some(Archetype {
2030 kind: "library".to_string(),
2031 evidence: vec!["Cargo.toml".to_string(), "src/lib.rs".to_string()],
2032 });
2033 let result = render_md(&receipt);
2034 assert!(result.contains("## Archetype"));
2035 assert!(result.contains("- Kind: `library`"));
2036 assert!(result.contains("- Evidence: `Cargo.toml`, `src/lib.rs`"));
2037 }
2038
2039 #[test]
2041 fn test_render_md_archetype_no_evidence() {
2042 let mut receipt = minimal_receipt();
2043 receipt.archetype = Some(Archetype {
2044 kind: "app".to_string(),
2045 evidence: vec![],
2046 });
2047 let result = render_md(&receipt);
2048 assert!(result.contains("## Archetype"));
2049 assert!(result.contains("- Kind: `app`"));
2050 assert!(!result.contains("Evidence"));
2051 }
2052
2053 #[test]
2055 fn test_render_md_topics() {
2056 use std::collections::BTreeMap;
2057 let mut per_module = BTreeMap::new();
2058 per_module.insert(
2059 "src".to_string(),
2060 vec![TopicTerm {
2061 term: "parser".to_string(),
2062 score: 1.5,
2063 tf: 10,
2064 df: 2,
2065 }],
2066 );
2067 let mut receipt = minimal_receipt();
2068 receipt.topics = Some(TopicClouds {
2069 overall: vec![TopicTerm {
2070 term: "code".to_string(),
2071 score: 2.0,
2072 tf: 20,
2073 df: 5,
2074 }],
2075 per_module,
2076 });
2077 let result = render_md(&receipt);
2078 assert!(result.contains("## Topics"));
2079 assert!(result.contains("- Overall: `code`"));
2080 assert!(result.contains("- `src`: parser"));
2081 }
2082
2083 #[test]
2085 fn test_render_md_topics_empty_module() {
2086 use std::collections::BTreeMap;
2087 let mut per_module = BTreeMap::new();
2088 per_module.insert("empty_module".to_string(), vec![]);
2089 let mut receipt = minimal_receipt();
2090 receipt.topics = Some(TopicClouds {
2091 overall: vec![],
2092 per_module,
2093 });
2094 let result = render_md(&receipt);
2095 assert!(!result.contains("empty_module"));
2097 }
2098
2099 #[test]
2101 fn test_render_md_entropy() {
2102 let mut receipt = minimal_receipt();
2103 receipt.entropy = Some(EntropyReport {
2104 suspects: vec![EntropyFinding {
2105 path: "secret.bin".to_string(),
2106 module: "root".to_string(),
2107 entropy_bits_per_byte: 7.5,
2108 sample_bytes: 1024,
2109 class: EntropyClass::High,
2110 }],
2111 });
2112 let result = render_md(&receipt);
2113 assert!(result.contains("## Entropy profiling"));
2114 assert!(result.contains("|secret.bin|root|7.50|1024|High|"));
2115 }
2116
2117 #[test]
2119 fn test_render_md_entropy_no_suspects() {
2120 let mut receipt = minimal_receipt();
2121 receipt.entropy = Some(EntropyReport { suspects: vec![] });
2122 let result = render_md(&receipt);
2123 assert!(result.contains("## Entropy profiling"));
2124 assert!(result.contains("No entropy outliers detected"));
2125 }
2126
2127 #[test]
2129 fn test_render_md_license() {
2130 let mut receipt = minimal_receipt();
2131 receipt.license = Some(LicenseReport {
2132 effective: Some("MIT".to_string()),
2133 findings: vec![LicenseFinding {
2134 spdx: "MIT".to_string(),
2135 confidence: 0.95,
2136 source_path: "LICENSE".to_string(),
2137 source_kind: LicenseSourceKind::Text,
2138 }],
2139 });
2140 let result = render_md(&receipt);
2141 assert!(result.contains("## License radar"));
2142 assert!(result.contains("- Effective: `MIT`"));
2143 assert!(result.contains("|MIT|0.95|LICENSE|Text|"));
2144 }
2145
2146 #[test]
2148 fn test_render_md_license_no_findings() {
2149 let mut receipt = minimal_receipt();
2150 receipt.license = Some(LicenseReport {
2151 effective: None,
2152 findings: vec![],
2153 });
2154 let result = render_md(&receipt);
2155 assert!(result.contains("## License radar"));
2156 assert!(result.contains("Heuristic detection"));
2157 assert!(!result.contains("|SPDX|")); }
2159
2160 #[test]
2162 fn test_render_md_corporate_fingerprint() {
2163 let mut receipt = minimal_receipt();
2164 receipt.corporate_fingerprint = Some(CorporateFingerprint {
2165 domains: vec![DomainStat {
2166 domain: "example.com".to_string(),
2167 commits: 50,
2168 pct: 0.75,
2169 }],
2170 });
2171 let result = render_md(&receipt);
2172 assert!(result.contains("## Corporate fingerprint"));
2173 assert!(result.contains("|example.com|50|75.0%|"));
2174 }
2175
2176 #[test]
2178 fn test_render_md_corporate_fingerprint_no_domains() {
2179 let mut receipt = minimal_receipt();
2180 receipt.corporate_fingerprint = Some(CorporateFingerprint { domains: vec![] });
2181 let result = render_md(&receipt);
2182 assert!(result.contains("## Corporate fingerprint"));
2183 assert!(result.contains("No commit domains detected"));
2184 }
2185
2186 #[test]
2188 fn test_render_md_churn() {
2189 use std::collections::BTreeMap;
2190 let mut per_module = BTreeMap::new();
2191 per_module.insert(
2192 "src".to_string(),
2193 ChurnTrend {
2194 slope: 0.5,
2195 r2: 0.8,
2196 recent_change: 5,
2197 classification: TrendClass::Rising,
2198 },
2199 );
2200 let mut receipt = minimal_receipt();
2201 receipt.predictive_churn = Some(PredictiveChurnReport { per_module });
2202 let result = render_md(&receipt);
2203 assert!(result.contains("## Predictive churn"));
2204 assert!(result.contains("|src|0.5000|0.80|5|Rising|"));
2205 }
2206
2207 #[test]
2209 fn test_render_md_churn_empty() {
2210 use std::collections::BTreeMap;
2211 let mut receipt = minimal_receipt();
2212 receipt.predictive_churn = Some(PredictiveChurnReport {
2213 per_module: BTreeMap::new(),
2214 });
2215 let result = render_md(&receipt);
2216 assert!(result.contains("## Predictive churn"));
2217 assert!(result.contains("No churn signals detected"));
2218 }
2219
2220 #[test]
2222 fn test_render_md_assets() {
2223 let mut receipt = minimal_receipt();
2224 receipt.assets = Some(AssetReport {
2225 total_files: 5,
2226 total_bytes: 1000000,
2227 categories: vec![AssetCategoryRow {
2228 category: "images".to_string(),
2229 files: 3,
2230 bytes: 500000,
2231 extensions: vec!["png".to_string(), "jpg".to_string()],
2232 }],
2233 top_files: vec![AssetFileRow {
2234 path: "logo.png".to_string(),
2235 bytes: 100000,
2236 category: "images".to_string(),
2237 extension: "png".to_string(),
2238 }],
2239 });
2240 let result = render_md(&receipt);
2241 assert!(result.contains("## Assets"));
2242 assert!(result.contains("- Total files: `5`"));
2243 assert!(result.contains("|images|3|500000|png, jpg|"));
2244 assert!(result.contains("|logo.png|100000|images|"));
2245 }
2246
2247 #[test]
2249 fn test_render_md_assets_empty() {
2250 let mut receipt = minimal_receipt();
2251 receipt.assets = Some(AssetReport {
2252 total_files: 0,
2253 total_bytes: 0,
2254 categories: vec![],
2255 top_files: vec![],
2256 });
2257 let result = render_md(&receipt);
2258 assert!(result.contains("## Assets"));
2259 assert!(result.contains("- Total files: `0`"));
2260 assert!(!result.contains("|Category|")); }
2262
2263 #[test]
2265 fn test_render_md_deps() {
2266 let mut receipt = minimal_receipt();
2267 receipt.deps = Some(DependencyReport {
2268 total: 50,
2269 lockfiles: vec![LockfileReport {
2270 path: "Cargo.lock".to_string(),
2271 kind: "cargo".to_string(),
2272 dependencies: 50,
2273 }],
2274 });
2275 let result = render_md(&receipt);
2276 assert!(result.contains("## Dependencies"));
2277 assert!(result.contains("- Total: `50`"));
2278 assert!(result.contains("|Cargo.lock|cargo|50|"));
2279 }
2280
2281 #[test]
2283 fn test_render_md_deps_empty() {
2284 let mut receipt = minimal_receipt();
2285 receipt.deps = Some(DependencyReport {
2286 total: 0,
2287 lockfiles: vec![],
2288 });
2289 let result = render_md(&receipt);
2290 assert!(result.contains("## Dependencies"));
2291 assert!(!result.contains("|Lockfile|"));
2292 }
2293
2294 #[test]
2296 fn test_render_md_git() {
2297 let mut receipt = minimal_receipt();
2298 receipt.git = Some(GitReport {
2299 commits_scanned: 100,
2300 files_seen: 50,
2301 hotspots: vec![HotspotRow {
2302 path: "src/lib.rs".to_string(),
2303 commits: 25,
2304 lines: 500,
2305 score: 12500,
2306 }],
2307 bus_factor: vec![BusFactorRow {
2308 module: "src".to_string(),
2309 authors: 3,
2310 }],
2311 freshness: FreshnessReport {
2312 threshold_days: 90,
2313 stale_files: 5,
2314 total_files: 50,
2315 stale_pct: 0.1,
2316 by_module: vec![ModuleFreshnessRow {
2317 module: "src".to_string(),
2318 avg_days: 30.0,
2319 p90_days: 60.0,
2320 stale_pct: 0.05,
2321 }],
2322 },
2323 coupling: vec![CouplingRow {
2324 left: "src/a.rs".to_string(),
2325 right: "src/b.rs".to_string(),
2326 count: 10,
2327 jaccard: Some(0.5),
2328 lift: Some(1.2),
2329 n_left: Some(15),
2330 n_right: Some(12),
2331 }],
2332 age_distribution: Some(CodeAgeDistributionReport {
2333 buckets: vec![CodeAgeBucket {
2334 label: "0-30d".to_string(),
2335 min_days: 0,
2336 max_days: Some(30),
2337 files: 10,
2338 pct: 0.2,
2339 }],
2340 recent_refreshes: 12,
2341 prior_refreshes: 8,
2342 refresh_trend: TrendClass::Rising,
2343 }),
2344 intent: None,
2345 });
2346 let result = render_md(&receipt);
2347 assert!(result.contains("## Git metrics"));
2348 assert!(result.contains("- Commits scanned: `100`"));
2349 assert!(result.contains("|src/lib.rs|25|500|12500|"));
2350 assert!(result.contains("|src|3|"));
2351 assert!(result.contains("Stale threshold (days): `90`"));
2352 assert!(result.contains("|src|30.00|60.00|5.0%|"));
2353 assert!(result.contains("### Code age"));
2354 assert!(result.contains("Refresh trend: `Rising`"));
2355 assert!(result.contains("|0-30d|0|30|10|20.0%|"));
2356 assert!(result.contains("|src/a.rs|src/b.rs|10|"));
2357 }
2358
2359 #[test]
2361 fn test_render_md_git_empty() {
2362 let mut receipt = minimal_receipt();
2363 receipt.git = Some(GitReport {
2364 commits_scanned: 0,
2365 files_seen: 0,
2366 hotspots: vec![],
2367 bus_factor: vec![],
2368 freshness: FreshnessReport {
2369 threshold_days: 90,
2370 stale_files: 0,
2371 total_files: 0,
2372 stale_pct: 0.0,
2373 by_module: vec![],
2374 },
2375 coupling: vec![],
2376 age_distribution: None,
2377 intent: None,
2378 });
2379 let result = render_md(&receipt);
2380 assert!(result.contains("## Git metrics"));
2381 assert!(!result.contains("### Hotspots"));
2382 assert!(!result.contains("### Bus factor"));
2383 assert!(!result.contains("### Coupling"));
2384 }
2385
2386 #[test]
2388 fn test_render_md_imports() {
2389 let mut receipt = minimal_receipt();
2390 receipt.imports = Some(ImportReport {
2391 granularity: "file".to_string(),
2392 edges: vec![ImportEdge {
2393 from: "src/main.rs".to_string(),
2394 to: "src/lib.rs".to_string(),
2395 count: 5,
2396 }],
2397 });
2398 let result = render_md(&receipt);
2399 assert!(result.contains("## Imports"));
2400 assert!(result.contains("- Granularity: `file`"));
2401 assert!(result.contains("|src/main.rs|src/lib.rs|5|"));
2402 }
2403
2404 #[test]
2406 fn test_render_md_imports_empty() {
2407 let mut receipt = minimal_receipt();
2408 receipt.imports = Some(ImportReport {
2409 granularity: "module".to_string(),
2410 edges: vec![],
2411 });
2412 let result = render_md(&receipt);
2413 assert!(result.contains("## Imports"));
2414 assert!(!result.contains("|From|To|"));
2415 }
2416
2417 #[test]
2419 fn test_render_md_dup() {
2420 let mut receipt = minimal_receipt();
2421 receipt.dup = Some(DuplicateReport {
2422 wasted_bytes: 50000,
2423 strategy: "content".to_string(),
2424 groups: vec![DuplicateGroup {
2425 hash: "abc123".to_string(),
2426 bytes: 1000,
2427 files: vec!["a.txt".to_string(), "b.txt".to_string()],
2428 }],
2429 density: Some(DuplicationDensityReport {
2430 duplicate_groups: 1,
2431 duplicate_files: 2,
2432 duplicated_bytes: 2000,
2433 wasted_bytes: 1000,
2434 wasted_pct_of_codebase: 0.1,
2435 by_module: vec![ModuleDuplicationDensityRow {
2436 module: "src".to_string(),
2437 duplicate_files: 2,
2438 wasted_files: 1,
2439 duplicated_bytes: 2000,
2440 wasted_bytes: 1000,
2441 module_bytes: 10_000,
2442 density: 0.1,
2443 }],
2444 }),
2445 near: None,
2446 });
2447 let result = render_md(&receipt);
2448 assert!(result.contains("## Duplicates"));
2449 assert!(result.contains("- Wasted bytes: `50000`"));
2450 assert!(result.contains("### Duplication density"));
2451 assert!(result.contains("Waste vs codebase: `10.0%`"));
2452 assert!(result.contains("|src|2|1|2000|1000|10000|10.0%|"));
2453 assert!(result.contains("|abc123|1000|2|")); }
2455
2456 #[test]
2458 fn test_render_md_dup_empty() {
2459 let mut receipt = minimal_receipt();
2460 receipt.dup = Some(DuplicateReport {
2461 wasted_bytes: 0,
2462 strategy: "content".to_string(),
2463 groups: vec![],
2464 density: None,
2465 near: None,
2466 });
2467 let result = render_md(&receipt);
2468 assert!(result.contains("## Duplicates"));
2469 assert!(!result.contains("|Hash|Bytes|"));
2470 }
2471
2472 #[test]
2474 fn test_render_md_fun() {
2475 let mut receipt = minimal_receipt();
2476 receipt.fun = Some(FunReport {
2477 eco_label: Some(EcoLabel {
2478 label: "A+".to_string(),
2479 score: 95.5,
2480 bytes: 10000,
2481 notes: "Very efficient".to_string(),
2482 }),
2483 });
2484 let result = render_md(&receipt);
2485 assert!(result.contains("## Eco label"));
2486 assert!(result.contains("- Label: `A+`"));
2487 assert!(result.contains("- Score: `95.5`"));
2488 }
2489
2490 #[test]
2492 fn test_render_md_fun_no_label() {
2493 let mut receipt = minimal_receipt();
2494 receipt.fun = Some(FunReport { eco_label: None });
2495 let result = render_md(&receipt);
2496 assert!(!result.contains("## Eco label"));
2498 }
2499
2500 #[test]
2502 fn test_render_md_derived() {
2503 let mut receipt = minimal_receipt();
2504 receipt.derived = Some(sample_derived());
2505 let result = render_md(&receipt);
2506 assert!(result.contains("## Totals"));
2507 assert!(result.contains("|10|1000|200|100|1300|50000|2500|"));
2508 assert!(result.contains("## Ratios"));
2509 assert!(result.contains("## Distribution"));
2510 assert!(result.contains("## File size histogram"));
2511 assert!(result.contains("## Top offenders"));
2512 assert!(result.contains("## Structure"));
2513 assert!(result.contains("## Test density"));
2514 assert!(result.contains("## TODOs"));
2515 assert!(result.contains("## Boilerplate ratio"));
2516 assert!(result.contains("## Polyglot"));
2517 assert!(result.contains("## Reading time"));
2518 assert!(result.contains("## Context window"));
2519 assert!(result.contains("## COCOMO estimate"));
2520 assert!(result.contains("## Integrity"));
2521 }
2522
2523 #[test]
2525 fn test_render_dispatch_md() {
2526 let receipt = minimal_receipt();
2527 let result = render(&receipt, AnalysisFormat::Md).unwrap();
2528 match result {
2529 RenderedOutput::Text(s) => assert!(s.starts_with("# tokmd analysis")),
2530 RenderedOutput::Binary(_) => panic!("expected text"),
2531 }
2532 }
2533
2534 #[test]
2535 fn test_render_dispatch_json() {
2536 let receipt = minimal_receipt();
2537 let result = render(&receipt, AnalysisFormat::Json).unwrap();
2538 match result {
2539 RenderedOutput::Text(s) => assert!(s.contains("\"schema_version\": 2")),
2540 RenderedOutput::Binary(_) => panic!("expected text"),
2541 }
2542 }
2543
2544 #[test]
2545 fn test_render_dispatch_xml() {
2546 let receipt = minimal_receipt();
2547 let result = render(&receipt, AnalysisFormat::Xml).unwrap();
2548 match result {
2549 RenderedOutput::Text(s) => assert!(s.contains("<analysis>")),
2550 RenderedOutput::Binary(_) => panic!("expected text"),
2551 }
2552 }
2553
2554 #[test]
2555 fn test_render_dispatch_tree() {
2556 let receipt = minimal_receipt();
2557 let result = render(&receipt, AnalysisFormat::Tree).unwrap();
2558 match result {
2559 RenderedOutput::Text(s) => assert!(s.contains("(tree unavailable)")),
2560 RenderedOutput::Binary(_) => panic!("expected text"),
2561 }
2562 }
2563
2564 #[test]
2565 fn test_render_dispatch_svg() {
2566 let receipt = minimal_receipt();
2567 let result = render(&receipt, AnalysisFormat::Svg).unwrap();
2568 match result {
2569 RenderedOutput::Text(s) => assert!(s.contains("<svg")),
2570 RenderedOutput::Binary(_) => panic!("expected text"),
2571 }
2572 }
2573
2574 #[test]
2575 fn test_render_dispatch_mermaid() {
2576 let receipt = minimal_receipt();
2577 let result = render(&receipt, AnalysisFormat::Mermaid).unwrap();
2578 match result {
2579 RenderedOutput::Text(s) => assert!(s.starts_with("graph TD")),
2580 RenderedOutput::Binary(_) => panic!("expected text"),
2581 }
2582 }
2583
2584 #[test]
2585 fn test_render_dispatch_jsonld() {
2586 let receipt = minimal_receipt();
2587 let result = render(&receipt, AnalysisFormat::Jsonld).unwrap();
2588 match result {
2589 RenderedOutput::Text(s) => assert!(s.contains("@context")),
2590 RenderedOutput::Binary(_) => panic!("expected text"),
2591 }
2592 }
2593
2594 #[test]
2596 fn test_render_html() {
2597 let mut receipt = minimal_receipt();
2598 receipt.derived = Some(sample_derived());
2599 let result = render_html(&receipt);
2600 assert!(result.contains("<!DOCTYPE html>") || result.contains("<html"));
2601 }
2602}