1use std::path::{Path, PathBuf};
51#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
52use std::time::{SystemTime, UNIX_EPOCH};
53
54use anyhow::Result;
55#[cfg(feature = "analysis")]
56use tokmd_analysis as analysis;
57#[cfg(feature = "analysis")]
58use tokmd_analysis_types::{AnalysisArgsMeta, AnalysisSource};
59
60pub mod error;
62pub mod ffi;
63pub mod settings;
64
65pub use tokmd_config as config;
67pub use tokmd_scan::InMemoryFile;
68pub use tokmd_types as types;
69
70use settings::{DiffSettings, ExportSettings, LangSettings, ModuleSettings, ScanSettings};
71use tokmd_config::GlobalArgs;
72use tokmd_scan_args::scan_args;
73use tokmd_settings::ScanOptions;
74use tokmd_types::{
75 ChildIncludeMode, DiffReceipt, ExportArgsMeta, ExportData, ExportReceipt, FileRow, LangArgs,
76 LangArgsMeta, LangReceipt, LangReport, ModuleArgsMeta, ModuleReceipt, ModuleReport, RedactMode,
77 SCHEMA_VERSION, ScanStatus, ToolInfo,
78};
79
/// Milliseconds since the Unix epoch.
///
/// On `wasm32-unknown-unknown` there is no system clock available through
/// `std`, so this stub pins the timestamp to zero — presumably to keep
/// receipt output deterministic on that target (NOTE(review): confirm callers
/// do not rely on real wall-clock time there).
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
fn now_ms() -> u128 {
    0
}
86
/// Milliseconds since the Unix epoch, per the system clock.
///
/// A clock set before the epoch yields a zero duration instead of panicking.
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
fn now_ms() -> u128 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_millis()
}
94
95pub fn lang_workflow(scan: &ScanSettings, lang: &LangSettings) -> Result<LangReceipt> {
112 let scan_opts = settings_to_scan_options(scan);
113 let paths: Vec<PathBuf> = scan.paths.iter().map(PathBuf::from).collect();
114
115 let languages = tokmd_scan::scan(&paths, &scan_opts)?;
117
118 let report = tokmd_model::create_lang_report(&languages, lang.top, lang.files, lang.children);
120
121 Ok(build_lang_receipt(&paths, &scan_opts, lang, report))
122}
123
124pub fn lang_workflow_from_inputs(
126 inputs: &[InMemoryFile],
127 scan_opts: &ScanOptions,
128 lang: &LangSettings,
129) -> Result<LangReceipt> {
130 let scan_opts = deterministic_in_memory_scan_options(scan_opts);
131 let (paths, rows) =
132 collect_pure_in_memory_rows(inputs, &scan_opts, &[], 1, ChildIncludeMode::Separate)?;
133 let report =
134 tokmd_model::create_lang_report_from_rows(&rows, lang.top, lang.files, lang.children);
135
136 Ok(build_lang_receipt(&paths, &scan_opts, lang, report))
137}
138
139pub fn module_workflow(scan: &ScanSettings, module: &ModuleSettings) -> Result<ModuleReceipt> {
165 let scan_opts = settings_to_scan_options(scan);
166 let paths: Vec<PathBuf> = scan.paths.iter().map(PathBuf::from).collect();
167
168 let languages = tokmd_scan::scan(&paths, &scan_opts)?;
170
171 let report = tokmd_model::create_module_report(
173 &languages,
174 &module.module_roots,
175 module.module_depth,
176 module.children,
177 module.top,
178 );
179
180 Ok(build_module_receipt(&paths, &scan_opts, module, report))
181}
182
183pub fn module_workflow_from_inputs(
185 inputs: &[InMemoryFile],
186 scan_opts: &ScanOptions,
187 module: &ModuleSettings,
188) -> Result<ModuleReceipt> {
189 let scan_opts = deterministic_in_memory_scan_options(scan_opts);
190 let (paths, rows) = collect_pure_in_memory_rows(
191 inputs,
192 &scan_opts,
193 &module.module_roots,
194 module.module_depth,
195 module.children,
196 )?;
197 let report = tokmd_model::create_module_report_from_rows(
198 &rows,
199 &module.module_roots,
200 module.module_depth,
201 module.children,
202 module.top,
203 );
204
205 Ok(build_module_receipt(&paths, &scan_opts, module, report))
206}
207
208pub fn export_workflow(scan: &ScanSettings, export: &ExportSettings) -> Result<ExportReceipt> {
231 let scan_opts = settings_to_scan_options(scan);
232 let paths: Vec<PathBuf> = scan.paths.iter().map(PathBuf::from).collect();
233 let strip_prefix = export.strip_prefix.as_deref();
234
235 let languages = tokmd_scan::scan(&paths, &scan_opts)?;
237
238 let data = tokmd_model::create_export_data(
240 &languages,
241 &export.module_roots,
242 export.module_depth,
243 export.children,
244 strip_prefix.map(std::path::Path::new),
245 export.min_code,
246 export.max_rows,
247 );
248
249 Ok(build_export_receipt(&paths, &scan_opts, export, data))
250}
251
252pub fn export_workflow_from_inputs(
254 inputs: &[InMemoryFile],
255 scan_opts: &ScanOptions,
256 export: &ExportSettings,
257) -> Result<ExportReceipt> {
258 let scan_opts = deterministic_in_memory_scan_options(scan_opts);
259 let (paths, mut rows) = collect_pure_in_memory_rows(
260 inputs,
261 &scan_opts,
262 &export.module_roots,
263 export.module_depth,
264 export.children,
265 )?;
266 if let Some(strip_prefix) = export.strip_prefix.as_deref() {
267 rows = strip_virtual_export_prefix(
268 rows,
269 strip_prefix,
270 &export.module_roots,
271 export.module_depth,
272 );
273 }
274 let data = tokmd_model::create_export_data_from_rows(
275 rows,
276 &export.module_roots,
277 export.module_depth,
278 export.children,
279 export.min_code,
280 export.max_rows,
281 );
282
283 Ok(build_export_receipt(&paths, &scan_opts, export, data))
284}
285
286pub fn diff_workflow(settings: &DiffSettings) -> Result<DiffReceipt> {
311 let from_report = load_lang_report(&settings.from)?;
313
314 let to_report = load_lang_report(&settings.to)?;
316
317 let rows = tokmd_format::compute_diff_rows(&from_report, &to_report);
319 let totals = tokmd_format::compute_diff_totals(&rows);
320
321 Ok(tokmd_format::create_diff_receipt(
322 &settings.from,
323 &settings.to,
324 rows,
325 totals,
326 ))
327}
328
/// Runs the analysis pipeline over an on-disk scan.
///
/// The analysis root falls back to the current directory, then to `"."`,
/// when it cannot be derived from the scan paths.
#[cfg(feature = "analysis")]
pub fn analyze_workflow(
    scan: &ScanSettings,
    analyze: &settings::AnalyzeSettings,
) -> Result<tokmd_analysis_types::AnalysisReceipt> {
    let export_receipt = export_workflow(scan, &ExportSettings::default())?;
    let root = match derive_analysis_root(scan) {
        Some(root) => root,
        None => std::env::current_dir().unwrap_or_else(|_| PathBuf::from(".")),
    };
    analyze_with_export_receipt(export_receipt, scan.paths.clone(), root, analyze)
}
359
/// Runs the analysis pipeline over in-memory inputs.
///
/// Two paths exist:
/// - For presets that do not need an on-disk root (see
///   [`supports_rootless_in_memory_analyze_preset`]), rows are built purely in
///   memory and an empty `PathBuf` is passed as the root.
/// - Otherwise the inputs are materialized via `tokmd_scan::scan_in_memory`
///   and the scan's strip-prefix directory becomes the analysis root.
#[cfg(feature = "analysis")]
pub fn analyze_workflow_from_inputs(
    inputs: &[InMemoryFile],
    scan_opts: &ScanOptions,
    analyze: &settings::AnalyzeSettings,
) -> Result<tokmd_analysis_types::AnalysisReceipt> {
    // Analysis always runs over default export semantics.
    let export = ExportSettings::default();
    let scan_opts = deterministic_in_memory_scan_options(scan_opts);
    if supports_rootless_in_memory_analyze_preset(&analyze.preset) {
        let (paths, rows) = collect_pure_in_memory_rows(
            inputs,
            &scan_opts,
            &export.module_roots,
            export.module_depth,
            export.children,
        )?;
        let data = tokmd_model::create_export_data_from_rows(
            rows,
            &export.module_roots,
            export.module_depth,
            export.children,
            export.min_code,
            export.max_rows,
        );
        let logical_inputs: Vec<String> = paths
            .iter()
            .map(|path| tokmd_model::normalize_path(path, None))
            .collect();
        let export_receipt = build_export_receipt(&paths, &scan_opts, &export, data);

        // Empty root: these presets never touch the filesystem.
        return analyze_with_export_receipt(
            export_receipt,
            logical_inputs,
            PathBuf::new(),
            analyze,
        );
    }

    // Materializing path: presets that need real files on disk (e.g. git-based).
    let scan = tokmd_scan::scan_in_memory(inputs, &scan_opts)?;
    let data = collect_materialized_export_data(&scan, &export);
    let logical_inputs: Vec<String> = scan
        .logical_paths()
        .iter()
        .map(|path| tokmd_model::normalize_path(path, None))
        .collect();
    let root = scan.strip_prefix().to_path_buf();
    let export_receipt = build_export_receipt(scan.logical_paths(), &scan_opts, &export, data);

    analyze_with_export_receipt(export_receipt, logical_inputs, root, analyze)
}
418
/// Returns `true` for analysis presets that can run on purely in-memory
/// inputs with no filesystem root (case-insensitive, surrounding whitespace
/// ignored).
#[cfg(feature = "analysis")]
#[doc(hidden)]
pub fn supports_rootless_in_memory_analyze_preset(preset: &str) -> bool {
    matches!(
        preset.trim().to_ascii_lowercase().as_str(),
        "receipt" | "estimate"
    )
}
425
/// Assembles the analysis context from an export receipt and dispatches to
/// the analysis engine.
#[cfg(feature = "analysis")]
fn analyze_with_export_receipt(
    export_receipt: ExportReceipt,
    inputs: Vec<String>,
    root: PathBuf,
    analyze: &settings::AnalyzeSettings,
) -> Result<tokmd_analysis_types::AnalysisReceipt> {
    let request = build_analysis_request(analyze)?;
    let children = child_include_mode_to_string(export_receipt.data.children);
    let source = AnalysisSource {
        inputs,
        export_path: None,
        base_receipt_path: None,
        export_schema_version: Some(export_receipt.schema_version),
        export_generated_at_ms: Some(export_receipt.generated_at_ms),
        base_signature: None,
        module_roots: export_receipt.data.module_roots.clone(),
        module_depth: export_receipt.data.module_depth,
        children,
    };
    let ctx = analysis::AnalysisContext {
        export: export_receipt.data,
        root,
        source,
    };
    analysis::analyze(ctx, request)
}
454
/// Translates user-facing [`settings::AnalyzeSettings`] into a concrete
/// [`analysis::AnalysisRequest`], validating preset, granularity, and the
/// optional effort-estimation knobs.
#[cfg(feature = "analysis")]
fn build_analysis_request(
    analyze: &settings::AnalyzeSettings,
) -> Result<analysis::AnalysisRequest> {
    let (preset, preset_meta) = parse_analysis_preset(&analyze.preset)?;
    let (granularity, granularity_meta) = parse_import_granularity(&analyze.granularity)?;
    let effort = parse_effort_request(analyze, &preset_meta)?;

    Ok(analysis::AnalysisRequest {
        preset,
        // Meta snapshot of the arguments, recorded in the receipt.
        args: AnalysisArgsMeta {
            preset: preset_meta,
            format: "json".to_string(),
            window_tokens: analyze.window,
            git: analyze.git,
            max_files: analyze.max_files,
            max_bytes: analyze.max_bytes,
            max_file_bytes: analyze.max_file_bytes,
            max_commits: analyze.max_commits,
            max_commit_files: analyze.max_commit_files,
            import_granularity: granularity_meta,
        },
        limits: analysis::AnalysisLimits {
            max_files: analyze.max_files,
            max_bytes: analyze.max_bytes,
            max_file_bytes: analyze.max_file_bytes,
            max_commits: analyze.max_commits,
            max_commit_files: analyze.max_commit_files,
        },
        window_tokens: analyze.window,
        git: analyze.git,
        import_granularity: granularity,
        // Features below are not exposed through AnalyzeSettings yet; fixed
        // defaults are used here.
        detail_functions: false,
        near_dup: false,
        near_dup_threshold: 0.80,
        near_dup_max_files: 2000,
        near_dup_scope: analysis::NearDupScope::Module,
        near_dup_max_pairs: None,
        near_dup_exclude: Vec::new(),
        effort,
    })
}
497
/// Computes a cockpit receipt for the git repository containing the current
/// working directory.
///
/// # Errors
/// Fails when git is missing from PATH, the cwd is not inside a repository,
/// the base ref cannot be resolved, or cockpit computation itself fails.
#[cfg(feature = "cockpit")]
pub fn cockpit_workflow(
    settings: &settings::CockpitSettings,
) -> Result<tokmd_types::cockpit::CockpitReceipt> {
    use tokmd_types::cockpit::CockpitReceipt;

    if !tokmd_git::git_available() {
        anyhow::bail!("git is not available on PATH");
    }

    let cwd = std::env::current_dir().context("Failed to resolve current directory")?;
    let repo_root =
        tokmd_git::repo_root(&cwd).ok_or_else(|| anyhow::anyhow!("not inside a git repository"))?;

    // Any unrecognized range-mode string falls back to two-dot.
    let range_mode = match settings.range_mode.as_str() {
        "three-dot" | "3dot" => tokmd_git::GitRangeMode::ThreeDot,
        _ => tokmd_git::GitRangeMode::TwoDot,
    };

    let resolved_base =
        tokmd_git::resolve_base_ref(&repo_root, &settings.base).ok_or_else(|| {
            anyhow::anyhow!(
                "base ref '{}' not found and no fallback resolved",
                settings.base
            )
        })?;

    let baseline_path = settings.baseline.as_deref();

    let mut receipt: CockpitReceipt = tokmd_cockpit::compute_cockpit(
        &repo_root,
        &resolved_base,
        &settings.head,
        range_mode,
        baseline_path.map(std::path::Path::new),
    )?;

    // A baseline file additionally enables trend computation against it.
    if let Some(baseline_path) = baseline_path {
        receipt.trend = Some(tokmd_cockpit::load_and_compute_trend(
            std::path::Path::new(baseline_path),
            &receipt,
        )?);
    }

    Ok(receipt)
}
560
561#[cfg(feature = "cockpit")]
562use anyhow::Context as _;
563
564pub fn scan_workflow(
584 global: &GlobalArgs,
585 lang: &LangArgs,
586 redact: Option<RedactMode>,
587) -> Result<LangReceipt> {
588 let scan_opts = ScanOptions::from(global);
590 let languages = tokmd_scan::scan(&lang.paths, &scan_opts)?;
591
592 let report = tokmd_model::create_lang_report(&languages, lang.top, lang.files, lang.children);
595
596 let scan_args = scan_args(&lang.paths, &scan_opts, redact);
599
600 let receipt = LangReceipt {
601 schema_version: SCHEMA_VERSION,
602 generated_at_ms: now_ms(),
603 tool: ToolInfo::current(),
604 mode: "lang".to_string(),
605 status: ScanStatus::Complete,
606 warnings: vec![], scan: scan_args,
608 args: LangArgsMeta {
609 format: format!("{:?}", lang.format), top: lang.top,
611 with_files: lang.files,
612 children: lang.children,
613 },
614 report,
615 };
616
617 Ok(receipt)
618}
619
/// Extracts the scanner options carried inside [`ScanSettings`].
fn settings_to_scan_options(scan: &ScanSettings) -> ScanOptions {
    scan.options.clone()
}
628
629fn deterministic_in_memory_scan_options(scan_opts: &ScanOptions) -> ScanOptions {
630 let mut effective = scan_opts.clone();
631 effective.config = tokmd_types::ConfigMode::None;
634 effective.hidden = true;
635 effective.excluded.clear();
636 effective
637}
638
639fn collect_pure_in_memory_rows(
640 inputs: &[InMemoryFile],
641 scan_opts: &ScanOptions,
642 module_roots: &[String],
643 module_depth: usize,
644 children: ChildIncludeMode,
645) -> Result<(Vec<PathBuf>, Vec<FileRow>)> {
646 let paths = tokmd_scan::normalize_in_memory_paths(inputs)?;
647 let config = tokmd_scan::config_from_scan_options(scan_opts);
648 let row_inputs: Vec<tokmd_model::InMemoryRowInput<'_>> = paths
649 .iter()
650 .zip(inputs)
651 .map(|(path, input)| {
652 tokmd_model::InMemoryRowInput::new(path.as_path(), input.bytes.as_slice())
653 })
654 .collect();
655 let rows = tokmd_model::collect_in_memory_file_rows(
656 &row_inputs,
657 module_roots,
658 module_depth,
659 children,
660 &config,
661 );
662 Ok((paths, rows))
663}
664
/// Collects per-file rows from a materialized scan, stripping the scan's
/// temporary prefix so paths stay logical.
#[cfg(feature = "analysis")]
fn collect_materialized_rows(
    scan: &tokmd_scan::MaterializedScan,
    module_roots: &[String],
    module_depth: usize,
    children: ChildIncludeMode,
) -> Vec<FileRow> {
    tokmd_model::collect_file_rows(
        scan.languages(),
        module_roots,
        module_depth,
        children,
        Some(scan.strip_prefix()),
    )
}
680
681fn strip_virtual_export_prefix(
682 rows: Vec<FileRow>,
683 strip_prefix: &str,
684 module_roots: &[String],
685 module_depth: usize,
686) -> Vec<FileRow> {
687 rows.into_iter()
688 .map(|mut row| {
689 let normalized =
690 tokmd_model::normalize_path(Path::new(&row.path), Some(Path::new(strip_prefix)));
691 row.path = normalized.clone();
692 row.module = tokmd_model::module_key(&normalized, module_roots, module_depth);
693 row
694 })
695 .collect()
696}
697
/// Builds export data from a materialized scan, applying the optional
/// strip-prefix rewrite before aggregation.
#[cfg(feature = "analysis")]
fn collect_materialized_export_data(
    scan: &tokmd_scan::MaterializedScan,
    export: &ExportSettings,
) -> ExportData {
    let rows = collect_materialized_rows(
        scan,
        &export.module_roots,
        export.module_depth,
        export.children,
    );
    let rows = match export.strip_prefix.as_deref() {
        Some(prefix) => {
            strip_virtual_export_prefix(rows, prefix, &export.module_roots, export.module_depth)
        }
        None => rows,
    };
    tokmd_model::create_export_data_from_rows(
        rows,
        &export.module_roots,
        export.module_depth,
        export.children,
        export.min_code,
        export.max_rows,
    )
}
728
/// Wraps a language report in a receipt envelope (schema version, timestamp,
/// tool info, scan-args snapshot). The recorded output format is always
/// `"json"` for settings-driven workflows.
fn build_lang_receipt(
    paths: &[PathBuf],
    scan_opts: &ScanOptions,
    lang: &LangSettings,
    report: LangReport,
) -> LangReceipt {
    LangReceipt {
        schema_version: SCHEMA_VERSION,
        generated_at_ms: now_ms(),
        tool: ToolInfo::current(),
        mode: "lang".to_string(),
        status: ScanStatus::Complete,
        warnings: vec![],
        scan: scan_args(paths, scan_opts, lang.redact),
        args: LangArgsMeta {
            format: "json".to_string(),
            top: lang.top,
            with_files: lang.files,
            children: lang.children,
        },
        report,
    }
}
752
/// Wraps a module report in a receipt envelope (schema version, timestamp,
/// tool info, scan-args snapshot). The recorded output format is always
/// `"json"` for settings-driven workflows.
fn build_module_receipt(
    paths: &[PathBuf],
    scan_opts: &ScanOptions,
    module: &ModuleSettings,
    report: ModuleReport,
) -> ModuleReceipt {
    ModuleReceipt {
        schema_version: SCHEMA_VERSION,
        generated_at_ms: now_ms(),
        tool: ToolInfo::current(),
        mode: "module".to_string(),
        status: ScanStatus::Complete,
        warnings: vec![],
        scan: scan_args(paths, scan_opts, module.redact),
        args: ModuleArgsMeta {
            format: "json".to_string(),
            top: module.top,
            module_roots: module.module_roots.clone(),
            module_depth: module.module_depth,
            children: module.children,
        },
        report,
    }
}
777
778fn build_export_receipt(
779 paths: &[PathBuf],
780 scan_opts: &ScanOptions,
781 export: &ExportSettings,
782 data: ExportData,
783) -> ExportReceipt {
784 let should_redact = export.redact == RedactMode::Paths || export.redact == RedactMode::All;
785 let strip_prefix_redacted = should_redact && export.strip_prefix.is_some();
786
787 ExportReceipt {
788 schema_version: SCHEMA_VERSION,
789 generated_at_ms: now_ms(),
790 tool: ToolInfo::current(),
791 mode: "export".to_string(),
792 status: ScanStatus::Complete,
793 warnings: vec![],
794 scan: scan_args(paths, scan_opts, Some(export.redact)),
795 args: ExportArgsMeta {
796 format: export.format,
797 module_roots: export.module_roots.clone(),
798 module_depth: export.module_depth,
799 children: export.children,
800 min_code: export.min_code,
801 max_rows: export.max_rows,
802 redact: export.redact,
803 strip_prefix: if should_redact {
804 export
805 .strip_prefix
806 .as_ref()
807 .map(|p| tokmd_format::redact_path(p))
808 } else {
809 export.strip_prefix.clone()
810 },
811 strip_prefix_redacted,
812 },
813 data: redact_export_data(data, export.redact),
814 }
815}
816
/// Parses a preset name (case-insensitive, trimmed) into its enum value,
/// returning the normalized string alongside for receipt metadata.
#[cfg(feature = "analysis")]
fn parse_analysis_preset(value: &str) -> Result<(analysis::AnalysisPreset, String)> {
    use analysis::AnalysisPreset as Preset;

    let normalized = value.trim().to_ascii_lowercase();
    let parsed = match normalized.as_str() {
        "receipt" => Some(Preset::Receipt),
        "estimate" => Some(Preset::Estimate),
        "health" => Some(Preset::Health),
        "risk" => Some(Preset::Risk),
        "supply" => Some(Preset::Supply),
        "architecture" => Some(Preset::Architecture),
        "topics" => Some(Preset::Topics),
        "security" => Some(Preset::Security),
        "identity" => Some(Preset::Identity),
        "git" => Some(Preset::Git),
        "deep" => Some(Preset::Deep),
        "fun" => Some(Preset::Fun),
        _ => None,
    };
    match parsed {
        Some(preset) => Ok((preset, normalized)),
        None => Err(error::TokmdError::invalid_field(
            "preset",
            "'receipt', 'estimate', 'health', 'risk', 'supply', 'architecture', 'topics', 'security', 'identity', 'git', 'deep', or 'fun'",
        )
        .into()),
    }
}
843
/// Parses the import-granularity flag (case-insensitive, trimmed), returning
/// the normalized string alongside for receipt metadata.
#[cfg(feature = "analysis")]
fn parse_import_granularity(value: &str) -> Result<(analysis::ImportGranularity, String)> {
    let normalized = value.trim().to_ascii_lowercase();
    let granularity = if normalized == "module" {
        analysis::ImportGranularity::Module
    } else if normalized == "file" {
        analysis::ImportGranularity::File
    } else {
        return Err(error::TokmdError::invalid_field("granularity", "'module' or 'file'").into());
    };
    Ok((granularity, normalized))
}
858
/// Builds the optional effort-estimation request from the analyze settings.
///
/// Effort is implied by the `estimate` preset or by any explicit effort flag;
/// otherwise `Ok(None)` is returned.
///
/// # Errors
/// Fails when only one of `effort_base_ref`/`effort_head_ref` is given, when
/// the model or layer string is invalid, or when `effort_mc_iterations` is 0.
#[cfg(feature = "analysis")]
fn parse_effort_request(
    analyze: &settings::AnalyzeSettings,
    preset: &str,
) -> Result<Option<analysis::EffortRequest>> {
    let requested = preset == "estimate"
        || analyze.effort_model.is_some()
        || analyze.effort_layer.is_some()
        || analyze.effort_base_ref.is_some()
        || analyze.effort_head_ref.is_some()
        || analyze.effort_monte_carlo.unwrap_or(false)
        || analyze.effort_mc_iterations.is_some()
        || analyze.effort_mc_seed.is_some();

    if !requested {
        return Ok(None);
    }

    // The refs only make sense as a pair: reject exactly-one-provided.
    // (`!=` on `is_some()` is the idiomatic XOR.)
    if analyze.effort_base_ref.is_some() != analyze.effort_head_ref.is_some() {
        return Err(error::TokmdError::invalid_field(
            "effort_base_ref/effort_head_ref",
            "both effort_base_ref and effort_head_ref must be provided together",
        )
        .into());
    }

    // Defaults are only needed past this point, so build them lazily here.
    let defaults = analysis::EffortRequest::default();

    let model = analyze
        .effort_model
        .as_deref()
        .map(parse_effort_model)
        .transpose()?
        .unwrap_or(defaults.model);
    let layer = analyze
        .effort_layer
        .as_deref()
        .map(parse_effort_layer)
        .transpose()?
        .unwrap_or(defaults.layer);

    let monte_carlo = analyze.effort_monte_carlo.unwrap_or(false);

    let mc_iterations = analyze
        .effort_mc_iterations
        .unwrap_or(defaults.mc_iterations);

    if mc_iterations == 0 {
        return Err(error::TokmdError::invalid_field(
            "effort_mc_iterations",
            "must be greater than 0",
        )
        .into());
    }

    Ok(Some(analysis::EffortRequest {
        model,
        layer,
        base_ref: analyze.effort_base_ref.clone(),
        head_ref: analyze.effort_head_ref.clone(),
        monte_carlo,
        mc_iterations,
        mc_seed: analyze.effort_mc_seed,
    }))
}
925
/// Parses the effort-model flag (case-insensitive, trimmed).
///
/// Known-but-unsupported names get a more helpful message than unknown ones.
#[cfg(feature = "analysis")]
fn parse_effort_model(value: &str) -> Result<analysis::EffortModelKind> {
    let normalized = value.trim().to_ascii_lowercase();
    if normalized == "cocomo81-basic" {
        return Ok(analysis::EffortModelKind::Cocomo81Basic);
    }
    let expectation = if normalized == "cocomo2-early" || normalized == "ensemble" {
        "only 'cocomo81-basic' is currently supported"
    } else {
        "'cocomo81-basic'"
    };
    Err(error::TokmdError::invalid_field("effort_model", expectation).into())
}
938
/// Parses the effort-layer flag (case-insensitive, trimmed).
#[cfg(feature = "analysis")]
fn parse_effort_layer(value: &str) -> Result<analysis::EffortLayer> {
    let layer = match value.trim().to_ascii_lowercase().as_str() {
        "headline" => Some(analysis::EffortLayer::Headline),
        "why" => Some(analysis::EffortLayer::Why),
        "full" => Some(analysis::EffortLayer::Full),
        _ => None,
    };
    layer.ok_or_else(|| {
        error::TokmdError::invalid_field("effort_layer", "'headline', 'why', or 'full'").into()
    })
}
950
/// Serializes a [`tokmd_types::ChildIncludeMode`] to its receipt string form.
#[cfg(feature = "analysis")]
fn child_include_mode_to_string(mode: tokmd_types::ChildIncludeMode) -> String {
    let label = match mode {
        tokmd_types::ChildIncludeMode::Separate => "separate",
        tokmd_types::ChildIncludeMode::ParentsOnly => "parents-only",
    };
    label.to_string()
}
958
/// Derives an absolute analysis root from the first scan path.
///
/// Returns `None` when there are no paths, the first path is blank, or the
/// current directory cannot be resolved for a relative path. A file path
/// yields its parent directory.
#[cfg(feature = "analysis")]
fn derive_analysis_root(scan: &ScanSettings) -> Option<PathBuf> {
    let first = scan.paths.first()?;
    if first.trim().is_empty() {
        return None;
    }

    let candidate = PathBuf::from(first);
    let absolute = if candidate.is_absolute() {
        candidate
    } else {
        std::env::current_dir().ok()?.join(candidate)
    };

    if absolute.is_dir() {
        return Some(absolute);
    }
    absolute.parent().map(Path::to_path_buf)
}
979
980fn load_lang_report(source: &str) -> Result<LangReport> {
982 let path = std::path::Path::new(source);
983
984 if path.exists() && path.is_file() {
985 let content = std::fs::read_to_string(path)?;
987 if let Ok(receipt) = serde_json::from_str::<LangReceipt>(&content) {
988 return Ok(receipt.report);
989 }
990 }
992
993 let scan = ScanSettings::for_paths(vec![source.to_string()]);
995 let lang = LangSettings::default();
996 let receipt = lang_workflow(&scan, &lang)?;
997 Ok(receipt.report)
998}
999
1000fn redact_export_data(data: ExportData, mode: RedactMode) -> ExportData {
1002 if mode == RedactMode::None {
1003 return data;
1004 }
1005
1006 let rows = data
1007 .rows
1008 .into_iter()
1009 .map(|mut row| {
1010 if mode == RedactMode::Paths || mode == RedactMode::All {
1011 row.path = tokmd_format::redact_path(&row.path);
1012 }
1013 if mode == RedactMode::All {
1014 row.module = tokmd_format::short_hash(&row.module);
1015 }
1016 row
1017 })
1018 .collect();
1019
1020 ExportData {
1021 rows,
1022 module_roots: data.module_roots,
1023 module_depth: data.module_depth,
1024 children: data.children,
1025 }
1026}
1027
/// Receipt schema version, re-exported for consumers of the core crate.
pub const CORE_SCHEMA_VERSION: u32 = SCHEMA_VERSION;
1034
/// Returns the crate version baked in at compile time by Cargo.
pub fn version() -> &'static str {
    env!("CARGO_PKG_VERSION")
}
1039
#[cfg(test)]
mod tests {
    use super::*;
    #[cfg(feature = "analysis")]
    use crate::settings::AnalyzeSettings;
    #[cfg(feature = "analysis")]
    use std::fs;
    #[cfg(feature = "analysis")]
    use std::path::{Path, PathBuf};
    #[cfg(feature = "analysis")]
    use std::time::{SystemTime, UNIX_EPOCH};

    // RAII guard: removes the wrapped temp directory when the test ends,
    // even on panic. Errors during cleanup are deliberately ignored.
    #[cfg(feature = "analysis")]
    #[derive(Debug)]
    struct TempDirGuard(PathBuf);

    #[cfg(feature = "analysis")]
    impl Drop for TempDirGuard {
        fn drop(&mut self) {
            let _ = fs::remove_dir_all(&self.0);
        }
    }

    #[test]
    fn version_not_empty() {
        assert!(!version().is_empty());
    }

    #[test]
    fn settings_to_scan_options_preserves_values() {
        let scan = ScanSettings {
            paths: vec!["src".to_string()],
            options: ScanOptions {
                excluded: vec!["target".to_string()],
                hidden: true,
                no_ignore: true,
                ..Default::default()
            },
        };

        let opts = settings_to_scan_options(&scan);
        assert_eq!(opts.excluded, vec!["target"]);
        assert!(opts.hidden);
        assert!(opts.no_ignore);
    }

    #[test]
    fn scan_settings_current_dir() {
        let settings = ScanSettings::current_dir();
        assert_eq!(settings.paths, vec!["."]);
    }

    #[test]
    fn scan_settings_for_paths() {
        let settings = ScanSettings::for_paths(vec!["src".to_string(), "lib".to_string()]);
        assert_eq!(settings.paths, vec!["src", "lib"]);
    }

    #[cfg(feature = "analysis")]
    #[test]
    fn effort_request_defaults_to_estimate_preset() {
        let analyze = AnalyzeSettings {
            preset: "estimate".to_string(),
            ..Default::default()
        };
        let req = parse_effort_request(&analyze, "estimate").expect("parse effort request");
        let req = req.expect("estimate should imply effort request");
        assert_eq!(
            req.model.as_str(),
            analysis::EffortModelKind::Cocomo81Basic.as_str()
        );
        assert_eq!(req.layer.as_str(), analysis::EffortLayer::Full.as_str());
    }

    #[cfg(feature = "analysis")]
    #[test]
    fn effort_request_not_implied_for_non_estimate_without_flags() {
        let analyze = AnalyzeSettings {
            preset: "receipt".to_string(),
            ..Default::default()
        };
        let req = parse_effort_request(&analyze, "receipt").expect("parse effort request");
        assert!(req.is_none());
    }

    #[cfg(feature = "analysis")]
    #[test]
    fn effort_request_rejects_unsupported_model() {
        let analyze = AnalyzeSettings {
            preset: "estimate".to_string(),
            effort_model: Some("cocomo2-early".to_string()),
            ..Default::default()
        };
        let err =
            parse_effort_request(&analyze, "estimate").expect_err("unsupported model should fail");
        assert!(err.to_string().contains("only 'cocomo81-basic'"));
    }

    // Unique temp-dir path from timestamp + pid; the directory itself is not
    // created here — callers create it implicitly via write_file.
    #[cfg(feature = "analysis")]
    fn mk_temp_dir(prefix: &str) -> PathBuf {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos();
        let mut root = std::env::temp_dir();
        root.push(format!("{prefix}-{timestamp}-{}", std::process::id()));
        root
    }

    // Writes `contents` to `path`, creating parent directories as needed.
    #[cfg(feature = "analysis")]
    fn write_file(path: &Path, contents: &str) {
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent).unwrap();
        }
        fs::write(path, contents).unwrap();
    }

    #[cfg(feature = "analysis")]
    #[test]
    fn analyze_workflow_estimate_preset_populates_effort_and_size_basis_breakdown() {
        let root = mk_temp_dir("tokmd-core-estimate-preset");
        let _guard = TempDirGuard(root.clone());
        // One authored file plus generated- and vendored-looking paths so the
        // size-basis breakdown has something in every bucket.
        write_file(&root.join("src/main.rs"), "fn main() {}\n");
        write_file(
            &root.join("target/generated/bundle.min.js"),
            "console.log(1);\n",
        );
        write_file(
            &root.join("vendor/lib/external.rs"),
            "pub fn external() {}\n",
        );

        let scan = settings::ScanSettings::for_paths(vec![root.display().to_string()]);
        let analyze = AnalyzeSettings {
            preset: "estimate".to_string(),
            ..Default::default()
        };

        let receipt = analyze_workflow(&scan, &analyze).expect("estimate analyze failed");
        let effort = receipt
            .effort
            .as_ref()
            .expect("estimate preset should produce effort");

        assert!(effort.results.effort_pm_p50 > 0.0);
        assert_eq!(
            effort.size_basis.total_lines,
            effort.size_basis.authored_lines
                + effort.size_basis.generated_lines
                + effort.size_basis.vendored_lines
        );
        assert!(effort.size_basis.authored_lines > 0);
        assert!(
            effort.size_basis.generated_lines + effort.size_basis.vendored_lines > 0,
            "expected deterministic generated or vendored lines"
        );
    }
}
1198
// Compile-only module: embedding the README as docs makes `cargo test` run
// the README's code examples as doctests, keeping them in sync with the API.
#[cfg(doctest)]
#[doc = include_str!("../README.md")]
pub mod readme_doctests {}