// tokmd_types — lib.rs
1//! # tokmd-types
2//!
3//! **Tier 0 (Core Types)**
4//!
5//! This crate defines the core data structures and contracts for `tokmd`.
6//! It contains only data types, Serde definitions, and `schema_version`.
7//!
8//! ## Stability Policy
9//!
10//! **JSON-first stability**: The primary contract is the JSON schema, not Rust struct literals.
11//!
12//! - **JSON consumers**: Stable. New fields have sensible defaults; removed/renamed fields
13//!   bump `SCHEMA_VERSION`.
14//! - **Rust library consumers**: Semi-stable. New fields may be added in minor versions,
15//!   which can break struct literal construction. Use `Default` + field mutation or
16//!   `..Default::default()` patterns for forward compatibility.
17//!
18//! If you need strict Rust API stability, pin to an exact version.
19//!
20//! ## What belongs here
21//! * Pure data structs (Receipts, Rows, Reports)
22//! * Serialization/Deserialization logic
23//! * Stability markers (SCHEMA_VERSION)
24//!
25//! ## What does NOT belong here
26//! * File I/O
27//! * CLI argument parsing
28//! * Complex business logic
29//! * Tokei dependencies
30
31pub mod cockpit;
32
33use std::path::PathBuf;
34
35use serde::{Deserialize, Serialize};
36
/// The current schema version for core receipt types (`lang`, `module`, `export`, `diff`, `run`).
///
/// Bumped when fields are removed or renamed (see the crate-level stability policy).
///
/// # Examples
///
/// ```
/// assert_eq!(tokmd_types::SCHEMA_VERSION, 2);
/// ```
pub const SCHEMA_VERSION: u32 = 2;
45
/// A small totals struct shared by summary outputs.
///
/// # Examples
///
/// ```
/// use tokmd_types::Totals;
///
/// let totals = Totals {
///     code: 1000,
///     lines: 1500,
///     files: 10,
///     bytes: 40000,
///     tokens: 10000,
///     avg_lines: 150,
/// };
/// assert_eq!(totals.code, 1000);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Totals {
    /// Lines of code.
    pub code: usize,
    /// Total line count (code + comments + blanks).
    pub lines: usize,
    /// Number of files counted.
    pub files: usize,
    /// Total size in bytes.
    pub bytes: usize,
    /// Estimated token count.
    pub tokens: usize,
    /// Average lines per file (`lines / files`, integer division — see the doctest).
    pub avg_lines: usize,
}
72
/// A single language row in the lang summary.
///
/// # Examples
///
/// ```
/// use tokmd_types::LangRow;
///
/// let row = LangRow {
///     lang: "Rust".to_string(),
///     code: 5000,
///     lines: 6500,
///     files: 42,
///     bytes: 180_000,
///     tokens: 45_000,
///     avg_lines: 154,
/// };
/// assert_eq!(row.lang, "Rust");
/// assert_eq!(row.files, 42);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct LangRow {
    /// Language name (e.g. "Rust").
    pub lang: String,
    /// Lines of code.
    pub code: usize,
    /// Total line count.
    pub lines: usize,
    /// Number of files.
    pub files: usize,
    /// Total size in bytes.
    pub bytes: usize,
    /// Estimated token count.
    pub tokens: usize,
    /// Average lines per file (integer division — see the doctest).
    pub avg_lines: usize,
}

/// Lang summary report: one row per language plus aggregate totals.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LangReport {
    /// Per-language rows.
    pub rows: Vec<LangRow>,
    /// Aggregate totals across the scan.
    pub total: Totals,
    /// Echo of the `with_files` setting the report was produced with.
    pub with_files: bool,
    /// Echo of the embedded-language handling mode.
    pub children: ChildrenMode,
    /// Echo of the `top` row-limit setting.
    pub top: usize,
}
111
/// A single module row in the module breakdown.
///
/// # Examples
///
/// ```
/// use tokmd_types::ModuleRow;
///
/// let row = ModuleRow {
///     module: "crates/tokmd-types".to_string(),
///     code: 800,
///     lines: 1100,
///     files: 3,
///     bytes: 32_000,
///     tokens: 8_000,
///     avg_lines: 366,
/// };
/// assert_eq!(row.module, "crates/tokmd-types");
/// assert_eq!(row.code, 800);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ModuleRow {
    /// Module name (path-like, e.g. "crates/tokmd-types").
    pub module: String,
    /// Lines of code.
    pub code: usize,
    /// Total line count.
    pub lines: usize,
    /// Number of files.
    pub files: usize,
    /// Total size in bytes.
    pub bytes: usize,
    /// Estimated token count.
    pub tokens: usize,
    /// Average lines per file (integer division).
    pub avg_lines: usize,
}

/// Module breakdown report: one row per module plus aggregate totals.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModuleReport {
    /// Per-module rows.
    pub rows: Vec<ModuleRow>,
    /// Aggregate totals across the scan.
    pub total: Totals,
    /// Module root prefixes used when grouping files into modules.
    pub module_roots: Vec<String>,
    /// Echo of the `module_depth` grouping setting.
    pub module_depth: usize,
    /// Echo of the embedded-language inclusion mode.
    pub children: ChildIncludeMode,
    /// Echo of the `top` row-limit setting.
    pub top: usize,
}
151
/// Kind of a file row: counted under the file's primary language (parent)
/// or as embedded-language content (child) — cf. [`ChildIncludeMode`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FileKind {
    /// Primary-language contribution of a file.
    Parent,
    /// Embedded-language contribution.
    Child,
}

/// A single file row in the export inventory.
///
/// # Examples
///
/// ```
/// use tokmd_types::{FileRow, FileKind};
///
/// let row = FileRow {
///     path: "src/main.rs".to_string(),
///     module: "src".to_string(),
///     lang: "Rust".to_string(),
///     kind: FileKind::Parent,
///     code: 120,
///     comments: 30,
///     blanks: 20,
///     lines: 170,
///     bytes: 4_800,
///     tokens: 1_200,
/// };
/// assert_eq!(row.path, "src/main.rs");
/// assert_eq!(row.kind, FileKind::Parent);
/// assert_eq!(row.lines, row.code + row.comments + row.blanks);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct FileRow {
    /// File path (may be redacted — see [`RedactMode`]).
    pub path: String,
    /// Module the file was grouped under.
    pub module: String,
    /// Detected language name.
    pub lang: String,
    /// Parent vs. embedded-child contribution.
    pub kind: FileKind,
    /// Lines of code.
    pub code: usize,
    /// Comment lines.
    pub comments: usize,
    /// Blank lines.
    pub blanks: usize,
    /// Total lines (`code + comments + blanks`, per the doctest).
    pub lines: usize,
    /// File size in bytes.
    pub bytes: usize,
    /// Estimated token count.
    pub tokens: usize,
}

/// Export payload: the per-file inventory plus the grouping settings
/// it was produced with.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportData {
    /// One row per exported file.
    pub rows: Vec<FileRow>,
    /// Module root prefixes used for grouping.
    pub module_roots: Vec<String>,
    /// Echo of the `module_depth` grouping setting.
    pub module_depth: usize,
    /// Echo of the embedded-language inclusion mode.
    pub children: ChildIncludeMode,
}
203
/// Receipt for a combined run that wrote lang, module, and export outputs.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RunReceipt {
    /// Schema version (see [`SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Location of the lang output.
    pub lang_file: String,
    /// Location of the module output.
    pub module_file: String,
    /// Location of the export output.
    pub export_file: String,
    // We could store the scan args here too
}

/// Whether a scan covered everything it was asked to.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ScanStatus {
    /// All inputs were scanned.
    Complete,
    /// Results were produced, but the scan did not cover everything
    /// (receipts carry details in their `warnings` list).
    Partial,
}
220
/// Classification of a commit's intent, derived from subject line.
///
/// Variant names mirror conventional-commit subject prefixes
/// (`feat:`, `fix:`, …), with `Other` as the catch-all.
///
/// Lives in `tokmd-types` (Tier 0) so that both `tokmd-git` (Tier 2) and
/// `tokmd-analysis-types` (Tier 0) can reference it without creating
/// upward dependency edges.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CommitIntentKind {
    Feat,
    Fix,
    Refactor,
    Docs,
    Test,
    Chore,
    Ci,
    Build,
    Perf,
    Style,
    Revert,
    Other,
}

/// Name and version of the tool that produced a receipt or artifact.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ToolInfo {
    /// Tool name ("tokmd" for this binary — see [`ToolInfo::current`]).
    pub name: String,
    /// Tool version string.
    pub version: String,
}
248
249impl ToolInfo {
250    pub fn current() -> Self {
251        Self {
252            name: "tokmd".to_string(),
253            version: env!("CARGO_PKG_VERSION").to_string(),
254        }
255    }
256}
257
/// Scan settings, echoed into receipts for reproducibility.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScanArgs {
    /// Paths that were scanned.
    pub paths: Vec<String>,
    /// Exclusion patterns that were applied.
    pub excluded: Vec<String>,
    /// True if `excluded` patterns were redacted (replaced with hashes).
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub excluded_redacted: bool,
    /// How config files were handled.
    pub config: ConfigMode,
    /// Whether hidden files were included.
    pub hidden: bool,
    // NOTE(review): the no_ignore* flags appear to mirror the usual
    // ignore-file toggles (all ignore files, parent dirs, dotfiles, VCS) —
    // confirm exact semantics against the scanner that consumes them.
    pub no_ignore: bool,
    pub no_ignore_parent: bool,
    pub no_ignore_dot: bool,
    pub no_ignore_vcs: bool,
    /// Whether doc strings were counted as comments instead of code.
    pub treat_doc_strings_as_comments: bool,
}

/// Lang-command arguments echoed into [`LangReceipt`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LangArgsMeta {
    /// Output format name.
    pub format: String,
    /// Row limit.
    pub top: usize,
    /// Whether per-file detail was requested.
    pub with_files: bool,
    /// Embedded-language handling mode.
    pub children: ChildrenMode,
}

/// JSON receipt for the lang summary: envelope metadata plus the report.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LangReceipt {
    /// Schema version (see [`SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String, // "lang"
    /// Complete vs. partial scan.
    pub status: ScanStatus,
    /// Non-fatal problems encountered during the run.
    pub warnings: Vec<String>,
    /// Scan settings used.
    pub scan: ScanArgs,
    /// Command arguments used.
    pub args: LangArgsMeta,
    // Flattened: the report's fields appear at the top level of the JSON.
    #[serde(flatten)]
    pub report: LangReport,
}
295
/// Module-command arguments echoed into [`ModuleReceipt`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModuleArgsMeta {
    /// Output format name.
    pub format: String,
    /// Module root prefixes used for grouping.
    pub module_roots: Vec<String>,
    /// Echo of the `module_depth` grouping setting.
    pub module_depth: usize,
    /// Embedded-language inclusion mode.
    pub children: ChildIncludeMode,
    /// Row limit.
    pub top: usize,
}

/// JSON receipt for the module breakdown: envelope metadata plus the report.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModuleReceipt {
    /// Schema version (see [`SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String, // "module"
    /// Complete vs. partial scan.
    pub status: ScanStatus,
    /// Non-fatal problems encountered during the run.
    pub warnings: Vec<String>,
    /// Scan settings used.
    pub scan: ScanArgs,
    /// Command arguments used.
    pub args: ModuleArgsMeta,
    // Flattened: the report's fields appear at the top level of the JSON.
    #[serde(flatten)]
    pub report: ModuleReport,
}

/// Export-command arguments echoed into [`ExportReceipt`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportArgsMeta {
    /// Output format.
    pub format: ExportFormat,
    /// Module root prefixes used for grouping.
    pub module_roots: Vec<String>,
    /// Echo of the `module_depth` grouping setting.
    pub module_depth: usize,
    /// Embedded-language inclusion mode.
    pub children: ChildIncludeMode,
    /// Minimum `code` count filter.
    pub min_code: usize,
    /// Maximum number of rows emitted.
    pub max_rows: usize,
    /// Path/module redaction mode.
    pub redact: RedactMode,
    /// Prefix stripped from paths, if any.
    pub strip_prefix: Option<String>,
    /// True if `strip_prefix` was redacted (replaced with a hash).
    #[serde(default, skip_serializing_if = "std::ops::Not::not")]
    pub strip_prefix_redacted: bool,
}

/// JSON receipt for the export: envelope metadata plus the file inventory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportReceipt {
    /// Schema version (see [`SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String, // "export"
    /// Complete vs. partial scan.
    pub status: ScanStatus,
    /// Non-fatal problems encountered during the run.
    pub warnings: Vec<String>,
    /// Scan settings used.
    pub scan: ScanArgs,
    /// Command arguments used.
    pub args: ExportArgsMeta,
    // Flattened: the inventory's fields appear at the top level of the JSON.
    #[serde(flatten)]
    pub data: ExportData,
}
347
/// Arguments for the lang command (CLI-side twin of [`LangArgsMeta`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LangArgs {
    pub paths: Vec<PathBuf>,
    pub format: TableFormat,
    pub top: usize,
    pub files: bool,
    pub children: ChildrenMode,
}

/// Arguments for the module command (CLI-side twin of [`ModuleArgsMeta`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModuleArgs {
    pub paths: Vec<PathBuf>,
    pub format: TableFormat,
    pub top: usize,
    pub module_roots: Vec<String>,
    pub module_depth: usize,
    pub children: ChildIncludeMode,
}

/// Arguments for the export command (CLI-side twin of [`ExportArgsMeta`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExportArgs {
    pub paths: Vec<PathBuf>,
    pub format: ExportFormat,
    /// Destination file; `None` presumably means stdout — confirm in the CLI.
    pub output: Option<PathBuf>,
    pub module_roots: Vec<String>,
    pub module_depth: usize,
    pub children: ChildIncludeMode,
    /// Minimum `code` count filter.
    pub min_code: usize,
    /// Maximum number of rows to emit.
    pub max_rows: usize,
    pub redact: RedactMode,
    /// Whether to emit receipt metadata alongside the export.
    pub meta: bool,
    /// Prefix to strip from paths before output.
    pub strip_prefix: Option<PathBuf>,
}
381
/// JSON receipt for the context command (schema: [`CONTEXT_SCHEMA_VERSION`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextReceipt {
    /// Schema version (see [`CONTEXT_SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String,
    /// Token budget requested.
    pub budget_tokens: usize,
    /// Tokens consumed by the selected files.
    pub used_tokens: usize,
    /// used/budget utilization percentage.
    pub utilization_pct: f64,
    /// Selection strategy name.
    pub strategy: String,
    /// Requested ranking metric.
    pub rank_by: String,
    /// Number of included files.
    pub file_count: usize,
    /// Rows describing the included files.
    pub files: Vec<ContextFileRow>,
    /// Effective ranking metric (may differ from rank_by if fallback occurred).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub rank_by_effective: Option<String>,
    /// Reason for fallback if rank_by_effective differs from rank_by.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fallback_reason: Option<String>,
    /// Files excluded by per-file cap / classification policy.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub excluded_by_policy: Vec<PolicyExcludedFile>,
    /// Token estimation envelope with uncertainty bounds.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub token_estimation: Option<TokenEstimationMeta>,
    /// Post-bundle audit comparing actual bytes to estimates.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub bundle_audit: Option<TokenAudit>,
}

/// A file selected into a context bundle.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextFileRow {
    /// File path.
    pub path: String,
    /// Module the file was grouped under.
    pub module: String,
    /// Detected language name.
    pub lang: String,
    /// Estimated token count.
    pub tokens: usize,
    /// Lines of code.
    pub code: usize,
    /// Total line count.
    pub lines: usize,
    /// Size in bytes.
    pub bytes: usize,
    /// Ranking value assigned by the selection strategy.
    pub value: usize,
    /// Human-readable ranking explanation (empty when omitted).
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub rank_reason: String,
    /// Inclusion policy applied to this file.
    #[serde(default, skip_serializing_if = "is_default_policy")]
    pub policy: InclusionPolicy,
    /// Effective token count when policy != Full (None means same as `tokens`).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub effective_tokens: Option<usize>,
    /// Reason for the applied policy.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub policy_reason: Option<String>,
    /// File classifications detected by hygiene analysis.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub classifications: Vec<FileClassification>,
}
437
// -----------------------
// Diff types
// -----------------------

/// A row in the diff output showing changes for a single language.
///
/// # Examples
///
/// ```
/// use tokmd_types::DiffRow;
///
/// let row = DiffRow {
///     lang: "Rust".to_string(),
///     old_code: 1000, new_code: 1200, delta_code: 200,
///     old_lines: 1500, new_lines: 1800, delta_lines: 300,
///     old_files: 10,   new_files: 12,   delta_files: 2,
///     old_bytes: 40000, new_bytes: 48000, delta_bytes: 8000,
///     old_tokens: 10000, new_tokens: 12000, delta_tokens: 2000,
/// };
/// assert_eq!(row.delta_code, (row.new_code as i64) - (row.old_code as i64));
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct DiffRow {
    /// Language name.
    pub lang: String,
    // Each metric appears as an (old, new, delta) triplet; deltas are
    // signed since counts can shrink (`delta = new - old`, see doctest).
    pub old_code: usize,
    pub new_code: usize,
    pub delta_code: i64,
    pub old_lines: usize,
    pub new_lines: usize,
    pub delta_lines: i64,
    pub old_files: usize,
    pub new_files: usize,
    pub delta_files: i64,
    pub old_bytes: usize,
    pub new_bytes: usize,
    pub delta_bytes: i64,
    pub old_tokens: usize,
    pub new_tokens: usize,
    pub delta_tokens: i64,
}

/// Aggregate totals for the diff.
///
/// # Examples
///
/// ```
/// use tokmd_types::DiffTotals;
///
/// // Default is all zeros
/// let totals = DiffTotals::default();
/// assert_eq!(totals.delta_code, 0);
/// assert_eq!(totals.delta_files, 0);
/// ```
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct DiffTotals {
    // Same (old, new, delta) triplet layout as [`DiffRow`], aggregated
    // across all languages.
    pub old_code: usize,
    pub new_code: usize,
    pub delta_code: i64,
    pub old_lines: usize,
    pub new_lines: usize,
    pub delta_lines: i64,
    pub old_files: usize,
    pub new_files: usize,
    pub delta_files: i64,
    pub old_bytes: usize,
    pub new_bytes: usize,
    pub delta_bytes: i64,
    pub old_tokens: usize,
    pub new_tokens: usize,
    pub delta_tokens: i64,
}

/// JSON receipt for diff output with envelope metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiffReceipt {
    /// Schema version (see [`SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String,
    /// Label describing the "old" input.
    pub from_source: String,
    /// Label describing the "new" input.
    pub to_source: String,
    /// Per-language change rows.
    pub diff_rows: Vec<DiffRow>,
    /// Aggregate change totals.
    pub totals: DiffTotals,
}
522
// -----------------------------------------------------------------------------
// Enums shared with CLI (moved from tokmd-config)
// -----------------------------------------------------------------------------

/// Table output formats for the summary commands.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum TableFormat {
    /// Markdown table (great for pasting into ChatGPT).
    Md,
    /// Tab-separated values (good for piping to other tools).
    Tsv,
    /// JSON (compact).
    Json,
}

/// Output formats for the export command.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum ExportFormat {
    /// CSV with a header row.
    Csv,
    /// One JSON object per line.
    Jsonl,
    /// A single JSON array.
    Json,
    /// CycloneDX 1.6 JSON SBOM format.
    Cyclonedx,
}

/// Whether tokei-style config files are consulted during a scan.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum ConfigMode {
    /// Read `tokei.toml` / `.tokeirc` if present.
    #[default]
    Auto,
    /// Ignore config files.
    None,
}

/// How embedded languages appear in the lang summary.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum ChildrenMode {
    /// Merge embedded content into the parent language totals.
    Collapse,
    /// Show embedded languages as separate "(embedded)" rows.
    Separate,
}

/// Whether embedded languages contribute to module/export rows.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum ChildIncludeMode {
    /// Include embedded languages as separate contributions.
    Separate,
    /// Ignore embedded languages.
    ParentsOnly,
}

/// What gets redacted in exported output.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum RedactMode {
    /// Do not redact.
    None,
    /// Redact file paths.
    Paths,
    /// Redact file paths and module names.
    All,
}

/// Output formats for analysis renderers. Variant names name the target
/// format; see the renderer implementations for specifics.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[serde(rename_all = "kebab-case")]
pub enum AnalysisFormat {
    Md,
    Json,
    Jsonld,
    Xml,
    Svg,
    Mermaid,
    Obj,
    Midi,
    Tree,
    Html,
}
611
/// Log record for context command JSONL append mode.
/// Contains metadata only (not file contents) for lightweight logging.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextLogRecord {
    /// Schema version of the record.
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    /// Token budget requested.
    pub budget_tokens: usize,
    /// Tokens consumed by the selection.
    pub used_tokens: usize,
    /// used/budget utilization percentage.
    pub utilization_pct: f64,
    /// Selection strategy name.
    pub strategy: String,
    /// Ranking metric name.
    pub rank_by: String,
    /// Number of files selected.
    pub file_count: usize,
    /// Total bytes of selected content.
    pub total_bytes: usize,
    /// Where the bundle was written (path or stream name — per the field name).
    pub output_destination: String,
}
628
// -----------------------
// Handoff types
// -----------------------

/// Schema version for handoff receipts.
///
/// Versioned independently of [`SCHEMA_VERSION`].
///
/// ```
/// assert_eq!(tokmd_types::HANDOFF_SCHEMA_VERSION, 5);
/// ```
pub const HANDOFF_SCHEMA_VERSION: u32 = 5;

/// Schema version for context bundle manifests.
///
/// Versioned independently of [`SCHEMA_VERSION`].
///
/// ```
/// assert_eq!(tokmd_types::CONTEXT_BUNDLE_SCHEMA_VERSION, 2);
/// ```
pub const CONTEXT_BUNDLE_SCHEMA_VERSION: u32 = 2;

/// Schema version for context receipts (separate from SCHEMA_VERSION used by lang/module/export/diff).
///
/// ```
/// assert_eq!(tokmd_types::CONTEXT_SCHEMA_VERSION, 4);
/// ```
pub const CONTEXT_SCHEMA_VERSION: u32 = 4;
653
// -----------------------
// Token estimation types
// -----------------------

/// Metadata about how token estimates were produced.
///
/// Rails are NOT guaranteed bounds — they are heuristic fences.
/// Default divisors: est=4.0, low=3.0 (conservative → more tokens),
/// high=5.0 (optimistic → fewer tokens).
///
/// **Invariant**: `tokens_min <= tokens_est <= tokens_max`.
///
/// The serde aliases (`tokens_high`, `tokens_low`) accept the older
/// field names when deserializing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenEstimationMeta {
    /// Divisor used for main estimate (default 4.0).
    pub bytes_per_token_est: f64,
    /// Conservative divisor — more tokens (default 3.0).
    pub bytes_per_token_low: f64,
    /// Optimistic divisor — fewer tokens (default 5.0).
    pub bytes_per_token_high: f64,
    /// tokens = source_bytes / bytes_per_token_high (optimistic, fewest tokens).
    #[serde(alias = "tokens_high")]
    pub tokens_min: usize,
    /// tokens = source_bytes / bytes_per_token_est.
    pub tokens_est: usize,
    /// tokens = source_bytes / bytes_per_token_low (conservative, most tokens).
    #[serde(alias = "tokens_low")]
    pub tokens_max: usize,
    /// Total source bytes used to compute estimates.
    pub source_bytes: usize,
}
684
685impl TokenEstimationMeta {
686    /// Default bytes-per-token divisors.
687    pub const DEFAULT_BPT_EST: f64 = 4.0;
688    pub const DEFAULT_BPT_LOW: f64 = 3.0;
689    pub const DEFAULT_BPT_HIGH: f64 = 5.0;
690
691    /// Create estimation from source byte count using default divisors.
692    ///
693    /// # Examples
694    ///
695    /// ```
696    /// use tokmd_types::TokenEstimationMeta;
697    ///
698    /// let est = TokenEstimationMeta::from_bytes(4000, 4.0);
699    /// assert_eq!(est.tokens_est, 1000);
700    /// assert_eq!(est.source_bytes, 4000);
701    /// // Invariant: tokens_min <= tokens_est <= tokens_max
702    /// assert!(est.tokens_min <= est.tokens_est);
703    /// assert!(est.tokens_est <= est.tokens_max);
704    /// ```
705    pub fn from_bytes(bytes: usize, bpt: f64) -> Self {
706        Self::from_bytes_with_bounds(bytes, bpt, Self::DEFAULT_BPT_LOW, Self::DEFAULT_BPT_HIGH)
707    }
708
709    /// Create estimation from source byte count with explicit low/high divisors.
710    pub fn from_bytes_with_bounds(bytes: usize, bpt_est: f64, bpt_low: f64, bpt_high: f64) -> Self {
711        Self {
712            bytes_per_token_est: bpt_est,
713            bytes_per_token_low: bpt_low,
714            bytes_per_token_high: bpt_high,
715            tokens_min: (bytes as f64 / bpt_high).ceil() as usize,
716            tokens_est: (bytes as f64 / bpt_est).ceil() as usize,
717            tokens_max: (bytes as f64 / bpt_low).ceil() as usize,
718            source_bytes: bytes,
719        }
720    }
721}
722
/// Post-write audit comparing actual output to estimates.
///
/// The serde aliases (`tokens_high`, `tokens_low`) accept the older
/// field names when deserializing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenAudit {
    /// Actual bytes written to the output bundle.
    pub output_bytes: u64,
    /// tokens = output_bytes / bytes_per_token_high (optimistic, fewest tokens).
    #[serde(alias = "tokens_high")]
    pub tokens_min: usize,
    /// tokens = output_bytes / bytes_per_token_est.
    pub tokens_est: usize,
    /// tokens = output_bytes / bytes_per_token_low (conservative, most tokens).
    #[serde(alias = "tokens_low")]
    pub tokens_max: usize,
    /// Bytes of framing/separators/headers (output_bytes - content_bytes).
    pub overhead_bytes: u64,
    /// overhead_bytes / output_bytes (0.0-1.0).
    pub overhead_pct: f64,
}
741
742impl TokenAudit {
743    /// Create an audit from output bytes and content bytes.
744    ///
745    /// # Examples
746    ///
747    /// ```
748    /// use tokmd_types::TokenAudit;
749    ///
750    /// let audit = TokenAudit::from_output(5000, 4500);
751    /// assert_eq!(audit.output_bytes, 5000);
752    /// assert_eq!(audit.overhead_bytes, 500);
753    /// assert!(audit.overhead_pct > 0.0);
754    /// ```
755    pub fn from_output(output_bytes: u64, content_bytes: u64) -> Self {
756        Self::from_output_with_divisors(
757            output_bytes,
758            content_bytes,
759            TokenEstimationMeta::DEFAULT_BPT_EST,
760            TokenEstimationMeta::DEFAULT_BPT_LOW,
761            TokenEstimationMeta::DEFAULT_BPT_HIGH,
762        )
763    }
764
765    /// Create an audit from output bytes with explicit divisors.
766    pub fn from_output_with_divisors(
767        output_bytes: u64,
768        content_bytes: u64,
769        bpt_est: f64,
770        bpt_low: f64,
771        bpt_high: f64,
772    ) -> Self {
773        let overhead_bytes = output_bytes.saturating_sub(content_bytes);
774        let overhead_pct = if output_bytes > 0 {
775            overhead_bytes as f64 / output_bytes as f64
776        } else {
777            0.0
778        };
779        Self {
780            output_bytes,
781            tokens_min: (output_bytes as f64 / bpt_high).ceil() as usize,
782            tokens_est: (output_bytes as f64 / bpt_est).ceil() as usize,
783            tokens_max: (output_bytes as f64 / bpt_low).ceil() as usize,
784            overhead_bytes,
785            overhead_pct,
786        }
787    }
788}
789
// -----------------------
// Bundle hygiene types
// -----------------------

/// Classification of a file for bundle hygiene purposes.
///
/// Serialized in snake_case; derives `Ord` (declaration order).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FileClassification {
    /// Protobuf output, parser tables, node-types.json, etc.
    Generated,
    /// Test fixtures, golden snapshots.
    Fixture,
    /// Third-party vendored code.
    Vendored,
    /// Cargo.lock, package-lock.json, etc.
    Lockfile,
    /// *.min.js, *.min.css.
    Minified,
    /// Files with very high tokens-per-line ratio.
    DataBlob,
    /// *.js.map, *.css.map.
    Sourcemap,
}

/// How a file is included in the context/handoff bundle.
///
/// Defaults to [`InclusionPolicy::Full`]; serialized in snake_case.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum InclusionPolicy {
    /// Full file content.
    #[default]
    Full,
    /// First N + last N lines.
    HeadTail,
    /// Structural summary (placeholder, behaves as Skip for now).
    Summary,
    /// Excluded from payload entirely.
    Skip,
}
828
829/// Helper for serde skip_serializing_if on InclusionPolicy.
830fn is_default_policy(policy: &InclusionPolicy) -> bool {
831    *policy == InclusionPolicy::Full
832}
833
/// A file excluded by per-file cap / classification policy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicyExcludedFile {
    /// File path.
    pub path: String,
    /// Token count before exclusion.
    pub original_tokens: usize,
    /// Policy that was applied.
    pub policy: InclusionPolicy,
    /// Human-readable reason.
    pub reason: String,
    /// Classifications detected for the file.
    pub classifications: Vec<FileClassification>,
}
843
/// Manifest for a handoff bundle containing LLM-ready artifacts.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffManifest {
    /// Schema version (see [`HANDOFF_SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String,
    /// Input paths given to the command.
    pub inputs: Vec<String>,
    /// Directory the bundle was written to.
    pub output_dir: String,
    /// Token budget requested.
    pub budget_tokens: usize,
    /// Tokens consumed by bundled files.
    pub used_tokens: usize,
    /// used/budget utilization percentage.
    pub utilization_pct: f64,
    /// Selection strategy name.
    pub strategy: String,
    /// Requested ranking metric.
    pub rank_by: String,
    /// Detected capabilities and whether each was used.
    pub capabilities: Vec<CapabilityStatus>,
    /// Artifacts written into the bundle.
    pub artifacts: Vec<ArtifactEntry>,
    /// Files included in the bundle.
    pub included_files: Vec<ContextFileRow>,
    /// Paths excluded, with reasons.
    pub excluded_paths: Vec<HandoffExcludedPath>,
    /// Exclusion patterns applied.
    pub excluded_patterns: Vec<String>,
    /// Files dropped by smart-exclude heuristics.
    pub smart_excluded_files: Vec<SmartExcludedFile>,
    /// Total number of files before exclusions (cf. `bundled_files`).
    pub total_files: usize,
    /// Number of files actually bundled.
    pub bundled_files: usize,
    /// Name of the intelligence preset used.
    pub intelligence_preset: String,
    /// Effective ranking metric (may differ from rank_by if fallback occurred).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub rank_by_effective: Option<String>,
    /// Reason for fallback if rank_by_effective differs from rank_by.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fallback_reason: Option<String>,
    /// Files excluded by per-file cap / classification policy.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub excluded_by_policy: Vec<PolicyExcludedFile>,
    /// Token estimation envelope with uncertainty bounds.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub token_estimation: Option<TokenEstimationMeta>,
    /// Post-bundle audit comparing actual code bundle bytes to estimates.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code_audit: Option<TokenAudit>,
}

/// A file excluded by smart-exclude heuristics (lockfiles, minified, etc.).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SmartExcludedFile {
    /// File path.
    pub path: String,
    /// Human-readable reason for exclusion.
    pub reason: String,
    /// Estimated token count of the excluded file.
    pub tokens: usize,
}
891
/// Manifest for a context bundle directory (bundle.txt + receipt.json + manifest.json).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextBundleManifest {
    /// Schema version (see [`CONTEXT_BUNDLE_SCHEMA_VERSION`]).
    pub schema_version: u32,
    /// Generation timestamp in milliseconds.
    pub generated_at_ms: u128,
    /// Producing tool's identity.
    pub tool: ToolInfo,
    pub mode: String,
    /// Token budget requested.
    pub budget_tokens: usize,
    /// Tokens consumed by the selected files.
    pub used_tokens: usize,
    /// used/budget utilization percentage.
    pub utilization_pct: f64,
    /// Selection strategy name.
    pub strategy: String,
    /// Requested ranking metric.
    pub rank_by: String,
    /// Number of included files.
    pub file_count: usize,
    /// Size of the bundle file in bytes.
    pub bundle_bytes: usize,
    /// Artifacts written into the bundle directory.
    pub artifacts: Vec<ArtifactEntry>,
    /// Files included in the bundle.
    pub included_files: Vec<ContextFileRow>,
    /// Paths excluded, with reasons.
    pub excluded_paths: Vec<ContextExcludedPath>,
    /// Exclusion patterns applied.
    pub excluded_patterns: Vec<String>,
    /// Effective ranking metric (may differ from rank_by if fallback occurred).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub rank_by_effective: Option<String>,
    /// Reason for fallback if rank_by_effective differs from rank_by.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fallback_reason: Option<String>,
    /// Files excluded by per-file cap / classification policy.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub excluded_by_policy: Vec<PolicyExcludedFile>,
    /// Token estimation envelope with uncertainty bounds.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub token_estimation: Option<TokenEstimationMeta>,
    /// Post-bundle audit comparing actual bundle bytes to estimates.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub bundle_audit: Option<TokenAudit>,
}

/// Explicitly excluded path with reason for context bundles.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContextExcludedPath {
    /// Excluded path.
    pub path: String,
    /// Human-readable reason.
    pub reason: String,
}
933
/// Intelligence bundle for handoff containing tree, hotspots, complexity, and derived metrics.
///
/// Every section is optional; absent sections were not produced
/// (problems are recorded in `warnings`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffIntelligence {
    /// Rendered directory tree, if produced.
    pub tree: Option<String>,
    /// Depth the tree was rendered to.
    pub tree_depth: Option<usize>,
    /// Change hotspots, when available.
    pub hotspots: Option<Vec<HandoffHotspot>>,
    /// Complexity summary, if computed.
    pub complexity: Option<HandoffComplexity>,
    /// Derived aggregate metrics, if computed.
    pub derived: Option<HandoffDerived>,
    /// Non-fatal problems encountered while gathering intelligence.
    pub warnings: Vec<String>,
}

/// Explicitly excluded path with reason.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffExcludedPath {
    /// Excluded path.
    pub path: String,
    /// Human-readable reason.
    pub reason: String,
}

/// Simplified hotspot row for handoff intelligence.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffHotspot {
    /// File path.
    pub path: String,
    /// Number of commits touching the file.
    pub commits: usize,
    /// Line count of the file.
    pub lines: usize,
    /// Hotspot score; the scoring formula lives in the producer.
    pub score: usize,
}

/// Simplified complexity report for handoff intelligence.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffComplexity {
    /// Number of functions analyzed.
    pub total_functions: usize,
    /// Mean function length.
    pub avg_function_length: f64,
    /// Longest function length.
    pub max_function_length: usize,
    /// Mean cyclomatic complexity.
    pub avg_cyclomatic: f64,
    /// Highest cyclomatic complexity.
    pub max_cyclomatic: usize,
    /// Count of files flagged high-risk (criteria live in the producer).
    pub high_risk_files: usize,
}

/// Simplified derived metrics for handoff intelligence.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandoffDerived {
    /// Total files counted.
    pub total_files: usize,
    /// Total lines of code.
    pub total_code: usize,
    /// Total lines.
    pub total_lines: usize,
    /// Total estimated tokens.
    pub total_tokens: usize,
    /// Number of distinct languages.
    pub lang_count: usize,
    /// Language with the largest share.
    pub dominant_lang: String,
    /// Share of the dominant language, as a percentage.
    pub dominant_pct: f64,
}
983
984/// Status of a detected capability.
985#[derive(Debug, Clone, Serialize, Deserialize)]
986pub struct CapabilityStatus {
987    pub name: String,
988    pub status: CapabilityState,
989    #[serde(skip_serializing_if = "Option::is_none")]
990    pub reason: Option<String>,
991}
992
/// State of a capability: available, skipped, or unavailable.
///
/// Serialized in JSON as `"available"`, `"skipped"`, or `"unavailable"`
/// (snake_case rename).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CapabilityState {
    /// Capability is available and was used.
    Available,
    /// Capability is available but was deliberately skipped (e.g., --no-git flag).
    Skipped,
    /// Capability is unavailable in this environment (e.g., not in a git repo).
    Unavailable,
}
1004
/// Entry describing an artifact in the handoff bundle.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactEntry {
    /// Logical name of the artifact.
    pub name: String,
    /// Path of the artifact within the bundle.
    pub path: String,
    /// Human-readable description of the artifact.
    pub description: String,
    /// Size of the artifact in bytes.
    pub bytes: u64,
    /// Optional integrity hash; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hash: Option<ArtifactHash>,
}
1015
1016/// Hash for artifact integrity.
1017#[derive(Debug, Clone, Serialize, Deserialize)]
1018pub struct ArtifactHash {
1019    pub algo: String,
1020    pub hash: String,
1021}
1022
#[cfg(test)]
mod tests {
    use super::*;

    /// Serialize `value` to JSON and back, asserting the deserialized result
    /// equals the input. Shared by every enum/struct roundtrip test below.
    fn assert_serde_roundtrip<T>(value: T)
    where
        T: Serialize + serde::de::DeserializeOwned + PartialEq + std::fmt::Debug,
    {
        let json = serde_json::to_string(&value).unwrap();
        let back: T = serde_json::from_str(&json).unwrap();
        assert_eq!(back, value);
    }

    // ── Schema version constants ──────────────────────────────────────
    #[test]
    fn schema_version_constants() {
        assert_eq!(SCHEMA_VERSION, 2);
        assert_eq!(HANDOFF_SCHEMA_VERSION, 5);
        assert_eq!(CONTEXT_BUNDLE_SCHEMA_VERSION, 2);
        assert_eq!(CONTEXT_SCHEMA_VERSION, 4);
    }

    // ── Default impls ─────────────────────────────────────────────────
    #[test]
    fn config_mode_default_is_auto() {
        assert_eq!(ConfigMode::default(), ConfigMode::Auto);
    }

    #[test]
    fn inclusion_policy_default_is_full() {
        assert_eq!(InclusionPolicy::default(), InclusionPolicy::Full);
    }

    #[test]
    fn diff_totals_default_is_zeroed() {
        let dt = DiffTotals::default();
        assert_eq!(dt.old_code, 0);
        assert_eq!(dt.new_code, 0);
        assert_eq!(dt.delta_code, 0);
        assert_eq!(dt.delta_tokens, 0);
    }

    #[test]
    fn tool_info_default_is_empty() {
        let ti = ToolInfo::default();
        assert!(ti.name.is_empty());
        assert!(ti.version.is_empty());
    }

    #[test]
    fn tool_info_current() {
        let ti = ToolInfo::current();
        assert_eq!(ti.name, "tokmd");
        assert!(!ti.version.is_empty());
    }

    // ── Serde roundtrips for enums ────────────────────────────────────
    #[test]
    fn table_format_serde_roundtrip() {
        for variant in [TableFormat::Md, TableFormat::Tsv, TableFormat::Json] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn export_format_serde_roundtrip() {
        for variant in [
            ExportFormat::Csv,
            ExportFormat::Jsonl,
            ExportFormat::Json,
            ExportFormat::Cyclonedx,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn config_mode_serde_roundtrip() {
        for variant in [ConfigMode::Auto, ConfigMode::None] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn children_mode_serde_roundtrip() {
        for variant in [ChildrenMode::Collapse, ChildrenMode::Separate] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn redact_mode_serde_roundtrip() {
        for variant in [RedactMode::None, RedactMode::Paths, RedactMode::All] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn file_kind_serde_roundtrip() {
        for variant in [FileKind::Parent, FileKind::Child] {
            assert_serde_roundtrip(variant);
        }
    }

    // ScanStatus is checked via `matches!` rather than the helper because the
    // original test only relies on its serialized form, not `PartialEq`.
    #[test]
    fn scan_status_serde_roundtrip() {
        let json = serde_json::to_string(&ScanStatus::Complete).unwrap();
        assert_eq!(json, "\"complete\"");
        let back: ScanStatus = serde_json::from_str(&json).unwrap();
        assert!(matches!(back, ScanStatus::Complete));
    }

    #[test]
    fn file_classification_serde_roundtrip() {
        for variant in [
            FileClassification::Generated,
            FileClassification::Fixture,
            FileClassification::Vendored,
            FileClassification::Lockfile,
            FileClassification::Minified,
            FileClassification::DataBlob,
            FileClassification::Sourcemap,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn inclusion_policy_serde_roundtrip() {
        for variant in [
            InclusionPolicy::Full,
            InclusionPolicy::HeadTail,
            InclusionPolicy::Summary,
            InclusionPolicy::Skip,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn capability_state_serde_roundtrip() {
        for variant in [
            CapabilityState::Available,
            CapabilityState::Skipped,
            CapabilityState::Unavailable,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn analysis_format_serde_roundtrip() {
        for variant in [
            AnalysisFormat::Md,
            AnalysisFormat::Json,
            AnalysisFormat::Jsonld,
            AnalysisFormat::Xml,
            AnalysisFormat::Svg,
            AnalysisFormat::Mermaid,
            AnalysisFormat::Obj,
            AnalysisFormat::Midi,
            AnalysisFormat::Tree,
            AnalysisFormat::Html,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    #[test]
    fn commit_intent_kind_serde_roundtrip() {
        for variant in [
            CommitIntentKind::Feat,
            CommitIntentKind::Fix,
            CommitIntentKind::Refactor,
            CommitIntentKind::Docs,
            CommitIntentKind::Test,
            CommitIntentKind::Chore,
            CommitIntentKind::Ci,
            CommitIntentKind::Other,
        ] {
            assert_serde_roundtrip(variant);
        }
    }

    // ── is_default_policy helper ──────────────────────────────────────
    #[test]
    fn is_default_policy_works() {
        assert!(is_default_policy(&InclusionPolicy::Full));
        assert!(!is_default_policy(&InclusionPolicy::Skip));
    }

    // ── Struct serde roundtrips ───────────────────────────────────────
    #[test]
    fn totals_serde_roundtrip() {
        assert_serde_roundtrip(Totals {
            code: 100,
            lines: 200,
            files: 10,
            bytes: 5000,
            tokens: 250,
            avg_lines: 20,
        });
    }

    #[test]
    fn lang_row_serde_roundtrip() {
        assert_serde_roundtrip(LangRow {
            lang: "Rust".into(),
            code: 100,
            lines: 150,
            files: 5,
            bytes: 3000,
            tokens: 200,
            avg_lines: 30,
        });
    }

    #[test]
    fn diff_row_serde_roundtrip() {
        assert_serde_roundtrip(DiffRow {
            lang: "Rust".into(),
            old_code: 100,
            new_code: 120,
            delta_code: 20,
            old_lines: 200,
            new_lines: 220,
            delta_lines: 20,
            old_files: 10,
            new_files: 11,
            delta_files: 1,
            old_bytes: 5000,
            new_bytes: 6000,
            delta_bytes: 1000,
            old_tokens: 250,
            new_tokens: 300,
            delta_tokens: 50,
        });
    }

    #[test]
    fn diff_totals_serde_roundtrip() {
        assert_serde_roundtrip(DiffTotals {
            old_code: 100,
            new_code: 120,
            delta_code: 20,
            ..DiffTotals::default()
        });
    }

    // ── TokenEstimationMeta ───────────────────────────────────────────
    #[test]
    fn token_estimation_from_bytes_defaults() {
        let est = TokenEstimationMeta::from_bytes(4000, TokenEstimationMeta::DEFAULT_BPT_EST);
        assert_eq!(est.source_bytes, 4000);
        assert_eq!(est.tokens_est, 1000); // 4000 / 4.0
        // tokens_min uses bpt_high=5.0 → 4000/5.0 = 800
        assert_eq!(est.tokens_min, 800);
        // tokens_max uses bpt_low=3.0 → ceil(4000/3.0) = 1334
        assert_eq!(est.tokens_max, 1334);
    }

    #[test]
    fn token_estimation_invariant_min_le_est_le_max() {
        let est = TokenEstimationMeta::from_bytes(12345, 4.0);
        assert!(est.tokens_min <= est.tokens_est);
        assert!(est.tokens_est <= est.tokens_max);
    }

    #[test]
    fn token_estimation_zero_bytes() {
        let est = TokenEstimationMeta::from_bytes(0, 4.0);
        assert_eq!(est.tokens_min, 0);
        assert_eq!(est.tokens_est, 0);
        assert_eq!(est.tokens_max, 0);
    }

    #[test]
    fn token_estimation_with_custom_bounds() {
        let est = TokenEstimationMeta::from_bytes_with_bounds(1000, 4.0, 2.0, 8.0);
        assert_eq!(est.bytes_per_token_est, 4.0);
        assert_eq!(est.bytes_per_token_low, 2.0);
        assert_eq!(est.bytes_per_token_high, 8.0);
        assert_eq!(est.tokens_est, 250); // 1000 / 4.0
        assert_eq!(est.tokens_min, 125); // 1000 / 8.0
        assert_eq!(est.tokens_max, 500); // 1000 / 2.0
    }

    // ── TokenAudit ────────────────────────────────────────────────────
    #[test]
    fn token_audit_from_output_basic() {
        let audit = TokenAudit::from_output(1000, 800);
        assert_eq!(audit.output_bytes, 1000);
        assert_eq!(audit.overhead_bytes, 200);
        assert!((audit.overhead_pct - 0.2).abs() < f64::EPSILON);
    }

    #[test]
    fn token_audit_zero_output() {
        let audit = TokenAudit::from_output(0, 0);
        assert_eq!(audit.output_bytes, 0);
        assert_eq!(audit.overhead_bytes, 0);
        assert_eq!(audit.overhead_pct, 0.0);
    }

    #[test]
    fn token_audit_content_exceeds_output() {
        // content_bytes > output_bytes should saturate to 0 overhead
        let audit = TokenAudit::from_output(100, 200);
        assert_eq!(audit.overhead_bytes, 0);
        assert_eq!(audit.overhead_pct, 0.0);
    }

    // TokenAudit is compared field-wise (it holds an `f64` and may not derive
    // `PartialEq`), so it does not go through the roundtrip helper.
    #[test]
    fn token_audit_serde_roundtrip() {
        let audit = TokenAudit::from_output(5000, 4500);
        let json = serde_json::to_string(&audit).unwrap();
        let back: TokenAudit = serde_json::from_str(&json).unwrap();
        assert_eq!(back.output_bytes, 5000);
        assert_eq!(back.overhead_bytes, 500);
    }

    // ── Kebab-case serde naming ───────────────────────────────────────
    #[test]
    fn table_format_uses_kebab_case() {
        assert_eq!(serde_json::to_string(&TableFormat::Md).unwrap(), "\"md\"");
        assert_eq!(serde_json::to_string(&TableFormat::Tsv).unwrap(), "\"tsv\"");
    }

    #[test]
    fn export_format_uses_kebab_case() {
        assert_eq!(
            serde_json::to_string(&ExportFormat::Cyclonedx).unwrap(),
            "\"cyclonedx\""
        );
    }

    #[test]
    fn redact_mode_uses_kebab_case() {
        assert_eq!(
            serde_json::to_string(&RedactMode::Paths).unwrap(),
            "\"paths\""
        );
    }

    // ── FileRow serde roundtrip ───────────────────────────────────────
    #[test]
    fn file_row_serde_roundtrip() {
        assert_serde_roundtrip(FileRow {
            path: "src/main.rs".into(),
            module: "src".into(),
            lang: "Rust".into(),
            kind: FileKind::Parent,
            code: 50,
            comments: 10,
            blanks: 5,
            lines: 65,
            bytes: 2000,
            tokens: 100,
        });
    }
}