// ftui_runtime/cost_model.rs
1#![forbid(unsafe_code)]
2
3//! Formal cost models for caches, scheduling, and batching (bd-lff4p.5.6).
4//!
5//! This module provides principled mathematical models with explicit objective
6//! functions for three subsystems:
7//!
8//! 1. **Cache cost model** — loss function `L(hit_rate, memory_bytes)` for glyph
9//!    atlas / cell caches, with optimal budget derivation under LRU.
10//! 2. **Pipeline scheduling model** — M/G/1 queue model for the
11//!    `input → update → view → diff → present` pipeline, including optimal
12//!    batch size and latency decomposition.
13//! 3. **Patch batching model** — cost of immediate-flush vs deferred-coalesce
14//!    for the `BufferDiff → CellPatch → GPU upload` path.
15//!
16//! # Mathematical Framework
17//!
18//! All models follow the same pattern:
19//! - **Objective function** `J(θ)` parameterized by policy knobs `θ`.
20//! - **Constraint set** capturing hard limits (memory, latency budget, etc.).
21//! - **Optimal policy** `θ*` = argmin J(θ) subject to constraints.
22//! - **Evidence** comparing the chosen policy to alternatives.
23//!
24//! # Cache Cost Model
25//!
26//! ## Objective
27//!
28//! ```text
//! J(B) = c_miss × N × miss_rate(B) + c_mem × B
30//! ```
31//!
//! where `B` is cache budget in bytes, `N` is the number of cache accesses
//! per frame (taken equal to the working set size), `c_miss` is the cost of a
//! cache miss (rasterization + upload latency in µs), and `c_mem` is the
//! opportunity cost per byte of atlas memory.
35//!
36//! ## Miss Rate Model (LRU on IRM workload)
37//!
38//! Under the Independent Reference Model with Zipf(α) popularity:
39//!
40//! ```text
41//! miss_rate(B) ≈ 1 − min(1, (B / item_bytes) / N)^(1/α)
42//! ```
43//!
44//! where `N` is the working set size and `α` is the Zipf exponent.
45//!
//! The optimal budget minimizes `J(B)` subject to `B ≤ B_max`. For `α ≠ 1`
//! the stationary point of `J` is:
//!
//! ```text
//! B* = item_bytes × N × (c_miss / (α × item_bytes × c_mem))^(α/(α−1))
//! ```
//!
//! clamped to `[item_bytes, min(item_bytes × N, B_max)]` — budgets beyond
//! full working-set coverage only add memory cost, so they are never optimal.
53//!
54//! # Pipeline Scheduling Model
55//!
56//! ## M/G/1 Queue
57//!
58//! Model the render pipeline as a single server with:
59//! - Arrival rate `λ` (frames/ms)
60//! - Service time distribution `S` with mean `E[S]` and variance `Var[S]`
61//!
62//! Pollaczek-Khinchine formula for mean sojourn time:
63//!
64//! ```text
65//! E[T] = E[S] + (λ × E[S²]) / (2 × (1 − ρ))
66//! ```
67//!
68//! where `ρ = λ × E[S]` is utilization.
69//!
70//! ## Stage Decomposition
71//!
72//! ```text
73//! S = S_input + S_update + S_view + S_diff + S_present
74//! ```
75//!
76//! Each stage `i` has independent service time `S_i` with measured mean and
77//! variance. Total variance: `Var[S] = Σ Var[S_i]`.
78//!
79//! # Patch Batching Model
80//!
81//! ## Batch vs Immediate
82//!
//! ```text
//! J_immediate = n × (c_overhead + c_per_patch)
//! J_batch(k) = ceil(n/k) × c_overhead + n × c_per_patch + (k−1) × c_latency
//! ```
87//!
88//! Optimal batch size:
89//!
90//! ```text
91//! k* = sqrt(n × c_overhead / c_latency)
92//! ```
93//!
94//! clamped to `[1, n]`.
95
96use std::fmt;
97
98// ─── Cache Cost Model ─────────────────────────────────────────────────────
99
/// Parameters for the glyph/cell cache cost model.
///
/// Drives the per-frame objective
/// `J(B) = c_miss × N × miss_rate(B) + c_mem × B` (see module docs).
#[derive(Debug, Clone)]
pub struct CacheCostParams {
    /// Cost of a cache miss in µs (rasterize + upload).
    pub c_miss_us: f64,
    /// Opportunity cost per byte of atlas memory (µs/byte/frame).
    pub c_mem_per_byte: f64,
    /// Average item size in bytes (slot area including padding).
    pub item_bytes: f64,
    /// Working set size (number of distinct glyphs in a typical session).
    /// Also reused as the number of cache accesses per frame when
    /// normalizing the miss cost in `total_cost`.
    pub working_set_n: f64,
    /// Zipf exponent for access frequency distribution.
    /// α > 1 means heavy-tail (few glyphs dominate); typical terminal ≈ 1.2–1.8.
    pub zipf_alpha: f64,
    /// Maximum allowed cache budget in bytes (hard constraint `B ≤ B_max`).
    pub budget_max_bytes: f64,
}
117
118impl Default for CacheCostParams {
119    fn default() -> Self {
120        Self {
121            // Typical glyph rasterize + GPU upload: ~50µs
122            c_miss_us: 50.0,
123            // Memory pressure: ~0.0001 µs/byte/frame (at 2MB atlas ≈ 0.2µs/frame)
124            c_mem_per_byte: 0.0001,
125            // 16×16 glyph slot with 1px padding = 18×18 = 324 bytes
126            item_bytes: 324.0,
127            // ASCII printable + common unicode ≈ 200 distinct glyphs
128            working_set_n: 200.0,
129            // Terminal text: moderately heavy-tail
130            zipf_alpha: 1.5,
131            // 4MB atlas maximum
132            budget_max_bytes: 4_194_304.0,
133        }
134    }
135}
136
/// Result of cache cost model optimization.
///
/// Produced by [`CacheCostParams::optimize`]; all cost fields are µs/frame.
#[derive(Debug, Clone)]
pub struct CacheCostResult {
    /// Optimal cache budget in bytes.
    pub optimal_budget_bytes: f64,
    /// Total cost at optimal budget (µs/frame).
    pub optimal_cost_us: f64,
    /// Miss rate at optimal budget (0.0–1.0).
    pub optimal_miss_rate: f64,
    /// Hit rate at optimal budget (1 − miss rate).
    pub optimal_hit_rate: f64,
    /// Cost breakdown: miss component (µs/frame).
    pub cost_miss_us: f64,
    /// Cost breakdown: memory component (µs/frame).
    pub cost_mem_us: f64,
    /// Number of items that fit in optimal budget.
    pub items_cached: f64,
    /// Evidence: cost at selected comparison points around the optimum.
    pub comparison_points: Vec<CacheCostPoint>,
}
157
/// A single evaluation point on the cache cost surface.
///
/// Captures the full cost decomposition at one candidate budget so
/// alternatives can be compared in the evidence ledger.
#[derive(Debug, Clone)]
pub struct CacheCostPoint {
    /// Budget in bytes.
    pub budget_bytes: f64,
    /// Miss rate at this budget (0.0–1.0).
    pub miss_rate: f64,
    /// Total cost at this budget (µs/frame); sum of the two components below.
    pub total_cost_us: f64,
    /// Miss component (µs/frame).
    pub cost_miss_us: f64,
    /// Memory component (µs/frame).
    pub cost_mem_us: f64,
}
172
173impl CacheCostParams {
174    /// Compute the miss rate under LRU with IRM/Zipf workload.
175    ///
176    /// Approximation: `miss_rate(B) ≈ 1 − min(1, (capacity / N))^(1/α)`
177    /// where capacity = B / item_bytes.
178    #[must_use]
179    pub fn miss_rate(&self, budget_bytes: f64) -> f64 {
180        let capacity = budget_bytes / self.item_bytes;
181        let ratio = (capacity / self.working_set_n).clamp(0.0, 1.0);
182        let hit_rate = ratio.powf(1.0 / self.zipf_alpha);
183        (1.0 - hit_rate).max(0.0)
184    }
185
186    /// Total cost J(B) = c_miss × miss_rate(B) × N_accesses + c_mem × B.
187    ///
188    /// We normalize to per-frame cost assuming `working_set_n` accesses/frame.
189    #[must_use]
190    pub fn total_cost(&self, budget_bytes: f64) -> f64 {
191        let mr = self.miss_rate(budget_bytes);
192        let cost_miss = self.c_miss_us * mr * self.working_set_n;
193        let cost_mem = self.c_mem_per_byte * budget_bytes;
194        cost_miss + cost_mem
195    }
196
197    /// Evaluate a single point on the cost surface.
198    #[must_use]
199    pub fn evaluate(&self, budget_bytes: f64) -> CacheCostPoint {
200        let mr = self.miss_rate(budget_bytes);
201        let cost_miss = self.c_miss_us * mr * self.working_set_n;
202        let cost_mem = self.c_mem_per_byte * budget_bytes;
203        CacheCostPoint {
204            budget_bytes,
205            miss_rate: mr,
206            total_cost_us: cost_miss + cost_mem,
207            cost_miss_us: cost_miss,
208            cost_mem_us: cost_mem,
209        }
210    }
211
212    /// Compute the optimal cache budget analytically.
213    ///
214    /// From `dJ/dB = 0`:
215    /// ```text
216    /// B* = item_bytes × N × (c_miss / (α × c_mem_per_byte × item_bytes × N))^(α/(1+α))
217    /// ```
218    /// clamped to `[item_bytes, budget_max_bytes]`.
219    #[must_use]
220    pub fn optimal_budget(&self) -> f64 {
221        let alpha = self.zipf_alpha;
222        let n = self.working_set_n;
223        let s = self.item_bytes;
224        let cm = self.c_miss_us;
225        let cmem = self.c_mem_per_byte;
226
227        // Avoid division by zero.
228        if cmem <= 0.0 || alpha <= 0.0 || n <= 0.0 || s <= 0.0 {
229            return self.budget_max_bytes;
230        }
231
232        // Analytical optimum from first-order condition.
233        let ratio = cm / (alpha * cmem * s * n);
234        let exponent = alpha / (1.0 + alpha);
235        let b_star = s * n * ratio.powf(exponent);
236
237        b_star.clamp(s, self.budget_max_bytes)
238    }
239
240    /// Run the full optimization and produce evidence.
241    #[must_use]
242    pub fn optimize(&self) -> CacheCostResult {
243        let b_star = self.optimal_budget();
244        let opt_point = self.evaluate(b_star);
245
246        // Generate comparison points at 10%, 25%, 50%, 100%, 150%, 200% of optimal.
247        let fractions = [0.1, 0.25, 0.5, 1.0, 1.5, 2.0];
248        let comparison_points: Vec<CacheCostPoint> = fractions
249            .iter()
250            .map(|f| {
251                let b = (b_star * f).clamp(self.item_bytes, self.budget_max_bytes);
252                self.evaluate(b)
253            })
254            .collect();
255
256        CacheCostResult {
257            optimal_budget_bytes: b_star,
258            optimal_cost_us: opt_point.total_cost_us,
259            optimal_miss_rate: opt_point.miss_rate,
260            optimal_hit_rate: 1.0 - opt_point.miss_rate,
261            cost_miss_us: opt_point.cost_miss_us,
262            cost_mem_us: opt_point.cost_mem_us,
263            items_cached: b_star / self.item_bytes,
264            comparison_points,
265        }
266    }
267}
268
269impl CacheCostResult {
270    /// Serialize to JSONL for evidence ledger.
271    #[must_use]
272    pub fn to_jsonl(&self) -> String {
273        let mut out = String::with_capacity(512);
274        out.push_str("{\"event\":\"cache_cost_optimal\"");
275        push_f64(&mut out, "optimal_budget_bytes", self.optimal_budget_bytes);
276        push_f64(&mut out, "optimal_cost_us", self.optimal_cost_us);
277        push_f64(&mut out, "optimal_miss_rate", self.optimal_miss_rate);
278        push_f64(&mut out, "optimal_hit_rate", self.optimal_hit_rate);
279        push_f64(&mut out, "cost_miss_us", self.cost_miss_us);
280        push_f64(&mut out, "cost_mem_us", self.cost_mem_us);
281        push_f64(&mut out, "items_cached", self.items_cached);
282        out.push_str(",\"comparisons\":[");
283        for (i, pt) in self.comparison_points.iter().enumerate() {
284            if i > 0 {
285                out.push(',');
286            }
287            out.push_str(&pt.to_json());
288        }
289        out.push_str("]}");
290        out
291    }
292}
293
294impl CacheCostPoint {
295    fn to_json(&self) -> String {
296        format!(
297            "{{\"budget_bytes\":{:.1},\"miss_rate\":{:.6},\"total_cost_us\":{:.3},\"cost_miss_us\":{:.3},\"cost_mem_us\":{:.3}}}",
298            self.budget_bytes,
299            self.miss_rate,
300            self.total_cost_us,
301            self.cost_miss_us,
302            self.cost_mem_us
303        )
304    }
305}
306
307// ─── Pipeline Scheduling Model ────────────────────────────────────────────
308
/// Service time statistics for a single pipeline stage.
///
/// Stages are treated as independent, so pipeline totals are obtained by
/// summing means and variances across stages.
#[derive(Debug, Clone, Copy)]
pub struct StageStats {
    /// Stage name (static label used in reports and JSONL output).
    pub name: &'static str,
    /// Mean service time (µs).
    pub mean_us: f64,
    /// Variance of service time (µs²).
    pub var_us2: f64,
}
319
320impl StageStats {
321    /// Second moment E[S²] = Var[S] + E[S]².
322    #[must_use]
323    pub fn second_moment(&self) -> f64 {
324        self.var_us2 + self.mean_us * self.mean_us
325    }
326}
327
/// Parameters for the M/G/1 pipeline scheduling model.
#[derive(Debug, Clone)]
pub struct PipelineCostParams {
    /// Service time statistics for each pipeline stage
    /// (`input → update → view → diff → present`).
    pub stages: Vec<StageStats>,
    /// Frame arrival rate λ (frames/µs). At 60fps: 1/16667 ≈ 0.00006.
    pub arrival_rate: f64,
    /// Target frame budget (µs). At 60fps: 16667.
    pub frame_budget_us: f64,
}
338
339impl Default for PipelineCostParams {
340    fn default() -> Self {
341        Self {
342            stages: vec![
343                StageStats {
344                    name: "input",
345                    mean_us: 50.0,
346                    var_us2: 100.0,
347                },
348                StageStats {
349                    name: "update",
350                    mean_us: 200.0,
351                    var_us2: 2500.0,
352                },
353                StageStats {
354                    name: "view",
355                    mean_us: 1500.0,
356                    var_us2: 250_000.0,
357                },
358                StageStats {
359                    name: "diff",
360                    mean_us: 800.0,
361                    var_us2: 90_000.0,
362                },
363                StageStats {
364                    name: "present",
365                    mean_us: 500.0,
366                    var_us2: 40_000.0,
367                },
368            ],
369            arrival_rate: 1.0 / 16667.0, // 60fps
370            frame_budget_us: 16667.0,
371        }
372    }
373}
374
/// Result of pipeline cost model analysis.
///
/// Produced by [`PipelineCostParams::analyze`].
#[derive(Debug, Clone)]
pub struct PipelineCostResult {
    /// Total mean service time (µs), summed over stages.
    pub total_mean_us: f64,
    /// Total service time variance (µs²), summed over stages
    /// (assumes independent stages).
    pub total_var_us2: f64,
    /// Server utilization ρ = λ × E[S].
    pub utilization: f64,
    /// Mean sojourn time via Pollaczek-Khinchine (µs); infinite when ρ ≥ 1.
    pub mean_sojourn_us: f64,
    /// Whether the system is stable (ρ < 1).
    pub stable: bool,
    /// Fraction of frame budget consumed by mean service.
    pub budget_fraction: f64,
    /// Per-stage breakdown.
    pub stage_breakdown: Vec<StageBreakdown>,
    /// Headroom: frame_budget - mean_sojourn (µs); negative when over budget.
    pub headroom_us: f64,
}
395
/// Per-stage contribution to the pipeline.
#[derive(Debug, Clone)]
pub struct StageBreakdown {
    /// Stage name.
    pub name: &'static str,
    /// Mean service time (µs).
    pub mean_us: f64,
    /// Fraction of total service time (0.0–1.0).
    pub fraction: f64,
    /// Coefficient of variation (σ/µ); 0 when the mean is non-positive.
    pub cv: f64,
}
408
409impl PipelineCostParams {
410    /// Analyze the pipeline using M/G/1 queueing theory.
411    #[must_use]
412    pub fn analyze(&self) -> PipelineCostResult {
413        let total_mean: f64 = self.stages.iter().map(|s| s.mean_us).sum();
414        let total_var: f64 = self.stages.iter().map(|s| s.var_us2).sum();
415        let total_second_moment = total_var + total_mean * total_mean;
416
417        let rho = self.arrival_rate * total_mean;
418        let stable = rho < 1.0;
419
420        // Pollaczek-Khinchine: E[T] = E[S] + λE[S²] / (2(1-ρ))
421        let mean_sojourn = if stable && rho > 0.0 {
422            total_mean + (self.arrival_rate * total_second_moment) / (2.0 * (1.0 - rho))
423        } else if rho >= 1.0 {
424            f64::INFINITY
425        } else {
426            total_mean
427        };
428
429        let stage_breakdown: Vec<StageBreakdown> = self
430            .stages
431            .iter()
432            .map(|s| {
433                let cv = if s.mean_us > 0.0 {
434                    s.var_us2.max(0.0).sqrt() / s.mean_us
435                } else {
436                    0.0
437                };
438                StageBreakdown {
439                    name: s.name,
440                    mean_us: s.mean_us,
441                    fraction: if total_mean > 0.0 {
442                        s.mean_us / total_mean
443                    } else {
444                        0.0
445                    },
446                    cv,
447                }
448            })
449            .collect();
450
451        PipelineCostResult {
452            total_mean_us: total_mean,
453            total_var_us2: total_var,
454            utilization: rho,
455            mean_sojourn_us: mean_sojourn,
456            stable,
457            budget_fraction: if self.frame_budget_us > 0.0 {
458                total_mean / self.frame_budget_us
459            } else {
460                f64::INFINITY
461            },
462            stage_breakdown,
463            headroom_us: self.frame_budget_us - mean_sojourn,
464        }
465    }
466}
467
468impl PipelineCostResult {
469    /// Serialize to JSONL for evidence ledger.
470    #[must_use]
471    pub fn to_jsonl(&self) -> String {
472        let mut out = String::with_capacity(512);
473        out.push_str("{\"event\":\"pipeline_cost_analysis\"");
474        push_f64(&mut out, "total_mean_us", self.total_mean_us);
475        push_f64(&mut out, "total_var_us2", self.total_var_us2);
476        push_f64(&mut out, "utilization", self.utilization);
477        push_f64(&mut out, "mean_sojourn_us", self.mean_sojourn_us);
478        push_bool(&mut out, "stable", self.stable);
479        push_f64(&mut out, "budget_fraction", self.budget_fraction);
480        push_f64(&mut out, "headroom_us", self.headroom_us);
481        out.push_str(",\"stages\":[");
482        for (i, s) in self.stage_breakdown.iter().enumerate() {
483            if i > 0 {
484                out.push(',');
485            }
486            out.push_str(&format!(
487                "{{\"name\":\"{}\",\"mean_us\":{:.3},\"fraction\":{:.4},\"cv\":{:.4}}}",
488                s.name, s.mean_us, s.fraction, s.cv
489            ));
490        }
491        out.push_str("]}");
492        out
493    }
494}
495
496// ─── Patch Batching Model ─────────────────────────────────────────────────
497
/// Parameters for the patch batching cost model.
///
/// Models the `BufferDiff → CellPatch → GPU upload` path: each batch pays a
/// fixed overhead, each patch pays a processing cost, and deferring patches
/// to fill a batch pays a latency (staleness) penalty.
#[derive(Debug, Clone)]
pub struct BatchCostParams {
    /// Per-batch overhead in µs (GPU command buffer setup, draw call).
    pub c_overhead_us: f64,
    /// Per-patch processing cost in µs (cell serialization + copy).
    pub c_per_patch_us: f64,
    /// Latency cost per deferred patch in µs (visual staleness).
    pub c_latency_us: f64,
    /// Total patches to process in the frame.
    pub total_patches: u64,
}
510
511impl Default for BatchCostParams {
512    fn default() -> Self {
513        Self {
514            // GPU command buffer overhead: ~20µs per draw call
515            c_overhead_us: 20.0,
516            // Cell serialization: ~0.05µs per patch (16 bytes)
517            c_per_patch_us: 0.05,
518            // Latency penalty: ~0.5µs per patch deferred
519            c_latency_us: 0.5,
520            // Typical dirty cell count at 5% change rate on 120x40
521            total_patches: 240,
522        }
523    }
524}
525
/// Result of batch cost model optimization.
///
/// Produced by [`BatchCostParams::optimize`].
#[derive(Debug, Clone)]
pub struct BatchCostResult {
    /// Optimal batch size.
    pub optimal_batch_size: u64,
    /// Total cost with optimal batching (µs).
    pub optimal_cost_us: f64,
    /// Cost with immediate flush (batch_size = 1, µs).
    pub immediate_cost_us: f64,
    /// Cost with single batch (batch_size = n, µs).
    pub single_batch_cost_us: f64,
    /// Improvement ratio (immediate / optimal); ≥ 1 when batching is no
    /// worse than immediate flush.
    pub improvement_ratio: f64,
    /// Evidence: cost at selected batch sizes.
    pub comparison_points: Vec<BatchCostPoint>,
}
542
/// A single evaluation point on the batch cost surface.
///
/// Total cost is the sum of the three components below.
#[derive(Debug, Clone)]
pub struct BatchCostPoint {
    /// Batch size (after clamping to `[1, n]`).
    pub batch_size: u64,
    /// Number of batches (`ceil(n / batch_size)`).
    pub num_batches: u64,
    /// Total cost (µs).
    pub total_cost_us: f64,
    /// Overhead component (µs).
    pub overhead_us: f64,
    /// Processing component (µs).
    pub processing_us: f64,
    /// Latency component (µs).
    pub latency_us: f64,
}
559
560impl BatchCostParams {
561    /// Total cost for a given batch size k.
562    ///
563    /// ```text
564    /// J(k) = ceil(n/k) × (c_overhead + k × c_per_patch) + (k−1) × c_latency
565    /// ```
566    #[must_use]
567    pub fn total_cost(&self, batch_size: u64) -> f64 {
568        let n = self.total_patches;
569        if n == 0 || batch_size == 0 {
570            return 0.0;
571        }
572        let k = batch_size.min(n);
573        let num_batches = n.div_ceil(k);
574
575        let overhead = num_batches as f64 * self.c_overhead_us;
576        let processing = n as f64 * self.c_per_patch_us;
577        let latency = (k.saturating_sub(1)) as f64 * self.c_latency_us;
578        overhead + processing + latency
579    }
580
581    /// Evaluate a single point.
582    #[must_use]
583    pub fn evaluate(&self, batch_size: u64) -> BatchCostPoint {
584        let n = self.total_patches;
585        let k = batch_size.max(1).min(n.max(1));
586        let num_batches = if n > 0 { n.div_ceil(k) } else { 0 };
587
588        let overhead = num_batches as f64 * self.c_overhead_us;
589        let processing = n as f64 * self.c_per_patch_us;
590        let latency = (k.saturating_sub(1)) as f64 * self.c_latency_us;
591
592        BatchCostPoint {
593            batch_size: k,
594            num_batches,
595            total_cost_us: overhead + processing + latency,
596            overhead_us: overhead,
597            processing_us: processing,
598            latency_us: latency,
599        }
600    }
601
602    /// Compute optimal batch size.
603    ///
604    /// The continuous optimum from `dJ/dk = 0` is:
605    /// ```text
606    /// k* = sqrt(n × c_overhead / c_latency)
607    /// ```
608    ///
609    /// Because `ceil(n/k)` creates discontinuities, the true discrete
610    /// optimum is found by enumerating candidate batch sizes at all
611    /// points where `ceil(n/k)` changes value. There are at most
612    /// `O(sqrt(n))` such candidates.
613    #[must_use]
614    pub fn optimal_batch_size(&self) -> u64 {
615        let n = self.total_patches;
616        if n == 0 {
617            return 1;
618        }
619        if self.c_latency_us <= 0.0 {
620            return n; // No latency cost: single batch is optimal.
621        }
622        if self.c_overhead_us <= 0.0 {
623            return 1; // No overhead: immediate flush is optimal.
624        }
625
626        // Collect candidate k values where ceil(n/k) changes.
627        // For each number of batches m, the largest k giving exactly m batches
628        // is k = ceil(n/m). We check m = 1..sqrt(n) and the reciprocal k values.
629        let mut candidates: Vec<u64> = Vec::new();
630        let sqrt_n = (n as f64).sqrt().ceil() as u64 + 1;
631
632        for m in 1..=sqrt_n.min(n) {
633            let k = n.div_ceil(m);
634            candidates.push(k);
635            if k > 1 {
636                candidates.push(k - 1);
637            }
638        }
639        // Also check k = 1..sqrt(n) directly.
640        for k in 1..=sqrt_n.min(n) {
641            candidates.push(k);
642        }
643        candidates.push(n);
644
645        candidates.sort_unstable();
646        candidates.dedup();
647
648        let mut best_k = 1u64;
649        let mut best_cost = f64::INFINITY;
650
651        for &k in &candidates {
652            if k == 0 || k > n {
653                continue;
654            }
655            let cost = self.total_cost(k);
656            if cost < best_cost {
657                best_cost = cost;
658                best_k = k;
659            }
660        }
661
662        best_k
663    }
664
665    /// Run the full optimization and produce evidence.
666    #[must_use]
667    pub fn optimize(&self) -> BatchCostResult {
668        let n = self.total_patches;
669        let k_star = self.optimal_batch_size();
670        let opt_cost = self.total_cost(k_star);
671        let immediate_cost = self.total_cost(1);
672        let single_batch_cost = self.total_cost(n.max(1));
673
674        // Comparison points: 1, k*/4, k*/2, k*, 2k*, n.
675        let mut sizes: Vec<u64> = vec![1];
676        if k_star > 4 {
677            sizes.push(k_star / 4);
678        }
679        if k_star > 2 {
680            sizes.push(k_star / 2);
681        }
682        sizes.push(k_star);
683        if k_star.saturating_mul(2) <= n {
684            sizes.push(k_star * 2);
685        }
686        if n > 1 {
687            sizes.push(n);
688        }
689        sizes.sort_unstable();
690        sizes.dedup();
691
692        let comparison_points: Vec<BatchCostPoint> =
693            sizes.iter().map(|&k| self.evaluate(k)).collect();
694
695        let improvement_ratio = if opt_cost > 0.0 {
696            immediate_cost / opt_cost
697        } else {
698            1.0
699        };
700
701        BatchCostResult {
702            optimal_batch_size: k_star,
703            optimal_cost_us: opt_cost,
704            immediate_cost_us: immediate_cost,
705            single_batch_cost_us: single_batch_cost,
706            improvement_ratio,
707            comparison_points,
708        }
709    }
710}
711
712impl BatchCostResult {
713    /// Serialize to JSONL for evidence ledger.
714    #[must_use]
715    pub fn to_jsonl(&self) -> String {
716        let mut out = String::with_capacity(512);
717        out.push_str("{\"event\":\"batch_cost_optimal\"");
718        push_u64(&mut out, "optimal_batch_size", self.optimal_batch_size);
719        push_f64(&mut out, "optimal_cost_us", self.optimal_cost_us);
720        push_f64(&mut out, "immediate_cost_us", self.immediate_cost_us);
721        push_f64(&mut out, "single_batch_cost_us", self.single_batch_cost_us);
722        push_f64(&mut out, "improvement_ratio", self.improvement_ratio);
723        out.push_str(",\"comparisons\":[");
724        for (i, pt) in self.comparison_points.iter().enumerate() {
725            if i > 0 {
726                out.push(',');
727            }
728            out.push_str(&format!(
729                "{{\"batch_size\":{},\"num_batches\":{},\"total_cost_us\":{:.3},\"overhead_us\":{:.3},\"processing_us\":{:.3},\"latency_us\":{:.3}}}",
730                pt.batch_size, pt.num_batches, pt.total_cost_us, pt.overhead_us, pt.processing_us, pt.latency_us
731            ));
732        }
733        out.push_str("]}");
734        out
735    }
736}
737
738// ─── Sensitivity Analysis ─────────────────────────────────────────────────
739
/// Sensitivity analysis: how does the optimal policy change as a parameter varies?
///
/// One point in a parameter sweep produced by [`cache_sensitivity_zipf`]
/// or [`batch_sensitivity_patches`].
#[derive(Debug, Clone)]
pub struct SensitivityPoint {
    /// Parameter value (e.g. Zipf α or total patch count).
    pub param_value: f64,
    /// Optimal policy value at this parameter (budget bytes or batch size).
    pub optimal_value: f64,
    /// Cost at the optimal policy.
    pub optimal_cost: f64,
}
750
751/// Run sensitivity analysis on cache budget vs Zipf alpha.
752///
753/// Sweeps `alpha` from `alpha_min` to `alpha_max` in `steps` increments
754/// and computes optimal budget at each point.
755#[must_use]
756pub fn cache_sensitivity_zipf(
757    base_params: &CacheCostParams,
758    alpha_min: f64,
759    alpha_max: f64,
760    steps: usize,
761) -> Vec<SensitivityPoint> {
762    let steps = steps.max(2);
763    let step = (alpha_max - alpha_min) / (steps - 1) as f64;
764
765    (0..steps)
766        .map(|i| {
767            let alpha = alpha_min + step * i as f64;
768            let mut params = base_params.clone();
769            params.zipf_alpha = alpha;
770            let b_star = params.optimal_budget();
771            let cost = params.total_cost(b_star);
772            SensitivityPoint {
773                param_value: alpha,
774                optimal_value: b_star,
775                optimal_cost: cost,
776            }
777        })
778        .collect()
779}
780
781/// Run sensitivity analysis on batch size vs total patches.
782#[must_use]
783pub fn batch_sensitivity_patches(
784    base_params: &BatchCostParams,
785    n_min: u64,
786    n_max: u64,
787    steps: usize,
788) -> Vec<SensitivityPoint> {
789    let steps = steps.max(2);
790    let step = ((n_max - n_min) as f64) / (steps - 1) as f64;
791
792    (0..steps)
793        .map(|i| {
794            let n = n_min + (step * i as f64).round() as u64;
795            let mut params = base_params.clone();
796            params.total_patches = n;
797            let k_star = params.optimal_batch_size();
798            let cost = params.total_cost(k_star);
799            SensitivityPoint {
800                param_value: n as f64,
801                optimal_value: k_star as f64,
802                optimal_cost: cost,
803            }
804        })
805        .collect()
806}
807
808impl fmt::Display for CacheCostResult {
809    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
810        writeln!(f, "Cache Cost Model — Optimal Policy")?;
811        writeln!(
812            f,
813            "  Budget:    {:.0} bytes ({:.0} items)",
814            self.optimal_budget_bytes, self.items_cached
815        )?;
816        writeln!(f, "  Hit rate:  {:.2}%", self.optimal_hit_rate * 100.0)?;
817        writeln!(
818            f,
819            "  Cost:      {:.3} µs/frame (miss: {:.3}, mem: {:.3})",
820            self.optimal_cost_us, self.cost_miss_us, self.cost_mem_us
821        )?;
822        writeln!(f, "  Comparison points:")?;
823        for pt in &self.comparison_points {
824            writeln!(
825                f,
826                "    B={:.0}: miss={:.4}, cost={:.3} µs",
827                pt.budget_bytes, pt.miss_rate, pt.total_cost_us
828            )?;
829        }
830        Ok(())
831    }
832}
833
834impl fmt::Display for PipelineCostResult {
835    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
836        writeln!(f, "Pipeline Scheduling Model (M/G/1)")?;
837        writeln!(f, "  Total mean:     {:.1} µs", self.total_mean_us)?;
838        writeln!(f, "  Utilization:    {:.4} (ρ)", self.utilization)?;
839        writeln!(f, "  Mean sojourn:   {:.1} µs", self.mean_sojourn_us)?;
840        writeln!(f, "  Budget used:    {:.1}%", self.budget_fraction * 100.0)?;
841        writeln!(f, "  Headroom:       {:.1} µs", self.headroom_us)?;
842        writeln!(f, "  Stable:         {}", self.stable)?;
843        writeln!(f, "  Stage breakdown:")?;
844        for s in &self.stage_breakdown {
845            writeln!(
846                f,
847                "    {:<10} {:.1} µs ({:.1}%, cv={:.2})",
848                s.name,
849                s.mean_us,
850                s.fraction * 100.0,
851                s.cv
852            )?;
853        }
854        Ok(())
855    }
856}
857
858impl fmt::Display for BatchCostResult {
859    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
860        writeln!(f, "Patch Batching Model")?;
861        writeln!(f, "  Optimal k:      {}", self.optimal_batch_size)?;
862        writeln!(f, "  Optimal cost:   {:.3} µs", self.optimal_cost_us)?;
863        writeln!(f, "  Immediate cost: {:.3} µs", self.immediate_cost_us)?;
864        writeln!(f, "  Single batch:   {:.3} µs", self.single_batch_cost_us)?;
865        writeln!(f, "  Improvement:    {:.2}×", self.improvement_ratio)?;
866        writeln!(f, "  Comparison points:")?;
867        for pt in &self.comparison_points {
868            writeln!(
869                f,
870                "    k={}: {} batches, {:.3} µs (overhead={:.1}, proc={:.1}, latency={:.1})",
871                pt.batch_size,
872                pt.num_batches,
873                pt.total_cost_us,
874                pt.overhead_us,
875                pt.processing_us,
876                pt.latency_us
877            )?;
878        }
879        Ok(())
880    }
881}
882
883// ─── JSONL helpers ────────────────────────────────────────────────────────
884
/// Append `,"key":value` to `out`, encoding non-finite floats as valid JSON:
/// NaN becomes `null`; ±∞ saturate to ±1e308.
fn push_f64(out: &mut String, key: &str, value: f64) {
    use std::fmt::Write;

    out.push_str(",\"");
    out.push_str(key);
    out.push_str("\":");
    if value.is_finite() {
        let _ = write!(out, "{value:.6}");
        return;
    }
    // JSON numbers cannot represent NaN or infinities.
    let replacement = if value.is_nan() {
        "null"
    } else if value.is_sign_positive() {
        "1e308"
    } else {
        "-1e308"
    };
    out.push_str(replacement);
}
900
/// Appends `,"key":<value>` to `out`; a u64 always renders as a bare
/// JSON integer, so no special-casing is needed.
fn push_u64(out: &mut String, key: &str, value: u64) {
    use std::fmt::Write;
    // Writing into a String is infallible; discard the Result.
    let _ = write!(out, ",\"{key}\":{value}");
}
908
/// Appends `,"key":true` or `,"key":false` to `out` using the JSON
/// boolean literals, with no intermediate allocation.
fn push_bool(out: &mut String, key: &str, value: bool) {
    out.push_str(",\"");
    out.push_str(key);
    out.push_str("\":");
    let literal = match value {
        true => "true",
        false => "false",
    };
    out.push_str(literal);
}
915
916// ─── Tests ────────────────────────────────────────────────────────────────
917
#[cfg(test)]
mod tests {
    // Unit tests for the three cost models (cache, pipeline, batching).
    // Grouped by banner sections; each test pins a single property of the
    // model API: closed-form values, monotonicity, edge-case clamping,
    // JSONL/Display output shape, determinism, and derive impls.
    use super::*;

    // ═══ Cache cost model ═══════════════════════════════════════════════

    #[test]
    fn cache_miss_rate_full_coverage() {
        let params = CacheCostParams {
            item_bytes: 100.0,
            working_set_n: 10.0,
            zipf_alpha: 1.5,
            budget_max_bytes: 100_000.0,
            ..Default::default()
        };
        // Budget = 10 items × 100 bytes = 1000 → capacity = N → miss_rate = 0.
        let mr = params.miss_rate(1000.0);
        assert!(
            mr.abs() < 1e-10,
            "full coverage should have zero miss rate, got {mr}"
        );
    }

    #[test]
    fn cache_miss_rate_zero_budget() {
        let params = CacheCostParams::default();
        let mr = params.miss_rate(0.0);
        assert!(
            (mr - 1.0).abs() < 1e-10,
            "zero budget should have miss rate 1.0, got {mr}"
        );
    }

    #[test]
    fn cache_miss_rate_monotone_decreasing() {
        let params = CacheCostParams::default();
        let mut prev = 1.0;
        for b in [1000.0, 5000.0, 10_000.0, 50_000.0, 100_000.0] {
            let mr = params.miss_rate(b);
            assert!(
                mr <= prev + 1e-10,
                "miss rate should decrease with budget: {mr} > {prev}"
            );
            prev = mr;
        }
    }

    #[test]
    fn cache_optimal_budget_is_interior() {
        let params = CacheCostParams::default();
        let b_star = params.optimal_budget();
        assert!(
            b_star >= params.item_bytes,
            "optimal should be >= item_bytes"
        );
        assert!(
            b_star <= params.budget_max_bytes,
            "optimal should be <= max"
        );
    }

    #[test]
    fn cache_optimal_is_local_minimum() {
        let params = CacheCostParams::default();
        let b_star = params.optimal_budget();
        let cost_star = params.total_cost(b_star);

        // Nearby points should have >= cost.
        let delta = params.item_bytes;
        let cost_below = params.total_cost((b_star - delta).max(params.item_bytes));
        let cost_above = params.total_cost((b_star + delta).min(params.budget_max_bytes));

        // +1.0 µs slack absorbs the discretization of probing at ±one item.
        assert!(
            cost_star <= cost_below + 1.0,
            "optimal cost {cost_star} should be <= cost at B-δ {cost_below}"
        );
        assert!(
            cost_star <= cost_above + 1.0,
            "optimal cost {cost_star} should be <= cost at B+δ {cost_above}"
        );
    }

    #[test]
    fn cache_optimize_produces_evidence() {
        let result = CacheCostParams::default().optimize();
        assert!(result.optimal_budget_bytes > 0.0);
        assert!(result.optimal_hit_rate > 0.0);
        assert!(result.optimal_hit_rate <= 1.0);
        assert!(!result.comparison_points.is_empty());
    }

    #[test]
    fn cache_optimize_jsonl_valid() {
        let result = CacheCostParams::default().optimize();
        let jsonl = result.to_jsonl();
        let _: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
    }

    #[test]
    fn cache_cost_display() {
        let result = CacheCostParams::default().optimize();
        let display = format!("{result}");
        assert!(display.contains("Cache Cost Model"));
        assert!(display.contains("Hit rate"));
    }

    #[test]
    fn cache_high_alpha_needs_less_budget() {
        let params_low = CacheCostParams {
            zipf_alpha: 1.2,
            ..Default::default()
        };
        let params_high = CacheCostParams {
            zipf_alpha: 2.0,
            ..Default::default()
        };

        let b_low = params_low.optimal_budget();
        let b_high = params_high.optimal_budget();

        // Higher Zipf alpha = more skewed = fewer distinct popular items = less cache needed.
        assert!(
            b_high < b_low,
            "higher zipf alpha should need less budget: {b_high} >= {b_low}"
        );
    }

    // ═══ Pipeline scheduling model ═════════════════════════════════════

    #[test]
    fn pipeline_default_is_stable() {
        let result = PipelineCostParams::default().analyze();
        assert!(result.stable, "default pipeline should be stable");
        assert!(result.utilization < 1.0);
    }

    #[test]
    fn pipeline_utilization_formula() {
        let params = PipelineCostParams {
            stages: vec![StageStats {
                name: "test",
                mean_us: 1000.0,
                var_us2: 0.0,
            }],
            arrival_rate: 0.0005, // λ = 0.5/ms
            frame_budget_us: 16667.0,
        };
        let result = params.analyze();
        // ρ = λ × E[S] = 0.0005 × 1000 = 0.5
        assert!(
            (result.utilization - 0.5).abs() < 1e-6,
            "ρ should be 0.5, got {}",
            result.utilization
        );
    }

    #[test]
    fn pipeline_deterministic_sojourn() {
        // Zero variance → M/D/1 → known formula.
        let params = PipelineCostParams {
            stages: vec![StageStats {
                name: "test",
                mean_us: 1000.0,
                var_us2: 0.0,
            }],
            arrival_rate: 0.0005,
            frame_budget_us: 16667.0,
        };
        let result = params.analyze();
        // Pollaczek–Khinchine: E[T] = E[S] + λE[S²] / (2(1-ρ))
        // E[S²] = 0 + 1000² = 1e6
        // E[T] = 1000 + 0.0005 * 1e6 / (2 * 0.5) = 1000 + 500 = 1500
        assert!(
            (result.mean_sojourn_us - 1500.0).abs() < 1.0,
            "M/D/1 sojourn should be 1500µs, got {}",
            result.mean_sojourn_us
        );
    }

    #[test]
    fn pipeline_overloaded_is_unstable() {
        // Service time (20 ms) exceeds the inter-arrival time (~16.7 ms) → ρ > 1.
        let params = PipelineCostParams {
            stages: vec![StageStats {
                name: "test",
                mean_us: 20_000.0,
                var_us2: 0.0,
            }],
            arrival_rate: 1.0 / 16667.0, // 60fps
            frame_budget_us: 16667.0,
        };
        let result = params.analyze();
        assert!(!result.stable, "overloaded pipeline should be unstable");
        assert!(result.utilization > 1.0);
    }

    #[test]
    fn pipeline_stage_fractions_sum_to_one() {
        // Per-stage fractions must partition the total mean service time.
        let result = PipelineCostParams::default().analyze();
        let total_fraction: f64 = result.stage_breakdown.iter().map(|s| s.fraction).sum();
        assert!(
            (total_fraction - 1.0).abs() < 1e-10,
            "fractions should sum to 1.0"
        );
    }

    #[test]
    fn pipeline_headroom_positive_when_stable() {
        let result = PipelineCostParams::default().analyze();
        assert!(
            result.headroom_us > 0.0,
            "stable pipeline should have positive headroom"
        );
    }

    #[test]
    fn pipeline_jsonl_valid() {
        let result = PipelineCostParams::default().analyze();
        let jsonl = result.to_jsonl();
        let _: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
    }

    #[test]
    fn pipeline_display() {
        let result = PipelineCostParams::default().analyze();
        let display = format!("{result}");
        assert!(display.contains("Pipeline Scheduling Model"));
        assert!(display.contains("Utilization"));
    }

    // ═══ Patch batching model ══════════════════════════════════════════

    #[test]
    fn batch_optimal_between_1_and_n() {
        let params = BatchCostParams::default();
        let k_star = params.optimal_batch_size();
        assert!(k_star >= 1);
        assert!(k_star <= params.total_patches);
    }

    #[test]
    fn batch_optimal_is_local_minimum() {
        let params = BatchCostParams::default();
        let k_star = params.optimal_batch_size();
        let cost_star = params.total_cost(k_star);

        if k_star > 1 {
            let cost_below = params.total_cost(k_star - 1);
            assert!(
                cost_star <= cost_below + 0.01,
                "cost at k*={k_star} ({cost_star}) should be <= cost at k*-1 ({cost_below})"
            );
        }
        if k_star < params.total_patches {
            let cost_above = params.total_cost(k_star + 1);
            assert!(
                cost_star <= cost_above + 0.01,
                "cost at k*={k_star} ({cost_star}) should be <= cost at k*+1 ({cost_above})"
            );
        }
    }

    #[test]
    fn batch_no_overhead_means_immediate() {
        // With zero per-batch overhead, only latency matters → flush each patch.
        let params = BatchCostParams {
            c_overhead_us: 0.0,
            ..Default::default()
        };
        assert_eq!(params.optimal_batch_size(), 1);
    }

    #[test]
    fn batch_no_latency_means_single_batch() {
        // With zero latency cost, only overhead matters → one big batch.
        let params = BatchCostParams {
            c_latency_us: 0.0,
            ..Default::default()
        };
        assert_eq!(params.optimal_batch_size(), params.total_patches);
    }

    #[test]
    fn batch_zero_patches() {
        let params = BatchCostParams {
            total_patches: 0,
            ..Default::default()
        };
        let result = params.optimize();
        assert_eq!(result.optimal_batch_size, 1);
        assert!(result.optimal_cost_us.abs() < 1e-10);
    }

    #[test]
    fn batch_optimize_improvement() {
        let result = BatchCostParams::default().optimize();
        assert!(
            result.improvement_ratio >= 1.0,
            "optimal should be at least as good as immediate"
        );
    }

    #[test]
    fn batch_optimize_jsonl_valid() {
        let result = BatchCostParams::default().optimize();
        let jsonl = result.to_jsonl();
        let _: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
    }

    #[test]
    fn batch_display() {
        let result = BatchCostParams::default().optimize();
        let display = format!("{result}");
        assert!(display.contains("Patch Batching Model"));
        assert!(display.contains("Optimal k"));
    }

    #[test]
    fn batch_cost_formula_manual_check() {
        // n=100, k=10, overhead=20, per_patch=0.05, latency=0.5
        // batches = ceil(100/10) = 10
        // overhead = 10 × 20 = 200
        // processing = 100 × 0.05 = 5
        // latency = (10-1) × 0.5 = 4.5
        // total = 209.5
        let params = BatchCostParams {
            c_overhead_us: 20.0,
            c_per_patch_us: 0.05,
            c_latency_us: 0.5,
            total_patches: 100,
        };
        let cost = params.total_cost(10);
        assert!(
            (cost - 209.5).abs() < 0.01,
            "manual check: expected 209.5, got {cost}"
        );
    }

    // ═══ Sensitivity analysis ══════════════════════════════════════════

    #[test]
    fn cache_sensitivity_zipf_monotone() {
        let params = CacheCostParams::default();
        let points = cache_sensitivity_zipf(&params, 1.0, 3.0, 10);
        assert_eq!(points.len(), 10);
        // Higher alpha → smaller optimal budget.
        for i in 1..points.len() {
            assert!(
                points[i].optimal_value <= points[i - 1].optimal_value + 1.0,
                "optimal budget should decrease with alpha"
            );
        }
    }

    #[test]
    fn batch_sensitivity_patches_grows() {
        let params = BatchCostParams::default();
        let points = batch_sensitivity_patches(&params, 10, 1000, 10);
        assert_eq!(points.len(), 10);
        // Overall trend: more patches → larger optimal batch size (sqrt scaling).
        // Due to ceiling effects, individual steps may be non-monotone,
        // so we compare first vs last.
        assert!(
            points.last().unwrap().optimal_value > points.first().unwrap().optimal_value,
            "optimal batch size should be larger for more patches (overall trend)"
        );
    }

    // ═══ Determinism ═══════════════════════════════════════════════════

    #[test]
    fn all_models_deterministic() {
        // Two identical runs of each model must produce identical results.
        let cache1 = CacheCostParams::default().optimize();
        let cache2 = CacheCostParams::default().optimize();
        assert!(
            (cache1.optimal_budget_bytes - cache2.optimal_budget_bytes).abs() < 1e-10,
            "cache model should be deterministic"
        );

        let pipe1 = PipelineCostParams::default().analyze();
        let pipe2 = PipelineCostParams::default().analyze();
        assert!(
            (pipe1.mean_sojourn_us - pipe2.mean_sojourn_us).abs() < 1e-10,
            "pipeline model should be deterministic"
        );

        let batch1 = BatchCostParams::default().optimize();
        let batch2 = BatchCostParams::default().optimize();
        assert_eq!(
            batch1.optimal_batch_size, batch2.optimal_batch_size,
            "batch model should be deterministic"
        );
    }

    // ═══ Edge cases ════════════════════════════════════════════════════

    #[test]
    fn cache_degenerate_params() {
        // Zero working set.
        let params = CacheCostParams {
            working_set_n: 0.0,
            ..Default::default()
        };
        let b = params.optimal_budget();
        assert!(b.is_finite());

        // Zero miss cost → optimal = minimum budget.
        let params2 = CacheCostParams {
            c_miss_us: 0.0,
            ..Default::default()
        };
        let b2 = params2.optimal_budget();
        // With no miss cost, any budget is equally good for miss component,
        // but memory cost drives it to minimum.
        assert!(b2.is_finite());
    }

    #[test]
    fn pipeline_empty_stages() {
        let params = PipelineCostParams {
            stages: vec![],
            ..Default::default()
        };
        let result = params.analyze();
        assert!(result.total_mean_us.abs() < 1e-10);
        assert!(result.stable);
    }

    #[test]
    fn pipeline_zero_arrival() {
        let params = PipelineCostParams {
            arrival_rate: 0.0,
            ..Default::default()
        };
        let result = params.analyze();
        assert!(result.stable);
        // With zero arrivals, sojourn = service time (no queueing delay).
        assert!((result.mean_sojourn_us - result.total_mean_us).abs() < 1e-6);
    }

    #[test]
    fn sensitivity_point_debug() {
        let pt = SensitivityPoint {
            param_value: 1.5,
            optimal_value: 50_000.0,
            optimal_cost: 123.456,
        };
        let dbg = format!("{pt:?}");
        assert!(dbg.contains("SensitivityPoint"));
    }

    // ═══ CacheCostParams::evaluate ════════════════════════════════════

    #[test]
    fn cache_evaluate_components_sum_to_total() {
        let params = CacheCostParams::default();
        let pt = params.evaluate(50_000.0);
        assert!(
            (pt.total_cost_us - (pt.cost_miss_us + pt.cost_mem_us)).abs() < 1e-10,
            "total should equal miss + mem components"
        );
    }

    #[test]
    fn cache_evaluate_matches_individual_calls() {
        let params = CacheCostParams::default();
        let budget = 30_000.0;
        let pt = params.evaluate(budget);
        assert_eq!(pt.budget_bytes, budget);
        assert!(
            (pt.miss_rate - params.miss_rate(budget)).abs() < 1e-10,
            "evaluate miss_rate should match miss_rate()"
        );
        assert!(
            (pt.total_cost_us - params.total_cost(budget)).abs() < 1e-10,
            "evaluate total_cost should match total_cost()"
        );
    }

    #[test]
    fn cache_evaluate_at_optimal() {
        let params = CacheCostParams::default();
        let result = params.optimize();
        let pt = params.evaluate(result.optimal_budget_bytes);
        assert!(
            (pt.miss_rate - result.optimal_miss_rate).abs() < 1e-10,
            "evaluate at optimal should match optimize result"
        );
    }

    // ═══ Cache extreme params ═════════════════════════════════════════

    #[test]
    fn cache_miss_rate_negative_budget_clamps_to_one() {
        let params = CacheCostParams::default();
        let mr = params.miss_rate(-100.0);
        assert!(
            (mr - 1.0).abs() < 1e-10,
            "negative budget should give miss rate 1.0, got {mr}"
        );
    }

    #[test]
    fn cache_miss_rate_huge_budget_approaches_zero() {
        let params = CacheCostParams::default();
        let mr = params.miss_rate(1e12);
        assert!(
            mr.abs() < 1e-10,
            "huge budget should give near-zero miss rate, got {mr}"
        );
    }

    #[test]
    fn cache_optimal_budget_c_mem_zero_returns_max() {
        // Free memory → no reason to cache less than the maximum.
        let params = CacheCostParams {
            c_mem_per_byte: 0.0,
            ..Default::default()
        };
        assert_eq!(
            params.optimal_budget(),
            params.budget_max_bytes,
            "zero memory cost should give max budget"
        );
    }

    #[test]
    fn cache_optimal_budget_alpha_zero_returns_max() {
        let params = CacheCostParams {
            zipf_alpha: 0.0,
            ..Default::default()
        };
        assert_eq!(params.optimal_budget(), params.budget_max_bytes);
    }

    #[test]
    fn cache_optimal_budget_item_bytes_zero_returns_max() {
        let params = CacheCostParams {
            item_bytes: 0.0,
            ..Default::default()
        };
        assert_eq!(params.optimal_budget(), params.budget_max_bytes);
    }

    #[test]
    fn cache_optimize_comparison_points_count() {
        // NOTE(review): the expected count mirrors the fixed sweep inside
        // optimize() — keep this in sync if that sweep changes.
        let result = CacheCostParams::default().optimize();
        assert_eq!(
            result.comparison_points.len(),
            6,
            "should have 6 comparison points"
        );
    }

    #[test]
    fn cache_optimize_items_cached_positive() {
        let result = CacheCostParams::default().optimize();
        assert!(result.items_cached > 0.0);
    }

    #[test]
    fn cache_optimize_cost_components_non_negative() {
        let result = CacheCostParams::default().optimize();
        assert!(result.cost_miss_us >= 0.0);
        assert!(result.cost_mem_us >= 0.0);
        assert!(
            (result.optimal_cost_us - (result.cost_miss_us + result.cost_mem_us)).abs() < 1e-6,
            "total cost should be sum of components"
        );
    }

    // ═══ StageStats::second_moment ════════════════════════════════════

    #[test]
    fn stage_stats_second_moment_deterministic() {
        let s = StageStats {
            name: "test",
            mean_us: 100.0,
            var_us2: 0.0,
        };
        // E[S²] = Var[S] + E[S]² = 0 + 10000 = 10000
        assert!(
            (s.second_moment() - 10_000.0).abs() < 1e-10,
            "E[S²] = mean² when variance is zero"
        );
    }

    #[test]
    fn stage_stats_second_moment_with_variance() {
        let s = StageStats {
            name: "test",
            mean_us: 50.0,
            var_us2: 400.0,
        };
        // E[S²] = 400 + 2500 = 2900
        assert!(
            (s.second_moment() - 2900.0).abs() < 1e-10,
            "E[S²] = Var + mean²"
        );
    }

    // ═══ Pipeline edge cases ══════════════════════════════════════════

    #[test]
    fn pipeline_multi_stage_variance_contributes() {
        let params = PipelineCostParams {
            stages: vec![
                StageStats {
                    name: "fast",
                    mean_us: 100.0,
                    var_us2: 0.0,
                },
                StageStats {
                    name: "variable",
                    mean_us: 200.0,
                    var_us2: 10000.0,
                },
            ],
            arrival_rate: 0.0001,
            frame_budget_us: 16667.0,
        };
        let result = params.analyze();
        assert!(result.stable);
        // Mean should be sum of stage means
        assert!(
            (result.total_mean_us - 300.0).abs() < 1e-6,
            "total mean should be sum of stages"
        );
    }

    #[test]
    fn pipeline_stage_breakdown_names_match() {
        let result = PipelineCostParams::default().analyze();
        let names: Vec<&str> = result.stage_breakdown.iter().map(|s| s.name).collect();
        assert!(names.contains(&"input"));
        assert!(names.contains(&"update"));
        assert!(names.contains(&"view"));
    }

    #[test]
    fn pipeline_unstable_headroom_zero_or_negative() {
        let params = PipelineCostParams {
            stages: vec![StageStats {
                name: "slow",
                mean_us: 50_000.0,
                var_us2: 0.0,
            }],
            arrival_rate: 1.0 / 16667.0,
            frame_budget_us: 16667.0,
        };
        let result = params.analyze();
        assert!(!result.stable);
        assert!(result.headroom_us <= 0.0);
    }

    #[test]
    fn pipeline_jsonl_contains_expected_fields() {
        let result = PipelineCostParams::default().analyze();
        let jsonl = result.to_jsonl();
        let v: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
        assert_eq!(v["event"], "pipeline_cost_analysis");
        assert!(v["utilization"].is_number());
        assert!(v["stable"].is_boolean());
        assert!(v["mean_sojourn_us"].is_number());
    }

    // ═══ Batch evaluate ═══════════════════════════════════════════════

    #[test]
    fn batch_evaluate_components_sum_to_total() {
        let params = BatchCostParams::default();
        let pt = params.evaluate(10);
        assert!(
            (pt.total_cost_us - (pt.overhead_us + pt.processing_us + pt.latency_us)).abs() < 1e-10,
            "total should equal sum of components"
        );
    }

    #[test]
    fn batch_evaluate_single_patch() {
        let params = BatchCostParams {
            total_patches: 1,
            ..Default::default()
        };
        let pt = params.evaluate(1);
        assert_eq!(pt.batch_size, 1);
        assert_eq!(pt.num_batches, 1);
        assert!(pt.latency_us.abs() < 1e-10, "single patch → no latency");
    }

    #[test]
    fn batch_evaluate_zero_patches() {
        let params = BatchCostParams {
            total_patches: 0,
            ..Default::default()
        };
        let pt = params.evaluate(1);
        assert_eq!(pt.num_batches, 0);
        assert!(pt.total_cost_us.abs() < 1e-10);
    }

    // ═══ Batch total_cost edge cases ══════════════════════════════════

    #[test]
    fn batch_total_cost_zero_batch_size() {
        let params = BatchCostParams::default();
        let cost = params.total_cost(0);
        assert!(cost.abs() < 1e-10, "batch_size=0 should give zero cost");
    }

    #[test]
    fn batch_total_cost_larger_than_n() {
        let params = BatchCostParams {
            total_patches: 100,
            ..Default::default()
        };
        // batch_size > n should clamp to n
        let cost_at_n = params.total_cost(100);
        let cost_above = params.total_cost(200);
        assert!(
            (cost_at_n - cost_above).abs() < 1e-10,
            "batch_size > n should equal batch_size = n"
        );
    }

    #[test]
    fn batch_total_cost_one_is_immediate() {
        let params = BatchCostParams::default();
        let cost = params.total_cost(1);
        // n batches of 1, no latency
        let expected = params.total_patches as f64 * params.c_overhead_us
            + params.total_patches as f64 * params.c_per_patch_us;
        assert!(
            (cost - expected).abs() < 1e-10,
            "batch_size=1 cost: expected {expected}, got {cost}"
        );
    }

    // ═══ Batch single-patch model ═════════════════════════════════════

    #[test]
    fn batch_single_patch_optimal_is_one() {
        let params = BatchCostParams {
            total_patches: 1,
            ..Default::default()
        };
        assert_eq!(params.optimal_batch_size(), 1);
    }

    #[test]
    fn batch_optimize_comparison_points_non_empty() {
        let result = BatchCostParams::default().optimize();
        assert!(!result.comparison_points.is_empty());
    }

    #[test]
    fn batch_optimize_single_batch_cost_consistent() {
        let params = BatchCostParams::default();
        let result = params.optimize();
        assert!(
            (result.single_batch_cost_us - params.total_cost(params.total_patches)).abs() < 1e-10,
            "single_batch_cost should match total_cost(n)"
        );
    }

    #[test]
    fn batch_optimize_immediate_cost_consistent() {
        let params = BatchCostParams::default();
        let result = params.optimize();
        assert!(
            (result.immediate_cost_us - params.total_cost(1)).abs() < 1e-10,
            "immediate_cost should match total_cost(1)"
        );
    }

    // ═══ JSONL validation ═════════════════════════════════════════════

    #[test]
    fn cache_jsonl_contains_event() {
        let result = CacheCostParams::default().optimize();
        let jsonl = result.to_jsonl();
        let v: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
        assert_eq!(v["event"], "cache_cost_optimal");
        assert!(v["optimal_budget_bytes"].is_number());
    }

    #[test]
    fn batch_jsonl_contains_event() {
        let result = BatchCostParams::default().optimize();
        let jsonl = result.to_jsonl();
        let v: serde_json::Value = serde_json::from_str(&jsonl).expect("valid JSON");
        assert_eq!(v["event"], "batch_cost_optimal");
        assert!(v["optimal_batch_size"].is_number());
    }

    // ═══ Debug formatting ═════════════════════════════════════════════

    #[test]
    fn cache_cost_point_debug() {
        let pt = CacheCostParams::default().evaluate(10_000.0);
        let dbg = format!("{pt:?}");
        assert!(dbg.contains("CacheCostPoint"));
    }

    #[test]
    fn batch_cost_point_debug() {
        let pt = BatchCostParams::default().evaluate(10);
        let dbg = format!("{pt:?}");
        assert!(dbg.contains("BatchCostPoint"));
    }

    #[test]
    fn stage_breakdown_debug() {
        let result = PipelineCostParams::default().analyze();
        let dbg = format!("{:?}", result.stage_breakdown[0]);
        assert!(dbg.contains("StageBreakdown"));
    }

    #[test]
    fn cache_cost_params_debug() {
        let params = CacheCostParams::default();
        let dbg = format!("{params:?}");
        assert!(dbg.contains("CacheCostParams"));
    }

    #[test]
    fn batch_cost_params_debug() {
        let params = BatchCostParams::default();
        let dbg = format!("{params:?}");
        assert!(dbg.contains("BatchCostParams"));
    }

    // ═══ Sensitivity edge cases ═══════════════════════════════════════

    #[test]
    fn cache_sensitivity_zipf_min_steps_is_two() {
        let params = CacheCostParams::default();
        // steps=1 is clamped to 2 internally
        let points = cache_sensitivity_zipf(&params, 1.5, 1.5, 1);
        assert_eq!(points.len(), 2);
    }

    #[test]
    fn batch_sensitivity_patches_min_steps_is_two() {
        let params = BatchCostParams::default();
        let points = batch_sensitivity_patches(&params, 100, 100, 1);
        assert_eq!(points.len(), 2);
    }

    #[test]
    fn sensitivity_points_have_finite_values() {
        let params = CacheCostParams::default();
        for pt in cache_sensitivity_zipf(&params, 1.0, 3.0, 5) {
            assert!(pt.param_value.is_finite());
            assert!(pt.optimal_value.is_finite());
            assert!(pt.optimal_cost.is_finite());
        }
    }

    // ═══ Clone trait ══════════════════════════════════════════════════

    #[test]
    fn cache_params_clone() {
        let params = CacheCostParams::default();
        let cloned = params.clone();
        assert!((params.zipf_alpha - cloned.zipf_alpha).abs() < 1e-10);
    }

    #[test]
    fn batch_params_clone() {
        let params = BatchCostParams::default();
        let cloned = params.clone();
        assert_eq!(params.total_patches, cloned.total_patches);
    }
}