Skip to main content

phasm_core/stego/stc/
streaming_segmented.rs

1// Copyright (c) 2026 Christoph Gaffga
2// SPDX-License-Identifier: GPL-3.0-only
3// https://github.com/cgaffga/phasmcore
4
5//! Phase 6E-C / Task #24 — Streaming-segmented Viterbi STC.
6//!
7//! Generalizes `stc_embed_segmented` from "in-memory cover slice"
8//! to "on-demand cover fetch via callback". Memory bound is
9//! `O(num_segments × num_states × 8 B + segment_size × 16 B)`
10//! instead of `O(n)` cover materialization.
11//!
12//! ## When to use
13//!
14//! - **Long-clip video stego** (15+ minutes 1080p) where the
15//!   per-domain cover bit count exceeds practical RAM
16//!   (~16 billion positions × 9 bytes = ~145 GB).
17//! - **Multi-shadow encoder integration** (deferred to a
18//!   follow-on task): collapsing the §30D-C 3-pass MVD/residual
19//!   split into a single primary STC pass requires the encoder
20//!   driver to access cover via a per-GOP fetch callback rather
21//!   than a pre-materialized slice. Streaming-segmented Viterbi
22//!   is the natural primitive on the STC side of that integration.
23//!
24//! ## Status (this commit — scaffold only)
25//!
26//! Documented, type-stub level. The full implementation requires:
27//! 1. Refactoring `stc_embed_segmented` to accept a `CoverFetch`
28//!    callback in place of the slice arguments.
29//! 2. An encoder-side adapter that runs Pass 1 per GOP via §6E-C0
30//!    streaming walker and feeds the result into the callback.
31//! 3. An encoder-side adapter that re-runs Pass 1 per GOP for
32//!    Phase B's recompute step.
33//!
34//! The actual work is broken out as separate tasks:
35//! - **Task #24.1**: refactor `stc_embed_segmented` to callback API.
36//! - **Task #24.2**: encoder per-GOP replay adapter.
37//! - **Task #24.3**: integrate into the §30D-C orchestrator
38//!   (or into a new unified primary STC encoder if the §30D-C
39//!   split is collapsed at that point).
40//!
41//! ## Architectural notes
42//!
43//! The §30D-C 3-pass split (Pass 2A MVD STC → Pass 1B → Pass 2B
44//! residual STC) is **orthogonal** to the streaming-Viterbi memory
45//! bound. Streaming-Viterbi makes the in-memory STC plan use
46//! O(√n) rather than O(n) memory; the 3-pass orchestration
47//! structure is unchanged.
48//!
49//! The §30D-C 3-pass collapse — which would benefit multi-shadow
50//! N>1 by giving a single unified position list — is a **separate**
51//! architectural change. It requires either:
52//!
53//! - **Decision cache** (Phase 6D Option A): cache full per-MB
54//!   encoder state so the planner can simulate post-MVD-plan
55//!   residuals without running Pass 1B. Heavy refactor; ~30 GB
56//!   cache for 15-min 1080p.
57//! - **Iteration to fixed point**: Pass 1 → tentative plan → Pass 3
58//!   dryrun → re-plan → ... until convergence. 4-7× single-encode
59//!   wall-clock per iteration.
60//!
61//! Both rejected at session-design time on cost/complexity grounds.
62//! Multi-shadow N>1 stays bounded by these architectural limits
63//! until a future session prioritizes the unification refactor.
64
65use crate::stego::stc::embed::{EmbedResult, STC_PROGRESS_STEPS};
66use crate::stego::stc::extract::stc_extract;
67use crate::stego::stc::hhat;
68use crate::stego::progress;
69
/// On-demand cover source for streaming-segmented STC embedding.
///
/// A fetcher exposes the cover as `num_segments()` contiguous
/// segments of `segment_size_in_blocks()` message blocks each,
/// i.e. `K × w` cover positions per full segment.
///
/// Both Phase A (forward Viterbi) and Phase B (traceback) visit
/// every segment, so `fetch_segment` may be called more than once
/// for the same `seg_idx`. Repeated calls MUST return identical
/// data, otherwise the embedding is silently corrupted.
pub trait CoverFetch {
    /// Total cover position count `n`.
    fn total_positions(&self) -> usize;

    /// Segment count; must equal `m.div_ceil(K)` for segment
    /// size `K`.
    fn num_segments(&self) -> usize;

    /// Segment size `K` in message blocks. Uniform across segments,
    /// except that the final segment may be short when `K` does not
    /// divide `m` evenly.
    fn segment_size_in_blocks(&self) -> usize;

    /// Produce one segment's cover bits and per-position flip costs.
    ///
    /// Each returned vector has length `K × w` for a full segment,
    /// or less when `seg_idx == num_segments() - 1` and `m` is not a
    /// multiple of `K`.
    ///
    /// Implementations should drop any transient working state (e.g.
    /// an encoder's per-GOP scratch) before returning; the caller
    /// frees the returned vectors once the segment's traceback ends.
    fn fetch_segment(&mut self, seg_idx: usize) -> (Vec<u8>, Vec<f32>);
}
104
/// §6E-C / Task #24.2 — in-memory `CoverFetch` adapter.
///
/// Wraps a pre-materialized `(cover_bits, costs)` pair and hands out
/// per-segment slices on demand. Intended for orchestrator
/// integrations (Task #24.3) where the encoder's Pass-1 cover is
/// already resident; the streaming-Viterbi memory savings then come
/// from the STC-internal O(√n) checkpoint + back-pointer working
/// set rather than from the cover side.
///
/// **Memory bound**: O(n) cover (caller-provided) + O(K × w) per
/// segment fetch + O(num_segments × num_states × 8 B) checkpoints
/// inside `stc_embed_streaming_segmented`. When the encoder's full
/// cover materialization is itself the OOM source (long-clip video
/// stego), a follow-on per-GOP-replay adapter (v1.1+ work) would
/// replace it with repeated Pass-1 invocations bounded by each
/// segment's GOP range. That requires encoder restartability at
/// arbitrary GOP boundaries — out of scope for v1.0 (mobile clips
/// are short enough that in-memory cover fits).
pub struct InMemoryCoverFetch<'a> {
    cover_bits: &'a [u8],
    costs: &'a [f32],
    /// Segment size in message blocks; chosen by the caller.
    /// `((m as f64).sqrt().ceil() as usize).max(1)` matches the
    /// inline `stc_embed_segmented` checkpoint cadence and keeps
    /// the two paths bit-exact.
    segment_size_in_blocks: usize,
    /// STC stride `w`.
    w: usize,
    /// Message length `m`.
    m: usize,
}

impl<'a> InMemoryCoverFetch<'a> {
    /// Build an in-memory cover fetcher.
    ///
    /// Returns `None` when `cover_bits.len() != costs.len()`, when
    /// the slices are shorter than `m * w`, or when
    /// `segment_size_in_blocks == 0`. Mirroring the inline
    /// `stc_embed` contract, slices LONGER than `m * w` are allowed;
    /// the trailing slack is simply never visited at this `w`.
    pub fn new(
        cover_bits: &'a [u8],
        costs: &'a [f32],
        m: usize,
        w: usize,
        segment_size_in_blocks: usize,
    ) -> Option<Self> {
        let lengths_agree = cover_bits.len() == costs.len();
        // Streaming-Viterbi walks exactly the first m*w positions.
        let covers_message = cover_bits.len() >= m * w;
        let segment_nonzero = segment_size_in_blocks > 0;
        (lengths_agree && covers_message && segment_nonzero).then(|| Self {
            cover_bits,
            costs,
            segment_size_in_blocks,
            w,
            m,
        })
    }
}
171
172impl<'a> CoverFetch for InMemoryCoverFetch<'a> {
173    fn total_positions(&self) -> usize {
174        // Only the first m * w positions are exercised by streaming-
175        // Viterbi (matches inline `stc_embed_segmented`'s
176        // `for j in 0..n` where n = m*w). The trailing slack in
177        // `cover_bits` is intentionally ignored.
178        self.m * self.w
179    }
180    fn num_segments(&self) -> usize {
181        self.m.div_ceil(self.segment_size_in_blocks)
182    }
183    fn segment_size_in_blocks(&self) -> usize {
184        self.segment_size_in_blocks
185    }
186    fn fetch_segment(&mut self, seg_idx: usize) -> (Vec<u8>, Vec<f32>) {
187        let block_start = seg_idx * self.segment_size_in_blocks;
188        let block_end =
189            ((seg_idx + 1) * self.segment_size_in_blocks).min(self.m);
190        let j_start = block_start * self.w;
191        let j_end = block_end * self.w;
192        (
193            self.cover_bits[j_start..j_end].to_vec(),
194            self.costs[j_start..j_end].to_vec(),
195        )
196    }
197}
198
/// One Phase B segment's output, yielded by
/// `StreamingViterbiPhaseB::step()`.
///
/// Phase 6 routes these per-segment emissions straight into the
/// `PerGopPlanBuilder`, which avoids the O(n) plan materialization
/// that the legacy `stc_embed_streaming_segmented` wrapper has to
/// assemble for its `EmbedResult`.
#[derive(Clone, Debug)]
pub struct PhaseBSegmentEmission {
    /// Segment index within the cover's segment space. Successive
    /// `step()` calls walk `seg_idx` from `num_segments() - 1`
    /// down to `0`.
    pub seg_idx: usize,
    /// Position range within the full cover:
    /// `[j_start, j_start + stego_bits.len())`.
    pub j_start: usize,
    /// Stego bits for this segment; length is `seg_blocks × w`
    /// (zero for an empty trailing segment).
    pub stego_bits: Vec<u8>,
    /// Count of flips (`cover_bit ^ stego_bit`) in this segment,
    /// tallied inline during traceback so the streaming pipeline
    /// never needs a third per-segment cover re-fetch.
    pub num_modifications: usize,
}
222
/// Step-driven streaming-segmented Viterbi STC driver.
///
/// Lifecycle:
/// 1. `new(cover, message, hhat, h, w)` — runs Phase A (forward
///    Viterbi) over the full cover and records one cost-array
///    checkpoint per segment. Returns a driver primed for Phase B
///    traceback.
/// 2. `step()` — processes ONE segment per call, in reverse order
///    (`num_segments-1`, `num_segments-2`, ..., `0`). Each call
///    returns `Ok(Some(PhaseBSegmentEmission))`; `Ok(None)` signals
///    that every segment has been emitted.
/// 3. `total_cost()` / `final_state()` — accessors for
///    post-traceback validation.
///
/// The legacy `stc_embed_streaming_segmented` function is a thin
/// wrapper that loops `step()` and concatenates the emissions into
/// a single `EmbedResult`. The Phase 6 interleaved orchestrator
/// instead drives 4× `StreamingViterbiPhaseB` in round-robin
/// lockstep, streaming emissions directly into `PerGopPlanBuilder`
/// without ever materializing the full per-domain plan.
pub struct StreamingViterbiPhaseB<'a> {
    // Cover source; re-fetched per segment in both phases.
    cover: &'a mut dyn CoverFetch,
    // Message bits to embed (one entry per message block).
    message: &'a [u8],
    // Pre-packed H-hat columns, one per stride position.
    columns: Vec<usize>,
    w: usize,
    m: usize,
    n: usize,
    // Segment size K in message blocks.
    k: usize,
    // 1 << h trellis states.
    num_states: usize,
    num_segments: usize,
    /// Phase A checkpoints — `checkpoints[seg]` is the cost array at
    /// the start of segment `seg`. Phase B copies into `prev_cost`
    /// before recomputing the segment's back-pointers.
    checkpoints: Vec<Vec<f64>>,
    /// Best total cost from Phase A.
    total_cost: f64,
    /// Running entry state across reverse-order Phase B traceback.
    /// Initialized to Phase A's argmin state; ends at 0 after the
    /// last segment for valid embeddings.
    entry_state: usize,
    /// Reusable working buffers, owned by the driver to avoid
    /// per-step allocation churn.
    prev_cost: Vec<f64>,
    curr_cost: Vec<f64>,
    shifted_cost: Vec<f64>,
    /// Cursor: which segment to emit next (counts down). `None`
    /// means all segments have been emitted.
    next_seg: Option<usize>,
    // Phase B progress cadence (positions per progress tick).
    progress_interval_b: usize,
    progress_counter_b: usize,
}
273
impl<'a> StreamingViterbiPhaseB<'a> {
    /// Run Phase A (forward Viterbi + checkpoints). Returns a
    /// driver ready for `step()` calls.
    ///
    /// # Errors
    /// - `cover` reports inconsistent segment geometry or a zero
    ///   segment size;
    /// - `fetch_segment` returns wrongly-sized vectors;
    /// - every trellis path ends at infinite cost (no valid
    ///   embedding);
    /// - the operation is cancelled via `progress::is_cancelled()`.
    pub fn new(
        cover: &'a mut dyn CoverFetch,
        message: &'a [u8],
        hhat_matrix: &[Vec<u32>],
        h: usize,
        w: usize,
    ) -> Result<Self, &'static str> {
        let n = cover.total_positions();
        let m = message.len();
        // NOTE(review): Phase B packs one back-pointer bit per state
        // into a u128 (see `step()`), which implies num_states <= 128,
        // i.e. h <= 7. There is no guard here — presumably callers
        // bound h upstream; confirm before raising h.
        let num_states = 1usize << h;
        let inf = f64::INFINITY;

        let k = cover.segment_size_in_blocks();
        if k == 0 {
            return Err("segment_size_in_blocks must be > 0");
        }
        let num_segments = cover.num_segments();
        if m > 0 && num_segments != m.div_ceil(k) {
            return Err("num_segments inconsistent with m and segment_size_in_blocks");
        }

        // Pre-pack each of the w H-hat columns once; reused at every
        // cover position instead of re-packing per position.
        let columns: Vec<usize> = (0..w)
            .map(|c| hhat::column_packed(hhat_matrix, c) as usize)
            .collect();

        // Split the progress budget between Phase A (here) and
        // Phase B (driven from `step()`).
        let phase_a_steps = STC_PROGRESS_STEPS / 2;
        let progress_interval_a = (n / phase_a_steps as usize).max(1);
        let phase_b_steps = STC_PROGRESS_STEPS - phase_a_steps;
        let progress_interval_b = (n / phase_b_steps as usize).max(1);

        // Viterbi cost arrays: only state 0 is reachable initially.
        let mut prev_cost = vec![inf; num_states];
        prev_cost[0] = 0.0;
        let mut curr_cost = vec![0.0f64; num_states];
        let mut shifted_cost = vec![inf; num_states];

        // checkpoints[seg] = cost array at the entry of segment seg.
        // Phase B re-seeds from these instead of keeping O(n)
        // back-pointers alive across the whole cover.
        let mut checkpoints: Vec<Vec<f64>> = Vec::with_capacity(num_segments.max(1));
        checkpoints.push(prev_cost.clone());

        let mut progress_counter_a = 0usize;
        let mut msg_idx = 0;
        let mut j_global = 0usize;

        for seg in 0..num_segments {
            let block_start = seg * k;
            let block_end = ((seg + 1) * k).min(m);
            let seg_blocks = block_end - block_start;
            let seg_len = seg_blocks * w;
            if seg_len == 0 {
                continue;
            }

            let (seg_bits, seg_costs) = cover.fetch_segment(seg);
            if seg_bits.len() != seg_len || seg_costs.len() != seg_len {
                return Err("fetch_segment returned inconsistent length");
            }

            for local_j in 0..seg_len {
                let j = j_global + local_j;
                let col_idx = j % w;
                let col = columns[col_idx];
                let flip_cost = seg_costs[local_j] as f64;
                let cover_bit = seg_bits[local_j] & 1;

                // Emitting a stego bit equal to the cover bit is
                // free; emitting the opposite bit costs `flip_cost`.
                let (cost_s0, cost_s1) = if cover_bit == 0 {
                    (0.0, flip_cost)
                } else {
                    (flip_cost, 0.0)
                };

                // Relax both incoming edges (emit 0 from state s,
                // emit 1 from state s ^ col) for every state.
                for s in 0..num_states {
                    let cost_0 = prev_cost[s] + cost_s0;
                    let cost_1 = prev_cost[s ^ col] + cost_s1;
                    curr_cost[s] = if cost_1 < cost_0 { cost_1 } else { cost_0 };
                }

                // End of a message block: keep only states whose low
                // bit matches the required message bit, then shift
                // that bit out of the state register.
                if col_idx == w - 1 && msg_idx < m {
                    let required_bit = message[msg_idx] as usize;
                    shifted_cost.fill(inf);
                    for s in 0..num_states {
                        if curr_cost[s] == inf {
                            continue;
                        }
                        if (s & 1) != required_bit {
                            continue;
                        }
                        let s_shifted = s >> 1;
                        if curr_cost[s] < shifted_cost[s_shifted] {
                            shifted_cost[s_shifted] = curr_cost[s];
                        }
                    }
                    std::mem::swap(&mut prev_cost, &mut shifted_cost);
                    msg_idx += 1;

                    // Segment boundary: snapshot the cost array so
                    // Phase B can re-seed this segment's recompute.
                    if msg_idx % k == 0 && msg_idx < m {
                        checkpoints.push(prev_cost.clone());
                    }
                } else {
                    std::mem::swap(&mut prev_cost, &mut curr_cost);
                }

                progress_counter_a += 1;
                if progress_counter_a % progress_interval_a == 0 {
                    if progress::is_cancelled() {
                        return Err("cancelled");
                    }
                    progress::advance();
                }
            }

            j_global += seg_len;
        }

        // Terminal argmin over surviving states gives the best total
        // cost and the state Phase B tracebacks from.
        let (best_state, best_cost) = {
            let mut best = 0usize;
            let mut best_cost = inf;
            for (s, &c) in prev_cost.iter().enumerate() {
                if c < best_cost {
                    best_cost = c;
                    best = s;
                }
            }
            (best, best_cost)
        };
        if best_cost == inf {
            return Err("no valid embedding (all paths Inf)");
        }

        // Phase B walks segments in reverse, starting from the last.
        let next_seg = if num_segments == 0 {
            None
        } else {
            Some(num_segments - 1)
        };

        Ok(Self {
            cover,
            message,
            columns,
            w,
            m,
            n,
            k,
            num_states,
            num_segments,
            checkpoints,
            total_cost: best_cost,
            entry_state: best_state,
            prev_cost,
            curr_cost,
            shifted_cost,
            next_seg,
            progress_interval_b,
            progress_counter_b: 0,
        })
    }

    /// Total cost from Phase A. Constant across the driver's
    /// lifetime once `new()` returns.
    pub fn total_cost(&self) -> f64 {
        self.total_cost
    }

    /// Cover-position count `n`.
    pub fn total_positions(&self) -> usize {
        self.n
    }

    /// Segment count.
    pub fn num_segments(&self) -> usize {
        self.num_segments
    }

    /// Segment size in message blocks (constant `K` across all
    /// segments except possibly the last).
    pub fn segment_size_in_blocks(&self) -> usize {
        self.k
    }

    /// Final running state after the last `step()` call. Should
    /// equal 0 for valid embeddings.
    pub fn final_state(&self) -> usize {
        self.entry_state
    }

    /// Process one Phase B segment in reverse order. Returns
    /// `Ok(Some(emission))` for each segment, or `Ok(None)` once
    /// every segment has been emitted.
    ///
    /// # Errors
    /// Propagates cancellation and rejects `fetch_segment` results
    /// whose length disagrees with the segment geometry.
    pub fn step(&mut self) -> Result<Option<PhaseBSegmentEmission>, &'static str> {
        let Some(seg) = self.next_seg else {
            return Ok(None);
        };

        let block_start = seg * self.k;
        let block_end = ((seg + 1) * self.k).min(self.m);
        let seg_blocks = block_end - block_start;
        let seg_len = seg_blocks * self.w;
        let j_start = block_start * self.w;

        // Empty trailing segment: emit a placeholder and advance.
        if seg_len == 0 {
            self.next_seg = seg.checked_sub(1);
            return Ok(Some(PhaseBSegmentEmission {
                seg_idx: seg,
                j_start,
                stego_bits: Vec::new(),
                num_modifications: 0,
            }));
        }

        let (seg_bits, seg_costs) = self.cover.fetch_segment(seg);
        if seg_bits.len() != seg_len || seg_costs.len() != seg_len {
            return Err("fetch_segment returned inconsistent length (Phase B)");
        }

        // Re-seed the forward pass from this segment's Phase A
        // checkpoint, then recompute back-pointers locally.
        self.prev_cost.copy_from_slice(&self.checkpoints[seg]);

        let inf = f64::INFINITY;
        // One u128 per position, one bit per state (hence the h <= 7
        // limit noted in `new()`): bit s set means state s is best
        // reached by emitting stego bit 1 at this position.
        let mut seg_back_ptr: Vec<u128> = Vec::with_capacity(seg_len);
        let mut seg_msg_idx = block_start;

        for local_j in 0..seg_len {
            let j = j_start + local_j;
            let col_idx = j % self.w;
            let col = self.columns[col_idx];
            let flip_cost = seg_costs[local_j] as f64;
            let cover_bit = seg_bits[local_j] & 1;

            let (cost_s0, cost_s1) = if cover_bit == 0 {
                (0.0, flip_cost)
            } else {
                (flip_cost, 0.0)
            };

            let mut packed_bp = 0u128;

            // Same relaxation as Phase A, but recording which edge
            // (emit-0 vs emit-1) won for each state.
            for s in 0..self.num_states {
                let cost_0 = self.prev_cost[s] + cost_s0;
                let cost_1 = self.prev_cost[s ^ col] + cost_s1;
                if cost_1 < cost_0 {
                    self.curr_cost[s] = cost_1;
                    packed_bp |= 1u128 << s;
                } else {
                    self.curr_cost[s] = cost_0;
                }
            }

            seg_back_ptr.push(packed_bp);

            // End-of-block pruning + shift, mirroring Phase A.
            if col_idx == self.w - 1 && seg_msg_idx < self.m {
                let required_bit = self.message[seg_msg_idx] as usize;
                self.shifted_cost.fill(inf);
                for s in 0..self.num_states {
                    if self.curr_cost[s] == inf {
                        continue;
                    }
                    if (s & 1) != required_bit {
                        continue;
                    }
                    let s_shifted = s >> 1;
                    if self.curr_cost[s] < self.shifted_cost[s_shifted] {
                        self.shifted_cost[s_shifted] = self.curr_cost[s];
                    }
                }
                std::mem::swap(&mut self.prev_cost, &mut self.shifted_cost);
                seg_msg_idx += 1;
            } else {
                std::mem::swap(&mut self.prev_cost, &mut self.curr_cost);
            }

            self.progress_counter_b += 1;
            if self.progress_counter_b.is_multiple_of(self.progress_interval_b) {
                if progress::is_cancelled() {
                    return Err("cancelled");
                }
                progress::advance();
            }
        }

        // Traceback within this segment. Mod count folded inline so
        // Phase 6 doesn't need a third per-segment cover re-fetch.
        let mut stego_bits = vec![0u8; seg_len];
        let mut num_modifications = 0usize;
        let mut s = self.entry_state;
        for local_j in (0..seg_len).rev() {
            let j = j_start + local_j;
            let col_idx = j % self.w;

            // Undo the end-of-block shift: re-insert the message bit
            // that the forward pass shifted out at this position.
            if col_idx == self.w - 1 && (j / self.w) < self.m {
                let msg_bit = self.message[j / self.w] as usize;
                s = ((s << 1) | msg_bit) & (self.num_states - 1);
            }

            let bit = ((seg_back_ptr[local_j] >> s) & 1) as u8;
            stego_bits[local_j] = bit;

            if bit != (seg_bits[local_j] & 1) {
                num_modifications += 1;
            }

            // Emitting a 1 XORed the column into the state going
            // forward; undo it going backward.
            if bit == 1 {
                s ^= self.columns[col_idx];
            }
        }

        // Carry the running state into the next (earlier) segment.
        self.entry_state = s;
        self.next_seg = seg.checked_sub(1);

        Ok(Some(PhaseBSegmentEmission {
            seg_idx: seg,
            j_start,
            stego_bits,
            num_modifications,
        }))
    }
}
590
591/// Streaming-segmented STC embed. Bit-exact equivalent to
592/// `stc_embed_segmented` (verified by
593/// `streaming_matches_inline_segmented_large` test below) but
594/// fetches cover on-demand via the `CoverFetch` callback rather
595/// than reading a pre-materialized slice.
596///
597/// This is now a thin wrapper around `StreamingViterbiPhaseB`:
598/// `new()` runs Phase A, then `step()` is looped to drive Phase B
599/// segment-by-segment in reverse order. The wrapper assembles the
600/// per-segment emissions into a single `EmbedResult` for callers
601/// that want the legacy interface.
602///
603/// For long-form video stego where the assembled `Vec<u8>` plan is
604/// itself O(n) memory, use `StreamingViterbiPhaseB` directly and
605/// route per-segment emissions to a `PerGopPlanBuilder` (Phase 6.2)
606/// instead — that's the path that keeps mobile peak RSS under
607/// ~400 MB on 15-min 1080p clips.
608pub fn stc_embed_streaming_segmented(
609    cover: &mut dyn CoverFetch,
610    message: &[u8],
611    hhat_matrix: &[Vec<u32>],
612    h: usize,
613    w: usize,
614) -> Result<EmbedResult, &'static str> {
615    let mut driver =
616        StreamingViterbiPhaseB::new(cover, message, hhat_matrix, h, w)?;
617    let total_cost = driver.total_cost();
618    let n = driver.total_positions();
619    let m = message.len();
620
621    let mut stego_bits = vec![0u8; n];
622    let mut num_modifications = 0usize;
623
624    while let Some(em) = driver.step()? {
625        let end = em.j_start + em.stego_bits.len();
626        stego_bits[em.j_start..end].copy_from_slice(&em.stego_bits);
627        num_modifications += em.num_modifications;
628    }
629
630    debug_assert_eq!(
631        driver.final_state(),
632        0,
633        "traceback did not return to initial state 0",
634    );
635    debug_assert_eq!(stc_extract(&stego_bits, hhat_matrix, w)[..m], message[..m],);
636
637    Ok(EmbedResult {
638        stego_bits,
639        total_cost,
640        num_modifications,
641    })
642}
643
#[cfg(test)]
mod tests {
    use super::*;
    use crate::stego::stc::embed::stc_embed;
    use crate::stego::stc::extract::stc_extract;
    use crate::stego::stc::hhat::generate_hhat;

    // The InMemoryCoverFetch adapter (Task #24.2) is exercised by
    // streaming_matches_inline_segmented_large below.

    /// Equivalence gate: streaming-segmented STC must produce
    /// byte-exact same `(stego_bits, total_cost, num_modifications)`
    /// as `stc_embed` (which dispatches to `stc_embed_segmented`
    /// for large enough n). Mirrors
    /// `inline_segmented_equivalence_large` in `embed.rs`.
    #[test]
    fn streaming_matches_inline_segmented_large() {
        let h = 4;
        let w = 1;
        let m = 200;
        let n = m * w;
        let mut seed = [0u8; 32];
        seed[..19].copy_from_slice(b"streaming-test-seed");
        let hhat = generate_hhat(h, w, &seed);

        // Deterministic cover bits + costs from a small LCG so the
        // test is reproducible and same inputs feed both paths.
        // (Multiplier/increment are the classic Numerical Recipes
        // 32-bit LCG constants.)
        let mut s: u32 = 0xDEAD_BEEF;
        let mut cover_bits = vec![0u8; n];
        let mut costs = vec![0.0f32; n];
        for j in 0..n {
            s = s.wrapping_mul(1664525).wrapping_add(1013904223);
            cover_bits[j] = ((s >> 16) & 1) as u8;
            // Costs are kept strictly positive (>= 0.01) so free
            // flips can't mask a divergence between the two paths.
            costs[j] = ((s >> 17) & 0xFFF) as f32 / 4096.0 + 0.01;
        }

        let message: Vec<u8> = (0..m).map(|i| (i & 1) as u8).collect();

        let inline = stc_embed(&cover_bits, &costs, &message, &hhat, h, w)
            .expect("inline embed succeeds");

        // Streaming-segmented uses K = ceil(sqrt(m)) — the same
        // cadence as the inline segmented path, to stay bit-exact.
        let k = ((m as f64).sqrt().ceil() as usize).max(1);
        let mut cover = InMemoryCoverFetch::new(&cover_bits, &costs, m, w, k)
            .expect("InMemoryCoverFetch construction");
        let streaming =
            stc_embed_streaming_segmented(&mut cover, &message, &hhat, h, w)
                .expect("streaming embed succeeds");

        assert_eq!(
            inline.stego_bits, streaming.stego_bits,
            "stego bits diverge between inline and streaming",
        );
        assert!(
            (inline.total_cost - streaming.total_cost).abs() < 1e-6,
            "total_cost diverges: inline={} streaming={}",
            inline.total_cost,
            streaming.total_cost,
        );
        assert_eq!(
            inline.num_modifications, streaming.num_modifications,
            "num_modifications diverges",
        );

        // Sanity: the embedded message extracts cleanly.
        let extracted = stc_extract(&streaming.stego_bits, &hhat, w);
        assert_eq!(&extracted[..m], &message[..]);
    }

    /// Empty-message edge case: `m == 0` produces an empty
    /// stego_bits with zero modifications and zero cost.
    #[test]
    fn streaming_empty_message_returns_empty() {
        // Minimal fetcher: zero positions, zero segments.
        struct EmptyCover;
        impl CoverFetch for EmptyCover {
            fn total_positions(&self) -> usize {
                0
            }
            fn num_segments(&self) -> usize {
                0
            }
            fn segment_size_in_blocks(&self) -> usize {
                1
            }
            fn fetch_segment(
                &mut self,
                _seg_idx: usize,
            ) -> (Vec<u8>, Vec<f32>) {
                (Vec::new(), Vec::new())
            }
        }
        let mut cover = EmptyCover;
        let hhat: Vec<Vec<u32>> = vec![vec![0u32]; 4];
        let result =
            stc_embed_streaming_segmented(&mut cover, &[], &hhat, 4, 1)
                .expect("empty embed");
        assert_eq!(result.stego_bits.len(), 0);
        assert_eq!(result.num_modifications, 0);
    }

    /// `InMemoryCoverFetch::new` rejects length mismatches and
    /// zero segment_size_in_blocks. Defensive constructor guard.
    #[test]
    fn in_memory_cover_fetch_validates_inputs() {
        let bits = vec![0u8; 10];
        let costs_short = vec![0.0f32; 9];
        let costs_full = vec![0.0f32; 10];
        // Length mismatch between bits and costs.
        assert!(
            InMemoryCoverFetch::new(&bits, &costs_short, 10, 1, 4).is_none(),
            "expected None on bits/costs length mismatch",
        );
        // m * w exceeds bits.len() (Phase-5-relaxed contract: m*w
        // must fit within bits.len(), can be smaller).
        assert!(
            InMemoryCoverFetch::new(&bits, &costs_full, 11, 1, 4).is_none(),
            "expected None on m*w > bits.len()",
        );
        // m * w smaller than bits.len() is now allowed (matches
        // inline stc_embed contract).
        assert!(
            InMemoryCoverFetch::new(&bits, &costs_full, 5, 1, 4).is_some(),
            "expected Some on m*w < bits.len() (slack-allowed)",
        );
        // Zero segment_size_in_blocks.
        assert!(
            InMemoryCoverFetch::new(&bits, &costs_full, 10, 1, 0).is_none(),
            "expected None on zero segment_size_in_blocks",
        );
        // Valid construction.
        let cover = InMemoryCoverFetch::new(&bits, &costs_full, 10, 1, 4);
        assert!(cover.is_some());
        let cover = cover.unwrap();
        assert_eq!(cover.total_positions(), 10);
        assert_eq!(cover.num_segments(), 3); // ceil(10/4) = 3
        assert_eq!(cover.segment_size_in_blocks(), 4);
    }

    /// Length-mismatch from the callback returns Err rather than
    /// panicking. Defensive guard against buggy CoverFetch impls.
    #[test]
    fn streaming_rejects_inconsistent_segment_lengths() {
        // Fetcher that lies about its segment geometry.
        struct BadCover;
        impl CoverFetch for BadCover {
            fn total_positions(&self) -> usize {
                10
            }
            fn num_segments(&self) -> usize {
                1
            }
            fn segment_size_in_blocks(&self) -> usize {
                10
            }
            fn fetch_segment(
                &mut self,
                _seg_idx: usize,
            ) -> (Vec<u8>, Vec<f32>) {
                // Returns the wrong size (5 instead of 10).
                (vec![0u8; 5], vec![0.0f32; 5])
            }
        }
        let mut cover = BadCover;
        let hhat: Vec<Vec<u32>> = vec![vec![0u32]; 4];
        let message = vec![0u8; 10];
        let result =
            stc_embed_streaming_segmented(&mut cover, &message, &hhat, 4, 1);
        assert!(result.is_err(), "expected Err on inconsistent length");
    }
}
812}