// phasm_core/stego/ghost/optimizer.rs
1// Copyright (c) 2026 Christoph Gaffga
2// SPDX-License-Identifier: GPL-3.0-only
3// https://github.com/cgaffga/phasmcore
4
5//! Cover image preprocessing optimizer for steganographic embedding.
6//!
7//! Subtly modifies raw pixels before JPEG compression to improve embedding
8//! quality and capacity. Each mode applies a different pipeline:
9//!
10//! - **Ghost**: Texture-adaptive 4-stage pipeline (noise injection, micro-contrast,
11//!   unsharp mask, smooth-region dithering) — maximizes non-zero AC coefficients.
12//!   Each stage adapts its strength based on existing texture levels to avoid
13//!   degrading already-optimized images.
14//! - **Armor**: Light pipeline (block-boundary smoothing, DC stabilization) —
15//!   reduces cross-block discontinuities for STDM robustness.
16//! - **Fortress**: Minimal (block-boundary smoothing only) — stabilizes DC
17//!   averages for BA-QIM embedding.
18//!
19//! **"Do no harm" guarantee**: After optimization, the average absolute gradient
20//! (a proxy for JPEG AC energy / stego capacity) is compared to the original.
21//! If the optimizer reduced gradient energy, the original pixels are returned
22//! unchanged. This prevents degradation of pre-optimized images (e.g. images
23//! that already had noise, micro-contrast, and sharpening applied in Photoshop).
24//!
25//! The optimizer is deterministic: given the same pixels, dimensions, config,
26//! and seed, it always produces the same output.
27
28use rand::Rng;
29use rand::SeedableRng;
30use rand_chacha::ChaCha20Rng;
31
32use crate::det_math::det_exp;
33
/// Configuration for the cover image optimizer.
///
/// Given identical `strength`, `seed`, and `mode`, [`optimize_cover`] is
/// fully deterministic, so this struct doubles as the reproducibility key
/// for an optimization run.
pub struct OptimizerConfig {
    /// Strength multiplier (0.0–1.0). Default: 0.85.
    /// 0.0 effectively disables all pixel modifications.
    pub strength: f32,
    /// ChaCha20 seed for deterministic noise generation.
    /// Reusing a seed reproduces the exact same noise/dither pattern.
    pub seed: [u8; 32],
    /// Pipeline variant to apply.
    pub mode: OptimizerMode,
}
43
/// Which optimization pipeline to run.
///
/// Derives `Debug`/`Clone`/`Copy`/`PartialEq`/`Eq` so the mode can be
/// logged, compared, and passed by value like any other small fieldless
/// enum — a backward-compatible addition for callers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizerMode {
    /// Full 4-stage pipeline (noise, micro-contrast, unsharp mask, dithering).
    Ghost,
    /// Light: block-boundary smoothing + DC stabilization.
    Armor,
    /// Minimal: block-boundary smoothing only.
    Fortress,
}
53
54/// Optimize raw RGB pixels for steganographic embedding.
55///
56/// Returns a new pixel buffer with the same dimensions and format (RGB, 3
57/// bytes per pixel, row-major). The modifications are subtle enough to be
58/// imperceptible (PSNR > 44 dB, SSIM > 0.993) but improve embedding
59/// capacity by increasing wavelet energy in smooth regions.
60///
61/// If the optimization would reduce gradient energy (degrade stego capacity),
62/// the original pixels are returned unchanged.
63pub fn optimize_cover(
64    pixels_rgb: &[u8],
65    width: u32,
66    height: u32,
67    config: &OptimizerConfig,
68) -> Vec<u8> {
69    let w = width as usize;
70    let h = height as usize;
71    assert_eq!(pixels_rgb.len(), w * h * 3, "pixel buffer size mismatch");
72
73    let result = match config.mode {
74        OptimizerMode::Ghost => optimize_ghost(pixels_rgb, w, h, config),
75        OptimizerMode::Armor => optimize_armor(pixels_rgb, w, h, config),
76        OptimizerMode::Fortress => optimize_fortress(pixels_rgb, w, h, config),
77    };
78
79    // "Do no harm" safety check: compare gradient energy before and after.
80    // Require at least 1% improvement — marginal changes on already-optimized
81    // images are more likely to hurt (especially SI-UNIWARD rounding errors)
82    // than to help. If the optimizer can't meaningfully improve the image,
83    // the original was already well-prepared.
84    let original_energy = gradient_energy(pixels_rgb, w, h);
85    let optimized_energy = gradient_energy(&result, w, h);
86    if optimized_energy < original_energy * 1.01 {
87        pixels_rgb.to_vec()
88    } else {
89        result
90    }
91}
92
93// ---------------------------------------------------------------------------
94// Texture analysis
95// ---------------------------------------------------------------------------
96
/// Analyze image texture to compute per-stage adaptive strength multipliers.
///
/// Returns `(noise_factor, contrast_factor, sharpen_factor, dither_factor)`,
/// each in [0.0, 1.0]. A factor of 0.0 means "skip this stage entirely"
/// (image already has plenty of that characteristic). 1.0 means "apply fully".
///
/// Degenerate images (empty, or narrower than 2 pixels in either dimension)
/// yield `(1.0, 1.0, 1.0, 1.0)`: no gradients can be measured, so every
/// stage is allowed at full strength.
#[cfg(test)]
fn analyze_texture(luma: &[f64], variance: &[f64], w: usize, h: usize) -> (f64, f64, f64, f64) {
    let n = w * h;
    // Guard degenerate sizes: the contrast loop below divides by
    // (w - 1) * (h - 1), which for a 1-pixel-wide or 1-pixel-tall image is
    // 0, producing 0.0 / 0.0 = NaN that would silently poison the contrast
    // factor's threshold comparisons.
    if n == 0 || w < 2 || h < 2 {
        return (1.0, 1.0, 1.0, 1.0);
    }

    // 1. Noise factor: based on mean variance.
    //    Low variance (< 50) = smooth image, needs lots of noise → factor ~1.0
    //    Medium variance (50-300) = some texture → moderate noise
    //    High variance (> 600) = already noisy → skip noise
    let mean_variance: f64 = variance.iter().sum::<f64>() / n as f64;
    let noise_factor = adaptive_scale(mean_variance, 50.0, 600.0);

    // 2. Contrast factor: based on proportion of pixels with low local contrast.
    //    Compute local gradient magnitude as a contrast proxy.
    let mut low_contrast_count = 0usize;
    for y in 1..h {
        for x in 1..w {
            let idx = y * w + x;
            let dy = (luma[idx] - luma[(y - 1) * w + x]).abs();
            let dx = (luma[idx] - luma[y * w + (x - 1)]).abs();
            let grad = dy + dx;
            if grad < 3.0 {
                low_contrast_count += 1;
            }
        }
    }
    let low_contrast_ratio = low_contrast_count as f64 / ((w - 1) * (h - 1)) as f64;
    // If < 10% of pixels are low-contrast, the image already has good contrast
    // If > 50% of pixels are low-contrast, we need full enhancement
    let contrast_factor = if low_contrast_ratio < 0.10 {
        0.0
    } else if low_contrast_ratio > 0.50 {
        1.0
    } else {
        (low_contrast_ratio - 0.10) / 0.40
    };

    // 3. Sharpen factor: based on high-frequency energy.
    //    Measure average |pixel - local_mean| as proxy for existing sharpening.
    let mut hf_sum = 0.0f64;
    let mut hf_count = 0usize;
    for y in 1..h.saturating_sub(1) {
        for x in 1..w.saturating_sub(1) {
            let idx = y * w + x;
            let center = luma[idx];
            // 4-neighbor average
            let avg = (luma[(y - 1) * w + x] + luma[(y + 1) * w + x]
                + luma[y * w + (x - 1)] + luma[y * w + (x + 1)])
                / 4.0;
            hf_sum += (center - avg).abs();
            hf_count += 1;
        }
    }
    let mean_hf = if hf_count > 0 { hf_sum / hf_count as f64 } else { 0.0 };
    // mean_hf < 1.0 = very smooth, needs sharpening → 1.0
    // mean_hf > 4.0 = already sharp → 0.0
    let sharpen_factor = adaptive_scale(mean_hf, 1.0, 4.0);

    // 4. Dither factor: based on proportion of smooth 4×4 blocks.
    //    If few smooth blocks exist, dithering has little to do.
    //    blocks_x/blocks_y floor-divide, so trailing partial blocks are
    //    ignored and all indexing below stays in bounds.
    let blocks_x = w / 4;
    let blocks_y = h / 4;
    let total_blocks = blocks_x * blocks_y;
    let mut smooth_blocks = 0usize;
    for by in 0..blocks_y {
        for bx in 0..blocks_x {
            let mut block_mean = 0.0f64;
            for dy in 0..4 {
                for dx in 0..4 {
                    block_mean += luma[(by * 4 + dy) * w + (bx * 4 + dx)];
                }
            }
            block_mean /= 16.0;
            // Sum of absolute deviations from the block mean: a cheap
            // flatness metric for the 4×4 block.
            let mut sad = 0.0f64;
            for dy in 0..4 {
                for dx in 0..4 {
                    sad += (luma[(by * 4 + dy) * w + (bx * 4 + dx)] - block_mean).abs();
                }
            }
            if sad < 15.0 {
                smooth_blocks += 1;
            }
        }
    }
    let smooth_ratio = if total_blocks > 0 {
        smooth_blocks as f64 / total_blocks as f64
    } else {
        0.0
    };
    // If < 5% smooth blocks → no dithering needed (image is already textured)
    // If > 30% smooth blocks → full dithering
    let dither_factor = if smooth_ratio < 0.05 {
        0.0
    } else if smooth_ratio > 0.30 {
        1.0
    } else {
        (smooth_ratio - 0.05) / 0.25
    };

    (noise_factor, contrast_factor, sharpen_factor, dither_factor)
}
205
/// Inverse linear ramp: values at or below `low` map to 1.0, values at or
/// above `high` map to 0.0, and values in between fall off linearly.
fn adaptive_scale(value: f64, low: f64, high: f64) -> f64 {
    match value {
        v if v <= low => 1.0,
        v if v >= high => 0.0,
        v => 1.0 - (v - low) / (high - low),
    }
}
216
/// Average absolute luma gradient — a cheap proxy for JPEG AC coefficient
/// density. Larger values mean more texture, which is better for stego.
///
/// Images narrower than 2 pixels in either dimension have no measurable
/// gradients and score 0.0.
fn gradient_energy(pixels: &[u8], w: usize, h: usize) -> f64 {
    if w < 2 || h < 2 {
        return 0.0;
    }
    // Green channel alone approximates luma well enough (it dominates Y)
    // and keeps this a single-channel pass.
    let g = |x: usize, y: usize| pixels[(y * w + x) * 3 + 1] as f64;
    let mut total = 0.0f64;
    for y in 1..h {
        for x in 1..w {
            let center = g(x, y);
            total += (center - g(x - 1, y)).abs() + (center - g(x, y - 1)).abs();
        }
    }
    // (w-1)*(h-1) interior comparisons, guaranteed non-zero by the guard.
    total / ((w - 1) * (h - 1)) as f64
}
241
242// ---------------------------------------------------------------------------
243// Ghost pipeline (4 stages, texture-adaptive)
244// ---------------------------------------------------------------------------
245
/// Ghost pipeline: full 4-stage, texture-adaptive optimization.
///
/// RNG discipline: stage 1 draws exactly two uniforms per pixel and stage 4
/// draws exactly one uniform per 4×4 block, *regardless* of whether the
/// modification is actually applied, so output is bit-reproducible for a
/// given seed independent of image content.
fn optimize_ghost(pixels: &[u8], w: usize, h: usize, config: &OptimizerConfig) -> Vec<u8> {
    let strength = config.strength as f64;
    let mut rng = ChaCha20Rng::from_seed(config.seed);

    // Compute per-pixel texture map: local variance drives ALL stages.
    // Each pixel gets its own strength multiplier based on its neighborhood.
    // Smooth areas (low variance) → full processing.
    // Textured areas (high variance) → minimal/no processing.
    let luma = extract_luma(pixels, w, h);
    let variance = local_variance_5x5(&luma, w, h);

    let mut out = pixels.to_vec();

    // Stage 1: Content-adaptive noise injection (per-pixel)
    // Variance < 50: smooth region → σ up to 1.5 (needs texture)
    // Variance 50–600: transitional → scaled σ
    // Variance > 600: already textured → σ ≈ 0 (skip)
    for y in 0..h {
        for x in 0..w {
            // Always consume RNG for determinism
            let u1: f64 = rng.gen_range(0.0f64..1.0).max(1e-10);
            let u2: f64 = rng.gen_range(0.0f64..1.0);

            let var_val = variance[y * w + x];
            let local_need = adaptive_scale(var_val, 50.0, 600.0);
            // Skip pixels in areas that are already well-textured —
            // even tiny modifications can hurt SI-UNIWARD rounding errors
            if local_need > 0.1 {
            let sigma = 1.5 * local_need + 0.1;
            let scaled_sigma = sigma * strength * local_need;

            if scaled_sigma > 0.05 {
                // Irwin-Hall(2) noise: u1+u2 has triangular distribution on
                // [0, 2] with mean 1, variance 1/6. Center to mean 0 and
                // scale by √6 so variance matches N(0,1).
                //
                // Replaces Box-Muller `(-2*ln(u1)).sqrt() * cos(2π·u2)` —
                // f64::ln + f64::cos lower to non-deterministic libm /
                // Math.* on WASM, breaking cross-platform reproducibility of
                // the optimized cover. Triangular noise is a coarser Gaussian
                // approximation but adequate for the small-amplitude texture
                // injection done here. RNG consumption is unchanged
                // (still 2 uniform draws per pixel).
                const SQRT_6: f64 = 2.449489742783178;
                let gauss = (u1 + u2 - 1.0) * SQRT_6;
                let noise = gauss * scaled_sigma;

                // Same noise value applied to all three channels keeps the
                // perturbation achromatic (luma-only).
                let idx = (y * w + x) * 3;
                for c in 0..3 {
                    let val = out[idx + c] as f64 + noise;
                    out[idx + c] = val.round().clamp(0.0, 255.0) as u8;
                }
            }
            } // local_need > 0.1
        }
    }

    // Stage 2: Micro-contrast enhancement (per-pixel adaptive)
    // Each pixel's enhancement is scaled by how much local contrast it needs.
    // High-variance areas already have good micro-contrast → minimal boost.
    // Reads from a snapshot of stage-1 output so the 3×3 means are not
    // affected by pixels already rewritten in this pass.
    let stage1 = out.clone();
    for y in 0..h {
        for x in 0..w {
            let var_val = variance[y * w + x];
            let local_need = adaptive_scale(var_val, 80.0, 500.0);
            if local_need > 0.1 {
            let local_enhance = 0.15 * strength * local_need;

            if local_enhance > 0.01 {
                let idx = (y * w + x) * 3;
                for c in 0..3 {
                    let (local_mean, pixel_val) = neighborhood_mean_3x3(&stage1, w, h, x, y, c);
                    // Push the pixel away from its local mean to amplify
                    // fine detail without shifting overall brightness.
                    let deviation = pixel_val - local_mean;
                    let enhanced = local_mean + deviation * (1.0 + local_enhance);
                    out[idx + c] = enhanced.round().clamp(0.0, 255.0) as u8;
                }
            }
            } // local_need > 0.1
        }
    }

    // Stage 3: Gentle unsharp mask (per-pixel adaptive)
    // Sharpening only where the area is too smooth. Textured/already-sharp
    // areas are left untouched to avoid halos and over-enhancement.
    let stage2 = out.clone();
    let blurred = gaussian_blur_separable(&stage2, w, h, 1.0);
    for y in 0..h {
        for x in 0..w {
            let var_val = variance[y * w + x];
            let local_need = adaptive_scale(var_val, 100.0, 400.0);
            if local_need > 0.1 {
            let local_sharpen = 0.12 * strength * local_need;

            if local_sharpen > 0.01 {
                let idx = (y * w + x) * 3;
                for c in 0..3 {
                    let i = idx + c;
                    let orig = stage2[i] as f64;
                    let blur = blurred[i] as f64;
                    let diff = orig - blur;
                    // Threshold: only sharpen if difference > 3 (avoid amplifying noise)
                    if diff.abs() > 3.0 {
                        let sharpened = orig + local_sharpen * diff;
                        out[i] = sharpened.round().clamp(0.0, 255.0) as u8;
                    }
                }
            }
            } // local_need > 0.1
        }
    }

    // Stage 4: Smooth-region dithering (per 4×4 block, re-evaluated)
    // Uses CURRENT luma (after stages 1-3) — blocks already textured by
    // earlier stages won't get redundant dithering.
    // Standard 4×4 Bayer ordered-dither matrix (values 0–15).
    let bayer_4x4: [[f64; 4]; 4] = [
        [0.0, 8.0, 2.0, 10.0],
        [12.0, 4.0, 14.0, 6.0],
        [3.0, 11.0, 1.0, 9.0],
        [15.0, 7.0, 13.0, 5.0],
    ];
    // Normalize the Bayer matrix into [-1, 1] so it can be scaled directly
    // by the effective dither amplitude.
    let bayer_norm: [[f64; 4]; 4] = {
        let mut b = [[0.0; 4]; 4];
        for r in 0..4 {
            for c in 0..4 {
                b[r][c] = (bayer_4x4[r][c] / 15.0) * 2.0 - 1.0;
            }
        }
        b
    };

    let current_luma = extract_luma(&out, w, h);
    let blocks_x = w / 4;
    let blocks_y = h / 4;
    for by in 0..blocks_y {
        for bx in 0..blocks_x {
            // Always consume RNG for determinism
            let block_offset: f64 = rng.gen_range(0.0f64..1.0) * 2.0 - 1.0;

            // Compute SAD and mean variance for this 4×4 block
            let mut block_mean = 0.0f64;
            let mut block_var_sum = 0.0f64;
            for dy in 0..4 {
                for dx in 0..4 {
                    let px = bx * 4 + dx;
                    let py = by * 4 + dy;
                    if px < w && py < h {
                        block_mean += current_luma[py * w + px];
                        block_var_sum += variance[py * w + px];
                    }
                }
            }
            block_mean /= 16.0;
            let block_var_avg = block_var_sum / 16.0;
            let mut block_sad = 0.0f64;
            for dy in 0..4 {
                for dx in 0..4 {
                    let px = bx * 4 + dx;
                    let py = by * 4 + dy;
                    if px < w && py < h {
                        block_sad += (current_luma[py * w + px] - block_mean).abs();
                    }
                }
            }

            // Dither only smooth blocks in areas that need texture
            let block_need = adaptive_scale(block_var_avg, 50.0, 400.0);
            let effective_dither = strength * block_need;

            if block_sad < 15.0 && effective_dither > 0.01 {
                for dy in 0..4 {
                    for dx in 0..4 {
                        let px = bx * 4 + dx;
                        let py = by * 4 + dy;
                        if px < w && py < h {
                            // Bayer pattern plus a small per-block random
                            // offset so the dither texture is not globally
                            // periodic.
                            let dither = (bayer_norm[dy][dx] + block_offset * 0.3) * effective_dither;
                            let idx = (py * w + px) * 3;
                            for c in 0..3 {
                                let val = out[idx + c] as f64 + dither;
                                out[idx + c] = val.round().clamp(0.0, 255.0) as u8;
                            }
                        }
                    }
                }
            }
        }
    }

    out
}
435
436// ---------------------------------------------------------------------------
437// Armor pipeline (2 stages)
438// ---------------------------------------------------------------------------
439
440fn optimize_armor(pixels: &[u8], w: usize, h: usize, config: &OptimizerConfig) -> Vec<u8> {
441    let mut out = pixels.to_vec();
442    let strength = config.strength;
443
444    // Stage 1: Gentle low-pass on block boundaries (8×8 grid)
445    smooth_block_boundaries(&mut out, w, h, strength);
446
447    // Stage 2: DC stabilization — subtle smoothing within each 8×8 block
448    // to reduce variance of block-average brightness
449    dc_stabilize(&mut out, w, h, strength);
450
451    out
452}
453
454// ---------------------------------------------------------------------------
455// Fortress pipeline (1 stage)
456// ---------------------------------------------------------------------------
457
458fn optimize_fortress(pixels: &[u8], w: usize, h: usize, config: &OptimizerConfig) -> Vec<u8> {
459    let mut out = pixels.to_vec();
460    smooth_block_boundaries(&mut out, w, h, config.strength);
461    out
462}
463
464// ---------------------------------------------------------------------------
465// Helper functions
466// ---------------------------------------------------------------------------
467
/// Extract luma (Y) from RGB pixels using BT.601 weights:
/// Y = 0.299·R + 0.587·G + 0.114·B.
fn extract_luma(pixels: &[u8], w: usize, h: usize) -> Vec<f64> {
    pixels[..w * h * 3]
        .chunks_exact(3)
        .map(|px| 0.299 * px[0] as f64 + 0.587 * px[1] as f64 + 0.114 * px[2] as f64)
        .collect()
}
479
/// Per-pixel variance over a 5×5 window.
///
/// Windows are clamped at the image edge (border samples are repeated), so
/// every estimate averages exactly 25 samples.
fn local_variance_5x5(luma: &[f64], w: usize, h: usize) -> Vec<f64> {
    let mut out = vec![0.0f64; w * h];
    for y in 0..h {
        for x in 0..w {
            let (mut s, mut s2, mut n) = (0.0f64, 0.0f64, 0.0f64);
            for dy in 0..5usize {
                for dx in 0..5usize {
                    // Clamp the sample coordinate into the image.
                    let sy = (y + dy).saturating_sub(2).min(h - 1);
                    let sx = (x + dx).saturating_sub(2).min(w - 1);
                    let v = luma[sy * w + sx];
                    s += v;
                    s2 += v * v;
                    n += 1.0;
                }
            }
            let mean = s / n;
            // Var = E[X²] − E[X]² (may dip infinitesimally below 0 from
            // floating-point rounding; callers only threshold it).
            out[y * w + x] = s2 / n - mean * mean;
        }
    }
    out
}
504
/// 3×3 neighborhood mean (edge-clamped) and the center pixel value for
/// channel `c`. Returns `(mean, center)`.
fn neighborhood_mean_3x3(pixels: &[u8], w: usize, h: usize, x: usize, y: usize, c: usize) -> (f64, f64) {
    let (mut total, mut samples) = (0.0f64, 0.0f64);
    for dy in 0..3usize {
        for dx in 0..3usize {
            // Clamp the sample coordinate into the image.
            let sy = (y + dy).saturating_sub(1).min(h - 1);
            let sx = (x + dx).saturating_sub(1).min(w - 1);
            total += pixels[(sy * w + sx) * 3 + c] as f64;
            samples += 1.0;
        }
    }
    (total / samples, pixels[(y * w + x) * 3 + c] as f64)
}
520
521/// Separable Gaussian blur with given sigma (truncated at 3σ kernel).
522fn gaussian_blur_separable(pixels: &[u8], w: usize, h: usize, sigma: f64) -> Vec<u8> {
523    let radius = (sigma * 3.0).ceil() as usize;
524    let kernel_size = radius * 2 + 1;
525
526    // Build 1D Gaussian kernel
527    let mut kernel = vec![0.0f64; kernel_size];
528    let mut sum = 0.0f64;
529    for i in 0..kernel_size {
530        let x = i as f64 - radius as f64;
531        // det_exp instead of f64::exp — f64::exp lowers to non-deterministic
532        // libm / Math.exp on WASM, breaking cross-platform bit-exactness of
533        // the optimized cover.
534        let val = det_exp(-x * x / (2.0 * sigma * sigma));
535        kernel[i] = val;
536        sum += val;
537    }
538    for k in kernel.iter_mut() {
539        *k /= sum;
540    }
541
542    let n = w * h * 3;
543    let mut temp = vec![0u8; n];
544    let mut result = vec![0u8; n];
545
546    // Horizontal pass
547    for y in 0..h {
548        for x in 0..w {
549            for c in 0..3 {
550                let mut acc = 0.0f64;
551                for k in 0..kernel_size {
552                    let sx = (x + k).saturating_sub(radius).min(w - 1);
553                    acc += pixels[(y * w + sx) * 3 + c] as f64 * kernel[k];
554                }
555                temp[(y * w + x) * 3 + c] = acc.round().clamp(0.0, 255.0) as u8;
556            }
557        }
558    }
559
560    // Vertical pass
561    for y in 0..h {
562        for x in 0..w {
563            for c in 0..3 {
564                let mut acc = 0.0f64;
565                for k in 0..kernel_size {
566                    let sy = (y + k).saturating_sub(radius).min(h - 1);
567                    acc += temp[(sy * w + x) * 3 + c] as f64 * kernel[k];
568                }
569                result[(y * w + x) * 3 + c] = acc.round().clamp(0.0, 255.0) as u8;
570            }
571        }
572    }
573
574    result
575}
576
/// Smooth block boundaries at 8×8 grid edges.
///
/// Blends each boundary pixel toward the 3-pixel average taken across the
/// edge (previous / boundary / next sample) with blending weight
/// `0.25 * strength`, reducing cross-block discontinuities after JPEG
/// recompression. Vertical edges (columns at multiples of 8) are processed
/// first, then horizontal edges (rows at multiples of 8). At the image
/// border, the missing neighbor falls back to the center value.
fn smooth_block_boundaries(pixels: &mut [u8], w: usize, h: usize, strength: f32) {
    let alpha = 0.25 * strength as f64; // blending weight

    // Vertical block edges (columns at multiples of 8). The range itself
    // guarantees col < w, so no extra bounds check is needed (the original
    // `if col >= w { continue; }` was dead code).
    for col in (8..w).step_by(8) {
        for y in 0..h {
            for c in 0..3 {
                let left = pixels[(y * w + col - 1) * 3 + c] as f64;
                let center = pixels[(y * w + col) * 3 + c] as f64;
                let right = if col + 1 < w {
                    pixels[(y * w + col + 1) * 3 + c] as f64
                } else {
                    center
                };
                let avg = (left + center + right) / 3.0;
                let blended = center + alpha * (avg - center);
                pixels[(y * w + col) * 3 + c] = blended.round().clamp(0.0, 255.0) as u8;
            }
        }
    }

    // Horizontal block edges (rows at multiples of 8)
    for row in (8..h).step_by(8) {
        for x in 0..w {
            for c in 0..3 {
                let above = pixels[((row - 1) * w + x) * 3 + c] as f64;
                let center = pixels[(row * w + x) * 3 + c] as f64;
                let below = if row + 1 < h {
                    pixels[((row + 1) * w + x) * 3 + c] as f64
                } else {
                    center
                };
                let avg = (above + center + below) / 3.0;
                let blended = center + alpha * (avg - center);
                pixels[(row * w + x) * 3 + c] = blended.round().clamp(0.0, 255.0) as u8;
            }
        }
    }
}
623
/// DC stabilization: nudge every pixel slightly toward its 8×8 block mean,
/// reducing the variance of block-average brightness and helping STDM
/// embedding stability. Partial edge blocks are handled by counting only
/// in-bounds pixels.
fn dc_stabilize(pixels: &mut [u8], w: usize, h: usize, strength: f32) {
    let alpha = 0.1 * strength as f64;
    for by in 0..h.div_ceil(8) {
        for bx in 0..w.div_ceil(8) {
            // Pass 1: per-channel mean over the block's in-bounds pixels.
            let mut mean = [0.0f64; 3];
            let mut n = 0.0f64;
            for dy in 0..8 {
                for dx in 0..8 {
                    let (px, py) = (bx * 8 + dx, by * 8 + dy);
                    if px < w && py < h {
                        for c in 0..3 {
                            mean[c] += pixels[(py * w + px) * 3 + c] as f64;
                        }
                        n += 1.0;
                    }
                }
            }
            if n > 0.0 {
                for c in 0..3 {
                    mean[c] /= n;
                }
            }

            // Pass 2: blend each pixel toward the block mean by `alpha`.
            for dy in 0..8 {
                for dx in 0..8 {
                    let (px, py) = (bx * 8 + dx, by * 8 + dy);
                    if px < w && py < h {
                        let base = (py * w + px) * 3;
                        for c in 0..3 {
                            let v = pixels[base + c] as f64;
                            let blended = v + alpha * (mean[c] - v);
                            pixels[base + c] = blended.round().clamp(0.0, 255.0) as u8;
                        }
                    }
                }
            }
        }
    }
}
672
// Unit tests covering determinism, the "do no harm" guarantee,
// imperceptibility (PSNR), all three pipeline modes, and the texture
// analyzer's adaptive factors.
#[cfg(test)]
mod tests {
    use super::*;

    // Same pixels + same config (incl. seed) must give identical output.
    #[test]
    fn ghost_deterministic() {
        let w = 64;
        let h = 64;
        let pixels = vec![128u8; w * h * 3];
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [42u8; 32],
            mode: OptimizerMode::Ghost,
        };
        let out1 = optimize_cover(&pixels, w as u32, h as u32, &config);
        let out2 = optimize_cover(&pixels, w as u32, h as u32, &config);
        assert_eq!(out1, out2, "optimizer must be deterministic");
    }

    #[test]
    fn ghost_modifies_smooth_pixels() {
        // A flat image (all same value) should be modified — lots of room to improve
        let w = 64;
        let h = 64;
        let pixels = vec![128u8; w * h * 3];
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [42u8; 32],
            mode: OptimizerMode::Ghost,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);
        assert_ne!(pixels, out, "optimizer should modify smooth pixels");
    }

    #[test]
    fn do_no_harm_textured_image() {
        // Create an already well-textured image (simulating pre-optimized photo).
        // The optimizer should return it unchanged if it can't improve gradient energy.
        let w = 128;
        let h = 128;
        let mut pixels = vec![0u8; w * h * 3];
        // High-variance random-ish texture
        for y in 0..h {
            for x in 0..w {
                let idx = (y * w + x) * 3;
                let hash = ((x * 31 + y * 97 + 7) % 256) as u8;
                pixels[idx] = hash;
                pixels[idx + 1] = hash.wrapping_mul(3).wrapping_add(17);
                pixels[idx + 2] = hash.wrapping_mul(7).wrapping_add(53);
            }
        }
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [42u8; 32],
            mode: OptimizerMode::Ghost,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);

        // Either unchanged (do-no-harm kicked in) or gradient energy increased
        let orig_energy = gradient_energy(&pixels, w, h);
        let opt_energy = gradient_energy(&out, w, h);
        assert!(
            opt_energy >= orig_energy,
            "optimizer must not reduce gradient energy: original={orig_energy:.2}, optimized={opt_energy:.2}"
        );
    }

    // Imperceptibility check on a synthetic gradient+hash image. The 28 dB
    // bound is deliberately looser than the >44 dB documented for real
    // photos, since synthetic patterns trigger heavier processing.
    #[test]
    fn psnr_above_threshold() {
        let w = 128;
        let h = 128;
        let mut pixels = vec![0u8; w * h * 3];
        for y in 0..h {
            for x in 0..w {
                let idx = (y * w + x) * 3;
                let base_r = (x * 255 / w) as u8;
                let base_g = (y * 255 / h) as u8;
                let base_b = ((x + y) * 255 / (w + h)) as u8;
                let hash = ((x * 31 + y * 97 + 7) % 30) as u8;
                pixels[idx] = base_r.wrapping_add(hash);
                pixels[idx + 1] = base_g.wrapping_add(hash.wrapping_mul(3));
                pixels[idx + 2] = base_b.wrapping_add(hash.wrapping_mul(7));
            }
        }
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [42u8; 32],
            mode: OptimizerMode::Ghost,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);

        let mse: f64 = pixels
            .iter()
            .zip(out.iter())
            .map(|(&a, &b)| {
                let d = a as f64 - b as f64;
                d * d
            })
            .sum::<f64>()
            / pixels.len() as f64;

        let psnr = if mse > 0.0 {
            10.0 * (255.0 * 255.0 / mse).log10()
        } else {
            f64::INFINITY
        };

        assert!(
            psnr > 28.0,
            "PSNR should be > 28 dB even for synthetic images, got {psnr:.1} dB"
        );
    }

    // Smoke tests: the lighter pipelines run and preserve buffer size.
    #[test]
    fn armor_mode_runs() {
        let w = 32;
        let h = 32;
        let pixels = vec![128u8; w * h * 3];
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [0u8; 32],
            mode: OptimizerMode::Armor,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);
        assert_eq!(out.len(), pixels.len());
    }

    #[test]
    fn fortress_mode_runs() {
        let w = 32;
        let h = 32;
        let pixels = vec![128u8; w * h * 3];
        let config = OptimizerConfig {
            strength: 0.85,
            seed: [0u8; 32],
            mode: OptimizerMode::Fortress,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);
        assert_eq!(out.len(), pixels.len());
    }

    // strength = 0.0 disables noise/contrast/sharpen/dither amplitudes;
    // only rounding may leave ±1 residue.
    #[test]
    fn zero_strength_minimal_change() {
        let w = 32;
        let h = 32;
        let pixels: Vec<u8> = (0..w * h * 3).map(|i| (i % 256) as u8).collect();
        let config = OptimizerConfig {
            strength: 0.0,
            seed: [42u8; 32],
            mode: OptimizerMode::Ghost,
        };
        let out = optimize_cover(&pixels, w as u32, h as u32, &config);

        let max_diff: u8 = pixels
            .iter()
            .zip(out.iter())
            .map(|(&a, &b)| a.abs_diff(b))
            .max()
            .unwrap_or(0);
        assert!(
            max_diff <= 1,
            "zero strength should cause near-zero changes, max diff = {max_diff}"
        );
    }

    #[test]
    fn texture_analysis_smooth_image() {
        let w = 64;
        let h = 64;
        let pixels = vec![128u8; w * h * 3]; // perfectly flat
        let luma = extract_luma(&pixels, w, h);
        let variance = local_variance_5x5(&luma, w, h);
        let (noise, contrast, sharpen, dither) = analyze_texture(&luma, &variance, w, h);
        // Flat image should get maximum treatment on all stages
        assert!(noise > 0.9, "flat image should get full noise: {noise}");
        assert!(contrast > 0.9, "flat image should get full contrast: {contrast}");
        assert!(sharpen > 0.9, "flat image should get full sharpen: {sharpen}");
        assert!(dither > 0.5, "flat image should get significant dither: {dither}");
    }

    #[test]
    fn texture_analysis_noisy_image() {
        let w = 64;
        let h = 64;
        let mut pixels = vec![0u8; w * h * 3];
        // High-variance texture everywhere
        for i in 0..pixels.len() {
            pixels[i] = ((i * 31 + 7) % 256) as u8;
        }
        let luma = extract_luma(&pixels, w, h);
        let variance = local_variance_5x5(&luma, w, h);
        let (noise, _contrast, _sharpen, dither) = analyze_texture(&luma, &variance, w, h);
        // Already-textured image should get minimal treatment
        assert!(noise < 0.3, "textured image should get little noise: {noise}");
        assert!(dither < 0.3, "textured image should get little dither: {dither}");
    }
}