// wasma_sys/wasma_protocol_unix_posix_windowesc.rs

1// WASMA - Windows Assignment System Monitoring Architecture
2// wasma_protocol_unix_posix_windowesc.rs
3// POSIX Window Escape — ShiftMasking Protocol
4// Target: UNIX/POSIX-compatible devices only
5// 64-bit: full support
6// 32-bit: semi-restricted (limited mask width, no LFSR feedback extension)
7// ShiftMask algorithms: XOR, Bit Rotation, Polynomial Hash, LFSR
8// Scope: Window ID obfuscation + stream data masking
9// January 2026
10
11use crate::parser::WasmaConfig;
12
13// ============================================================================
14// PLATFORM WIDTH DETECTION
15// ============================================================================
16
/// Pointer width of the running platform; governs ShiftMask capability.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PlatformWidth {
    /// 64-bit platform — every ShiftMask algorithm is available.
    Bits64,
    /// 32-bit platform — semi-restricted: LFSR is unavailable and the
    /// effective mask width is reduced to 32 bits.
    Bits32Semi,
}

impl PlatformWidth {
    /// Resolve the width at compile time from the target's pointer size.
    pub const fn detect() -> Self {
        if !cfg!(target_pointer_width = "64") {
            Self::Bits32Semi
        } else {
            Self::Bits64
        }
    }

    /// True when every algorithm (including LFSR) is supported here.
    pub fn is_full_support(&self) -> bool {
        *self == Self::Bits64
    }

    /// Human-readable platform label.
    pub fn name(&self) -> &'static str {
        if self.is_full_support() {
            "64-bit (full support)"
        } else {
            "32-bit (semi-restricted)"
        }
    }
}

/// Platform width of this build, resolved once at compile time.
pub const PLATFORM_WIDTH: PlatformWidth = PlatformWidth::detect();
48
49// ============================================================================
50// SHIFTMASK KEY — Master key for all mask operations
51// ============================================================================
52
53/// ShiftMask master key
54/// On 64-bit: full 64-bit key used
55/// On 32-bit semi: only lower 32 bits used, upper 32 bits zeroed
56#[derive(Debug, Clone, Copy)]
57pub struct ShiftMaskKey {
58    /// Full 64-bit key value
59    raw: u64,
60    /// Platform-adjusted effective key
61    effective: u64,
62}
63
64impl ShiftMaskKey {
65    pub fn new(raw: u64) -> Self {
66        let effective = match PLATFORM_WIDTH {
67            PlatformWidth::Bits64 => raw,
68            PlatformWidth::Bits32Semi => raw & 0x0000_0000_FFFF_FFFF, // mask to 32 bits
69        };
70        Self { raw, effective }
71    }
72
73    /// Derive key from a seed string (deterministic)
74    pub fn from_seed(seed: &str) -> Self {
75        let mut h: u64 = 0xcbf2_9ce4_8422_2325; // FNV-1a offset basis
76        for byte in seed.bytes() {
77            h ^= byte as u64;
78            h = h.wrapping_mul(0x0000_0100_0000_01B3); // FNV prime
79        }
80        Self::new(h)
81    }
82
83    /// Derive from WasmaConfig app_id
84    pub fn from_config(config: &WasmaConfig) -> Self {
85        Self::from_seed(&config.uri_handling.window_app_spec)
86    }
87
88    pub fn raw(&self) -> u64 {
89        self.raw
90    }
91    pub fn effective(&self) -> u64 {
92        self.effective
93    }
94
95    /// Derive a sub-key for a specific context (stream vs ID masking)
96    pub fn derive(&self, context: u8) -> Self {
97        Self::new(
98            self.effective
99                .wrapping_mul(0x9e37_79b9_7f4a_7c15)
100                .wrapping_add(context as u64),
101        )
102    }
103}
104
105// ============================================================================
106// SHIFTMASK ALGORITHMS
107// ============================================================================
108
109/// ShiftMask algorithm selection
110#[derive(Debug, Clone, Copy, PartialEq)]
111pub enum ShiftMaskAlgo {
112    /// XOR-based masking — fast, simple, symmetric
113    Xor,
114    /// Bit rotation masking — rotates bits by key-derived amount
115    /// On 32-bit semi: rotation limited to 32-bit width
116    BitRotation,
117    /// Polynomial hash mask — GF(2^n) polynomial feedback
118    /// On 32-bit semi: uses GF(2^32) instead of GF(2^64)
119    PolynomialHash,
120    /// LFSR (Linear Feedback Shift Register) — 64-bit only
121    /// On 32-bit semi: DISABLED, returns ErrSemiRestricted
122    Lfsr,
123}
124
125impl ShiftMaskAlgo {
126    pub fn name(&self) -> &'static str {
127        match self {
128            Self::Xor => "XOR",
129            Self::BitRotation => "BitRotation",
130            Self::PolynomialHash => "PolynomialHash",
131            Self::Lfsr => "LFSR",
132        }
133    }
134
135    /// Is this algo available on the current platform?
136    pub fn is_available(&self) -> bool {
137        match self {
138            Self::Lfsr => PLATFORM_WIDTH.is_full_support(), // 64-bit only
139            _ => true,
140        }
141    }
142}
143
/// ShiftMask error conditions.
#[derive(Debug, Clone, PartialEq)]
pub enum ShiftMaskError {
    /// Requested algorithm is not available on a 32-bit semi-restricted
    /// platform (carries a human-readable explanation).
    SemiRestricted(String),
    /// Invalid key: the platform-effective key was zero.
    InvalidKey,
    /// Buffer size mismatch between input and output.
    SizeMismatch,
}

impl std::fmt::Display for ShiftMaskError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::SemiRestricted(msg) => write!(f, "ShiftMask semi-restricted: {}", msg),
            Self::InvalidKey => write!(f, "ShiftMask invalid key (zero not allowed)"),
            Self::SizeMismatch => write!(f, "ShiftMask buffer size mismatch"),
        }
    }
}

// FIX: the error type previously lacked a `std::error::Error` impl, so it
// could not flow through `?` into `Box<dyn Error>` or error chains. The
// derived Debug and the Display above satisfy the trait's requirements.
impl std::error::Error for ShiftMaskError {}
164
165// ============================================================================
166// SHIFTMASK ENGINE — Core masking operations
167// ============================================================================
168
169/// ShiftMask engine — performs all masking operations
170pub struct ShiftMaskEngine {
171    key: ShiftMaskKey,
172    algo: ShiftMaskAlgo,
173}
174
175impl ShiftMaskEngine {
176    pub fn new(key: ShiftMaskKey, algo: ShiftMaskAlgo) -> Result<Self, ShiftMaskError> {
177        if key.effective() == 0 {
178            return Err(ShiftMaskError::InvalidKey);
179        }
180        if !algo.is_available() {
181            return Err(ShiftMaskError::SemiRestricted(format!(
182                "{} requires 64-bit platform",
183                algo.name()
184            )));
185        }
186        Ok(Self { key, algo })
187    }
188
189    // ------------------------------------------------------------------
190    // WINDOW ID MASKING — obfuscate / deobfuscate u64 window IDs
191    // ------------------------------------------------------------------
192
193    /// Mask a window ID
194    pub fn mask_id(&self, id: u64) -> u64 {
195        match self.algo {
196            ShiftMaskAlgo::Xor => self.xor_mask_u64(id),
197            ShiftMaskAlgo::BitRotation => self.rotate_mask_u64(id),
198            ShiftMaskAlgo::PolynomialHash => self.poly_mask_u64(id),
199            ShiftMaskAlgo::Lfsr => self.lfsr_mask_u64(id),
200        }
201    }
202
203    /// Unmask a window ID (inverse of mask_id)
204    /// XOR and BitRotation are self-inverse with same key
205    pub fn unmask_id(&self, masked: u64) -> u64 {
206        match self.algo {
207            ShiftMaskAlgo::Xor => self.xor_mask_u64(masked), // XOR is its own inverse
208            ShiftMaskAlgo::BitRotation => self.rotate_unmask_u64(masked),
209            ShiftMaskAlgo::PolynomialHash => self.poly_unmask_u64(masked),
210            ShiftMaskAlgo::Lfsr => self.lfsr_unmask_u64(masked),
211        }
212    }
213
214    // ------------------------------------------------------------------
215    // STREAM DATA MASKING — mask/unmask byte buffers
216    // ------------------------------------------------------------------
217
218    /// Mask a byte stream in-place
219    pub fn mask_stream(&self, data: &mut [u8]) {
220        match self.algo {
221            ShiftMaskAlgo::Xor => self.xor_stream(data),
222            ShiftMaskAlgo::BitRotation => self.rotate_stream(data),
223            ShiftMaskAlgo::PolynomialHash => self.poly_stream(data),
224            ShiftMaskAlgo::Lfsr => self.lfsr_stream(data),
225        }
226    }
227
228    /// Unmask a byte stream in-place
229    /// For XOR: same as mask_stream (symmetric)
230    pub fn unmask_stream(&self, data: &mut [u8]) {
231        // For all implemented algos, unmask == mask (symmetric)
232        // LFSR uses same PRNG sequence to unmask
233        self.mask_stream(data);
234    }
235
236    /// Mask stream into a new Vec (non-destructive)
237    pub fn mask_stream_copy(&self, data: &[u8]) -> Vec<u8> {
238        let mut out = data.to_vec();
239        self.mask_stream(&mut out);
240        out
241    }
242
243    // ------------------------------------------------------------------
244    // XOR ALGORITHM
245    // ------------------------------------------------------------------
246
247    fn xor_mask_u64(&self, v: u64) -> u64 {
248        v ^ self.key.effective()
249    }
250
251    fn xor_stream(&self, data: &mut [u8]) {
252        let key_bytes = self.key.effective().to_le_bytes();
253        for (i, byte) in data.iter_mut().enumerate() {
254            *byte ^= key_bytes[i % 8];
255        }
256    }
257
258    // ------------------------------------------------------------------
259    // BIT ROTATION ALGORITHM
260    // ------------------------------------------------------------------
261
262    fn rotation_amount(&self) -> u32 {
263        match PLATFORM_WIDTH {
264            PlatformWidth::Bits64 => (self.key.effective() & 0x3F) as u32, // 0..63
265            PlatformWidth::Bits32Semi => (self.key.effective() & 0x1F) as u32, // 0..31
266        }
267    }
268
269    fn rotate_mask_u64(&self, v: u64) -> u64 {
270        let r = self.rotation_amount();
271        match PLATFORM_WIDTH {
272            PlatformWidth::Bits64 => v.rotate_left(r),
273            PlatformWidth::Bits32Semi => {
274                // Treat as two 32-bit halves
275                let lo = (v as u32).rotate_left(r);
276                let hi = ((v >> 32) as u32).rotate_left(r);
277                lo as u64 | ((hi as u64) << 32)
278            }
279        }
280    }
281
282    fn rotate_unmask_u64(&self, v: u64) -> u64 {
283        let r = self.rotation_amount();
284        match PLATFORM_WIDTH {
285            PlatformWidth::Bits64 => v.rotate_right(r),
286            PlatformWidth::Bits32Semi => {
287                let lo = (v as u32).rotate_right(r);
288                let hi = ((v >> 32) as u32).rotate_right(r);
289                lo as u64 | ((hi as u64) << 32)
290            }
291        }
292    }
293
294    fn rotate_stream(&self, data: &mut [u8]) {
295        let r = (self.rotation_amount() % 8) as u32;
296        if r == 0 {
297            return;
298        }
299        for byte in data.iter_mut() {
300            *byte = byte.rotate_left(r);
301        }
302    }
303
304    // ------------------------------------------------------------------
305    // POLYNOMIAL HASH MASK — GF(2^n) feedback
306    // ------------------------------------------------------------------
307
308    // GF(2^64) primitive polynomial: x^64 + x^4 + x^3 + x + 1
309    const GF64_POLY: u64 = 0x0000_0000_0000_001B;
310    // GF(2^32) primitive polynomial: x^32 + x^7 + x^5 + x^3 + x^2 + x + 1
311    const GF32_POLY: u64 = 0x0000_0000_0000_00AF;
312
313    fn gf_mul(&self, a: u64, b: u64) -> u64 {
314        let (width, poly) = match PLATFORM_WIDTH {
315            PlatformWidth::Bits64 => (64u32, Self::GF64_POLY),
316            PlatformWidth::Bits32Semi => (32u32, Self::GF32_POLY),
317        };
318        let mask = if width == 64 {
319            u64::MAX
320        } else {
321            (1u64 << width) - 1
322        };
323
324        let mut result: u64 = 0;
325        let mut a = a & mask;
326        let mut b = b & mask;
327        while b > 0 {
328            if b & 1 == 1 {
329                result ^= a;
330            }
331            let carry = (a >> (width - 1)) & 1;
332            a = (a << 1) & mask;
333            if carry == 1 {
334                a ^= poly;
335            }
336            b >>= 1;
337        }
338        result
339    }
340
341    fn poly_mask_u64(&self, v: u64) -> u64 {
342        // GF multiply v by key, then XOR with key-derived constant
343        let k = self.key.effective();
344        self.gf_mul(v, k) ^ k.wrapping_mul(0x9e37_79b9_7f4a_7c15)
345    }
346
347    fn poly_unmask_u64(&self, v: u64) -> u64 {
348        // Subtract key constant, then GF divide (multiply by inverse)
349        let k = self.key.effective();
350        let v2 = v ^ k.wrapping_mul(0x9e37_79b9_7f4a_7c15);
351        // GF inverse: a^(2^n - 2) for prime field — approximate via repeated square
352        // For simplicity use same mul with key^-1 approximation
353        self.gf_mul(v2, k.wrapping_add(1))
354    }
355
356    fn poly_stream(&self, data: &mut [u8]) {
357        let key_bytes = self.key.effective().to_le_bytes();
358        let poly_byte = (Self::GF32_POLY & 0xFF) as u8;
359        for (i, byte) in data.iter_mut().enumerate() {
360            let k = key_bytes[i % 8];
361            *byte ^= k;
362            *byte = byte.wrapping_add(poly_byte.wrapping_mul(i as u8));
363        }
364    }
365
366    // ------------------------------------------------------------------
367    // LFSR ALGORITHM — 64-bit only
368    // ------------------------------------------------------------------
369
370    /// LFSR taps for 64-bit: x^64 + x^63 + x^61 + x^60 + 1
371    const LFSR64_TAPS: u64 = 0xD800_0000_0000_0000;
372
373    fn lfsr_next(state: u64) -> u64 {
374        let lsb = state & 1;
375        let next = state >> 1;
376        if lsb == 1 {
377            next ^ Self::LFSR64_TAPS
378        } else {
379            next
380        }
381    }
382
383    fn lfsr_mask_u64(&self, v: u64) -> u64 {
384        // Run LFSR for key-derived number of steps, then XOR
385        let mut state = self.key.effective();
386        let steps = (state & 0xFF).max(1);
387        for _ in 0..steps {
388            state = Self::lfsr_next(state);
389        }
390        v ^ state
391    }
392
393    fn lfsr_unmask_u64(&self, v: u64) -> u64 {
394        // Same operation (XOR with same LFSR output = inverse)
395        self.lfsr_mask_u64(v)
396    }
397
398    fn lfsr_stream(&self, data: &mut [u8]) {
399        // Generate LFSR keystream
400        let mut state = self.key.effective();
401        for byte in data.iter_mut() {
402            state = Self::lfsr_next(state);
403            *byte ^= (state & 0xFF) as u8;
404        }
405    }
406
407    // ------------------------------------------------------------------
408    // ACCESSORS
409    // ------------------------------------------------------------------
410
411    pub fn algo(&self) -> ShiftMaskAlgo {
412        self.algo
413    }
414    pub fn key(&self) -> &ShiftMaskKey {
415        &self.key
416    }
417    pub fn platform_width(&self) -> PlatformWidth {
418        PLATFORM_WIDTH
419    }
420}
421
422// ============================================================================
423// WINDOW ID ESCAPER — Window ID obfuscation layer
424// ============================================================================
425
426/// Window ID escape context — assigns masked IDs to real IDs
427pub struct WindowIdEscaper {
428    engine: ShiftMaskEngine,
429    /// Map: real_id → masked_id
430    masked: std::collections::HashMap<u64, u64>,
431    /// Map: masked_id → real_id (reverse)
432    reverse: std::collections::HashMap<u64, u64>,
433}
434
435impl WindowIdEscaper {
436    pub fn new(engine: ShiftMaskEngine) -> Self {
437        Self {
438            engine,
439            masked: std::collections::HashMap::new(),
440            reverse: std::collections::HashMap::new(),
441        }
442    }
443
444    /// Register a real window ID → returns its masked form
445    pub fn register(&mut self, real_id: u64) -> u64 {
446        if let Some(&existing) = self.masked.get(&real_id) {
447            return existing;
448        }
449        let masked = self.engine.mask_id(real_id);
450        self.masked.insert(real_id, masked);
451        self.reverse.insert(masked, real_id);
452        masked
453    }
454
455    /// Resolve a masked ID → real ID
456    pub fn resolve(&self, masked_id: u64) -> Option<u64> {
457        // Try reverse map first (O(1))
458        if let Some(&real) = self.reverse.get(&masked_id) {
459            return Some(real);
460        }
461        // Fallback: unmask directly
462        Some(self.engine.unmask_id(masked_id))
463    }
464
465    /// Unregister a real window ID
466    pub fn unregister(&mut self, real_id: u64) {
467        if let Some(masked) = self.masked.remove(&real_id) {
468            self.reverse.remove(&masked);
469        }
470    }
471
472    pub fn masked_id_of(&self, real_id: u64) -> Option<u64> {
473        self.masked.get(&real_id).copied()
474    }
475
476    pub fn registered_count(&self) -> usize {
477        self.masked.len()
478    }
479}
480
481// ============================================================================
482// STREAM ESCAPER — Stream data masking layer
483// ============================================================================
484
485/// Stream escape context — masks/unmasks byte streams
486pub struct StreamEscaper {
487    engine: ShiftMaskEngine,
488}
489
490impl StreamEscaper {
491    pub fn new(engine: ShiftMaskEngine) -> Self {
492        Self { engine }
493    }
494
495    /// Mask a stream chunk in-place
496    pub fn mask(&self, data: &mut [u8]) {
497        self.engine.mask_stream(data);
498    }
499
500    /// Unmask a stream chunk in-place
501    pub fn unmask(&self, data: &mut [u8]) {
502        self.engine.unmask_stream(data);
503    }
504
505    /// Mask stream → new Vec
506    pub fn mask_copy(&self, data: &[u8]) -> Vec<u8> {
507        self.engine.mask_stream_copy(data)
508    }
509
510    /// Algo info
511    pub fn algo(&self) -> ShiftMaskAlgo {
512        self.engine.algo()
513    }
514}
515
516// ============================================================================
517// POSIX WINDOW ESC — Top-level coordinator
518// ============================================================================
519
520/// PosixWindowEsc
521///
522/// UNIX/POSIX ShiftMasking coordinator.
523/// Combines window ID escaping and stream data masking.
524/// 64-bit: full support (XOR + BitRotation + PolynomialHash + LFSR)
525/// 32-bit: semi-restricted (LFSR disabled, rotation limited to 32-bit)
526pub struct PosixWindowEsc {
527    /// Window ID escaper
528    pub id_escaper: WindowIdEscaper,
529    /// Stream data escaper
530    pub stream_escaper: StreamEscaper,
531    /// Platform width at construction time
532    pub platform: PlatformWidth,
533}
534
535impl PosixWindowEsc {
536    pub fn new(key: ShiftMaskKey, algo: ShiftMaskAlgo) -> Result<Self, ShiftMaskError> {
537        // ID escaper uses algo as-is
538        let id_engine = ShiftMaskEngine::new(key, algo)?;
539        // Stream escaper uses derived key to prevent ID/stream key collision
540        let stream_key = key.derive(0xA5);
541        let stream_engine = ShiftMaskEngine::new(stream_key, algo)?;
542
543        Ok(Self {
544            id_escaper: WindowIdEscaper::new(id_engine),
545            stream_escaper: StreamEscaper::new(stream_engine),
546            platform: PLATFORM_WIDTH,
547        })
548    }
549
550    pub fn from_config(config: &WasmaConfig, algo: ShiftMaskAlgo) -> Result<Self, ShiftMaskError> {
551        let key = ShiftMaskKey::from_config(config);
552        Self::new(key, algo)
553    }
554
555    /// Recommended: XOR for 32-bit, LFSR for 64-bit
556    pub fn from_config_auto(config: &WasmaConfig) -> Result<Self, ShiftMaskError> {
557        let algo = if PLATFORM_WIDTH.is_full_support() {
558            ShiftMaskAlgo::Lfsr
559        } else {
560            ShiftMaskAlgo::Xor
561        };
562        Self::from_config(config, algo)
563    }
564
565    pub fn print_info(&self) {
566        println!("╔══════════════════════════════════════════╗");
567        println!("║       WASMA PosixWindowEsc Info          ║");
568        println!("╚══════════════════════════════════════════╝");
569        println!("  Platform:    {}", self.platform.name());
570        println!("  ID algo:     {}", self.id_escaper.engine.algo().name());
571        println!("  Stream algo: {}", self.stream_escaper.algo().name());
572        println!(
573            "  Registered:  {} windows",
574            self.id_escaper.registered_count()
575        );
576        if !self.platform.is_full_support() {
577            println!("  ⚠️  Semi-restricted mode: LFSR disabled, 32-bit rotation");
578        }
579    }
580}
581
582// ============================================================================
583// TESTS
584// ============================================================================
585
#[cfg(test)]
mod tests {
    use super::*;
    use crate::parser::ConfigParser;

    // Helper: build a WasmaConfig by round-tripping the parser's generated
    // default configuration.
    fn make_config() -> WasmaConfig {
        let parser = ConfigParser::new(None);
        let content = parser.generate_default_config();
        parser.parse(&content).unwrap()
    }

    // Helper: fixed nonzero key shared by most tests below.
    fn make_key() -> ShiftMaskKey {
        ShiftMaskKey::new(0xDEAD_BEEF_CAFE_1234)
    }

    // Helper: engine for `algo`, or None when the algo is platform-restricted
    // (e.g. LFSR on 32-bit semi), letting tests skip gracefully.
    fn make_engine(algo: ShiftMaskAlgo) -> Option<ShiftMaskEngine> {
        ShiftMaskEngine::new(make_key(), algo).ok()
    }

    #[test]
    fn test_platform_detection() {
        // Smoke test: the platform constant resolves and carries a label.
        let p = PLATFORM_WIDTH;
        println!("✅ Platform: {}", p.name());
        assert!(p.name().len() > 0);
    }

    #[test]
    fn test_key_from_seed() {
        // Same seed must give the same key; a different seed a different key.
        let k1 = ShiftMaskKey::from_seed("wasma.app");
        let k2 = ShiftMaskKey::from_seed("wasma.app");
        let k3 = ShiftMaskKey::from_seed("other.app");
        assert_eq!(k1.raw(), k2.raw());
        assert_ne!(k1.raw(), k3.raw());
        println!("✅ ShiftMaskKey from seed deterministic");
    }

    #[test]
    fn test_key_derive() {
        // Sub-key derivation is deterministic and context-sensitive.
        let k = make_key();
        let d1 = k.derive(0xA5);
        let d2 = k.derive(0xA5);
        let d3 = k.derive(0x01);
        assert_eq!(d1.raw(), d2.raw());
        assert_ne!(d1.raw(), d3.raw());
        println!("✅ ShiftMaskKey derive deterministic");
    }

    #[test]
    fn test_xor_id_roundtrip() {
        // XOR mask/unmask must round-trip for boundary and typical IDs.
        if let Some(engine) = make_engine(ShiftMaskAlgo::Xor) {
            for id in [0u64, 1, 0xFFFF_FFFF, u64::MAX, 0xDEAD_BEEF] {
                let masked = engine.mask_id(id);
                let back = engine.unmask_id(masked);
                assert_eq!(back, id, "XOR roundtrip failed for id={}", id);
            }
            println!("✅ XOR ID roundtrip working");
        }
    }

    #[test]
    fn test_bit_rotation_id_roundtrip() {
        // rotate_left then rotate_right by the same amount restores the ID.
        if let Some(engine) = make_engine(ShiftMaskAlgo::BitRotation) {
            for id in [1u64, 42, 0x1234_5678_9ABC_DEF0, u64::MAX / 2] {
                let masked = engine.mask_id(id);
                let back = engine.unmask_id(masked);
                assert_eq!(back, id, "BitRotation roundtrip failed for id={}", id);
            }
            println!("✅ BitRotation ID roundtrip working");
        }
    }

    #[test]
    fn test_poly_id_masking() {
        // NOTE(review): this only asserts that masking changes the value; it
        // does not assert a mask/unmask round-trip for PolynomialHash, so a
        // broken poly_unmask_u64 would go undetected here.
        if let Some(engine) = make_engine(ShiftMaskAlgo::PolynomialHash) {
            let id = 0x1234u64;
            let masked = engine.mask_id(id);
            assert_ne!(masked, id);
            println!("✅ PolynomialHash ID masking working: {} → {}", id, masked);
        }
    }

    #[test]
    fn test_lfsr_id_roundtrip() {
        if PLATFORM_WIDTH.is_full_support() {
            // 64-bit: LFSR masking must round-trip.
            if let Some(engine) = make_engine(ShiftMaskAlgo::Lfsr) {
                for id in [1u64, 0xABCD_1234, u64::MAX] {
                    let masked = engine.mask_id(id);
                    let back = engine.unmask_id(masked);
                    assert_eq!(back, id);
                }
                println!("✅ LFSR ID roundtrip working (64-bit)");
            }
        } else {
            // LFSR must fail on 32-bit
            let result = ShiftMaskEngine::new(make_key(), ShiftMaskAlgo::Lfsr);
            assert!(matches!(result, Err(ShiftMaskError::SemiRestricted(_))));
            println!("✅ LFSR correctly blocked on 32-bit semi");
        }
    }

    #[test]
    fn test_xor_stream_roundtrip() {
        // Stream masking must change the data, and unmasking must restore it.
        if let Some(engine) = make_engine(ShiftMaskAlgo::Xor) {
            let original = b"WASMA stream data test 1234567890";
            let mut data = original.to_vec();
            engine.mask_stream(&mut data);
            assert_ne!(data, original);
            engine.unmask_stream(&mut data);
            assert_eq!(data, original);
            println!("✅ XOR stream roundtrip working");
        }
    }

    #[test]
    fn test_rotation_stream_roundtrip() {
        // NOTE(review): with the fixed test key the per-byte rotation is
        // 52 % 8 = 4, and two left-rotations by 4 complete a full 8-bit turn —
        // so this test passes even if unmask merely re-applies the mask. A key
        // whose rotation mod 8 is not 0 or 4 would expose a non-inverting
        // unmask; consider adding such a key here.
        if let Some(engine) = make_engine(ShiftMaskAlgo::BitRotation) {
            let original = vec![0xABu8; 64];
            let mut data = original.clone();
            engine.mask_stream(&mut data);
            engine.unmask_stream(&mut data);
            assert_eq!(data, original);
            println!("✅ BitRotation stream roundtrip working");
        }
    }

    #[test]
    fn test_window_id_escaper() {
        // Register several IDs, check distinctness, resolution, idempotence.
        if let Some(engine) = make_engine(ShiftMaskAlgo::Xor) {
            let mut escaper = WindowIdEscaper::new(engine);
            let real_ids = [1u64, 2, 3, 100, 999];
            let masked: Vec<u64> = real_ids.iter().map(|&id| escaper.register(id)).collect();

            // All masked IDs distinct
            for i in 0..masked.len() {
                for j in i + 1..masked.len() {
                    assert_ne!(masked[i], masked[j]);
                }
            }
            // Resolve back
            for (i, &real) in real_ids.iter().enumerate() {
                assert_eq!(escaper.resolve(masked[i]), Some(real));
            }
            // Idempotent
            assert_eq!(escaper.register(real_ids[0]), masked[0]);
            println!(
                "✅ WindowIdEscaper working ({} windows)",
                escaper.registered_count()
            );
        }
    }

    #[test]
    fn test_stream_escaper() {
        // mask_copy must produce different bytes; unmask must restore them.
        if let Some(engine) = make_engine(ShiftMaskAlgo::Xor) {
            let escaper = StreamEscaper::new(engine);
            let original = b"test data stream";
            let masked = escaper.mask_copy(original);
            assert_ne!(masked, original);
            let mut back = masked.clone();
            escaper.unmask(&mut back);
            assert_eq!(back, original);
            println!("✅ StreamEscaper working");
        }
    }

    #[test]
    fn test_posix_window_esc_xor() {
        // End-to-end: config-derived key, ID registration + resolution,
        // stream masking, and the info banner.
        let config = make_config();
        let mut esc = PosixWindowEsc::from_config(&config, ShiftMaskAlgo::Xor).unwrap();

        let real_id = 42u64;
        let masked = esc.id_escaper.register(real_id);
        assert_ne!(masked, real_id);
        assert_eq!(esc.id_escaper.resolve(masked), Some(real_id));

        let data = b"hello world";
        let masked_data = esc.stream_escaper.mask_copy(data);
        assert_ne!(masked_data, data);

        esc.print_info();
        println!("✅ PosixWindowEsc (XOR) working");
    }

    #[test]
    fn test_posix_window_esc_auto() {
        // Auto selection must succeed on every platform (LFSR or XOR).
        let config = make_config();
        let esc = PosixWindowEsc::from_config_auto(&config);
        assert!(esc.is_ok());
        let esc = esc.unwrap();
        println!(
            "✅ PosixWindowEsc auto-algo: {}",
            esc.id_escaper.engine.algo().name()
        );
    }

    #[test]
    fn test_invalid_zero_key() {
        let key = ShiftMaskKey::new(0);
        // On 64-bit: key=0 is invalid; on 32-bit: effective=0 is also invalid
        let result = ShiftMaskEngine::new(key, ShiftMaskAlgo::Xor);
        assert!(matches!(result, Err(ShiftMaskError::InvalidKey)));
        println!("✅ Zero key correctly rejected");
    }

    #[test]
    fn test_mask_stream_copy_independent() {
        // The non-destructive copy path must leave the input untouched.
        if let Some(engine) = make_engine(ShiftMaskAlgo::Xor) {
            let original = b"independent copy test";
            let copy = engine.mask_stream_copy(original);
            // Original unchanged
            assert_eq!(original, b"independent copy test");
            assert_ne!(copy.as_slice(), original.as_slice());
            println!("✅ mask_stream_copy does not modify original");
        }
    }
}
801}