// asupersync/trace/replay.rs
1//! Replay event schema for deterministic record/replay.
2//!
3//! This module defines the [`ReplayEvent`] enum that captures all sources of
4//! non-determinism in the Lab runtime. By recording these events during execution,
5//! we can replay the exact same execution later for debugging or verification.
6//!
7//! # Design Goals
8//!
9//! - **Compact**: Events should typically be < 64 bytes for efficient storage
10//! - **Complete**: All non-determinism sources must be captured
11//! - **Versioned**: Format is versioned for forward compatibility
12//! - **Deterministic**: Same events → same execution
13//!
14//! # Non-Determinism Sources
15//!
16//! | Category | Events | What It Captures |
17//! |----------|--------|------------------|
18//! | Scheduling | TaskScheduled, TaskYielded, TaskCompleted | Which task runs when |
19//! | Time | TimeAdvanced, TimerCreated, TimerFired | Virtual time progression |
20//! | I/O | IoReady, IoError | Simulated I/O results |
21//! | RNG | RngSeed, RngValue | Deterministic randomness |
22//! | Chaos | ChaosInjection | Fault injection decisions |
23//!
24//! # Example
25//!
26//! ```ignore
27//! use asupersync::trace::replay::{ReplayEvent, TraceMetadata, ReplayTrace};
28//! use asupersync::types::TaskId;
29//!
30//! // Create trace metadata
31//! let metadata = TraceMetadata::new(42); // seed
32//!
33//! // Record events
34//! let mut trace = ReplayTrace::new(metadata);
35//! trace.push(ReplayEvent::RngSeed { seed: 42 });
//! trace.push(ReplayEvent::TaskScheduled {
//!     task: TaskId::testing_default().into(),
//!     at_tick: 0,
//! });
40//!
41//! // Serialize for storage
42//! let bytes = trace.to_bytes().expect("serialize");
43//!
44//! // Later: load and replay
45//! let loaded = ReplayTrace::from_bytes(&bytes).expect("deserialize");
46//! ```
47
48use crate::types::{RegionId, Severity, TaskId, Time};
49use serde::{Deserialize, Serialize};
50use std::io;
51
52// =============================================================================
53// Trace Metadata
54// =============================================================================
55
/// Current schema version for replay traces.
///
/// Increment this when making breaking changes to the schema.
/// [`ReplayTrace::from_bytes`] rejects traces recorded under a different
/// version (see [`TraceMetadata::is_compatible`]).
pub const REPLAY_SCHEMA_VERSION: u32 = 1;

/// Metadata about a replay trace.
///
/// This header is written at the start of every trace file and contains
/// information needed to replay the trace correctly.
///
/// NOTE(review): field order likely matters for the MessagePack encoding
/// used by [`ReplayTrace::to_bytes`] (rmp-serde's compact mode is
/// positional) — confirm before reordering fields without a version bump.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TraceMetadata {
    /// Schema version for forward compatibility.
    pub version: u32,

    /// Original RNG seed used for the execution.
    pub seed: u64,

    /// Deterministic recording stamp for this trace.
    ///
    /// `0` means no wall-clock timestamp was attached. Deterministic runtime
    /// paths use `0` by default so identical runs produce identical metadata.
    pub recorded_at: u64,

    /// Runtime configuration hash for compatibility checking.
    ///
    /// If the config hash differs during replay, results may not match.
    pub config_hash: u64,

    /// Optional description or test name.
    ///
    /// Omitted from the serialized form entirely when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
88
89impl TraceMetadata {
90    /// Creates new trace metadata with the given seed.
91    #[must_use]
92    pub fn new(seed: u64) -> Self {
93        Self {
94            version: REPLAY_SCHEMA_VERSION,
95            seed,
96            recorded_at: 0,
97            config_hash: 0,
98            description: None,
99        }
100    }
101
102    /// Sets the configuration hash.
103    #[must_use]
104    pub const fn with_config_hash(mut self, hash: u64) -> Self {
105        self.config_hash = hash;
106        self
107    }
108
109    /// Sets the description.
110    #[must_use]
111    pub fn with_description(mut self, desc: impl Into<String>) -> Self {
112        self.description = Some(desc.into());
113        self
114    }
115
116    /// Checks if this trace is compatible with the current schema.
117    #[must_use]
118    pub fn is_compatible(&self) -> bool {
119        self.version == REPLAY_SCHEMA_VERSION
120    }
121}
122
123// =============================================================================
124// Compact ID Types for Serialization
125// =============================================================================
126
/// Compact task identifier for serialization.
///
/// Uses raw u64 instead of `TaskId` for minimal size.
/// The high 32 bits are the index, low 32 bits are the generation
/// (packed by the `From<TaskId>` impl, unpacked by [`CompactTaskId::unpack`]).
///
/// `repr(transparent)` keeps the layout identical to a bare `u64`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[repr(transparent)]
pub struct CompactTaskId(pub u64);
134
135impl From<TaskId> for CompactTaskId {
136    fn from(id: TaskId) -> Self {
137        let idx = id.arena_index();
138        let packed = (u64::from(idx.index()) << 32) | u64::from(idx.generation());
139        Self(packed)
140    }
141}
142
143impl CompactTaskId {
144    /// Unpacks into index and generation components.
145    #[must_use]
146    pub const fn unpack(self) -> (u32, u32) {
147        let index = (self.0 >> 32) as u32;
148        let generation = self.0 as u32;
149        (index, generation)
150    }
151
152    /// Creates a `TaskId` for testing (requires test-internals feature).
153    #[cfg(any(test, feature = "test-internals"))]
154    #[must_use]
155    pub fn to_task_id(self) -> TaskId {
156        let (index, generation) = self.unpack();
157        TaskId::new_for_test(index, generation)
158    }
159}
160
/// Compact region identifier for serialization.
///
/// Same packing scheme as [`CompactTaskId`]: high 32 bits are the index,
/// low 32 bits the generation. `repr(transparent)` keeps the layout
/// identical to a bare `u64`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[repr(transparent)]
pub struct CompactRegionId(pub u64);
165
166impl From<RegionId> for CompactRegionId {
167    fn from(id: RegionId) -> Self {
168        let idx = id.arena_index();
169        let packed = (u64::from(idx.index()) << 32) | u64::from(idx.generation());
170        Self(packed)
171    }
172}
173
174impl CompactRegionId {
175    /// Unpacks into index and generation components.
176    #[must_use]
177    pub const fn unpack(self) -> (u32, u32) {
178        let index = (self.0 >> 32) as u32;
179        let generation = self.0 as u32;
180        (index, generation)
181    }
182
183    /// Creates a `RegionId` for testing (requires test-internals feature).
184    #[cfg(any(test, feature = "test-internals"))]
185    #[must_use]
186    pub fn to_region_id(self) -> RegionId {
187        let (index, generation) = self.unpack();
188        RegionId::new_for_test(index, generation)
189    }
190}
191
192// =============================================================================
193// Replay Events
194// =============================================================================
195
/// A replay event capturing a source of non-determinism.
///
/// Events are ordered by their sequence number. During replay, the runtime
/// consumes events in order to reproduce the same execution.
///
/// Serialized internally tagged (`#[serde(tag = "type")]`): the variant
/// name is carried in a `"type"` field alongside the payload fields.
///
/// # Size Optimization
///
/// Events are designed to be compact (see [`ReplayEvent::estimated_size`]):
/// - Enum discriminant: 1 byte
/// - Most variants: 8-24 bytes of payload
/// - Typical event: < 32 bytes
/// - Maximum event: < 64 bytes (largest variants — `TaskSpawned`,
///   `RegionCreated` with a parent, `Checkpoint` — estimate at 25 bytes)
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "type")]
pub enum ReplayEvent {
    // =========================================================================
    // Scheduling Decisions
    // =========================================================================
    /// A task was chosen for scheduling.
    ///
    /// Records which task was selected when multiple were ready.
    TaskScheduled {
        /// The task that was scheduled.
        task: CompactTaskId,
        /// Virtual time tick when scheduled.
        at_tick: u64,
    },

    /// A task voluntarily yielded.
    TaskYielded {
        /// The task that yielded.
        task: CompactTaskId,
    },

    /// A task completed execution.
    TaskCompleted {
        /// The task that completed.
        task: CompactTaskId,
        /// Outcome severity (0=Ok, 1=Err, 2=Cancelled, 3=Panicked).
        outcome: u8,
    },

    /// A task was spawned.
    TaskSpawned {
        /// The new task.
        task: CompactTaskId,
        /// The parent region.
        region: CompactRegionId,
        /// Virtual time tick when spawned.
        at_tick: u64,
    },

    // =========================================================================
    // Time Events
    // =========================================================================
    /// Virtual time advanced.
    TimeAdvanced {
        /// Previous time in nanoseconds.
        from_nanos: u64,
        /// New time in nanoseconds.
        to_nanos: u64,
    },

    /// A timer was created.
    TimerCreated {
        /// Timer identifier (token).
        timer_id: u64,
        /// Deadline in nanoseconds.
        deadline_nanos: u64,
    },

    /// A timer fired.
    TimerFired {
        /// Timer identifier (token).
        timer_id: u64,
    },

    /// A timer was cancelled.
    TimerCancelled {
        /// Timer identifier (token).
        timer_id: u64,
    },

    // =========================================================================
    // I/O Events (Lab Reactor)
    // =========================================================================
    /// I/O became ready.
    IoReady {
        /// I/O token.
        token: u64,
        /// Readiness flags (readable=1, writable=2, error=4, hangup=8);
        /// see [`ReplayEvent::io_ready`] for the bit packing.
        readiness: u8,
    },

    /// Simulated I/O result (bytes transferred).
    IoResult {
        /// I/O token.
        token: u64,
        /// Bytes read/written (negative for errors).
        bytes: i64,
    },

    /// I/O error was injected.
    IoError {
        /// I/O token.
        token: u64,
        /// Error kind as u8 (maps to io::ErrorKind via `u8_to_error_kind`).
        kind: u8,
    },

    // =========================================================================
    // RNG Events
    // =========================================================================
    /// RNG was seeded.
    RngSeed {
        /// The seed value.
        seed: u64,
    },

    /// An RNG value was generated (for verification).
    RngValue {
        /// The generated value.
        value: u64,
    },

    // =========================================================================
    // Chaos Injection
    // =========================================================================
    /// Chaos was injected.
    ChaosInjection {
        /// Kind of chaos (0=cancel, 1=delay, 2=io_error, 3=wakeup_storm, 4=budget).
        kind: u8,
        /// Affected task, if any.
        task: Option<CompactTaskId>,
        /// Additional data (e.g., delay nanos, error kind).
        data: u64,
    },

    // =========================================================================
    // Region Lifecycle Events
    // =========================================================================
    /// A region was created.
    ///
    /// Records when structured concurrency regions are established.
    /// This is needed to track the region tree during replay.
    RegionCreated {
        /// The new region.
        region: CompactRegionId,
        /// The parent region (None for root).
        parent: Option<CompactRegionId>,
        /// Virtual time tick when created.
        at_tick: u64,
    },

    /// A region was closed (completed normally or after draining).
    ///
    /// Records when all children have completed and finalizers have run.
    RegionClosed {
        /// The region that closed.
        region: CompactRegionId,
        /// Outcome severity (0=Ok, 1=Err, 2=Cancelled, 3=Panicked).
        outcome: u8,
    },

    /// A region received a cancellation request.
    ///
    /// Records the start of the cancellation protocol for a region.
    RegionCancelled {
        /// The region being cancelled.
        region: CompactRegionId,
        /// Cancel kind (severity level 0-5).
        cancel_kind: u8,
    },

    // =========================================================================
    // Waker Events
    // =========================================================================
    /// A waker was invoked.
    WakerWake {
        /// The task that was woken.
        task: CompactTaskId,
    },

    /// Multiple wakers were invoked (batch).
    WakerBatchWake {
        /// Number of tasks woken.
        count: u32,
    },

    // =========================================================================
    // Checkpoint Events
    // =========================================================================
    /// A checkpoint for replay synchronization.
    ///
    /// Checkpoints are inserted periodically to:
    /// - Verify replay is still synchronized with the recording
    /// - Provide restart points for long traces
    /// - Mark significant state transitions
    Checkpoint {
        /// Monotonic sequence number.
        sequence: u64,
        /// Virtual time at checkpoint in nanoseconds.
        time_nanos: u64,
        /// Number of active tasks.
        active_tasks: u32,
        /// Number of active regions.
        active_regions: u32,
    },
}
405
406impl ReplayEvent {
407    /// Returns the approximate serialized size in bytes.
408    ///
409    /// This is an estimate for capacity planning; actual size may vary
410    /// slightly due to serde encoding overhead.
411    #[must_use]
412    pub const fn estimated_size(&self) -> usize {
413        match self {
414            Self::TaskYielded { .. }
415            | Self::TimerFired { .. }
416            | Self::TimerCancelled { .. }
417            | Self::RngSeed { .. }
418            | Self::RngValue { .. }
419            | Self::WakerWake { .. } => 9, // 1 + 8
420            Self::TaskCompleted { .. }
421            | Self::IoReady { .. }
422            | Self::IoError { .. }
423            | Self::RegionClosed { .. }
424            | Self::RegionCancelled { .. } => 10, // 1 + 8 + 1
425            Self::TaskScheduled { .. }
426            | Self::TimeAdvanced { .. }
427            | Self::TimerCreated { .. }
428            | Self::IoResult { .. }
429            | Self::RegionCreated { parent: None, .. } => 17, // 1 + 8 + 8
430            Self::TaskSpawned { .. }
431            | Self::RegionCreated {
432                parent: Some(_), ..
433            }
434            | Self::Checkpoint { .. } => 25, // 1 + 8 + 8 + 8
435            Self::ChaosInjection { task: None, .. } => 11, // 1 + 1 + 1 + 8
436            Self::ChaosInjection { task: Some(_), .. } => 19, // 1 + 1 + 9 + 8
437            Self::WakerBatchWake { .. } => 5,              // 1 + 4
438        }
439    }
440
441    /// Creates a task scheduled event.
442    #[must_use]
443    pub fn task_scheduled(task: impl Into<CompactTaskId>, at_tick: u64) -> Self {
444        Self::TaskScheduled {
445            task: task.into(),
446            at_tick,
447        }
448    }
449
450    /// Creates a task completed event from outcome severity.
451    #[must_use]
452    pub fn task_completed(task: impl Into<CompactTaskId>, severity: Severity) -> Self {
453        Self::TaskCompleted {
454            task: task.into(),
455            outcome: severity.as_u8(),
456        }
457    }
458
459    /// Creates a time advanced event.
460    #[must_use]
461    pub fn time_advanced(from: Time, to: Time) -> Self {
462        Self::TimeAdvanced {
463            from_nanos: from.as_nanos(),
464            to_nanos: to.as_nanos(),
465        }
466    }
467
468    /// Creates an I/O ready event.
469    #[must_use]
470    #[allow(clippy::fn_params_excessive_bools)]
471    pub fn io_ready(token: u64, readable: bool, writable: bool, error: bool, hangup: bool) -> Self {
472        let mut readiness = 0u8;
473        if readable {
474            readiness |= 1;
475        }
476        if writable {
477            readiness |= 2;
478        }
479        if error {
480            readiness |= 4;
481        }
482        if hangup {
483            readiness |= 8;
484        }
485        Self::IoReady { token, readiness }
486    }
487
488    /// Creates an I/O error event.
489    #[must_use]
490    pub fn io_error(token: u64, kind: io::ErrorKind) -> Self {
491        Self::IoError {
492            token,
493            kind: error_kind_to_u8(kind),
494        }
495    }
496
497    /// Creates a region created event.
498    #[must_use]
499    pub fn region_created(
500        region: impl Into<CompactRegionId>,
501        parent: Option<impl Into<CompactRegionId>>,
502        at_tick: u64,
503    ) -> Self {
504        Self::RegionCreated {
505            region: region.into(),
506            parent: parent.map(Into::into),
507            at_tick,
508        }
509    }
510
511    /// Creates a region closed event.
512    #[must_use]
513    pub fn region_closed(region: impl Into<CompactRegionId>, severity: Severity) -> Self {
514        Self::RegionClosed {
515            region: region.into(),
516            outcome: severity.as_u8(),
517        }
518    }
519
520    /// Creates a region cancelled event.
521    #[must_use]
522    pub fn region_cancelled(region: impl Into<CompactRegionId>, cancel_kind: u8) -> Self {
523        Self::RegionCancelled {
524            region: region.into(),
525            cancel_kind,
526        }
527    }
528
529    /// Creates a checkpoint event.
530    #[must_use]
531    pub fn checkpoint(
532        sequence: u64,
533        time_nanos: u64,
534        active_tasks: u32,
535        active_regions: u32,
536    ) -> Self {
537        Self::Checkpoint {
538            sequence,
539            time_nanos,
540            active_tasks,
541            active_regions,
542        }
543    }
544}
545
546// =============================================================================
547// Replay Trace Container
548// =============================================================================
549
/// A complete replay trace with metadata and events.
///
/// Serialized as a whole (metadata header followed by the event sequence)
/// via [`ReplayTrace::to_bytes`] / [`ReplayTrace::from_bytes`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReplayTrace {
    /// Trace metadata header.
    pub metadata: TraceMetadata,
    /// Sequence of replay events.
    pub events: Vec<ReplayEvent>,
    /// Cursor for O(1) event consumption via [`EventSource`](super::replayer::EventSource).
    ///
    /// Not serialized; deserialized traces start with the cursor at 0.
    #[serde(skip)]
    pub cursor: usize,
}
561
562impl ReplayTrace {
563    /// Creates a new replay trace with the given metadata.
564    #[must_use]
565    pub fn new(metadata: TraceMetadata) -> Self {
566        Self {
567            metadata,
568            events: Vec::new(),
569            cursor: 0,
570        }
571    }
572
573    /// Creates a new replay trace with estimated capacity.
574    #[must_use]
575    pub fn with_capacity(metadata: TraceMetadata, capacity: usize) -> Self {
576        Self {
577            metadata,
578            events: Vec::with_capacity(capacity),
579            cursor: 0,
580        }
581    }
582
583    /// Appends an event to the trace.
584    pub fn push(&mut self, event: ReplayEvent) {
585        self.events.push(event);
586    }
587
588    /// Returns the number of events.
589    #[must_use]
590    pub fn len(&self) -> usize {
591        self.events.len()
592    }
593
594    /// Returns true if the trace has no events.
595    #[must_use]
596    pub fn is_empty(&self) -> bool {
597        self.events.is_empty()
598    }
599
600    /// Serializes the trace to MessagePack bytes.
601    ///
602    /// # Errors
603    ///
604    /// Returns an error if serialization fails.
605    pub fn to_bytes(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
606        rmp_serde::to_vec(self)
607    }
608
609    /// Deserializes a trace from MessagePack bytes.
610    ///
611    /// # Errors
612    ///
613    /// Returns an error if deserialization fails or the version is incompatible.
614    pub fn from_bytes(bytes: &[u8]) -> Result<Self, ReplayTraceError> {
615        let trace: Self = rmp_serde::from_slice(bytes)?;
616        if !trace.metadata.is_compatible() {
617            return Err(ReplayTraceError::IncompatibleVersion {
618                expected: REPLAY_SCHEMA_VERSION,
619                found: trace.metadata.version,
620            });
621        }
622        Ok(trace)
623    }
624
625    /// Returns an iterator over the events.
626    pub fn iter(&self) -> impl Iterator<Item = &ReplayEvent> {
627        self.events.iter()
628    }
629
630    /// Estimates the total serialized size in bytes.
631    #[must_use]
632    pub fn estimated_size(&self) -> usize {
633        // Metadata overhead (~50 bytes) + events
634        50 + self
635            .events
636            .iter()
637            .map(ReplayEvent::estimated_size)
638            .sum::<usize>()
639    }
640}
641
/// Errors that can occur when working with replay traces.
#[derive(Debug, thiserror::Error)]
pub enum ReplayTraceError {
    /// Serialization/deserialization error.
    ///
    /// `#[from]` lets [`ReplayTrace::from_bytes`] propagate decode
    /// failures with `?`.
    #[error("serialization error: {0}")]
    Serde(#[from] rmp_serde::decode::Error),

    /// Version mismatch between the recorded trace and this build's schema.
    #[error("incompatible trace version: expected {expected}, found {found}")]
    IncompatibleVersion {
        /// Expected schema version.
        expected: u32,
        /// Found schema version.
        found: u32,
    },
}
658
659// =============================================================================
660// Helper Functions
661// =============================================================================
662
/// Converts an `io::ErrorKind` to a u8 for compact serialization.
///
/// Inverse of [`u8_to_error_kind`]; any kind without an explicit code
/// collapses to 255 (decoded back as `Other`).
#[must_use]
fn error_kind_to_u8(kind: io::ErrorKind) -> u8 {
    use io::ErrorKind as K;
    match kind {
        K::NotFound => 1,
        K::PermissionDenied => 2,
        K::ConnectionRefused => 3,
        K::ConnectionReset => 4,
        K::ConnectionAborted => 5,
        K::NotConnected => 6,
        K::AddrInUse => 7,
        K::AddrNotAvailable => 8,
        K::BrokenPipe => 9,
        K::AlreadyExists => 10,
        K::WouldBlock => 11,
        K::InvalidInput => 12,
        K::InvalidData => 13,
        K::TimedOut => 14,
        K::WriteZero => 15,
        K::Interrupted => 16,
        K::UnexpectedEof => 17,
        K::OutOfMemory => 18,
        // Other/unknown kinds share a single sentinel code.
        _ => 255,
    }
}
693
/// Converts a u8 back to an `io::ErrorKind`.
///
/// Inverse of `error_kind_to_u8`; codes without a known mapping
/// (including the 255 sentinel) decode as `Other`.
#[must_use]
pub fn u8_to_error_kind(value: u8) -> io::ErrorKind {
    use io::ErrorKind as K;
    match value {
        1 => K::NotFound,
        2 => K::PermissionDenied,
        3 => K::ConnectionRefused,
        4 => K::ConnectionReset,
        5 => K::ConnectionAborted,
        6 => K::NotConnected,
        7 => K::AddrInUse,
        8 => K::AddrNotAvailable,
        9 => K::BrokenPipe,
        10 => K::AlreadyExists,
        11 => K::WouldBlock,
        12 => K::InvalidInput,
        13 => K::InvalidData,
        14 => K::TimedOut,
        15 => K::WriteZero,
        16 => K::Interrupted,
        17 => K::UnexpectedEof,
        18 => K::OutOfMemory,
        _ => K::Other,
    }
}
725
726// =============================================================================
727// Tests
728// =============================================================================
729
730#[cfg(test)]
731mod tests {
732    use super::*;
733
    /// New metadata carries the current schema version and the given seed.
    #[test]
    fn metadata_creation() {
        let meta = TraceMetadata::new(42);
        assert_eq!(meta.version, REPLAY_SCHEMA_VERSION);
        assert_eq!(meta.seed, 42);
        assert_eq!(meta.recorded_at, 0);
        assert!(meta.is_compatible());
    }

    /// Same seed must produce identical metadata (no wall-clock stamp).
    #[test]
    fn metadata_creation_is_deterministic_for_same_seed() {
        let first = TraceMetadata::new(42);
        let second = TraceMetadata::new(42);

        assert_eq!(first, second);
        assert_eq!(first.recorded_at, 0);
    }

    /// Builder-style setters populate config hash and description.
    #[test]
    fn metadata_builder() {
        let meta = TraceMetadata::new(42)
            .with_config_hash(0xDEAD_BEEF)
            .with_description("test trace");
        assert_eq!(meta.config_hash, 0xDEAD_BEEF);
        assert_eq!(meta.description, Some("test trace".to_string()));
    }

    /// Pack/unpack round-trips a TaskId through the compact u64 form.
    #[test]
    fn compact_task_id_roundtrip() {
        let task = TaskId::new_for_test(123, 456);
        let compact = CompactTaskId::from(task);
        let (index, generation) = compact.unpack();
        assert_eq!(index, 123);
        assert_eq!(generation, 456);
        assert_eq!(compact.to_task_id(), task);
    }

    /// Every sampled event's size estimate stays under the 64-byte budget.
    #[test]
    fn replay_event_sizes() {
        // Verify events are compact
        let events = [
            ReplayEvent::TaskScheduled {
                task: CompactTaskId(0),
                at_tick: 0,
            },
            ReplayEvent::TaskYielded {
                task: CompactTaskId(0),
            },
            ReplayEvent::TaskCompleted {
                task: CompactTaskId(0),
                outcome: 0,
            },
            ReplayEvent::TimeAdvanced {
                from_nanos: 0,
                to_nanos: 0,
            },
            ReplayEvent::TimerFired { timer_id: 0 },
            ReplayEvent::IoReady {
                token: 0,
                readiness: 0,
            },
            ReplayEvent::RngSeed { seed: 0 },
            ReplayEvent::WakerWake {
                task: CompactTaskId(0),
            },
        ];

        for event in &events {
            let size = event.estimated_size();
            assert!(size < 64, "Event {event:?} exceeds 64 bytes: {size} bytes");
        }
    }

    /// A recorded trace survives MessagePack serialize + deserialize intact.
    #[test]
    fn trace_serialization_roundtrip() {
        let mut trace = ReplayTrace::new(TraceMetadata::new(42));
        trace.push(ReplayEvent::RngSeed { seed: 42 });
        trace.push(ReplayEvent::TaskScheduled {
            task: CompactTaskId(1),
            at_tick: 0,
        });
        trace.push(ReplayEvent::TimeAdvanced {
            from_nanos: 0,
            to_nanos: 1_000_000,
        });
        trace.push(ReplayEvent::TaskCompleted {
            task: CompactTaskId(1),
            outcome: 0,
        });

        let bytes = trace.to_bytes().expect("serialize");
        let loaded = ReplayTrace::from_bytes(&bytes).expect("deserialize");

        assert_eq!(loaded.metadata.seed, 42);
        assert_eq!(loaded.events.len(), 4);
        assert_eq!(loaded.events[0], ReplayEvent::RngSeed { seed: 42 });
    }

    /// Average on-the-wire size per event stays well under the estimate.
    #[test]
    fn trace_actual_serialized_size() {
        let mut trace = ReplayTrace::new(TraceMetadata::new(42));

        // Add typical events
        for i in 0..100 {
            trace.push(ReplayEvent::TaskScheduled {
                task: CompactTaskId(i),
                at_tick: i,
            });
        }

        let bytes = trace.to_bytes().expect("serialize");
        let avg_size = bytes.len() / 100;

        // Verify average event size is reasonable (should be well under 64 bytes)
        assert!(
            avg_size < 32,
            "Average serialized event size {avg_size} bytes exceeds expected"
        );
    }

    /// Encode/decode of io::ErrorKind codes round-trips the mapped kinds.
    #[test]
    fn error_kind_roundtrip() {
        use io::ErrorKind::*;
        let kinds = [
            NotFound,
            PermissionDenied,
            ConnectionRefused,
            ConnectionReset,
            BrokenPipe,
            WouldBlock,
            TimedOut,
        ];

        for kind in kinds {
            let encoded = error_kind_to_u8(kind);
            let decoded = u8_to_error_kind(encoded);
            assert_eq!(kind, decoded, "Failed roundtrip for {kind:?}");
        }
    }

    /// A freshly written trace passes the schema-version check on load.
    #[test]
    fn version_compatibility_check() {
        let mut trace = ReplayTrace::new(TraceMetadata::new(42));
        trace.push(ReplayEvent::RngSeed { seed: 42 });

        // Serialize
        let bytes = trace.to_bytes().expect("serialize");

        // Modify version in raw bytes would require manual byte manipulation
        // Just verify normal case works
        let loaded = ReplayTrace::from_bytes(&bytes).expect("deserialize");
        assert!(loaded.metadata.is_compatible());
    }

    /// io_ready packs the four readiness booleans into the low four bits.
    #[test]
    fn io_ready_flags() {
        let event = ReplayEvent::io_ready(123, true, false, false, false);
        if let ReplayEvent::IoReady { token, readiness } = event {
            assert_eq!(token, 123);
            assert_eq!(readiness & 1, 1); // readable
            assert_eq!(readiness & 2, 0); // not writable
        } else {
            panic!("Expected IoReady");
        }

        let event = ReplayEvent::io_ready(456, true, true, true, true);
        if let ReplayEvent::IoReady { readiness, .. } = event {
            assert_eq!(readiness, 0b1111); // all flags set
        } else {
            panic!("Expected IoReady");
        }
    }

    /// ChaosInjection stays compact with and without a target task.
    #[test]
    fn chaos_injection_variants() {
        let event_no_task = ReplayEvent::ChaosInjection {
            kind: 1, // delay
            task: None,
            data: 1_000_000, // 1ms in nanos
        };
        assert!(event_no_task.estimated_size() < 64);

        let event_with_task = ReplayEvent::ChaosInjection {
            kind: 0, // cancel
            task: Some(CompactTaskId(42)),
            data: 0,
        };
        assert!(event_with_task.estimated_size() < 64);
    }

    /// region_created builds both child (Some parent) and root (None) events.
    #[test]
    fn region_created_event() {
        let event = ReplayEvent::region_created(CompactRegionId(1), Some(CompactRegionId(0)), 100);

        if let ReplayEvent::RegionCreated {
            region,
            parent,
            at_tick,
        } = event
        {
            assert_eq!(region.0, 1);
            assert_eq!(parent.map(|p| p.0), Some(0));
            assert_eq!(at_tick, 100);
        } else {
            panic!("Expected RegionCreated");
        }

        // Test without parent (root region)
        let root = ReplayEvent::region_created(CompactRegionId(0), None::<CompactRegionId>, 0);
        if let ReplayEvent::RegionCreated { parent, .. } = root {
            assert!(parent.is_none());
        } else {
            panic!("Expected RegionCreated");
        }
    }

    /// region_closed records the region and encodes the severity.
    #[test]
    fn region_closed_event() {
        let event = ReplayEvent::region_closed(CompactRegionId(5), Severity::Ok);

        if let ReplayEvent::RegionClosed { region, outcome } = event {
            assert_eq!(region.0, 5);
            assert_eq!(outcome, Severity::Ok.as_u8());
        } else {
            panic!("Expected RegionClosed");
        }
    }

    /// region_cancelled records the region and the raw cancel kind.
    #[test]
    fn region_cancelled_event() {
        let event = ReplayEvent::region_cancelled(CompactRegionId(3), 1);

        if let ReplayEvent::RegionCancelled {
            region,
            cancel_kind,
        } = event
        {
            assert_eq!(region.0, 3);
            assert_eq!(cancel_kind, 1);
        } else {
            panic!("Expected RegionCancelled");
        }
    }

    /// checkpoint populates all four counters verbatim.
    #[test]
    fn checkpoint_event() {
        let event = ReplayEvent::checkpoint(42, 1_000_000_000, 5, 2);

        if let ReplayEvent::Checkpoint {
            sequence,
            time_nanos,
            active_tasks,
            active_regions,
        } = event
        {
            assert_eq!(sequence, 42);
            assert_eq!(time_nanos, 1_000_000_000);
            assert_eq!(active_tasks, 5);
            assert_eq!(active_regions, 2);
        } else {
            panic!("Expected Checkpoint");
        }
    }
997
998    #[test]
999    fn region_events_size() {
1000        // Verify all region events stay compact (< 64 bytes)
1001        let events = [
1002            ReplayEvent::RegionCreated {
1003                region: CompactRegionId(0),
1004                parent: None,
1005                at_tick: 0,
1006            },
1007            ReplayEvent::RegionCreated {
1008                region: CompactRegionId(0),
1009                parent: Some(CompactRegionId(1)),
1010                at_tick: 0,
1011            },
1012            ReplayEvent::RegionClosed {
1013                region: CompactRegionId(0),
1014                outcome: 0,
1015            },
1016            ReplayEvent::RegionCancelled {
1017                region: CompactRegionId(0),
1018                cancel_kind: 0,
1019            },
1020            ReplayEvent::Checkpoint {
1021                sequence: 0,
1022                time_nanos: 0,
1023                active_tasks: 0,
1024                active_regions: 0,
1025            },
1026        ];
1027
1028        for event in &events {
1029            let size = event.estimated_size();
1030            assert!(size < 64, "Event {event:?} exceeds 64 bytes: {size} bytes");
1031        }
1032    }
1033
1034    #[test]
1035    fn empty_trace_serialization_roundtrip() {
1036        let trace = ReplayTrace::new(TraceMetadata::new(0));
1037        assert!(trace.is_empty());
1038        assert_eq!(trace.len(), 0);
1039
1040        let bytes = trace.to_bytes().expect("serialize empty");
1041        let loaded = ReplayTrace::from_bytes(&bytes).expect("deserialize empty");
1042
1043        assert_eq!(loaded.metadata.seed, 0);
1044        assert!(loaded.is_empty());
1045    }
1046
1047    #[test]
1048    fn incompatible_version_rejected() {
1049        let mut trace = ReplayTrace::new(TraceMetadata::new(42));
1050        trace.push(ReplayEvent::RngSeed { seed: 42 });
1051
1052        let _bytes = trace.to_bytes().expect("serialize");
1053
1054        // Manually tamper with the version in the serialized bytes
1055        // TraceMetadata is serialized via msgpack, version is the first field
1056        // Instead, create a trace with wrong version directly
1057        let meta = TraceMetadata {
1058            version: 999,
1059            seed: 42,
1060            recorded_at: 0,
1061            config_hash: 0,
1062            description: None,
1063        };
1064        let bad_trace = ReplayTrace {
1065            metadata: meta,
1066            events: vec![ReplayEvent::RngSeed { seed: 42 }],
1067            cursor: 0,
1068        };
1069        let bad_bytes = bad_trace.to_bytes().expect("serialize bad version");
1070        let err = ReplayTrace::from_bytes(&bad_bytes).unwrap_err();
1071        assert!(matches!(
1072            err,
1073            ReplayTraceError::IncompatibleVersion {
1074                expected: REPLAY_SCHEMA_VERSION,
1075                found: 999
1076            }
1077        ));
1078    }
1079
1080    #[test]
1081    fn trace_with_capacity_preallocates() {
1082        let trace = ReplayTrace::with_capacity(TraceMetadata::new(1), 100);
1083        assert!(trace.is_empty());
1084        assert_eq!(trace.len(), 0);
1085    }
1086
1087    #[test]
1088    fn estimated_size_increases_with_events() {
1089        let mut trace = ReplayTrace::new(TraceMetadata::new(42));
1090        let base_size = trace.estimated_size();
1091
1092        trace.push(ReplayEvent::RngSeed { seed: 42 });
1093        let one_event_size = trace.estimated_size();
1094        assert!(one_event_size > base_size);
1095
1096        trace.push(ReplayEvent::TaskScheduled {
1097            task: CompactTaskId(1),
1098            at_tick: 0,
1099        });
1100        let two_event_size = trace.estimated_size();
1101        assert!(two_event_size > one_event_size);
1102    }
1103
1104    #[test]
1105    fn compact_region_id_roundtrip() {
1106        let region = RegionId::new_for_test(456, 789);
1107        let compact = CompactRegionId::from(region);
1108        let (index, generation) = compact.unpack();
1109        assert_eq!(index, 456);
1110        assert_eq!(generation, 789);
1111        assert_eq!(compact.to_region_id(), region);
1112    }
1113
1114    #[test]
1115    fn metadata_compatibility_flag() {
1116        let meta = TraceMetadata::new(42);
1117        assert!(meta.is_compatible());
1118
1119        let old_meta = TraceMetadata {
1120            version: 0,
1121            seed: 42,
1122            recorded_at: 0,
1123            config_hash: 0,
1124            description: None,
1125        };
1126        assert!(!old_meta.is_compatible());
1127    }
1128
1129    #[test]
1130    fn io_error_roundtrip_all_known_kinds() {
1131        use io::ErrorKind::*;
1132        let all_known = [
1133            NotFound,
1134            PermissionDenied,
1135            ConnectionRefused,
1136            ConnectionReset,
1137            ConnectionAborted,
1138            NotConnected,
1139            AddrInUse,
1140            AddrNotAvailable,
1141            BrokenPipe,
1142            AlreadyExists,
1143            WouldBlock,
1144            InvalidInput,
1145            InvalidData,
1146            TimedOut,
1147            WriteZero,
1148            Interrupted,
1149            UnexpectedEof,
1150            OutOfMemory,
1151        ];
1152
1153        for kind in all_known {
1154            let encoded = error_kind_to_u8(kind);
1155            let decoded = u8_to_error_kind(encoded);
1156            assert_eq!(kind, decoded, "Roundtrip failed for {kind:?}");
1157        }
1158    }
1159
1160    #[test]
1161    fn unknown_error_kind_maps_to_other() {
1162        let decoded = u8_to_error_kind(255);
1163        assert_eq!(decoded, io::ErrorKind::Other);
1164        let decoded = u8_to_error_kind(200);
1165        assert_eq!(decoded, io::ErrorKind::Other);
1166    }
1167
1168    #[test]
1169    fn trace_iter_yields_all_events() {
1170        let mut trace = ReplayTrace::new(TraceMetadata::new(42));
1171        trace.push(ReplayEvent::RngSeed { seed: 1 });
1172        trace.push(ReplayEvent::RngSeed { seed: 2 });
1173        trace.push(ReplayEvent::RngSeed { seed: 3 });
1174
1175        assert_eq!(trace.iter().count(), 3);
1176    }
1177
1178    #[test]
1179    fn region_events_serialization_roundtrip() {
1180        let mut trace = ReplayTrace::new(TraceMetadata::new(123));
1181
1182        // Add region lifecycle events
1183        trace.push(ReplayEvent::RegionCreated {
1184            region: CompactRegionId(0),
1185            parent: None,
1186            at_tick: 0,
1187        });
1188        trace.push(ReplayEvent::RegionCreated {
1189            region: CompactRegionId(1),
1190            parent: Some(CompactRegionId(0)),
1191            at_tick: 10,
1192        });
1193        trace.push(ReplayEvent::RegionCancelled {
1194            region: CompactRegionId(1),
1195            cancel_kind: 2,
1196        });
1197        trace.push(ReplayEvent::RegionClosed {
1198            region: CompactRegionId(1),
1199            outcome: 2, // Cancelled
1200        });
1201        trace.push(ReplayEvent::RegionClosed {
1202            region: CompactRegionId(0),
1203            outcome: 0, // Ok
1204        });
1205        trace.push(ReplayEvent::Checkpoint {
1206            sequence: 1,
1207            time_nanos: 1_000_000,
1208            active_tasks: 0,
1209            active_regions: 0,
1210        });
1211
1212        let bytes = trace.to_bytes().expect("serialize");
1213        let loaded = ReplayTrace::from_bytes(&bytes).expect("deserialize");
1214
1215        assert_eq!(loaded.events.len(), 6);
1216
1217        // Verify first event (root region created)
1218        match &loaded.events[0] {
1219            ReplayEvent::RegionCreated {
1220                region,
1221                parent,
1222                at_tick,
1223            } => {
1224                assert_eq!(region.0, 0);
1225                assert!(parent.is_none());
1226                assert_eq!(*at_tick, 0);
1227            }
1228            _ => panic!("Expected RegionCreated"),
1229        }
1230
1231        // Verify checkpoint event
1232        match &loaded.events[5] {
1233            ReplayEvent::Checkpoint {
1234                sequence,
1235                time_nanos,
1236                active_tasks,
1237                active_regions,
1238            } => {
1239                assert_eq!(*sequence, 1);
1240                assert_eq!(*time_nanos, 1_000_000);
1241                assert_eq!(*active_tasks, 0);
1242                assert_eq!(*active_regions, 0);
1243            }
1244            _ => panic!("Expected Checkpoint"),
1245        }
1246    }
1247
1248    // --- wave 77 trait coverage ---
1249
1250    #[test]
1251    fn trace_metadata_debug_clone_eq() {
1252        let m = TraceMetadata {
1253            version: REPLAY_SCHEMA_VERSION,
1254            seed: 42,
1255            recorded_at: 0,
1256            config_hash: 0xABC,
1257            description: Some("test".into()),
1258        };
1259        let m2 = m.clone();
1260        assert_eq!(m, m2);
1261        let dbg = format!("{m:?}");
1262        assert!(dbg.contains("TraceMetadata"));
1263    }
1264
1265    #[test]
1266    fn compact_task_id_debug_clone_copy_eq() {
1267        let id = CompactTaskId(42);
1268        let id2 = id; // Copy
1269        let id3 = id;
1270        assert_eq!(id, id2);
1271        assert_eq!(id, id3);
1272        assert_ne!(id, CompactTaskId(99));
1273        let dbg = format!("{id:?}");
1274        assert!(dbg.contains("42"));
1275    }
1276
1277    #[test]
1278    fn compact_region_id_debug_clone_copy_eq() {
1279        let id = CompactRegionId(7);
1280        let id2 = id; // Copy
1281        let id3 = id;
1282        assert_eq!(id, id2);
1283        assert_eq!(id, id3);
1284        assert_ne!(id, CompactRegionId(99));
1285        let dbg = format!("{id:?}");
1286        assert!(dbg.contains('7'));
1287    }
1288
1289    #[test]
1290    fn replay_event_debug_clone_eq() {
1291        let e = ReplayEvent::TaskScheduled {
1292            task: CompactTaskId(1),
1293            at_tick: 100,
1294        };
1295        let e2 = e.clone();
1296        assert_eq!(e, e2);
1297        assert_ne!(
1298            e,
1299            ReplayEvent::TaskYielded {
1300                task: CompactTaskId(1),
1301            }
1302        );
1303        let dbg = format!("{e:?}");
1304        assert!(dbg.contains("TaskScheduled"));
1305    }
1306}