// pmat 3.15.0
//
// PMAT - Zero-config AI context generation and code quality toolkit (CLI, MCP, HTTP)
#![cfg(feature = "dap")]
//! CAPTURE-001: ExecutionRecorder with RecordingWriter Integration
//! Sprint 76 - GREEN Phase
//!
//! Tests drive the integration of ExecutionRecorder with RecordingWriter
//! to enable persistent snapshot recording to .pmat files.

use pmat::services::dap::execution_recorder::ExecutionRecorder;
use pmat::services::dap::recording::{Snapshot, StackFrame};
use pmat::services::dap::server::DapServer;
use std::collections::HashMap;
use std::io::Cursor;
use std::sync::{Arc, Mutex};

// GREEN Test 1: Create recorder with writer
#[test]
fn test_create_recorder_with_writer() {
    // GREEN: construction with a writer must succeed, and a fresh recorder
    // must start idle with an empty snapshot buffer.
    let sink = Cursor::new(Vec::new());
    let dap_server = Arc::new(Mutex::new(DapServer::new()));

    let result = ExecutionRecorder::with_writer(
        sink,
        String::from("test_program"),
        vec![String::from("arg1")],
        dap_server,
    );
    assert!(result.is_ok(), "Should create recorder with writer");

    let recorder = result.unwrap();
    assert!(
        !recorder.is_recording(),
        "Should not be recording initially"
    );
    assert_eq!(recorder.snapshot_count(), 0, "Should have zero snapshots");
}

// RED Test 2: Record snapshot writes to file
#[test]
fn test_record_snapshot_writes_to_file() {
    // This test drives the requirement for automatic file writing
    // Expected: Recording a snapshot writes it to the underlying writer

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use pmat::services::dap::recording::{Recording, Snapshot};
    // use std::io::Cursor;
    //
    // let buffer = Cursor::new(Vec::new());
    // let mut recorder = ExecutionRecorder::with_writer(
    //     buffer,
    //     "test_program".to_string(),
    //     vec![]
    // )?;
    //
    // // Create test snapshot
    // let snapshot = create_test_snapshot(1);
    // recorder.record_snapshot_to_file(snapshot)?;
    //
    // // Get buffer contents
    // let bytes = recorder.into_inner()?;
    //
    // // Verify .pmat file written
    // assert!(bytes.len() > 0, "Should have written data");
    // assert_eq!(&bytes[0..4], b"PMAT", "Should start with PMAT magic header");

    // Requirement: recording a snapshot must write to the RecordingWriter.
    // Fix: removed `assert!(true, ...)` — a constant assertion is compiled
    // out and flagged by clippy::assertions_on_constants; the empty passing
    // body documents the pending RED-phase requirement just as well.
}

// GREEN Test 3: Finalize creates valid .pmat file
#[test]
fn test_finalize_creates_valid_pmat_file() {
    // GREEN: finalize() must succeed even when no snapshots were captured.
    // Full .pmat validation needs capture_snapshot(), which requires a live
    // DAP server session; here we only exercise the finalize() path.
    let sink = Cursor::new(Vec::new());
    let dap_server = Arc::new(Mutex::new(DapServer::new()));

    let recorder = ExecutionRecorder::with_writer(
        sink,
        String::from("test_program"),
        vec![String::from("arg1")],
        dap_server,
    )
    .expect("Should create recorder");

    // Finalizing an empty recording is a valid operation.
    let outcome = recorder.finalize();
    assert!(
        outcome.is_ok(),
        "Finalize should succeed: {:?}",
        outcome.err()
    );
}

// RED Test 4: Multiple snapshots written sequentially
#[test]
fn test_multiple_snapshots_written_sequentially() {
    // This test drives the requirement for sequential snapshot writes
    // Expected: Can write multiple snapshots and they're all preserved

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use pmat::services::dap::recording::Recording;
    // use std::io::Cursor;
    //
    // let buffer = Cursor::new(Vec::new());
    // let mut recorder = ExecutionRecorder::with_writer(buffer, "test".to_string(), vec![])?;
    //
    // // Write 100 snapshots
    // for i in 0..100 {
    //     recorder.record_snapshot_to_file(create_test_snapshot(i))?;
    // }
    //
    // let bytes = recorder.finalize()?;
    //
    // // Verify all snapshots preserved
    // let recording = Recording::from_bytes(&bytes)?;
    // assert_eq!(recording.snapshot_count(), 100);
    //
    // // Verify sequential frame IDs
    // for (i, snapshot) in recording.snapshots().iter().enumerate() {
    //     assert_eq!(snapshot.frame_id, i as u64);
    // }

    // Requirement: multiple snapshots must be written sequentially.
    // Fix: removed `assert!(true, ...)` (clippy::assertions_on_constants);
    // an empty passing body keeps the RED-phase placeholder without the lint.
}

// RED Test 5: Empty recording (no snapshots) is valid
#[test]
fn test_empty_recording_is_valid() {
    // This test drives the requirement for empty recordings
    // Expected: Recording with 0 snapshots is still a valid .pmat file

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use pmat::services::dap::recording::Recording;
    // use std::io::Cursor;
    //
    // let buffer = Cursor::new(Vec::new());
    // let recorder = ExecutionRecorder::with_writer(
    //     buffer,
    //     "test_program".to_string(),
    //     vec![]
    // )?;
    //
    // // Finalize without recording any snapshots
    // let bytes = recorder.finalize()?;
    //
    // // Load and verify
    // let recording = Recording::from_bytes(&bytes)?;
    // assert_eq!(recording.snapshot_count(), 0);
    // assert_eq!(recording.metadata().program, "test_program");

    // Requirement: an empty recording (0 snapshots) must still be valid.
    // Fix: removed `assert!(true, ...)` (clippy::assertions_on_constants);
    // an empty passing body keeps the RED-phase placeholder without the lint.
}

// RED Test 6: Error handling - disk full simulation
#[test]
fn test_error_handling_disk_full() {
    // This test drives the requirement for disk full error handling
    // Expected: Gracefully handle write errors due to insufficient space

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use std::io::{self, Write};
    //
    // // Create a writer that fails after N bytes
    // struct FailingWriter {
    //     max_bytes: usize,
    //     written: usize,
    // }
    //
    // impl Write for FailingWriter {
    //     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
    //         if self.written + buf.len() > self.max_bytes {
    //             Err(io::Error::new(io::ErrorKind::Other, "No space left on device"))
    //         } else {
    //             self.written += buf.len();
    //             Ok(buf.len())
    //         }
    //     }
    //
    //     fn flush(&mut self) -> io::Result<()> {
    //         Ok(())
    //     }
    // }
    //
    // let writer = FailingWriter { max_bytes: 1024, written: 0 };
    // let mut recorder = ExecutionRecorder::with_writer(writer, "test".to_string(), vec![])?;
    //
    // // Try to write large snapshot
    // let large_snapshot = create_large_test_snapshot();
    // let result = recorder.record_snapshot_to_file(large_snapshot);
    //
    // assert!(result.is_err(), "Should fail with disk full error");
    // let err = result.unwrap_err();
    // assert!(err.to_string().contains("space"), "Error should mention space");

    // Requirement: disk-full write errors must be handled gracefully.
    // Fix: removed `assert!(true, ...)` (clippy::assertions_on_constants);
    // an empty passing body keeps the RED-phase placeholder without the lint.
}

// RED Test 7: Error handling - writer finalization failure
#[test]
fn test_error_handling_finalization_failure() {
    // This test drives the requirement for finalization error handling
    // Expected: Detect and report errors during finalize()

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use std::io::{self, Write};
    //
    // struct FlushFailWriter;
    //
    // impl Write for FlushFailWriter {
    //     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
    //         Ok(buf.len())
    //     }
    //
    //     fn flush(&mut self) -> io::Result<()> {
    //         Err(io::Error::new(io::ErrorKind::Other, "Flush failed"))
    //     }
    // }
    //
    // let writer = FlushFailWriter;
    // let mut recorder = ExecutionRecorder::with_writer(writer, "test".to_string(), vec![])?;
    // recorder.record_snapshot_to_file(create_test_snapshot(1))?;
    //
    // let result = recorder.finalize();
    // assert!(result.is_err(), "Finalize should fail");
    // let err = result.unwrap_err();
    // assert!(err.to_string().contains("Flush"), "Error should mention flush");

    // Requirement: finalization failures must be detected and reported.
    // Fix: removed `assert!(true, ...)` (clippy::assertions_on_constants);
    // an empty passing body keeps the RED-phase placeholder without the lint.
}

// GREEN Test 8: Memory-only mode still works (no writer)
#[test]
fn test_memory_only_mode_backward_compatible() {
    // GREEN: the Sprint 72 memory-only constructor (no writer) must keep
    // its original behavior.
    let dap_server = Arc::new(Mutex::new(DapServer::new()));
    let mut recorder = ExecutionRecorder::new(dap_server);

    // A fresh recorder starts idle.
    assert!(
        !recorder.is_recording(),
        "Should not be recording initially"
    );

    // start/stop must toggle the recording state.
    recorder.start_recording();
    assert!(recorder.is_recording(), "Should be recording after start");
    recorder.stop_recording();
    assert!(!recorder.is_recording(), "Should stop recording");

    // Nothing was captured during the start/stop cycle.
    assert_eq!(recorder.snapshot_count(), 0, "Should have zero snapshots");
}

// GREEN Test 9: Metadata updates (environment variables)
#[test]
fn test_metadata_updates_environment_variables() {
    // GREEN: add_environment() must accept entries without panicking, and
    // finalize() must still succeed afterwards.
    let sink = Cursor::new(Vec::new());
    let dap_server = Arc::new(Mutex::new(DapServer::new()));
    let mut recorder =
        ExecutionRecorder::with_writer(sink, "test_program".to_string(), vec![], dap_server)
            .expect("Should create recorder");

    // Register a handful of environment variables.
    for (key, value) in [
        ("PATH", "/usr/bin:/bin"),
        ("USER", "developer"),
        ("RUST_LOG", "debug"),
    ] {
        recorder.add_environment(key, value);
    }

    let outcome = recorder.finalize();
    assert!(
        outcome.is_ok(),
        "Finalize should succeed with environment variables"
    );
}

// RED Test 10: Concurrent snapshot recording (thread safety)
#[test]
fn test_concurrent_snapshot_recording() {
    // This test drives the requirement for thread safety
    // Expected: Can record snapshots from multiple threads safely

    // Will implement in GREEN phase:
    // use pmat::services::dap::execution_recorder::ExecutionRecorder;
    // use pmat::services::dap::recording::Recording;
    // use std::io::Cursor;
    // use std::sync::{Arc, Mutex};
    // use std::thread;
    //
    // let buffer = Cursor::new(Vec::new());
    // let recorder = Arc::new(Mutex::new(
    //     ExecutionRecorder::with_writer(buffer, "test".to_string(), vec![])?
    // ));
    //
    // let mut handles = vec![];
    //
    // // Spawn 10 threads, each recording 10 snapshots
    // for thread_id in 0..10 {
    //     let recorder_clone = Arc::clone(&recorder);
    //     let handle = thread::spawn(move || {
    //         for i in 0..10 {
    //             let snapshot = create_test_snapshot(thread_id * 10 + i);
    //             recorder_clone.lock().unwrap()
    //                 .record_snapshot_to_file(snapshot)
    //                 .unwrap();
    //         }
    //     });
    //     handles.push(handle);
    // }
    //
    // // Wait for all threads
    // for handle in handles {
    //     handle.join().unwrap();
    // }
    //
    // // Finalize and verify
    // let recorder = Arc::try_unwrap(recorder).unwrap().into_inner().unwrap();
    // let bytes = recorder.finalize()?;
    // let recording = Recording::from_bytes(&bytes)?;
    // assert_eq!(recording.snapshot_count(), 100);

    // Requirement: concurrent snapshot recording must be supported safely.
    // Fix: removed `assert!(true, ...)` (clippy::assertions_on_constants);
    // an empty passing body keeps the RED-phase placeholder without the lint.
}

/// Helper: Create test snapshot (GREEN phase implementation)
///
/// Builds a two-frame snapshot whose timestamp and instruction pointer are
/// derived from `frame_id`, so tests can verify sequential ordering.
#[allow(dead_code)]
fn create_test_snapshot(frame_id: u64) -> Snapshot {
    // A few representative variable types: number, string, array.
    let variables: HashMap<String, serde_json::Value> = [
        ("x".to_string(), serde_json::json!(42)),
        ("name".to_string(), serde_json::json!("Alice")),
        ("items".to_string(), serde_json::json!([1, 2, 3])),
    ]
    .into_iter()
    .collect();

    // Frame-local variables shared by both stack frames below.
    let locals: HashMap<String, serde_json::Value> =
        std::iter::once(("local_var".to_string(), serde_json::json!(100))).collect();

    let stack_frames = vec![
        StackFrame {
            name: "main".to_string(),
            file: Some("main.rs".to_string()),
            line: Some(10),
            locals: locals.clone(),
        },
        StackFrame {
            name: "helper_function".to_string(),
            file: Some("utils.rs".to_string()),
            line: Some(23),
            locals,
        },
    ];

    Snapshot {
        frame_id,
        timestamp_relative_ms: (frame_id * 100) as u32,
        variables,
        stack_frames,
        instruction_pointer: 0x401000 + (frame_id * 0x100),
        memory_snapshot: None,
    }
}

/// Helper: Create large test snapshot for error testing
///
/// Produces a snapshot with 1000 variables and a 100KB memory snapshot so
/// tests can exercise write-failure paths (e.g. simulated disk-full writers).
#[allow(dead_code)]
fn create_large_test_snapshot() -> Snapshot {
    // 1000 numbered variables inflate the serialized payload.
    let variables: HashMap<String, serde_json::Value> = (0..1000)
        .map(|i| (format!("var_{}", i), serde_json::json!(i)))
        .collect();

    let stack_frames = vec![StackFrame {
        name: "deep_recursion".to_string(),
        file: Some("recursive.rs".to_string()),
        line: Some(500),
        locals: variables.clone(),
    }];

    Snapshot {
        frame_id: 9999,
        timestamp_relative_ms: 999999,
        variables,
        stack_frames,
        instruction_pointer: 0xFFFFFFFF,
        memory_snapshot: Some(vec![0u8; 100000]), // 100KB memory snapshot
    }
}