//! ripsed_core/undo.rs — persistent, bounded undo log with JSONL serialization.
1use serde::{Deserialize, Serialize};
2
/// An entry in the undo log, storing enough information to reverse an operation.
///
/// NOTE(review): this is a full-text snapshot, not a diff — per-operation
/// memory/disk cost scales with the size of the edited file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UndoEntry {
    /// The full original text before the operation. Undo restores this verbatim.
    pub original_text: String,
}
9
/// A record in the persistent undo log file (.ripsed/undo.jsonl).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UndoRecord {
    /// Timestamp of the operation, stored as an opaque string. The tests in
    /// this file use RFC 3339 (e.g. `"2026-01-01T00:00:00Z"`), not epoch
    /// seconds — this module never parses or compares it, only stores it.
    pub timestamp: String,
    /// Path of the file the operation modified (as the caller supplied it).
    pub file_path: String,
    /// Snapshot needed to reverse the operation.
    pub entry: UndoEntry,
}
18
/// Manages the undo log: an in-memory, bounded sequence of [`UndoRecord`]s
/// with JSONL (one JSON object per line) load/save helpers.
pub struct UndoLog {
    // Insertion order: oldest first, newest last.
    records: Vec<UndoRecord>,
    // Capacity cap; exceeding it drops the OLDEST records (see `prune`).
    max_entries: usize,
}
24
25impl UndoLog {
26    pub fn new(max_entries: usize) -> Self {
27        Self {
28            records: Vec::new(),
29            max_entries,
30        }
31    }
32
33    /// Load undo log from JSONL content.
34    pub fn from_jsonl(content: &str, max_entries: usize) -> Self {
35        let records: Vec<UndoRecord> = content
36            .lines()
37            .filter(|line| !line.trim().is_empty())
38            .filter_map(|line| serde_json::from_str(line).ok())
39            .collect();
40
41        Self {
42            records,
43            max_entries,
44        }
45    }
46
47    /// Serialize the log to JSONL format.
48    pub fn to_jsonl(&self) -> String {
49        self.records
50            .iter()
51            .filter_map(|r| serde_json::to_string(r).ok())
52            .collect::<Vec<_>>()
53            .join("\n")
54            + if self.records.is_empty() { "" } else { "\n" }
55    }
56
57    /// Append a new undo record.
58    pub fn push(&mut self, record: UndoRecord) {
59        self.records.push(record);
60        self.prune();
61    }
62
63    /// Remove the last N records and return them (for undo).
64    pub fn pop(&mut self, count: usize) -> Vec<UndoRecord> {
65        let drain_start = self.records.len().saturating_sub(count);
66        self.records.drain(drain_start..).rev().collect()
67    }
68
69    /// Number of entries in the log.
70    pub fn len(&self) -> usize {
71        self.records.len()
72    }
73
74    pub fn is_empty(&self) -> bool {
75        self.records.is_empty()
76    }
77
78    /// Get recent entries for display.
79    pub fn recent(&self, count: usize) -> &[UndoRecord] {
80        let start = self.records.len().saturating_sub(count);
81        &self.records[start..]
82    }
83
84    fn prune(&mut self) {
85        if self.records.len() > self.max_entries {
86            let excess = self.records.len() - self.max_entries;
87            self.records.drain(..excess);
88        }
89    }
90}
91
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_push_and_pop() {
        let mut log = UndoLog::new(100);
        let rec = UndoRecord {
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            file_path: "test.txt".to_string(),
            entry: UndoEntry {
                original_text: "hello".to_string(),
            },
        };
        log.push(rec);
        assert_eq!(log.len(), 1);

        let popped = log.pop(1);
        assert_eq!(popped.len(), 1);
        assert_eq!(popped[0].file_path, "test.txt");
        assert!(log.is_empty());
    }

    #[test]
    fn test_pruning() {
        let mut log = UndoLog::new(2);
        for i in 0..5 {
            let rec = UndoRecord {
                timestamp: format!("2026-01-0{i}T00:00:00Z"),
                file_path: format!("file{i}.txt"),
                entry: UndoEntry {
                    original_text: format!("content{i}"),
                },
            };
            log.push(rec);
        }
        assert_eq!(log.len(), 2);
    }

    #[test]
    fn test_jsonl_roundtrip() {
        let mut log = UndoLog::new(100);
        log.push(UndoRecord {
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            file_path: "test.txt".to_string(),
            entry: UndoEntry {
                original_text: "original".to_string(),
            },
        });
        let reloaded = UndoLog::from_jsonl(&log.to_jsonl(), 100);
        assert_eq!(reloaded.len(), 1);
    }

    // ---- Adversarial edge-case tests ----

    /// Build a throwaway record with a fixed timestamp.
    fn make_record(path: &str, text: &str) -> UndoRecord {
        UndoRecord {
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            file_path: path.to_string(),
            entry: UndoEntry {
                original_text: text.to_string(),
            },
        }
    }

    /// **Adversarial**: `pop(0)` must be a pure no-op — empty return, log
    /// untouched. Callers compute dynamic counts and may legitimately ask
    /// to roll back zero operations; a buggy drain range could wipe the log.
    #[test]
    fn pop_zero_is_noop() {
        let mut log = UndoLog::new(10);
        log.push(make_record("a.txt", "A"));
        log.push(make_record("b.txt", "B"));

        let popped = log.pop(0);
        assert!(popped.is_empty(), "pop(0) should return empty");
        assert_eq!(log.len(), 2, "pop(0) should not drain the log");
    }

    /// **Adversarial**: asking for more records than exist must drain them
    /// all without panicking (saturating subtraction on the split point).
    #[test]
    fn pop_exceeds_length_drains_all() {
        let mut log = UndoLog::new(10);
        log.push(make_record("a.txt", "A"));
        log.push(make_record("b.txt", "B"));

        let popped = log.pop(100);
        assert_eq!(popped.len(), 2);
        assert!(log.is_empty());
    }

    /// **Adversarial**: `pop` returns records newest-first. Undo replays in
    /// that order to correctly reverse a sequence — ordering is critical when
    /// the same file was edited several times.
    #[test]
    fn pop_returns_newest_first() {
        let mut log = UndoLog::new(10);
        for text in ["first", "second", "third"] {
            log.push(make_record("a.txt", text));
        }

        let popped = log.pop(3);
        let texts: Vec<&str> = popped
            .iter()
            .map(|r| r.entry.original_text.as_str())
            .collect();
        assert_eq!(texts, ["third", "second", "first"]);
    }

    /// **Adversarial**: pruning past `max_entries` must evict the OLDEST
    /// records and keep the newest N — the newest are the most likely to be
    /// undone, and a buggy drain could drop them instead.
    #[test]
    fn prune_drops_oldest_not_newest() {
        let mut log = UndoLog::new(3);
        for i in 0..10 {
            log.push(make_record(&format!("file_{i}.txt"), &format!("v{i}")));
        }
        assert_eq!(log.len(), 3);

        // Survivors should be files 7, 8, 9 in insertion order.
        let kept = log.recent(3);
        let paths: Vec<&str> = kept.iter().map(|r| r.file_path.as_str()).collect();
        assert_eq!(paths, ["file_7.txt", "file_8.txt", "file_9.txt"]);
    }

    /// **Adversarial**: a single corrupt line must not brick the whole log —
    /// `from_jsonl` skips malformed lines and keeps every good one.
    #[test]
    fn from_jsonl_skips_malformed_lines() {
        let first = serde_json::to_string(&make_record("a.txt", "A")).unwrap();
        let second = serde_json::to_string(&make_record("b.txt", "B")).unwrap();
        let corrupt = "{{{ not valid json";
        let mixed = format!("{first}\n{corrupt}\n{second}\n");

        let log = UndoLog::from_jsonl(&mixed, 100);
        assert_eq!(
            log.len(),
            2,
            "malformed lines should be skipped, good ones kept"
        );
    }

    /// **Adversarial**: blank lines, CRLF endings, and stray whitespace must
    /// neither crash parsing nor inflate the record count.
    #[test]
    fn from_jsonl_tolerates_blank_and_crlf_lines() {
        let json = serde_json::to_string(&make_record("a.txt", "A")).unwrap();
        let noisy = format!("\n{json}\r\n\r\n");
        assert_eq!(UndoLog::from_jsonl(&noisy, 100).len(), 1);
    }

    /// **Adversarial**: an empty log serializes to the empty string — not a
    /// lone newline, which would look like a truncated log file — and the
    /// round-trip through `from_jsonl` preserves emptiness.
    #[test]
    fn empty_log_jsonl_is_empty_string() {
        let log = UndoLog::new(100);
        assert_eq!(log.to_jsonl(), "");
        assert!(UndoLog::from_jsonl("", 100).is_empty());
    }

    /// **Adversarial**: popped records must actually be removed, so that
    /// later `len()` / `recent()` calls behave as if they were never pushed.
    /// A bug could flag records as popped without deleting them.
    #[test]
    fn pop_actually_removes_from_log() {
        let mut log = UndoLog::new(10);
        log.push(make_record("a.txt", "A"));
        log.push(make_record("b.txt", "B"));

        let _ = log.pop(1);
        assert_eq!(log.len(), 1);
        assert_eq!(log.recent(1)[0].file_path, "a.txt");
    }
}
271}