1use serde::{Deserialize, Serialize};
2
/// The payload of a single undoable edit: the text needed to revert it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UndoEntry {
    // Text as it was before the edit — presumably the full pre-edit file
    // contents, given how `UndoRecord` pairs it with a path; TODO confirm
    // against the code that applies undos.
    pub original_text: String,
}
9
/// One entry in the undo log: which file changed, when, and the text
/// required to restore it. Serialized as one JSON object per JSONL line.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UndoRecord {
    // Stored as a plain string (tests use RFC 3339-style values); never
    // parsed or compared by this module.
    pub timestamp: String,
    // Path of the file the edit touched.
    pub file_path: String,
    // The revert payload for this edit.
    pub entry: UndoEntry,
}
18
/// A bounded, ordered log of undo records (oldest first, newest last).
///
/// Holds at most `max_entries` records; `push` evicts the oldest when the
/// cap is exceeded. Persisted to and restored from JSONL text.
pub struct UndoLog {
    // Ordered oldest → newest; all index math below relies on this.
    records: Vec<UndoRecord>,
    // Upper bound on `records.len()`, enforced by `prune`.
    max_entries: usize,
}
24
25impl UndoLog {
26 pub fn new(max_entries: usize) -> Self {
27 Self {
28 records: Vec::new(),
29 max_entries,
30 }
31 }
32
33 pub fn from_jsonl(content: &str, max_entries: usize) -> Self {
35 let records: Vec<UndoRecord> = content
36 .lines()
37 .filter(|line| !line.trim().is_empty())
38 .filter_map(|line| serde_json::from_str(line).ok())
39 .collect();
40
41 Self {
42 records,
43 max_entries,
44 }
45 }
46
47 pub fn to_jsonl(&self) -> String {
49 self.records
50 .iter()
51 .filter_map(|r| serde_json::to_string(r).ok())
52 .collect::<Vec<_>>()
53 .join("\n")
54 + if self.records.is_empty() { "" } else { "\n" }
55 }
56
57 pub fn push(&mut self, record: UndoRecord) {
59 self.records.push(record);
60 self.prune();
61 }
62
63 pub fn pop(&mut self, count: usize) -> Vec<UndoRecord> {
65 let drain_start = self.records.len().saturating_sub(count);
66 self.records.drain(drain_start..).rev().collect()
67 }
68
69 pub fn len(&self) -> usize {
71 self.records.len()
72 }
73
74 pub fn is_empty(&self) -> bool {
75 self.records.is_empty()
76 }
77
78 pub fn recent(&self, count: usize) -> &[UndoRecord] {
80 let start = self.records.len().saturating_sub(count);
81 &self.records[start..]
82 }
83
84 fn prune(&mut self) {
85 if self.records.len() > self.max_entries {
86 let excess = self.records.len() - self.max_entries;
87 self.records.drain(..excess);
88 }
89 }
90}
91
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared fixture: builds a record with a fixed timestamp so tests only
    /// spell out what they actually assert on (path and original text).
    /// Hoisted to the top and used by every test for consistency — the
    /// first three tests previously hand-rolled their own record literals.
    fn record(path: &str, text: &str) -> UndoRecord {
        UndoRecord {
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            file_path: path.to_string(),
            entry: UndoEntry {
                original_text: text.to_string(),
            },
        }
    }

    #[test]
    fn test_push_and_pop() {
        let mut log = UndoLog::new(100);
        log.push(record("test.txt", "hello"));
        assert_eq!(log.len(), 1);
        let popped = log.pop(1);
        assert_eq!(popped.len(), 1);
        assert_eq!(popped[0].file_path, "test.txt");
        assert!(log.is_empty());
    }

    #[test]
    fn test_pruning() {
        let mut log = UndoLog::new(2);
        for i in 0..5 {
            log.push(record(&format!("file{i}.txt"), &format!("content{i}")));
        }
        assert_eq!(log.len(), 2);
    }

    #[test]
    fn test_jsonl_roundtrip() {
        let mut log = UndoLog::new(100);
        log.push(record("test.txt", "original"));
        let jsonl = log.to_jsonl();
        let loaded = UndoLog::from_jsonl(&jsonl, 100);
        assert_eq!(loaded.len(), 1);
    }

    #[test]
    fn pop_zero_is_noop() {
        let mut log = UndoLog::new(10);
        log.push(record("a.txt", "A"));
        log.push(record("b.txt", "B"));
        let popped = log.pop(0);
        assert!(popped.is_empty(), "pop(0) should return empty");
        assert_eq!(log.len(), 2, "pop(0) should not drain the log");
    }

    #[test]
    fn pop_exceeds_length_drains_all() {
        let mut log = UndoLog::new(10);
        log.push(record("a.txt", "A"));
        log.push(record("b.txt", "B"));
        let popped = log.pop(100);
        assert_eq!(popped.len(), 2);
        assert!(log.is_empty());
    }

    #[test]
    fn pop_returns_newest_first() {
        let mut log = UndoLog::new(10);
        log.push(record("a.txt", "first"));
        log.push(record("a.txt", "second"));
        log.push(record("a.txt", "third"));

        let popped = log.pop(3);
        assert_eq!(popped[0].entry.original_text, "third");
        assert_eq!(popped[1].entry.original_text, "second");
        assert_eq!(popped[2].entry.original_text, "first");
    }

    #[test]
    fn prune_drops_oldest_not_newest() {
        let mut log = UndoLog::new(3);
        for i in 0..10 {
            log.push(record(&format!("file_{i}.txt"), &format!("v{i}")));
        }
        assert_eq!(log.len(), 3);
        let latest_three = log.recent(3);
        assert_eq!(latest_three[0].file_path, "file_7.txt");
        assert_eq!(latest_three[1].file_path, "file_8.txt");
        assert_eq!(latest_three[2].file_path, "file_9.txt");
    }

    #[test]
    fn from_jsonl_skips_malformed_lines() {
        let good = serde_json::to_string(&record("a.txt", "A")).unwrap();
        let bad = "{{{ not valid json";
        let also_good = serde_json::to_string(&record("b.txt", "B")).unwrap();
        let mixed = format!("{good}\n{bad}\n{also_good}\n");

        let log = UndoLog::from_jsonl(&mixed, 100);
        assert_eq!(
            log.len(),
            2,
            "malformed lines should be skipped, good ones kept"
        );
    }

    #[test]
    fn from_jsonl_tolerates_blank_and_crlf_lines() {
        let good = serde_json::to_string(&record("a.txt", "A")).unwrap();
        let mixed = format!("\n{good}\r\n\r\n");
        let log = UndoLog::from_jsonl(&mixed, 100);
        assert_eq!(log.len(), 1);
    }

    #[test]
    fn empty_log_jsonl_is_empty_string() {
        let log = UndoLog::new(100);
        assert_eq!(log.to_jsonl(), "");
        let reparsed = UndoLog::from_jsonl("", 100);
        assert!(reparsed.is_empty());
    }

    #[test]
    fn pop_actually_removes_from_log() {
        let mut log = UndoLog::new(10);
        log.push(record("a.txt", "A"));
        log.push(record("b.txt", "B"));
        let _ = log.pop(1);
        assert_eq!(log.len(), 1);
        let remaining = log.recent(1);
        assert_eq!(remaining[0].file_path, "a.txt");
    }
}