//! Cache writer: serializes parsed events into the binary cache file.

use std::fs;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

use anyhow::{Context, Result};

use crate::cache::{decode_events, encode_events};
use crate::event::Event;
use crate::event::parser::parse_lines;
use crate::shard::ShardManager;
13
/// Summary statistics reported after a cache file has been written.
#[derive(Debug, Clone, PartialEq)]
pub struct CacheStats {
    /// Number of events encoded into the cache file.
    pub total_events: usize,
    /// Size of the written cache file in bytes.
    pub file_size_bytes: u64,
    /// Encoded size divided by the estimated JSON source size
    /// (values below 1.0 mean the cache is smaller than the source).
    pub compression_ratio: f64,
}
24
/// Accumulates events in memory and writes them out as a cache file.
#[derive(Debug, Default)]
pub struct CacheWriter {
    /// Events buffered in insertion order until `write_to_file` is called.
    events: Vec<Event>,
}
30
31impl CacheWriter {
32 #[must_use]
34 pub fn new() -> Self {
35 Self::default()
36 }
37
38 pub fn push_event(&mut self, event: &Event) {
40 self.events.push(event.clone());
41 }
42
43 pub fn write_to_file(&self, path: &Path) -> Result<CacheStats> {
51 if let Some(parent) = path.parent() {
52 fs::create_dir_all(parent)
53 .with_context(|| format!("create cache dir {}", parent.display()))?;
54 }
55
56 let created_at_us = now_us();
57 let bytes = encode_events(&self.events, created_at_us)
58 .map_err(|e| anyhow::anyhow!("encode cache events: {e}"))?;
59
60 fs::write(path, &bytes).with_context(|| format!("write cache file {}", path.display()))?;
61
62 let source_bytes = estimated_source_bytes(&self.events);
63 let compression_ratio = if source_bytes == 0 {
64 1.0
65 } else {
66 {
67 let num = u32::try_from(bytes.len()).unwrap_or(u32::MAX);
70 let den = u32::try_from(source_bytes).unwrap_or(u32::MAX);
71 f64::from(num) / f64::from(den)
72 }
73 };
74
75 Ok(CacheStats {
76 total_events: self.events.len(),
77 file_size_bytes: bytes.len() as u64,
78 compression_ratio,
79 })
80 }
81
82 pub fn append_incremental(existing: &Path, new_events: &[Event]) -> Result<CacheStats> {
91 let mut all_events = if existing.exists() {
92 let data = fs::read(existing)
93 .with_context(|| format!("read existing cache {}", existing.display()))?;
94 let (_header, events) =
95 decode_events(&data).map_err(|e| anyhow::anyhow!("decode existing cache: {e}"))?;
96 events
97 } else {
98 Vec::new()
99 };
100
101 all_events.extend_from_slice(new_events);
102
103 let writer = Self { events: all_events };
104 writer.write_to_file(existing)
105 }
106}
107
108pub fn rebuild_cache(events_dir: &Path, cache_path: &Path) -> Result<CacheStats> {
114 let bones_dir = events_dir.parent().unwrap_or_else(|| Path::new("."));
115 let shard_mgr = ShardManager::new(bones_dir);
116
117 let content = shard_mgr
118 .replay()
119 .map_err(|e| anyhow::anyhow!("replay shards: {e}"))?;
120
121 let events = parse_lines(&content)
122 .map_err(|(line, e)| anyhow::anyhow!("parse error at line {line}: {e}"))?;
123
124 let mut writer = CacheWriter::new();
125 for event in &events {
126 writer.push_event(event);
127 }
128
129 writer.write_to_file(cache_path)
130}
131
132fn estimated_source_bytes(events: &[Event]) -> usize {
133 events
134 .iter()
135 .map(|event| serde_json::to_vec(event).map_or(0, |v| v.len() + 1))
136 .sum()
137}
138
/// Current wall-clock time as microseconds since the Unix epoch.
///
/// Returns 0 if the system clock reads earlier than the epoch, and saturates
/// at `u64::MAX` in the (astronomically distant) overflow case.
fn now_us() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => u64::try_from(elapsed.as_micros()).unwrap_or(u64::MAX),
        Err(_) => 0,
    }
}
144
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;

    use tempfile::TempDir;

    use super::*;
    use crate::event::data::{CreateData, EventData, MoveData};
    use crate::event::types::EventType;
    use crate::event::{self, Event};
    use crate::model::item::{Kind, State, Urgency};
    use crate::model::item_id::ItemId;

    /// Builds a minimal event of the requested `kind` with its hash populated.
    fn make_event(item_id: &str, ts: i64, kind: EventType) -> Event {
        let data = match kind {
            EventType::Create => EventData::Create(CreateData {
                title: format!("Item {item_id}"),
                kind: Kind::Task,
                size: None,
                urgency: Urgency::Default,
                labels: Vec::new(),
                parent: None,
                causation: None,
                description: None,
                extra: BTreeMap::new(),
            }),
            // Any non-Create type gets a simple Move payload for these tests.
            _ => EventData::Move(MoveData {
                state: State::Doing,
                reason: None,
                extra: BTreeMap::new(),
            }),
        };

        let mut event = Event {
            wall_ts_us: ts,
            agent: "test-agent".to_string(),
            itc: "itc:AQ".to_string(),
            parents: Vec::new(),
            event_type: kind,
            item_id: ItemId::new_unchecked(item_id),
            data,
            event_hash: String::new(),
        };
        // Fills in `event_hash` as a side effect; the write result itself is
        // irrelevant here.
        let _ = event::writer::write_event(&mut event);
        event
    }

    #[test]
    fn write_to_file_round_trips_events() {
        let tmp = TempDir::new().expect("tempdir");
        let cache_path = tmp.path().join(".bones/cache/events.bin");

        let mut writer = CacheWriter::new();
        writer.push_event(&make_event("bn-a1", 1000, EventType::Create));
        writer.push_event(&make_event("bn-a1", 2000, EventType::Move));

        let stats = writer.write_to_file(&cache_path).expect("write cache");
        assert_eq!(stats.total_events, 2);
        assert!(stats.file_size_bytes > 0);

        // Reading the file back must yield exactly the events we pushed.
        let data = fs::read(cache_path).expect("read cache file");
        let (_header, decoded) = decode_events(&data).expect("decode cache file");
        assert_eq!(decoded.len(), 2);
    }

    #[test]
    fn append_incremental_appends_new_events() {
        let tmp = TempDir::new().expect("tempdir");
        let cache_path = tmp.path().join(".bones/cache/events.bin");

        // Seed the cache with one event so the append path hits the
        // "existing file" branch.
        let mut writer = CacheWriter::new();
        writer.push_event(&make_event("bn-a1", 1000, EventType::Create));
        writer.write_to_file(&cache_path).expect("seed cache");

        let new_events = vec![make_event("bn-a2", 2000, EventType::Create)];
        let stats = CacheWriter::append_incremental(&cache_path, &new_events)
            .expect("append cache incrementally");

        assert_eq!(stats.total_events, 2);

        let data = fs::read(cache_path).expect("read cache file");
        let (_header, decoded) = decode_events(&data).expect("decode cache file");
        assert_eq!(decoded.len(), 2);
    }

    #[test]
    fn rebuild_cache_reads_events_shards() {
        let tmp = TempDir::new().expect("tempdir");
        let bones_dir = tmp.path().join(".bones");
        let shard_mgr = ShardManager::new(&bones_dir);
        shard_mgr.ensure_dirs().expect("ensure dirs");
        shard_mgr.init().expect("init");

        // Write one event line directly into the active shard so rebuild has
        // something to replay. (Previously `event` was `mut` only to clear its
        // hash right before drop — a dead mutation, now removed.)
        let event = make_event("bn-z9", 42, EventType::Create);
        let line = event::writer::write_line(&event).expect("line");
        let (year, month) = shard_mgr
            .active_shard()
            .expect("active shard")
            .expect("active shard value");
        shard_mgr
            .append_raw(year, month, &line)
            .expect("append event");

        let events_dir = bones_dir.join("events");
        let cache_path = bones_dir.join("cache/events.bin");
        let stats = rebuild_cache(&events_dir, &cache_path).expect("rebuild cache");

        assert_eq!(stats.total_events, 1);
        assert!(cache_path.exists());
    }
}