// synwire_storage/layout.rs

use crate::{StorageError, WorktreeId};
35use directories::BaseDirs;
36use serde::{Deserialize, Serialize};
37use std::path::{Path, PathBuf};
38
39#[derive(Debug, Clone, Serialize, Deserialize)]
41#[non_exhaustive]
42#[derive(Default)]
43pub struct StorageConfig {
44 pub data_home: Option<PathBuf>,
46 pub cache_home: Option<PathBuf>,
48 pub product_name: Option<String>,
50 pub project_skills_dirname: Option<String>,
52}
53
/// Resolved filesystem layout for all persistent product state.
///
/// Durable state (session/experience databases, skills, logs, daemon files)
/// lives under `data_home`; rebuildable artifacts (indices, graphs, model
/// downloads, repo clones) live under `cache_home`.
#[derive(Debug, Clone)]
pub struct StorageLayout {
    // Root for durable state.
    data_home: PathBuf,
    // Root for rebuildable caches.
    cache_home: PathBuf,
    // Product identifier used to namespace the platform directories.
    product_name: String,
    // Directory name looked up inside project roots (e.g. `.synwire`).
    project_skills_dirname: String,
}
69
70impl StorageLayout {
71 pub fn new(product_name: impl Into<String>) -> Result<Self, StorageError> {
79 let product_name = product_name.into();
80
81 let data_home = if let Ok(val) = std::env::var("SYNWIRE_DATA_DIR") {
83 PathBuf::from(val)
84 } else {
85 let base = BaseDirs::new().ok_or_else(|| StorageError::NotWritable {
86 path: "<platform data dir>".to_owned(),
87 })?;
88 base.data_dir().join(&product_name)
89 };
90
91 let cache_home = if let Ok(val) = std::env::var("SYNWIRE_CACHE_DIR") {
92 PathBuf::from(val)
93 } else {
94 let base = BaseDirs::new().ok_or_else(|| StorageError::NotWritable {
95 path: "<platform cache dir>".to_owned(),
96 })?;
97 base.cache_dir().join(&product_name)
98 };
99
100 let project_skills_dirname = format!(".{product_name}");
101
102 Ok(Self {
103 data_home,
104 cache_home,
105 product_name,
106 project_skills_dirname,
107 })
108 }
109
110 pub fn with_root(root: impl AsRef<Path>, product_name: impl Into<String>) -> Self {
114 let root = root.as_ref();
115 let product_name = product_name.into();
116 let project_skills_dirname = format!(".{product_name}");
117 Self {
118 data_home: root.join("data").join(&product_name),
119 cache_home: root.join("cache").join(&product_name),
120 product_name,
121 project_skills_dirname,
122 }
123 }
124
125 #[must_use]
127 pub fn with_config(mut self, config: &StorageConfig) -> Self {
128 if let Some(d) = &config.data_home {
129 self.data_home.clone_from(d);
130 }
131 if let Some(c) = &config.cache_home {
132 self.cache_home.clone_from(c);
133 }
134 if let Some(p) = &config.product_name {
135 self.product_name.clone_from(p);
136 }
137 if let Some(d) = &config.project_skills_dirname {
138 self.project_skills_dirname.clone_from(d);
139 }
140 self
141 }
142
143 #[must_use]
149 pub fn data_home(&self) -> &Path {
150 &self.data_home
151 }
152
153 #[must_use]
155 pub fn cache_home(&self) -> &Path {
156 &self.cache_home
157 }
158
159 #[must_use]
161 pub fn product_name(&self) -> &str {
162 &self.product_name
163 }
164
165 #[must_use]
167 pub fn session_db(&self, session_id: &str) -> PathBuf {
168 self.data_home
169 .join("sessions")
170 .join(format!("{session_id}.db"))
171 }
172
173 #[must_use]
175 pub fn experience_db(&self, worktree: &WorktreeId) -> PathBuf {
176 self.data_home
177 .join("experience")
178 .join(format!("{}.db", worktree.key()))
179 }
180
181 #[must_use]
183 pub fn skills_dir(&self) -> PathBuf {
184 self.data_home.join("skills")
185 }
186
187 #[must_use]
189 pub fn logs_dir(&self) -> PathBuf {
190 self.data_home.join("logs")
191 }
192
193 #[must_use]
195 pub fn daemon_pid_file(&self) -> PathBuf {
196 self.data_home.join("daemon.pid")
197 }
198
199 #[must_use]
201 pub fn daemon_socket(&self) -> PathBuf {
202 self.data_home.join("daemon.sock")
203 }
204
205 #[must_use]
207 pub fn global_experience_db(&self) -> PathBuf {
208 self.data_home.join("global").join("experience.db")
209 }
210
211 #[must_use]
213 pub fn global_dependency_db(&self) -> PathBuf {
214 self.data_home.join("global").join("dependencies.db")
215 }
216
217 #[must_use]
219 pub fn global_registry(&self) -> PathBuf {
220 self.data_home.join("global").join("registry.json")
221 }
222
223 #[must_use]
225 pub fn global_config(&self) -> PathBuf {
226 self.data_home.join("global").join("config.json")
227 }
228
229 #[must_use]
235 pub fn index_cache(&self, worktree: &WorktreeId) -> PathBuf {
236 self.cache_home.join("indices").join(worktree.key())
237 }
238
239 #[must_use]
241 pub fn graph_dir(&self, worktree: &WorktreeId) -> PathBuf {
242 self.cache_home.join("graphs").join(worktree.key())
243 }
244
245 #[must_use]
247 pub fn communities_dir(&self, worktree: &WorktreeId) -> PathBuf {
248 self.cache_home.join("communities").join(worktree.key())
249 }
250
251 #[must_use]
253 pub fn lsp_cache(&self, worktree: &WorktreeId) -> PathBuf {
254 self.cache_home.join("lsp").join(worktree.key())
255 }
256
257 #[must_use]
259 pub fn models_cache(&self) -> PathBuf {
260 self.cache_home.join("models")
261 }
262
263 #[must_use]
265 pub fn repos_cache(&self) -> PathBuf {
266 self.cache_home.join("repos")
267 }
268
269 #[must_use]
271 pub fn repo_cache(&self, owner: &str, repo: &str) -> PathBuf {
272 self.repos_cache().join(owner).join(repo)
273 }
274
275 pub fn repo_gc(&self, max_age_days: u64) -> Result<Vec<PathBuf>, StorageError> {
285 let repos_root = self.repos_cache();
286 let mut removed = Vec::new();
287
288 let cutoff =
289 std::time::SystemTime::now() - std::time::Duration::from_secs(max_age_days * 86_400);
290
291 let owner_entries = match std::fs::read_dir(&repos_root) {
293 Ok(entries) => entries,
294 Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(removed),
295 Err(e) => return Err(StorageError::from(e)),
296 };
297
298 for owner_entry in owner_entries {
299 let owner_entry = match owner_entry {
300 Ok(e) => e,
301 Err(e) => {
302 tracing::warn!(error = %e, "skipping unreadable owner entry in repos cache");
303 continue;
304 }
305 };
306
307 let owner_path = owner_entry.path();
308 if !owner_path.is_dir() {
309 continue;
310 }
311
312 let repo_entries = match std::fs::read_dir(&owner_path) {
313 Ok(entries) => entries,
314 Err(e) => {
315 tracing::warn!(
316 error = %e,
317 path = %owner_path.display(),
318 "skipping unreadable owner directory"
319 );
320 continue;
321 }
322 };
323
324 for repo_entry in repo_entries {
325 let repo_entry = match repo_entry {
326 Ok(e) => e,
327 Err(e) => {
328 tracing::warn!(error = %e, "skipping unreadable repo entry");
329 continue;
330 }
331 };
332
333 let repo_path = repo_entry.path();
334 if !repo_path.is_dir() {
335 continue;
336 }
337
338 let modified = match std::fs::metadata(&repo_path).and_then(|m| m.modified()) {
339 Ok(t) => t,
340 Err(e) => {
341 tracing::warn!(
342 error = %e,
343 path = %repo_path.display(),
344 "skipping repo with unreadable metadata"
345 );
346 continue;
347 }
348 };
349
350 if modified < cutoff {
351 if let Err(e) = std::fs::remove_dir_all(&repo_path) {
352 tracing::warn!(
353 error = %e,
354 path = %repo_path.display(),
355 "failed to remove stale repo cache"
356 );
357 continue;
358 }
359 removed.push(repo_path);
360 }
361 }
362 }
363
364 Ok(removed)
365 }
366
367 #[must_use]
373 pub fn project_skills_dirname(&self) -> &str {
374 &self.project_skills_dirname
375 }
376
377 pub fn ensure_dir(&self, path: &Path) -> Result<(), StorageError> {
384 std::fs::create_dir_all(path)?;
385
386 #[cfg(unix)]
387 {
388 use std::os::unix::fs::PermissionsExt;
389 let mut perms = std::fs::metadata(path)?.permissions();
390 perms.set_mode(0o700);
391 std::fs::set_permissions(path, perms)?;
392 }
393
394 Ok(())
395 }
396
397 pub fn load_project_config(
406 &self,
407 project_root: &Path,
408 ) -> Result<Option<StorageConfig>, StorageError> {
409 let config_path = project_root
410 .join(&self.project_skills_dirname)
411 .join("config.json");
412 if !config_path.exists() {
413 return Ok(None);
414 }
415 let data = std::fs::read_to_string(&config_path)?;
416 let cfg: StorageConfig =
417 serde_json::from_str(&data).map_err(|e| StorageError::InvalidConfig {
418 path: config_path.display().to_string(),
419 reason: e.to_string(),
420 })?;
421 Ok(Some(cfg))
422 }
423}
424
#[cfg(test)]
#[allow(clippy::expect_used, clippy::unwrap_used)]
mod tests {
    use super::*;
    use crate::WorktreeId;
    use tempfile::tempdir;

    /// Builds a layout rooted in a fresh temp dir; the returned guard must
    /// outlive the layout so the directory is not deleted mid-test.
    fn test_layout() -> (StorageLayout, tempfile::TempDir) {
        let dir = tempdir().expect("tempdir");
        (StorageLayout::with_root(dir.path(), "synwire"), dir)
    }

    /// Fixed worktree identity used across path tests.
    fn dummy_worktree() -> WorktreeId {
        use crate::identity::RepoId;
        let repo = RepoId::from_string("abc123");
        WorktreeId::from_parts(repo, "def456789012".to_owned(), "myrepo@main".to_owned())
    }

    #[test]
    fn layout_data_paths_are_distinct() {
        let (layout, _dir) = test_layout();
        // Data and cache must never alias each other.
        assert_ne!(layout.data_home(), layout.cache_home());
    }

    #[test]
    fn session_db_has_db_extension() {
        let (layout, _dir) = test_layout();
        let db_path = layout.session_db("sess-001");
        assert!(db_path.to_string_lossy().ends_with(".db"));
    }

    #[test]
    fn index_cache_contains_worktree_key() {
        let (layout, _dir) = test_layout();
        let worktree = dummy_worktree();
        let cache_path = layout.index_cache(&worktree);
        assert!(cache_path.to_string_lossy().contains(&worktree.key()));
    }

    #[test]
    fn two_products_have_isolated_paths() {
        let dir = tempdir().expect("tempdir");
        let first = StorageLayout::with_root(dir.path(), "product-a");
        let second = StorageLayout::with_root(dir.path(), "product-b");
        assert_ne!(first.data_home(), second.data_home());
        assert_ne!(first.cache_home(), second.cache_home());
    }

    #[test]
    fn repo_cache_path_contains_owner_and_repo() {
        let (layout, _dir) = test_layout();
        let rendered = layout
            .repo_cache("octocat", "hello-world")
            .to_string_lossy()
            .into_owned();
        assert!(rendered.contains("octocat"));
        assert!(rendered.contains("hello-world"));
    }

    #[test]
    fn ensure_dir_creates_directory() {
        let (layout, _dir) = test_layout();
        let target = layout.data_home().join("test-subdir");
        layout.ensure_dir(&target).expect("ensure_dir");
        assert!(target.exists());
    }

    #[test]
    fn load_project_config_returns_none_when_absent() {
        let (layout, dir) = test_layout();
        let loaded = layout
            .load_project_config(dir.path())
            .expect("load_project_config");
        assert!(loaded.is_none());
    }
}