1use crate::constants::{default_lock_timeout, pending_state_timeout};
29use crate::core::error::AgpmError;
30use crate::core::file_error::{FileOperation, FileResultExt};
31use crate::git::GitRepo;
32use crate::git::command_builder::GitCommand;
33use crate::utils::fs;
34use crate::utils::security::validate_path_security;
35use anyhow::{Context, Result};
36use dashmap::DashMap;
37use serde::{Deserialize, Serialize};
38use std::collections::{HashMap, HashSet};
39use std::path::{Path, PathBuf};
40use std::sync::Arc;
41use std::time::{Duration, SystemTime, UNIX_EPOCH};
42use tokio::fs as async_fs;
43use tokio::sync::{Mutex, MutexGuard, RwLock};
44
45async fn acquire_mutex_with_timeout<'a, T>(
48 mutex: &'a Mutex<T>,
49 name: &str,
50) -> Result<MutexGuard<'a, T>> {
51 let timeout = default_lock_timeout();
52 match tokio::time::timeout(timeout, mutex.lock()).await {
53 Ok(guard) => Ok(guard),
54 Err(_) => {
55 eprintln!("[DEADLOCK] Timeout waiting for mutex '{}' after {:?}", name, timeout);
56 anyhow::bail!(
57 "Timeout waiting for mutex '{}' after {:?} - possible deadlock",
58 name,
59 timeout
60 )
61 }
62 }
63}
64
65async fn acquire_rwlock_read_with_timeout<'a, T>(
67 rwlock: &'a RwLock<T>,
68 name: &str,
69) -> Result<tokio::sync::RwLockReadGuard<'a, T>> {
70 let timeout = default_lock_timeout();
71 match tokio::time::timeout(timeout, rwlock.read()).await {
72 Ok(guard) => Ok(guard),
73 Err(_) => {
74 eprintln!("[DEADLOCK] Timeout waiting for RwLock read '{}' after {:?}", name, timeout);
75 anyhow::bail!(
76 "Timeout waiting for RwLock read '{}' after {:?} - possible deadlock",
77 name,
78 timeout
79 )
80 }
81 }
82}
83
84async fn acquire_rwlock_write_with_timeout<'a, T>(
86 rwlock: &'a RwLock<T>,
87 name: &str,
88) -> Result<tokio::sync::RwLockWriteGuard<'a, T>> {
89 let timeout = default_lock_timeout();
90 match tokio::time::timeout(timeout, rwlock.write()).await {
91 Ok(guard) => Ok(guard),
92 Err(_) => {
93 eprintln!("[DEADLOCK] Timeout waiting for RwLock write '{}' after {:?}", name, timeout);
94 anyhow::bail!(
95 "Timeout waiting for RwLock write '{}' after {:?} - possible deadlock",
96 name,
97 timeout
98 )
99 }
100 }
101}
102
/// In-process lifecycle state of a single worktree cache entry: either some
/// task is currently creating it (`Pending`) or it is available at a path.
#[derive(Debug, Clone)]
enum WorktreeState {
    /// Creation is in flight; other tasks wait on the contained `Notify`
    /// until the creating task flips the entry to `Ready` (or removes it).
    Pending(Arc<tokio::sync::Notify>),
    /// Worktree was created at this path. NOTE: callers still re-check
    /// `exists()` before reuse, so the path may have been deleted on disk.
    Ready(PathBuf),
}
121
122fn extract_notify_handle(
124 cache: &DashMap<String, WorktreeState>,
125 key: &str,
126) -> Option<Arc<tokio::sync::Notify>> {
127 cache.get(key).and_then(|entry| {
128 if let WorktreeState::Pending(n) = entry.value() {
129 Some(n.clone())
130 } else {
131 None
132 }
133 })
134}
135
/// Persisted bookkeeping for worktrees, serialized as JSON on disk
/// (see `Cache::registry_path_for`: `<cache>/worktrees/.state.json`).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct WorktreeRegistry {
    // Keyed by the same cache key used for the in-process worktree cache.
    entries: HashMap<String, WorktreeRecord>,
}
140
/// One persisted worktree entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct WorktreeRecord {
    // Source identifier. NOTE(review): `record_worktree_usage` stores the
    // source *name* here, while `cleanup_worktree` parses this field as a git
    // URL — verify at call sites that name and URL coincide.
    source: String,
    // Version key; callers in this file pass the short (8-char) SHA.
    version: String,
    // Path of the worktree directory on disk.
    path: PathBuf,
    // UNIX timestamp (seconds) of the last recorded use.
    last_used: u64,
}
148
149impl WorktreeRegistry {
150 fn load(path: &Path) -> Self {
151 match std::fs::read(path) {
152 Ok(data) => serde_json::from_slice(&data).unwrap_or_default(),
153 Err(err) if err.kind() == std::io::ErrorKind::NotFound => Self::default(),
154 Err(err) => {
155 tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
156 Self::default()
157 }
158 }
159 }
160
161 fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
162 let timestamp = SystemTime::now()
163 .duration_since(UNIX_EPOCH)
164 .unwrap_or_else(|_| Duration::from_secs(0))
165 .as_secs();
166
167 self.entries.insert(
168 key,
169 WorktreeRecord {
170 source,
171 version,
172 path,
173 last_used: timestamp,
174 },
175 );
176 }
177
178 fn remove_by_path(&mut self, target: &Path) -> bool {
179 if let Some(key) = self.entries.iter().find_map(|(k, record)| {
180 if record.path == target {
181 Some(k.clone())
182 } else {
183 None
184 }
185 }) {
186 self.entries.remove(&key);
187 true
188 } else {
189 false
190 }
191 }
192
193 fn get_source_by_path(&self, target: &Path) -> Option<String> {
197 self.entries
198 .values()
199 .find(|record| record.path == target)
200 .map(|record| record.source.clone())
201 }
202
203 async fn persist(&self, path: &Path) -> Result<()> {
204 if let Some(parent) = path.parent() {
205 async_fs::create_dir_all(parent).await?;
206 }
207
208 let data = serde_json::to_vec_pretty(self)?;
209 async_fs::write(path, data).await?;
210 Ok(())
211 }
212}
213
214pub mod lock;
220pub use lock::CacheLock;
221
/// Shared cache of bare git repositories and SHA-pinned worktrees.
///
/// Cloning a `Cache` is cheap: all interior state is behind `Arc`s, so
/// clones observe and mutate the same underlying maps and registry.
pub struct Cache {
    // Root directory: bare repos live under `sources/`, worktrees under
    // `worktrees/`, plus lock files and the registry state file.
    dir: PathBuf,
    // In-process map of cache key -> worktree creation state, used to dedupe
    // concurrent worktree creation within a single command.
    worktree_cache: Arc<DashMap<String, WorktreeState>>,
    // NOTE(review): not exercised in this chunk — presumably per-repo
    // mutexes serializing fetches; confirm against the rest of the file.
    fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,
    // Bare repo paths already fetched during this command, so repeated
    // resolutions skip redundant `git fetch` calls.
    fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,
    // Persisted worktree registry (mirrors `<cache>/worktrees/.state.json`).
    worktree_registry: Arc<Mutex<WorktreeRegistry>>,
}
239
240impl Clone for Cache {
241 fn clone(&self) -> Self {
242 Self {
243 dir: self.dir.clone(),
244 worktree_cache: Arc::clone(&self.worktree_cache),
245 fetch_locks: Arc::clone(&self.fetch_locks),
246 fetched_repos: Arc::clone(&self.fetched_repos),
247 worktree_registry: Arc::clone(&self.worktree_registry),
248 }
249 }
250}
251
252impl Cache {
253 fn registry_path_for(cache_dir: &Path) -> PathBuf {
254 cache_dir.join("worktrees").join(".state.json")
255 }
256
257 fn registry_path(&self) -> PathBuf {
258 Self::registry_path_for(&self.dir)
259 }
260
261 async fn record_worktree_usage(
262 &self,
263 registry_key: &str,
264 source_name: &str,
265 version_key: &str,
266 worktree_path: &Path,
267 ) -> Result<()> {
268 let mut registry =
269 acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
270 registry.update(
271 registry_key.to_string(),
272 source_name.to_string(),
273 version_key.to_string(),
274 worktree_path.to_path_buf(),
275 );
276 registry.persist(&self.registry_path()).await?;
277 Ok(())
278 }
279
280 async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
281 let mut registry =
282 acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
283 if registry.remove_by_path(worktree_path) {
284 registry.persist(&self.registry_path()).await?;
285 }
286 Ok(())
287 }
288
289 async fn configure_connection_pooling(path: &Path) -> Result<()> {
290 let commands = [
291 ("http.version", "HTTP/2"),
292 ("http.postBuffer", "524288000"),
293 ("core.compression", "0"),
294 ];
295
296 for (key, value) in commands {
297 GitCommand::new()
298 .args(["config", key, value])
299 .current_dir(path)
300 .execute_success()
301 .await
302 .ok();
303 }
304
305 Ok(())
306 }
307
308 pub fn new() -> Result<Self> {
313 let dir = crate::config::get_cache_dir()?;
314 let registry_path = Self::registry_path_for(&dir);
315 let registry = WorktreeRegistry::load(®istry_path);
316 Ok(Self {
317 dir,
318 worktree_cache: Arc::new(DashMap::new()),
319 fetch_locks: Arc::new(DashMap::new()),
320 fetched_repos: Arc::new(RwLock::new(HashSet::new())),
321 worktree_registry: Arc::new(Mutex::new(registry)),
322 })
323 }
324
325 pub fn with_dir(dir: PathBuf) -> Result<Self> {
327 let registry_path = Self::registry_path_for(&dir);
328 let registry = WorktreeRegistry::load(®istry_path);
329 Ok(Self {
330 dir,
331 worktree_cache: Arc::new(DashMap::new()),
332 fetch_locks: Arc::new(DashMap::new()),
333 fetched_repos: Arc::new(RwLock::new(HashSet::new())),
334 worktree_registry: Arc::new(Mutex::new(registry)),
335 })
336 }
337
338 pub async fn ensure_cache_dir(&self) -> Result<()> {
340 if !self.dir.exists() {
341 async_fs::create_dir_all(&self.dir).await.with_file_context(
342 FileOperation::CreateDir,
343 &self.dir,
344 "creating cache directory",
345 "cache::ensure_cache_dir",
346 )?;
347 }
348 Ok(())
349 }
350
    /// Borrow the cache root directory path.
    #[must_use]
    pub fn cache_dir(&self) -> &Path {
        &self.dir
    }
356
357 pub fn get_worktree_path(&self, url: &str, sha: &str) -> Result<PathBuf> {
359 let (owner, repo) =
360 crate::git::parse_git_url(url).map_err(|e| anyhow::anyhow!("Invalid Git URL: {e}"))?;
361 let sha_short = &sha[..8.min(sha.len())];
362 Ok(self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}")))
363 }
364
    /// Resolve a source to a local directory.
    ///
    /// Thin public wrapper over [`Self::get_or_clone_source_impl`], which
    /// canonicalizes local paths and clones/fetches remote URLs into the
    /// cache's `sources/` directory.
    pub async fn get_or_clone_source(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        self.get_or_clone_source_impl(name, url, version).await
    }
383
    /// Remove a single worktree: detach it from its bare repository (when the
    /// registry knows which one), delete the directory, and drop its registry
    /// record. Best-effort where possible; only directory removal is fatal.
    pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
        // Nothing on disk — nothing to do.
        if !worktree_path.exists() {
            return Ok(());
        }

        // Look up the owning source under the registry mutex, then release it
        // before any git/file work so we never hold it across slow I/O.
        let source_url = {
            let registry =
                acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
            registry.get_source_by_path(worktree_path)
        };

        if let Some(url) = source_url {
            if let Ok((owner, repo)) = crate::git::parse_git_url(&url) {
                let bare_repo_path = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
                if bare_repo_path.exists() {
                    // Exclusive lock: `git worktree remove` mutates the bare
                    // repo's worktree metadata.
                    let bare_repo_worktree_lock_name = format!("bare-worktree-{owner}_{repo}");
                    let _bare_worktree_lock =
                        CacheLock::acquire(&self.dir, &bare_repo_worktree_lock_name).await?;

                    // Best-effort: fall through to plain directory removal
                    // below if git refuses.
                    let repo = GitRepo::new(&bare_repo_path);
                    let _ = repo.remove_worktree(worktree_path).await;
                }
            }
        }

        // Re-check: git may already have removed the directory above.
        if worktree_path.exists() {
            tokio::fs::remove_dir_all(worktree_path).await.with_file_context(
                FileOperation::Write,
                worktree_path,
                "removing worktree directory",
                "cache::cleanup_worktree",
            )?;
        }

        // Finally forget the worktree in the persisted registry.
        self.remove_worktree_record_by_path(worktree_path).await?;
        Ok(())
    }
433
434 pub async fn cleanup_all_worktrees(&self) -> Result<()> {
436 let worktrees_dir = self.dir.join("worktrees");
437
438 if !worktrees_dir.exists() {
439 return Ok(());
440 }
441
442 tokio::fs::remove_dir_all(&worktrees_dir).await.with_file_context(
444 FileOperation::Write,
445 &worktrees_dir,
446 "cleaning up worktrees directory",
447 "cache_module",
448 )?;
449
450 let sources_dir = self.dir.join("sources");
452 if sources_dir.exists() {
453 let mut entries = tokio::fs::read_dir(&sources_dir).await.with_file_context(
454 FileOperation::Read,
455 &sources_dir,
456 "reading sources directory",
457 "cache_module",
458 )?;
459 while let Some(entry) = entries.next_entry().await? {
460 let path = entry.path();
461 if path.extension().and_then(|s| s.to_str()) == Some("git") {
462 let bare_repo = GitRepo::new(&path);
463 bare_repo.prune_worktrees().await.ok();
464 }
465 }
466 }
467
468 {
469 let mut registry =
470 acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
471 if !registry.entries.is_empty() {
472 registry.entries.clear();
473 registry.persist(&self.registry_path()).await?;
474 }
475 }
476
477 Ok(())
478 }
479
    /// Get (or create) a worktree pinned to an exact 40-char commit SHA.
    ///
    /// Concurrency protocol: an in-process `DashMap` entry per cache key holds
    /// either `Pending(Notify)` (someone is creating it — wait) or
    /// `Ready(path)` (reuse if it still exists on disk). Cross-process safety
    /// comes from `CacheLock` file locks. The ordering of entry inspection,
    /// `drop(entry)` calls, inserts, and `notify_waiters()` is load-bearing —
    /// do not reorder.
    #[allow(clippy::too_many_lines)]
    pub async fn get_or_create_worktree_for_sha(
        &self,
        name: &str,
        url: &str,
        sha: &str,
        context: Option<&str>,
    ) -> Result<PathBuf> {
        // Reject anything that is not a full 40-hex-digit SHA up front.
        if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
            return Err(anyhow::anyhow!(
                "Invalid SHA format: expected 40 hex characters, got '{sha}'"
            ));
        }

        // Local directories are used in place — no worktree machinery.
        let is_local_path = crate::utils::is_local_path(url);
        if is_local_path {
            return self.get_or_clone_source(name, url, None).await;
        }

        self.ensure_cache_dir().await?;

        // Fall back to a generic owner/repo when the URL doesn't parse, so
        // direct URLs still get stable (if less descriptive) paths.
        let (owner, repo) =
            crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));

        let bare_repo_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
        let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");

        // Safe: validated above as 40 ASCII hex chars, so byte slicing is OK.
        let sha_short = &sha[..8];
        // Hash the cache root into the key so distinct cache dirs (e.g. in
        // tests) never collide in the process-wide state.
        let cache_dir_hash = {
            use std::collections::hash_map::DefaultHasher;
            use std::hash::{Hash, Hasher};
            let mut hasher = DefaultHasher::new();
            self.dir.hash(&mut hasher);
            format!("{:x}", hasher.finish())[..8].to_string()
        };
        let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");

        let pending_timeout = pending_state_timeout();

        // Claim-or-wait loop: exit via `break` only once WE own a Pending
        // entry (and are therefore responsible for creating the worktree).
        loop {
            match self.worktree_cache.entry(cache_key.clone()) {
                dashmap::mapref::entry::Entry::Occupied(entry) => {
                    match entry.get() {
                        // Ready and still on disk: reuse it directly.
                        WorktreeState::Ready(cached_path) if cached_path.exists() => {
                            let cached_path = cached_path.clone();
                            // Drop the entry guard before any await to avoid
                            // holding a DashMap shard lock across awaits.
                            drop(entry);

                            self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
                                .await?;

                            if let Some(ctx) = context {
                                tracing::debug!(
                                    target: "git",
                                    "({}) Reusing SHA-based worktree for {} @ {}",
                                    ctx,
                                    url.split('/').next_back().unwrap_or(url),
                                    sha_short
                                );
                            }
                            return Ok(cached_path);
                        }
                        // Ready but deleted on disk: take ownership and
                        // recreate by flipping the entry to Pending.
                        WorktreeState::Ready(_cached_path) => {
                            let notify = Arc::new(tokio::sync::Notify::new());
                            drop(entry);
                            self.worktree_cache
                                .insert(cache_key.clone(), WorktreeState::Pending(notify));
                            break;
                        }
                        // Someone else is creating it: wait for their signal,
                        // with a timeout guard against a stuck/dead creator.
                        WorktreeState::Pending(existing_notify) => {
                            let existing_notify = existing_notify.clone();

                            // Register interest BEFORE dropping the entry so a
                            // notify between drop and await is not lost.
                            let notified_future = existing_notify.notified();
                            drop(entry);

                            if let Some(ctx) = context {
                                tracing::debug!(
                                    target: "git",
                                    "({}) Waiting for SHA worktree creation for {} @ {}",
                                    ctx,
                                    url.split('/').next_back().unwrap_or(url),
                                    sha_short
                                );
                            }

                            tokio::select! {
                                _ = notified_future => {
                                    // Creator finished (or failed): re-inspect
                                    // the entry from the top of the loop.
                                    continue;
                                }
                                _ = tokio::time::sleep(pending_timeout) => {
                                    // Creator presumed stuck: steal ownership
                                    // by installing OUR Pending entry, and wake
                                    // any other waiters so they re-check too.
                                    let our_notify = Arc::new(tokio::sync::Notify::new());
                                    self.worktree_cache
                                        .insert(cache_key.clone(), WorktreeState::Pending(our_notify));

                                    existing_notify.notify_waiters();
                                    tracing::warn!(
                                        target: "git",
                                        "Timeout waiting for worktree creation for {} @ {} - taking ownership",
                                        url.split('/').next_back().unwrap_or(url),
                                        sha_short
                                    );
                                    break;
                                }
                            }
                        }
                    }
                }
                // No entry yet: claim it and proceed to create.
                dashmap::mapref::entry::Entry::Vacant(entry) => {
                    let notify = Arc::new(tokio::sync::Notify::new());
                    entry.insert(WorktreeState::Pending(notify));
                    break;
                }
            }
        }

        // Clones used by the failure-cleanup path after the async block.
        let worktree_cache = self.worktree_cache.clone();
        let cache_key_for_cleanup = cache_key.clone();

        // All fallible creation work happens inside this block so that any
        // error takes the single cleanup path at the bottom.
        let result: Result<PathBuf> = async {
            tracing::debug!(
                target: "git::worktree",
                "Starting worktree creation for {} @ {} (cache_key={})",
                url.split('/').next_back().unwrap_or(url),
                sha_short,
                cache_key
            );

            // Step 1: make sure the bare repository exists (clone if needed),
            // with a double-checked file lock around the clone.
            if !bare_repo_dir.exists() {
                tracing::debug!(
                    target: "git",
                    "Bare repo does not exist, acquiring lock to clone: {}",
                    bare_repo_dir.display()
                );

                let bare_repo_lock = CacheLock::acquire(&self.dir, &bare_repo_lock_name).await?;

                // Re-check under the lock: another process may have cloned.
                if !bare_repo_dir.exists() {
                    if let Some(parent) = bare_repo_dir.parent() {
                        tokio::fs::create_dir_all(parent).await.with_file_context(
                            FileOperation::CreateDir,
                            parent,
                            "creating cache parent directory",
                            "cache_module",
                        )?;
                    }

                    if let Some(ctx) = context {
                        tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
                    } else {
                        tracing::debug!("📦 Cloning repository {url} to cache...");
                    }

                    // Outer `?` unwraps the timeout, inner `?` the clone result.
                    tokio::time::timeout(
                        crate::constants::GIT_CLONE_TIMEOUT,
                        GitRepo::clone_bare_with_context(url, &bare_repo_dir, context),
                    )
                    .await
                    .map_err(|_| {
                        anyhow::anyhow!(
                            "Git clone operation timed out after {:?} for {}",
                            crate::constants::GIT_CLONE_TIMEOUT,
                            url
                        )
                    })??;

                    // Best-effort config tuning; failure is non-fatal.
                    Self::configure_connection_pooling(&bare_repo_dir).await.ok();

                    // A fresh clone is by definition up to date — record it so
                    // later resolutions skip the fetch.
                    acquire_rwlock_write_with_timeout(&self.fetched_repos, "fetched_repos")
                        .await?
                        .insert(bare_repo_dir.clone());
                }

                drop(bare_repo_lock);
            }
            let bare_repo = GitRepo::new(&bare_repo_dir);

            let worktree_path =
                self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));

            // Step 2: per-worktree file lock for cross-process exclusion.
            let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
            let _worktree_lock = CacheLock::acquire(&self.dir, &worktree_lock_name).await?;

            // Another process may have created it while we waited on the lock.
            if worktree_path.exists() {
                let notify_to_wake = extract_notify_handle(&self.worktree_cache, &cache_key);
                self.worktree_cache
                    .insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
                if let Some(n) = notify_to_wake {
                    n.notify_waiters();
                }

                self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
                return Ok(worktree_path);
            }

            if let Some(ctx) = context {
                tracing::debug!(
                    target: "git",
                    "({}) Creating SHA-based worktree: {} @ {}",
                    ctx,
                    url.split('/').next_back().unwrap_or(url),
                    sha_short
                );
            }

            // Step 3: shared lock on the bare repo's worktree metadata —
            // multiple `git worktree add`s may run concurrently, but not
            // alongside an exclusive remove (see `cleanup_worktree`).
            let bare_repo_worktree_lock_name = format!("bare-worktree-{owner}_{repo}");
            let _bare_worktree_lock =
                CacheLock::acquire_shared(&self.dir, &bare_repo_worktree_lock_name).await?;

            let worktree_result = tokio::time::timeout(
                crate::constants::GIT_WORKTREE_TIMEOUT,
                bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context),
            )
            .await
            .map_err(|_| {
                anyhow::anyhow!(
                    "Git worktree creation timed out after {:?} for {} @ {}",
                    crate::constants::GIT_WORKTREE_TIMEOUT,
                    url,
                    sha_short
                )
            })?;

            match worktree_result {
                Ok(_) => {
                    // Sanity checks: git reported success, verify the
                    // expected artifacts actually landed on disk.
                    if !worktree_path.exists() {
                        return Err(anyhow::anyhow!(
                            "Worktree directory does not exist: {}",
                            worktree_path.display()
                        ));
                    }

                    let git_file = worktree_path.join(".git");
                    if !git_file.exists() {
                        return Err(anyhow::anyhow!(
                            "Worktree .git file does not exist: {}",
                            git_file.display()
                        ));
                    }

                    // Creation is done; release the shared lock before the
                    // (potentially slow) fsync work below.
                    drop(_bare_worktree_lock);

                    // On non-Windows, fsync the new directories so a crash
                    // doesn't leave a half-visible worktree. Best-effort:
                    // every error here is deliberately ignored.
                    #[cfg(not(windows))]
                    {
                        let worktree_path_clone = worktree_path.clone();
                        let bare_worktrees_dir = bare_repo_dir.join("worktrees");
                        let bare_worktrees_exists = bare_worktrees_dir.exists();

                        let _ = tokio::task::spawn_blocking(move || {
                            if let Ok(dir) = std::fs::File::open(&worktree_path_clone) {
                                let _ = dir.sync_all();
                            }

                            if bare_worktrees_exists {
                                if let Ok(dir) = std::fs::File::open(&bare_worktrees_dir) {
                                    let _ = dir.sync_all();
                                }
                            }
                        })
                        .await;

                        tracing::debug!(
                            target: "git::worktree",
                            "Worktree fsync completed for {} @ {}",
                            worktree_path.display(),
                            &sha[..8]
                        );
                    }

                    // Publish Ready and wake anyone parked on our Pending
                    // notify handle.
                    let notify_to_wake = extract_notify_handle(&self.worktree_cache, &cache_key);
                    self.worktree_cache
                        .insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
                    if let Some(n) = notify_to_wake {
                        n.notify_waiters();
                    }

                    self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
                    Ok(worktree_path)
                }
                Err(e) => Err(e),
            }
        }
        .await;

        // Failure cleanup: remove our Pending entry and wake waiters so they
        // can retry (or fail) instead of blocking until their own timeout.
        match result {
            Ok(path) => Ok(path),
            Err(e) => {
                let notify = extract_notify_handle(&worktree_cache, &cache_key_for_cleanup);
                worktree_cache.remove(&cache_key_for_cleanup);
                if let Some(n) = notify {
                    n.notify_waiters();
                }
                Err(e)
            }
        }
    }
855
    /// Resolve a source to a local directory.
    ///
    /// Local paths are resolved, canonicalized and security-validated, then
    /// returned as-is. Remote URLs are cloned bare into `sources/` on first
    /// use and fetched on subsequent uses — at most once per command, tracked
    /// via `fetched_repos`. A per-source `CacheLock` serializes clone/fetch
    /// across processes.
    async fn get_or_clone_source_impl(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        let is_local_path = crate::utils::is_local_path(url);

        if is_local_path {
            let resolved_path = crate::utils::platform::resolve_path(url)?;

            let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
                .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;

            validate_path_security(&canonical_path, true)?;

            // Versions are meaningless for plain directories; warn instead of
            // failing so manifests stay portable.
            if let Some(ver) = version
                && ver != "local"
            {
                eprintln!("Warning: Version constraints are ignored for local paths");
            }

            return Ok(canonical_path);
        }

        self.ensure_cache_dir().await?;

        // Held for the remainder of the function: guards clone/fetch of this
        // named source against concurrent processes.
        let _lock = CacheLock::acquire(&self.dir, name)
            .await
            .with_context(|| format!("Failed to acquire lock for source: {name}"))?;

        // Fall back to a generic owner/repo when the URL doesn't parse.
        let (owner, repo) =
            crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
        let source_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
        if let Some(parent) = source_dir.parent() {
            tokio::fs::create_dir_all(parent).await.with_file_context(
                FileOperation::CreateDir,
                parent,
                "creating cache directory",
                "cache_module",
            )?;
        }

        if source_dir.exists() {
            if crate::utils::is_git_url(url) {
                // Read-lock scope kept tight: just check membership.
                let already_fetched = {
                    let fetched =
                        acquire_rwlock_read_with_timeout(&self.fetched_repos, "fetched_repos")
                            .await?;
                    fetched.contains(&source_dir)
                };

                if already_fetched {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Skipping fetch for {} (already fetched in this command)",
                        name
                    );
                } else {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Fetching updates for {} from {}",
                        name,
                        url
                    );
                    let repo = crate::git::GitRepo::new(&source_dir);
                    // Fetch failure is non-fatal: the cached clone may still
                    // satisfy the requested version.
                    if let Err(e) = repo.fetch(None).await {
                        tracing::warn!(
                            target: "agpm::cache",
                            "Failed to fetch updates for {}: {}",
                            name,
                            e
                        );
                    } else {
                        // Only mark fetched on success so a later call retries.
                        let mut fetched =
                            acquire_rwlock_write_with_timeout(&self.fetched_repos, "fetched_repos")
                                .await?;
                        fetched.insert(source_dir.clone());
                        tracing::debug!(
                            target: "agpm::cache",
                            "Successfully fetched updates for {}",
                            name
                        );
                    }
                }
            } else {
                tracing::debug!(
                    target: "agpm::cache",
                    "Skipping fetch for local path: {}",
                    url
                );
            }
        } else {
            self.clone_source(url, &source_dir).await?;
        }

        Ok(source_dir)
    }
989
990 async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
1025 tracing::debug!("📦 Cloning {} to cache...", url);
1026
1027 GitRepo::clone_bare(url, target)
1029 .await
1030 .with_context(|| format!("Failed to clone repository from {url}"))?;
1031
1032 if cfg!(test)
1034 && let Ok(entries) = std::fs::read_dir(target)
1035 {
1036 tracing::debug!(
1037 target: "agpm::cache",
1038 "Cloned bare repo to {}, contents:",
1039 target.display()
1040 );
1041 for entry in entries.flatten() {
1042 tracing::debug!(
1043 target: "agpm::cache",
1044 " - {}",
1045 entry.path().display()
1046 );
1047 }
1048 }
1049
1050 Ok(())
1051 }
1052
    /// Copy `source_path` (relative to `source_dir`) to `target_path`
    /// without printing progress output.
    ///
    /// Thin wrapper over [`Self::copy_resource_with_output`] with
    /// `show_output = false`.
    pub async fn copy_resource(
        &self,
        source_dir: &Path,
        source_path: &str,
        target_path: &Path,
    ) -> Result<()> {
        self.copy_resource_with_output(source_dir, source_path, target_path, false).await
    }
1071
1072 pub async fn copy_resource_with_output(
1080 &self,
1081 source_dir: &Path,
1082 source_path: &str,
1083 target_path: &Path,
1084 show_output: bool,
1085 ) -> Result<()> {
1086 let source_file = source_dir.join(source_path);
1087
1088 if !source_file.exists() {
1089 return Err(AgpmError::ResourceFileNotFound {
1090 path: source_path.to_string(),
1091 source_name: source_dir
1092 .file_name()
1093 .and_then(|n| n.to_str())
1094 .unwrap_or("unknown")
1095 .to_string(),
1096 }
1097 .into());
1098 }
1099
1100 if let Some(parent) = target_path.parent() {
1101 async_fs::create_dir_all(parent)
1102 .await
1103 .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
1104 }
1105
1106 async_fs::copy(&source_file, target_path).await.with_context(|| {
1107 format!("Failed to copy {} to {}", source_file.display(), target_path.display())
1108 })?;
1109
1110 if show_output {
1111 println!(" ✅ Installed {}", target_path.display());
1112 }
1113
1114 Ok(())
1115 }
1116
1117 pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
1125 self.ensure_cache_dir().await?;
1126
1127 let mut removed_count = 0;
1128 let mut entries = async_fs::read_dir(&self.dir)
1129 .await
1130 .with_context(|| "Failed to read cache directory")?;
1131
1132 while let Some(entry) =
1133 entries.next_entry().await.with_context(|| "Failed to read directory entry")?
1134 {
1135 let path = entry.path();
1136 if path.is_dir() {
1137 let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
1138
1139 if !active_sources.contains(&dir_name.to_string()) {
1140 println!("🗑️ Removing unused cache: {dir_name}");
1141 async_fs::remove_dir_all(&path).await.with_context(|| {
1142 format!("Failed to remove cache directory: {}", path.display())
1143 })?;
1144 removed_count += 1;
1145 }
1146 }
1147 }
1148
1149 Ok(removed_count)
1150 }
1151
1152 pub async fn get_cache_size(&self) -> Result<u64> {
1154 if !self.dir.exists() {
1155 return Ok(0);
1156 }
1157
1158 let size = fs::get_directory_size(&self.dir).await?;
1159 Ok(size)
1160 }
1161
    /// Borrow the cache root directory (same value as [`Self::cache_dir`]).
    #[must_use]
    pub fn get_cache_location(&self) -> &Path {
        &self.dir
    }
1167
1168 pub async fn clear_all(&self) -> Result<()> {
1170 if self.dir.exists() {
1171 async_fs::remove_dir_all(&self.dir).await.with_context(|| "Failed to clear cache")?;
1172 println!("🗑️ Cleared all cache");
1173 }
1174 Ok(())
1175 }
1176}
1177
1178#[cfg(test)]
1179mod tests {
1180 use super::*;
1181
1182 use tempfile::TempDir;
1183
    // ensure_cache_dir() must create the configured directory on disk.
    #[tokio::test]
    async fn test_cache_dir_creation() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");

        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();

        assert!(cache_dir.exists());
    }

    // get_cache_location() must echo back the directory passed to with_dir().
    #[tokio::test]
    async fn test_cache_location() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        let location = cache.get_cache_location();
        assert_eq!(location, temp_dir.path());
    }

    // An empty cache directory must report a size of zero.
    #[tokio::test]
    async fn test_cache_size_empty() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }
1212
    // get_cache_size() must count the bytes of files under the cache root.
    #[tokio::test]
    async fn test_cache_size_with_content() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        let test_file = temp_dir.path().join("test.txt");
        std::fs::write(&test_file, "test content").unwrap();

        let size = cache.get_cache_size().await.unwrap();
        assert!(size > 0);
        assert_eq!(size, 12); // "test content" is 12 bytes
    }

    // clean_unused() on an empty cache removes nothing.
    #[tokio::test]
    async fn test_clean_unused_empty_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 0);
    }

    // clean_unused() removes exactly the directories NOT named in
    // active_sources and leaves active ones intact.
    #[tokio::test]
    async fn test_clean_unused_removes_correct_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        let active_dir = temp_dir.path().join("active");
        let unused_dir = temp_dir.path().join("unused");
        let another_unused = temp_dir.path().join("another_unused");

        std::fs::create_dir_all(&active_dir).unwrap();
        std::fs::create_dir_all(&unused_dir).unwrap();
        std::fs::create_dir_all(&another_unused).unwrap();

        std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
        std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
        std::fs::write(another_unused.join("file.txt"), "remove").unwrap();

        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();

        assert_eq!(removed, 2);
        assert!(active_dir.exists());
        assert!(!unused_dir.exists());
        assert!(!another_unused.exists());
    }
1268
    // clear_all() removes the cache root and everything below it.
    #[tokio::test]
    async fn test_clear_all_removes_entire_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        let subdir = temp_dir.path().join("subdir");
        std::fs::create_dir_all(&subdir).unwrap();
        std::fs::write(subdir.join("file.txt"), "content").unwrap();

        assert!(temp_dir.path().exists());
        assert!(subdir.exists());

        cache.clear_all().await.unwrap();

        assert!(!temp_dir.path().exists());
    }

    // copy_resource() copies a top-level file byte-for-byte.
    #[tokio::test]
    async fn test_copy_resource() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let source_file = source_dir.join("resource.md");
        std::fs::write(&source_file, "# Test Resource\nContent").unwrap();

        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();

        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Test Resource\nContent");
    }

    // copy_resource() accepts a relative path with subdirectories.
    #[tokio::test]
    async fn test_copy_resource_nested_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        let nested_dir = source_dir.join("nested").join("path");
        std::fs::create_dir_all(&nested_dir).unwrap();
        let source_file = nested_dir.join("resource.md");
        std::fs::write(&source_file, "# Nested Resource").unwrap();

        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();

        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Nested Resource");
    }

    // A missing source file errors out and must not create the destination.
    #[tokio::test]
    async fn test_copy_resource_invalid_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();

        let dest = temp_dir.path().join("dest.md");
        let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;

        assert!(result.is_err());
        assert!(!dest.exists());
    }
1345
    // ensure_cache_dir() can be called repeatedly and must never clobber
    // existing cache contents.
    #[tokio::test]
    async fn test_ensure_cache_dir_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();

        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());

        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());

        std::fs::write(cache_dir.join("test.txt"), "content").unwrap();

        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        assert!(cache_dir.join("test.txt").exists());
    }

    // copy_resource() creates any missing parent directories of the target.
    #[tokio::test]
    async fn test_copy_resource_creates_parent_directories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();

        let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();

        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }

    // copy_resource_with_output() copies regardless of the show_output flag.
    #[tokio::test]
    async fn test_copy_resource_with_output_flag() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();

        let dest1 = temp_dir.path().join("dest1.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
        assert!(dest1.exists());

        let dest2 = temp_dir.path().join("dest2.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
        assert!(dest2.exists());
    }
1405
1406 #[tokio::test]
1407 async fn test_cache_size_nonexistent_dir() {
1408 let temp_dir = TempDir::new().unwrap();
1409 let nonexistent = temp_dir.path().join("nonexistent");
1410 let cache = Cache::with_dir(nonexistent).unwrap();
1411
1412 let size = cache.get_cache_size().await.unwrap();
1413 assert_eq!(size, 0);
1414 }
1415
1416 #[tokio::test]
1417 async fn test_clear_all_nonexistent_cache() {
1418 let temp_dir = TempDir::new().unwrap();
1419 let nonexistent = temp_dir.path().join("nonexistent");
1420 let cache = Cache::with_dir(nonexistent).unwrap();
1421
1422 cache.clear_all().await.unwrap();
1424 }
1425
1426 #[tokio::test]
1427 async fn test_clean_unused_with_files_and_dirs() {
1428 let temp_dir = TempDir::new().unwrap();
1429 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1430
1431 cache.ensure_cache_dir().await.unwrap();
1432
1433 std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
1435 std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();
1436
1437 std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();
1439
1440 let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();
1441
1442 assert_eq!(removed, 1);
1444 assert!(temp_dir.path().join("keep").exists());
1445 assert!(!temp_dir.path().join("remove").exists());
1446 assert!(temp_dir.path().join("file.txt").exists());
1447 }
1448
1449 #[tokio::test]
1450 async fn test_copy_resource_overwrites_existing() {
1451 let temp_dir = TempDir::new().unwrap();
1452 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
1453
1454 let source_dir = temp_dir.path().join("source");
1456 std::fs::create_dir_all(&source_dir).unwrap();
1457 std::fs::write(source_dir.join("file.md"), "new content").unwrap();
1458
1459 let dest = temp_dir.path().join("dest.md");
1461 std::fs::write(&dest, "old content").unwrap();
1462
1463 cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
1465
1466 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
1467 }
1468
1469 #[tokio::test]
1470 async fn test_copy_resource_special_characters() {
1471 let temp_dir = TempDir::new().unwrap();
1472 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
1473
1474 let source_dir = temp_dir.path().join("source");
1476 std::fs::create_dir_all(&source_dir).unwrap();
1477 let special_name = "file with spaces & special-chars.md";
1478 std::fs::write(source_dir.join(special_name), "content").unwrap();
1479
1480 let dest = temp_dir.path().join("dest.md");
1482 cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();
1483
1484 assert!(dest.exists());
1485 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
1486 }
1487
1488 #[tokio::test]
1489 async fn test_cache_location_consistency() {
1490 let temp_dir = TempDir::new().unwrap();
1491 let cache_dir = temp_dir.path().join("my_cache");
1492 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
1493
1494 let loc1 = cache.get_cache_location();
1496 let loc2 = cache.get_cache_location();
1497
1498 assert_eq!(loc1, loc2);
1499 assert_eq!(loc1, cache_dir.as_path());
1500 }
1501
1502 #[tokio::test]
1503 async fn test_clean_unused_empty_active_list() {
1504 let temp_dir = TempDir::new().unwrap();
1505 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1506
1507 cache.ensure_cache_dir().await.unwrap();
1508
1509 std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
1511 std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();
1512
1513 let removed = cache.clean_unused(&[]).await.unwrap();
1515
1516 assert_eq!(removed, 2);
1517 assert!(!temp_dir.path().join("source1").exists());
1518 assert!(!temp_dir.path().join("source2").exists());
1519 }
1520
1521 #[tokio::test]
1522 async fn test_copy_resource_with_relative_paths() {
1523 let temp_dir = TempDir::new().unwrap();
1524 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
1525
1526 let source_dir = temp_dir.path().join("source");
1528 let sub_dir = source_dir.join("agents");
1529 std::fs::create_dir_all(&sub_dir).unwrap();
1530 std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();
1531
1532 let dest = temp_dir.path().join("my-agent.md");
1534 cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();
1535
1536 assert!(dest.exists());
1537 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
1538 }
1539
1540 #[tokio::test]
1541 async fn test_cache_size_with_subdirectories() {
1542 let temp_dir = TempDir::new().unwrap();
1543 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1544
1545 cache.ensure_cache_dir().await.unwrap();
1546
1547 let sub1 = temp_dir.path().join("sub1");
1549 let sub2 = sub1.join("sub2");
1550 std::fs::create_dir_all(&sub2).unwrap();
1551
1552 std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap(); std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap(); std::fs::write(sub2.join("file3.txt"), "abc").unwrap(); let size = cache.get_cache_size().await.unwrap();
1557 assert_eq!(size, 18); }
1559}