agpm_cli/cache/mod.rs
1//! Git repository cache management with worktree-based parallel operations
2//!
3//! This module provides a caching system for Git repositories that enables safe
4//! parallel resource installation through Git worktrees. The cache was redesigned
5//! in AGPM v0.3.0 around bare repositories and worktrees for better concurrency,
6//! a simpler architecture, and higher performance.
7//!
8//! # Architecture Overview
9//!
10//! The cache system implements a multi-layered architecture:
11//! - [`Cache`] struct: Core repository management and worktree orchestration
12//! - [`CacheLock`]: File-based locking for process-safe concurrent access
13//! - `WorktreeState`: Instance-level caching for worktree lifecycle management
14//! - Bare repositories: Optimized Git storage for efficient worktree creation
15//!
16//! # Platform-Specific Cache Locations
17//!
18//! The cache follows platform conventions for optimal performance:
19//! - **Linux/macOS**: `~/.agpm/cache/` (under the user's home directory)
20//! - **Windows**: `%LOCALAPPDATA%\agpm\cache\` (using Windows cache directory)
21//! - **Environment Override**: Set `AGPM_CACHE_DIR` for custom locations
22//!
23//! # Cache Directory Structure
24//!
25//! The cache is organized for optimal parallel access patterns:
26//! ```text
27//! ~/.agpm/cache/
28//! ├── sources/                          # Bare repositories optimized for worktrees
29//! │   ├── github_owner_repo.git/        # Bare repo with all Git objects
30//! │   └── gitlab_org_project.git/       # URL-parsed directory naming
31//! ├── worktrees/                         # SHA-based worktrees for maximum deduplication
32//! │   ├── github_owner_repo_abc12345/   # First 8 chars of commit SHA
33//! │   ├── github_owner_repo_def67890/   # Each unique commit gets one worktree
34//! │   ├── .state.json                   # Persistent worktree registry
35//! │   └── github_owner_repo_456789ab/   # Multiple refs to same SHA share worktree
36//! └── .locks/                            # Fine-grained locking infrastructure
37//!     ├── github_owner_repo.lock        # Repository-level locks
38//!     └── worktree-owner_repo-v1.lock   # Worktree creation locks
39//! ```
40//!
41//! # Enhanced Concurrency Architecture
42//!
43//! The v0.3.2+ cache implements SHA-based worktree optimization with advanced concurrency (see the example after this list):
44//! - **SHA-based deduplication**: Worktrees keyed by commit SHA, not version reference
45//! - **Centralized resolution**: `VersionResolver` handles batch SHA resolution upfront
46//! - **Maximum reuse**: Multiple tags/branches pointing to same commit share one worktree
47//! - **Instance-level caching**: `WorktreeState` tracks creation across threads
48//! - **Per-worktree file locking**: Fine-grained locks prevent creation conflicts
49//! - **Direct parallelism control**: `--max-parallel` flag controls concurrency
50//! - **Command-instance fetch caching**: Single fetch per repository per command
51//! - **Atomic state transitions**: Pending → Ready state coordination
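//!
//! For example, two refs that resolve to the same commit share a single worktree.
//! A minimal sketch (the repository URL and SHA below are placeholders):
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//!
//! # async fn example() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//! // Hypothetical: both "v1.0.0" and "release-1.0" resolve to this commit upstream.
//! let sha = "abc1234567890def1234567890abcdef12345678";
//! let url = "https://github.com/example/repo.git";
//!
//! // Both calls return the same path because worktrees are keyed by SHA.
//! let wt_a = cache.get_or_create_worktree_for_sha("community", url, sha, None).await?;
//! let wt_b = cache.get_or_create_worktree_for_sha("community", url, sha, None).await?;
//! assert_eq!(wt_a, wt_b);
//! # Ok(())
//! # }
//! ```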
52//!
53//! ## Locking Strategy
54//!
55//! ```text
56//! Process A: acquire("source1") ───┐
57//!                                  ├─── BLOCKS: same source
58//! Process B: acquire("source1") ───┘
59//!
60//! Process C: acquire("source2") ───── CONCURRENT: different source
61//! ```
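//!
//! A minimal sketch of acquiring such a per-source lock (the directory and source
//! name are placeholders; the lock is released when the guard is dropped):
//!
//! ```rust,no_run
//! use agpm_cli::cache::CacheLock;
//! use std::path::Path;
//!
//! # async fn example() -> anyhow::Result<()> {
//! let cache_dir = Path::new("/tmp/agpm-cache");
//! // Blocks other AGPM processes operating on the same source name.
//! let _lock = CacheLock::acquire(cache_dir, "source1").await?;
//! // ... clone or fetch "source1" while the lock is held ...
//! # Ok(())
//! # }
//! ```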
62//!
63//! # Cache Operations
64//!
65//! ## Repository Management
66//! - **Clone**: Initial repository cloning from remote URLs
67//! - **Update**: Fetch latest changes from remote (git fetch)
68//! - **Checkout**: Switch to specific versions (tags, branches, commits)
69//! - **Cleanup**: Remove unused repositories to reclaim disk space
70//!
71//! ## Resource Installation
72//! - **Copy-based**: Files copied from cache to project directories
73//! - **Path resolution**: Handles relative paths within repositories
74//! - **Directory creation**: Automatically creates parent directories
75//! - **Overwrite behavior**: Existing files are replaced on install
76//!
77//! # Performance Characteristics
78//!
79//! The cache is optimized for common AGPM workflows:
80//! - **First install**: Clone repository once, reuse for all resources
81//! - **Subsequent installs**: Copy from local cache (fast file operations)
82//! - **Version switching**: Git checkout within cached repository
83//! - **Parallel operations**: Multiple sources can be processed concurrently
84//!
85//! ## Disk Space Management
86//!
87//! - **Size calculation**: Recursive directory size calculation
88//! - **Unused cleanup**: Remove repositories no longer referenced
89//! - **Complete cleanup**: Clear entire cache when needed
90//! - **Selective removal**: Keep active sources, remove only unused ones
91//!
92//! # Error Handling and Recovery
93//!
94//! The cache provides comprehensive error handling:
95//! - **Lock timeouts**: Graceful handling of concurrent access
96//! - **Clone failures**: Network and authentication error reporting
97//! - **Version errors**: Clear messages for invalid tags/branches/commits
98//! - **File system errors**: Detailed context for permission and space issues
99//!
100//! # Security Considerations
101//!
102//! - **Path validation**: Prevents directory traversal attacks
103//! - **Lock file isolation**: Prevents lock file manipulation
104//! - **Safe file operations**: Atomic operations prevent corruption
105//! - **Permission handling**: Respects file system permissions
106//!
107//! # Usage Examples
108//!
109//! ## Basic Cache Operations
110//!
111//! ```rust,no_run
112//! use agpm_cli::cache::Cache;
113//! use std::path::PathBuf;
114//!
115//! # async fn example() -> anyhow::Result<()> {
116//! // Initialize cache with default location
117//! let cache = Cache::new()?;
118//!
119//! // Get or clone a source repository
120//! let repo_path = cache.get_or_clone_source(
121//! "community",
122//! "https://github.com/example/agpm-community.git",
123//! Some("v1.0.0") // Specific version
124//! ).await?;
125//!
126//! // Copy a resource from cache to project
127//! cache.copy_resource(
128//! &repo_path,
129//! "agents/helper.md", // Source path in repository
130//! &PathBuf::from("./agents/helper.md") // Destination in project
131//! ).await?;
132//! # Ok(())
133//! # }
134//! ```
135//!
136//! ## Cache Maintenance
137//!
138//! ```rust,no_run
139//! use agpm_cli::cache::Cache;
140//!
141//! # #[tokio::main]
142//! # async fn main() -> anyhow::Result<()> {
143//! let cache = Cache::new()?;
144//!
145//! // Check cache size
146//! let size_bytes = cache.get_cache_size().await?;
147//! println!("Cache size: {} MB", size_bytes / 1024 / 1024);
148//!
149//! // Clean unused repositories
150//! let active_sources = vec!["community".to_string(), "work".to_string()];
151//! let removed_count = cache.clean_unused(&active_sources).await?;
152//! println!("Removed {} unused repositories", removed_count);
153//!
154//! // Complete cache cleanup
155//! cache.clear_all().await?;
156//! # Ok(())
157//! # }
158//! ```
159//!
160//! ## Custom Cache Location
161//!
162//! ```rust,no_run
163//! use agpm_cli::cache::Cache;
164//! use std::path::PathBuf;
165//!
166//! # fn custom_location() -> anyhow::Result<()> {
167//! // Use custom cache directory (useful for testing or special setups)
168//! let custom_dir = PathBuf::from("/tmp/my-agpm-cache");
169//! let cache = Cache::with_dir(custom_dir)?;
170//!
171//! println!("Using cache at: {}", cache.get_cache_location().display());
172//! # Ok(())
173//! # }
174//! ```
175//!
176//! # Integration with AGPM Workflow
177//!
178//! The cache module integrates seamlessly with AGPM's dependency management:
179//! 1. **Manifest parsing**: Source URLs extracted from `agpm.toml`
180//! 2. **Dependency resolution**: Version constraints resolved to specific commits
181//! 3. **Cache population**: Repositories cloned and checked out as needed
182//! 4. **Resource installation**: Files copied from cache to project directories
183//! 5. **Lockfile generation**: Installed resources tracked in `agpm.lock`
184//!
185//! See [`crate::manifest`] for manifest handling and [`crate::lockfile`] for
186//! lockfile management.
187
188use crate::core::error::AgpmError;
189use crate::git::GitRepo;
190use crate::git::command_builder::GitCommand;
191use crate::utils::fs;
192use crate::utils::security::validate_path_security;
193use anyhow::{Context, Result};
194use dashmap::DashMap;
195use serde::{Deserialize, Serialize};
196use std::collections::{HashMap, HashSet};
197use std::path::{Path, PathBuf};
198use std::sync::Arc;
199use std::time::{Duration, SystemTime, UNIX_EPOCH};
200use tokio::fs as async_fs;
201use tokio::sync::{Mutex, RwLock};
202
203// Concurrency Architecture:
204// - Direct control approach: Command parallelism (--max-parallel) + per-worktree file locking
205// - Instance-level caching: Worktrees and fetch operations cached per Cache instance
206// - Command-level control: --max-parallel flag controls dependency processing parallelism
207// - Fetch caching: Network operations cached for 5 minutes to reduce redundancy
208
209/// State of a worktree in the instance-level cache for concurrent coordination.
210///
211/// This enum implements a sophisticated state machine for worktree lifecycle management
212/// that enables safe concurrent access across multiple threads without race conditions.
213/// The cache uses this state to coordinate between threads that might request the same
214/// worktree simultaneously, eliminating the need for global synchronization bottlenecks.
215///
216/// # State Transitions
217///
218/// - **Initial**: No entry exists in cache (implicit state)
219/// - [`Pending`](WorktreeState::Pending): One thread is creating the worktree
220/// - [`Ready`](WorktreeState::Ready): Worktree exists and is ready for all threads
221///
222/// # Concurrency Coordination Pattern
223///
224/// The worktree creation process follows this coordinated pattern:
225/// 1. **Reservation**: First thread reserves slot by setting state to `Pending`
226/// 2. **Creation**: Reserved thread performs actual worktree creation with file lock
227/// 3. **Notification**: Creator updates state to `Ready(path)` when complete
228/// 4. **Reuse**: Subsequent threads immediately use the ready worktree path
229/// 5. **Validation**: All threads verify worktree still exists before use
230///
231/// # Cache Key Format
232///
233/// Worktrees are uniquely identified by composite keys:
234/// ```text
235/// "{cache_dir_hash}:{owner}_{repo}:{version}"
236/// ```
237///
238/// Components:
239/// - `cache_dir_hash`: First 8 hex chars of cache directory path hash
240/// - `owner_repo`: Parsed from Git URL (e.g., "`github_owner_project`")
241/// - `version`: Git reference (tag, branch, commit, or "HEAD")
242///
243/// This format ensures isolation between:
244/// - Different cache instances (via hash)
245/// - Different repositories (via owner/repo)
246/// - Different versions (via version string)
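///
/// A sketch of assembling such a key under these conventions (illustrative only;
/// the exact hashing is an implementation detail):
///
/// ```rust,ignore
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// use std::path::Path;
///
/// fn cache_key(cache_dir: &Path, owner: &str, repo: &str, version: &str) -> String {
///     let mut hasher = DefaultHasher::new();
///     cache_dir.hash(&mut hasher);
///     let dir_hash = format!("{:016x}", hasher.finish());
///     format!("{}:{}_{}:{}", &dir_hash[..8], owner, repo, version)
/// }
/// ```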
247///
248/// # Memory Management
249///
250/// The instance-level cache persists for the lifetime of the `Cache` instance,
251/// but worktrees are validated on each access to handle external deletion.
252#[derive(Debug, Clone)]
253enum WorktreeState {
254 /// Another thread is currently creating this worktree.
255 ///
256 /// When threads encounter this state, they should wait briefly and retry
257 /// rather than attempting concurrent worktree creation which would fail.
258 Pending,
259
260 /// Worktree is fully created and ready to use.
261 ///
262 /// The `PathBuf` contains the filesystem path to the working directory.
263 /// This path should be validated before use as the worktree may have been
264 /// externally deleted.
265 Ready(PathBuf),
266}
267
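/// Persistent, on-disk registry of worktrees (stored as `.state.json` under the
/// `worktrees/` directory) used to track worktree reuse across AGPM runs.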
268#[derive(Debug, Clone, Serialize, Deserialize, Default)]
269struct WorktreeRegistry {
270 entries: HashMap<String, WorktreeRecord>,
271}
272
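/// A single registry entry: the source and version a worktree was created for,
/// its on-disk path, and the last time it was used (Unix seconds).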
273#[derive(Debug, Clone, Serialize, Deserialize)]
274struct WorktreeRecord {
275 source: String,
276 version: String,
277 path: PathBuf,
278 last_used: u64,
279}
280
281impl WorktreeRegistry {
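    /// Loads the registry from disk, falling back to an empty registry when the
    /// file is missing or unreadable.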
282 fn load(path: &Path) -> Self {
283 match std::fs::read(path) {
284 Ok(data) => serde_json::from_slice(&data).unwrap_or_default(),
285 Err(err) if err.kind() == std::io::ErrorKind::NotFound => Self::default(),
286 Err(err) => {
287 tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
288 Self::default()
289 }
290 }
291 }
292
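    /// Inserts or refreshes an entry, stamping it with the current Unix time.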
293 fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
294 let timestamp = SystemTime::now()
295 .duration_since(UNIX_EPOCH)
296 .unwrap_or_else(|_| Duration::from_secs(0))
297 .as_secs();
298
299 self.entries.insert(
300 key,
301 WorktreeRecord {
302 source,
303 version,
304 path,
305 last_used: timestamp,
306 },
307 );
308 }
309
310 fn remove_by_path(&mut self, target: &Path) -> bool {
311 if let Some(key) = self.entries.iter().find_map(|(k, record)| {
312 if record.path == target {
313 Some(k.clone())
314 } else {
315 None
316 }
317 }) {
318 self.entries.remove(&key);
319 true
320 } else {
321 false
322 }
323 }
324
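    /// Serializes the registry to pretty JSON and writes it to `path`, creating
    /// parent directories as needed.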
325 async fn persist(&self, path: &Path) -> Result<()> {
326 if let Some(parent) = path.parent() {
327 async_fs::create_dir_all(parent).await?;
328 }
329
330 let data = serde_json::to_vec_pretty(self)?;
331 async_fs::write(path, data).await?;
332 Ok(())
333 }
334}
335
336/// File-based locking mechanism for cache operations
337///
338/// This module provides thread-safe and process-safe locking for cache
339/// operations through OS-level file locks, ensuring data consistency
340/// when multiple AGPM processes access the same cache directory.
341pub mod lock;
342pub use lock::CacheLock;
343
344/// Git repository cache for efficient resource management
345///
346/// The `Cache` struct provides the primary interface for managing Git repository
347/// caching in AGPM. It handles repository cloning, updating, version management,
348/// and resource file copying operations.
349///
350/// # Thread Safety
351///
352/// `Cache` is cheap to clone (its state is shared through `Arc`s), so one instance can
353/// be shared across async tasks; separate instances and processes coordinate on the same
354/// cache directory through the file-based locking mechanism provided by [`CacheLock`].
355///
356/// # Platform Compatibility
357///
358/// The cache automatically handles platform-specific differences:
359/// - **Path separators**: Uses [`std::path`] for cross-platform compatibility
360/// - **Cache location**: Follows platform conventions for app data storage
361/// - **File locking**: Uses [`fs4`] crate for cross-platform file locking
362/// - **Directory creation**: Handles permissions and long paths on Windows
363///
364/// # Examples
365///
366/// Create a cache with default platform-specific location:
367///
368/// ```rust,no_run
369/// use agpm_cli::cache::Cache;
370///
371/// # fn example() -> anyhow::Result<()> {
372/// let cache = Cache::new()?;
373/// println!("Cache location: {}", cache.get_cache_location().display());
374/// # Ok(())
375/// # }
376/// ```
377///
378/// Create a cache with custom location (useful for testing):
379///
380/// ```rust,no_run
381/// use agpm_cli::cache::Cache;
382/// use std::path::PathBuf;
383///
384/// # fn example() -> anyhow::Result<()> {
385/// let custom_dir = PathBuf::from("/tmp/test-cache");
386/// let cache = Cache::with_dir(custom_dir)?;
387/// # Ok(())
388/// # }
389/// ```
390pub struct Cache {
391 /// The root directory where all cached repositories are stored
392 cache_dir: PathBuf,
393
394 /// Instance-level cache for worktrees to avoid redundant checkouts.
395 ///
396 /// This cache maps worktree identifiers to their creation state, enabling
397 /// safe concurrent access. Multiple threads can request the same worktree
398 /// without conflicts - the first thread creates it while others wait.
399 ///
400 /// **Key format**: `"{cache_dir_hash}:{owner}_{repo}:{version}"`
401 ///
402 /// The cache directory hash ensures isolation between different Cache instances,
403 /// preventing conflicts when multiple instances operate on different cache roots.
404 worktree_cache: Arc<RwLock<HashMap<String, WorktreeState>>>,
405
406 /// Per-repository async locks that serialize fetch operations across
407 /// concurrent tasks. This prevents redundant `git fetch` runs when
408 /// multiple dependencies target the same repository simultaneously.
409 fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,
410
411 /// Command-instance fetch cache to track which repositories have been fetched
412 /// during this command execution. This ensures we only fetch once per repository
413 /// per command instance, dramatically reducing network operations for multi-dependency
414 /// installations.
415 ///
416 /// Contains bare repository paths that have been fetched in this command instance.
417 /// Works in conjunction with `VersionResolver` to minimize Git network operations.
418 fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,
419
420 /// Persistent registry of worktrees stored on disk for reuse across
421 /// AGPM runs. Tracks last-used timestamps and paths so we can validate
422 /// and clean up cached worktrees without recreating them unnecessarily.
423 worktree_registry: Arc<Mutex<WorktreeRegistry>>,
424}
425
426impl Clone for Cache {
427 fn clone(&self) -> Self {
428 Self {
429 cache_dir: self.cache_dir.clone(),
430 worktree_cache: Arc::clone(&self.worktree_cache),
431 fetch_locks: Arc::clone(&self.fetch_locks),
432 fetched_repos: Arc::clone(&self.fetched_repos),
433 worktree_registry: Arc::clone(&self.worktree_registry),
434 }
435 }
436}
437
438impl Cache {
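    /// Location of the persistent worktree registry (`worktrees/.state.json`) for a cache root.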
439 fn registry_path_for(cache_dir: &Path) -> PathBuf {
440 cache_dir.join("worktrees").join(".state.json")
441 }
442
443 fn registry_path(&self) -> PathBuf {
444 Self::registry_path_for(&self.cache_dir)
445 }
446
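    /// Records (or refreshes) a worktree entry in the persistent registry and writes it to disk.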
447 async fn record_worktree_usage(
448 &self,
449 registry_key: &str,
450 source_name: &str,
451 version_key: &str,
452 worktree_path: &Path,
453 ) -> Result<()> {
454 let mut registry = self.worktree_registry.lock().await;
455 registry.update(
456 registry_key.to_string(),
457 source_name.to_string(),
458 version_key.to_string(),
459 worktree_path.to_path_buf(),
460 );
461 registry.persist(&self.registry_path()).await?;
462 Ok(())
463 }
464
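    /// Drops the registry entry for `worktree_path` (if present) and persists the change.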
465 async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
466 let mut registry = self.worktree_registry.lock().await;
467 if registry.remove_by_path(worktree_path) {
468 registry.persist(&self.registry_path()).await?;
469 }
470 Ok(())
471 }
472
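    /// Applies best-effort `git config` tuning (HTTP/2, larger post buffer, no
    /// compression) to a freshly cloned bare repository; individual failures are ignored.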
473 async fn configure_connection_pooling(path: &Path) -> Result<()> {
474 let commands = [
475 ("http.version", "HTTP/2"),
476 ("http.postBuffer", "524288000"),
477 ("core.compression", "0"),
478 ];
479
480 for (key, value) in commands {
481 GitCommand::new()
482 .args(["config", key, value])
483 .current_dir(path)
484 .execute_success()
485 .await
486 .ok();
487 }
488
489 Ok(())
490 }
491
492 /// Creates a new `Cache` instance using the default platform-specific cache directory.
493 ///
494 /// The cache directory is determined based on the current platform:
495 /// - **Linux/macOS**: `~/.agpm/cache/`
496 /// - **Windows**: `%LOCALAPPDATA%\agpm\cache\`
497 ///
498 /// # Environment Variable Override
499 ///
500 /// The cache location can be overridden by setting the `AGPM_CACHE_DIR`
501 /// environment variable. This is particularly useful for:
502 /// - Testing with isolated cache directories
503 /// - CI/CD environments with specific cache locations
504 /// - Custom deployment scenarios
505 ///
506 /// # Errors
507 ///
508 /// Returns an error if:
509 /// - Unable to determine the home/local data directory
510 /// - The resolved path is invalid or inaccessible
511 ///
512 /// # Examples
513 ///
514 /// ```rust,no_run
515 /// use agpm_cli::cache::Cache;
516 ///
517 /// # fn example() -> anyhow::Result<()> {
518 /// let cache = Cache::new()?;
519 /// println!("Using cache at: {}", cache.get_cache_location().display());
520 /// # Ok(())
521 /// # }
522 /// ```
523 pub fn new() -> Result<Self> {
524 let cache_dir = crate::config::get_cache_dir()?;
525 let registry_path = Self::registry_path_for(&cache_dir);
526 let registry = WorktreeRegistry::load(&registry_path);
527 Ok(Self {
528 cache_dir,
529 worktree_cache: Arc::new(RwLock::new(HashMap::new())),
530 fetch_locks: Arc::new(DashMap::new()),
531 fetched_repos: Arc::new(RwLock::new(HashSet::new())),
532 worktree_registry: Arc::new(Mutex::new(registry)),
533 })
534 }
535
536 /// Creates a new `Cache` instance using a custom cache directory.
537 ///
538 /// This constructor allows you to specify exactly where the cache should be
539 /// stored, overriding platform defaults. The directory will be created if
540 /// it doesn't exist when cache operations are performed.
541 ///
542 /// # Use Cases
543 ///
544 /// - **Testing**: Use temporary directories for isolated test environments
545 /// - **Development**: Use project-local cache directories
546 /// - **Deployment**: Use specific paths in containerized environments
547 /// - **Multi-user systems**: Use user-specific cache locations
548 ///
549 /// # Parameters
550 ///
551 /// * `cache_dir` - The absolute path where cache data should be stored
552 ///
553 /// # Examples
554 ///
555 /// ```rust,no_run
556 /// use agpm_cli::cache::Cache;
557 /// use std::path::PathBuf;
558 ///
559 /// # fn example() -> anyhow::Result<()> {
560 /// // Use a project-local cache
561 /// let project_cache = Cache::with_dir(PathBuf::from("./cache"))?;
562 ///
563 /// // Use a system-wide cache
564 /// let system_cache = Cache::with_dir(PathBuf::from("/var/cache/agpm"))?;
565 ///
566 /// // Use a temporary cache for testing
567 /// let temp_cache = Cache::with_dir(std::env::temp_dir().join("agpm-test"))?;
568 /// # Ok(())
569 /// # }
570 /// ```
571 pub fn with_dir(cache_dir: PathBuf) -> Result<Self> {
572 let registry_path = Self::registry_path_for(&cache_dir);
573 let registry = WorktreeRegistry::load(&registry_path);
574 Ok(Self {
575 cache_dir,
576 worktree_cache: Arc::new(RwLock::new(HashMap::new())),
577 fetch_locks: Arc::new(DashMap::new()),
578 fetched_repos: Arc::new(RwLock::new(HashSet::new())),
579 worktree_registry: Arc::new(Mutex::new(registry)),
580 })
581 }
582
583 /// Ensures the cache directory exists, creating it if necessary.
584 ///
585 /// This method creates the cache directory and all necessary parent directories
586 /// if they don't already exist. It's safe to call multiple times - it will
587 /// not error if the directory already exists.
588 ///
589 /// # Platform Considerations
590 ///
591 /// - **Windows**: Handles long path names (>260 characters) correctly
592 /// - **Unix**: Respects umask settings for directory permissions
593 /// - **All platforms**: Creates intermediate directories as needed
594 ///
595 /// # Errors
596 ///
597 /// Returns an error if:
598 /// - Insufficient permissions to create the directory
599 /// - Disk space is exhausted
600 /// - Path contains invalid characters for the platform
601 /// - A file exists at the target path (not a directory)
602 ///
603 /// # Examples
604 ///
605 /// ```rust,no_run
606 /// use agpm_cli::cache::Cache;
607 ///
608 /// # async fn example() -> anyhow::Result<()> {
609 /// let cache = Cache::new()?;
610 ///
611 /// // Ensure cache directory exists before operations
612 /// cache.ensure_cache_dir().await?;
613 ///
614 /// // Safe to call multiple times
615 /// cache.ensure_cache_dir().await?; // No error
616 /// # Ok(())
617 /// # }
618 /// ```
619 pub async fn ensure_cache_dir(&self) -> Result<()> {
620 if !self.cache_dir.exists() {
621 async_fs::create_dir_all(&self.cache_dir).await.with_context(|| {
622 format!("Failed to create cache directory at {}", self.cache_dir.display())
623 })?;
624 }
625 Ok(())
626 }
627
628 /// Returns the path to the cache directory.
629 ///
630 /// This is useful for operations that need direct access to the cache directory,
631 /// such as lock file cleanup or cache size calculations.
632 ///
633 /// # Example
634 ///
635 /// ```rust,no_run
636 /// use agpm_cli::cache::Cache;
637 ///
638 /// # fn example() -> anyhow::Result<()> {
639 /// let cache = Cache::new()?;
640 /// let cache_dir = cache.cache_dir();
641 /// println!("Cache directory: {}", cache_dir.display());
642 /// # Ok(())
643 /// # }
644 /// ```
645 pub fn cache_dir(&self) -> &Path {
646 &self.cache_dir
647 }
648
649 /// Gets or clones a source repository, ensuring it's available in the cache.
650 ///
651 /// This is the primary method for source repository management. It handles both
652 /// initial cloning of new repositories and updating existing cached repositories.
653 /// The operation is atomic and thread-safe through file-based locking.
654 ///
655 /// # Operation Flow
656 ///
657 /// 1. **Lock acquisition**: Acquires exclusive lock for the source name
658 /// 2. **Directory check**: Determines if repository already exists in cache
659 /// 3. **Clone or update**: Either clones new repository or fetches updates
660 /// 4. **Version checkout**: Switches to requested version if specified
661 /// 5. **Path return**: Returns path to cached repository
662 ///
663 /// # Concurrency Behavior
664 ///
665 /// - **Same source**: Concurrent calls with the same `name` will block
666 /// - **Different sources**: Concurrent calls with different `name` run in parallel
667 /// - **Process safety**: Safe across multiple AGPM processes
668 ///
669 /// # Version Handling
670 ///
671 /// The `version` parameter accepts various Git reference types:
672 /// - **Tags**: `"v1.0.0"`, `"release-2023"` (most common for releases)
673 /// - **Branches**: `"main"`, `"develop"`, `"feature/new-agents"`
674 /// - **Commits**: `"abc123def"` (full or short SHA hashes)
675 /// - **None**: Uses repository's default branch (typically `main` or `master`)
676 ///
677 /// # Parameters
678 ///
679 /// * `name` - Unique source identifier (used for cache directory and locking)
680 /// * `url` - Git repository URL (HTTPS, SSH, or local paths)
681 /// * `version` - Optional version constraint (tag, branch, or commit)
682 ///
683 /// # Returns
684 ///
685 /// Returns the [`PathBuf`] to the cached repository directory, which contains
686 /// the full Git repository structure and can be used for resource file access.
687 ///
688 /// # Errors
689 ///
690 /// Returns an error if:
691 /// - **Network issues**: Unable to clone or fetch from remote repository
692 /// - **Authentication**: Invalid credentials for private repositories
693 /// - **Version issues**: Specified version doesn't exist in repository
694 /// - **Lock timeout**: Unable to acquire exclusive lock (rare)
695 /// - **File system**: Permission or disk space issues
696 /// - **Git errors**: Repository corruption or invalid Git operations
697 ///
698 /// # Performance Notes
699 ///
700 /// - **First call**: Performs full repository clone (slower)
701 /// - **Subsequent calls**: Only fetches updates (faster)
702 /// - **Version switching**: Uses Git checkout (very fast)
703 /// - **Parallel sources**: Multiple sources processed concurrently
704 ///
705 /// # Examples
706 ///
707 /// Clone a public repository with specific version:
708 ///
709 /// ```rust,no_run
710 /// use agpm_cli::cache::Cache;
711 ///
712 /// # async fn example() -> anyhow::Result<()> {
713 /// let cache = Cache::new()?;
714 ///
715 /// let repo_path = cache.get_or_clone_source(
716 /// "community",
717 /// "https://github.com/example/agpm-community.git",
718 /// Some("v1.2.0")
719 /// ).await?;
720 ///
721 /// println!("Repository cached at: {}", repo_path.display());
722 /// # Ok(())
723 /// # }
724 /// ```
725 ///
726 /// Use latest version from default branch:
727 ///
728 /// ```rust,no_run
729 /// use agpm_cli::cache::Cache;
730 ///
731 /// # async fn example() -> anyhow::Result<()> {
732 /// let cache = Cache::new()?;
733 ///
734 /// let repo_path = cache.get_or_clone_source(
735 /// "dev-tools",
736 /// "https://github.com/myorg/dev-tools.git",
737 /// None // Use default branch
738 /// ).await?;
739 /// # Ok(())
740 /// # }
741 /// ```
742 ///
743 /// Work with development branch:
744 ///
745 /// ```rust,no_run
746 /// use agpm_cli::cache::Cache;
747 ///
748 /// # async fn example() -> anyhow::Result<()> {
749 /// let cache = Cache::new()?;
750 ///
751 /// let repo_path = cache.get_or_clone_source(
752 /// "experimental",
753 /// "https://github.com/myorg/experimental.git",
754 /// Some("develop")
755 /// ).await?;
756 /// # Ok(())
757 /// # }
758 /// ```
759 pub async fn get_or_clone_source(
760 &self,
761 name: &str,
762 url: &str,
763 version: Option<&str>,
764 ) -> Result<PathBuf> {
765 self.get_or_clone_source_impl(name, url, version).await
766 }
767
768 /// Clean up a worktree after use (fast version).
769 ///
770 /// This just removes the worktree directory without calling git.
771 /// Git will clean up its internal references when `git worktree prune` is called.
772 ///
773 /// # Parameters
774 ///
775 /// * `worktree_path` - The path to the worktree to clean up
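    ///
    /// # Example
    ///
    /// A minimal sketch (the worktree path below is a placeholder previously
    /// returned by the cache):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::Path;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let worktree = Path::new("/tmp/agpm-cache/worktrees/github_owner_repo_abc12345");
    /// cache.cleanup_worktree(worktree).await?;
    /// # Ok(())
    /// # }
    /// ```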
776 pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
777 // Just remove the directory - don't call git worktree remove
778 // This is much faster and git will clean up its references later
779 if worktree_path.exists() {
780 tokio::fs::remove_dir_all(worktree_path).await.with_context(|| {
781 format!("Failed to remove worktree directory: {worktree_path:?}")
782 })?;
783 self.remove_worktree_record_by_path(worktree_path).await?;
784 }
785 Ok(())
786 }
787
788 /// Clean up all worktrees in the cache.
789 ///
790 /// This is useful for cleaning up after batch operations or on cache clear.
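    ///
    /// # Example
    ///
    /// Clearing the cache's worktrees in one call:
    ///
    /// ```rust,no_run
    /// # use agpm_cli::cache::Cache;
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// // Removes the entire worktrees/ directory and prunes stale references.
    /// cache.cleanup_all_worktrees().await?;
    /// # Ok(())
    /// # }
    /// ```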
791 pub async fn cleanup_all_worktrees(&self) -> Result<()> {
792 let worktrees_dir = self.cache_dir.join("worktrees");
793
794 if !worktrees_dir.exists() {
795 return Ok(());
796 }
797
798 // Remove the entire worktrees directory
799 tokio::fs::remove_dir_all(&worktrees_dir)
800 .await
801 .with_context(|| "Failed to clean up worktrees")?;
802
803 // Also prune worktree references from all bare repos
804 let sources_dir = self.cache_dir.join("sources");
805 if sources_dir.exists() {
806 let mut entries = tokio::fs::read_dir(&sources_dir).await?;
807 while let Some(entry) = entries.next_entry().await? {
808 let path = entry.path();
809 if path.extension().and_then(|s| s.to_str()) == Some("git") {
810 let bare_repo = GitRepo::new(&path);
811 bare_repo.prune_worktrees().await.ok();
812 }
813 }
814 }
815
816 {
817 let mut registry = self.worktree_registry.lock().await;
818 if !registry.entries.is_empty() {
819 registry.entries.clear();
820 registry.persist(&self.registry_path()).await?;
821 }
822 }
823
824 Ok(())
825 }
826
827 /// Get or create a worktree for a specific commit SHA.
828 ///
829 /// This method is the cornerstone of AGPM's optimized dependency resolution.
830 /// By using commit SHAs as the primary key for worktrees, we ensure:
831 /// - Maximum worktree reuse (same SHA = same worktree)
832 /// - Deterministic installations (SHA uniquely identifies content)
833 /// - Reduced disk usage (no duplicate worktrees for same commit)
834 ///
835 /// # SHA-Based Caching Strategy
836 ///
837 /// Unlike version-based worktrees that create separate directories for
838 /// "v1.0.0" and "release-1.0" even if they point to the same commit,
839 /// SHA-based worktrees ensure a single worktree per unique commit.
840 ///
841 /// # Parameters
842 ///
843 /// * `name` - Source name from manifest
844 /// * `url` - Git repository URL
845 /// * `sha` - Full 40-character commit SHA (must be pre-resolved)
846 /// * `context` - Optional context for logging
847 ///
848 /// # Returns
849 ///
850 /// Path to the worktree containing the exact commit specified by SHA.
851 ///
852 /// # Example
853 ///
854 /// ```no_run
855 /// # use agpm_cli::cache::Cache;
856 /// # async fn example() -> anyhow::Result<()> {
857 /// let cache = Cache::new()?;
858 ///
859 /// // First resolve version to SHA
860 /// let sha = "abc1234567890def1234567890abcdef12345678";
861 ///
862 /// // Get worktree for that specific commit
863 /// let worktree = cache.get_or_create_worktree_for_sha(
864 /// "community",
865 /// "https://github.com/example/repo.git",
866 /// sha,
867 /// Some("my-agent")
868 /// ).await?;
869 /// # Ok(())
870 /// # }
871 /// ```
872 pub async fn get_or_create_worktree_for_sha(
873 &self,
874 name: &str,
875 url: &str,
876 sha: &str,
877 context: Option<&str>,
878 ) -> Result<PathBuf> {
879 // Validate SHA format
880 if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
881 return Err(anyhow::anyhow!(
882 "Invalid SHA format: expected 40 hex characters, got '{sha}'"
883 ));
884 }
885
886 // Check if this is a local path
887 let is_local_path = crate::utils::is_local_path(url);
888 if is_local_path {
889 // Local paths don't use worktrees
890 return self.get_or_clone_source(name, url, None).await;
891 }
892
893 self.ensure_cache_dir().await?;
894
895 // Parse URL for cache structure
896 let (owner, repo) =
897 crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
898
899 // Create SHA-based cache key
900 // Using first 8 chars of SHA for directory name (like Git does)
901 let sha_short = &sha[..8];
902 let cache_dir_hash = {
903 use std::collections::hash_map::DefaultHasher;
904 use std::hash::{Hash, Hasher};
905 let mut hasher = DefaultHasher::new();
906 self.cache_dir.hash(&mut hasher);
907 format!("{:x}", hasher.finish())[..8].to_string()
908 };
909 let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");
910
911 // Check if we already have a worktree for this SHA
912 let mut should_create_worktree = false;
913 while !should_create_worktree {
914 {
915 let cache_read = self.worktree_cache.read().await;
916 match cache_read.get(&cache_key) {
917 Some(WorktreeState::Ready(cached_path)) => {
918 if cached_path.exists() {
919 let cached_path = cached_path.clone();
920 drop(cache_read);
921 self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
922 .await?;
923
924 if let Some(ctx) = context {
925 tracing::debug!(
926 target: "git",
927 "({}) Reusing SHA-based worktree for {} @ {}",
928 ctx,
929 url.split('/').next_back().unwrap_or(url),
930 sha_short
931 );
932 }
933 return Ok(cached_path);
934 }
935 should_create_worktree = true;
936 }
937 Some(WorktreeState::Pending) => {
938 if let Some(ctx) = context {
939 tracing::debug!(
940 target: "git",
941 "({}) Waiting for SHA worktree creation for {} @ {}",
942 ctx,
943 url.split('/').next_back().unwrap_or(url),
944 sha_short
945 );
946 }
947 drop(cache_read);
948 tokio::time::sleep(Duration::from_millis(100)).await;
949 }
950 None => {
951 should_create_worktree = true;
952 }
953 }
954 }
955 }
956
957 // Reserve the cache slot
958 let mut reservation_successful = false;
959 while !reservation_successful {
960 let mut cache_write = self.worktree_cache.write().await;
961 match cache_write.get(&cache_key) {
962 Some(WorktreeState::Ready(cached_path)) if cached_path.exists() => {
963 return Ok(cached_path.clone());
964 }
965 Some(WorktreeState::Pending) => {
966 drop(cache_write);
967 tokio::time::sleep(Duration::from_millis(50)).await;
968 }
969 _ => {
970 cache_write.insert(cache_key.clone(), WorktreeState::Pending);
971 reservation_successful = true;
972 }
973 }
974 }
975
976 // Get bare repository (fetches if needed)
977 let bare_repo_dir = self.cache_dir.join("sources").join(format!("{owner}_{repo}.git"));
978
979 if bare_repo_dir.exists() {
980 // Fetch to ensure we have the SHA
981 self.fetch_with_hybrid_lock(&bare_repo_dir, context).await?;
982 } else {
983 let lock_name = format!("{owner}_{repo}");
984 let _lock = CacheLock::acquire(&self.cache_dir, &lock_name).await?;
985
986 if let Some(parent) = bare_repo_dir.parent() {
987 tokio::fs::create_dir_all(parent).await?;
988 }
989
990 if !bare_repo_dir.exists() {
991 if let Some(ctx) = context {
992 tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
993 } else {
994 tracing::debug!("📦 Cloning repository {url} to cache...");
995 }
996
997 GitRepo::clone_bare_with_context(url, &bare_repo_dir, context).await?;
998 Self::configure_connection_pooling(&bare_repo_dir).await.ok();
999 }
1000 }
1001
1002 let bare_repo = GitRepo::new(&bare_repo_dir);
1003
1004 // Create worktree path using SHA
1005 let worktree_path =
1006 self.cache_dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));
1007
1008 // Acquire worktree creation lock
1009 let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
1010 let _worktree_lock = CacheLock::acquire(&self.cache_dir, &worktree_lock_name).await?;
1011
1012 // Re-check after lock
1013 if worktree_path.exists() {
1014 let mut cache_write = self.worktree_cache.write().await;
1015 cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
1016 self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
1017 return Ok(worktree_path);
1018 }
1019
1020 // Prune stale worktrees if needed
1021 if !worktree_path.exists() {
1022 let _ = bare_repo.prune_worktrees().await;
1023 }
1024
1025 // Create worktree at specific SHA
1026 if let Some(ctx) = context {
1027 tracing::debug!(
1028 target: "git",
1029 "({}) Creating SHA-based worktree: {} @ {}",
1030 ctx,
1031 url.split('/').next_back().unwrap_or(url),
1032 sha_short
1033 );
1034 }
1035
1036 // Lock bare repo for worktree creation
1037 // Hold the lock through cache update to prevent git state corruption
1038 // when multiple worktrees are created concurrently for the same repo
1039 let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");
1040 let _bare_repo_lock = CacheLock::acquire(&self.cache_dir, &bare_repo_lock_name).await?;
1041
1042 // Create worktree using SHA directly
1043 let worktree_result =
1044 bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context).await;
1045
1046 // Keep lock held until cache is updated to ensure git state is fully settled
1047 match worktree_result {
1048 Ok(_) => {
1049 let mut cache_write = self.worktree_cache.write().await;
1050 cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
1051 self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
1052 // Lock automatically dropped here
1053 Ok(worktree_path)
1054 }
1055 Err(e) => {
1056 let mut cache_write = self.worktree_cache.write().await;
1057 cache_write.remove(&cache_key);
1058 // Lock automatically dropped here
1059 Err(e)
1060 }
1061 }
1062 }
1063
1064 /// Get or clone a source repository as a shared bare repository.
1065 ///
1066 /// This method provides the core functionality for repository access. It creates
1067 /// bare repositories that can be shared by all operations (resolution,
1068 /// installation, etc.).
1069 ///
1070 /// # Parameters
1071 ///
1072 /// * `name` - The name of the source (used for cache directory naming)
1073 /// * `url` - The Git repository URL or local path
1074 /// * `version` - Optional version/tag/branch hint; ignored for local paths
1076 ///
1077 /// # Returns
1078 ///
1079 /// Returns the path to the cached bare repository directory
1080 async fn get_or_clone_source_impl(
1081 &self,
1082 name: &str,
1083 url: &str,
1084 version: Option<&str>,
1085 ) -> Result<PathBuf> {
1086 // Check if this is a local path (not a git repository URL)
1087 let is_local_path = crate::utils::is_local_path(url);
1088
1089 if is_local_path {
1090 // For local paths (directories), validate and return the secure path
1091 // No cloning or version management needed
1092
1093 // Resolve path securely with validation
1094 let resolved_path = crate::utils::platform::resolve_path(url)?;
1095
1096 // Canonicalize to get the real path and prevent symlink attacks
1097 let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
1098 .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;
1099
1100 // Security check: Validate path against blacklist and symlinks
1101 validate_path_security(&canonical_path, true)?;
1102
1103 // For local paths, versions don't apply. Suppress warning for internal sentinel values.
1104 if let Some(ver) = version
1105 && ver != "local"
1106 {
1107 eprintln!("Warning: Version constraints are ignored for local paths");
1108 }
1109
1110 return Ok(canonical_path);
1111 }
1112
1113 self.ensure_cache_dir().await?;
1114
1115 // Acquire lock for this source to prevent concurrent access
1116 let _lock = CacheLock::acquire(&self.cache_dir, name)
1117 .await
1118 .with_context(|| format!("Failed to acquire lock for source: {name}"))?;
1119
1120 // Use the same cache directory structure as worktrees - bare repos with .git suffix
1121 // This ensures we have ONE repository that's shared by all operations
1122 let (owner, repo) =
1123 crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
1124 let source_dir = self.cache_dir.join("sources").join(format!("{owner}_{repo}.git")); // Always use .git suffix for bare repos
1125
1126 // Ensure parent directory exists
1127 if let Some(parent) = source_dir.parent() {
1128 tokio::fs::create_dir_all(parent)
1129 .await
1130 .with_context(|| format!("Failed to create cache directory: {parent:?}"))?;
1131 }
1132
1133 if source_dir.exists() {
1134 // Use existing cache - fetch to ensure we have latest refs
1135 // Skip fetch for local paths as they don't have remotes
1136 // For Git URLs, always fetch to get the latest refs (especially important for branches)
1137 if crate::utils::is_git_url(url) {
1138 // Check if we've already fetched this repo in this command instance
1139 let already_fetched = {
1140 let fetched = self.fetched_repos.read().await;
1141 fetched.contains(&source_dir)
1142 };
1143
1144 if already_fetched {
1145 tracing::debug!(
1146 target: "agpm::cache",
1147 "Skipping fetch for {} (already fetched in this command)",
1148 name
1149 );
1150 } else {
1151 tracing::debug!(
1152 target: "agpm::cache",
1153 "Fetching updates for {} from {}",
1154 name,
1155 url
1156 );
1157 let repo = crate::git::GitRepo::new(&source_dir);
1158 if let Err(e) = repo.fetch(None).await {
1159 tracing::warn!(
1160 target: "agpm::cache",
1161 "Failed to fetch updates for {}: {}",
1162 name,
1163 e
1164 );
1165 } else {
1166 // Mark this repo as fetched for this command execution
1167 let mut fetched = self.fetched_repos.write().await;
1168 fetched.insert(source_dir.clone());
1169 tracing::debug!(
1170 target: "agpm::cache",
1171 "Successfully fetched updates for {}",
1172 name
1173 );
1174 }
1175 }
1176 } else {
1177 tracing::debug!(
1178 target: "agpm::cache",
1179 "Skipping fetch for local path: {}",
1180 url
1181 );
1182 }
1183 } else {
1184 // Directory doesn't exist - clone fresh as bare repo
1185 self.clone_source(url, &source_dir).await?;
1186 }
1187
1188 Ok(source_dir)
1189 }
1190
1191 /// Clones a Git repository to the specified target directory as a bare repository.
1192 ///
1193 /// This internal method performs the initial clone operation for repositories
1194 /// that are not yet present in the cache. It creates a bare repository which
1195 /// is optimal for serving and allows multiple worktrees to be created from it.
1196 ///
1197 /// # Why Bare Repositories
1198 ///
1199 /// Bare repositories are used because:
1200 /// - **No working directory conflicts**: Multiple worktrees can be created safely
1201 /// - **Optimized for serving**: Like GitHub/GitLab, designed for fetch operations
1202 /// - **Space efficient**: No checkout of files in the main repository
1203 /// - **Thread-safe**: Multiple processes can fetch from it simultaneously
1204 ///
1205 /// # Authentication
1206 ///
1207 /// Repository authentication is handled through:
1208 /// - **SSH keys**: For `git@github.com:` URLs (user's SSH configuration)
1209 /// - **HTTPS tokens**: For private repositories (from global config)
1210 /// - **Public repos**: No authentication required
1211 ///
1212 /// # Parameters
1213 ///
1214 /// * `url` - Git repository URL to clone from
1215 /// * `target` - Local directory path where bare repository should be created
1216 ///
1217 /// # Errors
1218 ///
1219 /// Returns an error if:
1220 /// - Repository URL is invalid or unreachable
1221 /// - Authentication fails for private repositories
1222 /// - Target directory cannot be created or written to
1223 /// - Network connectivity issues
1224 /// - Git command is not available in PATH
1225 async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
1226 tracing::debug!("📦 Cloning {} to cache...", url);
1227
1228 // Clone as a bare repository for better concurrency and worktree support
1229 GitRepo::clone_bare(url, target)
1230 .await
1231 .with_context(|| format!("Failed to clone repository from {url}"))?;
1232
1233 // Debug: List what was cloned
1234 if cfg!(test)
1235 && let Ok(entries) = std::fs::read_dir(target)
1236 {
1237 tracing::debug!(
1238 target: "agpm::cache",
1239 "Cloned bare repo to {}, contents:",
1240 target.display()
1241 );
1242 for entry in entries.flatten() {
1243 tracing::debug!(
1244 target: "agpm::cache",
1245 " - {}",
1246 entry.path().display()
1247 );
1248 }
1249 }
1250
1251 Ok(())
1252 }
1253
1254 /// Copies a resource file from cached repository to project directory.
1255 ///
1256 /// This method performs the core resource installation operation by copying
1257 /// files from the cached Git repository to the project's local directory.
1258 /// It provides a simple interface for resource installation without output.
1259 ///
1260 /// # Copy Strategy
1261 ///
1262 /// The method uses a copy-based approach rather than symlinks for:
1263 /// - **Cross-platform compatibility**: Works identically on all platforms
1264 /// - **Git integration**: Real files can be tracked and committed
1265 /// - **Editor support**: No symlink confusion in IDEs and editors
1266 /// - **User flexibility**: Local files can be modified if needed
1267 ///
1268 /// # Path Resolution
1269 ///
1270 /// - **Source path**: Relative to the repository root directory
1271 /// - **Target path**: Absolute path where file should be installed
1272 /// - **Directory creation**: Parent directories created automatically
1273 /// - **Path normalization**: Handles platform-specific path separators
1274 ///
1275 /// # Parameters
1276 ///
1277 /// * `source_dir` - Path to the cached repository directory
1278 /// * `source_path` - Relative path to the resource file within the repository
1279 /// * `target_path` - Absolute path where the resource should be installed
1280 ///
1281 /// # Errors
1282 ///
1283 /// Returns an error if:
1284 /// - Source file doesn't exist in the repository
1285 /// - Target directory cannot be created (permissions)
1286 /// - File copy operation fails (disk space, permissions)
1287 /// - Source path attempts directory traversal (security)
1288 ///
1289 /// # Examples
1290 ///
1291 /// Copy a single resource file:
1292 ///
1293 /// ```rust,no_run
1294 /// use agpm_cli::cache::Cache;
1295 /// use std::path::PathBuf;
1296 ///
1297 /// # async fn example() -> anyhow::Result<()> {
1298 /// let cache = Cache::new()?;
1299 ///
1300 /// // Get cached repository
1301 /// let repo_path = cache.get_or_clone_source(
1302 /// "community",
1303 /// "https://github.com/example/repo.git",
1304 /// Some("v1.0.0")
1305 /// ).await?;
1306 ///
1307 /// // Copy resource to project
1308 /// cache.copy_resource(
1309 /// &repo_path,
1310 /// "agents/helper.md", // Source: agents/helper.md in repository
1311 /// &PathBuf::from("./my-agents/helper.md") // Target: project location
1312 /// ).await?;
1313 /// # Ok(())
1314 /// # }
1315 /// ```
1316 ///
1317 /// Copy nested resource:
1318 ///
1319 /// ```rust,no_run
1320 /// use agpm_cli::cache::Cache;
1321 /// use std::path::PathBuf;
1322 ///
1323 /// # async fn example() -> anyhow::Result<()> {
1324 /// let cache = Cache::new()?;
1325 /// let repo_path = PathBuf::from("/cache/community");
1326 ///
1327 /// cache.copy_resource(
1328 /// &repo_path,
1329 /// "tools/generators/api-client.md", // Nested source path
1330 /// &PathBuf::from("./tools/api-client.md") // Flattened target
1331 /// ).await?;
1332 /// # Ok(())
1333 /// # }
1334 /// ```
1335 pub async fn copy_resource(
1336 &self,
1337 source_dir: &Path,
1338 source_path: &str,
1339 target_path: &Path,
1340 ) -> Result<()> {
1341 self.copy_resource_with_output(source_dir, source_path, target_path, false).await
1342 }
1343
1344 /// Copies a resource file with optional installation output messages.
1345 ///
1346 /// This is the full-featured resource copying method that provides control
1347 /// over whether installation progress is displayed to the user. It handles
1348 /// all the details of safe file copying including directory creation,
1349 /// error handling, and atomic operations.
1350 ///
1351 /// # Operation Details
1352 ///
1353 /// 1. **Source validation**: Verifies the source file exists in repository
1354 /// 2. **Directory creation**: Creates target parent directories if needed
1355 /// 3. **Atomic copy**: Performs file copy operation safely
1356 /// 4. **Progress output**: Optionally displays installation confirmation
1357 ///
1358 /// # File Safety
1359 ///
1360 /// - **Overwrite behavior**: Existing files are replaced without prompting
1361 /// - **Atomic operations**: Uses system copy operations for atomicity
1362 /// - **Permission preservation**: Maintains reasonable file permissions
1363 /// - **Path validation**: Prevents directory traversal attacks
1364 ///
1365 /// # Output Control
1366 ///
1367 /// When `show_output` is `true`, displays user-friendly installation messages:
1368 /// ```text
1369 /// ✅ Installed ./agents/helper.md
1370 /// ✅ Installed ./snippets/docker-compose.md
1371 /// ```
1372 ///
1373 /// # Parameters
1374 ///
1375 /// * `source_dir` - Path to the cached repository directory
1376 /// * `source_path` - Relative path to resource file within repository
1377 /// * `target_path` - Absolute path where resource should be installed
1378 /// * `show_output` - Whether to display installation progress messages
1379 ///
1380 /// # Errors
1381 ///
1382 /// Returns specific error types for different failure modes:
1383 /// - [`AgpmError::ResourceFileNotFound`]: Source file doesn't exist
1384 /// - File system errors: Permission, disk space, invalid paths
1385 /// - Directory creation errors: Parent directory creation failures
1386 ///
1387 /// # Examples
1388 ///
1389 /// Silent installation (for batch operations):
1390 ///
1391 /// ```rust,no_run
1392 /// use agpm_cli::cache::Cache;
1393 /// use std::path::PathBuf;
1394 ///
1395 /// # async fn example() -> anyhow::Result<()> {
1396 /// let cache = Cache::new()?;
1397 /// let repo_path = PathBuf::from("/cache/community");
1398 ///
1399 /// cache.copy_resource_with_output(
1400 /// &repo_path,
1401 /// "agents/helper.md",
1402 /// &PathBuf::from("./agents/helper.md"),
1403 /// false // No output
1404 /// ).await?;
1405 /// # Ok(())
1406 /// # }
1407 /// ```
1408 ///
1409 /// Interactive installation (with progress):
1410 ///
1411 /// ```rust,no_run
1412 /// use agpm_cli::cache::Cache;
1413 /// use std::path::PathBuf;
1414 ///
1415 /// # async fn example() -> anyhow::Result<()> {
1416 /// let cache = Cache::new()?;
1417 /// let repo_path = PathBuf::from("/cache/community");
1418 ///
1419 /// cache.copy_resource_with_output(
1420 /// &repo_path,
1421 /// "snippets/deployment.md",
1422 /// &PathBuf::from("./snippets/deployment.md"),
1423 /// true // Show "✅ Installed" message
1424 /// ).await?;
1425 /// # Ok(())
1426 /// # }
1427 /// ```
1428 pub async fn copy_resource_with_output(
1429 &self,
1430 source_dir: &Path,
1431 source_path: &str,
1432 target_path: &Path,
1433 show_output: bool,
1434 ) -> Result<()> {
1435 let source_file = source_dir.join(source_path);
1436
1437 if !source_file.exists() {
1438 return Err(AgpmError::ResourceFileNotFound {
1439 path: source_path.to_string(),
1440 source_name: source_dir
1441 .file_name()
1442 .and_then(|n| n.to_str())
1443 .unwrap_or("unknown")
1444 .to_string(),
1445 }
1446 .into());
1447 }
1448
1449 if let Some(parent) = target_path.parent() {
1450 async_fs::create_dir_all(parent)
1451 .await
1452 .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
1453 }
1454
1455 async_fs::copy(&source_file, target_path).await.with_context(|| {
1456 format!("Failed to copy {} to {}", source_file.display(), target_path.display())
1457 })?;
1458
1459 if show_output {
1460 println!(" ✅ Installed {}", target_path.display());
1461 }
1462
1463 Ok(())
1464 }
1465
1466 /// Removes unused cached repositories to reclaim disk space.
1467 ///
1468 /// This method performs selective cache cleanup by removing repositories
1469 /// that are no longer referenced by any active source configurations.
1470 /// It's a safe operation that preserves repositories currently in use.
1471 ///
1472 /// # Cleanup Strategy
1473 ///
1474 /// 1. **Directory scanning**: Enumerates all cached repository directories
1475 /// 2. **Active comparison**: Checks each directory against active sources list
1476 /// 3. **Safe removal**: Removes only unused directories, preserving files
1477 /// 4. **Progress reporting**: Displays removal progress for user feedback
1478 ///
1479 /// # Safety Guarantees
1480 ///
1481 /// - **Active protection**: Never removes repositories listed in active sources
1482 /// - **Directory-only**: Only removes directories, preserves any loose files
1483 /// - **Atomic removal**: Each directory is removed completely or not at all
1484 /// - **Lock awareness**: Respects file locks but doesn't acquire them
1485 ///
1486 /// # Performance Considerations
1487 ///
1488 /// - **I/O intensive**: Scans entire cache directory structure
1489 /// - **Disk space recovery**: Can free significant space for large repositories
1490 /// - **Network savings**: Removed repositories will need re-cloning if used again
1491 /// - **Concurrent safe**: Can run while other cache operations are in progress
1492 ///
1493 /// # Parameters
1494 ///
1495 /// * `active_sources` - List of source names that should be preserved in cache
1496 ///
1497 /// # Returns
1498 ///
1499 /// Returns the number of repository directories that were successfully removed.
1500 ///
1501 /// # Errors
1502 ///
1503 /// Returns an error if:
1504 /// - Cache directory cannot be read (permissions)
1505 /// - Unable to remove a directory (file locks, permissions)
1506 /// - File system errors during directory traversal
1507 ///
1508 /// # Output Messages
1509 ///
1510 /// Displays progress messages for each removed repository:
1511 /// ```text
1512 /// 🗑️ Removing unused cache: old-project
1513 /// 🗑️ Removing unused cache: deprecated-tools
1514 /// ```
1515 ///
1516 /// # Examples
1517 ///
1518 /// Clean cache based on current manifest sources:
1519 ///
1520 /// ```rust,no_run
1521 /// use agpm_cli::cache::Cache;
1522 ///
1523 /// # async fn example() -> anyhow::Result<()> {
1524 /// let cache = Cache::new()?;
1525 ///
1526 /// // Active sources from current agpm.toml
1527 /// let active_sources = vec![
1528 /// "community".to_string(),
1529 /// "work-tools".to_string(),
1530 /// "personal".to_string(),
1531 /// ];
1532 ///
1533 /// let removed = cache.clean_unused(&active_sources).await?;
1534 /// println!("Cleaned {} unused repositories", removed);
1535 /// # Ok(())
1536 /// # }
1537 /// ```
1538 ///
1539 /// Clean all cached repositories:
1540 ///
1541 /// ```rust,no_run
1542 /// use agpm_cli::cache::Cache;
1543 ///
1544 /// # async fn example() -> anyhow::Result<()> {
1545 /// let cache = Cache::new()?;
1546 ///
1547 /// // Empty active list removes everything
1548 /// let removed = cache.clean_unused(&[]).await?;
1549 /// println!("Removed all {} cached repositories", removed);
1550 /// # Ok(())
1551 /// # }
1552 /// ```
1553 pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
1554 self.ensure_cache_dir().await?;
1555
1556 let mut removed_count = 0;
1557 let mut entries = async_fs::read_dir(&self.cache_dir)
1558 .await
1559 .with_context(|| "Failed to read cache directory")?;
1560
1561 while let Some(entry) =
1562 entries.next_entry().await.with_context(|| "Failed to read directory entry")?
1563 {
1564 let path = entry.path();
1565 if path.is_dir() {
1566 let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
1567
1568 if !active_sources.contains(&dir_name.to_string()) {
1569 println!("🗑️ Removing unused cache: {dir_name}");
1570 async_fs::remove_dir_all(&path).await.with_context(|| {
1571 format!("Failed to remove cache directory: {}", path.display())
1572 })?;
1573 removed_count += 1;
1574 }
1575 }
1576 }
1577
1578 Ok(removed_count)
1579 }
1580
1581 /// Calculates the total size of the cache directory in bytes.
1582 ///
1583 /// This method recursively calculates the disk space used by all cached
1584 /// repositories and supporting files. It's useful for cache size monitoring,
1585 /// cleanup decisions, and storage management.
1586 ///
1587 /// # Calculation Method
1588 ///
1589 /// - **Recursive traversal**: Includes all subdirectories and files
1590 /// - **Logical file sizes**: Sums file sizes in bytes rather than allocated blocks
1591 /// - **All file types**: Includes Git objects, working files, and lock files
1592 /// - **Cross-platform**: Consistent behavior across different file systems
1593 ///
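/// A minimal sketch of the kind of recursive walk this involves, using a
/// hypothetical synchronous helper (the real implementation delegates to an
/// async utility, `fs::get_directory_size`):
///
/// ```rust,no_run
/// # fn example() -> std::io::Result<()> {
/// // Sums logical file sizes under `path`, recursing into subdirectories.
/// fn dir_size(path: &std::path::Path) -> std::io::Result<u64> {
///     let mut total = 0u64;
///     for entry in std::fs::read_dir(path)? {
///         let entry = entry?;
///         let meta = entry.metadata()?;
///         total += if meta.is_dir() { dir_size(&entry.path())? } else { meta.len() };
///     }
///     Ok(total)
/// }
///
/// let bytes = dir_size(std::path::Path::new("/path/to/cache"))?;
/// println!("Cache uses {bytes} bytes");
/// # Ok(())
/// # }
/// ```
///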
1594 /// # Performance Notes
1595 ///
1596 /// - **I/O intensive**: May be slow for very large caches
1597 /// - **File system dependent**: Performance varies by underlying storage
1598 /// - **Concurrency-safe**: Can run during other cache operations
1599 /// - **Memory efficient**: Streams directory traversal without loading all paths
1600 ///
1601 /// # Returns
1602 ///
1603 /// Returns the total size in bytes. For a non-existent cache directory,
1604 /// returns `0` without error.
1605 ///
1606 /// # Errors
1607 ///
1608 /// Returns an error if:
1609 /// - Permission denied reading cache directory or subdirectories
1610 /// - File system errors during directory traversal
1611 /// - Symbolic link cycles (rare, but possible)
1612 ///
1613 /// # Examples
1614 ///
1615 /// Check current cache size:
1616 ///
1617 /// ```rust,no_run
1618 /// use agpm_cli::cache::Cache;
1619 ///
1620 /// # async fn example() -> anyhow::Result<()> {
1621 /// let cache = Cache::new()?;
1622 ///
1623 /// let size_bytes = cache.get_cache_size().await?;
1624 /// let size_mb = size_bytes / 1024 / 1024;
1625 ///
1626 /// println!("Cache size: {} MB ({} bytes)", size_mb, size_bytes);
1627 /// # Ok(())
1628 /// # }
1629 /// ```
1630 ///
1631 /// Display human-readable sizes:
1632 ///
1633 /// ```rust,no_run
1634 /// use agpm_cli::cache::Cache;
1635 ///
1636 /// # async fn example() -> anyhow::Result<()> {
1637 /// let cache = Cache::new()?;
1638 /// let size_bytes = cache.get_cache_size().await?;
1639 ///
1640 /// let (size, unit) = match size_bytes {
1641 /// s if s < 1024 => (s, "B"),
1642 /// s if s < 1024 * 1024 => (s / 1024, "KB"),
1643 /// s if s < 1024 * 1024 * 1024 => (s / 1024 / 1024, "MB"),
1644 /// s => (s / 1024 / 1024 / 1024, "GB"),
1645 /// };
1646 ///
1647 /// println!("Cache size: {}{}", size, unit);
1648 /// # Ok(())
1649 /// # }
1650 /// ```
1651 pub async fn get_cache_size(&self) -> Result<u64> {
1652 if !self.cache_dir.exists() {
1653 return Ok(0);
1654 }
1655
1656 let size = fs::get_directory_size(&self.cache_dir).await?;
1657 Ok(size)
1658 }
1659
1660 /// Returns the path to the cache directory.
1661 ///
1662 /// This method provides access to the cache directory path for inspection,
1663 /// logging, or integration with other tools. The path represents where
1664 /// all cached repositories and supporting files are stored.
1665 ///
1666 /// # Return Value
1667 ///
1668 /// Returns a reference to the [`Path`] representing the cache directory.
1669 /// The path may or may not exist on the file system - use [`ensure_cache_dir`]
1670 /// to create it if needed.
1671 ///
1672 /// # Thread Safety
1673 ///
1674 /// This method is safe to call from multiple threads as it only returns
1675 /// a reference to the immutable path stored in the `Cache` instance.
1676 ///
1677 /// # Examples
1678 ///
1679 /// Display cache location:
1680 ///
1681 /// ```rust,no_run
1682 /// use agpm_cli::cache::Cache;
1683 ///
1684 /// # fn example() -> anyhow::Result<()> {
1685 /// let cache = Cache::new()?;
1686 /// println!("Cache stored at: {}", cache.get_cache_location().display());
1687 /// # Ok(())
1688 /// # }
1689 /// ```
1690 ///
1691 /// Check if cache exists:
1692 ///
1693 /// ```rust,no_run
1694 /// use agpm_cli::cache::Cache;
1695 ///
1696 /// # fn example() -> anyhow::Result<()> {
1697 /// let cache = Cache::new()?;
1698 /// let location = cache.get_cache_location();
1699 ///
1700 /// if location.exists() {
1701 /// println!("Cache directory exists at: {}", location.display());
1702 /// } else {
1703 /// println!("Cache directory not yet created: {}", location.display());
1704 /// }
1705 /// # Ok(())
1706 /// # }
1707 /// ```
1708 ///
1709 /// [`ensure_cache_dir`]: Cache::ensure_cache_dir
1710 #[must_use]
1711 pub fn get_cache_location(&self) -> &Path {
1712 &self.cache_dir
1713 }
1714
1715 /// Completely removes the entire cache directory and all its contents.
1716 ///
1717 /// This is a destructive operation that removes all cached repositories,
1718 /// lock files, and any other cache-related data. Use with caution as
1719 /// this will require re-cloning all repositories on the next operation.
1720 ///
1721 /// # Operation Details
1722 ///
1723 /// - **Complete removal**: Deletes the entire cache directory tree
1724 /// - **Recursive deletion**: Removes all subdirectories and files
1725 /// - **Lock files**: Also removes .locks directory and all lock files
1726 /// - **Not transactional**: An interrupted removal can leave a partial cache, which is recreated automatically on the next operation
1727 ///
1728 /// # Recovery Impact
1729 ///
1730 /// After calling this method:
1731 /// - All repositories must be re-cloned on next use
1732 /// - Network bandwidth will be required for repository downloads
1733 /// - Disk space is immediately reclaimed
1734 /// - Cache directory will be recreated automatically on next operation
1735 ///
1736 /// # Safety Considerations
1737 ///
1738 /// - **No confirmation**: This method doesn't ask for confirmation
1739 /// - **Irreversible**: Cannot undo the deletion operation
1740 /// - **Concurrent operations**: May interfere with running cache operations
1741 /// - **Lock handling**: Does not wait for locks and may fail if repositories are in use
1742 ///
1743 /// # Errors
1744 ///
1745 /// Returns an error if:
1746 /// - Permission denied for cache directory or contents
1747 /// - Files are locked by other processes
1748 /// - File system errors during deletion
1749 /// - Cache directory is in use by another process
1750 ///
1751 /// # Output Messages
1752 ///
1753 /// Displays confirmation message on successful completion:
1754 /// ```text
1755 /// 🗑️ Cleared all cache
1756 /// ```
1757 ///
1758 /// # Examples
1759 ///
1760 /// Clear cache for fresh start:
1761 ///
1762 /// ```rust,no_run
1763 /// use agpm_cli::cache::Cache;
1764 ///
1765 /// # async fn example() -> anyhow::Result<()> {
1766 /// let cache = Cache::new()?;
1767 ///
1768 /// // Check size before clearing
1769 /// let size_before = cache.get_cache_size().await?;
1770 /// println!("Cache size before: {} bytes", size_before);
1771 ///
1772 /// // Clear everything
1773 /// cache.clear_all().await?;
1774 ///
1775 /// // Verify cache is empty
1776 /// let size_after = cache.get_cache_size().await?;
1777 /// println!("Cache size after: {} bytes", size_after); // Should be 0
1778 /// # Ok(())
1779 /// # }
1780 /// ```
1781 ///
1782 /// Clear cache with error handling:
1783 ///
1784 /// ```rust,no_run
1785 /// use agpm_cli::cache::Cache;
1786 ///
1787 /// # async fn example() -> anyhow::Result<()> {
1788 /// let cache = Cache::new()?;
1789 ///
1790 /// match cache.clear_all().await {
1791 /// Ok(()) => println!("Cache cleared successfully"),
1792 /// Err(e) => {
1793 /// eprintln!("Failed to clear cache: {}", e);
1794 /// eprintln!("Some files may be in use by other processes");
1795 /// }
1796 /// }
1797 /// # Ok(())
1798 /// # }
1799 /// ```
1800 pub async fn clear_all(&self) -> Result<()> {
1801 if self.cache_dir.exists() {
1802 async_fs::remove_dir_all(&self.cache_dir)
1803 .await
1804 .with_context(|| "Failed to clear cache")?;
1805 println!("🗑️ Cleared all cache");
1806 }
1807 Ok(())
1808 }
1809
1810 /// Perform a fetch operation with hybrid locking (in-process and cross-process).
1811 ///
1812 /// This method implements a two-level locking strategy:
1813 /// 1. In-process locks (`Arc<Mutex<()>>`) for fast coordination within the same process
1814 /// 2. File-based locks for cross-process coordination
1815 ///
1816 /// The fetch will only happen once per repository per command execution.
1817 ///
1818 /// # Parameters
1819 ///
1820 /// * `bare_repo_path` - Path to the bare repository
1821 /// * `context` - Optional context string for logging
1822 ///
1823 /// # Returns
1824 ///
1825 /// Returns `Ok(())` if the fetch completed successfully or was skipped because the repository was already fetched during this command.
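///
/// # Locking Sketch
///
/// A minimal sketch of the two-level pattern, with illustrative names and
/// lock-file paths rather than the exact implementation:
///
/// ```rust,no_run
/// use std::sync::Arc;
/// use fs4::fs_std::FileExt;
/// use tokio::sync::Mutex;
///
/// # async fn example() -> anyhow::Result<()> {
/// // Level 1: in-process guard shared by all tasks touching this repository.
/// let memory_lock: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
/// let _memory_guard = memory_lock.lock().await;
///
/// // Level 2: cross-process guard via an exclusive advisory file lock.
/// let lock_file = std::fs::OpenOptions::new()
///     .create(true)
///     .write(true)
///     .open("/path/to/cache/.locks/example.fetch.lock")?;
/// lock_file.lock_exclusive()?;
///
/// // ...consult the per-command fetched set, fetch once if needed...
/// // Both guards are released when they go out of scope.
/// # Ok(())
/// # }
/// ```
///
/// In the real method the in-process mutex is looked up per repository path and
/// the fetched-set check happens only after the file lock is held.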
1826 async fn fetch_with_hybrid_lock(
1827 &self,
1828 bare_repo_path: &Path,
1829 context: Option<&str>,
1830 ) -> Result<()> {
1831 // Level 1: In-process lock (fast path)
1832 let memory_lock = self
1833 .fetch_locks
1834 .entry(bare_repo_path.to_path_buf())
1835 .or_insert_with(|| Arc::new(Mutex::new(())))
1836 .clone();
1837 let _memory_guard = memory_lock.lock().await;
1838
1839 // Level 2: File-based lock (cross-process)
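// Build a filesystem-safe lock-file name from the bare repository's directory name.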
1840 let safe_name = bare_repo_path
1841 .file_name()
1842 .and_then(|s| s.to_str())
1843 .unwrap_or("unknown")
1844 .replace(['/', '\\', ':'], "_");
1845
1846 let lock_path = self.cache_dir.join(".locks").join(format!("{safe_name}.fetch.lock"));
1847
1848 // Ensure lock directory exists
1849 if let Some(parent) = lock_path.parent() {
1850 tokio::fs::create_dir_all(parent).await?;
1851 }
1852
1853 // Create/open lock file
1854 let lock_file = tokio::fs::OpenOptions::new()
1855 .create(true)
1856 .write(true)
1857 .truncate(false)
1858 .open(&lock_path)
1859 .await?;
1860
1861 // Convert to std::fs::File for fs4
1862 let std_file = lock_file.into_std().await;
1863
1864 // Acquire exclusive lock (a blocking call; holds this thread until the lock is granted)
1865 use fs4::fs_std::FileExt;
1866 if let Some(ctx) = context {
1867 tracing::debug!(
1868 target: "agpm::git",
1869 "({}) Acquiring file lock for {}",
1870 ctx,
1871 bare_repo_path.display()
1872 );
1873 }
1874 std_file.lock_exclusive()?;
1875
1876 if let Some(ctx) = context {
1877 tracing::debug!(
1878 target: "agpm::git",
1879 "({}) Acquired file lock for {}",
1880 ctx,
1881 bare_repo_path.display()
1882 );
1883 }
1884
1885 // Now check if we've already fetched this repo in this command execution
1886 // This happens AFTER acquiring the lock to prevent race conditions
1887 let already_fetched = {
1888 let fetched = self.fetched_repos.read().await;
1889 let is_fetched = fetched.contains(bare_repo_path);
1890 if let Some(ctx) = context {
1891 tracing::debug!(
1892 target: "agpm::git",
1893 "({}) Checking if already fetched: {} - Result: {} (total fetched: {}, hashset addr: {:p})",
1894 ctx,
1895 bare_repo_path.display(),
1896 is_fetched,
1897 fetched.len(),
1898 &raw const *fetched
1899 );
1900 }
1901 is_fetched
1902 };
1903
1904 if already_fetched {
1905 if let Some(ctx) = context {
1906 tracing::debug!(
1907 target: "agpm::git",
1908 "({}) Skipping fetch (already fetched in this command): {}",
1909 ctx,
1910 bare_repo_path.display()
1911 );
1912 }
1913 // Returning here drops std_file, which releases the file lock
1914 return Ok(());
1915 }
1916
1917 // Now safe to fetch
1918 let repo = GitRepo::new(bare_repo_path);
1919
1920 if let Some(ctx) = context {
1921 tracing::debug!(
1922 target: "agpm::git",
1923 "({}) Fetching updates for {}",
1924 ctx,
1925 bare_repo_path.display()
1926 );
1927 }
1928
1929 repo.fetch(None).await?;
1930
1931 // Mark this repo as fetched for this command execution
1932 {
1933 let mut fetched = self.fetched_repos.write().await;
1934 fetched.insert(bare_repo_path.to_path_buf());
1935 if let Some(ctx) = context {
1936 tracing::debug!(
1937 target: "agpm::git",
1938 "({}) Marked as fetched: {} (total fetched: {}, hashset addr: {:p})",
1939 ctx,
1940 bare_repo_path.display(),
1941 fetched.len(),
1942 &raw const *fetched
1943 );
1944 }
1945 }
1946
1947 // File lock automatically released when std_file is dropped
1948 Ok(())
1949 }
1950}
1951
1952#[cfg(test)]
1953mod tests {
1954 use super::*;
1955 use tempfile::TempDir;
1956
1957 #[tokio::test]
1958 async fn test_cache_dir_creation() {
1959 let temp_dir = TempDir::new().unwrap();
1960 let cache_dir = temp_dir.path().join("cache");
1961
1962 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
1963 cache.ensure_cache_dir().await.unwrap();
1964
1965 assert!(cache_dir.exists());
1966 }
1967
1968 #[tokio::test]
1969 async fn test_cache_location() {
1970 let temp_dir = TempDir::new().unwrap();
1971 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1972 let location = cache.get_cache_location();
1973 assert_eq!(location, temp_dir.path());
1974 }
1975
1976 #[tokio::test]
1977 async fn test_cache_size_empty() {
1978 let temp_dir = TempDir::new().unwrap();
1979 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1980
1981 cache.ensure_cache_dir().await.unwrap();
1982 let size = cache.get_cache_size().await.unwrap();
1983 assert_eq!(size, 0);
1984 }
1985
1986 #[tokio::test]
1987 async fn test_cache_size_with_content() {
1988 let temp_dir = TempDir::new().unwrap();
1989 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
1990
1991 cache.ensure_cache_dir().await.unwrap();
1992
1993 // Create some test content
1994 let test_file = temp_dir.path().join("test.txt");
1995 std::fs::write(&test_file, "test content").unwrap();
1996
1997 let size = cache.get_cache_size().await.unwrap();
1998 assert!(size > 0);
1999 assert_eq!(size, 12); // "test content" is 12 bytes
2000 }
2001
2002 #[tokio::test]
2003 async fn test_clean_unused_empty_cache() {
2004 let temp_dir = TempDir::new().unwrap();
2005 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2006
2007 cache.ensure_cache_dir().await.unwrap();
2008
2009 let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
2010 assert_eq!(removed, 0);
2011 }
2012
2013 #[tokio::test]
2014 async fn test_clean_unused_removes_correct_dirs() {
2015 let temp_dir = TempDir::new().unwrap();
2016 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2017
2018 cache.ensure_cache_dir().await.unwrap();
2019
2020 // Create some test directories
2021 let active_dir = temp_dir.path().join("active");
2022 let unused_dir = temp_dir.path().join("unused");
2023 let another_unused = temp_dir.path().join("another_unused");
2024
2025 std::fs::create_dir_all(&active_dir).unwrap();
2026 std::fs::create_dir_all(&unused_dir).unwrap();
2027 std::fs::create_dir_all(&another_unused).unwrap();
2028
2029 // Add some content to verify directories are removed completely
2030 std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
2031 std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
2032 std::fs::write(another_unused.join("file.txt"), "remove").unwrap();
2033
2034 let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
2035
2036 assert_eq!(removed, 2);
2037 assert!(active_dir.exists());
2038 assert!(!unused_dir.exists());
2039 assert!(!another_unused.exists());
2040 }
2041
2042 #[tokio::test]
2043 async fn test_clear_all_removes_entire_cache() {
2044 let temp_dir = TempDir::new().unwrap();
2045 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2046
2047 cache.ensure_cache_dir().await.unwrap();
2048
2049 // Create some content
2050 let subdir = temp_dir.path().join("subdir");
2051 std::fs::create_dir_all(&subdir).unwrap();
2052 std::fs::write(subdir.join("file.txt"), "content").unwrap();
2053
2054 assert!(temp_dir.path().exists());
2055 assert!(subdir.exists());
2056
2057 cache.clear_all().await.unwrap();
2058
2059 assert!(!temp_dir.path().exists());
2060 }
2061
2062 #[tokio::test]
2063 async fn test_copy_resource() {
2064 let temp_dir = TempDir::new().unwrap();
2065 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2066
2067 // Create source file
2068 let source_dir = temp_dir.path().join("source");
2069 std::fs::create_dir_all(&source_dir).unwrap();
2070 let source_file = source_dir.join("resource.md");
2071 std::fs::write(&source_file, "# Test Resource\nContent").unwrap();
2072
2073 // Copy resource
2074 let dest = temp_dir.path().join("dest.md");
2075 cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();
2076
2077 assert!(dest.exists());
2078 let content = std::fs::read_to_string(&dest).unwrap();
2079 assert_eq!(content, "# Test Resource\nContent");
2080 }
2081
2082 #[tokio::test]
2083 async fn test_copy_resource_nested_path() {
2084 let temp_dir = TempDir::new().unwrap();
2085 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2086
2087 // Create source file in nested directory
2088 let source_dir = temp_dir.path().join("source");
2089 let nested_dir = source_dir.join("nested").join("path");
2090 std::fs::create_dir_all(&nested_dir).unwrap();
2091 let source_file = nested_dir.join("resource.md");
2092 std::fs::write(&source_file, "# Nested Resource").unwrap();
2093
2094 // Copy resource using relative path from source_dir
2095 let dest = temp_dir.path().join("dest.md");
2096 cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();
2097
2098 assert!(dest.exists());
2099 let content = std::fs::read_to_string(&dest).unwrap();
2100 assert_eq!(content, "# Nested Resource");
2101 }
2102
2103 #[tokio::test]
2104 async fn test_copy_resource_invalid_path() {
2105 let temp_dir = TempDir::new().unwrap();
2106 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2107
2108 let source_dir = temp_dir.path().join("source");
2109 std::fs::create_dir_all(&source_dir).unwrap();
2110
2111 // Try to copy non-existent resource
2112 let dest = temp_dir.path().join("dest.md");
2113 let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;
2114
2115 assert!(result.is_err());
2116 assert!(!dest.exists());
2117 }
2118
2119 #[tokio::test]
2120 async fn test_ensure_cache_dir_idempotent() {
2121 let temp_dir = TempDir::new().unwrap();
2122 let cache_dir = temp_dir.path().join("cache");
2123 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
2124
2125 // Call ensure_cache_dir multiple times
2126 cache.ensure_cache_dir().await.unwrap();
2127 assert!(cache_dir.exists());
2128
2129 cache.ensure_cache_dir().await.unwrap();
2130 assert!(cache_dir.exists());
2131
2132 // Add a file and ensure it's preserved
2133 std::fs::write(cache_dir.join("test.txt"), "content").unwrap();
2134
2135 cache.ensure_cache_dir().await.unwrap();
2136 assert!(cache_dir.exists());
2137 assert!(cache_dir.join("test.txt").exists());
2138 }
2139
2140 #[tokio::test]
2141 async fn test_copy_resource_creates_parent_directories() {
2142 let temp_dir = TempDir::new().unwrap();
2143 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2144
2145 // Create source file
2146 let source_dir = temp_dir.path().join("source");
2147 std::fs::create_dir_all(&source_dir).unwrap();
2148 std::fs::write(source_dir.join("file.md"), "content").unwrap();
2149
2150 // Copy to a destination with non-existent parent directories
2151 let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
2152 cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
2153
2154 assert!(dest.exists());
2155 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
2156 }
2157
2158 #[tokio::test]
2159 async fn test_copy_resource_with_output_flag() {
2160 let temp_dir = TempDir::new().unwrap();
2161 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2162
2163 // Create source file
2164 let source_dir = temp_dir.path().join("source");
2165 std::fs::create_dir_all(&source_dir).unwrap();
2166 std::fs::write(source_dir.join("file.md"), "content").unwrap();
2167
2168 // Test with output flag false
2169 let dest1 = temp_dir.path().join("dest1.md");
2170 cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
2171 assert!(dest1.exists());
2172
2173 // Test with output flag true
2174 let dest2 = temp_dir.path().join("dest2.md");
2175 cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
2176 assert!(dest2.exists());
2177 }
2178
2179 #[tokio::test]
2180 async fn test_cache_size_nonexistent_dir() {
2181 let temp_dir = TempDir::new().unwrap();
2182 let nonexistent = temp_dir.path().join("nonexistent");
2183 let cache = Cache::with_dir(nonexistent).unwrap();
2184
2185 let size = cache.get_cache_size().await.unwrap();
2186 assert_eq!(size, 0);
2187 }
2188
2189 #[tokio::test]
2190 async fn test_clear_all_nonexistent_cache() {
2191 let temp_dir = TempDir::new().unwrap();
2192 let nonexistent = temp_dir.path().join("nonexistent");
2193 let cache = Cache::with_dir(nonexistent).unwrap();
2194
2195 // Should not error when clearing non-existent cache
2196 cache.clear_all().await.unwrap();
2197 }
2198
2199 #[tokio::test]
2200 async fn test_clean_unused_with_files_and_dirs() {
2201 let temp_dir = TempDir::new().unwrap();
2202 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2203
2204 cache.ensure_cache_dir().await.unwrap();
2205
2206 // Create directories
2207 std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
2208 std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();
2209
2210 // Create a file (not a directory)
2211 std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();
2212
2213 let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();
2214
2215 // Should only remove the "remove" directory, not the file
2216 assert_eq!(removed, 1);
2217 assert!(temp_dir.path().join("keep").exists());
2218 assert!(!temp_dir.path().join("remove").exists());
2219 assert!(temp_dir.path().join("file.txt").exists());
2220 }
2221
2222 #[tokio::test]
2223 async fn test_copy_resource_overwrites_existing() {
2224 let temp_dir = TempDir::new().unwrap();
2225 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2226
2227 // Create source file
2228 let source_dir = temp_dir.path().join("source");
2229 std::fs::create_dir_all(&source_dir).unwrap();
2230 std::fs::write(source_dir.join("file.md"), "new content").unwrap();
2231
2232 // Create existing destination file
2233 let dest = temp_dir.path().join("dest.md");
2234 std::fs::write(&dest, "old content").unwrap();
2235
2236 // Copy should overwrite
2237 cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
2238
2239 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
2240 }
2241
2242 #[tokio::test]
2243 async fn test_copy_resource_special_characters() {
2244 let temp_dir = TempDir::new().unwrap();
2245 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2246
2247 // Create source file with special characters
2248 let source_dir = temp_dir.path().join("source");
2249 std::fs::create_dir_all(&source_dir).unwrap();
2250 let special_name = "file with spaces & special-chars.md";
2251 std::fs::write(source_dir.join(special_name), "content").unwrap();
2252
2253 // Copy resource
2254 let dest = temp_dir.path().join("dest.md");
2255 cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();
2256
2257 assert!(dest.exists());
2258 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
2259 }
2260
2261 #[tokio::test]
2262 async fn test_cache_location_consistency() {
2263 let temp_dir = TempDir::new().unwrap();
2264 let cache_dir = temp_dir.path().join("my_cache");
2265 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
2266
2267 // Get location multiple times
2268 let loc1 = cache.get_cache_location();
2269 let loc2 = cache.get_cache_location();
2270
2271 assert_eq!(loc1, loc2);
2272 assert_eq!(loc1, cache_dir.as_path());
2273 }
2274
2275 #[tokio::test]
2276 async fn test_clean_unused_empty_active_list() {
2277 let temp_dir = TempDir::new().unwrap();
2278 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2279
2280 cache.ensure_cache_dir().await.unwrap();
2281
2282 // Create some directories
2283 std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
2284 std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();
2285
2286 // Empty active list should remove all
2287 let removed = cache.clean_unused(&[]).await.unwrap();
2288
2289 assert_eq!(removed, 2);
2290 assert!(!temp_dir.path().join("source1").exists());
2291 assert!(!temp_dir.path().join("source2").exists());
2292 }
2293
2294 #[tokio::test]
2295 async fn test_copy_resource_with_relative_paths() {
2296 let temp_dir = TempDir::new().unwrap();
2297 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2298
2299 // Create source with subdirectories
2300 let source_dir = temp_dir.path().join("source");
2301 let sub_dir = source_dir.join("agents");
2302 std::fs::create_dir_all(&sub_dir).unwrap();
2303 std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();
2304
2305 // Copy using relative path
2306 let dest = temp_dir.path().join("my-agent.md");
2307 cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();
2308
2309 assert!(dest.exists());
2310 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
2311 }
2312
2313 #[tokio::test]
2314 async fn test_cache_size_with_subdirectories() {
2315 let temp_dir = TempDir::new().unwrap();
2316 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2317
2318 cache.ensure_cache_dir().await.unwrap();
2319
2320 // Create nested structure with files
2321 let sub1 = temp_dir.path().join("sub1");
2322 let sub2 = sub1.join("sub2");
2323 std::fs::create_dir_all(&sub2).unwrap();
2324
2325 std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap(); // 5 bytes
2326 std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap(); // 10 bytes
2327 std::fs::write(sub2.join("file3.txt"), "abc").unwrap(); // 3 bytes
2328
2329 let size = cache.get_cache_size().await.unwrap();
2330 assert_eq!(size, 18); // 5 + 10 + 3
2331 }
2332}