agpm_cli/cache/mod.rs
//! Git repository cache management with worktree-based parallel operations
//!
//! This module provides the caching system for Git repositories that enables
//! safe parallel resource installation through Git worktrees. The cache was
//! redesigned in AGPM v0.3.0 for better concurrency, a simpler architecture,
//! and higher performance.
//!
//! # Architecture Overview
//!
//! The cache system implements a multi-layered architecture:
//! - [`Cache`] struct: Core repository management and worktree orchestration
//! - [`CacheLock`]: File-based locking for process-safe concurrent access
//! - `WorktreeState`: Instance-level caching for worktree lifecycle management
//! - Bare repositories: Optimized Git storage for efficient worktree creation
//!
//! # Platform-Specific Cache Locations
//!
//! The cache follows platform conventions:
//! - **Linux/macOS**: `~/.agpm/cache/`
//! - **Windows**: `%LOCALAPPDATA%\agpm\cache\` (the Windows local app-data directory)
//! - **Environment Override**: Set `AGPM_CACHE_DIR` for custom locations
//!
//! # Cache Directory Structure
//!
//! The cache is organized for optimal parallel access patterns:
//! ```text
//! ~/.agpm/cache/
//! ├── sources/                         # Bare repositories optimized for worktrees
//! │   ├── github_owner_repo.git/       # Bare repo with all Git objects
//! │   └── gitlab_org_project.git/      # URL-parsed directory naming
//! ├── worktrees/                       # SHA-based worktrees for maximum deduplication
//! │   ├── github_owner_repo_abc12345/  # First 8 chars of commit SHA
//! │   ├── github_owner_repo_def67890/  # Each unique commit gets one worktree
//! │   ├── .state.json                  # Persistent worktree registry
//! │   └── github_owner_repo_456789ab/  # Multiple refs to the same SHA share a worktree
//! └── .locks/                          # Fine-grained locking infrastructure
//!     ├── github_owner_repo.lock       # Repository-level locks
//!     └── worktree-owner_repo-v1.lock  # Worktree creation locks
//! ```
//!
//! # Enhanced Concurrency Architecture
//!
//! The v0.3.2+ cache implements SHA-based worktree optimization with advanced
//! concurrency (see the sketch after this list):
//! - **SHA-based deduplication**: Worktrees keyed by commit SHA, not version reference
//! - **Centralized resolution**: `VersionResolver` handles batch SHA resolution upfront
//! - **Maximum reuse**: Multiple tags/branches pointing to the same commit share one worktree
//! - **Instance-level caching**: `WorktreeState` tracks creation across threads
//! - **Per-worktree file locking**: Fine-grained locks prevent creation conflicts
//! - **Direct parallelism control**: The `--max-parallel` flag controls concurrency
//! - **Command-instance fetch caching**: Single fetch per repository per command
//! - **Atomic state transitions**: Pending → Ready state coordination
//!
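//! A minimal sketch of the deduplication property, using the public
//! [`Cache::get_worktree_path`] helper (the SHA below is illustrative; in real
//! use it comes from upstream version resolution):
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//!
//! # fn example() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//! // Hypothetically, both "v1.0.0" and "release-1.0" resolved to this SHA.
//! let sha = "abc1234567890def1234567890abcdef12345678";
//! let a = cache.get_worktree_path("https://github.com/owner/repo.git", sha)?;
//! let b = cache.get_worktree_path("https://github.com/owner/repo.git", sha)?;
//! assert_eq!(a, b); // same commit, same worktree directory
//! # Ok(())
//! # }
//! ```
//!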
//! ## Locking Strategy
//!
//! ```text
//! Process A: acquire("source1") ───┐
//!                                  ├─── BLOCKS: same source
//! Process B: acquire("source1") ───┘
//!
//! Process C: acquire("source2") ───── CONCURRENT: different source
//! ```
//!
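//! A minimal sketch (the lock name is illustrative) of taking a repository-level
//! lock with [`CacheLock`], mirroring how the cache serializes access per source:
//!
//! ```rust,no_run
//! use agpm_cli::cache::{Cache, CacheLock};
//!
//! # async fn example() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//! // Other processes locking "github_owner_repo" block until this guard drops.
//! let _lock = CacheLock::acquire(cache.cache_dir(), "github_owner_repo").await?;
//! // ... mutate the cached repository while the lock is held ...
//! # Ok(())
//! # }
//! ```
//!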
//! # Cache Operations
//!
//! ## Repository Management
//! - **Clone**: Initial repository cloning from remote URLs
//! - **Update**: Fetch latest changes from remote (git fetch)
//! - **Checkout**: Switch to specific versions (tags, branches, commits)
//! - **Cleanup**: Remove unused repositories to reclaim disk space
//!
//! ## Resource Installation
//! - **Copy-based**: Files copied from cache to project directories
//! - **Path resolution**: Handles relative paths within repositories
//! - **Directory creation**: Automatically creates parent directories
//! - **Overwrite behavior**: Existing files are replaced during installation
//!
//! # Performance Characteristics
//!
//! The cache is optimized for common AGPM workflows:
//! - **First install**: Clone repository once, reuse for all resources
//! - **Subsequent installs**: Copy from local cache (fast file operations)
//! - **Version switching**: Git checkout within cached repository
//! - **Parallel operations**: Multiple sources can be processed concurrently
//!
//! ## Disk Space Management
//!
//! - **Size calculation**: Recursive directory size calculation
//! - **Unused cleanup**: Remove repositories no longer referenced
//! - **Complete cleanup**: Clear the entire cache when needed
//! - **Selective removal**: Keep active sources, remove only unused ones
//!
//! # Error Handling and Recovery
//!
//! The cache provides comprehensive error handling:
//! - **Lock timeouts**: Graceful handling of concurrent access
//! - **Clone failures**: Network and authentication error reporting
//! - **Version errors**: Clear messages for invalid tags/branches/commits
//! - **File system errors**: Detailed context for permission and disk-space issues
//!
//! # Security Considerations
//!
//! - **Path validation**: Prevents directory traversal attacks
//! - **Lock file isolation**: Prevents lock file manipulation
//! - **Safe file operations**: Atomic operations prevent corruption
//! - **Permission handling**: Respects file system permissions
//!
//! # Usage Examples
//!
//! ## Basic Cache Operations
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//! use std::path::PathBuf;
//!
//! # async fn example() -> anyhow::Result<()> {
//! // Initialize cache with default location
//! let cache = Cache::new()?;
//!
//! // Get or clone a source repository
//! let repo_path = cache.get_or_clone_source(
//!     "community",
//!     "https://github.com/example/agpm-community.git",
//!     Some("v1.0.0") // Specific version
//! ).await?;
//!
//! // Copy a resource from cache to project
//! cache.copy_resource(
//!     &repo_path,
//!     "agents/helper.md",                  // Source path in repository
//!     &PathBuf::from("./agents/helper.md") // Destination in project
//! ).await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Cache Maintenance
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//!
//! # #[tokio::main]
//! # async fn main() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//!
//! // Check cache size
//! let size_bytes = cache.get_cache_size().await?;
//! println!("Cache size: {} MB", size_bytes / 1024 / 1024);
//!
//! // Clean unused repositories
//! let active_sources = vec!["community".to_string(), "work".to_string()];
//! let removed_count = cache.clean_unused(&active_sources).await?;
//! println!("Removed {} unused repositories", removed_count);
//!
//! // Complete cache cleanup
//! cache.clear_all().await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Custom Cache Location
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//! use std::path::PathBuf;
//!
//! # fn custom_location() -> anyhow::Result<()> {
//! // Use a custom cache directory (useful for testing or special setups)
//! let custom_dir = PathBuf::from("/tmp/my-agpm-cache");
//! let cache = Cache::with_dir(custom_dir)?;
//!
//! println!("Using cache at: {}", cache.get_cache_location().display());
//! # Ok(())
//! # }
//! ```
//!
//! # Integration with AGPM Workflow
//!
//! The cache module integrates seamlessly with AGPM's dependency management:
//! 1. **Manifest parsing**: Source URLs extracted from `agpm.toml`
//! 2. **Dependency resolution**: Version constraints resolved to specific commits
//! 3. **Cache population**: Repositories cloned and checked out as needed
//! 4. **Resource installation**: Files copied from cache to project directories
//! 5. **Lockfile generation**: Installed resources tracked in `agpm.lock`
//!
//! See [`crate::manifest`] for manifest handling and [`crate::lockfile`] for
//! lockfile management.

use crate::core::error::AgpmError;
use crate::git::GitRepo;
use crate::git::command_builder::GitCommand;
use crate::utils::fs;
use crate::utils::security::validate_path_security;
use anyhow::{Context, Result};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::fs as async_fs;
use tokio::sync::{Mutex, RwLock};

// Concurrency Architecture:
// - Direct control approach: command parallelism (--max-parallel) plus per-worktree file locking
// - Instance-level caching: worktrees and fetch operations are cached per Cache instance
// - Command-level control: the --max-parallel flag bounds dependency processing parallelism
// - Fetch caching: repositories are fetched at most once per command instance to reduce redundancy

/// State of a worktree in the instance-level cache for concurrent coordination.
///
/// This enum implements a state machine for worktree lifecycle management that
/// enables safe concurrent access across multiple threads without race conditions.
/// The cache uses this state to coordinate threads that might request the same
/// worktree simultaneously, eliminating the need for global synchronization bottlenecks.
///
/// # State Transitions
///
/// - **Initial**: No entry exists in cache (implicit state)
/// - [`Pending`](WorktreeState::Pending): One thread is creating the worktree
/// - [`Ready`](WorktreeState::Ready): Worktree exists and is ready for all threads
///
/// # Concurrency Coordination Pattern
///
/// The worktree creation process follows this coordinated pattern:
/// 1. **Reservation**: First thread reserves the slot by setting state to `Pending`
/// 2. **Creation**: Reserving thread performs the actual worktree creation under a file lock
/// 3. **Notification**: Creator updates state to `Ready(path)` when complete
/// 4. **Reuse**: Subsequent threads immediately use the ready worktree path
/// 5. **Validation**: All threads verify the worktree still exists before use
///
/// # Cache Key Format
///
/// Worktrees are uniquely identified by composite keys:
/// ```text
/// "{cache_dir_hash}:{owner}_{repo}:{version}"
/// ```
///
/// Components:
/// - `cache_dir_hash`: First 8 hex chars of the cache directory path hash
/// - `owner_repo`: Parsed from the Git URL (e.g., "`github_owner_project`")
/// - `version`: Git reference (tag, branch, commit, or "HEAD")
///
/// This format ensures isolation between:
/// - Different cache instances (via hash)
/// - Different repositories (via owner/repo)
/// - Different versions (via version string)
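///
/// A minimal sketch (mirroring the hashing used internally later in this
/// module; not a public API) of how the `cache_dir_hash` component is derived:
///
/// ```rust
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
/// use std::path::PathBuf;
///
/// let dir = PathBuf::from("/home/user/.agpm/cache");
/// let mut hasher = DefaultHasher::new();
/// dir.hash(&mut hasher);
/// // First 8 hex chars of the 64-bit hash of the cache root path.
/// let cache_dir_hash = format!("{:x}", hasher.finish())[..8].to_string();
/// ```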
///
/// # Memory Management
///
/// The instance-level cache persists for the lifetime of the `Cache` instance,
/// but worktrees are validated on each access to handle external deletion.
#[derive(Debug, Clone)]
enum WorktreeState {
    /// Another thread is currently creating this worktree.
    ///
    /// When threads encounter this state, they should wait briefly and retry
    /// rather than attempting concurrent worktree creation, which would fail.
    Pending,

    /// Worktree is fully created and ready to use.
    ///
    /// The `PathBuf` contains the filesystem path to the working directory.
    /// This path should be validated before use as the worktree may have been
    /// externally deleted.
    Ready(PathBuf),
}

/// Persistent on-disk registry of worktrees, serialized to
/// `worktrees/.state.json` inside the cache directory.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct WorktreeRegistry {
    entries: HashMap<String, WorktreeRecord>,
}

/// A single registry entry tracking where a worktree lives and when it was
/// last used.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct WorktreeRecord {
    source: String,
    version: String,
    path: PathBuf,
    last_used: u64,
}

impl WorktreeRegistry {
    /// Load the registry from disk, falling back to an empty registry when the
    /// file is missing or unreadable (real I/O errors are logged).
    fn load(path: &Path) -> Self {
        match std::fs::read(path) {
            Ok(data) => serde_json::from_slice(&data).unwrap_or_default(),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Self::default(),
            Err(err) => {
                tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
                Self::default()
            }
        }
    }

    /// Insert or refresh an entry, stamping it with the current Unix time.
    fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_else(|_| Duration::from_secs(0))
            .as_secs();

        self.entries.insert(
            key,
            WorktreeRecord {
                source,
                version,
                path,
                last_used: timestamp,
            },
        );
    }

    /// Remove the entry whose path matches `target`, returning whether an
    /// entry was removed.
    fn remove_by_path(&mut self, target: &Path) -> bool {
        if let Some(key) = self.entries.iter().find_map(|(k, record)| {
            if record.path == target {
                Some(k.clone())
            } else {
                None
            }
        }) {
            self.entries.remove(&key);
            true
        } else {
            false
        }
    }

    /// Write the registry to disk as pretty-printed JSON, creating parent
    /// directories as needed.
    async fn persist(&self, path: &Path) -> Result<()> {
        if let Some(parent) = path.parent() {
            async_fs::create_dir_all(parent).await?;
        }

        let data = serde_json::to_vec_pretty(self)?;
        async_fs::write(path, data).await?;
        Ok(())
    }
}

/// File-based locking mechanism for cache operations
///
/// This module provides thread-safe and process-safe locking for cache
/// operations through OS-level file locks, ensuring data consistency
/// when multiple AGPM processes access the same cache directory.
pub mod lock;
pub use lock::CacheLock;

/// Git repository cache for efficient resource management
///
/// The `Cache` struct provides the primary interface for managing Git repository
/// caching in AGPM. It handles repository cloning, updating, version management,
/// and resource file copying operations.
///
/// # Thread Safety
///
/// `Cache` is cheaply cloneable: clones share all coordination state (worktree
/// cache, fetch locks, registry) through `Arc`-wrapped synchronization
/// primitives. Across processes, the file-based locking provided by
/// [`CacheLock`] keeps concurrent access to the same cache directory safe.
///
/// # Platform Compatibility
///
/// The cache automatically handles platform-specific differences:
/// - **Path separators**: Uses [`std::path`] for cross-platform compatibility
/// - **Cache location**: Follows platform conventions for app data storage
/// - **File locking**: Uses the [`fs4`] crate for cross-platform file locking
/// - **Directory creation**: Handles permissions and long paths on Windows
///
/// # Examples
///
/// Create a cache with the default platform-specific location:
///
/// ```rust,no_run
/// use agpm_cli::cache::Cache;
///
/// # fn example() -> anyhow::Result<()> {
/// let cache = Cache::new()?;
/// println!("Cache location: {}", cache.get_cache_location().display());
/// # Ok(())
/// # }
/// ```
///
/// Create a cache with a custom location (useful for testing):
///
/// ```rust,no_run
/// use agpm_cli::cache::Cache;
/// use std::path::PathBuf;
///
/// # fn example() -> anyhow::Result<()> {
/// let custom_dir = PathBuf::from("/tmp/test-cache");
/// let cache = Cache::with_dir(custom_dir)?;
/// # Ok(())
/// # }
/// ```
pub struct Cache {
    /// The root directory where all cached repositories are stored
    dir: PathBuf,

    /// Instance-level cache for worktrees to avoid redundant checkouts.
    ///
    /// This cache maps worktree identifiers to their creation state, enabling
    /// safe concurrent access. Multiple threads can request the same worktree
    /// without conflicts - the first thread creates it while others wait.
    ///
    /// **Key format**: `"{cache_dir_hash}:{owner}_{repo}:{version}"`
    ///
    /// The cache directory hash ensures isolation between different Cache instances,
    /// preventing conflicts when multiple instances operate on different cache roots.
    worktree_cache: Arc<RwLock<HashMap<String, WorktreeState>>>,

    /// Per-repository async locks that serialize fetch operations across
    /// concurrent tasks. This prevents redundant `git fetch` runs when
    /// multiple dependencies target the same repository simultaneously.
    fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,

    /// Command-instance fetch cache to track which repositories have been fetched
    /// during this command execution. This ensures we only fetch once per repository
    /// per command instance, dramatically reducing network operations for
    /// multi-dependency installations.
    ///
    /// Contains bare repository paths that have been fetched in this command instance.
    /// Works in conjunction with `VersionResolver` to minimize Git network operations.
    fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,

    /// Persistent registry of worktrees stored on disk for reuse across
    /// AGPM runs. Tracks last-used timestamps and paths so we can validate
    /// and clean up cached worktrees without recreating them unnecessarily.
    worktree_registry: Arc<Mutex<WorktreeRegistry>>,
}

/// Cloning a `Cache` is cheap: it copies the root path and bumps the `Arc`
/// reference counts, so all clones share the same coordination state.
impl Clone for Cache {
    fn clone(&self) -> Self {
        Self {
            dir: self.dir.clone(),
            worktree_cache: Arc::clone(&self.worktree_cache),
            fetch_locks: Arc::clone(&self.fetch_locks),
            fetched_repos: Arc::clone(&self.fetched_repos),
            worktree_registry: Arc::clone(&self.worktree_registry),
        }
    }
}

impl Cache {
    /// Path of the persistent worktree registry for a given cache root.
    fn registry_path_for(cache_dir: &Path) -> PathBuf {
        cache_dir.join("worktrees").join(".state.json")
    }

    /// Path of this cache's persistent worktree registry.
    fn registry_path(&self) -> PathBuf {
        Self::registry_path_for(&self.dir)
    }

    /// Verify that a worktree directory is fully accessible with actual content.
    ///
    /// This function ensures that a newly created worktree is fully accessible
    /// before it's marked as ready. This prevents race conditions in parallel
    /// operations where `git worktree add` returns but the filesystem hasn't
    /// finished writing all files yet.
    ///
    /// # Implementation
    ///
    /// Uses tokio-retry with exponential backoff to handle filesystem sync delays.
    ///
    /// Verification uses `git diff-index --quiet HEAD`, which provides a comprehensive
    /// check that:
    /// - The worktree directory and .git marker exist
    /// - The git index is readable
    /// - ALL files from the commit are present and match HEAD
    /// - Git recognizes the worktree as valid
    ///
    /// This single command provides stronger guarantees than multi-level checks,
    /// as it verifies a complete checkout rather than partial availability.
    ///
    /// # Parameters
    ///
    /// * `worktree_path` - Path to the worktree directory to verify
    /// * `sha` - The commit SHA being checked out (for logging)
    ///
    /// # Errors
    ///
    /// Returns an error if the worktree is not accessible after all retries.
    async fn verify_worktree_accessible(worktree_path: &Path, sha: &str) -> Result<()> {
        use tokio_retry::Retry;
        use tokio_retry::strategy::{ExponentialBackoff, jitter};

        // Retry strategy with jitter for concurrent operations
        let retry_strategy = ExponentialBackoff::from_millis(50)
            .max_delay(std::time::Duration::from_secs(2))
            .take(10)
            .map(jitter);

        let worktree_path = worktree_path.to_path_buf();
        // Guard against SHAs shorter than 8 characters (callers normally pass
        // full 40-character SHAs, but this keeps the slice panic-free).
        let sha_short = &sha[..8.min(sha.len())];

        tracing::debug!(
            target: "git::worktree",
            "Verifying worktree at {} for SHA {}",
            worktree_path.display(),
            sha_short
        );

        Retry::spawn(retry_strategy, || async {
            // Verify working tree matches HEAD (all files checked out).
            // This verifies the worktree structure is valid and all files are present.
            // Cache coherency (making files visible to the parent process) is now
            // handled at the point of actual file read in installer/mod.rs and resolver/mod.rs
            // via read_with_cache_retry functions.
            crate::git::command_builder::GitCommand::new()
                .args(["diff-index", "--quiet", "HEAD"])
                .current_dir(&worktree_path)
                .execute_success()
                .await
                .map_err(|_| "Working tree doesn't match HEAD (checkout incomplete)".to_string())?;

            tracing::debug!(
                target: "git::worktree",
                "Worktree verification passed for {}",
                worktree_path.display()
            );

            Ok::<(), String>(())
        })
        .await
        .map_err(|e| {
            anyhow::anyhow!(
                "Worktree not fully initialized after retries: {} @ {} - {}",
                worktree_path.display(),
                sha_short,
                e
            )
        })
    }

    /// Record (or refresh) a registry entry for a worktree and persist the
    /// registry to disk.
    async fn record_worktree_usage(
        &self,
        registry_key: &str,
        source_name: &str,
        version_key: &str,
        worktree_path: &Path,
    ) -> Result<()> {
        let mut registry = self.worktree_registry.lock().await;
        registry.update(
            registry_key.to_string(),
            source_name.to_string(),
            version_key.to_string(),
            worktree_path.to_path_buf(),
        );
        registry.persist(&self.registry_path()).await?;
        Ok(())
    }

    /// Drop the registry entry for a worktree path, persisting only if an
    /// entry was actually removed.
    async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
        let mut registry = self.worktree_registry.lock().await;
        if registry.remove_by_path(worktree_path) {
            registry.persist(&self.registry_path()).await?;
        }
        Ok(())
    }

    /// Best-effort tuning of a cloned repository's Git transport settings
    /// (HTTP/2, a larger post buffer, and compression disabled). Failures are
    /// ignored because these are optimizations, not requirements.
    async fn configure_connection_pooling(path: &Path) -> Result<()> {
        let commands = [
            ("http.version", "HTTP/2"),
            ("http.postBuffer", "524288000"),
            ("core.compression", "0"),
        ];

        for (key, value) in commands {
            GitCommand::new()
                .args(["config", key, value])
                .current_dir(path)
                .execute_success()
                .await
                .ok();
        }

        Ok(())
    }

    /// Creates a new `Cache` instance using the default platform-specific cache directory.
    ///
    /// The cache directory is determined based on the current platform:
    /// - **Linux/macOS**: `~/.agpm/cache/`
    /// - **Windows**: `%LOCALAPPDATA%\agpm\cache\`
    ///
    /// # Environment Variable Override
    ///
    /// The cache location can be overridden by setting the `AGPM_CACHE_DIR`
    /// environment variable. This is particularly useful for:
    /// - Testing with isolated cache directories
    /// - CI/CD environments with specific cache locations
    /// - Custom deployment scenarios
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Unable to determine the home/local data directory
    /// - The resolved path is invalid or inaccessible
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// println!("Using cache at: {}", cache.get_cache_location().display());
    /// # Ok(())
    /// # }
    /// ```
    pub fn new() -> Result<Self> {
        let dir = crate::config::get_cache_dir()?;
        let registry_path = Self::registry_path_for(&dir);
        let registry = WorktreeRegistry::load(&registry_path);
        Ok(Self {
            dir,
            worktree_cache: Arc::new(RwLock::new(HashMap::new())),
            fetch_locks: Arc::new(DashMap::new()),
            fetched_repos: Arc::new(RwLock::new(HashSet::new())),
            worktree_registry: Arc::new(Mutex::new(registry)),
        })
    }

    /// Creates a new `Cache` instance using a custom cache directory.
    ///
    /// This constructor allows you to specify exactly where the cache should be
    /// stored, overriding platform defaults. The directory will be created if
    /// it doesn't exist when cache operations are performed.
    ///
    /// # Use Cases
    ///
    /// - **Testing**: Use temporary directories for isolated test environments
    /// - **Development**: Use project-local cache directories
    /// - **Deployment**: Use specific paths in containerized environments
    /// - **Multi-user systems**: Use user-specific cache locations
    ///
    /// # Parameters
    ///
    /// * `dir` - The absolute path where cache data should be stored
    ///
    /// # Errors
    ///
    /// Currently infallible: a missing or unreadable worktree registry falls
    /// back to an empty one (with a logged warning). The `Result` return type
    /// is kept for API stability.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// // Use a project-local cache
    /// let project_cache = Cache::with_dir(PathBuf::from("./cache"))?;
    ///
    /// // Use a system-wide cache
    /// let system_cache = Cache::with_dir(PathBuf::from("/var/cache/agpm"))?;
    ///
    /// // Use a temporary cache for testing
    /// let temp_cache = Cache::with_dir(std::env::temp_dir().join("agpm-test"))?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn with_dir(dir: PathBuf) -> Result<Self> {
        let registry_path = Self::registry_path_for(&dir);
        let registry = WorktreeRegistry::load(&registry_path);
        Ok(Self {
            dir,
            worktree_cache: Arc::new(RwLock::new(HashMap::new())),
            fetch_locks: Arc::new(DashMap::new()),
            fetched_repos: Arc::new(RwLock::new(HashSet::new())),
            worktree_registry: Arc::new(Mutex::new(registry)),
        })
    }

    /// Ensures the cache directory exists, creating it if necessary.
    ///
    /// This method creates the cache directory and all necessary parent directories
    /// if they don't already exist. It's safe to call multiple times - it will
    /// not error if the directory already exists.
    ///
    /// # Platform Considerations
    ///
    /// - **Windows**: Handles long path names (>260 characters) correctly
    /// - **Unix**: Respects umask settings for directory permissions
    /// - **All platforms**: Creates intermediate directories as needed
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Insufficient permissions to create the directory
    /// - Disk space is exhausted
    /// - Path contains invalid characters for the platform
    /// - A file exists at the target path (not a directory)
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Ensure cache directory exists before operations
    /// cache.ensure_cache_dir().await?;
    ///
    /// // Safe to call multiple times
    /// cache.ensure_cache_dir().await?; // No error
    /// # Ok(())
    /// # }
    /// ```
    pub async fn ensure_cache_dir(&self) -> Result<()> {
        if !self.dir.exists() {
            async_fs::create_dir_all(&self.dir).await.with_context(|| {
                format!("Failed to create cache directory at {}", self.dir.display())
            })?;
        }
        Ok(())
    }

    /// Returns the path to the cache directory.
    ///
    /// This is useful for operations that need direct access to the cache directory,
    /// such as lock file cleanup or cache size calculations.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let cache_dir = cache.cache_dir();
    /// println!("Cache directory: {}", cache_dir.display());
    /// # Ok(())
    /// # }
    /// ```
    #[must_use]
    pub fn cache_dir(&self) -> &Path {
        &self.dir
    }

    /// Get the worktree path for a specific URL and commit SHA.
    ///
    /// This method constructs the expected worktree directory path based on the cache's
    /// naming scheme. It does NOT check if the worktree exists or create it - use
    /// `get_or_create_worktree_for_sha` for that.
    ///
    /// # Arguments
    ///
    /// * `url` - Git repository URL
    /// * `sha` - Full commit SHA (will be shortened to the first 8 characters)
    ///
    /// # Returns
    ///
    /// Path to the worktree directory (may not exist yet)
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Invalid Git URL format
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let path = cache.get_worktree_path(
    ///     "https://github.com/owner/repo.git",
    ///     "abc1234567890def"
    /// )?;
    /// println!("Worktree path: {}", path.display());
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_worktree_path(&self, url: &str, sha: &str) -> Result<PathBuf> {
        let (owner, repo) =
            crate::git::parse_git_url(url).map_err(|e| anyhow::anyhow!("Invalid Git URL: {e}"))?;
        let sha_short = &sha[..8.min(sha.len())];
        Ok(self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}")))
    }

    /// Gets or clones a source repository, ensuring it's available in the cache.
    ///
    /// This is the primary method for source repository management. It handles both
    /// initial cloning of new repositories and updating existing cached repositories.
    /// The operation is atomic and thread-safe through file-based locking.
    ///
    /// # Operation Flow
    ///
    /// 1. **Lock acquisition**: Acquires exclusive lock for the source name
    /// 2. **Directory check**: Determines if repository already exists in cache
    /// 3. **Clone or update**: Either clones new repository or fetches updates
    /// 4. **Version checkout**: Switches to requested version if specified
    /// 5. **Path return**: Returns path to cached repository
    ///
    /// # Concurrency Behavior
    ///
    /// - **Same source**: Concurrent calls with the same `name` will block
    /// - **Different sources**: Concurrent calls with different `name` run in parallel
    /// - **Process safety**: Safe across multiple AGPM processes
    ///
    /// # Version Handling
    ///
    /// The `version` parameter accepts various Git reference types:
    /// - **Tags**: `"v1.0.0"`, `"release-2023"` (most common for releases)
    /// - **Branches**: `"main"`, `"develop"`, `"feature/new-agents"`
    /// - **Commits**: `"abc123def"` (full or short SHA hashes)
    /// - **None**: Uses repository's default branch (typically `main` or `master`)
    ///
    /// # Parameters
    ///
    /// * `name` - Unique source identifier (used for cache directory and locking)
    /// * `url` - Git repository URL (HTTPS, SSH, or local paths)
    /// * `version` - Optional version constraint (tag, branch, or commit)
    ///
    /// # Returns
    ///
    /// Returns the [`PathBuf`] to the cached repository directory, which contains
    /// the full Git repository structure and can be used for resource file access.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - **Network issues**: Unable to clone or fetch from remote repository
    /// - **Authentication**: Invalid credentials for private repositories
    /// - **Version issues**: Specified version doesn't exist in repository
    /// - **Lock timeout**: Unable to acquire exclusive lock (rare)
    /// - **File system**: Permission or disk space issues
    /// - **Git errors**: Repository corruption or invalid Git operations
    ///
    /// # Performance Notes
    ///
    /// - **First call**: Performs full repository clone (slower)
    /// - **Subsequent calls**: Only fetches updates (faster)
    /// - **Version switching**: Uses Git checkout (very fast)
    /// - **Parallel sources**: Multiple sources processed concurrently
    ///
    /// # Examples
    ///
    /// Clone a public repository with a specific version:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "community",
    ///     "https://github.com/example/agpm-community.git",
    ///     Some("v1.2.0")
    /// ).await?;
    ///
    /// println!("Repository cached at: {}", repo_path.display());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Use the latest version from the default branch:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "dev-tools",
    ///     "https://github.com/myorg/dev-tools.git",
    ///     None // Use default branch
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Work with a development branch:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "experimental",
    ///     "https://github.com/myorg/experimental.git",
    ///     Some("develop")
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_or_clone_source(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        self.get_or_clone_source_impl(name, url, version).await
    }

    /// Clean up a worktree after use (fast version).
    ///
    /// This just removes the worktree directory without calling git.
    /// Git will clean up its internal references when `git worktree prune` is called.
    ///
    /// # Parameters
    ///
    /// * `worktree_path` - The path to the worktree to clean up
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Unable to remove worktree directory
    /// - Unable to update worktree registry
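    ///
    /// # Example
    ///
    /// A minimal sketch (the worktree path shown is hypothetical; in practice
    /// it comes from `get_or_create_worktree_for_sha`):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::Path;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let worktree = Path::new("/tmp/agpm-cache/worktrees/github_owner_repo_abc12345");
    /// cache.cleanup_worktree(worktree).await?;
    /// # Ok(())
    /// # }
    /// ```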
    pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
        // Just remove the directory - don't call git worktree remove.
        // This is much faster and git will clean up its references later.
        if worktree_path.exists() {
            tokio::fs::remove_dir_all(worktree_path).await.with_context(|| {
                format!("Failed to remove worktree directory: {}", worktree_path.display())
            })?;
            self.remove_worktree_record_by_path(worktree_path).await?;
        }
        Ok(())
    }

    /// Clean up all worktrees in the cache.
    ///
    /// This is useful for cleaning up after batch operations or on cache clear.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Unable to remove worktrees directory
    /// - Unable to prune worktree references from bare repositories
    /// - Unable to update worktree registry
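    ///
    /// # Example
    ///
    /// A minimal sketch of a full worktree wipe:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// cache.cleanup_all_worktrees().await?; // bare repositories stay cached
    /// # Ok(())
    /// # }
    /// ```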
    pub async fn cleanup_all_worktrees(&self) -> Result<()> {
        let worktrees_dir = self.dir.join("worktrees");

        if !worktrees_dir.exists() {
            return Ok(());
        }

        // Remove the entire worktrees directory
        tokio::fs::remove_dir_all(&worktrees_dir)
            .await
            .with_context(|| "Failed to clean up worktrees")?;

        // Also prune worktree references from all bare repos
        let sources_dir = self.dir.join("sources");
        if sources_dir.exists() {
            let mut entries = tokio::fs::read_dir(&sources_dir).await?;
            while let Some(entry) = entries.next_entry().await? {
                let path = entry.path();
                if path.extension().and_then(|s| s.to_str()) == Some("git") {
                    let bare_repo = GitRepo::new(&path);
                    bare_repo.prune_worktrees().await.ok();
                }
            }
        }

        {
            let mut registry = self.worktree_registry.lock().await;
            if !registry.entries.is_empty() {
                registry.entries.clear();
                registry.persist(&self.registry_path()).await?;
            }
        }

        Ok(())
    }

    /// Get or create a worktree for a specific commit SHA.
    ///
    /// This method is the cornerstone of AGPM's optimized dependency resolution.
    /// By using commit SHAs as the primary key for worktrees, we ensure:
    /// - Maximum worktree reuse (same SHA = same worktree)
    /// - Deterministic installations (SHA uniquely identifies content)
    /// - Reduced disk usage (no duplicate worktrees for the same commit)
    ///
    /// # SHA-Based Caching Strategy
    ///
    /// Unlike version-based worktrees that create separate directories for
    /// "v1.0.0" and "release-1.0" even if they point to the same commit,
    /// SHA-based worktrees ensure a single worktree per unique commit.
    ///
    /// # Parameters
    ///
    /// * `name` - Source name from manifest
    /// * `url` - Git repository URL
    /// * `sha` - Full 40-character commit SHA (must be pre-resolved)
    /// * `context` - Optional context for logging
    ///
    /// # Returns
    ///
    /// Path to the worktree containing the exact commit specified by SHA.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use agpm_cli::cache::Cache;
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // First resolve version to SHA
    /// let sha = "abc1234567890def1234567890abcdef12345678";
    ///
    /// // Get worktree for that specific commit
    /// let worktree = cache.get_or_create_worktree_for_sha(
    ///     "community",
    ///     "https://github.com/example/repo.git",
    ///     sha,
    ///     Some("my-agent")
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    #[allow(clippy::too_many_lines)]
    pub async fn get_or_create_worktree_for_sha(
        &self,
        name: &str,
        url: &str,
        sha: &str,
        context: Option<&str>,
    ) -> Result<PathBuf> {
        // Validate SHA format
        if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
            return Err(anyhow::anyhow!(
                "Invalid SHA format: expected 40 hex characters, got '{sha}'"
            ));
        }

        // Check if this is a local path
        let is_local_path = crate::utils::is_local_path(url);
        if is_local_path {
            // Local paths don't use worktrees
            return self.get_or_clone_source(name, url, None).await;
        }

        self.ensure_cache_dir().await?;

        // Parse URL for cache structure
        let (owner, repo) =
            crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));

        // Create SHA-based cache key.
        // Using first 8 chars of SHA for the directory name (like Git does)
        let sha_short = &sha[..8];
        let cache_dir_hash = {
            use std::collections::hash_map::DefaultHasher;
            use std::hash::{Hash, Hasher};
            let mut hasher = DefaultHasher::new();
            self.dir.hash(&mut hasher);
            format!("{:x}", hasher.finish())[..8].to_string()
        };
        let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");

        // Check if we already have a worktree for this SHA
        let mut should_create_worktree = false;
        while !should_create_worktree {
            {
                let cache_read = self.worktree_cache.read().await;
                match cache_read.get(&cache_key) {
                    Some(WorktreeState::Ready(cached_path)) => {
                        if cached_path.exists() {
                            let cached_path = cached_path.clone();
                            drop(cache_read);
                            self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
                                .await?;

                            if let Some(ctx) = context {
                                tracing::debug!(
                                    target: "git",
                                    "({}) Reusing SHA-based worktree for {} @ {}",
                                    ctx,
                                    url.split('/').next_back().unwrap_or(url),
                                    sha_short
                                );
                            }
                            return Ok(cached_path);
                        }
                        should_create_worktree = true;
                    }
                    Some(WorktreeState::Pending) => {
                        if let Some(ctx) = context {
                            tracing::debug!(
                                target: "git",
                                "({}) Waiting for SHA worktree creation for {} @ {}",
                                ctx,
                                url.split('/').next_back().unwrap_or(url),
                                sha_short
                            );
                        }
                        drop(cache_read);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                    None => {
                        should_create_worktree = true;
                    }
                }
            }
        }

        // Reserve the cache slot
        let mut reservation_successful = false;
        while !reservation_successful {
            let mut cache_write = self.worktree_cache.write().await;
            match cache_write.get(&cache_key) {
                Some(WorktreeState::Ready(cached_path)) if cached_path.exists() => {
                    return Ok(cached_path.clone());
                }
                Some(WorktreeState::Pending) => {
                    drop(cache_write);
                    tokio::time::sleep(Duration::from_millis(50)).await;
                }
                _ => {
                    cache_write.insert(cache_key.clone(), WorktreeState::Pending);
                    reservation_successful = true;
                }
            }
        }

        // Get bare repository (fetches if needed)
        let bare_repo_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));

        if bare_repo_dir.exists() {
            // Fetch to ensure we have the SHA
            self.fetch_with_hybrid_lock(&bare_repo_dir, context).await?;
        } else {
            let lock_name = format!("{owner}_{repo}");
            let _lock = CacheLock::acquire(&self.dir, &lock_name).await?;

            if let Some(parent) = bare_repo_dir.parent() {
                tokio::fs::create_dir_all(parent).await?;
            }

            if !bare_repo_dir.exists() {
                if let Some(ctx) = context {
                    tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
                } else {
                    tracing::debug!("📦 Cloning repository {url} to cache...");
                }

                GitRepo::clone_bare_with_context(url, &bare_repo_dir, context).await?;
                Self::configure_connection_pooling(&bare_repo_dir).await.ok();
            }
        }

        let bare_repo = GitRepo::new(&bare_repo_dir);

        // Create worktree path using SHA
        let worktree_path = self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));

        // Acquire worktree creation lock
        let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
        let _worktree_lock = CacheLock::acquire(&self.dir, &worktree_lock_name).await?;

        // Re-check after lock
        if worktree_path.exists() {
            let mut cache_write = self.worktree_cache.write().await;
            cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
            self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
            return Ok(worktree_path);
        }

        // Prune stale worktrees if needed
        if !worktree_path.exists() {
            let _ = bare_repo.prune_worktrees().await;
        }

        // Create worktree at specific SHA
        if let Some(ctx) = context {
            tracing::debug!(
                target: "git",
                "({}) Creating SHA-based worktree: {} @ {}",
                ctx,
                url.split('/').next_back().unwrap_or(url),
                sha_short
            );
        }

        // Lock bare repo for worktree creation.
        // Hold the lock through the cache update to prevent git state corruption
        // when multiple worktrees are created concurrently for the same repo.
        let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");
        let _bare_repo_lock = CacheLock::acquire(&self.dir, &bare_repo_lock_name).await?;

        // Create worktree using SHA directly
        let worktree_result =
            bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context).await;

        // Keep lock held until cache is updated to ensure git state is fully settled
        match worktree_result {
            Ok(_) => {
                // Verify worktree is fully accessible before marking as Ready.
                // This prevents race conditions where git worktree add returns
                // but the filesystem hasn't finished writing all files yet.
                Self::verify_worktree_accessible(&worktree_path, sha).await?;

                let mut cache_write = self.worktree_cache.write().await;
                cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
                self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
                // Lock automatically dropped here
                Ok(worktree_path)
            }
            Err(e) => {
                let mut cache_write = self.worktree_cache.write().await;
                cache_write.remove(&cache_key);
                // Lock automatically dropped here
                Err(e)
            }
        }
    }

    /// Get or clone a source repository, returning a shared bare repository.
    ///
    /// This method provides the core functionality for repository access.
    /// It creates bare repositories that can be shared by all operations
    /// (resolution, installation, etc.).
    ///
    /// # Parameters
    ///
    /// * `name` - The name of the source (used for cache directory naming and locking)
    /// * `url` - The Git repository URL or local path
    /// * `version` - Optional specific version/tag/branch to checkout
    ///
    /// # Returns
    ///
    /// Returns the path to the cached bare repository directory
    async fn get_or_clone_source_impl(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        // Check if this is a local path (not a git repository URL)
        let is_local_path = crate::utils::is_local_path(url);

        if is_local_path {
            // For local paths (directories), validate and return the secure path.
            // No cloning or version management needed.

            // Resolve path securely with validation
            let resolved_path = crate::utils::platform::resolve_path(url)?;

            // Canonicalize to get the real path and prevent symlink attacks
            let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
                .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;

            // Security check: validate path against blacklist and symlinks
            validate_path_security(&canonical_path, true)?;

            // For local paths, versions don't apply. Suppress the warning for internal sentinel values.
            if let Some(ver) = version
                && ver != "local"
            {
                eprintln!("Warning: Version constraints are ignored for local paths");
            }

            return Ok(canonical_path);
        }

        self.ensure_cache_dir().await?;

        // Acquire a lock for this source to prevent concurrent access
        let _lock = CacheLock::acquire(&self.dir, name)
            .await
            .with_context(|| format!("Failed to acquire lock for source: {name}"))?;

        // Use the same cache directory structure as worktrees - bare repos with a .git suffix.
        // This ensures we have ONE repository that's shared by all operations.
        let (owner, repo) =
            crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
        let source_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git")); // Always use .git suffix for bare repos

        // Ensure the parent directory exists
        if let Some(parent) = source_dir.parent() {
            tokio::fs::create_dir_all(parent).await.with_context(|| {
                format!("Failed to create cache directory: {}", parent.display())
            })?;
        }

        if source_dir.exists() {
            // Use the existing cache - fetch to ensure we have the latest refs.
            // Skip the fetch for local paths as they don't have remotes.
            // For Git URLs, always fetch to get the latest refs (especially important for branches).
            if crate::utils::is_git_url(url) {
                // Check if we've already fetched this repo in this command instance
                let already_fetched = {
                    let fetched = self.fetched_repos.read().await;
                    fetched.contains(&source_dir)
                };

                if already_fetched {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Skipping fetch for {} (already fetched in this command)",
                        name
                    );
                } else {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Fetching updates for {} from {}",
                        name,
                        url
                    );
                    let repo = crate::git::GitRepo::new(&source_dir);
                    if let Err(e) = repo.fetch(None).await {
                        tracing::warn!(
                            target: "agpm::cache",
                            "Failed to fetch updates for {}: {}",
                            name,
                            e
                        );
                    } else {
                        // Mark this repo as fetched for this command execution
                        let mut fetched = self.fetched_repos.write().await;
                        fetched.insert(source_dir.clone());
                        tracing::debug!(
                            target: "agpm::cache",
                            "Successfully fetched updates for {}",
                            name
                        );
                    }
                }
            } else {
                tracing::debug!(
                    target: "agpm::cache",
                    "Skipping fetch for local path: {}",
                    url
                );
            }
        } else {
            // Directory doesn't exist - clone fresh as a bare repo
            self.clone_source(url, &source_dir).await?;
        }

        Ok(source_dir)
    }

    /// Clones a Git repository to the specified target directory as a bare repository.
    ///
    /// This internal method performs the initial clone operation for repositories
    /// that are not yet present in the cache. It creates a bare repository, which
    /// is optimal for serving and allows multiple worktrees to be created from it.
    ///
    /// # Why Bare Repositories
    ///
    /// Bare repositories are used because:
    /// - **No working directory conflicts**: Multiple worktrees can be created safely
    /// - **Optimized for serving**: Like GitHub/GitLab, designed for fetch operations
    /// - **Space efficient**: No checkout of files in the main repository
    /// - **Thread-safe**: Multiple processes can fetch from it simultaneously
    ///
    /// # Authentication
    ///
    /// Repository authentication is handled through:
    /// - **SSH keys**: For `git@github.com:` URLs (user's SSH configuration)
    /// - **HTTPS tokens**: For private repositories (from global config)
    /// - **Public repos**: No authentication required
    ///
    /// # Parameters
    ///
    /// * `url` - Git repository URL to clone from
    /// * `target` - Local directory path where the bare repository should be created
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Repository URL is invalid or unreachable
    /// - Authentication fails for private repositories
    /// - Target directory cannot be created or written to
    /// - Network connectivity issues
    /// - Git command is not available in PATH
    async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
        tracing::debug!("📦 Cloning {} to cache...", url);

        // Clone as a bare repository for better concurrency and worktree support
        GitRepo::clone_bare(url, target)
            .await
            .with_context(|| format!("Failed to clone repository from {url}"))?;

        // Debug: list what was cloned
        if cfg!(test)
            && let Ok(entries) = std::fs::read_dir(target)
        {
            tracing::debug!(
                target: "agpm::cache",
                "Cloned bare repo to {}, contents:",
                target.display()
            );
            for entry in entries.flatten() {
                tracing::debug!(
                    target: "agpm::cache",
                    " - {}",
                    entry.path().display()
                );
            }
        }

        Ok(())
    }

    /// Copies a resource file from cached repository to project directory.
    ///
    /// This method performs the core resource installation operation by copying
    /// files from the cached Git repository to the project's local directory.
    /// It provides a simple interface for resource installation without output.
    ///
    /// # Copy Strategy
    ///
    /// The method uses a copy-based approach rather than symlinks for:
    /// - **Cross-platform compatibility**: Works identically on all platforms
    /// - **Git integration**: Real files can be tracked and committed
    /// - **Editor support**: No symlink confusion in IDEs and editors
    /// - **User flexibility**: Local files can be modified if needed
    ///
    /// # Path Resolution
    ///
    /// - **Source path**: Relative to the repository root directory
    /// - **Target path**: Absolute path where the file should be installed
    /// - **Directory creation**: Parent directories created automatically
    /// - **Path normalization**: Handles platform-specific path separators
    ///
    /// # Parameters
    ///
    /// * `source_dir` - Path to the cached repository directory
    /// * `source_path` - Relative path to the resource file within the repository
    /// * `target_path` - Absolute path where the resource should be installed
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Source file doesn't exist in the repository
    /// - Target directory cannot be created (permissions)
    /// - File copy operation fails (disk space, permissions)
    /// - Source path attempts directory traversal (security)
    ///
    /// # Examples
    ///
    /// Copy a single resource file:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Get cached repository
    /// let repo_path = cache.get_or_clone_source(
    ///     "community",
    ///     "https://github.com/example/repo.git",
    ///     Some("v1.0.0")
    /// ).await?;
    ///
    /// // Copy resource to project
    /// cache.copy_resource(
    ///     &repo_path,
    ///     "agents/helper.md",                      // Source: agents/helper.md in repository
    ///     &PathBuf::from("./my-agents/helper.md")  // Target: project location
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Copy a nested resource:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource(
    ///     &repo_path,
    ///     "tools/generators/api-client.md",        // Nested source path
    ///     &PathBuf::from("./tools/api-client.md")  // Flattened target
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy_resource(
        &self,
        source_dir: &Path,
        source_path: &str,
        target_path: &Path,
    ) -> Result<()> {
        self.copy_resource_with_output(source_dir, source_path, target_path, false).await
    }

    /// Copies a resource file with optional installation output messages.
    ///
    /// This is the full-featured resource copying method that provides control
    /// over whether installation progress is displayed to the user. It handles
    /// all the details of safe file copying including directory creation,
    /// error handling, and overwrite behavior.
    ///
    /// # Operation Details
    ///
    /// 1. **Source validation**: Verifies the source file exists in the repository
    /// 2. **Directory creation**: Creates target parent directories if needed
    /// 3. **File copy**: Performs the copy in a single system call
    /// 4. **Progress output**: Optionally displays installation confirmation
    ///
    /// # File Safety
    ///
    /// - **Overwrite behavior**: Existing files are overwritten without warning
    /// - **Single-call copy**: Delegates to the platform's copy operation
    /// - **Permission preservation**: Maintains reasonable file permissions
    /// - **Path validation**: Prevents directory traversal attacks
    ///
    /// # Output Control
    ///
    /// When `show_output` is `true`, displays user-friendly installation messages:
    /// ```text
    /// ✅ Installed ./agents/helper.md
    /// ✅ Installed ./snippets/docker-compose.md
    /// ```
    ///
    /// # Parameters
    ///
    /// * `source_dir` - Path to the cached repository directory
    /// * `source_path` - Relative path to the resource file within the repository
    /// * `target_path` - Absolute path where the resource should be installed
    /// * `show_output` - Whether to display installation progress messages
    ///
    /// # Errors
    ///
    /// Returns specific error types for different failure modes:
    /// - [`AgpmError::ResourceFileNotFound`]: Source file doesn't exist
    /// - File system errors: Permission, disk space, invalid paths
    /// - Directory creation errors: Parent directory creation failures
    ///
    /// # Examples
    ///
    /// Silent installation (for batch operations):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource_with_output(
    ///     &repo_path,
    ///     "agents/helper.md",
    ///     &PathBuf::from("./agents/helper.md"),
    ///     false // No output
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Interactive installation (with progress):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource_with_output(
    ///     &repo_path,
    ///     "snippets/deployment.md",
    ///     &PathBuf::from("./snippets/deployment.md"),
    ///     true // Show "✅ Installed" message
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy_resource_with_output(
        &self,
        source_dir: &Path,
        source_path: &str,
        target_path: &Path,
        show_output: bool,
    ) -> Result<()> {
        let source_file = source_dir.join(source_path);

        if !source_file.exists() {
            return Err(AgpmError::ResourceFileNotFound {
                path: source_path.to_string(),
                source_name: source_dir
                    .file_name()
                    .and_then(|n| n.to_str())
                    .unwrap_or("unknown")
                    .to_string(),
            }
            .into());
        }

        if let Some(parent) = target_path.parent() {
            async_fs::create_dir_all(parent)
                .await
                .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
        }

        async_fs::copy(&source_file, target_path).await.with_context(|| {
            format!("Failed to copy {} to {}", source_file.display(), target_path.display())
        })?;

        if show_output {
            println!(" ✅ Installed {}", target_path.display());
        }

        Ok(())
    }
1612
    /// Removes unused cached repositories to reclaim disk space.
    ///
    /// This method performs selective cache cleanup by removing repositories
    /// that are no longer referenced by any active source configuration.
    /// It is a safe operation that preserves repositories currently in use.
    ///
    /// # Cleanup Strategy
    ///
    /// 1. **Directory scanning**: Enumerates all cached repository directories
    /// 2. **Active comparison**: Checks each directory against the active sources list
    /// 3. **Safe removal**: Removes only unused directories, preserving loose files
    /// 4. **Progress reporting**: Displays removal progress for user feedback
    ///
    /// # Safety Guarantees
    ///
    /// - **Active protection**: Never removes repositories listed in active sources
    /// - **Directory-only**: Only removes directories, preserving any loose files
    /// - **Fail-fast removal**: Stops at the first removal error and reports it
    /// - **Lock awareness**: Respects file locks but doesn't acquire them
    ///
    /// # Performance Considerations
    ///
    /// - **I/O intensive**: Scans the entire cache directory structure
    /// - **Disk space recovery**: Can free significant space for large repositories
    /// - **Re-clone cost**: Removed repositories must be re-cloned if used again
    /// - **Concurrent safe**: Can run while other cache operations are in progress
    ///
    /// # Parameters
    ///
    /// * `active_sources` - List of source names that should be preserved in cache
    ///
    /// # Returns
    ///
    /// Returns the number of repository directories that were successfully removed.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Cache directory cannot be read (permissions)
    /// - Unable to remove a directory (file locks, permissions)
    /// - File system errors during directory traversal
    ///
    /// # Output Messages
    ///
    /// Displays progress messages for each removed repository:
    /// ```text
    /// 🗑️ Removing unused cache: old-project
    /// 🗑️ Removing unused cache: deprecated-tools
    /// ```
    ///
    /// # Examples
    ///
    /// Clean cache based on current manifest sources:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Active sources from current agpm.toml
    /// let active_sources = vec![
    ///     "community".to_string(),
    ///     "work-tools".to_string(),
    ///     "personal".to_string(),
    /// ];
    ///
    /// let removed = cache.clean_unused(&active_sources).await?;
    /// println!("Cleaned {} unused repositories", removed);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Clean all cached repositories:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Empty active list removes everything
    /// let removed = cache.clean_unused(&[]).await?;
    /// println!("Removed all {} cached repositories", removed);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
        self.ensure_cache_dir().await?;

        let mut removed_count = 0;
        let mut entries = async_fs::read_dir(&self.dir)
            .await
            .with_context(|| "Failed to read cache directory")?;

        while let Some(entry) =
            entries.next_entry().await.with_context(|| "Failed to read directory entry")?
        {
            let path = entry.path();
            if path.is_dir() {
                let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");

                if !active_sources.contains(&dir_name.to_string()) {
                    println!("🗑️ Removing unused cache: {dir_name}");
                    async_fs::remove_dir_all(&path).await.with_context(|| {
                        format!("Failed to remove cache directory: {}", path.display())
                    })?;
                    removed_count += 1;
                }
            }
        }

        Ok(removed_count)
    }

    /// Calculates the total size of the cache directory in bytes.
    ///
    /// This method recursively calculates the disk space used by all cached
    /// repositories and supporting files. It's useful for cache size monitoring,
    /// cleanup decisions, and storage management.
    ///
    /// # Calculation Method
    ///
    /// - **Recursive traversal**: Includes all subdirectories and files
    /// - **Logical file sizes**: Sums reported file sizes, not allocated disk blocks
    /// - **All file types**: Includes Git objects, working files, and lock files
    /// - **Cross-platform**: Consistent behavior across different file systems
    ///
    /// # Performance Notes
    ///
    /// - **I/O intensive**: May be slow for very large caches
    /// - **File system dependent**: Performance varies with the underlying storage
    /// - **Concurrent safe**: Can run during other cache operations
    /// - **Memory efficient**: Streams the directory traversal without loading all paths
    ///
    /// # Returns
    ///
    /// Returns the total size in bytes. For a non-existent cache directory,
    /// returns `0` without error.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Permission denied reading cache directory or subdirectories
    /// - File system errors during directory traversal
    /// - Symbolic link cycles (rare, but possible)
    ///
    /// # Examples
    ///
    /// Check current cache size:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let size_bytes = cache.get_cache_size().await?;
    /// let size_mb = size_bytes / 1024 / 1024;
    ///
    /// println!("Cache size: {} MB ({} bytes)", size_mb, size_bytes);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Display human-readable sizes:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let size_bytes = cache.get_cache_size().await?;
    ///
    /// let (size, unit) = match size_bytes {
    ///     s if s < 1024 => (s, "B"),
    ///     s if s < 1024 * 1024 => (s / 1024, "KB"),
    ///     s if s < 1024 * 1024 * 1024 => (s / 1024 / 1024, "MB"),
    ///     s => (s / 1024 / 1024 / 1024, "GB"),
    /// };
    ///
    /// println!("Cache size: {}{}", size, unit);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_cache_size(&self) -> Result<u64> {
        if !self.dir.exists() {
            return Ok(0);
        }

        let size = fs::get_directory_size(&self.dir).await?;
        Ok(size)
    }

    /// Returns the path to the cache directory.
    ///
    /// This method provides access to the cache directory path for inspection,
    /// logging, or integration with other tools. The path represents where
    /// all cached repositories and supporting files are stored.
    ///
    /// # Return Value
    ///
    /// Returns a reference to the [`Path`] representing the cache directory.
    /// The path may or may not exist on the file system - use [`ensure_cache_dir`]
    /// to create it if needed.
    ///
    /// # Thread Safety
    ///
    /// This method is safe to call from multiple threads as it only returns
    /// a reference to the immutable path stored in the `Cache` instance.
    ///
    /// # Examples
    ///
    /// Display cache location:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// println!("Cache stored at: {}", cache.get_cache_location().display());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Check if cache exists:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let location = cache.get_cache_location();
    ///
    /// if location.exists() {
    ///     println!("Cache directory exists at: {}", location.display());
    /// } else {
    ///     println!("Cache directory not yet created: {}", location.display());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`ensure_cache_dir`]: Cache::ensure_cache_dir
    #[must_use]
    pub fn get_cache_location(&self) -> &Path {
        &self.dir
    }

    /// Completely removes the entire cache directory and all its contents.
    ///
    /// This is a destructive operation that removes all cached repositories,
    /// lock files, and any other cache-related data. Use it with caution, as
    /// all repositories will need re-cloning on the next operation.
    ///
    /// # Operation Details
    ///
    /// - **Complete removal**: Deletes the entire cache directory tree
    /// - **Recursive deletion**: Removes all subdirectories and files
    /// - **Lock files**: Also removes the `.locks` directory and all lock files
    /// - **Fail-fast**: Stops at the first error, which can leave a partially deleted cache
    ///
    /// # Recovery Impact
    ///
    /// After calling this method:
    /// - All repositories must be re-cloned on next use
    /// - Network bandwidth will be required for repository downloads
    /// - Disk space is immediately reclaimed
    /// - The cache directory is recreated automatically on the next operation
    ///
    /// # Safety Considerations
    ///
    /// - **No confirmation**: This method doesn't ask for confirmation
    /// - **Irreversible**: The deletion cannot be undone
    /// - **Concurrent operations**: May interfere with running cache operations
    /// - **Lock handling**: Doesn't wait for locks and may fail if repositories are in use
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Permission denied for cache directory or contents
    /// - Files are locked by other processes
    /// - File system errors during deletion
    /// - Cache directory is in use by another process
    ///
    /// # Output Messages
    ///
    /// Displays confirmation message on successful completion:
    /// ```text
    /// 🗑️ Cleared all cache
    /// ```
    ///
    /// # Examples
    ///
    /// Clear cache for fresh start:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Check size before clearing
    /// let size_before = cache.get_cache_size().await?;
    /// println!("Cache size before: {} bytes", size_before);
    ///
    /// // Clear everything
    /// cache.clear_all().await?;
    ///
    /// // Verify cache is empty
    /// let size_after = cache.get_cache_size().await?;
    /// println!("Cache size after: {} bytes", size_after); // Should be 0
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Clear cache with error handling:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// match cache.clear_all().await {
    ///     Ok(()) => println!("Cache cleared successfully"),
    ///     Err(e) => {
    ///         eprintln!("Failed to clear cache: {}", e);
    ///         eprintln!("Some files may be in use by other processes");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn clear_all(&self) -> Result<()> {
        if self.dir.exists() {
            async_fs::remove_dir_all(&self.dir).await.with_context(|| "Failed to clear cache")?;
            println!("🗑️ Cleared all cache");
        }
        Ok(())
    }

    /// Performs a fetch operation with hybrid locking (in-process and cross-process).
    ///
    /// This method implements a two-level locking strategy:
    /// 1. In-process locks (`Arc<Mutex<()>>`) for fast coordination within the same process
    /// 2. File-based locks for cross-process coordination
    ///
    /// The fetch happens at most once per repository per command execution; a
    /// minimal sketch of the pattern follows below.
    ///
    /// # Parameters
    ///
    /// * `bare_repo_path` - Path to the bare repository
    /// * `context` - Optional context string for logging
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if the fetch was performed or skipped.
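    ///
    /// # Sketch
    ///
    /// A minimal, hypothetical sketch of the double-checked pattern used here
    /// (standalone; `fetch_once`, `memory_lock`, and `fetched` are illustrative names):
    ///
    /// ```rust,no_run
    /// use std::collections::HashSet;
    /// use std::path::{Path, PathBuf};
    /// use std::sync::Arc;
    /// use fs4::fs_std::FileExt;
    /// use tokio::sync::{Mutex, RwLock};
    ///
    /// async fn fetch_once(
    ///     repo: &Path,
    ///     memory_lock: Arc<Mutex<()>>,
    ///     fetched: &RwLock<HashSet<PathBuf>>,
    /// ) -> anyhow::Result<()> {
    ///     // Level 1: serialize callers within this process.
    ///     let _guard = memory_lock.lock().await;
    ///
    ///     // Level 2: serialize across processes with an exclusive file lock.
    ///     let lock_file = std::fs::OpenOptions::new()
    ///         .create(true)
    ///         .write(true)
    ///         .open(repo.with_extension("fetch.lock"))?;
    ///     lock_file.lock_exclusive()?;
    ///
    ///     // Double-check *after* locking: another task may have fetched already.
    ///     if fetched.read().await.contains(repo) {
    ///         return Ok(()); // file lock released when `lock_file` drops
    ///     }
    ///
    ///     // ... run `git fetch` against the bare repository here ...
    ///
    ///     fetched.write().await.insert(repo.to_path_buf());
    ///     Ok(())
    /// }
    /// ```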
    async fn fetch_with_hybrid_lock(
        &self,
        bare_repo_path: &Path,
        context: Option<&str>,
    ) -> Result<()> {
        use fs4::fs_std::FileExt;

        // Level 1: In-process lock (fast path)
        let memory_lock = self
            .fetch_locks
            .entry(bare_repo_path.to_path_buf())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone();
        let _memory_guard = memory_lock.lock().await;

        // Level 2: File-based lock (cross-process)
        let safe_name = bare_repo_path
            .file_name()
            .and_then(|s| s.to_str())
            .unwrap_or("unknown")
            .replace(['/', '\\', ':'], "_");

        let lock_path = self.dir.join(".locks").join(format!("{safe_name}.fetch.lock"));

        // Ensure the lock directory exists
        if let Some(parent) = lock_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // Create/open the lock file
        let lock_file = tokio::fs::OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(false)
            .open(&lock_path)
            .await?;

        // Convert to std::fs::File for fs4
        let std_file = lock_file.into_std().await;

        // Acquire the exclusive lock (blocks until available)
        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Acquiring file lock for {}",
                ctx,
                bare_repo_path.display()
            );
        }
        std_file.lock_exclusive()?;

        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Acquired file lock for {}",
                ctx,
                bare_repo_path.display()
            );
        }

        // Check whether this repo was already fetched during this command execution.
        // The check happens AFTER acquiring the lock to prevent race conditions.
        let already_fetched = {
            let fetched = self.fetched_repos.read().await;
            let is_fetched = fetched.contains(bare_repo_path);
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Checking if already fetched: {} - Result: {} (total fetched: {}, hashset addr: {:p})",
                    ctx,
                    bare_repo_path.display(),
                    is_fetched,
                    fetched.len(),
                    &raw const *fetched
                );
            }
            is_fetched
        };

        if already_fetched {
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Skipping fetch (already fetched in this command): {}",
                    ctx,
                    bare_repo_path.display()
                );
            }
            // Return early; the file lock is released when `std_file` drops
            return Ok(());
        }

        // Now safe to fetch
        let repo = GitRepo::new(bare_repo_path);

        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Fetching updates for {}",
                ctx,
                bare_repo_path.display()
            );
        }

        repo.fetch(None).await?;

        // Mark this repo as fetched for this command execution
        {
            let mut fetched = self.fetched_repos.write().await;
            fetched.insert(bare_repo_path.to_path_buf());
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Marked as fetched: {} (total fetched: {}, hashset addr: {:p})",
                    ctx,
                    bare_repo_path.display(),
                    fetched.len(),
                    &raw const *fetched
                );
            }
        }

        // The file lock is automatically released when `std_file` is dropped
        Ok(())
    }
}

2098#[cfg(test)]
2099mod tests {
2100 use super::*;
2101 use tempfile::TempDir;
2102
2103 #[tokio::test]
2104 async fn test_cache_dir_creation() {
2105 let temp_dir = TempDir::new().unwrap();
2106 let cache_dir = temp_dir.path().join("cache");
2107
2108 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
2109 cache.ensure_cache_dir().await.unwrap();
2110
2111 assert!(cache_dir.exists());
2112 }
2113
2114 #[tokio::test]
2115 async fn test_cache_location() {
2116 let temp_dir = TempDir::new().unwrap();
2117 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2118 let location = cache.get_cache_location();
2119 assert_eq!(location, temp_dir.path());
2120 }
2121
2122 #[tokio::test]
2123 async fn test_cache_size_empty() {
2124 let temp_dir = TempDir::new().unwrap();
2125 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2126
2127 cache.ensure_cache_dir().await.unwrap();
2128 let size = cache.get_cache_size().await.unwrap();
2129 assert_eq!(size, 0);
2130 }
2131
2132 #[tokio::test]
2133 async fn test_cache_size_with_content() {
2134 let temp_dir = TempDir::new().unwrap();
2135 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2136
2137 cache.ensure_cache_dir().await.unwrap();
2138
2139 // Create some test content
2140 let test_file = temp_dir.path().join("test.txt");
2141 std::fs::write(&test_file, "test content").unwrap();
2142
2143 let size = cache.get_cache_size().await.unwrap();
2144 assert!(size > 0);
2145 assert_eq!(size, 12); // "test content" is 12 bytes
2146 }
2147
2148 #[tokio::test]
2149 async fn test_clean_unused_empty_cache() {
2150 let temp_dir = TempDir::new().unwrap();
2151 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2152
2153 cache.ensure_cache_dir().await.unwrap();
2154
2155 let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
2156 assert_eq!(removed, 0);
2157 }
2158
2159 #[tokio::test]
2160 async fn test_clean_unused_removes_correct_dirs() {
2161 let temp_dir = TempDir::new().unwrap();
2162 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2163
2164 cache.ensure_cache_dir().await.unwrap();
2165
2166 // Create some test directories
2167 let active_dir = temp_dir.path().join("active");
2168 let unused_dir = temp_dir.path().join("unused");
2169 let another_unused = temp_dir.path().join("another_unused");
2170
2171 std::fs::create_dir_all(&active_dir).unwrap();
2172 std::fs::create_dir_all(&unused_dir).unwrap();
2173 std::fs::create_dir_all(&another_unused).unwrap();
2174
2175 // Add some content to verify directories are removed completely
2176 std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
2177 std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
2178 std::fs::write(another_unused.join("file.txt"), "remove").unwrap();
2179
2180 let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
2181
2182 assert_eq!(removed, 2);
2183 assert!(active_dir.exists());
2184 assert!(!unused_dir.exists());
2185 assert!(!another_unused.exists());
2186 }
2187
2188 #[tokio::test]
2189 async fn test_clear_all_removes_entire_cache() {
2190 let temp_dir = TempDir::new().unwrap();
2191 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2192
2193 cache.ensure_cache_dir().await.unwrap();
2194
2195 // Create some content
2196 let subdir = temp_dir.path().join("subdir");
2197 std::fs::create_dir_all(&subdir).unwrap();
2198 std::fs::write(subdir.join("file.txt"), "content").unwrap();
2199
2200 assert!(temp_dir.path().exists());
2201 assert!(subdir.exists());
2202
2203 cache.clear_all().await.unwrap();
2204
2205 assert!(!temp_dir.path().exists());
2206 }
2207
2208 #[tokio::test]
2209 async fn test_copy_resource() {
2210 let temp_dir = TempDir::new().unwrap();
2211 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2212
2213 // Create source file
2214 let source_dir = temp_dir.path().join("source");
2215 std::fs::create_dir_all(&source_dir).unwrap();
2216 let source_file = source_dir.join("resource.md");
2217 std::fs::write(&source_file, "# Test Resource\nContent").unwrap();
2218
2219 // Copy resource
2220 let dest = temp_dir.path().join("dest.md");
2221 cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();
2222
2223 assert!(dest.exists());
2224 let content = std::fs::read_to_string(&dest).unwrap();
2225 assert_eq!(content, "# Test Resource\nContent");
2226 }
2227
2228 #[tokio::test]
2229 async fn test_copy_resource_nested_path() {
2230 let temp_dir = TempDir::new().unwrap();
2231 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2232
2233 // Create source file in nested directory
2234 let source_dir = temp_dir.path().join("source");
2235 let nested_dir = source_dir.join("nested").join("path");
2236 std::fs::create_dir_all(&nested_dir).unwrap();
2237 let source_file = nested_dir.join("resource.md");
2238 std::fs::write(&source_file, "# Nested Resource").unwrap();
2239
2240 // Copy resource using relative path from source_dir
2241 let dest = temp_dir.path().join("dest.md");
2242 cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();
2243
2244 assert!(dest.exists());
2245 let content = std::fs::read_to_string(&dest).unwrap();
2246 assert_eq!(content, "# Nested Resource");
2247 }
2248
2249 #[tokio::test]
2250 async fn test_copy_resource_invalid_path() {
2251 let temp_dir = TempDir::new().unwrap();
2252 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2253
2254 let source_dir = temp_dir.path().join("source");
2255 std::fs::create_dir_all(&source_dir).unwrap();
2256
2257 // Try to copy non-existent resource
2258 let dest = temp_dir.path().join("dest.md");
2259 let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;
2260
2261 assert!(result.is_err());
2262 assert!(!dest.exists());
2263 }
2264
2265 #[tokio::test]
2266 async fn test_ensure_cache_dir_idempotent() {
2267 let temp_dir = TempDir::new().unwrap();
2268 let cache_dir = temp_dir.path().join("cache");
2269 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
2270
2271 // Call ensure_cache_dir multiple times
2272 cache.ensure_cache_dir().await.unwrap();
2273 assert!(cache_dir.exists());
2274
2275 cache.ensure_cache_dir().await.unwrap();
2276 assert!(cache_dir.exists());
2277
2278 // Add a file and ensure it's preserved
2279 std::fs::write(cache_dir.join("test.txt"), "content").unwrap();
2280
2281 cache.ensure_cache_dir().await.unwrap();
2282 assert!(cache_dir.exists());
2283 assert!(cache_dir.join("test.txt").exists());
2284 }
2285
2286 #[tokio::test]
2287 async fn test_copy_resource_creates_parent_directories() {
2288 let temp_dir = TempDir::new().unwrap();
2289 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2290
2291 // Create source file
2292 let source_dir = temp_dir.path().join("source");
2293 std::fs::create_dir_all(&source_dir).unwrap();
2294 std::fs::write(source_dir.join("file.md"), "content").unwrap();
2295
2296 // Copy to a destination with non-existent parent directories
2297 let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
2298 cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
2299
2300 assert!(dest.exists());
2301 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
2302 }
2303
2304 #[tokio::test]
2305 async fn test_copy_resource_with_output_flag() {
2306 let temp_dir = TempDir::new().unwrap();
2307 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2308
2309 // Create source file
2310 let source_dir = temp_dir.path().join("source");
2311 std::fs::create_dir_all(&source_dir).unwrap();
2312 std::fs::write(source_dir.join("file.md"), "content").unwrap();
2313
2314 // Test with output flag false
2315 let dest1 = temp_dir.path().join("dest1.md");
2316 cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
2317 assert!(dest1.exists());
2318
2319 // Test with output flag true
2320 let dest2 = temp_dir.path().join("dest2.md");
2321 cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
2322 assert!(dest2.exists());
2323 }
2324
2325 #[tokio::test]
2326 async fn test_cache_size_nonexistent_dir() {
2327 let temp_dir = TempDir::new().unwrap();
2328 let nonexistent = temp_dir.path().join("nonexistent");
2329 let cache = Cache::with_dir(nonexistent).unwrap();
2330
2331 let size = cache.get_cache_size().await.unwrap();
2332 assert_eq!(size, 0);
2333 }
2334
2335 #[tokio::test]
2336 async fn test_clear_all_nonexistent_cache() {
2337 let temp_dir = TempDir::new().unwrap();
2338 let nonexistent = temp_dir.path().join("nonexistent");
2339 let cache = Cache::with_dir(nonexistent).unwrap();
2340
2341 // Should not error when clearing non-existent cache
2342 cache.clear_all().await.unwrap();
2343 }
2344
2345 #[tokio::test]
2346 async fn test_clean_unused_with_files_and_dirs() {
2347 let temp_dir = TempDir::new().unwrap();
2348 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2349
2350 cache.ensure_cache_dir().await.unwrap();
2351
2352 // Create directories
2353 std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
2354 std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();
2355
2356 // Create a file (not a directory)
2357 std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();
2358
2359 let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();
2360
2361 // Should only remove the "remove" directory, not the file
2362 assert_eq!(removed, 1);
2363 assert!(temp_dir.path().join("keep").exists());
2364 assert!(!temp_dir.path().join("remove").exists());
2365 assert!(temp_dir.path().join("file.txt").exists());
2366 }
2367
2368 #[tokio::test]
2369 async fn test_copy_resource_overwrites_existing() {
2370 let temp_dir = TempDir::new().unwrap();
2371 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2372
2373 // Create source file
2374 let source_dir = temp_dir.path().join("source");
2375 std::fs::create_dir_all(&source_dir).unwrap();
2376 std::fs::write(source_dir.join("file.md"), "new content").unwrap();
2377
2378 // Create existing destination file
2379 let dest = temp_dir.path().join("dest.md");
2380 std::fs::write(&dest, "old content").unwrap();
2381
2382 // Copy should overwrite
2383 cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
2384
2385 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
2386 }
2387
2388 #[tokio::test]
2389 async fn test_copy_resource_special_characters() {
2390 let temp_dir = TempDir::new().unwrap();
2391 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2392
2393 // Create source file with special characters
2394 let source_dir = temp_dir.path().join("source");
2395 std::fs::create_dir_all(&source_dir).unwrap();
2396 let special_name = "file with spaces & special-chars.md";
2397 std::fs::write(source_dir.join(special_name), "content").unwrap();
2398
2399 // Copy resource
2400 let dest = temp_dir.path().join("dest.md");
2401 cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();
2402
2403 assert!(dest.exists());
2404 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
2405 }
2406
2407 #[tokio::test]
2408 async fn test_cache_location_consistency() {
2409 let temp_dir = TempDir::new().unwrap();
2410 let cache_dir = temp_dir.path().join("my_cache");
2411 let cache = Cache::with_dir(cache_dir.clone()).unwrap();
2412
2413 // Get location multiple times
2414 let loc1 = cache.get_cache_location();
2415 let loc2 = cache.get_cache_location();
2416
2417 assert_eq!(loc1, loc2);
2418 assert_eq!(loc1, cache_dir.as_path());
2419 }
2420
2421 #[tokio::test]
2422 async fn test_clean_unused_empty_active_list() {
2423 let temp_dir = TempDir::new().unwrap();
2424 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2425
2426 cache.ensure_cache_dir().await.unwrap();
2427
2428 // Create some directories
2429 std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
2430 std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();
2431
2432 // Empty active list should remove all
2433 let removed = cache.clean_unused(&[]).await.unwrap();
2434
2435 assert_eq!(removed, 2);
2436 assert!(!temp_dir.path().join("source1").exists());
2437 assert!(!temp_dir.path().join("source2").exists());
2438 }
2439
2440 #[tokio::test]
2441 async fn test_copy_resource_with_relative_paths() {
2442 let temp_dir = TempDir::new().unwrap();
2443 let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2444
2445 // Create source with subdirectories
2446 let source_dir = temp_dir.path().join("source");
2447 let sub_dir = source_dir.join("agents");
2448 std::fs::create_dir_all(&sub_dir).unwrap();
2449 std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();
2450
2451 // Copy using relative path
2452 let dest = temp_dir.path().join("my-agent.md");
2453 cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();
2454
2455 assert!(dest.exists());
2456 assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
2457 }
2458
2459 #[tokio::test]
2460 async fn test_cache_size_with_subdirectories() {
2461 let temp_dir = TempDir::new().unwrap();
2462 let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
2463
2464 cache.ensure_cache_dir().await.unwrap();
2465
2466 // Create nested structure with files
2467 let sub1 = temp_dir.path().join("sub1");
2468 let sub2 = sub1.join("sub2");
2469 std::fs::create_dir_all(&sub2).unwrap();
2470
2471 std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap(); // 5 bytes
2472 std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap(); // 10 bytes
2473 std::fs::write(sub2.join("file3.txt"), "abc").unwrap(); // 3 bytes
2474
2475 let size = cache.get_cache_size().await.unwrap();
2476 assert_eq!(size, 18); // 5 + 10 + 3
2477 }
2478}