// agpm_cli/installer.rs

1//! Shared installation utilities for AGPM resources.
2//!
3//! This module provides common functionality for installing resources from
4//! lockfile entries to the project directory. It's shared between the install
5//! and update commands to avoid code duplication. The module includes both
6//! installation logic and automatic cleanup of removed or relocated artifacts.
7//!
8//! # SHA-Based Parallel Installation Architecture
9//!
10//! The installer uses SHA-based worktrees for optimal parallel resource installation:
11//! - **SHA-based worktrees**: Each unique commit gets one worktree for maximum deduplication
12//! - **Pre-resolved SHAs**: All versions resolved to SHAs before installation begins
13//! - **Concurrency control**: Direct parallelism control via --max-parallel flag
14//! - **Context-aware logging**: Each operation includes dependency name for debugging
15//! - **Efficient cleanup**: Worktrees are managed by the cache layer for reuse
16//! - **Pre-warming**: Worktrees created upfront to minimize installation latency
17//! - **Automatic artifact cleanup**: Removes old files when paths change or dependencies are removed
18//!
19//! # Installation Process
20//!
21//! 1. **SHA validation**: Ensures all resources have valid 40-character commit SHAs
22//! 2. **Worktree pre-warming**: Creates SHA-based worktrees for all unique commits
23//! 3. **Parallel processing**: Installs multiple resources concurrently using dedicated worktrees
24//! 4. **Content validation**: Validates markdown format and structure
25//! 5. **Atomic installation**: Files are written atomically to prevent corruption
26//! 6. **Progress tracking**: Real-time progress updates during parallel operations
27//! 7. **Artifact cleanup**: Automatically removes old files from previous installations when paths change
28//!
29//! # Artifact Cleanup (v0.3.18+)
30//!
31//! The module provides automatic cleanup of obsolete artifacts when:
32//! - **Dependencies are removed**: Files from removed dependencies are deleted
33//! - **Paths are relocated**: Old files are removed when `installed_at` paths change
34//! - **Structure changes**: Empty parent directories are cleaned up recursively
35//!
36//! The cleanup process:
37//! 1. Compares old and new lockfiles to identify removed artifacts
38//! 2. Removes files that exist in the old lockfile but not in the new one
39//! 3. Recursively removes empty parent directories up to `.claude/`
40//! 4. Reports the number of cleaned artifacts to the user
41//!
42//! See [`cleanup_removed_artifacts()`] for implementation details.
43//!
44//! # Performance Characteristics
45//!
46//! - **SHA-based deduplication**: Multiple refs to same commit share one worktree
47//! - **Parallel processing**: Multiple dependencies installed simultaneously
48//! - **Pre-warming optimization**: Worktrees created upfront to minimize latency
49//! - **Parallelism-controlled**: User controls concurrency via --max-parallel flag
50//! - **Atomic operations**: Fast, safe file installation with proper error handling
51//! - **Reduced disk usage**: No duplicate worktrees for identical commits
52//! - **Efficient cleanup**: Minimal overhead for artifact cleanup operations
53
54use crate::utils::progress::{InstallationPhase, MultiPhaseProgress};
55use anyhow::{Context, Result};
56use std::path::PathBuf;
57
58/// Type alias for complex installation result tuples to improve code readability.
59///
60/// This type alias simplifies the return type of parallel installation functions
61/// that need to return either success information or error details with context.
62/// It was introduced in AGPM v0.3.0 to resolve `clippy::type_complexity` warnings
63/// while maintaining clear semantics for installation results.
64///
65/// # Success Variant: `Ok((String, bool, String))`
66///
67/// When installation succeeds, the tuple contains:
68/// - `String`: Resource name that was processed
69/// - `bool`: Whether the resource was actually installed (`true`) or already up-to-date (`false`)
70/// - `String`: SHA-256 checksum of the installed file content
71///
72/// # Error Variant: `Err((String, anyhow::Error))`
73///
74/// When installation fails, the tuple contains:
75/// - `String`: Resource name that failed to install
76/// - `anyhow::Error`: Detailed error information for debugging
77///
78/// # Usage
79///
80/// This type is primarily used in parallel installation operations where
81/// individual resource results need to be collected and processed:
82///
83/// ```rust,ignore
84/// use agpm_cli::installer::InstallResult;
85/// use futures::stream::{self, StreamExt};
86///
87/// # async fn example() -> anyhow::Result<()> {
88/// let results: Vec<InstallResult> = stream::iter(vec!["resource1", "resource2"])
89///     .map(|resource_name| async move {
90///         // Installation logic here
91///         Ok((resource_name.to_string(), true, "sha256:abc123".to_string()))
92///     })
93///     .buffer_unordered(10)
94///     .collect()
95///     .await;
96///
97/// // Process results
98/// for result in results {
99///     match result {
100///         Ok((name, installed, checksum)) => {
101///             println!("✓ {}: installed={}, checksum={}", name, installed, checksum);
102///         }
103///         Err((name, error)) => {
104///             eprintln!("✗ {}: {}", name, error);
105///         }
106///     }
107/// }
108/// # Ok(())
109/// # }
110/// ```
111///
112/// # Design Rationale
113///
114/// The type alias serves several purposes:
115/// - **Clippy compliance**: Resolves `type_complexity` warnings for complex generic types
116/// - **Code clarity**: Makes function signatures more readable and self-documenting
117/// - **Error context**: Preserves resource name context when installation fails
118/// - **Batch processing**: Enables efficient collection and processing of parallel results
119type InstallResult = Result<(String, bool, String), (String, anyhow::Error)>;
120
121use futures::{
122    future,
123    stream::{self, StreamExt},
124};
125use std::path::Path;
126use std::sync::Arc;
127use tokio::sync::{Mutex, mpsc};
128
129use crate::cache::Cache;
130use crate::core::{ResourceIterator, ResourceTypeExt};
131use crate::lockfile::{LockFile, LockedResource};
132use crate::manifest::Manifest;
133use crate::markdown::MarkdownFile;
134use crate::utils::fs::{atomic_write, ensure_dir};
135use crate::utils::progress::ProgressBar;
136use hex;
137use std::collections::HashSet;
138use std::fs;
139
140/// Install a single resource from a lock entry using worktrees for parallel safety.
141///
142/// This function installs a resource specified by a lockfile entry to the project
143/// directory. It uses Git worktrees through the cache layer to enable safe parallel
144/// operations without conflicts between concurrent installations.
145///
146/// # Arguments
147///
148/// * `entry` - The locked resource to install containing source and version info
149/// * `project_dir` - The root project directory where resources should be installed
150/// * `resource_dir` - The subdirectory name for this resource type (e.g., "agents")
151/// * `cache` - The cache instance for managing Git repositories and worktrees
152///
153/// # Returns
154///
155/// Returns `Ok((installed, checksum))` where:
156/// - `installed` is `true` if the resource was actually installed (new or updated),
157///   `false` if the resource already existed and was unchanged
158/// - `checksum` is the SHA-256 hash of the installed file content
159///
160/// # Worktree Usage
161///
162/// For remote resources, this function:
163/// 1. Uses `cache.get_or_clone_source_worktree_with_context()` to get a worktree
164/// 2. Each dependency gets its own isolated worktree for parallel safety
165/// 3. Worktrees are automatically managed and reused by the cache layer
166/// 4. Context (dependency name) is provided for debugging parallel operations
167///
168/// # Installation Process
169///
170/// 1. **Path resolution**: Determines destination based on `installed_at` or defaults
171/// 2. **Repository access**: Gets worktree from cache (for remote) or validates local path
172/// 3. **Content validation**: Verifies markdown format and structure
173/// 4. **Atomic write**: Installs file atomically to prevent corruption
174///
175/// # Examples
176///
177/// ```rust,no_run
178/// use agpm_cli::installer::install_resource;
179/// use agpm_cli::lockfile::LockedResource;
180/// use agpm_cli::cache::Cache;
181/// use agpm_cli::core::ResourceType;
182/// use std::path::Path;
183///
184/// # async fn example() -> anyhow::Result<()> {
185/// let cache = Cache::new()?;
186/// let entry = LockedResource {
187///     name: "example-agent".to_string(),
188///     source: Some("community".to_string()),
189///     url: Some("https://github.com/example/repo.git".to_string()),
190///     path: "agents/example.md".to_string(),
191///     version: Some("v1.0.0".to_string()),
192///     resolved_commit: Some("abc123".to_string()),
193///     checksum: "sha256:...".to_string(),
194///     installed_at: ".claude/agents/example.md".to_string(),
195///     dependencies: vec![],
196///     resource_type: ResourceType::Agent,
197///     tool: "claude-code".to_string(),
198/// };
199///
200/// let (installed, checksum) = install_resource(&entry, Path::new("."), "agents", &cache, false).await?;
201/// if installed {
202///     println!("Resource was installed with checksum: {}", checksum);
203/// } else {
204///     println!("Resource already existed and was unchanged");
205/// }
206/// # Ok(())
207/// # }
208/// ```
209///
210/// # Error Handling
211///
212/// Returns an error if:
213/// - The source repository cannot be accessed or cloned
214/// - The specified file path doesn't exist in the repository
215/// - The file is not valid markdown format
216/// - File system operations fail (permissions, disk space)
217/// - Worktree creation fails due to Git issues
pub async fn install_resource(
    entry: &LockedResource,
    project_dir: &Path,
    resource_dir: &str,
    cache: &Cache,
    force_refresh: bool,
) -> Result<(bool, String)> {
    // Determine destination path: an explicit `installed_at` takes precedence;
    // otherwise default to `<project>/<resource_dir>/<name>.md`.
    let dest_path = if entry.installed_at.is_empty() {
        project_dir.join(resource_dir).join(format!("{}.md", entry.name))
    } else {
        project_dir.join(&entry.installed_at)
    };

    // Check if file already exists and compare checksums so an unchanged file
    // is not rewritten (lets the caller report `installed = false`).
    let existing_checksum = if dest_path.exists() {
        // Use blocking task for checksum calculation to avoid blocking the async runtime
        let path = dest_path.clone();
        // `.into()` lifts the computed `String` into `Option<String>` to match
        // the `None` arm of this `if`.
        tokio::task::spawn_blocking(move || LockFile::compute_checksum(&path)).await??.into()
    } else {
        None
    };

    let new_content = if let Some(source_name) = &entry.source {
        let url = entry
            .url
            .as_ref()
            .ok_or_else(|| anyhow::anyhow!("Resource {} has no URL", entry.name))?;

        // Check if this is a local directory source (no SHA or empty SHA)
        let is_local_source = entry.resolved_commit.as_deref().is_none_or(str::is_empty);

        let cache_dir = if is_local_source {
            // Local directory source - use the URL as the path directly
            PathBuf::from(url)
        } else {
            // Git-based resource - use SHA-based worktree creation
            let sha = entry.resolved_commit.as_deref().ok_or_else(|| {
                anyhow::anyhow!("Resource {} missing resolved commit SHA. Run 'agpm update' to regenerate lockfile.", entry.name)
            })?;

            // Validate SHA format: a full 40-character hex commit id is required.
            if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
                return Err(anyhow::anyhow!(
                    "Invalid SHA '{}' for resource {}. Expected 40 hex characters.",
                    sha,
                    entry.name
                ));
            }

            let mut cache_dir = cache
                .get_or_create_worktree_for_sha(source_name, url, sha, Some(&entry.name))
                .await?;

            // On forced refresh, drop the existing worktree and re-create it so
            // a stale checkout cannot be served. Cleanup errors are deliberately
            // ignored: the re-creation below surfaces any real problem.
            if force_refresh {
                let _ = cache.cleanup_worktree(&cache_dir).await;
                cache_dir = cache
                    .get_or_create_worktree_for_sha(source_name, url, sha, Some(&entry.name))
                    .await?;
            }

            cache_dir
        };

        // Read the content from the source worktree (or local directory).
        let source_path = cache_dir.join(&entry.path);
        let content = tokio::fs::read_to_string(&source_path)
            .await
            .with_context(|| format!("Failed to read resource file: {}", source_path.display()))?;

        // Validate markdown - this will emit a warning if frontmatter is invalid but won't fail
        MarkdownFile::parse_with_context(&content, Some(&source_path.display().to_string()))?;

        content
    } else {
        // Local resource - copy directly from project directory or absolute path
        let source_path = {
            let candidate = Path::new(&entry.path);
            if candidate.is_absolute() {
                candidate.to_path_buf()
            } else {
                project_dir.join(candidate)
            }
        };

        if !source_path.exists() {
            return Err(anyhow::anyhow!(
                "Local file '{}' not found. Expected at: {}",
                entry.path,
                source_path.display()
            ));
        }

        let content = tokio::fs::read_to_string(&source_path)
            .await
            .with_context(|| format!("Failed to read resource file: {}", source_path.display()))?;

        // Validate markdown - this will emit a warning if frontmatter is invalid but won't fail
        MarkdownFile::parse_with_context(&content, Some(&source_path.display().to_string()))?;

        content
    };

    // Calculate checksum of new content
    let new_checksum = {
        use sha2::{Digest, Sha256};
        let mut hasher = Sha256::new();
        hasher.update(new_content.as_bytes());
        let hash = hasher.finalize();
        format!("sha256:{}", hex::encode(hash))
    };

    // Check if content has changed by comparing checksums; `None` (no existing
    // file) always compares unequal, forcing a fresh install.
    let actually_installed = existing_checksum.as_ref() != Some(&new_checksum);

    if actually_installed {
        // Only write if content is different or file doesn't exist
        if let Some(parent) = dest_path.parent() {
            ensure_dir(parent)?;
        }

        // Atomic write prevents readers from ever observing a partial file.
        atomic_write(&dest_path, new_content.as_bytes())
            .with_context(|| format!("Failed to install resource to {}", dest_path.display()))?;
    }

    Ok((actually_installed, new_checksum))
}
345
346/// Install a single resource with progress bar updates for user feedback.
347///
348/// This function wraps [`install_resource`] with progress bar integration to provide
349/// real-time feedback during resource installation. It updates the progress bar
350/// message before delegating to the core installation logic.
351///
352/// # Arguments
353///
354/// * `entry` - The locked resource containing installation metadata
355/// * `project_dir` - Root project directory for installation target
356/// * `resource_dir` - Subdirectory name for this resource type (e.g., "agents")
357/// * `cache` - Cache instance for Git repository and worktree management
358/// * `force_refresh` - Whether to force refresh of cached repositories
359/// * `pb` - Progress bar to update with installation status
360///
361/// # Returns
362///
363/// Returns a tuple of:
364/// - `bool`: Whether the resource was actually installed (`true` for new/updated, `false` for unchanged)
365/// - `String`: SHA-256 checksum of the installed content
366///
367/// # Progress Integration
368///
369/// The function automatically sets the progress bar message to indicate which
370/// resource is currently being installed. This provides users with real-time
371/// feedback about installation progress.
372///
373/// # Examples
374///
375/// ```rust,no_run
376/// use agpm_cli::installer::install_resource_with_progress;
377/// use agpm_cli::lockfile::LockedResource;
378/// use agpm_cli::cache::Cache;
379/// use agpm_cli::core::ResourceType;
380/// use agpm_cli::utils::progress::ProgressBar;
381/// use std::path::Path;
382///
383/// # async fn example() -> anyhow::Result<()> {
384/// let cache = Cache::new()?;
385/// let pb = ProgressBar::new(1);
386/// let entry = LockedResource {
387///     name: "example-agent".to_string(),
388///     source: Some("community".to_string()),
389///     url: Some("https://github.com/example/repo.git".to_string()),
390///     path: "agents/example.md".to_string(),
391///     version: Some("v1.0.0".to_string()),
392///     resolved_commit: Some("abc123".to_string()),
393///     checksum: "sha256:...".to_string(),
394///     installed_at: ".claude/agents/example.md".to_string(),
395///     dependencies: vec![],
396///     resource_type: ResourceType::Agent,
397///     tool: "claude-code".to_string(),
398/// };
399///
400/// let (installed, checksum) = install_resource_with_progress(
401///     &entry,
402///     Path::new("."),
403///     "agents",
404///     &cache,
405///     false,
406///     &pb
407/// ).await?;
408///
409/// pb.inc(1);
410/// # Ok(())
411/// # }
412/// ```
413///
414/// # Errors
415///
416/// Returns the same errors as [`install_resource`], including:
417/// - Repository access failures
418/// - File system operation errors
419/// - Invalid markdown content
420/// - Git worktree creation failures
421pub async fn install_resource_with_progress(
422    entry: &LockedResource,
423    project_dir: &Path,
424    resource_dir: &str,
425    cache: &Cache,
426    force_refresh: bool,
427    pb: &ProgressBar,
428) -> Result<(bool, String)> {
429    pb.set_message(format!("Installing {}", entry.name));
430    install_resource(entry, project_dir, resource_dir, cache, force_refresh).await
431}
432
433/// Install multiple resources in parallel using worktree-based concurrency.
434///
435/// This function performs parallel installation of all resources defined in the
436/// lockfile, using Git worktrees to enable safe concurrent access to repositories.
437/// Each dependency gets its own isolated worktree to prevent conflicts.
438///
439/// # Arguments
440///
441/// * `lockfile` - The lockfile containing all resources to install
442/// * `manifest` - The project manifest for configuration
443/// * `project_dir` - The root project directory for installation
444/// * `pb` - Progress bar for user feedback
445/// * `cache` - Cache instance managing Git repositories and worktrees
446///
447/// # Parallel Architecture
448///
449/// The function uses several layers of concurrency control:
450/// - **Tokio tasks**: Each resource installation runs in its own async task
451/// - **Unlimited task concurrency**: Uses `buffer_unordered(usize::MAX)`
452/// - **Parallelism control**: --max-parallel flag controls concurrent operations
453/// - **Worktree isolation**: Each dependency gets its own worktree for safety
454///
455/// # Performance Optimizations
456///
457/// - **Stream processing**: Uses `futures::stream` for efficient task scheduling
458/// - **Context logging**: Each operation includes dependency name for debugging
459/// - **Worktree reuse**: Cache layer optimizes Git repository access
460/// - **Batched progress**: Updates progress atomically to reduce contention
461/// - **Deferred cleanup**: Worktrees are left for reuse, cleaned up by cache commands
462///
463/// # Concurrency Control Flow
464///
465/// ```text
466/// Lockfile Resources
467///       ↓
468/// Async Task Stream (unlimited concurrency)
469///       ↓
470/// install_resource_for_parallel() calls
471///       ↓
472/// Cache worktree operations (parallelism-controlled)
473///       ↓
474/// Git operations (controlled by --max-parallel)
475/// ```
476///
477/// # Examples
478///
479/// ```rust,no_run
480/// use agpm_cli::installer::install_resources_parallel;
481/// use agpm_cli::lockfile::LockFile;
482/// use agpm_cli::manifest::Manifest;
483/// use agpm_cli::cache::Cache;
484/// use agpm_cli::utils::progress::ProgressBar;
485/// use std::path::Path;
486///
487/// # async fn example() -> anyhow::Result<()> {
488/// let lockfile = LockFile::load(Path::new("agpm.lock"))?;
489/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
490/// let cache = Cache::new()?;
491///
492/// // Count total resources for progress bar
493/// let total = lockfile.agents.len() + lockfile.snippets.len()
494///     + lockfile.commands.len() + lockfile.scripts.len()
495///     + lockfile.hooks.len() + lockfile.mcp_servers.len();
496/// let pb = ProgressBar::new(total as u64);
497///
498/// let count = install_resources_parallel(
499///     &lockfile,
500///     &manifest,
501///     Path::new("."),
502///     &pb,
503///     &cache,
504///     false,
505///     None,
506/// ).await?;
507///
508/// println!("Installed {} resources", count);
509/// # Ok(())
510/// # }
511/// ```
512///
513/// # Error Handling
514///
515/// - **Atomic failure**: If any resource fails, the entire operation fails
516/// - **Detailed context**: Errors include specific resource and source information
517/// - **Progress preservation**: Progress updates continue even on partial failures
518/// - **Resource cleanup**: Failed operations don't leave partial state
519///
520/// # Return Value
521///
522/// Returns the total number of resources successfully installed.
523// Removed install_resources_parallel - use install_resources with MultiPhaseProgress instead
524#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
525pub async fn install_resources_parallel(
526    lockfile: &LockFile,
527    manifest: &Manifest,
528    project_dir: &Path,
529    pb: &ProgressBar,
530    cache: &Cache,
531    force_refresh: bool,
532    max_concurrency: Option<usize>,
533) -> Result<usize> {
534    // Collect all entries to install using ResourceIterator
535    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);
536
537    if all_entries.is_empty() {
538        return Ok(0);
539    }
540
541    // Pre-warm the cache by creating all needed worktrees upfront
542    // This allows maximum parallelism for Git operations
543    // Update the progress bar message to indicate preparation phase
544    let total = all_entries.len();
545    pb.set_message("Preparing resources");
546
547    // Collect unique (source, url, sha) triples to pre-create worktrees
548    let mut unique_worktrees = HashSet::new();
549    for (entry, _) in &all_entries {
550        if let Some(source_name) = &entry.source
551            && let Some(url) = &entry.url
552        {
553            // Only pre-warm if we have a valid SHA
554            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
555                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
556            }) {
557                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
558            }
559        }
560    }
561
562    // Pre-create all worktrees in parallel
563    if !unique_worktrees.is_empty() {
564        let worktree_futures: Vec<_> = unique_worktrees
565            .into_iter()
566            .map(|(source, url, sha)| {
567                let cache = cache.clone();
568                async move {
569                    cache
570                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
571                        .await
572                        .ok(); // Ignore errors during pre-warming
573                }
574            })
575            .collect();
576
577        // Execute all worktree creations in parallel
578        future::join_all(worktree_futures).await;
579    }
580
581    // Create thread-safe progress tracking
582    let installed_count = Arc::new(Mutex::new(0));
583    let pb = Arc::new(pb.clone());
584
585    // Update message for installation phase
586    pb.set_message(format!("Installing 0/{total} resources"));
587
588    let shared_cache = Arc::new(cache.clone());
589    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
590
591    let results: Vec<InstallResult> = stream::iter(all_entries)
592        .map(|(entry, resource_dir)| {
593            let entry = entry.clone();
594            let project_dir = project_dir.to_path_buf();
595            let resource_dir = resource_dir.to_string();
596            let installed_count = Arc::clone(&installed_count);
597            let pb = Arc::clone(&pb);
598            let cache = Arc::clone(&shared_cache);
599
600            async move {
601                let res = install_resource_for_parallel(
602                    &entry,
603                    &project_dir,
604                    &resource_dir,
605                    cache.as_ref(),
606                    force_refresh,
607                )
608                .await;
609
610                match res {
611                    Ok((actually_installed, checksum)) => {
612                        if actually_installed {
613                            let mut count = installed_count.lock().await;
614                            *count += 1;
615                        }
616                        let count = *installed_count.lock().await;
617                        pb.set_message(format!("Installing {count}/{total} resources"));
618                        pb.inc(1);
619                        Ok((entry.name.clone(), actually_installed, checksum))
620                    }
621                    Err(err) => Err((entry.name.clone(), err)),
622                }
623            }
624        })
625        .buffer_unordered(concurrency)
626        .collect()
627        .await;
628
629    let mut errors = Vec::new();
630    for result in results {
631        match result {
632            Ok((_name, _installed, _checksum)) => {
633                // Old function doesn't return checksums
634            }
635            Err((name, error)) => {
636                errors.push((name, error));
637            }
638        }
639    }
640
641    if !errors.is_empty() {
642        let error_msgs: Vec<String> =
643            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
644        return Err(anyhow::anyhow!(
645            "Failed to install {} resources:\n{}",
646            error_msgs.len(),
647            error_msgs.join("\n")
648        ));
649    }
650
651    let final_count = *installed_count.lock().await;
652    Ok(final_count)
653}
654
655/// Install a single resource in a thread-safe manner for parallel execution.
656///
657/// This function provides a thin wrapper around [`install_resource`] specifically
658/// designed for use in parallel installation streams. It ensures thread-safe
659/// operation when called concurrently from multiple async tasks.
660///
661/// # Thread Safety
662///
663/// While this function is just a wrapper, it's used within parallel streams where:
664/// - Each resource gets its own isolated Git worktree via the cache layer
665/// - File operations are atomic to prevent corruption
666/// - Progress tracking is coordinated through shared state
667///
668/// # Arguments
669///
670/// * `entry` - The locked resource to install
671/// * `project_dir` - Project root directory for installation
672/// * `resource_dir` - Resource type subdirectory (e.g., "agents", "snippets")
673/// * `cache` - Cache instance managing Git repositories and worktrees
674/// * `force_refresh` - Whether to force refresh cached repositories
675///
676/// # Returns
677///
678/// Returns a tuple of:
679/// - `bool`: Whether installation actually occurred (`true` for new/changed, `false` for up-to-date)
680/// - `String`: SHA-256 checksum of the installed file content
681///
682/// # Usage in Parallel Streams
683///
684/// This function is typically used within futures streams for concurrent processing:
685///
686/// ```rust,ignore
687/// use futures::stream::{self, StreamExt};
688/// use agpm_cli::installer::install_resource_for_parallel;
689/// # use agpm_cli::lockfile::LockedResource;
690/// # use agpm_cli::cache::Cache;
691/// # use std::path::Path;
692///
693/// # async fn example(entries: Vec<LockedResource>, cache: Cache) -> anyhow::Result<()> {
694/// let results: Vec<_> = stream::iter(entries)
695///     .map(|entry| {
696///         let cache = cache.clone();
697///         async move {
698///             install_resource_for_parallel(
699///                 &entry,
700///                 Path::new("."),
701///                 "agents",
702///                 &cache,
703///                 false
704///             ).await
705///         }
706///     })
707///     .buffer_unordered(10) // Process up to 10 resources concurrently
708///     .collect()
709///     .await;
710/// # Ok(())
711/// # }
712/// ```
713///
714/// # Errors
715///
716/// Returns the same errors as [`install_resource`]:
717/// - Git repository access failures
718/// - File system permission or space issues
719/// - Invalid markdown file format
720/// - Worktree creation conflicts
721async fn install_resource_for_parallel(
722    entry: &LockedResource,
723    project_dir: &Path,
724    resource_dir: &str,
725    cache: &Cache,
726    force_refresh: bool,
727) -> Result<(bool, String)> {
728    install_resource(entry, project_dir, resource_dir, cache, force_refresh).await
729}
730
731/// Progress update message for parallel installation operations.
732///
733/// This struct encapsulates the current state of a parallel installation operation,
734/// providing detailed information about which dependencies are actively being
735/// processed and the overall completion status. It's designed for use with
736/// channel-based progress reporting systems.
737///
738/// # Fields
739///
740/// * `active_deps` - Names of dependencies currently being processed in parallel
741/// * `completed_count` - Number of dependencies that have finished processing
742/// * `total_count` - Total number of dependencies to be processed
743///
744/// # Usage
745///
746/// This struct is typically sent through async channels to provide real-time
747/// progress updates to user interface components:
748///
749/// ```rust,no_run
750/// use agpm_cli::installer::InstallProgress;
751/// use tokio::sync::mpsc;
752///
753/// # async fn example() -> anyhow::Result<()> {
754/// let (tx, mut rx) = mpsc::unbounded_channel::<InstallProgress>();
755///
756/// // Installation task sends progress updates
757/// tokio::spawn(async move {
758///     let progress = InstallProgress {
759///         active_deps: vec!["agent1".to_string(), "tool2".to_string()],
760///         completed_count: 3,
761///         total_count: 10,
762///     };
763///     let _ = tx.send(progress);
764/// });
765///
766/// // UI task receives and displays progress
767/// while let Some(progress) = rx.recv().await {
768///     println!("Active: {:?}, Progress: {}/{}",
769///         progress.active_deps,
770///         progress.completed_count,
771///         progress.total_count
772///     );
773/// }
774/// # Ok(())
775/// # }
776/// ```
777///
778/// # Design Purpose
779///
780/// This structure enables sophisticated progress reporting that shows:
781/// - Which specific dependencies are being processed concurrently
782/// - Overall completion percentage for the installation operation
783/// - Real-time updates as the parallel installation progresses
784///
785/// The `active_deps` field is particularly useful for debugging parallel
786/// operations, as it shows exactly which resources are currently being
787/// downloaded, validated, or installed.
#[derive(Debug, Clone)]
pub struct InstallProgress {
    /// Dependencies that were mid-installation when this snapshot was taken.
    ///
    /// Membership changes over time: a name is pushed when its resource starts
    /// installing and removed once that resource finishes, whether it
    /// succeeded or failed.
    pub active_deps: Vec<String>,

    /// How many dependencies have finished so far.
    ///
    /// NOTE(review): in the current callers this counter is incremented only
    /// for resources whose content actually changed on disk — confirm before
    /// relying on it as a strict "all finished" count.
    pub completed_count: usize,

    /// Total number of dependencies in the whole operation.
    ///
    /// Fixed for the lifetime of a single installation run; pair it with
    /// `completed_count` to compute a completion percentage.
    pub total_count: usize,
}
810
811/// Install resources in parallel with detailed progress updates via async channels.
812///
813/// This function performs parallel resource installation while providing real-time
814/// progress updates through an async channel. It's designed for UI implementations
815/// that need detailed visibility into parallel installation operations, showing
816/// which specific dependencies are being processed at any given time.
817///
818/// # Arguments
819///
820/// * `lockfile` - Lockfile containing all resources to install
821/// * `manifest` - Project manifest providing configuration context
822/// * `project_dir` - Root directory for resource installation
823/// * `cache` - Cache instance for Git repository and worktree management
824/// * `force_refresh` - Whether to force refresh of cached repositories
825/// * `max_concurrency` - Optional limit on concurrent operations (`None` = unlimited)
826/// * `progress_sender` - Optional channel sender for progress updates
827///
828/// # Progress Updates
829///
830/// When `progress_sender` is provided, the function sends [`InstallProgress`]
831/// updates that include:
832/// - Active dependencies currently being processed
833/// - Completed count (successful and failed installations)
834/// - Total dependency count for completion calculation
835///
836/// Updates are sent at key points:
837/// - When a dependency starts processing (added to `active_deps`)
838/// - When a dependency completes (removed from `active_deps`, `completed_count` incremented)
839///
840/// # Channel-Based Architecture
841///
842/// ```rust,ignore
843/// use agpm_cli::installer::{install_resources_parallel_with_progress, InstallProgress};
844/// use agpm_cli::lockfile::LockFile;
845/// use agpm_cli::manifest::Manifest;
846/// use agpm_cli::cache::Cache;
847/// use tokio::sync::mpsc;
848/// use std::path::Path;
849///
850/// # async fn example() -> anyhow::Result<()> {
851/// let (tx, mut rx) = mpsc::unbounded_channel::<InstallProgress>();
852/// let lockfile = LockFile::load(Path::new("agpm.lock"))?;
853/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
854/// let cache = Cache::new()?;
855///
856/// // Spawn installation task
857/// let install_task = tokio::spawn(async move {
858///     install_resources_parallel_with_progress(
859///         &lockfile,
860///         &manifest,
861///         Path::new("."),
862///         &cache,
863///         false,
864///         Some(8),      // Max 8 concurrent operations
865///         Some(tx)      // Progress updates
866///     ).await
867/// });
868///
869/// // Handle progress updates
870/// tokio::spawn(async move {
871///     while let Some(progress) = rx.recv().await {
872///         println!("Progress: {}/{}, Active: {:?}",
873///             progress.completed_count,
874///             progress.total_count,
875///             progress.active_deps
876///         );
877///     }
878/// });
879///
880/// let count = install_task.await??;
881/// println!("Installed {} resources", count);
882/// # Ok(())
883/// # }
884/// ```
885///
886/// # Concurrency Control
887///
888/// The function implements the same parallel processing architecture as
889/// [`install_resources_parallel`] but adds channel-based progress reporting:
890/// - Pre-warming of Git worktrees for optimal parallelism
891/// - Configurable concurrency limits via `max_concurrency`
892/// - Thread-safe progress tracking with atomic updates
893///
894/// # Performance Characteristics
895///
896/// Progress updates are designed to have minimal performance impact:
897/// - Updates are sent asynchronously without blocking installation
898/// - Failed channel sends are silently ignored to prevent installation failures
899/// - State updates are batched to reduce contention
900///
901/// # Returns
902///
903/// Returns the total number of resources that were successfully installed.
904/// This count only includes resources that were actually modified (new or updated content),
905/// not resources that already existed with identical content.
906///
907/// # Errors
908///
909/// Returns an error if any resource installation fails. The error includes
910/// details about all failed installations with specific error context.
911/// Progress updates continue until the error occurs.
912// Removed install_resources_parallel_with_progress - use install_resources with MultiPhaseProgress instead
913#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
914pub async fn install_resources_parallel_with_progress(
915    lockfile: &LockFile,
916    manifest: &Manifest,
917    project_dir: &Path,
918    cache: &Cache,
919    force_refresh: bool,
920    max_concurrency: Option<usize>,
921    progress_sender: Option<mpsc::UnboundedSender<InstallProgress>>,
922) -> Result<usize> {
923    // Collect all entries to install using ResourceIterator
924    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);
925
926    if all_entries.is_empty() {
927        return Ok(0);
928    }
929
930    let total = all_entries.len();
931
932    // Pre-warm the cache by creating all needed worktrees upfront
933    // Collect unique (source, url, sha) triples to pre-create worktrees
934    let mut unique_worktrees = HashSet::new();
935    for (entry, _) in &all_entries {
936        if let Some(source_name) = &entry.source
937            && let Some(url) = &entry.url
938        {
939            // Only pre-warm if we have a valid SHA
940            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
941                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
942            }) {
943                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
944            }
945        }
946    }
947
948    if !unique_worktrees.is_empty() {
949        let worktree_futures: Vec<_> = unique_worktrees
950            .into_iter()
951            .map(|(source, url, sha)| {
952                async move {
953                    cache
954                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
955                        .await
956                        .ok(); // Ignore errors during pre-warming
957                }
958            })
959            .collect();
960
961        // Execute all worktree creations in parallel
962        future::join_all(worktree_futures).await;
963    }
964
965    // Create thread-safe progress tracking
966    let installed_count = Arc::new(Mutex::new(0));
967    let active_deps = Arc::new(Mutex::new(Vec::<String>::new()));
968    let sender = progress_sender.map(Arc::new);
969
970    let shared_cache = Arc::new(cache.clone());
971    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
972
973    let results: Vec<InstallResult> = stream::iter(all_entries)
974        .map(|(entry, resource_dir)| {
975            let entry = entry.clone();
976            let project_dir = project_dir.to_path_buf();
977            let resource_dir = resource_dir.to_string();
978            let installed_count = Arc::clone(&installed_count);
979            let active_deps = Arc::clone(&active_deps);
980            let sender = sender.clone();
981            let cache = Arc::clone(&shared_cache);
982
983            async move {
984                // Add to active list and send update
985                {
986                    let mut active = active_deps.lock().await;
987                    active.push(entry.name.clone());
988                    let count = *installed_count.lock().await;
989
990                    if let Some(ref tx) = sender {
991                        let _ = tx.send(InstallProgress {
992                            active_deps: active.clone(),
993                            completed_count: count,
994                            total_count: total,
995                        });
996                    }
997                }
998
999                let res = install_resource_for_parallel(
1000                    &entry,
1001                    &project_dir,
1002                    &resource_dir,
1003                    cache.as_ref(),
1004                    force_refresh,
1005                )
1006                .await;
1007
1008                // Remove from active list and update count only if actually installed
1009                {
1010                    let mut active = active_deps.lock().await;
1011                    active.retain(|x| x != &entry.name);
1012
1013                    if let Ok((actually_installed, _checksum)) = &res {
1014                        if *actually_installed {
1015                            let mut count = installed_count.lock().await;
1016                            *count += 1;
1017                        }
1018
1019                        let count = *installed_count.lock().await;
1020                        if let Some(ref tx) = sender {
1021                            let _ = tx.send(InstallProgress {
1022                                active_deps: active.clone(),
1023                                completed_count: count,
1024                                total_count: total,
1025                            });
1026                        }
1027                    }
1028                }
1029
1030                match res {
1031                    Ok((installed, checksum)) => Ok((entry.name.clone(), installed, checksum)),
1032                    Err(err) => Err((entry.name.clone(), err)),
1033                }
1034            }
1035        })
1036        .buffer_unordered(concurrency)
1037        .collect()
1038        .await;
1039
1040    let mut errors = Vec::new();
1041    for result in results {
1042        match result {
1043            Ok((_name, _installed, _checksum)) => {
1044                // Old function doesn't return checksums
1045            }
1046            Err((name, error)) => {
1047                errors.push((name, error));
1048            }
1049        }
1050    }
1051
1052    if !errors.is_empty() {
1053        let error_msgs: Vec<String> =
1054            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
1055        return Err(anyhow::anyhow!(
1056            "Failed to install {} resources:\n{}",
1057            error_msgs.len(),
1058            error_msgs.join("\n")
1059        ));
1060    }
1061
1062    let final_count = *installed_count.lock().await;
1063    Ok(final_count)
1064}
1065
/// Filtering options for resource installation operations.
///
/// This enum controls which resources are processed during installation,
/// enabling both full installations and selective updates. The filter
/// determines which entries from the lockfile are actually installed.
///
/// # Use Cases
///
/// - **Full installations**: Install all resources defined in lockfile
/// - **Selective updates**: Install only resources that have been updated
/// - **Performance optimization**: Avoid reinstalling unchanged resources
/// - **Incremental deployments**: Update only what has changed
///
/// # Variants
///
/// ## All Resources
/// [`ResourceFilter::All`] processes every resource entry in the lockfile,
/// regardless of whether it has changed. This is used by the install command
/// for complete environment setup.
///
/// ## Updated Resources Only
/// [`ResourceFilter::Updated`] processes only resources that have version
/// changes, as tracked by the update command. This enables efficient
/// incremental updates without full reinstallation.
///
/// # Examples
///
/// Install all resources:
/// ```rust,no_run
/// use agpm_cli::installer::ResourceFilter;
///
/// let filter = ResourceFilter::All;
/// // This will install every resource in the lockfile
/// ```
///
/// Install only updated resources:
/// ```rust,no_run
/// use agpm_cli::installer::ResourceFilter;
///
/// let updates = vec![
///     ("agent1".to_string(), None, "v1.0.0".to_string(), "v1.1.0".to_string()),
///     ("tool2".to_string(), Some("community".to_string()), "v2.1.0".to_string(), "v2.2.0".to_string()),
/// ];
/// let filter = ResourceFilter::Updated(updates);
/// // This will install only agent1 and tool2
/// ```
///
/// # Update Tuple Format
///
/// For [`ResourceFilter::Updated`], each tuple contains:
/// - `name`: Resource name as defined in the manifest
/// - `source`: Source repository name (`None` for local resources)
/// - `old_version`: Previous version (for logging and tracking)
/// - `new_version`: New version to install
///
/// The old version is primarily used for user feedback and logging,
/// while the new version determines what gets installed.
#[derive(Debug, Clone)]
pub enum ResourceFilter {
    /// Install all resources from the lockfile.
    ///
    /// This option processes every resource entry in the lockfile,
    /// installing or updating each one regardless of whether it has
    /// changed since the last installation.
    All,

    /// Install only specific updated resources.
    ///
    /// This option processes only the resources specified in the update list,
    /// allowing for efficient incremental updates. Each tuple contains:
    /// - Resource name
    /// - Source name (None for local resources)
    /// - Old version (for tracking)
    /// - New version (to install)
    Updated(Vec<(String, Option<String>, String, String)>),
}
1140
1141/// Resource installation function supporting multiple progress configurations.
1142///
1143/// This function consolidates all resource installation patterns into a single, flexible
1144/// interface that can handle both full installations and selective updates with different
1145/// progress reporting mechanisms. It represents the modernized installation architecture
1146/// introduced in AGPM v0.3.0.
1147///
1148/// # Architecture Benefits
1149///
1150/// - **Single API**: Single function handles install and update commands
1151/// - **Flexible progress**: Supports dynamic, simple, and quiet progress modes
1152/// - **Selective installation**: Can install all resources or just updated ones
1153/// - **Optimal concurrency**: Leverages worktree-based parallel operations
1154/// - **Cache efficiency**: Integrates with instance-level caching systems
1155///
1156/// # Parameters
1157///
1158/// * `filter` - Determines which resources to install ([`ResourceFilter::All`] or [`ResourceFilter::Updated`])
1159/// * `lockfile` - The lockfile containing all resource definitions to install
1160/// * `manifest` - The project manifest providing configuration and target directories
1161/// * `project_dir` - Root directory where resources should be installed
1162/// * `cache` - Cache instance for Git repository and worktree management
1163/// * `force_refresh` - Whether to force refresh of cached repositories
1164/// * `max_concurrency` - Optional limit on concurrent operations (None = unlimited)
1165/// * `progress` - Optional multi-phase progress manager ([`MultiPhaseProgress`])
1166///
1167/// # Progress Reporting
1168///
1169/// Progress is reported through the optional [`MultiPhaseProgress`] parameter:
1170/// - **Enabled**: Pass `Some(progress)` for multi-phase progress with live updates
1171/// - **Disabled**: Pass `None` for quiet operation (scripts and automation)
1172///
1173/// # Installation Process
1174///
1175/// 1. **Resource filtering**: Collects entries based on filter criteria
1176/// 2. **Cache warming**: Pre-creates worktrees for all unique repositories
1177/// 3. **Parallel installation**: Processes resources with configured concurrency
1178/// 4. **Progress coordination**: Updates progress based on configuration
1179/// 5. **Error aggregation**: Collects and reports any installation failures
1180///
1181/// # Concurrency Behavior
1182///
1183/// The function implements advanced parallel processing:
1184/// - **Pre-warming phase**: Creates all needed worktrees upfront for maximum parallelism
1185/// - **Parallel execution**: Each resource installed in its own async task
1186/// - **Concurrency control**: `max_concurrency` limits simultaneous operations
1187/// - **Thread safety**: Progress updates are atomic and thread-safe
1188///
1189/// # Returns
1190///
1191/// Returns a tuple of:
1192/// - The number of resources that were actually installed (new or updated content).
1193///   Resources that already exist with identical content are not counted.
1194/// - A vector of (`resource_name`, checksum) pairs for all processed resources
1195///
1196/// # Errors
1197///
1198/// Returns an error if any resource installation fails. The error includes details
1199/// about all failed installations with specific error messages for debugging.
1200///
1201/// # Examples
1202///
1203/// Install all resources with progress tracking:
1204/// ```rust,no_run
1205/// use agpm_cli::installer::{install_resources, ResourceFilter};
1206/// use agpm_cli::utils::progress::MultiPhaseProgress;
1207/// use agpm_cli::lockfile::LockFile;
1208/// use agpm_cli::manifest::Manifest;
1209/// use agpm_cli::cache::Cache;
1210/// use std::sync::Arc;
1211/// use std::path::Path;
1212///
1213/// # async fn example() -> anyhow::Result<()> {
1214/// # let lockfile = LockFile::default();
1215/// # let manifest = Manifest::default();
1216/// # let project_dir = Path::new(".");
1217/// # let cache = Cache::new()?;
1218/// let progress = Arc::new(MultiPhaseProgress::new(true));
1219///
1220/// let (count, _checksums) = install_resources(
1221///     ResourceFilter::All,
1222///     &lockfile,
1223///     &manifest,
1224///     &project_dir,
1225///     cache,
1226///     false,
1227///     Some(8), // Limit to 8 concurrent operations
1228///     Some(progress),
1229/// ).await?;
1230///
1231/// println!("Installed {} resources", count);
1232/// # Ok(())
1233/// # }
1234/// ```
1235///
1236/// Install resources quietly (for automation):
1237/// ```rust,no_run
1238/// use agpm_cli::installer::{install_resources, ResourceFilter};
1239/// use agpm_cli::lockfile::LockFile;
1240/// use agpm_cli::manifest::Manifest;
1241/// use agpm_cli::cache::Cache;
1242/// use std::path::Path;
1243///
1244/// # async fn example() -> anyhow::Result<()> {
1245/// # let lockfile = LockFile::default();
1246/// # let manifest = Manifest::default();
1247/// # let project_dir = Path::new(".");
1248/// # let cache = Cache::new()?;
1249/// let updates = vec![("agent1".to_string(), None, "v1.0".to_string(), "v1.1".to_string())];
1250///
1251/// let (count, _checksums) = install_resources(
1252///     ResourceFilter::Updated(updates),
1253///     &lockfile,
1254///     &manifest,
1255///     &project_dir,
1256///     cache,
1257///     false,
1258///     None, // Unlimited concurrency
1259///     None, // No progress output
1260/// ).await?;
1261///
1262/// println!("Updated {} resources", count);
1263/// # Ok(())
1264/// # }
1265/// ```
1266#[allow(clippy::too_many_arguments)]
1267pub async fn install_resources(
1268    filter: ResourceFilter,
1269    lockfile: &LockFile,
1270    manifest: &Manifest,
1271    project_dir: &Path,
1272    cache: Cache,
1273    force_refresh: bool,
1274    max_concurrency: Option<usize>,
1275    progress: Option<Arc<MultiPhaseProgress>>,
1276) -> Result<(usize, Vec<(String, String)>)> {
1277    // Collect entries to install based on filter
1278    let all_entries: Vec<(LockedResource, String)> = match filter {
1279        ResourceFilter::All => {
1280            // Use existing ResourceIterator logic for all entries
1281            ResourceIterator::collect_all_entries(lockfile, manifest)
1282                .into_iter()
1283                .map(|(entry, dir)| (entry.clone(), dir.into_owned()))
1284                .collect()
1285        }
1286        ResourceFilter::Updated(ref updates) => {
1287            // Collect only the updated entries
1288            let mut entries = Vec::new();
1289            for (name, source, _, _) in updates {
1290                if let Some((resource_type, entry)) =
1291                    ResourceIterator::find_resource_by_name_and_source(
1292                        lockfile,
1293                        name,
1294                        source.as_deref(),
1295                    )
1296                {
1297                    // Try artifact config first, fall back to legacy target config
1298                    let target_dir = if let Some(artifact_path) =
1299                        manifest.get_artifact_resource_path(&entry.tool, resource_type)
1300                    {
1301                        artifact_path.display().to_string()
1302                    } else {
1303                        // Fall back to legacy target config
1304                        #[allow(deprecated)]
1305                        resource_type.get_target_dir(&manifest.target).to_string()
1306                    };
1307                    entries.push((entry.clone(), target_dir));
1308                }
1309            }
1310            entries
1311        }
1312    };
1313
1314    if all_entries.is_empty() {
1315        return Ok((0, Vec::new()));
1316    }
1317
1318    let total = all_entries.len();
1319
1320    // Start installation phase with progress if provided
1321    if let Some(ref pm) = progress {
1322        pm.start_phase_with_progress(InstallationPhase::InstallingResources, total);
1323    }
1324
1325    // Pre-warm the cache by creating all needed worktrees upfront
1326    let mut unique_worktrees = HashSet::new();
1327    for (entry, _) in &all_entries {
1328        if let Some(source_name) = &entry.source
1329            && let Some(url) = &entry.url
1330        {
1331            // Only pre-warm if we have a valid SHA
1332            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
1333                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
1334            }) {
1335                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
1336            }
1337        }
1338    }
1339
1340    if !unique_worktrees.is_empty() {
1341        let context = match filter {
1342            ResourceFilter::All => "pre-warm",
1343            ResourceFilter::Updated(_) => "update-pre-warm",
1344        };
1345
1346        let worktree_futures: Vec<_> = unique_worktrees
1347            .into_iter()
1348            .map(|(source, url, sha)| {
1349                let cache = cache.clone();
1350                async move {
1351                    cache
1352                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some(context))
1353                        .await
1354                        .ok(); // Ignore errors during pre-warming
1355                }
1356            })
1357            .collect();
1358
1359        // Execute all worktree creations in parallel
1360        future::join_all(worktree_futures).await;
1361    }
1362
1363    // Create thread-safe progress tracking
1364    let installed_count = Arc::new(Mutex::new(0));
1365    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
1366
1367    // Update initial progress message
1368    if let Some(ref pm) = progress {
1369        pm.update_current_message(&format!("Installing 0/{total} resources"));
1370    }
1371
1372    // Process installations in parallel
1373    let results: Vec<InstallResult> = stream::iter(all_entries)
1374        .map(|(entry, resource_dir)| {
1375            let project_dir = project_dir.to_path_buf();
1376            let installed_count = Arc::clone(&installed_count);
1377            let cache = cache.clone();
1378            let progress = progress.clone();
1379
1380            async move {
1381                // Update progress message for current resource
1382                if let Some(ref pm) = progress {
1383                    pm.update_current_message(&format!("Installing {}", entry.name));
1384                }
1385
1386                let res = install_resource_for_parallel(
1387                    &entry,
1388                    &project_dir,
1389                    &resource_dir,
1390                    &cache,
1391                    force_refresh,
1392                )
1393                .await;
1394
1395                // Update progress on success - but only count if actually installed
1396                if let Ok((actually_installed, _checksum)) = &res {
1397                    if *actually_installed {
1398                        let mut count = installed_count.lock().await;
1399                        *count += 1;
1400                    }
1401
1402                    if let Some(ref pm) = progress {
1403                        let count = *installed_count.lock().await;
1404                        pm.update_current_message(&format!("Installing {count}/{total} resources"));
1405                        pm.increment_progress(1);
1406                    }
1407                }
1408
1409                match res {
1410                    Ok((installed, checksum)) => Ok((entry.name.clone(), installed, checksum)),
1411                    Err(err) => Err((entry.name.clone(), err)),
1412                }
1413            }
1414        })
1415        .buffer_unordered(concurrency)
1416        .collect()
1417        .await;
1418
1419    // Handle errors and collect checksums
1420    let mut errors = Vec::new();
1421    let mut checksums = Vec::new();
1422    for result in results {
1423        match result {
1424            Ok((name, _installed, checksum)) => {
1425                checksums.push((name, checksum));
1426            }
1427            Err((name, error)) => {
1428                errors.push((name, error));
1429            }
1430        }
1431    }
1432
1433    if !errors.is_empty() {
1434        // Complete phase with error message
1435        if let Some(ref pm) = progress {
1436            pm.complete_phase(Some(&format!("Failed to install {} resources", errors.len())));
1437        }
1438
1439        let error_msgs: Vec<String> =
1440            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
1441        return Err(anyhow::anyhow!(
1442            "Failed to install {} resources:\n{}",
1443            error_msgs.len(),
1444            error_msgs.join("\n")
1445        ));
1446    }
1447
1448    let final_count = *installed_count.lock().await;
1449
1450    // Complete installation phase successfully
1451    if let Some(ref pm) = progress
1452        && final_count > 0
1453    {
1454        pm.complete_phase(Some(&format!("Installed {final_count} resources")));
1455    }
1456
1457    Ok((final_count, checksums))
1458}
1459
1460/// Install resources with real-time dynamic progress management.
1461///
1462/// This function provides sophisticated parallel resource installation with
1463/// live progress tracking that shows individual dependency states in real-time.
/// It uses a `ProgressBar` to display which dependencies
/// are currently being processed, completed, or experiencing issues.
1466///
1467/// # Arguments
1468///
1469/// * `lockfile` - Lockfile containing all resources to install
1470/// * `manifest` - Project manifest providing configuration and target directories
1471/// * `project_dir` - Root directory where resources will be installed
1472/// * `cache` - Cache instance for Git repository and worktree management
1473/// * `force_refresh` - Whether to force refresh of cached repositories
1474/// * `max_concurrency` - Optional limit on concurrent operations (`None` = unlimited)
1475/// * `progress_bar` - Optional dynamic progress bar for real-time updates
1476///
1477/// # Dynamic Progress Features
1478///
1479/// When a `ProgressBar` is provided, the installation displays:
1480/// - Real-time list of dependencies being processed concurrently
1481/// - Live updates as dependencies start, progress, and complete
1482/// - Clean terminal output with automatic clearing when finished
1483/// - Graceful handling of errors with preserved context
1484///
1485/// # Progress Flow
1486///
1487/// 1. **Initialization**: Progress manager starts with total dependency count
1488/// 2. **Pre-warming**: Cache prepares Git worktrees for parallel access
1489/// 3. **Parallel Processing**: Dependencies install concurrently with live updates
1490/// 4. **Completion**: Progress display clears, leaving clean final state
1491///
1492/// # Examples
1493///
1494/// ```rust,no_run
1495/// use agpm_cli::installer::install_resources_with_dynamic_progress;
1496/// use agpm_cli::utils::progress::ProgressBar;
1497/// use agpm_cli::lockfile::LockFile;
1498/// use agpm_cli::manifest::Manifest;
1499/// use agpm_cli::cache::Cache;
1500/// use std::sync::Arc;
1501/// use std::path::Path;
1502///
1503/// # async fn example() -> anyhow::Result<()> {
1504/// let lockfile = LockFile::load(Path::new("agpm.lock"))?;
1505/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
1506/// let cache = Cache::new()?;
1507///
1508/// // Create dynamic progress manager
1509/// let progress_bar = Arc::new(ProgressBar::new(100));
1510///
1511/// let count = install_resources_with_dynamic_progress(
1512///     &lockfile,
1513///     &manifest,
1514///     Path::new("."),
1515///     &cache,
1516///     false,                    // No force refresh
1517///     Some(10),                 // Max 10 concurrent operations
1518///     Some(progress_bar)        // Dynamic progress display
1519/// ).await?;
1520///
1521/// println!("Successfully installed {} resources", count);
1522/// # Ok(())
1523/// # }
1524/// ```
1525///
1526/// # Performance Optimizations
1527///
1528/// The function includes several performance enhancements:
1529/// - **Worktree pre-warming**: All needed Git worktrees created upfront
1530/// - **Parallel processing**: Configurable concurrency for optimal resource usage
1531/// - **Progress batching**: Updates are batched to reduce terminal overhead
1532/// - **Efficient cleanup**: Worktrees left for reuse rather than immediate cleanup
1533///
1534/// # Returns
1535///
1536/// Returns the total number of resources that were actually installed.
1537/// This count only includes resources with new or updated content, not
1538/// resources that already existed and were unchanged.
1539///
1540/// # Errors
1541///
1542/// Returns an error if any resource installation fails. The error includes
1543/// detailed information about all failed installations. The progress manager
1544/// is automatically cleaned up even if errors occur.
1545// Removed install_resources_with_dynamic_progress - use install_resources with MultiPhaseProgress instead
1546#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
1547pub async fn install_resources_with_dynamic_progress(
1548    lockfile: &LockFile,
1549    manifest: &Manifest,
1550    project_dir: &Path,
1551    cache: &Cache,
1552    force_refresh: bool,
1553    max_concurrency: Option<usize>,
1554    progress_bar: Option<Arc<crate::utils::progress::ProgressBar>>,
1555) -> Result<usize> {
1556    // Collect all entries to install using ResourceIterator
1557    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);
1558
1559    if all_entries.is_empty() {
1560        return Ok(0);
1561    }
1562
1563    let _total = all_entries.len();
1564
1565    // Start progress if provided
1566    if let Some(ref progress) = progress_bar {
1567        progress.set_message("Installing resources");
1568    }
1569
1570    // Pre-warm the cache by creating all needed worktrees upfront
1571    // Collect unique (source, url, sha) triples to pre-create worktrees
1572    let mut unique_worktrees = HashSet::new();
1573    for (entry, _) in &all_entries {
1574        if let Some(source_name) = &entry.source
1575            && let Some(url) = &entry.url
1576        {
1577            // Only pre-warm if we have a valid SHA
1578            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
1579                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
1580            }) {
1581                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
1582            }
1583        }
1584    }
1585
1586    if !unique_worktrees.is_empty() {
1587        let worktree_futures: Vec<_> = unique_worktrees
1588            .into_iter()
1589            .map(|(source, url, sha)| {
1590                async move {
1591                    cache
1592                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
1593                        .await
1594                        .ok(); // Ignore errors during pre-warming
1595                }
1596            })
1597            .collect();
1598
1599        // Execute all worktree creations in parallel
1600        future::join_all(worktree_futures).await;
1601    }
1602
1603    // Create thread-safe progress tracking
1604    let installed_count = Arc::new(Mutex::new(0));
1605    let shared_cache = Arc::new(cache.clone());
1606    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
1607
1608    let results: Vec<InstallResult> = stream::iter(all_entries)
1609        .map(|(entry, resource_dir)| {
1610            let entry = entry.clone();
1611            let project_dir = project_dir.to_path_buf();
1612            let resource_dir = resource_dir.to_string();
1613            let installed_count = Arc::clone(&installed_count);
1614            let cache = Arc::clone(&shared_cache);
1615            let progress_bar_ref = progress_bar.clone();
1616
1617            async move {
1618                // Update progress if available
1619                if let Some(ref progress) = progress_bar_ref {
1620                    progress.set_message(format!("Installing {}", entry.name));
1621                }
1622
1623                let res = install_resource_for_parallel(
1624                    &entry,
1625                    &project_dir,
1626                    &resource_dir,
1627                    cache.as_ref(),
1628                    force_refresh,
1629                )
1630                .await;
1631
1632                // Signal completion and update count only if actually installed
1633                if let Ok((actually_installed, _checksum)) = &res {
1634                    if *actually_installed {
1635                        let mut count = installed_count.lock().await;
1636                        *count += 1;
1637                    }
1638
1639                    if let Some(ref progress) = progress_bar_ref {
1640                        progress.inc(1);
1641                    }
1642                }
1643
1644                match res {
1645                    Ok((installed, checksum)) => Ok((entry.name.clone(), installed, checksum)),
1646                    Err(err) => Err((entry.name.clone(), err)),
1647                }
1648            }
1649        })
1650        .buffer_unordered(concurrency)
1651        .collect()
1652        .await;
1653
1654    let mut errors = Vec::new();
1655    for result in results {
1656        match result {
1657            Ok((_name, _installed, _checksum)) => {
1658                // Old function doesn't return checksums
1659            }
1660            Err((name, error)) => {
1661                errors.push((name, error));
1662            }
1663        }
1664    }
1665
1666    if !errors.is_empty() {
1667        // Finish with error
1668        if let Some(ref progress) = progress_bar {
1669            progress.finish_and_clear();
1670        }
1671
1672        let error_msgs: Vec<String> =
1673            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
1674        return Err(anyhow::anyhow!(
1675            "Failed to install {} resources:\n{}",
1676            error_msgs.len(),
1677            error_msgs.join("\n")
1678        ));
1679    }
1680
1681    let final_count = *installed_count.lock().await;
1682
1683    // Clear the progress display - success message will be shown by the caller
1684    if let Some(ref progress) = progress_bar {
1685        progress.finish_and_clear();
1686    }
1687
1688    Ok(final_count)
1689}
1690
1691/// Install only specific updated resources in parallel (selective installation).
1692///
1693/// This function provides targeted installation of only the resources that have
1694/// been updated, rather than reinstalling all resources. It's designed for
1695/// efficient update operations where only a subset of dependencies have changed.
1696/// The function uses the same parallel processing architecture as full installations
1697/// but operates on a filtered set of resources.
1698///
1699/// # Arguments
1700///
1701/// * `updates` - Vector of tuples containing (name, `old_version`, `new_version`) for each updated resource
1702/// * `lockfile` - Lockfile containing all available resources (updated resources must exist here)
1703/// * `manifest` - Project manifest providing configuration and target directories
1704/// * `project_dir` - Root directory where resources will be installed
1705/// * `cache` - Cache instance for Git repository and worktree management
1706/// * `pb` - Optional progress bar for user feedback during installation
1707/// * `_quiet` - Quiet mode flag (currently unused, maintained for API compatibility)
1708///
1709/// # Update Tuple Format
1710///
1711/// Each update tuple contains:
1712/// - `name`: Resource name as defined in the lockfile
1713/// - `old_version`: Previous version (used for logging and user feedback)
1714/// - `new_version`: New version that will be installed
1715///
1716/// # Selective Processing
1717///
1718/// The function implements selective resource processing:
1719/// 1. **Filtering**: Only processes resources listed in the `updates` vector
1720/// 2. **Lookup**: Finds corresponding entries in the lockfile for each update
1721/// 3. **Validation**: Ensures all specified resources exist before processing
1722/// 4. **Installation**: Uses the same parallel architecture as full installations
1723///
1724/// # Examples
1725///
1726/// ```rust,no_run
1727/// use agpm_cli::installer::install_updated_resources;
1728/// use agpm_cli::lockfile::LockFile;
1729/// use agpm_cli::manifest::Manifest;
1730/// use agpm_cli::cache::Cache;
1731/// use agpm_cli::utils::progress::ProgressBar;
1732/// use std::path::Path;
1733///
1734/// # async fn example() -> anyhow::Result<()> {
1735/// let lockfile = LockFile::load(Path::new("agpm.lock"))?;
1736/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
1737/// let cache = Cache::new()?;
1738/// let pb = ProgressBar::new(3);
1739///
1740/// // Define which resources to update
1741/// let updates = vec![
1742///     ("ai-agent".to_string(), None, "v1.0.0".to_string(), "v1.1.0".to_string()),
1743///     ("helper-tool".to_string(), Some("community".to_string()), "v2.0.0".to_string(), "v2.1.0".to_string()),
1744///     ("data-processor".to_string(), None, "v1.5.0".to_string(), "v1.6.0".to_string()),
1745/// ];
1746///
1747/// let count = install_updated_resources(
1748///     &updates,
1749///     &lockfile,
1750///     &manifest,
1751///     Path::new("."),
1752///     &cache,
1753///     Some(&pb),
1754///     false
1755/// ).await?;
1756///
1757/// println!("Updated {} resources", count);
1758/// # Ok(())
1759/// # }
1760/// ```
1761///
1762/// # Performance Benefits
1763///
1764/// Selective installation provides significant performance benefits:
1765/// - **Reduced processing**: Only installs resources that have actually changed
1766/// - **Faster execution**: Avoids redundant operations on unchanged resources
1767/// - **Network efficiency**: Only fetches Git data for repositories with updates
1768/// - **Disk efficiency**: Minimizes file system operations and cache usage
1769///
1770/// # Integration with Update Command
1771///
1772/// This function is typically used by the `agpm update` command after dependency
1773/// resolution determines which resources have new versions available:
1774///
1775/// ```text
1776/// Update Flow:
1777/// 1. Resolve dependencies → identify version changes
1778/// 2. Update lockfile → record new versions and checksums
1779/// 3. Selective installation → install only changed resources
1780/// ```
1781///
1782/// # Returns
1783///
1784/// Returns the total number of resources that were successfully installed.
1785/// This represents the actual number of files that were updated on disk.
1786///
1787/// # Errors
1788///
1789/// Returns an error if:
1790/// - Any specified resource name is not found in the lockfile
1791/// - Git repository access fails for resources being updated
1792/// - File system operations fail during installation
1793/// - Any individual resource installation encounters an error
1794///
1795/// The function uses atomic error handling - if any resource fails, the entire
1796/// operation fails and detailed error information is provided.
1797pub async fn install_updated_resources(
1798    updates: &[(String, Option<String>, String, String)], // (name, source, old_version, new_version)
1799    lockfile: &LockFile,
1800    manifest: &Manifest,
1801    project_dir: &Path,
1802    cache: &Cache,
1803    pb: Option<&ProgressBar>,
1804    _quiet: bool,
1805) -> Result<usize> {
1806    if updates.is_empty() {
1807        return Ok(0);
1808    }
1809
1810    let total = updates.len();
1811
1812    // Collect all entries to install
1813    let mut entries_to_install = Vec::new();
1814    for (name, source, _, _) in updates {
1815        if let Some((resource_type, entry)) =
1816            ResourceIterator::find_resource_by_name_and_source(lockfile, name, source.as_deref())
1817        {
1818            // Try artifact config first, fall back to legacy target config
1819            let target_dir = if let Some(artifact_path) =
1820                manifest.get_artifact_resource_path(&entry.tool, resource_type)
1821            {
1822                artifact_path.display().to_string()
1823            } else {
1824                // Fall back to legacy target config
1825                #[allow(deprecated)]
1826                resource_type.get_target_dir(&manifest.target).to_string()
1827            };
1828            entries_to_install.push((entry.clone(), target_dir));
1829        }
1830    }
1831
1832    if entries_to_install.is_empty() {
1833        return Ok(0);
1834    }
1835
1836    // Pre-warm the cache by creating all needed worktrees upfront
1837    if let Some(pb) = pb {
1838        pb.set_message("Preparing resources...");
1839    }
1840
1841    // Collect unique (source, url, sha) triples to pre-create worktrees
1842    let mut unique_worktrees = HashSet::new();
1843    for (entry, _) in &entries_to_install {
1844        if let Some(source_name) = &entry.source
1845            && let Some(url) = &entry.url
1846        {
1847            // Only pre-warm if we have a valid SHA
1848            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
1849                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
1850            }) {
1851                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
1852            }
1853        }
1854    }
1855
1856    // Pre-create all worktrees in parallel
1857    if !unique_worktrees.is_empty() {
1858        let worktree_futures: Vec<_> = unique_worktrees
1859            .into_iter()
1860            .map(|(source, url, sha)| {
1861                async move {
1862                    cache
1863                        .get_or_create_worktree_for_sha(
1864                            &source,
1865                            &url,
1866                            &sha,
1867                            Some("update-pre-warm"),
1868                        )
1869                        .await
1870                        .ok(); // Ignore errors during pre-warming
1871                }
1872            })
1873            .collect();
1874
1875        // Execute all worktree creations in parallel
1876        future::join_all(worktree_futures).await;
1877    }
1878
1879    // Create thread-safe progress tracking
1880    let installed_count = Arc::new(Mutex::new(0));
1881    let pb = pb.map(Arc::new);
1882    let cache = Arc::new(cache);
1883
1884    // Set initial progress
1885    if let Some(ref pb) = pb {
1886        pb.set_message(format!("Installing 0/{total} resources"));
1887    }
1888
1889    // Use concurrent stream processing for parallel installation
1890    let results: Vec<Result<(), anyhow::Error>> = stream::iter(entries_to_install)
1891        .map(|(entry, resource_dir)| {
1892            let project_dir = project_dir.to_path_buf();
1893            let installed_count = Arc::clone(&installed_count);
1894            let pb = pb.clone();
1895            let cache = Arc::clone(&cache);
1896
1897            async move {
1898                // Install the resource
1899                install_resource_for_parallel(
1900                    &entry,
1901                    &project_dir,
1902                    &resource_dir,
1903                    cache.as_ref(),
1904                    false,
1905                )
1906                .await?;
1907
1908                // Update progress
1909                let mut count = installed_count.lock().await;
1910                *count += 1;
1911
1912                if let Some(pb) = pb {
1913                    pb.set_message(format!("Installing {}/{} resources", *count, total));
1914                    pb.inc(1);
1915                }
1916
1917                Ok::<(), anyhow::Error>(())
1918            }
1919        })
1920        .buffer_unordered(usize::MAX) // Allow unlimited task concurrency
1921        .collect()
1922        .await;
1923
1924    // Check all results for errors
1925    for result in results {
1926        result?;
1927    }
1928
1929    let final_count = *installed_count.lock().await;
1930    Ok(final_count)
1931}
1932
1933/// Update .gitignore with installed file paths
1934pub fn update_gitignore(lockfile: &LockFile, project_dir: &Path, enabled: bool) -> Result<()> {
1935    if !enabled {
1936        // Gitignore management is disabled
1937        return Ok(());
1938    }
1939
1940    let gitignore_path = project_dir.join(".gitignore");
1941
1942    // Collect all installed file paths relative to project root
1943    let mut paths_to_ignore = HashSet::new();
1944
1945    // Helper to add paths from a resource list
1946    let mut add_resource_paths = |resources: &[LockedResource]| {
1947        for resource in resources {
1948            if !resource.installed_at.is_empty() {
1949                // Use the explicit installed_at path
1950                paths_to_ignore.insert(resource.installed_at.clone());
1951            }
1952        }
1953    };
1954
1955    // Collect paths from all resource types
1956    // Skip hooks and MCP servers - they are configured only, not installed as files
1957    add_resource_paths(&lockfile.agents);
1958    add_resource_paths(&lockfile.snippets);
1959    add_resource_paths(&lockfile.commands);
1960    add_resource_paths(&lockfile.scripts);
1961
1962    // Read existing gitignore if it exists
1963    let mut before_agpm_section = Vec::new();
1964    let mut after_agpm_section = Vec::new();
1965
1966    if gitignore_path.exists() {
1967        let content = fs::read_to_string(&gitignore_path)
1968            .with_context(|| format!("Failed to read {}", gitignore_path.display()))?;
1969
1970        let mut in_agpm_section = false;
1971        let mut past_agpm_section = false;
1972
1973        for line in content.lines() {
1974            // Support both AGPM and legacy CCPM markers for migration compatibility
1975            if line == "# AGPM managed entries - do not edit below this line"
1976                || line == "# CCPM managed entries - do not edit below this line"
1977            {
1978                in_agpm_section = true;
1979                continue;
1980            } else if line == "# End of AGPM managed entries"
1981                || line == "# End of CCPM managed entries"
1982            {
1983                in_agpm_section = false;
1984                past_agpm_section = true;
1985                continue;
1986            }
1987
1988            if !in_agpm_section && !past_agpm_section {
1989                // Preserve everything before AGPM section exactly as-is
1990                before_agpm_section.push(line.to_string());
1991            } else if in_agpm_section {
1992                // Skip existing AGPM/CCPM entries (they'll be replaced)
1993                continue;
1994            } else {
1995                // Preserve everything after AGPM section exactly as-is
1996                after_agpm_section.push(line.to_string());
1997            }
1998        }
1999    }
2000
2001    // Build the new content
2002    let mut new_content = String::new();
2003
2004    // Add everything before AGPM section exactly as it was
2005    if !before_agpm_section.is_empty() {
2006        for line in &before_agpm_section {
2007            new_content.push_str(line);
2008            new_content.push('\n');
2009        }
2010        // Add blank line before AGPM section if the previous content doesn't end with one
2011        if !before_agpm_section.is_empty() && !before_agpm_section.last().unwrap().trim().is_empty()
2012        {
2013            new_content.push('\n');
2014        }
2015    }
2016
2017    // Add AGPM managed section
2018    new_content.push_str("# AGPM managed entries - do not edit below this line\n");
2019
2020    // Convert paths to gitignore format (relative to project root)
2021    // Sort paths for consistent output
2022    let mut sorted_paths: Vec<_> = paths_to_ignore.into_iter().collect();
2023    sorted_paths.sort();
2024
2025    for path in &sorted_paths {
2026        // Use paths as-is since gitignore is now at project root
2027        let ignore_path = if path.starts_with("./") {
2028            // Remove leading ./ if present
2029            path.strip_prefix("./").unwrap_or(path).to_string()
2030        } else {
2031            path.clone()
2032        };
2033
2034        // Normalize to forward slashes for .gitignore (Git expects forward slashes on all platforms)
2035        let normalized_path = ignore_path.replace('\\', "/");
2036
2037        new_content.push_str(&normalized_path);
2038        new_content.push('\n');
2039    }
2040
2041    new_content.push_str("# End of AGPM managed entries\n");
2042
2043    // Add everything after AGPM section exactly as it was
2044    if !after_agpm_section.is_empty() {
2045        new_content.push('\n');
2046        for line in &after_agpm_section {
2047            new_content.push_str(line);
2048            new_content.push('\n');
2049        }
2050    }
2051
2052    // If this is a new file, add a basic header
2053    if before_agpm_section.is_empty() && after_agpm_section.is_empty() {
2054        let mut default_content = String::new();
2055        default_content.push_str("# .gitignore - AGPM managed entries\n");
2056        default_content.push_str("# AGPM entries are automatically generated\n");
2057        default_content.push('\n');
2058        default_content.push_str("# AGPM managed entries - do not edit below this line\n");
2059
2060        // Add the AGPM paths
2061        for path in &sorted_paths {
2062            let ignore_path = if path.starts_with("./") {
2063                path.strip_prefix("./").unwrap_or(path).to_string()
2064            } else {
2065                path.clone()
2066            };
2067            // Normalize to forward slashes for .gitignore (Git expects forward slashes on all platforms)
2068            let normalized_path = ignore_path.replace('\\', "/");
2069            default_content.push_str(&normalized_path);
2070            default_content.push('\n');
2071        }
2072
2073        default_content.push_str("# End of AGPM managed entries\n");
2074        new_content = default_content;
2075    }
2076
2077    // Write the updated gitignore
2078    atomic_write(&gitignore_path, new_content.as_bytes())
2079        .with_context(|| format!("Failed to update {}", gitignore_path.display()))?;
2080
2081    Ok(())
2082}
2083
2084/// Removes artifacts that are no longer needed based on lockfile comparison.
2085///
2086/// This function performs automatic cleanup of obsolete resource files by comparing
2087/// the old and new lockfiles. It identifies and removes artifacts that have been:
2088/// - **Removed from manifest**: Dependencies deleted from `agpm.toml`
2089/// - **Relocated**: Files with changed `installed_at` paths due to:
2090///   - Relative path preservation (v0.3.18+)
2091///   - Custom target changes
2092///   - Dependency name changes
2093/// - **Replaced**: Resources that moved due to source or version changes
2094///
2095/// After removing files, it also cleans up any empty parent directories to prevent
2096/// directory accumulation over time.
2097///
2098/// # Cleanup Strategy
2099///
2100/// The function uses a **set-based difference algorithm**:
2101/// 1. Collects all `installed_at` paths from the new lockfile into a `HashSet`
2102/// 2. Iterates through old lockfile resources
2103/// 3. For each old path not in the new set:
2104///    - Removes the file if it exists
2105///    - Recursively cleans empty parent directories
2106///    - Records the path for reporting
2107///
2108/// # Arguments
2109///
2110/// * `old_lockfile` - The previous lockfile state containing old installation paths
2111/// * `new_lockfile` - The current lockfile state with updated installation paths
2112/// * `project_dir` - The project root directory (usually contains `.claude/`)
2113///
2114/// # Returns
2115///
2116/// Returns `Ok(Vec<String>)` containing the list of `installed_at` paths that were
2117/// successfully removed. An empty vector indicates no artifacts needed cleanup.
2118///
2119/// # Errors
2120///
2121/// Returns an error if:
2122/// - File removal fails due to permissions or locks
2123/// - Directory cleanup encounters unexpected I/O errors
2124/// - File system operations fail for other reasons
2125///
2126/// # Examples
2127///
2128/// ## Basic Cleanup After Update
2129///
2130/// ```no_run
2131/// use agpm_cli::installer::cleanup_removed_artifacts;
2132/// use agpm_cli::lockfile::LockFile;
2133/// use std::path::Path;
2134///
2135/// # async fn example() -> anyhow::Result<()> {
2136/// let old_lockfile = LockFile::load(Path::new("agpm.lock"))?;
2137/// let new_lockfile = LockFile::new(); // After resolution
2138/// let project_dir = Path::new(".");
2139///
2140/// let removed = cleanup_removed_artifacts(&old_lockfile, &new_lockfile, project_dir).await?;
2141/// if !removed.is_empty() {
2142///     println!("Cleaned up {} artifact(s)", removed.len());
2143///     for path in removed {
2144///         println!("  - Removed: {}", path);
2145///     }
2146/// }
2147/// # Ok(())
2148/// # }
2149/// ```
2150///
2151/// ## Cleanup After Path Migration
2152///
2153/// When relative path preservation changes installation paths:
2154///
2155/// ```text
2156/// Old lockfile (v0.3.17):
2157///   installed_at: ".claude/agents/helper.md"
2158///
2159/// New lockfile (v0.3.18+):
2160///   installed_at: ".claude/agents/ai/helper.md"  # Preserved subdirectory
2161///
2162/// Cleanup removes: .claude/agents/helper.md
2163/// ```
2164///
2165/// ## Cleanup After Dependency Removal
2166///
2167/// ```no_run
2168/// # use agpm_cli::installer::cleanup_removed_artifacts;
2169/// # use agpm_cli::lockfile::{LockFile, LockedResource};
2170/// # use std::path::Path;
2171/// # async fn removal_example() -> anyhow::Result<()> {
2172/// // Old lockfile had 3 agents
2173/// let mut old_lockfile = LockFile::new();
2174/// old_lockfile.agents = vec![
2175///     // ... 3 agents including one at .claude/agents/removed.md
2176/// ];
2177///
2178/// // New lockfile only has 2 agents (one was removed from manifest)
2179/// let mut new_lockfile = LockFile::new();
2180/// new_lockfile.agents = vec![
2181///     // ... 2 agents, removed.md is gone
2182/// ];
2183///
2184/// let removed = cleanup_removed_artifacts(&old_lockfile, &new_lockfile, Path::new(".")).await?;
2185/// assert!(removed.contains(&".claude/agents/removed.md".to_string()));
2186/// # Ok(())
2187/// # }
2188/// ```
2189///
2190/// ## Integration with Install Command
2191///
2192/// This function is automatically called during `agpm install` when both old and
2193/// new lockfiles exist:
2194///
2195/// ```rust,ignore
2196/// // In src/cli/install.rs
2197/// if !self.frozen && !self.regenerate && lockfile_path.exists() {
2198///     if let Ok(old_lockfile) = LockFile::load(&lockfile_path) {
2199///         detect_tag_movement(&old_lockfile, &lockfile, self.quiet);
2200///
2201///         // Automatic cleanup of removed or moved artifacts
2202///         if let Ok(removed) = cleanup_removed_artifacts(
2203///             &old_lockfile,
2204///             &lockfile,
2205///             actual_project_dir,
2206///         ).await && !removed.is_empty() && !self.quiet {
2207///             println!("🗑️  Cleaned up {} moved or removed artifact(s)", removed.len());
2208///         }
2209///     }
2210/// }
2211/// ```
2212///
2213/// # Performance
2214///
2215/// - **Time Complexity**: O(n + m) where n = old resources, m = new resources
2216/// - **Space Complexity**: O(m) for the `HashSet` of new paths
2217/// - **I/O Operations**: One file removal per obsolete artifact
2218/// - **Directory Cleanup**: Walks up parent directories once per removed file
2219///
2220/// The function is highly efficient as it:
2221/// - Uses `HashSet` for O(1) path lookups
2222/// - Only performs I/O for files that actually exist
2223/// - Cleans directories recursively but stops at first non-empty directory
2224///
2225/// # Safety
2226///
2227/// - Only removes files explicitly tracked in the old lockfile
2228/// - Never removes files outside the project directory
2229/// - Stops directory cleanup at `.claude/` boundary
2230/// - Handles concurrent file access gracefully (ENOENT is not an error)
2231///
2232/// # Use Cases
2233///
2234/// ## Relative Path Migration (v0.3.18+)
2235///
2236/// When upgrading to v0.3.18+, resource paths change to preserve directory structure:
2237/// ```text
2238/// Before: .claude/agents/helper.md  (flat)
2239/// After:  .claude/agents/ai/helper.md  (nested)
2240/// ```
2241/// This function removes the old flat file automatically.
2242///
2243/// ## Dependency Reorganization
2244///
2245/// When reorganizing dependencies with custom targets:
2246/// ```toml
2247/// # Before
2248/// [agents]
2249/// helper = { source = "community", path = "agents/helper.md" }
2250///
2251/// # After (with custom target)
2252/// [agents]
2253/// helper = { source = "community", path = "agents/helper.md", target = "tools" }
2254/// ```
2255/// Old file at `.claude/agents/helper.md` is removed, new file at
2256/// `.claude/agents/tools/helper.md` is installed.
2257///
2258/// ## Manifest Cleanup
2259///
2260/// Simply removing dependencies from `agpm.toml` triggers automatic cleanup:
2261/// ```toml
2262/// # Remove unwanted dependency
2263/// [agents]
2264/// # old-agent = { ... }  # Commented out or deleted
2265/// ```
2266/// The next `agpm install` removes the old agent file automatically.
2267///
2268/// # Version History
2269///
2270/// - **v0.3.18**: Introduced to handle relative path preservation and custom target changes
2271/// - Works in conjunction with `cleanup_empty_dirs()` for comprehensive cleanup
2272pub async fn cleanup_removed_artifacts(
2273    old_lockfile: &LockFile,
2274    new_lockfile: &LockFile,
2275    project_dir: &std::path::Path,
2276) -> Result<Vec<String>> {
2277    use std::collections::HashSet;
2278
2279    let mut removed = Vec::new();
2280
2281    // Collect all installed paths from new lockfile
2282    let new_paths: HashSet<String> =
2283        new_lockfile.all_resources().into_iter().map(|r| r.installed_at.clone()).collect();
2284
2285    // Check each old resource
2286    for old_resource in old_lockfile.all_resources() {
2287        // If the old path doesn't exist in new lockfile, it needs to be removed
2288        if !new_paths.contains(&old_resource.installed_at) {
2289            let full_path = project_dir.join(&old_resource.installed_at);
2290
2291            // Only remove if the file actually exists
2292            if full_path.exists() {
2293                tokio::fs::remove_file(&full_path).await.with_context(|| {
2294                    format!("Failed to remove old artifact: {}", full_path.display())
2295                })?;
2296
2297                removed.push(old_resource.installed_at.clone());
2298
2299                // Try to clean up empty parent directories
2300                cleanup_empty_dirs(&full_path).await?;
2301            }
2302        }
2303    }
2304
2305    Ok(removed)
2306}
2307
2308/// Recursively removes empty parent directories up to the project root.
2309///
2310/// This helper function performs bottom-up directory cleanup after file removal.
2311/// It walks up the directory tree from a given file path, removing empty parent
2312/// directories until it encounters:
2313/// - A non-empty directory (containing other files or subdirectories)
2314/// - The `.claude` directory boundary (cleanup stops here for safety)
2315/// - The project root (no parent directory)
2316/// - A directory that cannot be removed (permissions, locks, etc.)
2317///
2318/// This prevents accumulation of empty directory trees over time as resources
2319/// are removed, renamed, or relocated.
2320///
2321/// # Cleanup Algorithm
2322///
2323/// The function implements a **safe recursive cleanup** strategy:
2324/// 1. Starts at the parent directory of the given file path
2325/// 2. Attempts to remove the directory
2326/// 3. If successful (directory was empty), moves to parent and repeats
2327/// 4. If unsuccessful, stops immediately (directory has content or other issues)
2328/// 5. Always stops at `.claude/` directory to avoid over-cleanup
2329///
2330/// # Safety Boundaries
2331///
2332/// The function enforces strict boundaries to prevent accidental data loss:
2333/// - **`.claude/` boundary**: Never removes the `.claude` directory itself
2334/// - **Project root**: Stops if parent directory is None
2335/// - **Non-empty guard**: Only removes truly empty directories
2336/// - **Error tolerance**: ENOENT (directory not found) is not considered an error
2337///
2338/// # Arguments
2339///
2340/// * `file_path` - The path to the removed file whose parent directories should be cleaned.
2341///   Typically this is the full path to a resource file that was just deleted.
2342///
2343/// # Returns
2344///
2345/// Returns `Ok(())` in all normal cases, including:
2346/// - All empty directories successfully removed
2347/// - Cleanup stopped at a non-empty directory
2348/// - Directory already doesn't exist (ENOENT)
2349///
/// # Errors
///
/// In the current implementation no error is ever propagated: removal
/// failures other than ENOENT simply terminate the cleanup walk, and the
/// function returns `Ok(())`. The `Result` return type is retained so
/// callers can use `?` and so future versions may surface unexpected
/// I/O failures.
2354///
2355/// # Examples
2356///
2357/// ## Basic Directory Cleanup
2358///
2359/// ```ignore
2360/// # use agpm_cli::installer::cleanup_empty_dirs;
2361/// # use std::path::Path;
2362/// # use std::fs;
2363/// # async fn example() -> anyhow::Result<()> {
2364/// // After removing: .claude/agents/rust/specialized/expert.md
2365/// let file_path = Path::new(".claude/agents/rust/specialized/expert.md");
2366///
2367/// // If this was the last file in specialized/, the directory will be removed
2368/// // If specialized/ was the last item in rust/, that will be removed too
2369/// // Cleanup stops at .claude/agents/ or when it finds a non-empty directory
2370/// cleanup_empty_dirs(file_path).await?;
2371/// # Ok(())
2372/// # }
2373/// ```
2374///
2375/// ## Cleanup Scenarios
2376///
2377/// ### Scenario 1: Full Cleanup
2378///
2379/// ```text
2380/// Before:
2381///   .claude/agents/rust/specialized/expert.md  (only file in hierarchy)
2382///
2383/// After removing expert.md:
2384///   cleanup_empty_dirs() removes:
2385///   - .claude/agents/rust/specialized/  (now empty)
2386///   - .claude/agents/rust/              (now empty)
2387///   Stops at .claude/agents/ (keeps base directory)
2388/// ```
2389///
2390/// ### Scenario 2: Partial Cleanup
2391///
2392/// ```text
2393/// Before:
2394///   .claude/agents/rust/specialized/expert.md
2395///   .claude/agents/rust/specialized/tester.md
2396///   .claude/agents/rust/basic.md
2397///
2398/// After removing expert.md:
2399///   .claude/agents/rust/specialized/ still has tester.md
2400///   cleanup_empty_dirs() stops at specialized/ (not empty)
2401/// ```
2402///
2403/// ### Scenario 3: Boundary Enforcement
2404///
2405/// ```text
2406/// After removing: .claude/agents/only-agent.md
2407///
2408/// cleanup_empty_dirs() attempts to remove:
2409/// - .claude/agents/ (empty now)
2410/// - But stops because parent is .claude/ (boundary)
2411///
2412/// Result: .claude/agents/ remains (empty but preserved)
2413/// ```
2414///
2415/// ## Integration with `cleanup_removed_artifacts`
2416///
2417/// This function is called automatically by [`cleanup_removed_artifacts`]
2418/// after each file removal:
2419///
2420/// ```rust,ignore
2421/// for old_resource in old_lockfile.all_resources() {
2422///     if !new_paths.contains(&old_resource.installed_at) {
2423///         let full_path = project_dir.join(&old_resource.installed_at);
2424///
2425///         if full_path.exists() {
2426///             tokio::fs::remove_file(&full_path).await?;
2427///             removed.push(old_resource.installed_at.clone());
2428///
2429///             // Automatic directory cleanup after file removal
2430///             cleanup_empty_dirs(&full_path).await?;
2431///         }
2432///     }
2433/// }
2434/// ```
2435///
2436/// # Performance
2437///
2438/// - **Time Complexity**: O(d) where d = directory depth from file to `.claude/`
2439/// - **I/O Operations**: One `remove_dir` attempt per directory level
2440/// - **Early Termination**: Stops immediately on first non-empty directory
2441///
2442/// The function is extremely efficient as it:
2443/// - Only walks up the directory tree (no scanning of siblings)
2444/// - Stops at the first non-empty directory (no unnecessary attempts)
2445/// - Uses atomic `remove_dir` which fails fast on non-empty directories
2446/// - Typical depth is 2-4 levels (.claude/agents/subdir/file.md)
2447///
2448/// # Error Handling Strategy
2449///
2450/// The function differentiates between expected and unexpected errors:
2451///
2452/// | Error Kind | Interpretation | Action |
2453/// |------------|----------------|--------|
2454/// | `Ok(())` | Directory was empty and removed | Continue up tree |
2455/// | `ENOENT` | Directory doesn't exist | Continue up tree (race condition) |
2456/// | `ENOTEMPTY` | Directory has contents | Stop cleanup (expected) |
2457/// | `EPERM` | No permission | Stop cleanup (expected) |
/// | Other | Unexpected I/O error | Stop cleanup (not propagated) |
2459///
2460/// In practice, most errors simply stop the cleanup process without failing
2461/// the overall operation, as the goal is best-effort cleanup.
2462///
2463/// # Thread Safety
2464///
2465/// This function is safe for concurrent use because:
2466/// - Uses async filesystem operations from `tokio::fs`
2467/// - `remove_dir` is atomic (succeeds only if directory is empty)
2468/// - ENOENT handling accounts for race conditions
2469/// - Multiple concurrent calls won't interfere with each other
2470///
2471/// # Use Cases
2472///
2473/// ## After Pattern-Based Installation Changes
2474///
2475/// When pattern matches change, old directory structures may become empty:
2476/// ```toml
2477/// # Old: pattern matched agents/rust/expert.md, agents/rust/testing.md
2478/// # New: pattern only matches agents/rust/expert.md
2479///
2480/// # testing.md removed → agents/rust/ might now be empty
2481/// ```
2482///
2483/// ## After Custom Target Changes
2484///
2485/// Custom target changes can leave old directory structures empty:
2486/// ```toml
2487/// # Old: target = "tools"  → .claude/agents/tools/helper.md
2488/// # New: target = "utils" → .claude/agents/utils/helper.md
2489///
2490/// # .claude/agents/tools/ might now be empty
2491/// ```
2492///
2493/// ## After Dependency Removal
2494///
2495/// Removing the last dependency in a category may leave empty subdirectories:
2496/// ```toml
2497/// [agents]
2498/// # Removed: python-helper (was in agents/python/)
2499/// # Only agents/rust/ remains
2500///
2501/// # .claude/agents/python/ should be cleaned up
2502/// ```
2503///
2504/// # Design Rationale
2505///
2506/// This function exists to solve the "directory accumulation problem":
2507/// - Without cleanup: Empty directories accumulate over time
2508/// - With cleanup: Project structure stays clean and organized
2509/// - Safety boundaries: Prevents accidental removal of important directories
2510/// - Best-effort approach: Cleanup failures don't block main operations
2511///
2512/// # Version History
2513///
2514/// - **v0.3.18**: Introduced alongside [`cleanup_removed_artifacts`]
2515/// - Complements relative path preservation by cleaning up old directory structures
2516async fn cleanup_empty_dirs(file_path: &std::path::Path) -> Result<()> {
2517    let mut current = file_path.parent();
2518
2519    while let Some(dir) = current {
2520        // Stop if we've reached .claude or the project root
2521        if dir.ends_with(".claude") || dir.parent().is_none() {
2522            break;
2523        }
2524
2525        // Try to remove the directory (will only succeed if empty)
2526        match tokio::fs::remove_dir(dir).await {
2527            Ok(()) => {
2528                // Directory was empty and removed, continue up
2529                current = dir.parent();
2530            }
2531            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
2532                // Directory doesn't exist, continue up
2533                current = dir.parent();
2534            }
2535            Err(_) => {
2536                // Directory is not empty or we don't have permission, stop here
2537                break;
2538            }
2539        }
2540    }
2541
2542    Ok(())
2543}
2544
2545#[cfg(test)]
2546mod tests {
2547    use super::*;
2548    use tempfile::TempDir;
2549
2550    fn create_test_locked_resource(name: &str, is_local: bool) -> LockedResource {
2551        if is_local {
2552            LockedResource {
2553                name: name.to_string(),
2554                source: None,
2555                url: None,
2556                path: "test.md".to_string(),
2557                version: None,
2558                resolved_commit: None,
2559                checksum: String::new(),
2560                installed_at: String::new(),
2561                dependencies: vec![],
2562                resource_type: crate::core::ResourceType::Agent,
2563
2564                tool: "claude-code".to_string(),
2565            }
2566        } else {
2567            LockedResource {
2568                name: name.to_string(),
2569                source: Some("test_source".to_string()),
2570                url: Some("https://github.com/test/repo.git".to_string()),
2571                path: "resources/test.md".to_string(),
2572                version: Some("v1.0.0".to_string()),
2573                resolved_commit: Some("abc123".to_string()),
2574                checksum: "sha256:test".to_string(),
2575                installed_at: String::new(),
2576                dependencies: vec![],
2577                resource_type: crate::core::ResourceType::Agent,
2578
2579                tool: "claude-code".to_string(),
2580            }
2581        }
2582    }
2583
2584    #[tokio::test]
2585    async fn test_install_resource_local() {
2586        let temp_dir = TempDir::new().unwrap();
2587        let project_dir = temp_dir.path();
2588        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2589
2590        // Create a local markdown file
2591        let local_file = temp_dir.path().join("test.md");
2592        std::fs::write(&local_file, "# Test Resource\nThis is a test").unwrap();
2593
2594        // Create a locked resource pointing to the local file
2595        let mut entry = create_test_locked_resource("local-test", true);
2596        entry.path = local_file.to_string_lossy().to_string();
2597
2598        // Install the resource
2599        let result = install_resource(&entry, project_dir, "agents", &cache, false).await;
2600        assert!(result.is_ok(), "Failed to install local resource: {:?}", result);
2601
2602        // Should be installed the first time
2603        let (installed, _checksum) = result.unwrap();
2604        assert!(installed, "Should have installed new resource");
2605
2606        // Verify the file was installed
2607        let expected_path = project_dir.join("agents").join("local-test.md");
2608        assert!(expected_path.exists(), "Installed file not found");
2609
2610        // Verify content
2611        let content = std::fs::read_to_string(expected_path).unwrap();
2612        assert_eq!(content, "# Test Resource\nThis is a test");
2613    }
2614
2615    #[tokio::test]
2616    async fn test_install_resource_with_custom_path() {
2617        let temp_dir = TempDir::new().unwrap();
2618        let project_dir = temp_dir.path();
2619        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2620
2621        // Create a local markdown file
2622        let local_file = temp_dir.path().join("test.md");
2623        std::fs::write(&local_file, "# Custom Path Test").unwrap();
2624
2625        // Create a locked resource with custom installation path
2626        let mut entry = create_test_locked_resource("custom-test", true);
2627        entry.path = local_file.to_string_lossy().to_string();
2628        entry.installed_at = "custom/location/resource.md".to_string();
2629
2630        // Install the resource
2631        let result = install_resource(&entry, project_dir, "agents", &cache, false).await;
2632        assert!(result.is_ok());
2633        let (installed, _checksum) = result.unwrap();
2634        assert!(installed, "Should have installed new resource");
2635
2636        // Verify the file was installed at custom path
2637        let expected_path = project_dir.join("custom/location/resource.md");
2638        assert!(expected_path.exists(), "File not installed at custom path");
2639    }
2640
2641    #[tokio::test]
2642    async fn test_install_resource_local_missing_file() {
2643        let temp_dir = TempDir::new().unwrap();
2644        let project_dir = temp_dir.path();
2645        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2646
2647        // Create a locked resource pointing to non-existent file
2648        let mut entry = create_test_locked_resource("missing-test", true);
2649        entry.path = "/non/existent/file.md".to_string();
2650
2651        // Try to install the resource
2652        let result = install_resource(&entry, project_dir, "agents", &cache, false).await;
2653        assert!(result.is_err());
2654        let error_msg = result.unwrap_err().to_string();
2655        assert!(error_msg.contains("Local file") && error_msg.contains("not found"));
2656    }
2657
2658    #[tokio::test]
2659    async fn test_install_resource_invalid_markdown_frontmatter() {
2660        let temp_dir = TempDir::new().unwrap();
2661        let project_dir = temp_dir.path();
2662        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2663
2664        // Create a markdown file with invalid frontmatter
2665        let local_file = temp_dir.path().join("invalid.md");
2666        std::fs::write(&local_file, "---\ninvalid: yaml: [\n---\nContent").unwrap();
2667
2668        // Create a locked resource
2669        let mut entry = create_test_locked_resource("invalid-test", true);
2670        entry.path = local_file.to_string_lossy().to_string();
2671
2672        // Install should now succeed even with invalid frontmatter (just emits a warning)
2673        let result = install_resource(&entry, project_dir, "agents", &cache, false).await;
2674        assert!(result.is_ok());
2675        let (installed, _checksum) = result.unwrap();
2676        assert!(installed);
2677
2678        // Verify the file was installed
2679        let dest_path = project_dir.join("agents/invalid-test.md");
2680        assert!(dest_path.exists());
2681
2682        // Content should include the entire file since frontmatter was invalid
2683        let installed_content = std::fs::read_to_string(&dest_path).unwrap();
2684        assert!(installed_content.contains("---"));
2685        assert!(installed_content.contains("invalid: yaml:"));
2686        assert!(installed_content.contains("Content"));
2687    }
2688
2689    #[tokio::test]
2690    async fn test_install_resource_with_progress() {
2691        let temp_dir = TempDir::new().unwrap();
2692        let project_dir = temp_dir.path();
2693        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2694        let pb = ProgressBar::new(1);
2695
2696        // Create a local markdown file
2697        let local_file = temp_dir.path().join("test.md");
2698        std::fs::write(&local_file, "# Progress Test").unwrap();
2699
2700        // Create a locked resource
2701        let mut entry = create_test_locked_resource("progress-test", true);
2702        entry.path = local_file.to_string_lossy().to_string();
2703
2704        // Install with progress
2705        let result =
2706            install_resource_with_progress(&entry, project_dir, "agents", &cache, false, &pb).await;
2707        assert!(result.is_ok());
2708
2709        // Verify installation
2710        let expected_path = project_dir.join("agents").join("progress-test.md");
2711        assert!(expected_path.exists());
2712    }
2713
2714    #[tokio::test]
2715    async fn test_install_resources_empty() {
2716        let temp_dir = TempDir::new().unwrap();
2717        let project_dir = temp_dir.path();
2718        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2719
2720        // Create empty lockfile and manifest
2721        let lockfile = LockFile::new();
2722        let manifest = Manifest::new();
2723
2724        let (count, _) = install_resources(
2725            ResourceFilter::All,
2726            &lockfile,
2727            &manifest,
2728            project_dir,
2729            cache,
2730            false,
2731            None,
2732            None,
2733        )
2734        .await
2735        .unwrap();
2736
2737        assert_eq!(count, 0, "Should install 0 resources from empty lockfile");
2738    }
2739
2740    #[tokio::test]
2741    async fn test_install_resources_multiple() {
2742        let temp_dir = TempDir::new().unwrap();
2743        let project_dir = temp_dir.path();
2744        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2745
2746        // Create test markdown files
2747        let file1 = temp_dir.path().join("agent.md");
2748        let file2 = temp_dir.path().join("snippet.md");
2749        let file3 = temp_dir.path().join("command.md");
2750        std::fs::write(&file1, "# Agent").unwrap();
2751        std::fs::write(&file2, "# Snippet").unwrap();
2752        std::fs::write(&file3, "# Command").unwrap();
2753
2754        // Create lockfile with multiple resources
2755        let mut lockfile = LockFile::new();
2756        let mut agent = create_test_locked_resource("test-agent", true);
2757        agent.path = file1.to_string_lossy().to_string();
2758        lockfile.agents.push(agent);
2759
2760        let mut snippet = create_test_locked_resource("test-snippet", true);
2761        snippet.path = file2.to_string_lossy().to_string();
2762        lockfile.snippets.push(snippet);
2763
2764        let mut command = create_test_locked_resource("test-command", true);
2765        command.path = file3.to_string_lossy().to_string();
2766        lockfile.commands.push(command);
2767
2768        let manifest = Manifest::new();
2769
2770        let (count, _) = install_resources(
2771            ResourceFilter::All,
2772            &lockfile,
2773            &manifest,
2774            project_dir,
2775            cache,
2776            false,
2777            None,
2778            None,
2779        )
2780        .await
2781        .unwrap();
2782
2783        assert_eq!(count, 3, "Should install 3 resources");
2784
2785        // Verify all files were installed (using default directories)
2786        assert!(project_dir.join(".claude/agents/test-agent.md").exists());
2787        assert!(project_dir.join(".claude/agpm/snippets/test-snippet.md").exists());
2788        assert!(project_dir.join(".claude/commands/test-command.md").exists());
2789    }
2790
2791    #[tokio::test]
2792    async fn test_install_updated_resources() {
2793        let temp_dir = TempDir::new().unwrap();
2794        let project_dir = temp_dir.path();
2795        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2796
2797        // Create test markdown files
2798        let file1 = temp_dir.path().join("agent.md");
2799        let file2 = temp_dir.path().join("snippet.md");
2800        std::fs::write(&file1, "# Updated Agent").unwrap();
2801        std::fs::write(&file2, "# Updated Snippet").unwrap();
2802
2803        // Create lockfile with resources
2804        let mut lockfile = LockFile::new();
2805        let mut agent = create_test_locked_resource("test-agent", true);
2806        agent.path = file1.to_string_lossy().to_string();
2807        lockfile.agents.push(agent);
2808
2809        let mut snippet = create_test_locked_resource("test-snippet", true);
2810        snippet.path = file2.to_string_lossy().to_string();
2811        lockfile.snippets.push(snippet);
2812
2813        let manifest = Manifest::new();
2814
2815        // Define updates (only agent is updated)
2816        let updates = vec![(
2817            "test-agent".to_string(),
2818            None, // source
2819            "v1.0.0".to_string(),
2820            "v1.1.0".to_string(),
2821        )];
2822
2823        let count = install_updated_resources(
2824            &updates,
2825            &lockfile,
2826            &manifest,
2827            project_dir,
2828            &cache,
2829            None,
2830            false, // quiet
2831        )
2832        .await
2833        .unwrap();
2834
2835        assert_eq!(count, 1, "Should install 1 updated resource");
2836        assert!(project_dir.join(".claude/agents/test-agent.md").exists());
2837        assert!(!project_dir.join(".claude/snippets/test-snippet.md").exists()); // Not updated
2838    }
2839
2840    #[tokio::test]
2841    async fn test_install_updated_resources_quiet_mode() {
2842        let temp_dir = TempDir::new().unwrap();
2843        let project_dir = temp_dir.path();
2844        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2845
2846        // Create test markdown file
2847        let file = temp_dir.path().join("command.md");
2848        std::fs::write(&file, "# Command").unwrap();
2849
2850        // Create lockfile
2851        let mut lockfile = LockFile::new();
2852        let mut command = create_test_locked_resource("test-command", true);
2853        command.path = file.to_string_lossy().to_string();
2854        lockfile.commands.push(command);
2855
2856        let manifest = Manifest::new();
2857
2858        let updates = vec![(
2859            "test-command".to_string(),
2860            None, // source
2861            "v1.0.0".to_string(),
2862            "v2.0.0".to_string(),
2863        )];
2864
2865        let count = install_updated_resources(
2866            &updates,
2867            &lockfile,
2868            &manifest,
2869            project_dir,
2870            &cache,
2871            None,
2872            true, // quiet mode
2873        )
2874        .await
2875        .unwrap();
2876
2877        assert_eq!(count, 1);
2878        assert!(project_dir.join(".claude/commands/test-command.md").exists());
2879    }
2880
2881    #[tokio::test]
2882    async fn test_install_resource_for_parallel() {
2883        let temp_dir = TempDir::new().unwrap();
2884        let project_dir = temp_dir.path();
2885        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2886
2887        // Create a local markdown file
2888        let local_file = temp_dir.path().join("parallel.md");
2889        std::fs::write(&local_file, "# Parallel Test").unwrap();
2890
2891        // Create a locked resource
2892        let mut entry = create_test_locked_resource("parallel-test", true);
2893        entry.path = local_file.to_string_lossy().to_string();
2894
2895        // Install using the parallel function
2896        let result =
2897            install_resource_for_parallel(&entry, project_dir, "agents", &cache, false).await;
2898        assert!(result.is_ok());
2899
2900        // Verify installation
2901        let expected_path = project_dir.join("agents").join("parallel-test.md");
2902        assert!(expected_path.exists());
2903    }
2904
2905    #[tokio::test]
2906    async fn test_install_resource_creates_nested_directories() {
2907        let temp_dir = TempDir::new().unwrap();
2908        let project_dir = temp_dir.path();
2909        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
2910
2911        // Create a local markdown file
2912        let local_file = temp_dir.path().join("nested.md");
2913        std::fs::write(&local_file, "# Nested Test").unwrap();
2914
2915        // Create a locked resource with deeply nested path
2916        let mut entry = create_test_locked_resource("nested-test", true);
2917        entry.path = local_file.to_string_lossy().to_string();
2918        entry.installed_at = "very/deeply/nested/path/resource.md".to_string();
2919
2920        // Install the resource
2921        let result = install_resource(&entry, project_dir, "agents", &cache, false).await;
2922        assert!(result.is_ok());
2923        let (installed, _checksum) = result.unwrap();
2924        assert!(installed, "Should have installed new resource");
2925
2926        // Verify nested directories were created
2927        let expected_path = project_dir.join("very/deeply/nested/path/resource.md");
2928        assert!(expected_path.exists());
2929    }
2930
2931    #[tokio::test]
2932    async fn test_update_gitignore_creates_new_file() {
2933        let temp_dir = TempDir::new().unwrap();
2934        let project_dir = temp_dir.path();
2935
2936        // Create a lockfile with some resources
2937        let mut lockfile = LockFile::new();
2938
2939        // Add agent with installed path
2940        let mut agent = create_test_locked_resource("test-agent", true);
2941        agent.installed_at = ".claude/agents/test-agent.md".to_string();
2942        lockfile.agents.push(agent);
2943
2944        // Add snippet with installed path
2945        let mut snippet = create_test_locked_resource("test-snippet", true);
2946        snippet.installed_at = ".claude/agpm/snippets/test-snippet.md".to_string();
2947        lockfile.snippets.push(snippet);
2948
2949        // Call update_gitignore
2950        let result = update_gitignore(&lockfile, project_dir, true);
2951        assert!(result.is_ok());
2952
2953        // Check that .gitignore was created
2954        let gitignore_path = project_dir.join(".gitignore");
2955        assert!(gitignore_path.exists(), "Gitignore file should be created");
2956
2957        // Check content
2958        let content = std::fs::read_to_string(&gitignore_path).unwrap();
2959        assert!(content.contains("AGPM managed entries"));
2960        assert!(content.contains(".claude/agents/test-agent.md"));
2961        assert!(content.contains(".claude/agpm/snippets/test-snippet.md"));
2962    }
2963
2964    #[tokio::test]
2965    async fn test_update_gitignore_disabled() {
2966        let temp_dir = TempDir::new().unwrap();
2967        let project_dir = temp_dir.path();
2968
2969        let lockfile = LockFile::new();
2970
2971        // Call with disabled flag
2972        let result = update_gitignore(&lockfile, project_dir, false);
2973        assert!(result.is_ok());
2974
2975        // Check that .gitignore was NOT created
2976        let gitignore_path = project_dir.join(".gitignore");
2977        assert!(!gitignore_path.exists(), "Gitignore should not be created when disabled");
2978    }
2979
2980    #[tokio::test]
2981    async fn test_update_gitignore_preserves_user_entries() {
2982        let temp_dir = TempDir::new().unwrap();
2983        let project_dir = temp_dir.path();
2984
2985        // Create .claude directory for resources
2986        let claude_dir = project_dir.join(".claude");
2987        ensure_dir(&claude_dir).unwrap();
2988
2989        // Create existing gitignore with user entries at project root
2990        let gitignore_path = project_dir.join(".gitignore");
2991        let existing_content = "# User comment\n\
2992                               user-file.txt\n\
2993                               *.backup\n\
2994                               # AGPM managed entries - do not edit below this line\n\
2995                               .claude/agents/old-entry.md\n\
2996                               # End of AGPM managed entries\n";
2997        std::fs::write(&gitignore_path, existing_content).unwrap();
2998
2999        // Create lockfile with new resources
3000        let mut lockfile = LockFile::new();
3001        let mut agent = create_test_locked_resource("new-agent", true);
3002        agent.installed_at = ".claude/agents/new-agent.md".to_string();
3003        lockfile.agents.push(agent);
3004
3005        // Update gitignore
3006        let result = update_gitignore(&lockfile, project_dir, true);
3007        assert!(result.is_ok());
3008
3009        // Check that user entries are preserved
3010        let updated_content = std::fs::read_to_string(&gitignore_path).unwrap();
3011        assert!(updated_content.contains("user-file.txt"));
3012        assert!(updated_content.contains("*.backup"));
3013        assert!(updated_content.contains("# User comment"));
3014
3015        // Check that new entries are added
3016        assert!(updated_content.contains(".claude/agents/new-agent.md"));
3017
3018        // Check that old managed entries are replaced
3019        assert!(!updated_content.contains(".claude/agents/old-entry.md"));
3020    }
3021
3022    #[tokio::test]
3023    async fn test_update_gitignore_handles_external_paths() {
3024        let temp_dir = TempDir::new().unwrap();
3025        let project_dir = temp_dir.path();
3026
3027        let mut lockfile = LockFile::new();
3028
3029        // Add resource installed outside .claude
3030        let mut script = create_test_locked_resource("test-script", true);
3031        script.installed_at = "scripts/test.sh".to_string();
3032        lockfile.scripts.push(script);
3033
3034        // Add resource inside .claude
3035        let mut agent = create_test_locked_resource("test-agent", true);
3036        agent.installed_at = ".claude/agents/test.md".to_string();
3037        lockfile.agents.push(agent);
3038
3039        let result = update_gitignore(&lockfile, project_dir, true);
3040        assert!(result.is_ok());
3041
3042        let gitignore_path = project_dir.join(".gitignore");
3043        let content = std::fs::read_to_string(&gitignore_path).unwrap();
3044
3045        // External path should be as-is
3046        assert!(content.contains("scripts/test.sh"));
3047
3048        // Internal path should be as-is
3049        assert!(content.contains(".claude/agents/test.md"));
3050    }
3051
3052    #[tokio::test]
3053    async fn test_update_gitignore_migrates_ccpm_entries() {
3054        let temp_dir = TempDir::new().unwrap();
3055        let project_dir = temp_dir.path();
3056
3057        // Create .claude directory
3058        tokio::fs::create_dir_all(project_dir.join(".claude/agents")).await.unwrap();
3059
3060        // Create a gitignore with legacy CCPM markers
3061        let gitignore_path = project_dir.join(".gitignore");
3062        let legacy_content = r#"# User's custom entries
3063*.backup
3064temp/
3065
3066# CCPM managed entries - do not edit below this line
3067.claude/agents/old-ccpm-agent.md
3068.claude/commands/old-ccpm-command.md
3069# End of CCPM managed entries
3070
3071# More user entries
3072local-config.json
3073"#;
3074        tokio::fs::write(&gitignore_path, legacy_content).await.unwrap();
3075
3076        // Create a new lockfile with AGPM entries
3077        let mut lockfile = LockFile::new();
3078        let mut agent = create_test_locked_resource("new-agent", true);
3079        agent.installed_at = ".claude/agents/new-agent.md".to_string();
3080        lockfile.agents.push(agent);
3081
3082        // Update gitignore
3083        let result = update_gitignore(&lockfile, project_dir, true);
3084        assert!(result.is_ok());
3085
3086        // Read updated content
3087        let updated_content = tokio::fs::read_to_string(&gitignore_path).await.unwrap();
3088
3089        // User entries before CCPM section should be preserved
3090        assert!(updated_content.contains("*.backup"));
3091        assert!(updated_content.contains("temp/"));
3092
3093        // User entries after CCPM section should be preserved
3094        assert!(updated_content.contains("local-config.json"));
3095
3096        // Should have AGPM markers now (not CCPM)
3097        assert!(updated_content.contains("# AGPM managed entries - do not edit below this line"));
3098        assert!(updated_content.contains("# End of AGPM managed entries"));
3099
3100        // Old CCPM markers should be removed
3101        assert!(!updated_content.contains("# CCPM managed entries"));
3102        assert!(!updated_content.contains("# End of CCPM managed entries"));
3103
3104        // Old CCPM entries should be removed
3105        assert!(!updated_content.contains("old-ccpm-agent.md"));
3106        assert!(!updated_content.contains("old-ccpm-command.md"));
3107
3108        // New AGPM entries should be added
3109        assert!(updated_content.contains(".claude/agents/new-agent.md"));
3110    }
3111
3112    #[tokio::test]
3113    async fn test_install_updated_resources_not_found() {
3114        let temp_dir = TempDir::new().unwrap();
3115        let project_dir = temp_dir.path();
3116        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3117
3118        let lockfile = LockFile::new();
3119        let manifest = Manifest::new();
3120
3121        // Try to update a resource that doesn't exist
3122        let updates = vec![(
3123            "non-existent".to_string(),
3124            None, // source
3125            "v1.0.0".to_string(),
3126            "v2.0.0".to_string(),
3127        )];
3128
3129        let count = install_updated_resources(
3130            &updates,
3131            &lockfile,
3132            &manifest,
3133            project_dir,
3134            &cache,
3135            None,
3136            false,
3137        )
3138        .await
3139        .unwrap();
3140
3141        assert_eq!(count, 0, "Should install 0 resources when not found");
3142    }
3143}