agpm_cli/
installer.rs

1//! Shared installation utilities for AGPM resources.
2//!
3//! This module provides common functionality for installing resources from
4//! lockfile entries to the project directory. It's shared between the install
5//! and update commands to avoid code duplication. The module includes both
6//! installation logic and automatic cleanup of removed or relocated artifacts.
7//!
8//! # SHA-Based Parallel Installation Architecture
9//!
10//! The installer uses SHA-based worktrees for optimal parallel resource installation:
11//! - **SHA-based worktrees**: Each unique commit gets one worktree for maximum deduplication
12//! - **Pre-resolved SHAs**: All versions resolved to SHAs before installation begins
13//! - **Concurrency control**: Direct parallelism control via --max-parallel flag
14//! - **Context-aware logging**: Each operation includes dependency name for debugging
15//! - **Efficient cleanup**: Worktrees are managed by the cache layer for reuse
16//! - **Pre-warming**: Worktrees created upfront to minimize installation latency
17//! - **Automatic artifact cleanup**: Removes old files when paths change or dependencies are removed
18//!
19//! # Installation Process
20//!
21//! 1. **SHA validation**: Ensures all resources have valid 40-character commit SHAs
22//! 2. **Worktree pre-warming**: Creates SHA-based worktrees for all unique commits
23//! 3. **Parallel processing**: Installs multiple resources concurrently using dedicated worktrees
24//! 4. **Content validation**: Validates markdown format and structure
25//! 5. **Atomic installation**: Files are written atomically to prevent corruption
26//! 6. **Progress tracking**: Real-time progress updates during parallel operations
27//! 7. **Artifact cleanup**: Automatically removes old files from previous installations when paths change
28//!
29//! # Artifact Cleanup (v0.3.18+)
30//!
31//! The module provides automatic cleanup of obsolete artifacts when:
32//! - **Dependencies are removed**: Files from removed dependencies are deleted
33//! - **Paths are relocated**: Old files are removed when `installed_at` paths change
34//! - **Structure changes**: Empty parent directories are cleaned up recursively
35//!
36//! The cleanup process:
37//! 1. Compares old and new lockfiles to identify removed artifacts
38//! 2. Removes files that exist in the old lockfile but not in the new one
39//! 3. Recursively removes empty parent directories up to `.claude/`
40//! 4. Reports the number of cleaned artifacts to the user
41//!
42//! See [`cleanup_removed_artifacts()`] for implementation details.
43//!
44//! # Performance Characteristics
45//!
46//! - **SHA-based deduplication**: Multiple refs to same commit share one worktree
47//! - **Parallel processing**: Multiple dependencies installed simultaneously
48//! - **Pre-warming optimization**: Worktrees created upfront to minimize latency
49//! - **Parallelism-controlled**: User controls concurrency via --max-parallel flag
50//! - **Atomic operations**: Fast, safe file installation with proper error handling
51//! - **Reduced disk usage**: No duplicate worktrees for identical commits
52//! - **Efficient cleanup**: Minimal overhead for artifact cleanup operations
53
54use crate::utils::progress::{InstallationPhase, MultiPhaseProgress};
55use anyhow::{Context, Result};
56use std::path::PathBuf;
57use std::time::Duration;
58
/// Type alias for complex installation result tuples to improve code readability.
///
/// This type alias simplifies the return type of parallel installation functions
/// that need to return either success information or error details with context.
/// It was introduced in AGPM v0.3.0 to resolve `clippy::type_complexity` warnings
/// while maintaining clear semantics for installation results.
///
/// # Success Variant: `Ok((String, bool, String, AppliedPatches))`
///
/// When installation succeeds, the tuple contains:
/// - `String`: Resource name that was processed
/// - `bool`: Whether the resource was actually installed (`true`) or already up-to-date (`false`)
/// - `String`: SHA-256 checksum of the installed file content
/// - `crate::manifest::patches::AppliedPatches`: Record of the patches applied to the resource
///
/// # Error Variant: `Err((String, anyhow::Error))`
///
/// When installation fails, the tuple contains:
/// - `String`: Resource name that failed to install
/// - `anyhow::Error`: Detailed error information for debugging
///
/// # Usage
///
/// This type is primarily used in parallel installation operations where
/// individual resource results need to be collected and processed:
///
/// ```rust,ignore
/// use agpm_cli::installer::InstallResult;
/// use futures::stream::{self, StreamExt};
///
/// # async fn example() -> anyhow::Result<()> {
/// let results: Vec<InstallResult> = stream::iter(vec!["resource1", "resource2"])
///     .map(|resource_name| async move {
///         // Installation logic here
///         Ok((
///             resource_name.to_string(),
///             true,
///             "sha256:abc123".to_string(),
///             Default::default(),
///         ))
///     })
///     .buffer_unordered(10)
///     .collect()
///     .await;
///
/// // Process results
/// for result in results {
///     match result {
///         Ok((name, installed, checksum, _patches)) => {
///             println!("✓ {}: installed={}, checksum={}", name, installed, checksum);
///         }
///         Err((name, error)) => {
///             eprintln!("✗ {}: {}", name, error);
///         }
///     }
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Design Rationale
///
/// The type alias serves several purposes:
/// - **Clippy compliance**: Resolves `type_complexity` warnings for complex generic types
/// - **Code clarity**: Makes function signatures more readable and self-documenting
/// - **Error context**: Preserves resource name context when installation fails
/// - **Batch processing**: Enables efficient collection and processing of parallel results
type InstallResult = Result<
    (String, bool, String, crate::manifest::patches::AppliedPatches),
    (String, anyhow::Error),
>;
124
125use futures::{
126    future,
127    stream::{self, StreamExt},
128};
129use std::path::Path;
130use std::sync::Arc;
131use tokio::sync::{Mutex, mpsc};
132
133use crate::cache::Cache;
134use crate::core::ResourceIterator;
135use crate::lockfile::{LockFile, LockedResource};
136use crate::manifest::Manifest;
137use crate::markdown::MarkdownFile;
138use crate::utils::fs::{atomic_write, ensure_dir};
139use crate::utils::normalize_path_for_storage;
140use crate::utils::progress::ProgressBar;
141use hex;
142use std::collections::HashSet;
143use std::fs;
144
/// Installation context containing common parameters for resource installation.
///
/// This struct bundles frequently-used installation parameters to reduce
/// function parameter counts and improve code readability. It's used throughout
/// the installation pipeline to pass configuration and context information.
///
/// # Fields
///
/// * `project_dir` - Root directory of the project where resources will be installed
/// * `cache` - Cache instance for managing Git repositories and worktrees
/// * `force_refresh` - Whether to force refresh of cached worktrees
/// * `verbose` - Whether to emit detailed per-resource progress/diagnostic output
/// * `manifest` - Optional reference to the project manifest for template context
/// * `lockfile` - Optional reference to the lockfile for template context
/// * `project_patches` - Optional project-level patches from agpm.toml
/// * `private_patches` - Optional user-level patches from agpm.private.toml
/// * `gitignore_lock` - Optional mutex serializing concurrent `.gitignore` updates
/// * `max_content_file_size` - Optional upper bound (bytes) on files read during template rendering
pub struct InstallContext<'a> {
    pub project_dir: &'a Path,
    pub cache: &'a Cache,
    pub force_refresh: bool,
    pub verbose: bool,
    pub manifest: Option<&'a Manifest>,
    pub lockfile: Option<&'a Arc<LockFile>>,
    pub project_patches: Option<&'a crate::manifest::ManifestPatches>,
    pub private_patches: Option<&'a crate::manifest::ManifestPatches>,
    pub gitignore_lock: Option<&'a Arc<Mutex<()>>>,
    pub max_content_file_size: Option<u64>,
}
172
impl<'a> InstallContext<'a> {
    /// Create a new installation context.
    ///
    /// All parameters map 1:1 onto the struct fields; see [`InstallContext`]
    /// for the meaning of each field. The optional parameters (`manifest`,
    /// `lockfile`, the patch maps, `gitignore_lock`, `max_content_file_size`)
    /// may be `None` when the corresponding feature is not in use.
    // Mirrors the struct's field list; the arity is inherent to the context bundle.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        project_dir: &'a Path,
        cache: &'a Cache,
        force_refresh: bool,
        verbose: bool,
        manifest: Option<&'a Manifest>,
        lockfile: Option<&'a Arc<LockFile>>,
        project_patches: Option<&'a crate::manifest::ManifestPatches>,
        private_patches: Option<&'a crate::manifest::ManifestPatches>,
        gitignore_lock: Option<&'a Arc<Mutex<()>>>,
        max_content_file_size: Option<u64>,
    ) -> Self {
        Self {
            project_dir,
            cache,
            force_refresh,
            verbose,
            manifest,
            lockfile,
            project_patches,
            private_patches,
            gitignore_lock,
            max_content_file_size,
        }
    }
}
202
203/// Read a file with retry logic to handle cross-process filesystem cache coherency issues.
204///
205/// This function wraps `tokio::fs::read_to_string` with retry logic to handle cases where
206/// files created by Git subprocesses are not immediately visible to the parent Rust process
207/// due to filesystem cache propagation delays. This is particularly important in CI
208/// environments with network-attached storage where cache coherency delays can be significant.
209///
210/// # Arguments
211///
212/// * `path` - The file path to read
213///
214/// # Returns
215///
216/// Returns the file content as a `String`, or an error if the file cannot be read after retries.
217///
218/// # Retry Strategy
219///
220/// - Initial delay: 10ms
221/// - Max delay: 500ms
222/// - Factor: 2x (exponential backoff)
223/// - Max attempts: 10
224/// - Total max time: ~10 seconds
225///
226/// Only `NotFound` errors are retried, as these indicate cache coherency issues.
227/// Other errors (permissions, I/O errors) fail immediately by returning Ok to bypass retry.
228async fn read_with_cache_retry(path: &Path) -> Result<String> {
229    use std::io;
230
231    let retry_strategy = tokio_retry::strategy::ExponentialBackoff::from_millis(10)
232        .max_delay(Duration::from_millis(500))
233        .factor(2)
234        .take(10);
235
236    let path_buf = path.to_path_buf();
237
238    tokio_retry::Retry::spawn(retry_strategy, || {
239        let path = path_buf.clone();
240        async move {
241            tokio::fs::read_to_string(&path).await.map_err(|e| {
242                if e.kind() == io::ErrorKind::NotFound {
243                    tracing::debug!(
244                        "File not yet visible (likely cache coherency issue): {}",
245                        path.display()
246                    );
247                    format!("File not found: {}", path.display())
248                } else {
249                    // Non-retriable error - return error message that will fail fast
250                    format!("I/O error (non-retriable): {}", e)
251                }
252            })
253        }
254    })
255    .await
256    .map_err(|e| anyhow::anyhow!("Failed to read resource file: {}: {}", path.display(), e))
257}
258
259/// Install a single resource from a lock entry using worktrees for parallel safety.
260///
261/// This function installs a resource specified by a lockfile entry to the project
262/// directory. It uses Git worktrees through the cache layer to enable safe parallel
263/// operations without conflicts between concurrent installations.
264///
265/// # Arguments
266///
267/// * `entry` - The locked resource to install containing source and version info
268/// * `project_dir` - The root project directory where resources should be installed
269/// * `resource_dir` - The subdirectory name for this resource type (e.g., "agents")
270/// * `cache` - The cache instance for managing Git repositories and worktrees
271///
272/// # Returns
273///
274/// Returns `Ok((installed, checksum))` where:
275/// - `installed` is `true` if the resource was actually installed (new or updated),
276///   `false` if the resource already existed and was unchanged
277/// - `checksum` is the SHA-256 hash of the installed file content
278///
279/// # Worktree Usage
280///
281/// For remote resources, this function:
282/// 1. Uses `cache.get_or_clone_source_worktree_with_context()` to get a worktree
283/// 2. Each dependency gets its own isolated worktree for parallel safety
284/// 3. Worktrees are automatically managed and reused by the cache layer
285/// 4. Context (dependency name) is provided for debugging parallel operations
286///
287/// # Installation Process
288///
289/// 1. **Path resolution**: Determines destination based on `installed_at` or defaults
290/// 2. **Repository access**: Gets worktree from cache (for remote) or validates local path
291/// 3. **Content validation**: Verifies markdown format and structure
292/// 4. **Atomic write**: Installs file atomically to prevent corruption
293///
294/// # Examples
295///
296/// ```rust,no_run
297/// use agpm_cli::installer::{install_resource, InstallContext};
298/// use agpm_cli::lockfile::LockedResource;
299/// use agpm_cli::cache::Cache;
300/// use agpm_cli::core::ResourceType;
301/// use std::path::Path;
302///
303/// # async fn example() -> anyhow::Result<()> {
304/// let cache = Cache::new()?;
305/// let entry = LockedResource {
306///     name: "example-agent".to_string(),
307///     source: Some("community".to_string()),
308///     url: Some("https://github.com/example/repo.git".to_string()),
309///     path: "agents/example.md".to_string(),
310///     version: Some("v1.0.0".to_string()),
311///     resolved_commit: Some("abc123".to_string()),
312///     checksum: "sha256:...".to_string(),
313///     installed_at: ".claude/agents/example.md".to_string(),
314///     dependencies: vec![],
315///     resource_type: ResourceType::Agent,
316///     tool: Some("claude-code".to_string()),
317///     manifest_alias: None,
318///     applied_patches: std::collections::HashMap::new(),
319///     install: None,
320/// };
321///
322/// let context = InstallContext::new(Path::new("."), &cache, false, false, None, None, None, None, None, None);
323/// let (installed, checksum, _patches) = install_resource(&entry, "agents", &context).await?;
324/// if installed {
325///     println!("Resource was installed with checksum: {}", checksum);
326/// } else {
327///     println!("Resource already existed and was unchanged");
328/// }
329/// # Ok(())
330/// # }
331/// ```
332///
333/// # Error Handling
334///
335/// Returns an error if:
336/// - The source repository cannot be accessed or cloned
337/// - The specified file path doesn't exist in the repository
338/// - The file is not valid markdown format
339/// - File system operations fail (permissions, disk space)
340/// - Worktree creation fails due to Git issues
341pub async fn install_resource(
342    entry: &LockedResource,
343    resource_dir: &str,
344    context: &InstallContext<'_>,
345) -> Result<(bool, String, crate::manifest::patches::AppliedPatches)> {
346    // Determine destination path
347    let dest_path = if entry.installed_at.is_empty() {
348        context.project_dir.join(resource_dir).join(format!("{}.md", entry.name))
349    } else {
350        context.project_dir.join(&entry.installed_at)
351    };
352
353    // Check if file already exists and compare checksums
354    let existing_checksum = if dest_path.exists() {
355        // Use blocking task for checksum calculation to avoid blocking the async runtime
356        let path = dest_path.clone();
357        tokio::task::spawn_blocking(move || LockFile::compute_checksum(&path)).await??.into()
358    } else {
359        None
360    };
361
362    let new_content = if let Some(source_name) = &entry.source {
363        let url = entry
364            .url
365            .as_ref()
366            .ok_or_else(|| anyhow::anyhow!("Resource {} has no URL", entry.name))?;
367
368        // Check if this is a local directory source (no SHA or empty SHA)
369        let is_local_source = entry.resolved_commit.as_deref().is_none_or(str::is_empty);
370
371        let cache_dir = if is_local_source {
372            // Local directory source - use the URL as the path directly
373            PathBuf::from(url)
374        } else {
375            // Git-based resource - use SHA-based worktree creation
376            let sha = entry.resolved_commit.as_deref().ok_or_else(|| {
377                anyhow::anyhow!("Resource {} missing resolved commit SHA. Run 'agpm update' to regenerate lockfile.", entry.name)
378            })?;
379
380            // Validate SHA format
381            if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
382                return Err(anyhow::anyhow!(
383                    "Invalid SHA '{}' for resource {}. Expected 40 hex characters.",
384                    sha,
385                    entry.name
386                ));
387            }
388
389            let mut cache_dir = context
390                .cache
391                .get_or_create_worktree_for_sha(source_name, url, sha, Some(&entry.name))
392                .await?;
393
394            if context.force_refresh {
395                let _ = context.cache.cleanup_worktree(&cache_dir).await;
396                cache_dir = context
397                    .cache
398                    .get_or_create_worktree_for_sha(source_name, url, sha, Some(&entry.name))
399                    .await?;
400            }
401
402            cache_dir
403        };
404
405        // Read the content from the source (with cache coherency retry)
406        let source_path = cache_dir.join(&entry.path);
407        let content = read_with_cache_retry(&source_path).await?;
408
409        // Validate markdown - this will emit a warning if frontmatter is invalid but won't fail
410        MarkdownFile::parse_with_context(&content, Some(&source_path.display().to_string()))?;
411
412        content
413    } else {
414        // Local resource - copy directly from project directory or absolute path
415        let source_path = {
416            let candidate = Path::new(&entry.path);
417            if candidate.is_absolute() {
418                candidate.to_path_buf()
419            } else {
420                context.project_dir.join(candidate)
421            }
422        };
423
424        if !source_path.exists() {
425            return Err(anyhow::anyhow!(
426                "Local file '{}' not found. Expected at: {}",
427                entry.path,
428                source_path.display()
429            ));
430        }
431
432        let content = tokio::fs::read_to_string(&source_path)
433            .await
434            .with_context(|| format!("Failed to read resource file: {}", source_path.display()))?;
435
436        // Validate markdown - this will emit a warning if frontmatter is invalid but won't fail
437        MarkdownFile::parse_with_context(&content, Some(&source_path.display().to_string()))?;
438
439        content
440    };
441
442    // Apply patches if provided (before templating)
443    let empty_patches = std::collections::HashMap::new();
444    let (patched_content, applied_patches) =
445        if context.project_patches.is_some() || context.private_patches.is_some() {
446            use crate::manifest::patches::apply_patches_to_content_with_origin;
447
448            // Look up patches for this specific resource
449            let resource_type = entry.resource_type.to_plural();
450            let lookup_name = entry.manifest_alias.as_ref().unwrap_or(&entry.name);
451
452            let project_patch_data = context
453                .project_patches
454                .and_then(|patches| patches.get(resource_type, lookup_name))
455                .unwrap_or(&empty_patches);
456
457            let private_patch_data = context
458                .private_patches
459                .and_then(|patches| patches.get(resource_type, lookup_name))
460                .unwrap_or(&empty_patches);
461
462            let file_path = entry.installed_at.as_str();
463            apply_patches_to_content_with_origin(
464                &new_content,
465                file_path,
466                project_patch_data,
467                private_patch_data,
468            )
469            .with_context(|| format!("Failed to apply patches to resource {}", entry.name))?
470        } else {
471            (new_content.clone(), crate::manifest::patches::AppliedPatches::default())
472        };
473
474    // Apply templating to markdown files if enabled in frontmatter (after patching)
475    // Track whether templating was applied and the context digest for cache invalidation
476    let (final_content, template_context_digest) = if entry.path.ends_with(".md") {
477        // Check for opt-in in frontmatter
478        let templating_enabled = if let Ok(md_file) = MarkdownFile::parse(&patched_content) {
479            md_file
480                .metadata
481                .as_ref()
482                .and_then(|m| m.extra.get("agpm"))
483                .and_then(|agpm| agpm.get("templating"))
484                .and_then(|v| v.as_bool())
485                .unwrap_or(false)
486        } else {
487            false
488        };
489
490        if !templating_enabled {
491            tracing::debug!("Templating not enabled via frontmatter for {}", entry.name);
492            (patched_content, None)
493        } else if patched_content.contains("{{")
494            || patched_content.contains("{%")
495            || patched_content.contains("{#")
496        {
497            // Check if content contains template syntax
498            tracing::debug!("Template syntax detected in {}, rendering...", entry.name);
499
500            // Build template context if we have lockfile
501            if let Some(lockfile) = context.lockfile {
502                use crate::templating::{TemplateContextBuilder, TemplateRenderer};
503
504                // Determine resource type from entry
505                let resource_type = entry.resource_type;
506
507                // Extract project config from manifest if available
508                let project_config = context.manifest.and_then(|m| m.project.clone());
509
510                // Build context
511                let template_context_builder = TemplateContextBuilder::new(
512                    lockfile.clone(),
513                    project_config,
514                    Arc::new(context.cache.clone()),
515                    context.project_dir.to_path_buf(),
516                );
517
518                // Compute context digest for cache invalidation
519                // This ensures that changes to dependency versions invalidate the cache
520                let context_digest =
521                    template_context_builder.compute_context_digest().with_context(|| {
522                        format!("Failed to compute template context digest for {}", entry.name)
523                    })?;
524
525                let template_context = template_context_builder
526                    .build_context(&entry.name, resource_type)
527                    .await
528                    .with_context(|| {
529                        format!("Failed to build template context for {}", entry.name)
530                    })?;
531
532                // Show verbose output before rendering
533                if context.verbose {
534                    let num_resources = template_context
535                        .get("resources")
536                        .and_then(|v| v.as_object())
537                        .map(|o| o.len())
538                        .unwrap_or(0);
539                    let num_dependencies = template_context
540                        .get("dependencies")
541                        .and_then(|v| v.as_object())
542                        .map(|o| o.len())
543                        .unwrap_or(0);
544
545                    tracing::info!("📝 Rendering template: {}", entry.path);
546                    tracing::info!(
547                        "   Context: {} resources, {} dependencies",
548                        num_resources,
549                        num_dependencies
550                    );
551                    tracing::debug!("   Context digest: {}", context_digest);
552                }
553
554                // Create renderer and render template
555                let mut renderer = TemplateRenderer::new(
556                    true,
557                    context.project_dir.to_path_buf(),
558                    context.max_content_file_size,
559                )
560                .with_context(|| "Failed to create template renderer")?;
561
562                let rendered = renderer
563                    .render_template(&patched_content, &template_context)
564                    .map_err(|e| {
565                        // Log detailed error with full error chain
566                        tracing::error!(
567                            "Template rendering failed for resource '{}' ({}): {}",
568                            entry.name,
569                            entry.path,
570                            e
571                        );
572                        // Log error chain if available
573                        for (i, cause) in e.chain().skip(1).enumerate() {
574                            tracing::error!("  Caused by [{}]: {}", i + 1, cause);
575                        }
576                        e
577                    })
578                    .with_context(|| {
579                        format!(
580                            "Failed to render template for '{}' (source: {}, path: {})",
581                            entry.name,
582                            entry.source.as_deref().unwrap_or("local"),
583                            entry.path
584                        )
585                    })?;
586
587                tracing::debug!("Successfully rendered template for {}", entry.name);
588
589                // Show verbose output after rendering
590                if context.verbose {
591                    let size_bytes = rendered.len();
592                    let size_str = if size_bytes < 1024 {
593                        format!("{} B", size_bytes)
594                    } else if size_bytes < 1024 * 1024 {
595                        format!("{:.1} KB", size_bytes as f64 / 1024.0)
596                    } else {
597                        format!("{:.1} MB", size_bytes as f64 / (1024.0 * 1024.0))
598                    };
599                    tracing::info!("   Output: {} ({})", dest_path.display(), size_str);
600                    tracing::info!("✅ Template rendered successfully");
601                }
602
603                (rendered, Some(context_digest))
604            } else {
605                tracing::warn!(
606                    "Template syntax found in {} but manifest/lockfile not available, skipping templating",
607                    entry.name
608                );
609                (patched_content, None)
610            }
611        } else {
612            tracing::debug!("No template syntax in {}, skipping templating", entry.name);
613            (patched_content, None)
614        }
615    } else {
616        tracing::debug!("Not a markdown file: {}", entry.path);
617        (patched_content, None)
618    };
619
620    // Calculate checksum of final content (after patching and templating)
621    // Include template context digest to ensure cache invalidation when dependencies change
622    let new_checksum = {
623        use sha2::{Digest, Sha256};
624        let mut hasher = Sha256::new();
625        hasher.update(final_content.as_bytes());
626
627        // Include context digest if templating was applied
628        // This ensures that changes to dependency versions trigger re-rendering
629        if let Some(ref digest) = template_context_digest {
630            hasher.update(digest.as_bytes());
631        }
632
633        let hash = hasher.finalize();
634        format!("sha256:{}", hex::encode(hash))
635    };
636
637    // Check if content has changed by comparing checksums
638    let content_changed = existing_checksum.as_ref() != Some(&new_checksum);
639
640    // Check if we should actually write the file to disk
641    let should_install = entry.install.unwrap_or(true);
642
643    let actually_installed = if should_install && content_changed {
644        // Only write if install=true and content is different or file doesn't exist
645        if let Some(parent) = dest_path.parent() {
646            ensure_dir(parent)?;
647        }
648
649        // Add to .gitignore BEFORE writing file to prevent accidental commits
650        if let Some(lock) = context.gitignore_lock {
651            // Calculate relative path for gitignore
652            let relative_path = dest_path
653                .strip_prefix(context.project_dir)
654                .unwrap_or(&dest_path)
655                .to_string_lossy()
656                .to_string();
657
658            add_path_to_gitignore(context.project_dir, &relative_path, lock)
659                .await
660                .with_context(|| format!("Failed to add {} to .gitignore", relative_path))?;
661        }
662
663        atomic_write(&dest_path, final_content.as_bytes())
664            .with_context(|| format!("Failed to install resource to {}", dest_path.display()))?;
665
666        true
667    } else if !should_install {
668        // install=false: content-only dependency, don't write file
669        tracing::debug!(
670            "Skipping file write for content-only dependency: {} (install=false)",
671            entry.name
672        );
673        false
674    } else {
675        // install=true but content unchanged
676        false
677    };
678
679    Ok((actually_installed, new_checksum, applied_patches))
680}
681
/// Install a single resource with progress bar updates for user feedback.
///
/// This function wraps [`install_resource`] with progress bar integration to provide
/// real-time feedback during resource installation. It updates the progress bar
/// message before delegating to the core installation logic.
///
/// # Arguments
///
/// * `entry` - The locked resource containing installation metadata
/// * `resource_dir` - Subdirectory name for this resource type (e.g., "agents")
/// * `context` - Shared installation context (project dir, cache, patches, template data)
/// * `pb` - Progress bar to update with installation status
///
/// # Returns
///
/// Returns a tuple of:
/// - `bool`: Whether the resource was actually installed (`true` for new/updated, `false` for unchanged)
/// - `String`: SHA-256 checksum of the installed content
/// - `AppliedPatches`: Record of the patches applied during installation
///
/// # Progress Integration
///
/// The function automatically sets the progress bar message to indicate which
/// resource is currently being installed. This provides users with real-time
/// feedback about installation progress.
///
/// # Examples
///
/// ```rust,no_run
/// use agpm_cli::installer::{install_resource_with_progress, InstallContext};
/// use agpm_cli::lockfile::LockedResource;
/// use agpm_cli::cache::Cache;
/// use agpm_cli::core::ResourceType;
/// use agpm_cli::utils::progress::ProgressBar;
/// use std::path::Path;
///
/// # async fn example() -> anyhow::Result<()> {
/// let cache = Cache::new()?;
/// let pb = ProgressBar::new(1);
/// let entry = LockedResource {
///     name: "example-agent".to_string(),
///     source: Some("community".to_string()),
///     url: Some("https://github.com/example/repo.git".to_string()),
///     path: "agents/example.md".to_string(),
///     version: Some("v1.0.0".to_string()),
///     resolved_commit: Some("abc123".to_string()),
///     checksum: "sha256:...".to_string(),
///     installed_at: ".claude/agents/example.md".to_string(),
///     dependencies: vec![],
///     resource_type: ResourceType::Agent,
///     tool: Some("claude-code".to_string()),
///     manifest_alias: None,
///     applied_patches: std::collections::HashMap::new(),
///     install: None,
/// };
///
/// let context = InstallContext::new(Path::new("."), &cache, false, false, None, None, None, None, None, None);
/// let (installed, checksum, _patches) = install_resource_with_progress(
///     &entry,
///     "agents",
///     &context,
///     &pb
/// ).await?;
///
/// pb.inc(1);
/// # Ok(())
/// # }
/// ```
///
/// # Errors
///
/// Returns the same errors as [`install_resource`], including:
/// - Repository access failures
/// - File system operation errors
/// - Invalid markdown content
/// - Git worktree creation failures
pub async fn install_resource_with_progress(
    entry: &LockedResource,
    resource_dir: &str,
    context: &InstallContext<'_>,
    pb: &ProgressBar,
) -> Result<(bool, String, crate::manifest::patches::AppliedPatches)> {
    pb.set_message(format!("Installing {}", entry.name));
    install_resource(entry, resource_dir, context).await
}
768
769/// Install multiple resources in parallel using worktree-based concurrency.
770///
771/// This function performs parallel installation of all resources defined in the
772/// lockfile, using Git worktrees to enable safe concurrent access to repositories.
773/// Each dependency gets its own isolated worktree to prevent conflicts.
774///
775/// # Arguments
776///
777/// * `lockfile` - The lockfile containing all resources to install
778/// * `manifest` - The project manifest for configuration
779/// * `project_dir` - The root project directory for installation
780/// * `pb` - Progress bar for user feedback
781/// * `cache` - Cache instance managing Git repositories and worktrees
782///
783/// # Parallel Architecture
784///
785/// The function uses several layers of concurrency control:
786/// - **Tokio tasks**: Each resource installation runs in its own async task
787/// - **Unlimited task concurrency**: Uses `buffer_unordered(usize::MAX)`
788/// - **Parallelism control**: --max-parallel flag controls concurrent operations
789/// - **Worktree isolation**: Each dependency gets its own worktree for safety
790///
791/// # Performance Optimizations
792///
793/// - **Stream processing**: Uses `futures::stream` for efficient task scheduling
794/// - **Context logging**: Each operation includes dependency name for debugging
795/// - **Worktree reuse**: Cache layer optimizes Git repository access
796/// - **Batched progress**: Updates progress atomically to reduce contention
797/// - **Deferred cleanup**: Worktrees are left for reuse, cleaned up by cache commands
798///
799/// # Concurrency Control Flow
800///
801/// ```text
802/// Lockfile Resources
803///       ↓
804/// Async Task Stream (unlimited concurrency)
805///       ↓
806/// install_resource_for_parallel() calls
807///       ↓
808/// Cache worktree operations (parallelism-controlled)
809///       ↓
810/// Git operations (controlled by --max-parallel)
811/// ```
812///
813/// # Examples
814///
815/// ```rust,no_run
816/// use agpm_cli::installer::{install_resources_parallel, InstallContext};
817/// use agpm_cli::lockfile::LockFile;
818/// use agpm_cli::manifest::Manifest;
819/// use agpm_cli::cache::Cache;
820/// use agpm_cli::utils::progress::ProgressBar;
821/// use std::path::Path;
822/// use std::sync::Arc;
823///
824/// # async fn example() -> anyhow::Result<()> {
825/// let lockfile = Arc::new(LockFile::load(Path::new("agpm.lock"))?);
826/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
827/// let cache = Cache::new()?;
828///
829/// // Count total resources for progress bar
830/// let total = lockfile.agents.len() + lockfile.snippets.len()
831///     + lockfile.commands.len() + lockfile.scripts.len()
832///     + lockfile.hooks.len() + lockfile.mcp_servers.len();
833/// let pb = ProgressBar::new(total as u64);
834///
835/// let context = InstallContext::new(Path::new("."), &cache, false, false, Some(&manifest), Some(&lockfile), None, None, None, None);
836/// let count = install_resources_parallel(
837///     &lockfile,
838///     &manifest,
839///     &context,
840///     &pb,
841///     None,
842/// ).await?;
843///
844/// println!("Installed {} resources", count);
845/// # Ok(())
846/// # }
847/// ```
848///
849/// # Error Handling
850///
851/// - **Atomic failure**: If any resource fails, the entire operation fails
852/// - **Detailed context**: Errors include specific resource and source information
853/// - **Progress preservation**: Progress updates continue even on partial failures
854/// - **Resource cleanup**: Failed operations don't leave partial state
855///
856/// # Return Value
857///
858/// Returns the total number of resources successfully installed.
859// Removed install_resources_parallel - use install_resources with MultiPhaseProgress instead
860#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
861pub async fn install_resources_parallel(
862    lockfile: &Arc<LockFile>,
863    manifest: &Manifest,
864    install_ctx: &InstallContext<'_>,
865    pb: &ProgressBar,
866    max_concurrency: Option<usize>,
867) -> Result<usize> {
868    let project_dir = install_ctx.project_dir;
869    let cache = install_ctx.cache;
870    let force_refresh = install_ctx.force_refresh;
871    // Collect all entries to install using ResourceIterator
872    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);
873
874    if all_entries.is_empty() {
875        return Ok(0);
876    }
877
878    // Pre-warm the cache by creating all needed worktrees upfront
879    // This allows maximum parallelism for Git operations
880    // Update the progress bar message to indicate preparation phase
881    let total = all_entries.len();
882    pb.set_message("Preparing resources");
883
884    // Collect unique (source, url, sha) triples to pre-create worktrees
885    let mut unique_worktrees = HashSet::new();
886    for (entry, _) in &all_entries {
887        tracing::debug!(
888            "Checking entry '{}' (type: {:?}): source={:?}, url={:?}, sha={:?}",
889            entry.name,
890            entry.resource_type,
891            entry.source,
892            entry.url.as_deref().map(|u| &u[..60.min(u.len())]),
893            entry.resolved_commit.as_deref().map(|s| &s[..8.min(s.len())])
894        );
895
896        if let Some(source_name) = &entry.source
897            && let Some(url) = &entry.url
898        {
899            // Only pre-warm if we have a valid SHA
900            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
901                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
902            }) {
903                tracing::info!(
904                    "Adding worktree to pre-warm set: source={}, sha={}",
905                    source_name,
906                    &sha[..8]
907                );
908                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
909            } else {
910                tracing::warn!(
911                    "Skipping worktree pre-warm for '{}': invalid or missing SHA",
912                    entry.name
913                );
914            }
915        }
916    }
917
918    tracing::info!("Pre-warming {} unique worktrees", unique_worktrees.len());
919
920    // Pre-create all worktrees in parallel
921    if !unique_worktrees.is_empty() {
922        let worktree_futures: Vec<_> = unique_worktrees
923            .into_iter()
924            .map(|(source, url, sha)| {
925                let cache = cache.clone();
926                async move {
927                    cache
928                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
929                        .await
930                        .map_err(|e| {
931                            tracing::error!(
932                                "Failed to create worktree for {}/{}: {}",
933                                source,
934                                &sha[..8.min(sha.len())],
935                                e
936                            );
937                            e
938                        })
939                }
940            })
941            .collect();
942
943        // Execute all worktree creations in parallel - fail fast on first error
944        future::try_join_all(worktree_futures).await.context("Failed to pre-warm worktrees")?;
945    }
946
947    // Create thread-safe progress tracking
948    let installed_count = Arc::new(Mutex::new(0));
949    let pb = Arc::new(pb.clone());
950
951    // Update message for installation phase
952    pb.set_message(format!("Installing 0/{total} resources"));
953
954    let shared_cache = Arc::new(cache.clone());
955    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
956
957    let results: Vec<InstallResult> = stream::iter(all_entries)
958        .map(|(entry, resource_dir)| {
959            let entry = entry.clone();
960            let project_dir = project_dir.to_path_buf();
961            let resource_dir = resource_dir.to_string();
962            let installed_count = Arc::clone(&installed_count);
963            let pb = Arc::clone(&pb);
964            let cache = Arc::clone(&shared_cache);
965
966            async move {
967                let context = InstallContext::new(
968                    &project_dir,
969                    cache.as_ref(),
970                    force_refresh,
971                    false, // verbose - will be threaded through from CLI
972                    Some(manifest),
973                    Some(lockfile),
974                    install_ctx.project_patches,
975                    install_ctx.private_patches,
976                    install_ctx.gitignore_lock,
977                    install_ctx.max_content_file_size,
978                );
979                let res = install_resource_for_parallel(&entry, &resource_dir, &context).await;
980
981                match res {
982                    Ok((actually_installed, checksum, applied_patches)) => {
983                        if actually_installed {
984                            let mut count = installed_count.lock().await;
985                            *count += 1;
986                        }
987                        let count = *installed_count.lock().await;
988                        pb.set_message(format!("Installing {count}/{total} resources"));
989                        pb.inc(1);
990                        Ok((entry.name.clone(), actually_installed, checksum, applied_patches))
991                    }
992                    Err(err) => Err((entry.name.clone(), err)),
993                }
994            }
995        })
996        .buffer_unordered(concurrency)
997        .collect()
998        .await;
999
1000    let mut errors = Vec::new();
1001    for result in results {
1002        match result {
1003            Ok((_name, _installed, _checksum, _applied_patches)) => {
1004                // Old function doesn't return checksums or patches
1005            }
1006            Err((name, error)) => {
1007                errors.push((name, error));
1008            }
1009        }
1010    }
1011
1012    if !errors.is_empty() {
1013        let error_msgs: Vec<String> =
1014            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
1015        return Err(anyhow::anyhow!(
1016            "Failed to install {} resources:\n{}",
1017            error_msgs.len(),
1018            error_msgs.join("\n")
1019        ));
1020    }
1021
1022    let final_count = *installed_count.lock().await;
1023    Ok(final_count)
1024}
1025
/// Install a single resource in a thread-safe manner for parallel execution.
///
/// This function provides a thin wrapper around [`install_resource`] specifically
/// designed for use in parallel installation streams. It ensures thread-safe
/// operation when called concurrently from multiple async tasks.
///
/// # Thread Safety
///
/// While this function is just a wrapper, it's used within parallel streams where:
/// - Each resource gets its own isolated Git worktree via the cache layer
/// - File operations are atomic to prevent corruption
/// - Progress tracking is coordinated through shared state
///
/// # Arguments
///
/// * `entry` - The locked resource to install
/// * `resource_dir` - Resource type subdirectory (e.g., "agents", "snippets")
/// * `context` - Installation context bundling the project directory, cache,
///   manifest/lockfile references, patch configuration, and refresh flags
///
/// # Returns
///
/// Returns a tuple of:
/// - `bool`: Whether installation actually occurred (`true` for new/changed, `false` for up-to-date)
/// - `String`: SHA-256 checksum of the installed file content
/// - `AppliedPatches`: Record of the patches applied during installation
///
/// # Usage in Parallel Streams
///
/// This function is typically used within futures streams for concurrent processing:
///
/// ```rust,ignore
/// use futures::stream::{self, StreamExt};
/// # use agpm_cli::installer::InstallContext;
/// # use agpm_cli::lockfile::LockedResource;
/// # use agpm_cli::cache::Cache;
/// # use std::path::Path;
///
/// # async fn example(entries: Vec<LockedResource>, cache: Cache) -> anyhow::Result<()> {
/// let results: Vec<_> = stream::iter(entries)
///     .map(|entry| {
///         let cache = cache.clone();
///         async move {
///             let context = InstallContext::new(
///                 Path::new("."), &cache, false, false,
///                 None, None, None, None, None, None,
///             );
///             install_resource_for_parallel(&entry, "agents", &context).await
///         }
///     })
///     .buffer_unordered(10) // Process up to 10 resources concurrently
///     .collect()
///     .await;
/// # Ok(())
/// # }
/// ```
///
/// # Errors
///
/// Returns the same errors as [`install_resource`]:
/// - Git repository access failures
/// - File system permission or space issues
/// - Invalid markdown file format
/// - Worktree creation conflicts
async fn install_resource_for_parallel(
    entry: &LockedResource,
    resource_dir: &str,
    context: &InstallContext<'_>,
) -> Result<(bool, String, crate::manifest::patches::AppliedPatches)> {
    install_resource(entry, resource_dir, context).await
}
1099
/// Snapshot of the state of a parallel installation operation.
///
/// Instances of this type are emitted through async channels so that user
/// interface components can render live progress while resources install
/// concurrently. Each snapshot records which dependencies are in flight and
/// how far the overall operation has advanced.
///
/// # Usage
///
/// A typical setup pairs an installation task sending snapshots with a UI task
/// receiving them:
///
/// ```rust,no_run
/// use agpm_cli::installer::InstallProgress;
/// use tokio::sync::mpsc;
///
/// # async fn example() -> anyhow::Result<()> {
/// let (tx, mut rx) = mpsc::unbounded_channel::<InstallProgress>();
///
/// // Producer: the installer reports its current state.
/// tokio::spawn(async move {
///     let snapshot = InstallProgress {
///         active_deps: vec!["agent1".to_string(), "tool2".to_string()],
///         completed_count: 3,
///         total_count: 10,
///     };
///     let _ = tx.send(snapshot);
/// });
///
/// // Consumer: render each snapshot as it arrives.
/// while let Some(progress) = rx.recv().await {
///     println!("Active: {:?}, Progress: {}/{}",
///         progress.active_deps,
///         progress.completed_count,
///         progress.total_count
///     );
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Design Purpose
///
/// Beyond a simple percentage, these snapshots expose exactly which resources
/// are being processed at any moment, which is valuable both for rich progress
/// displays and for debugging stalled or slow parallel operations.
#[derive(Debug, Clone)]
pub struct InstallProgress {
    /// Dependencies in flight at the moment this snapshot was produced.
    ///
    /// The contents shift over time: names are added as tasks start and
    /// removed as they finish.
    pub active_deps: Vec<String>,

    /// Number of dependencies that have finished processing so far.
    ///
    /// Counts every finished attempt — successful installs and failures alike.
    pub completed_count: usize,

    /// Total number of dependencies covered by this operation.
    ///
    /// Fixed for the lifetime of the installation; use it together with
    /// `completed_count` to compute a completion ratio.
    pub total_count: usize,
}
1179
1180/// Install resources in parallel with detailed progress updates via async channels.
1181///
1182/// This function performs parallel resource installation while providing real-time
1183/// progress updates through an async channel. It's designed for UI implementations
1184/// that need detailed visibility into parallel installation operations, showing
1185/// which specific dependencies are being processed at any given time.
1186///
1187/// # Arguments
1188///
1189/// * `lockfile` - Lockfile containing all resources to install
1190/// * `manifest` - Project manifest providing configuration context
1191/// * `project_dir` - Root directory for resource installation
1192/// * `cache` - Cache instance for Git repository and worktree management
1193/// * `force_refresh` - Whether to force refresh of cached repositories
1194/// * `max_concurrency` - Optional limit on concurrent operations (`None` = unlimited)
1195/// * `progress_sender` - Optional channel sender for progress updates
1196///
1197/// # Progress Updates
1198///
1199/// When `progress_sender` is provided, the function sends [`InstallProgress`]
1200/// updates that include:
1201/// - Active dependencies currently being processed
1202/// - Completed count (successful and failed installations)
1203/// - Total dependency count for completion calculation
1204///
1205/// Updates are sent at key points:
1206/// - When a dependency starts processing (added to `active_deps`)
1207/// - When a dependency completes (removed from `active_deps`, `completed_count` incremented)
1208///
1209/// # Channel-Based Architecture
1210///
1211/// ```rust,ignore
1212/// use agpm_cli::installer::{install_resources_parallel_with_progress, InstallProgress};
1213/// use agpm_cli::lockfile::LockFile;
1214/// use agpm_cli::manifest::Manifest;
1215/// use agpm_cli::cache::Cache;
1216/// use tokio::sync::mpsc;
1217/// use std::path::Path;
1218///
1219/// # async fn example() -> anyhow::Result<()> {
1220/// let (tx, mut rx) = mpsc::unbounded_channel::<InstallProgress>();
1221/// let lockfile = LockFile::load(Path::new("agpm.lock"))?;
1222/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
1223/// let cache = Cache::new()?;
1224///
1225/// // Spawn installation task
1226/// let install_task = tokio::spawn(async move {
1227///     install_resources_parallel_with_progress(
1228///         &lockfile,
1229///         &manifest,
1230///         Path::new("."),
1231///         &cache,
1232///         false,
1233///         Some(8),      // Max 8 concurrent operations
1234///         Some(tx)      // Progress updates
1235///     ).await
1236/// });
1237///
1238/// // Handle progress updates
1239/// tokio::spawn(async move {
1240///     while let Some(progress) = rx.recv().await {
1241///         println!("Progress: {}/{}, Active: {:?}",
1242///             progress.completed_count,
1243///             progress.total_count,
1244///             progress.active_deps
1245///         );
1246///     }
1247/// });
1248///
1249/// let count = install_task.await??;
1250/// println!("Installed {} resources", count);
1251/// # Ok(())
1252/// # }
1253/// ```
1254///
1255/// # Concurrency Control
1256///
1257/// The function implements the same parallel processing architecture as
1258/// [`install_resources_parallel`] but adds channel-based progress reporting:
1259/// - Pre-warming of Git worktrees for optimal parallelism
1260/// - Configurable concurrency limits via `max_concurrency`
1261/// - Thread-safe progress tracking with atomic updates
1262///
1263/// # Performance Characteristics
1264///
1265/// Progress updates are designed to have minimal performance impact:
1266/// - Updates are sent asynchronously without blocking installation
1267/// - Failed channel sends are silently ignored to prevent installation failures
1268/// - State updates are batched to reduce contention
1269///
1270/// # Returns
1271///
1272/// Returns the total number of resources that were successfully installed.
1273/// This count only includes resources that were actually modified (new or updated content),
1274/// not resources that already existed with identical content.
1275///
1276/// # Errors
1277///
1278/// Returns an error if any resource installation fails. The error includes
1279/// details about all failed installations with specific error context.
1280/// Progress updates continue until the error occurs.
1281// Removed install_resources_parallel_with_progress - use install_resources with MultiPhaseProgress instead
1282#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
1283pub async fn install_resources_parallel_with_progress(
1284    lockfile: &Arc<LockFile>,
1285    manifest: &Manifest,
1286    install_ctx: &InstallContext<'_>,
1287    max_concurrency: Option<usize>,
1288    progress_sender: Option<mpsc::UnboundedSender<InstallProgress>>,
1289) -> Result<usize> {
1290    let project_dir = install_ctx.project_dir;
1291    let cache = install_ctx.cache;
1292    let force_refresh = install_ctx.force_refresh;
1293    // Collect all entries to install using ResourceIterator
1294    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);
1295
1296    if all_entries.is_empty() {
1297        return Ok(0);
1298    }
1299
1300    let total = all_entries.len();
1301
1302    // Pre-warm the cache by creating all needed worktrees upfront
1303    // Collect unique (source, url, sha) triples to pre-create worktrees
1304    let mut unique_worktrees = HashSet::new();
1305    for (entry, _) in &all_entries {
1306        if let Some(source_name) = &entry.source
1307            && let Some(url) = &entry.url
1308        {
1309            // Only pre-warm if we have a valid SHA
1310            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
1311                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
1312            }) {
1313                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
1314            }
1315        }
1316    }
1317
1318    if !unique_worktrees.is_empty() {
1319        let worktree_futures: Vec<_> = unique_worktrees
1320            .into_iter()
1321            .map(|(source, url, sha)| {
1322                async move {
1323                    cache
1324                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
1325                        .await
1326                        .ok(); // Ignore errors during pre-warming
1327                }
1328            })
1329            .collect();
1330
1331        // Execute all worktree creations in parallel
1332        future::join_all(worktree_futures).await;
1333    }
1334
1335    // Create thread-safe progress tracking
1336    let installed_count = Arc::new(Mutex::new(0));
1337    let active_deps = Arc::new(Mutex::new(Vec::<String>::new()));
1338    let sender = progress_sender.map(Arc::new);
1339
1340    let shared_cache = Arc::new(cache.clone());
1341    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);
1342
1343    let results: Vec<InstallResult> = stream::iter(all_entries)
1344        .map(|(entry, resource_dir)| {
1345            let entry = entry.clone();
1346            let project_dir = project_dir.to_path_buf();
1347            let resource_dir = resource_dir.to_string();
1348            let installed_count = Arc::clone(&installed_count);
1349            let active_deps = Arc::clone(&active_deps);
1350            let sender = sender.clone();
1351            let cache = Arc::clone(&shared_cache);
1352            let lockfile = Arc::clone(lockfile);
1353
1354            async move {
1355                // Add to active list and send update
1356                {
1357                    let mut active = active_deps.lock().await;
1358                    active.push(entry.name.clone());
1359                    let count = *installed_count.lock().await;
1360
1361                    if let Some(ref tx) = sender {
1362                        let _ = tx.send(InstallProgress {
1363                            active_deps: active.clone(),
1364                            completed_count: count,
1365                            total_count: total,
1366                        });
1367                    }
1368                }
1369
1370                let context = InstallContext::new(
1371                    &project_dir,
1372                    cache.as_ref(),
1373                    force_refresh,
1374                    false, // verbose - will be threaded through from CLI
1375                    Some(manifest),
1376                    Some(&lockfile),
1377                    install_ctx.project_patches,
1378                    install_ctx.private_patches,
1379                    install_ctx.gitignore_lock,
1380                    install_ctx.max_content_file_size,
1381                );
1382                let res = install_resource_for_parallel(&entry, &resource_dir, &context).await;
1383
1384                // Remove from active list and update count only if actually installed
1385                {
1386                    let mut active = active_deps.lock().await;
1387                    active.retain(|x| x != &entry.name);
1388
1389                    if let Ok((actually_installed, _checksum, _applied_patches)) = &res {
1390                        if *actually_installed {
1391                            let mut count = installed_count.lock().await;
1392                            *count += 1;
1393                        }
1394
1395                        let count = *installed_count.lock().await;
1396                        if let Some(ref tx) = sender {
1397                            let _ = tx.send(InstallProgress {
1398                                active_deps: active.clone(),
1399                                completed_count: count,
1400                                total_count: total,
1401                            });
1402                        }
1403                    }
1404                }
1405
1406                match res {
1407                    Ok((installed, checksum, applied_patches)) => {
1408                        Ok((entry.name.clone(), installed, checksum, applied_patches))
1409                    }
1410                    Err(err) => Err((entry.name.clone(), err)),
1411                }
1412            }
1413        })
1414        .buffer_unordered(concurrency)
1415        .collect()
1416        .await;
1417
1418    let mut errors = Vec::new();
1419    for result in results {
1420        match result {
1421            Ok((_name, _installed, _checksum, _applied_patches)) => {
1422                // Old function doesn't return checksums or patches
1423            }
1424            Err((name, error)) => {
1425                errors.push((name, error));
1426            }
1427        }
1428    }
1429
1430    if !errors.is_empty() {
1431        let error_msgs: Vec<String> =
1432            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
1433        return Err(anyhow::anyhow!(
1434            "Failed to install {} resources:\n{}",
1435            error_msgs.len(),
1436            error_msgs.join("\n")
1437        ));
1438    }
1439
1440    let final_count = *installed_count.lock().await;
1441    Ok(final_count)
1442}
1443
/// Filtering options for resource installation operations.
///
/// This enum controls which resources are processed during installation,
/// enabling both full installations and selective updates. The filter
/// determines which entries from the lockfile are actually installed.
///
/// # Use Cases
///
/// - **Full installations**: Install all resources defined in lockfile
/// - **Selective updates**: Install only resources that have been updated
/// - **Performance optimization**: Avoid reinstalling unchanged resources
/// - **Incremental deployments**: Update only what has changed
///
/// # Variants
///
/// ## All Resources
/// [`ResourceFilter::All`] processes every resource entry in the lockfile,
/// regardless of whether it has changed. This is used by the install command
/// for complete environment setup.
///
/// ## Updated Resources Only
/// [`ResourceFilter::Updated`] processes only resources that have version
/// changes, as tracked by the update command. This enables efficient
/// incremental updates without full reinstallation.
///
/// # Examples
///
/// Install all resources:
/// ```rust,no_run
/// use agpm_cli::installer::ResourceFilter;
///
/// let filter = ResourceFilter::All;
/// // This will install every resource in the lockfile
/// ```
///
/// Install only updated resources:
/// ```rust,no_run
/// use agpm_cli::installer::ResourceFilter;
///
/// let updates = vec![
///     ("agent1".to_string(), None, "v1.0.0".to_string(), "v1.1.0".to_string()),
///     ("tool2".to_string(), Some("community".to_string()), "v2.1.0".to_string(), "v2.2.0".to_string()),
/// ];
/// let filter = ResourceFilter::Updated(updates);
/// // This will install only agent1 and tool2
/// ```
///
/// # Update Tuple Format
///
/// For [`ResourceFilter::Updated`], each tuple contains:
/// - `name`: Resource name as defined in the manifest
/// - `source`: Source repository name (`None` for local resources)
/// - `old_version`: Previous version (for logging and tracking)
/// - `new_version`: New version to install
///
/// The old version is primarily used for user feedback and logging,
/// while the new version determines what gets installed.
pub enum ResourceFilter {
    /// Install all resources from the lockfile.
    ///
    /// This option processes every resource entry in the lockfile,
    /// installing or updating each one regardless of whether it has
    /// changed since the last installation.
    All,

    /// Install only specific updated resources.
    ///
    /// This option processes only the resources specified in the update list,
    /// allowing for efficient incremental updates. Each tuple contains:
    /// - Resource name
    /// - Source name (None for local resources)
    /// - Old version (for tracking)
    /// - New version (to install)
    Updated(Vec<(String, Option<String>, String, String)>),
}
1518
1519/// Resource installation function supporting multiple progress configurations.
1520///
1521/// This function consolidates all resource installation patterns into a single, flexible
1522/// interface that can handle both full installations and selective updates with different
1523/// progress reporting mechanisms. It represents the modernized installation architecture
1524/// introduced in AGPM v0.3.0.
1525///
1526/// # Architecture Benefits
1527///
1528/// - **Single API**: Single function handles install and update commands
1529/// - **Flexible progress**: Supports dynamic, simple, and quiet progress modes
1530/// - **Selective installation**: Can install all resources or just updated ones
1531/// - **Optimal concurrency**: Leverages worktree-based parallel operations
1532/// - **Cache efficiency**: Integrates with instance-level caching systems
1533///
1534/// # Parameters
1535///
1536/// * `filter` - Determines which resources to install ([`ResourceFilter::All`] or [`ResourceFilter::Updated`])
1537/// * `lockfile` - The lockfile containing all resource definitions to install
1538/// * `manifest` - The project manifest providing configuration and target directories
1539/// * `project_dir` - Root directory where resources should be installed
1540/// * `cache` - Cache instance for Git repository and worktree management
1541/// * `force_refresh` - Whether to force refresh of cached repositories
1542/// * `max_concurrency` - Optional limit on concurrent operations (None = unlimited)
1543/// * `progress` - Optional multi-phase progress manager ([`MultiPhaseProgress`])
1544///
1545/// # Progress Reporting
1546///
1547/// Progress is reported through the optional [`MultiPhaseProgress`] parameter:
1548/// - **Enabled**: Pass `Some(progress)` for multi-phase progress with live updates
1549/// - **Disabled**: Pass `None` for quiet operation (scripts and automation)
1550///
1551/// # Installation Process
1552///
1553/// 1. **Resource filtering**: Collects entries based on filter criteria
1554/// 2. **Cache warming**: Pre-creates worktrees for all unique repositories
1555/// 3. **Parallel installation**: Processes resources with configured concurrency
1556/// 4. **Progress coordination**: Updates progress based on configuration
1557/// 5. **Error aggregation**: Collects and reports any installation failures
1558///
1559/// # Concurrency Behavior
1560///
1561/// The function implements advanced parallel processing:
1562/// - **Pre-warming phase**: Creates all needed worktrees upfront for maximum parallelism
1563/// - **Parallel execution**: Each resource installed in its own async task
1564/// - **Concurrency control**: `max_concurrency` limits simultaneous operations
1565/// - **Thread safety**: Progress updates are atomic and thread-safe
1566///
1567/// # Returns
1568///
/// Returns a tuple of:
/// - The number of resources that were actually installed (new or updated content).
///   Resources that already exist with identical content are not counted.
/// - A vector of (`resource_name`, checksum) pairs for all processed resources
/// - A vector of (`resource_name`, `AppliedPatches`) pairs recording the patches
///   applied to each processed resource
1573///
1574/// # Errors
1575///
1576/// Returns an error if any resource installation fails. The error includes details
1577/// about all failed installations with specific error messages for debugging.
1578///
1579/// # Examples
1580///
1581/// Install all resources with progress tracking:
1582/// ```rust,no_run
1583/// use agpm_cli::installer::{install_resources, ResourceFilter};
1584/// use agpm_cli::utils::progress::MultiPhaseProgress;
1585/// use agpm_cli::lockfile::LockFile;
1586/// use agpm_cli::manifest::Manifest;
1587/// use agpm_cli::cache::Cache;
1588/// use std::sync::Arc;
1589/// use std::path::Path;
1590///
1591/// # async fn example() -> anyhow::Result<()> {
1592/// # let lockfile = Arc::new(LockFile::default());
1593/// # let manifest = Manifest::default();
1594/// # let project_dir = Path::new(".");
1595/// # let cache = Cache::new()?;
1596/// let progress = Arc::new(MultiPhaseProgress::new(true));
1597///
1598/// let (count, _checksums, _patches) = install_resources(
1599///     ResourceFilter::All,
1600///     &lockfile,
1601///     &manifest,
1602///     &project_dir,
1603///     cache,
1604///     false,
1605///     Some(8), // Limit to 8 concurrent operations
1606///     Some(progress),
1607///     false, // verbose
1608/// ).await?;
1609///
1610/// println!("Installed {} resources", count);
1611/// # Ok(())
1612/// # }
1613/// ```
1614///
1615/// Install resources quietly (for automation):
1616/// ```rust,no_run
1617/// use agpm_cli::installer::{install_resources, ResourceFilter};
1618/// use agpm_cli::lockfile::LockFile;
1619/// use agpm_cli::manifest::Manifest;
1620/// use agpm_cli::cache::Cache;
1621/// use std::path::Path;
1622/// use std::sync::Arc;
1623///
1624/// # async fn example() -> anyhow::Result<()> {
1625/// # let lockfile = Arc::new(LockFile::default());
1626/// # let manifest = Manifest::default();
1627/// # let project_dir = Path::new(".");
1628/// # let cache = Cache::new()?;
1629/// let updates = vec![("agent1".to_string(), None, "v1.0".to_string(), "v1.1".to_string())];
1630///
1631/// let (count, _checksums, _patches) = install_resources(
1632///     ResourceFilter::Updated(updates),
1633///     &lockfile,
1634///     &manifest,
1635///     &project_dir,
1636///     cache,
1637///     false,
1638///     None, // Unlimited concurrency
1639///     None, // No progress output
1640///     false, // verbose
1641/// ).await?;
1642///
1643/// println!("Updated {} resources", count);
1644/// # Ok(())
1645/// # }
1646/// ```
#[allow(clippy::too_many_arguments)]
pub async fn install_resources(
    filter: ResourceFilter,
    lockfile: &Arc<LockFile>,
    manifest: &Manifest,
    project_dir: &Path,
    cache: Cache,
    force_refresh: bool,
    max_concurrency: Option<usize>,
    progress: Option<Arc<MultiPhaseProgress>>,
    verbose: bool,
) -> Result<(usize, Vec<(String, String)>, Vec<(String, crate::manifest::patches::AppliedPatches)>)>
{
    // Collect entries to install based on filter.
    // Each element pairs a locked resource with the target directory
    // (rendered as a string) it should be installed into.
    let all_entries: Vec<(LockedResource, String)> = match filter {
        ResourceFilter::All => {
            // Use existing ResourceIterator logic for all entries
            ResourceIterator::collect_all_entries(lockfile, manifest)
                .into_iter()
                .map(|(entry, dir)| (entry.clone(), dir.into_owned()))
                .collect()
        }
        ResourceFilter::Updated(ref updates) => {
            // Collect only the updated entries; names not found in the
            // lockfile are silently skipped.
            let mut entries = Vec::new();
            for (name, source, _, _) in updates {
                if let Some((resource_type, entry)) =
                    ResourceIterator::find_resource_by_name_and_source(
                        lockfile,
                        name,
                        source.as_deref(),
                    )
                {
                    // Get artifact configuration path for the entry's tool
                    // (defaults to "claude-code" when unspecified).
                    let tool = entry.tool.as_deref().unwrap_or("claude-code");
                    let artifact_path = manifest
                        .get_artifact_resource_path(tool, resource_type)
                        .expect("Resource type should be supported by configured tools");
                    let target_dir = artifact_path.display().to_string();
                    entries.push((entry.clone(), target_dir));
                }
            }
            entries
        }
    };

    // Nothing to do: zero installs, empty checksum and patch lists.
    if all_entries.is_empty() {
        return Ok((0, Vec::new(), Vec::new()));
    }

    let total = all_entries.len();

    // Start installation phase with progress if provided
    if let Some(ref pm) = progress {
        pm.start_phase_with_progress(InstallationPhase::InstallingResources, total);
    }

    // Pre-warm the cache by creating all needed worktrees upfront.
    // Deduplicate on (source, url, sha) so each unique commit triggers
    // at most one worktree creation.
    let mut unique_worktrees = HashSet::new();
    for (entry, _) in &all_entries {
        if let Some(source_name) = &entry.source
            && let Some(url) = &entry.url
        {
            // Only pre-warm if we have a valid SHA (exactly 40 hex chars)
            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
            }) {
                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
            }
        }
    }

    if !unique_worktrees.is_empty() {
        // Context string is passed through for cache-layer logging only.
        let context = match filter {
            ResourceFilter::All => "pre-warm",
            ResourceFilter::Updated(_) => "update-pre-warm",
        };

        let worktree_futures: Vec<_> = unique_worktrees
            .into_iter()
            .map(|(source, url, sha)| {
                let cache = cache.clone();
                async move {
                    cache
                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some(context))
                        .await
                        .ok(); // Ignore errors during pre-warming; real installs surface them
                }
            })
            .collect();

        // Execute all worktree creations in parallel
        future::join_all(worktree_futures).await;
    }

    // Create thread-safe progress tracking
    let installed_count = Arc::new(Mutex::new(0));
    // None means effectively unlimited; .max(1) guards against a zero limit.
    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);

    // Create gitignore lock for thread-safe gitignore updates
    let gitignore_lock = Arc::new(Mutex::new(()));

    // Update initial progress message
    if let Some(ref pm) = progress {
        pm.update_current_message(&format!("Installing 0/{total} resources"));
    }

    // Process installations in parallel; buffer_unordered yields results
    // in completion order, not submission order.
    let results: Vec<InstallResult> = stream::iter(all_entries)
        .map(|(entry, resource_dir)| {
            let project_dir = project_dir.to_path_buf();
            let installed_count = Arc::clone(&installed_count);
            let cache = cache.clone();
            let progress = progress.clone();
            let gitignore_lock = Arc::clone(&gitignore_lock);

            async move {
                // Update progress message for current resource
                if let Some(ref pm) = progress {
                    pm.update_current_message(&format!("Installing {}", entry.name));
                }

                let install_context = InstallContext::new(
                    &project_dir,
                    &cache,
                    force_refresh,
                    verbose,
                    Some(manifest),
                    Some(lockfile),
                    Some(&manifest.project_patches),
                    Some(&manifest.private_patches),
                    Some(&gitignore_lock),
                    None, // max_content_file_size - not available in install_resources context
                );

                let res =
                    install_resource_for_parallel(&entry, &resource_dir, &install_context).await;

                // Update progress on success - but only count if actually installed
                // (an Ok(false) result means the on-disk content was already
                // identical and is not counted). NOTE(review): errors do not
                // increment the bar, so on failure it may not reach `total`.
                if let Ok((actually_installed, _checksum, _applied_patches)) = &res {
                    if *actually_installed {
                        let mut count = installed_count.lock().await;
                        *count += 1;
                    }

                    if let Some(ref pm) = progress {
                        let count = *installed_count.lock().await;
                        pm.update_current_message(&format!("Installing {count}/{total} resources"));
                        pm.increment_progress(1);
                    }
                }

                // Tag both outcomes with the resource name for later reporting.
                match res {
                    Ok((installed, checksum, applied_patches)) => {
                        Ok((entry.name.clone(), installed, checksum, applied_patches))
                    }
                    Err(err) => Err((entry.name.clone(), err)),
                }
            }
        })
        .buffer_unordered(concurrency)
        .collect()
        .await;

    // Handle errors and collect checksums and applied patches
    let mut errors = Vec::new();
    let mut checksums = Vec::new();
    let mut applied_patches_list = Vec::new();
    for result in results {
        match result {
            Ok((name, _installed, checksum, applied_patches)) => {
                checksums.push((name.clone(), checksum));
                applied_patches_list.push((name, applied_patches));
            }
            Err((name, error)) => {
                errors.push((name, error));
            }
        }
    }

    if !errors.is_empty() {
        // Complete phase with error message
        if let Some(ref pm) = progress {
            pm.complete_phase(Some(&format!("Failed to install {} resources", errors.len())));
        }

        // Aggregate every failure into one error so the caller sees all
        // failed resources, not just the first.
        let error_msgs: Vec<String> =
            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
        return Err(anyhow::anyhow!(
            "Failed to install {} resources:\n{}",
            error_msgs.len(),
            error_msgs.join("\n")
        ));
    }

    let final_count = *installed_count.lock().await;

    // Complete installation phase successfully (the completion message is
    // skipped when nothing actually changed on disk).
    if let Some(ref pm) = progress
        && final_count > 0
    {
        pm.complete_phase(Some(&format!("Installed {final_count} resources")));
    }

    Ok((final_count, checksums, applied_patches_list))
}
1853
1854/// Install resources with real-time dynamic progress management.
1855///
1856/// This function provides sophisticated parallel resource installation with
1857/// live progress tracking that shows individual dependency states in real-time.
/// It uses a `ProgressBar` to display which dependencies
/// are currently being processed, completed, or experiencing issues.
1860///
1861/// # Arguments
1862///
1863/// * `lockfile` - Lockfile containing all resources to install
1864/// * `manifest` - Project manifest providing configuration and target directories
1865/// * `project_dir` - Root directory where resources will be installed
1866/// * `cache` - Cache instance for Git repository and worktree management
1867/// * `force_refresh` - Whether to force refresh of cached repositories
1868/// * `max_concurrency` - Optional limit on concurrent operations (`None` = unlimited)
1869/// * `progress_bar` - Optional dynamic progress bar for real-time updates
1870///
1871/// # Dynamic Progress Features
1872///
1873/// When a `ProgressBar` is provided, the installation displays:
1874/// - Real-time list of dependencies being processed concurrently
1875/// - Live updates as dependencies start, progress, and complete
1876/// - Clean terminal output with automatic clearing when finished
1877/// - Graceful handling of errors with preserved context
1878///
1879/// # Progress Flow
1880///
1881/// 1. **Initialization**: Progress manager starts with total dependency count
1882/// 2. **Pre-warming**: Cache prepares Git worktrees for parallel access
1883/// 3. **Parallel Processing**: Dependencies install concurrently with live updates
1884/// 4. **Completion**: Progress display clears, leaving clean final state
1885///
1886/// # Examples
1887///
1888/// ```rust,no_run
1889/// use agpm_cli::installer::{install_resources_with_dynamic_progress, InstallContext};
1890/// use agpm_cli::utils::progress::ProgressBar;
1891/// use agpm_cli::lockfile::LockFile;
1892/// use agpm_cli::manifest::Manifest;
1893/// use agpm_cli::cache::Cache;
1894/// use std::sync::Arc;
1895/// use std::path::Path;
1896///
1897/// # async fn example() -> anyhow::Result<()> {
1898/// let lockfile = Arc::new(LockFile::load(Path::new("agpm.lock"))?);
1899/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
1900/// let cache = Cache::new()?;
1901///
1902/// // Create dynamic progress manager
1903/// let progress_bar = Arc::new(ProgressBar::new(100));
1904///
1905/// let context = InstallContext::new(Path::new("."), &cache, false, false, Some(&manifest), Some(&lockfile), None, None, None, None);
1906/// let count = install_resources_with_dynamic_progress(
1907///     &lockfile,
1908///     &manifest,
1909///     &context,
1910///     Some(10),                 // Max 10 concurrent operations
1911///     Some(progress_bar)        // Dynamic progress display
1912/// ).await?;
1913///
1914/// println!("Successfully installed {} resources", count);
1915/// # Ok(())
1916/// # }
1917/// ```
1918///
1919/// # Performance Optimizations
1920///
1921/// The function includes several performance enhancements:
1922/// - **Worktree pre-warming**: All needed Git worktrees created upfront
1923/// - **Parallel processing**: Configurable concurrency for optimal resource usage
1924/// - **Progress batching**: Updates are batched to reduce terminal overhead
1925/// - **Efficient cleanup**: Worktrees left for reuse rather than immediate cleanup
1926///
1927/// # Returns
1928///
1929/// Returns the total number of resources that were actually installed.
1930/// This count only includes resources with new or updated content, not
1931/// resources that already existed and were unchanged.
1932///
1933/// # Errors
1934///
1935/// Returns an error if any resource installation fails. The error includes
1936/// detailed information about all failed installations. The progress manager
1937/// is automatically cleaned up even if errors occur.
// NOTE: Deprecated shim — superseded by `install_resources`, which supports
// `MultiPhaseProgress` and also returns checksums and applied patches.
#[deprecated(note = "Use install_resources with MultiPhaseProgress instead")]
pub async fn install_resources_with_dynamic_progress(
    lockfile: &Arc<LockFile>,
    manifest: &Manifest,
    install_ctx: &InstallContext<'_>,
    max_concurrency: Option<usize>,
    progress_bar: Option<Arc<crate::utils::progress::ProgressBar>>,
) -> Result<usize> {
    let project_dir = install_ctx.project_dir;
    let cache = install_ctx.cache;
    let force_refresh = install_ctx.force_refresh;
    // Collect all entries to install using ResourceIterator
    let all_entries = ResourceIterator::collect_all_entries(lockfile, manifest);

    if all_entries.is_empty() {
        return Ok(0);
    }

    let _total = all_entries.len();

    // Start progress if provided
    if let Some(ref progress) = progress_bar {
        progress.set_message("Installing resources");
    }

    // Pre-warm the cache by creating all needed worktrees upfront.
    // Collect unique (source, url, sha) triples to pre-create worktrees
    // so each commit is materialized at most once.
    let mut unique_worktrees = HashSet::new();
    for (entry, _) in &all_entries {
        if let Some(source_name) = &entry.source
            && let Some(url) = &entry.url
        {
            // Only pre-warm if we have a valid SHA (exactly 40 hex chars)
            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
            }) {
                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
            }
        }
    }

    if !unique_worktrees.is_empty() {
        let worktree_futures: Vec<_> = unique_worktrees
            .into_iter()
            .map(|(source, url, sha)| {
                async move {
                    cache
                        .get_or_create_worktree_for_sha(&source, &url, &sha, Some("pre-warm"))
                        .await
                        .ok(); // Ignore errors during pre-warming
                }
            })
            .collect();

        // Execute all worktree creations in parallel
        future::join_all(worktree_futures).await;
    }

    // Create thread-safe progress tracking
    let installed_count = Arc::new(Mutex::new(0));
    let shared_cache = Arc::new(cache.clone());
    // None means effectively unlimited; .max(1) guards against a zero limit.
    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);

    // Results arrive in completion order via buffer_unordered.
    let results: Vec<InstallResult> = stream::iter(all_entries)
        .map(|(entry, resource_dir)| {
            let entry = entry.clone();
            let project_dir = project_dir.to_path_buf();
            let resource_dir = resource_dir.to_string();
            let installed_count = Arc::clone(&installed_count);
            let cache = Arc::clone(&shared_cache);
            let progress_bar_ref = progress_bar.clone();
            let lockfile = Arc::clone(lockfile);

            async move {
                // Update progress if available
                if let Some(ref progress) = progress_bar_ref {
                    progress.set_message(format!("Installing {}", entry.name));
                }

                // Rebuild an InstallContext per task, inheriting patch and
                // gitignore settings from the caller's context.
                let context = InstallContext::new(
                    &project_dir,
                    cache.as_ref(),
                    force_refresh,
                    false, // verbose - will be threaded through from CLI
                    Some(manifest),
                    Some(&lockfile),
                    install_ctx.project_patches,
                    install_ctx.private_patches,
                    install_ctx.gitignore_lock,
                    install_ctx.max_content_file_size,
                );
                let res = install_resource_for_parallel(&entry, &resource_dir, &context).await;

                // Signal completion and update count only if actually installed
                // (Ok(false) means on-disk content was already identical).
                if let Ok((actually_installed, _checksum, _applied_patches)) = &res {
                    if *actually_installed {
                        let mut count = installed_count.lock().await;
                        *count += 1;
                    }

                    if let Some(ref progress) = progress_bar_ref {
                        progress.inc(1);
                    }
                }

                match res {
                    Ok((installed, checksum, applied_patches)) => {
                        Ok((entry.name.clone(), installed, checksum, applied_patches))
                    }
                    Err(err) => Err((entry.name.clone(), err)),
                }
            }
        })
        .buffer_unordered(concurrency)
        .collect()
        .await;

    let mut errors = Vec::new();
    for result in results {
        match result {
            Ok((_name, _installed, _checksum, _applied_patches)) => {
                // This deprecated API doesn't return checksums or patches,
                // so successful results are discarded here.
            }
            Err((name, error)) => {
                errors.push((name, error));
            }
        }
    }

    if !errors.is_empty() {
        // Finish with error
        if let Some(ref progress) = progress_bar {
            progress.finish_and_clear();
        }

        // Aggregate every failure so all failed resources are reported.
        let error_msgs: Vec<String> =
            errors.into_iter().map(|(name, error)| format!("  {name}: {error}")).collect();
        return Err(anyhow::anyhow!(
            "Failed to install {} resources:\n{}",
            error_msgs.len(),
            error_msgs.join("\n")
        ));
    }

    let final_count = *installed_count.lock().await;

    // Clear the progress display - success message will be shown by the caller
    if let Some(ref progress) = progress_bar {
        progress.finish_and_clear();
    }

    Ok(final_count)
}
2092
2093/// Install only specific updated resources in parallel (selective installation).
2094///
2095/// This function provides targeted installation of only the resources that have
2096/// been updated, rather than reinstalling all resources. It's designed for
2097/// efficient update operations where only a subset of dependencies have changed.
2098/// The function uses the same parallel processing architecture as full installations
2099/// but operates on a filtered set of resources.
2100///
2101/// # Arguments
2102///
/// * `updates` - Vector of tuples containing (name, `source`, `old_version`, `new_version`) for each updated resource
2104/// * `lockfile` - Lockfile containing all available resources (updated resources must exist here)
2105/// * `manifest` - Project manifest providing configuration and target directories
2106/// * `project_dir` - Root directory where resources will be installed
2107/// * `cache` - Cache instance for Git repository and worktree management
2108/// * `pb` - Optional progress bar for user feedback during installation
2109/// * `_quiet` - Quiet mode flag (currently unused, maintained for API compatibility)
2110///
2111/// # Update Tuple Format
2112///
/// Each update tuple contains:
/// - `name`: Resource name as defined in the lockfile
/// - `source`: Source repository name (`None` for local resources)
/// - `old_version`: Previous version (used for logging and user feedback)
/// - `new_version`: New version that will be installed
2117///
2118/// # Selective Processing
2119///
2120/// The function implements selective resource processing:
2121/// 1. **Filtering**: Only processes resources listed in the `updates` vector
2122/// 2. **Lookup**: Finds corresponding entries in the lockfile for each update
2123/// 3. **Validation**: Ensures all specified resources exist before processing
2124/// 4. **Installation**: Uses the same parallel architecture as full installations
2125///
2126/// # Examples
2127///
2128/// ```rust,no_run
2129/// use agpm_cli::installer::{install_updated_resources, InstallContext};
2130/// use agpm_cli::lockfile::LockFile;
2131/// use agpm_cli::manifest::Manifest;
2132/// use agpm_cli::cache::Cache;
2133/// use agpm_cli::utils::progress::ProgressBar;
2134/// use std::path::Path;
2135/// use std::sync::Arc;
2136///
2137/// # async fn example() -> anyhow::Result<()> {
2138/// let lockfile = Arc::new(LockFile::load(Path::new("agpm.lock"))?);
2139/// let manifest = Manifest::load(Path::new("agpm.toml"))?;
2140/// let cache = Cache::new()?;
2141/// let pb = ProgressBar::new(3);
2142///
2143/// // Define which resources to update
2144/// let updates = vec![
2145///     ("ai-agent".to_string(), None, "v1.0.0".to_string(), "v1.1.0".to_string()),
2146///     ("helper-tool".to_string(), Some("community".to_string()), "v2.0.0".to_string(), "v2.1.0".to_string()),
2147///     ("data-processor".to_string(), None, "v1.5.0".to_string(), "v1.6.0".to_string()),
2148/// ];
2149///
2150/// let context = InstallContext::new(Path::new("."), &cache, false, false, Some(&manifest), Some(&lockfile), None, None, None, None);
2151/// let count = install_updated_resources(
2152///     &updates,
2153///     &lockfile,
2154///     &manifest,
2155///     &context,
2156///     Some(&pb),
2157///     false
2158/// ).await?;
2159///
2160/// println!("Updated {} resources", count);
2161/// # Ok(())
2162/// # }
2163/// ```
2164///
2165/// # Performance Benefits
2166///
2167/// Selective installation provides significant performance benefits:
2168/// - **Reduced processing**: Only installs resources that have actually changed
2169/// - **Faster execution**: Avoids redundant operations on unchanged resources
2170/// - **Network efficiency**: Only fetches Git data for repositories with updates
2171/// - **Disk efficiency**: Minimizes file system operations and cache usage
2172///
2173/// # Integration with Update Command
2174///
2175/// This function is typically used by the `agpm update` command after dependency
2176/// resolution determines which resources have new versions available:
2177///
2178/// ```text
2179/// Update Flow:
2180/// 1. Resolve dependencies → identify version changes
2181/// 2. Update lockfile → record new versions and checksums
2182/// 3. Selective installation → install only changed resources
2183/// ```
2184///
2185/// # Returns
2186///
2187/// Returns the total number of resources that were successfully installed.
2188/// This represents the actual number of files that were updated on disk.
2189///
2190/// # Errors
2191///
2192/// Returns an error if:
2193/// - Any specified resource name is not found in the lockfile
2194/// - Git repository access fails for resources being updated
2195/// - File system operations fail during installation
2196/// - Any individual resource installation encounters an error
2197///
2198/// The function uses atomic error handling - if any resource fails, the entire
2199/// operation fails and detailed error information is provided.
2200pub async fn install_updated_resources(
2201    updates: &[(String, Option<String>, String, String)], // (name, source, old_version, new_version)
2202    lockfile: &Arc<LockFile>,
2203    manifest: &Manifest,
2204    install_ctx: &InstallContext<'_>,
2205    pb: Option<&ProgressBar>,
2206    _quiet: bool,
2207) -> Result<usize> {
2208    let project_dir = install_ctx.project_dir;
2209    let cache = install_ctx.cache;
2210    if updates.is_empty() {
2211        return Ok(0);
2212    }
2213
2214    let total = updates.len();
2215
2216    // Collect all entries to install
2217    let mut entries_to_install = Vec::new();
2218    for (name, source, _, _) in updates {
2219        if let Some((resource_type, entry)) =
2220            ResourceIterator::find_resource_by_name_and_source(lockfile, name, source.as_deref())
2221        {
2222            // Get artifact configuration path
2223            let tool = entry.tool.as_deref().unwrap_or("claude-code");
2224            let artifact_path = manifest
2225                .get_artifact_resource_path(tool, resource_type)
2226                .expect("Resource type should be supported by configured tools");
2227            let target_dir = artifact_path.display().to_string();
2228            entries_to_install.push((entry.clone(), target_dir));
2229        }
2230    }
2231
2232    if entries_to_install.is_empty() {
2233        return Ok(0);
2234    }
2235
2236    // Pre-warm the cache by creating all needed worktrees upfront
2237    if let Some(pb) = pb {
2238        pb.set_message("Preparing resources...");
2239    }
2240
2241    // Collect unique (source, url, sha) triples to pre-create worktrees
2242    let mut unique_worktrees = HashSet::new();
2243    for (entry, _) in &entries_to_install {
2244        if let Some(source_name) = &entry.source
2245            && let Some(url) = &entry.url
2246        {
2247            // Only pre-warm if we have a valid SHA
2248            if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
2249                commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
2250            }) {
2251                unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
2252            }
2253        }
2254    }
2255
2256    // Pre-create all worktrees in parallel
2257    if !unique_worktrees.is_empty() {
2258        let worktree_futures: Vec<_> = unique_worktrees
2259            .into_iter()
2260            .map(|(source, url, sha)| {
2261                async move {
2262                    cache
2263                        .get_or_create_worktree_for_sha(
2264                            &source,
2265                            &url,
2266                            &sha,
2267                            Some("update-pre-warm"),
2268                        )
2269                        .await
2270                        .ok(); // Ignore errors during pre-warming
2271                }
2272            })
2273            .collect();
2274
2275        // Execute all worktree creations in parallel
2276        future::join_all(worktree_futures).await;
2277    }
2278
2279    // Create thread-safe progress tracking
2280    let installed_count = Arc::new(Mutex::new(0));
2281    let pb = pb.map(Arc::new);
2282    let cache = Arc::new(cache);
2283
2284    // Set initial progress
2285    if let Some(ref pb) = pb {
2286        pb.set_message(format!("Installing 0/{total} resources"));
2287    }
2288
2289    // Use concurrent stream processing for parallel installation
2290    let results: Vec<Result<(), anyhow::Error>> = stream::iter(entries_to_install)
2291        .map(|(entry, resource_dir)| {
2292            let project_dir = project_dir.to_path_buf();
2293            let installed_count = Arc::clone(&installed_count);
2294            let pb = pb.clone();
2295            let cache = Arc::clone(&cache);
2296            let lockfile = Arc::clone(lockfile);
2297
2298            async move {
2299                // Install the resource
2300                let context = InstallContext::new(
2301                    &project_dir,
2302                    cache.as_ref(),
2303                    false,
2304                    false, // verbose - will be threaded through from CLI
2305                    Some(manifest),
2306                    Some(&lockfile),
2307                    install_ctx.project_patches,
2308                    install_ctx.private_patches,
2309                    install_ctx.gitignore_lock,
2310                    install_ctx.max_content_file_size,
2311                );
2312                install_resource_for_parallel(&entry, &resource_dir, &context).await?;
2313
2314                // Update progress
2315                let mut count = installed_count.lock().await;
2316                *count += 1;
2317
2318                if let Some(pb) = pb {
2319                    pb.set_message(format!("Installing {}/{} resources", *count, total));
2320                    pb.inc(1);
2321                }
2322
2323                Ok::<(), anyhow::Error>(())
2324            }
2325        })
2326        .buffer_unordered(usize::MAX) // Allow unlimited task concurrency
2327        .collect()
2328        .await;
2329
2330    // Check all results for errors
2331    for result in results {
2332        result?;
2333    }
2334
2335    let final_count = *installed_count.lock().await;
2336    Ok(final_count)
2337}
2338
2339/// Add a single path to .gitignore atomically
2340///
2341/// This function adds a single path to the AGPM-managed section of `.gitignore`,
2342/// ensuring the file is protected from accidental commits even if subsequent
2343/// operations fail. Thread-safe via mutex locking.
2344///
2345/// # Arguments
2346///
2347/// * `project_dir` - Project root directory containing `.gitignore`
2348/// * `path` - Path to add (relative to project root, forward slashes)
2349/// * `lock` - Mutex to synchronize concurrent gitignore updates
2350///
2351/// # Returns
2352///
2353/// Returns `Ok(())` if the path was added successfully or was already present.
2354pub async fn add_path_to_gitignore(
2355    project_dir: &Path,
2356    path: &str,
2357    lock: &Arc<Mutex<()>>,
2358) -> Result<()> {
2359    // Acquire lock to ensure thread-safe updates
2360    let _guard = lock.lock().await;
2361
2362    let gitignore_path = project_dir.join(".gitignore");
2363
2364    // Read existing .gitignore content
2365    let mut before_agpm = Vec::new();
2366    let mut agpm_paths = std::collections::HashSet::new();
2367    let mut after_agpm = Vec::new();
2368
2369    if gitignore_path.exists() {
2370        let content = tokio::fs::read_to_string(&gitignore_path)
2371            .await
2372            .with_context(|| format!("Failed to read {}", gitignore_path.display()))?;
2373
2374        let mut in_agpm_section = false;
2375        let mut past_agpm_section = false;
2376
2377        for line in content.lines() {
2378            if line == "# AGPM managed entries - do not edit below this line"
2379                || line == "# CCPM managed entries - do not edit below this line"
2380            {
2381                in_agpm_section = true;
2382            } else if line == "# End of AGPM managed entries"
2383                || line == "# End of CCPM managed entries"
2384            {
2385                in_agpm_section = false;
2386                past_agpm_section = true;
2387            } else if in_agpm_section {
2388                // Collect existing AGPM paths
2389                if !line.is_empty() && !line.starts_with('#') {
2390                    agpm_paths.insert(line.to_string());
2391                }
2392            } else if !past_agpm_section {
2393                before_agpm.push(line.to_string());
2394            } else {
2395                after_agpm.push(line.to_string());
2396            }
2397        }
2398    }
2399
2400    // Add the new path if not already present
2401    let normalized_path = normalize_path_for_storage(path);
2402    if agpm_paths.contains(&normalized_path) {
2403        // Path already exists, no update needed
2404        return Ok(());
2405    }
2406    agpm_paths.insert(normalized_path);
2407
2408    // Always include private config files
2409    agpm_paths.insert("agpm.private.toml".to_string());
2410    agpm_paths.insert("agpm.private.lock".to_string());
2411
2412    // Build new content
2413    let mut new_content = String::new();
2414
2415    // Add header for new files
2416    if before_agpm.is_empty() && after_agpm.is_empty() {
2417        new_content.push_str("# .gitignore - AGPM managed entries\n");
2418        new_content.push_str("# AGPM entries are automatically generated\n");
2419        new_content.push('\n');
2420    } else {
2421        // Preserve content before AGPM section
2422        for line in &before_agpm {
2423            new_content.push_str(line);
2424            new_content.push('\n');
2425        }
2426        if !before_agpm.is_empty() && !before_agpm.last().unwrap().trim().is_empty() {
2427            new_content.push('\n');
2428        }
2429    }
2430
2431    // Add AGPM section
2432    new_content.push_str("# AGPM managed entries - do not edit below this line\n");
2433    let mut sorted_paths: Vec<_> = agpm_paths.into_iter().collect();
2434    sorted_paths.sort();
2435    for p in sorted_paths {
2436        new_content.push_str(&p);
2437        new_content.push('\n');
2438    }
2439    new_content.push_str("# End of AGPM managed entries\n");
2440
2441    // Preserve content after AGPM section
2442    if !after_agpm.is_empty() {
2443        new_content.push('\n');
2444        for line in &after_agpm {
2445            new_content.push_str(line);
2446            new_content.push('\n');
2447        }
2448    }
2449
2450    // Write atomically
2451    atomic_write(&gitignore_path, new_content.as_bytes())
2452        .with_context(|| format!("Failed to update {}", gitignore_path.display()))?;
2453
2454    Ok(())
2455}
2456
2457/// Update .gitignore with installed file paths
2458pub fn update_gitignore(lockfile: &LockFile, project_dir: &Path, enabled: bool) -> Result<()> {
2459    if !enabled {
2460        // Gitignore management is disabled
2461        return Ok(());
2462    }
2463
2464    let gitignore_path = project_dir.join(".gitignore");
2465
2466    // Collect all installed file paths relative to project root
2467    let mut paths_to_ignore = HashSet::new();
2468
2469    // Helper to add paths from a resource list
2470    let mut add_resource_paths = |resources: &[LockedResource]| {
2471        for resource in resources {
2472            if !resource.installed_at.is_empty() {
2473                // Use the explicit installed_at path
2474                paths_to_ignore.insert(resource.installed_at.clone());
2475            }
2476        }
2477    };
2478
2479    // Collect paths from all resource types
2480    // Skip hooks and MCP servers - they are configured only, not installed as files
2481    add_resource_paths(&lockfile.agents);
2482    add_resource_paths(&lockfile.snippets);
2483    add_resource_paths(&lockfile.commands);
2484    add_resource_paths(&lockfile.scripts);
2485
2486    // Read existing gitignore if it exists
2487    let mut before_agpm_section = Vec::new();
2488    let mut after_agpm_section = Vec::new();
2489
2490    if gitignore_path.exists() {
2491        let content = fs::read_to_string(&gitignore_path)
2492            .with_context(|| format!("Failed to read {}", gitignore_path.display()))?;
2493
2494        let mut in_agpm_section = false;
2495        let mut past_agpm_section = false;
2496
2497        for line in content.lines() {
2498            // Support both AGPM and legacy CCPM markers for migration compatibility
2499            if line == "# AGPM managed entries - do not edit below this line"
2500                || line == "# CCPM managed entries - do not edit below this line"
2501            {
2502                in_agpm_section = true;
2503                continue;
2504            } else if line == "# End of AGPM managed entries"
2505                || line == "# End of CCPM managed entries"
2506            {
2507                in_agpm_section = false;
2508                past_agpm_section = true;
2509                continue;
2510            }
2511
2512            if !in_agpm_section && !past_agpm_section {
2513                // Preserve everything before AGPM section exactly as-is
2514                before_agpm_section.push(line.to_string());
2515            } else if in_agpm_section {
2516                // Skip existing AGPM/CCPM entries (they'll be replaced)
2517                continue;
2518            } else {
2519                // Preserve everything after AGPM section exactly as-is
2520                after_agpm_section.push(line.to_string());
2521            }
2522        }
2523    }
2524
2525    // Build the new content
2526    let mut new_content = String::new();
2527
2528    // Add everything before AGPM section exactly as it was
2529    if !before_agpm_section.is_empty() {
2530        for line in &before_agpm_section {
2531            new_content.push_str(line);
2532            new_content.push('\n');
2533        }
2534        // Add blank line before AGPM section if the previous content doesn't end with one
2535        if !before_agpm_section.is_empty() && !before_agpm_section.last().unwrap().trim().is_empty()
2536        {
2537            new_content.push('\n');
2538        }
2539    }
2540
2541    // Add AGPM managed section
2542    new_content.push_str("# AGPM managed entries - do not edit below this line\n");
2543
2544    // Always include private config files
2545    new_content.push_str("agpm.private.toml\n");
2546    new_content.push_str("agpm.private.lock\n");
2547
2548    // Convert paths to gitignore format (relative to project root)
2549    // Sort paths for consistent output
2550    let mut sorted_paths: Vec<_> = paths_to_ignore.into_iter().collect();
2551    sorted_paths.sort();
2552
2553    for path in &sorted_paths {
2554        // Use paths as-is since gitignore is now at project root
2555        let ignore_path = if path.starts_with("./") {
2556            // Remove leading ./ if present
2557            path.strip_prefix("./").unwrap_or(path).to_string()
2558        } else {
2559            path.clone()
2560        };
2561
2562        // Normalize to forward slashes for .gitignore (Git expects forward slashes on all platforms)
2563        let normalized_path = normalize_path_for_storage(&ignore_path);
2564
2565        new_content.push_str(&normalized_path);
2566        new_content.push('\n');
2567    }
2568
2569    new_content.push_str("# End of AGPM managed entries\n");
2570
2571    // Add everything after AGPM section exactly as it was
2572    if !after_agpm_section.is_empty() {
2573        new_content.push('\n');
2574        for line in &after_agpm_section {
2575            new_content.push_str(line);
2576            new_content.push('\n');
2577        }
2578    }
2579
2580    // If this is a new file, add a basic header
2581    if before_agpm_section.is_empty() && after_agpm_section.is_empty() {
2582        let mut default_content = String::new();
2583        default_content.push_str("# .gitignore - AGPM managed entries\n");
2584        default_content.push_str("# AGPM entries are automatically generated\n");
2585        default_content.push('\n');
2586        default_content.push_str("# AGPM managed entries - do not edit below this line\n");
2587
2588        // Always include private config files
2589        default_content.push_str("agpm.private.toml\n");
2590        default_content.push_str("agpm.private.lock\n");
2591
2592        // Add the AGPM paths
2593        for path in &sorted_paths {
2594            let ignore_path = if path.starts_with("./") {
2595                path.strip_prefix("./").unwrap_or(path).to_string()
2596            } else {
2597                path.clone()
2598            };
2599            // Normalize to forward slashes for .gitignore (Git expects forward slashes on all platforms)
2600            let normalized_path = ignore_path.replace('\\', "/");
2601            default_content.push_str(&normalized_path);
2602            default_content.push('\n');
2603        }
2604
2605        default_content.push_str("# End of AGPM managed entries\n");
2606        new_content = default_content;
2607    }
2608
2609    // Write the updated gitignore
2610    atomic_write(&gitignore_path, new_content.as_bytes())
2611        .with_context(|| format!("Failed to update {}", gitignore_path.display()))?;
2612
2613    Ok(())
2614}
2615
2616/// Removes artifacts that are no longer needed based on lockfile comparison.
2617///
2618/// This function performs automatic cleanup of obsolete resource files by comparing
2619/// the old and new lockfiles. It identifies and removes artifacts that have been:
2620/// - **Removed from manifest**: Dependencies deleted from `agpm.toml`
2621/// - **Changed to content-only**: Dependencies that changed from `install: true` to `install: false`
2622/// - **Relocated**: Files with changed `installed_at` paths due to:
2623///   - Relative path preservation (v0.3.18+)
2624///   - Custom target changes
2625///   - Dependency name changes
2626/// - **Replaced**: Resources that moved due to source or version changes
2627///
2628/// After removing files, it also cleans up any empty parent directories to prevent
2629/// directory accumulation over time.
2630///
2631/// # Cleanup Strategy
2632///
2633/// The function uses a **set-based difference algorithm**:
2634/// 1. Collects all `installed_at` paths from the new lockfile into a `HashSet`
2635///    (excluding resources with `install: false` which should not have files)
2636/// 2. Iterates through old lockfile resources
2637/// 3. For each old path not in the new set:
2638///    - Removes the file if it exists
2639///    - Recursively cleans empty parent directories
2640///    - Records the path for reporting
2641///
2642/// # Arguments
2643///
2644/// * `old_lockfile` - The previous lockfile state containing old installation paths
2645/// * `new_lockfile` - The current lockfile state with updated installation paths
2646/// * `project_dir` - The project root directory (usually contains `.claude/`)
2647///
2648/// # Returns
2649///
2650/// Returns `Ok(Vec<String>)` containing the list of `installed_at` paths that were
2651/// successfully removed. An empty vector indicates no artifacts needed cleanup.
2652///
2653/// # Errors
2654///
2655/// Returns an error if:
2656/// - File removal fails due to permissions or locks
2657/// - Directory cleanup encounters unexpected I/O errors
2658/// - File system operations fail for other reasons
2659///
2660/// # Examples
2661///
2662/// ## Basic Cleanup After Update
2663///
2664/// ```no_run
2665/// use agpm_cli::installer::cleanup_removed_artifacts;
2666/// use agpm_cli::lockfile::LockFile;
2667/// use std::path::Path;
2668///
2669/// # async fn example() -> anyhow::Result<()> {
2670/// let old_lockfile = LockFile::load(Path::new("agpm.lock"))?;
2671/// let new_lockfile = LockFile::new(); // After resolution
2672/// let project_dir = Path::new(".");
2673///
2674/// let removed = cleanup_removed_artifacts(&old_lockfile, &new_lockfile, project_dir).await?;
2675/// if !removed.is_empty() {
2676///     println!("Cleaned up {} artifact(s)", removed.len());
2677///     for path in removed {
2678///         println!("  - Removed: {}", path);
2679///     }
2680/// }
2681/// # Ok(())
2682/// # }
2683/// ```
2684///
2685/// ## Cleanup After Path Migration
2686///
2687/// When relative path preservation changes installation paths:
2688///
2689/// ```text
2690/// Old lockfile (v0.3.17):
2691///   installed_at: ".claude/agents/helper.md"
2692///
2693/// New lockfile (v0.3.18+):
2694///   installed_at: ".claude/agents/ai/helper.md"  # Preserved subdirectory
2695///
2696/// Cleanup removes: .claude/agents/helper.md
2697/// ```
2698///
2699/// ## Cleanup After Dependency Removal
2700///
2701/// ```no_run
2702/// # use agpm_cli::installer::cleanup_removed_artifacts;
2703/// # use agpm_cli::lockfile::{LockFile, LockedResource};
2704/// # use std::path::Path;
2705/// # async fn removal_example() -> anyhow::Result<()> {
2706/// // Old lockfile had 3 agents
2707/// let mut old_lockfile = LockFile::new();
2708/// old_lockfile.agents = vec![
2709///     // ... 3 agents including one at .claude/agents/removed.md
2710/// ];
2711///
2712/// // New lockfile only has 2 agents (one was removed from manifest)
2713/// let mut new_lockfile = LockFile::new();
2714/// new_lockfile.agents = vec![
2715///     // ... 2 agents, removed.md is gone
2716/// ];
2717///
2718/// let removed = cleanup_removed_artifacts(&old_lockfile, &new_lockfile, Path::new(".")).await?;
2719/// assert!(removed.contains(&".claude/agents/removed.md".to_string()));
2720/// # Ok(())
2721/// # }
2722/// ```
2723///
2724/// ## Integration with Install Command
2725///
2726/// This function is automatically called during `agpm install` when both old and
2727/// new lockfiles exist:
2728///
2729/// ```rust,ignore
2730/// // In src/cli/install.rs
2731/// if !self.frozen && !self.regenerate && lockfile_path.exists() {
2732///     if let Ok(old_lockfile) = LockFile::load(&lockfile_path) {
2733///         detect_tag_movement(&old_lockfile, &lockfile, self.quiet);
2734///
2735///         // Automatic cleanup of removed or moved artifacts
2736///         if let Ok(removed) = cleanup_removed_artifacts(
2737///             &old_lockfile,
2738///             &lockfile,
2739///             actual_project_dir,
2740///         ).await && !removed.is_empty() && !self.quiet {
2741///             println!("🗑️  Cleaned up {} moved or removed artifact(s)", removed.len());
2742///         }
2743///     }
2744/// }
2745/// ```
2746///
2747/// # Performance
2748///
2749/// - **Time Complexity**: O(n + m) where n = old resources, m = new resources
2750/// - **Space Complexity**: O(m) for the `HashSet` of new paths
2751/// - **I/O Operations**: One file removal per obsolete artifact
2752/// - **Directory Cleanup**: Walks up parent directories once per removed file
2753///
2754/// The function is highly efficient as it:
2755/// - Uses `HashSet` for O(1) path lookups
2756/// - Only performs I/O for files that actually exist
2757/// - Cleans directories recursively but stops at first non-empty directory
2758///
2759/// # Safety
2760///
2761/// - Only removes files explicitly tracked in the old lockfile
2762/// - Never removes files outside the project directory
2763/// - Stops directory cleanup at `.claude/` boundary
2764/// - Handles concurrent file access gracefully (ENOENT is not an error)
2765///
2766/// # Use Cases
2767///
2768/// ## Relative Path Migration (v0.3.18+)
2769///
2770/// When upgrading to v0.3.18+, resource paths change to preserve directory structure:
2771/// ```text
2772/// Before: .claude/agents/helper.md  (flat)
2773/// After:  .claude/agents/ai/helper.md  (nested)
2774/// ```
2775/// This function removes the old flat file automatically.
2776///
2777/// ## Dependency Reorganization
2778///
2779/// When reorganizing dependencies with custom targets:
2780/// ```toml
2781/// # Before
2782/// [agents]
2783/// helper = { source = "community", path = "agents/helper.md" }
2784///
2785/// # After (with custom target)
2786/// [agents]
2787/// helper = { source = "community", path = "agents/helper.md", target = "tools" }
2788/// ```
2789/// Old file at `.claude/agents/helper.md` is removed, new file at
2790/// `.claude/agents/tools/helper.md` is installed.
2791///
2792/// ## Manifest Cleanup
2793///
2794/// Simply removing dependencies from `agpm.toml` triggers automatic cleanup:
2795/// ```toml
2796/// # Remove unwanted dependency
2797/// [agents]
2798/// # old-agent = { ... }  # Commented out or deleted
2799/// ```
2800/// The next `agpm install` removes the old agent file automatically.
2801///
2802/// # Version History
2803///
2804/// - **v0.3.18**: Introduced to handle relative path preservation and custom target changes
2805/// - Works in conjunction with `cleanup_empty_dirs()` for comprehensive cleanup
2806pub async fn cleanup_removed_artifacts(
2807    old_lockfile: &LockFile,
2808    new_lockfile: &LockFile,
2809    project_dir: &std::path::Path,
2810) -> Result<Vec<String>> {
2811    use std::collections::HashSet;
2812
2813    let mut removed = Vec::new();
2814
2815    // Collect installed paths from new lockfile (only resources that should have files on disk)
2816    // Resources with install=false are content-only and should not have files
2817    let new_paths: HashSet<String> = new_lockfile
2818        .all_resources()
2819        .into_iter()
2820        .filter(|r| r.install != Some(false))
2821        .map(|r| r.installed_at.clone())
2822        .collect();
2823
2824    // Check each old resource
2825    for old_resource in old_lockfile.all_resources() {
2826        // If the old path doesn't exist in new lockfile, it needs to be removed
2827        if !new_paths.contains(&old_resource.installed_at) {
2828            let full_path = project_dir.join(&old_resource.installed_at);
2829
2830            // Only remove if the file actually exists
2831            if full_path.exists() {
2832                tokio::fs::remove_file(&full_path).await.with_context(|| {
2833                    format!("Failed to remove old artifact: {}", full_path.display())
2834                })?;
2835
2836                removed.push(old_resource.installed_at.clone());
2837
2838                // Try to clean up empty parent directories
2839                cleanup_empty_dirs(&full_path).await?;
2840            }
2841        }
2842    }
2843
2844    Ok(removed)
2845}
2846
2847/// Recursively removes empty parent directories up to the project root.
2848///
2849/// This helper function performs bottom-up directory cleanup after file removal.
2850/// It walks up the directory tree from a given file path, removing empty parent
2851/// directories until it encounters:
2852/// - A non-empty directory (containing other files or subdirectories)
2853/// - The `.claude` directory boundary (cleanup stops here for safety)
2854/// - The project root (no parent directory)
2855/// - A directory that cannot be removed (permissions, locks, etc.)
2856///
2857/// This prevents accumulation of empty directory trees over time as resources
2858/// are removed, renamed, or relocated.
2859///
2860/// # Cleanup Algorithm
2861///
2862/// The function implements a **safe recursive cleanup** strategy:
2863/// 1. Starts at the parent directory of the given file path
2864/// 2. Attempts to remove the directory
2865/// 3. If successful (directory was empty), moves to parent and repeats
2866/// 4. If unsuccessful, stops immediately (directory has content or other issues)
2867/// 5. Always stops at `.claude/` directory to avoid over-cleanup
2868///
2869/// # Safety Boundaries
2870///
2871/// The function enforces strict boundaries to prevent accidental data loss:
2872/// - **`.claude/` boundary**: Never removes the `.claude` directory itself
2873/// - **Project root**: Stops if parent directory is None
2874/// - **Non-empty guard**: Only removes truly empty directories
2875/// - **Error tolerance**: ENOENT (directory not found) is not considered an error
2876///
2877/// # Arguments
2878///
2879/// * `file_path` - The path to the removed file whose parent directories should be cleaned.
2880///   Typically this is the full path to a resource file that was just deleted.
2881///
2882/// # Returns
2883///
2884/// Returns `Ok(())` in all normal cases, including:
2885/// - All empty directories successfully removed
2886/// - Cleanup stopped at a non-empty directory
2887/// - Directory already doesn't exist (ENOENT)
2888///
2889/// # Errors
2890///
2891/// Returns an error only for unexpected I/O failures during directory removal
2892/// that are not normal "directory not empty" or "not found" errors.
2893///
2894/// # Examples
2895///
2896/// ## Basic Directory Cleanup
2897///
2898/// ```ignore
2899/// # use agpm_cli::installer::cleanup_empty_dirs;
2900/// # use std::path::Path;
2901/// # use std::fs;
2902/// # async fn example() -> anyhow::Result<()> {
2903/// // After removing: .claude/agents/rust/specialized/expert.md
2904/// let file_path = Path::new(".claude/agents/rust/specialized/expert.md");
2905///
2906/// // If this was the last file in specialized/, the directory will be removed
2907/// // If specialized/ was the last item in rust/, that will be removed too
2908/// // Cleanup stops at .claude/agents/ or when it finds a non-empty directory
2909/// cleanup_empty_dirs(file_path).await?;
2910/// # Ok(())
2911/// # }
2912/// ```
2913///
2914/// ## Cleanup Scenarios
2915///
2916/// ### Scenario 1: Full Cleanup
2917///
2918/// ```text
2919/// Before:
2920///   .claude/agents/rust/specialized/expert.md  (only file in hierarchy)
2921///
2922/// After removing expert.md:
2923///   cleanup_empty_dirs() removes:
2924///   - .claude/agents/rust/specialized/  (now empty)
2925///   - .claude/agents/rust/              (now empty)
2926///   Stops at .claude/agents/ (keeps base directory)
2927/// ```
2928///
2929/// ### Scenario 2: Partial Cleanup
2930///
2931/// ```text
2932/// Before:
2933///   .claude/agents/rust/specialized/expert.md
2934///   .claude/agents/rust/specialized/tester.md
2935///   .claude/agents/rust/basic.md
2936///
2937/// After removing expert.md:
2938///   .claude/agents/rust/specialized/ still has tester.md
2939///   cleanup_empty_dirs() stops at specialized/ (not empty)
2940/// ```
2941///
2942/// ### Scenario 3: Boundary Enforcement
2943///
2944/// ```text
2945/// After removing: .claude/agents/only-agent.md
2946///
2947/// cleanup_empty_dirs() attempts to remove:
2948/// - .claude/agents/ (empty now)
2949/// - But stops because parent is .claude/ (boundary)
2950///
2951/// Result: .claude/agents/ remains (empty but preserved)
2952/// ```
2953///
2954/// ## Integration with `cleanup_removed_artifacts`
2955///
2956/// This function is called automatically by [`cleanup_removed_artifacts`]
2957/// after each file removal:
2958///
2959/// ```rust,ignore
2960/// for old_resource in old_lockfile.all_resources() {
2961///     if !new_paths.contains(&old_resource.installed_at) {
2962///         let full_path = project_dir.join(&old_resource.installed_at);
2963///
2964///         if full_path.exists() {
2965///             tokio::fs::remove_file(&full_path).await?;
2966///             removed.push(old_resource.installed_at.clone());
2967///
2968///             // Automatic directory cleanup after file removal
2969///             cleanup_empty_dirs(&full_path).await?;
2970///         }
2971///     }
2972/// }
2973/// ```
2974///
2975/// # Performance
2976///
2977/// - **Time Complexity**: O(d) where d = directory depth from file to `.claude/`
2978/// - **I/O Operations**: One `remove_dir` attempt per directory level
2979/// - **Early Termination**: Stops immediately on first non-empty directory
2980///
2981/// The function is extremely efficient as it:
2982/// - Only walks up the directory tree (no scanning of siblings)
2983/// - Stops at the first non-empty directory (no unnecessary attempts)
2984/// - Uses atomic `remove_dir` which fails fast on non-empty directories
2985/// - Typical depth is 2-4 levels (.claude/agents/subdir/file.md)
2986///
2987/// # Error Handling Strategy
2988///
2989/// The function differentiates between expected and unexpected errors:
2990///
2991/// | Error Kind | Interpretation | Action |
2992/// |------------|----------------|--------|
2993/// | `Ok(())` | Directory was empty and removed | Continue up tree |
2994/// | `ENOENT` | Directory doesn't exist | Continue up tree (race condition) |
2995/// | `ENOTEMPTY` | Directory has contents | Stop cleanup (expected) |
2996/// | `EPERM` | No permission | Stop cleanup (expected) |
2997/// | Other | Unexpected I/O error | Propagate error |
2998///
2999/// In practice, most errors simply stop the cleanup process without failing
3000/// the overall operation, as the goal is best-effort cleanup.
3001///
3002/// # Thread Safety
3003///
3004/// This function is safe for concurrent use because:
3005/// - Uses async filesystem operations from `tokio::fs`
3006/// - `remove_dir` is atomic (succeeds only if directory is empty)
3007/// - ENOENT handling accounts for race conditions
3008/// - Multiple concurrent calls won't interfere with each other
3009///
3010/// # Use Cases
3011///
3012/// ## After Pattern-Based Installation Changes
3013///
3014/// When pattern matches change, old directory structures may become empty:
3015/// ```toml
3016/// # Old: pattern matched agents/rust/expert.md, agents/rust/testing.md
3017/// # New: pattern only matches agents/rust/expert.md
3018///
3019/// # testing.md removed → agents/rust/ might now be empty
3020/// ```
3021///
3022/// ## After Custom Target Changes
3023///
3024/// Custom target changes can leave old directory structures empty:
3025/// ```toml
3026/// # Old: target = "tools"  → .claude/agents/tools/helper.md
3027/// # New: target = "utils" → .claude/agents/utils/helper.md
3028///
3029/// # .claude/agents/tools/ might now be empty
3030/// ```
3031///
3032/// ## After Dependency Removal
3033///
3034/// Removing the last dependency in a category may leave empty subdirectories:
3035/// ```toml
3036/// [agents]
3037/// # Removed: python-helper (was in agents/python/)
3038/// # Only agents/rust/ remains
3039///
3040/// # .claude/agents/python/ should be cleaned up
3041/// ```
3042///
3043/// # Design Rationale
3044///
3045/// This function exists to solve the "directory accumulation problem":
3046/// - Without cleanup: Empty directories accumulate over time
3047/// - With cleanup: Project structure stays clean and organized
3048/// - Safety boundaries: Prevents accidental removal of important directories
3049/// - Best-effort approach: Cleanup failures don't block main operations
3050///
3051/// # Version History
3052///
3053/// - **v0.3.18**: Introduced alongside [`cleanup_removed_artifacts`]
3054/// - Complements relative path preservation by cleaning up old directory structures
3055async fn cleanup_empty_dirs(file_path: &std::path::Path) -> Result<()> {
3056    let mut current = file_path.parent();
3057
3058    while let Some(dir) = current {
3059        // Stop if we've reached .claude or the project root
3060        if dir.ends_with(".claude") || dir.parent().is_none() {
3061            break;
3062        }
3063
3064        // Try to remove the directory (will only succeed if empty)
3065        match tokio::fs::remove_dir(dir).await {
3066            Ok(()) => {
3067                // Directory was empty and removed, continue up
3068                current = dir.parent();
3069            }
3070            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
3071                // Directory doesn't exist, continue up
3072                current = dir.parent();
3073            }
3074            Err(_) => {
3075                // Directory is not empty or we don't have permission, stop here
3076                break;
3077            }
3078        }
3079    }
3080
3081    Ok(())
3082}
3083
3084#[cfg(test)]
3085mod tests {
3086    use super::*;
3087    use tempfile::TempDir;
3088
3089    fn create_test_locked_resource(name: &str, is_local: bool) -> LockedResource {
3090        if is_local {
3091            LockedResource {
3092                name: name.to_string(),
3093                source: None,
3094                url: None,
3095                path: "test.md".to_string(),
3096                version: None,
3097                resolved_commit: None,
3098                checksum: String::new(),
3099                installed_at: String::new(),
3100                dependencies: vec![],
3101                resource_type: crate::core::ResourceType::Agent,
3102                tool: Some("claude-code".to_string()),
3103                manifest_alias: None,
3104                applied_patches: std::collections::HashMap::new(),
3105                install: None,
3106            }
3107        } else {
3108            LockedResource {
3109                name: name.to_string(),
3110                source: Some("test_source".to_string()),
3111                url: Some("https://github.com/test/repo.git".to_string()),
3112                path: "resources/test.md".to_string(),
3113                version: Some("v1.0.0".to_string()),
3114                resolved_commit: Some("abc123".to_string()),
3115                checksum: "sha256:test".to_string(),
3116                installed_at: String::new(),
3117                dependencies: vec![],
3118                resource_type: crate::core::ResourceType::Agent,
3119                tool: Some("claude-code".to_string()),
3120                manifest_alias: None,
3121                applied_patches: std::collections::HashMap::new(),
3122                install: None,
3123            }
3124        }
3125    }
3126
3127    #[tokio::test]
3128    async fn test_install_resource_local() {
3129        let temp_dir = TempDir::new().unwrap();
3130        let project_dir = temp_dir.path();
3131        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3132
3133        // Create a local markdown file
3134        let local_file = temp_dir.path().join("test.md");
3135        std::fs::write(&local_file, "# Test Resource\nThis is a test").unwrap();
3136
3137        // Create a locked resource pointing to the local file
3138        let mut entry = create_test_locked_resource("local-test", true);
3139        entry.path = local_file.to_string_lossy().to_string();
3140
3141        // Create install context
3142        let context = InstallContext::new(
3143            project_dir,
3144            &cache,
3145            false,
3146            false,
3147            None,
3148            None,
3149            None,
3150            None,
3151            None,
3152            None,
3153        );
3154
3155        // Install the resource
3156        let result = install_resource(&entry, "agents", &context).await;
3157        assert!(result.is_ok(), "Failed to install local resource: {:?}", result);
3158
3159        // Should be installed the first time
3160        let (installed, _checksum, _applied_patches) = result.unwrap();
3161        assert!(installed, "Should have installed new resource");
3162
3163        // Verify the file was installed
3164        let expected_path = project_dir.join("agents").join("local-test.md");
3165        assert!(expected_path.exists(), "Installed file not found");
3166
3167        // Verify content
3168        let content = std::fs::read_to_string(expected_path).unwrap();
3169        assert_eq!(content, "# Test Resource\nThis is a test");
3170    }
3171
3172    #[tokio::test]
3173    async fn test_install_resource_with_custom_path() {
3174        let temp_dir = TempDir::new().unwrap();
3175        let project_dir = temp_dir.path();
3176        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3177
3178        // Create a local markdown file
3179        let local_file = temp_dir.path().join("test.md");
3180        std::fs::write(&local_file, "# Custom Path Test").unwrap();
3181
3182        // Create a locked resource with custom installation path
3183        let mut entry = create_test_locked_resource("custom-test", true);
3184        entry.path = local_file.to_string_lossy().to_string();
3185        entry.installed_at = "custom/location/resource.md".to_string();
3186
3187        // Create install context
3188        let context = InstallContext::new(
3189            project_dir,
3190            &cache,
3191            false,
3192            false,
3193            None,
3194            None,
3195            None,
3196            None,
3197            None,
3198            None,
3199        );
3200
3201        // Install the resource
3202        let result = install_resource(&entry, "agents", &context).await;
3203        assert!(result.is_ok());
3204        let (installed, _checksum, _applied_patches) = result.unwrap();
3205        assert!(installed, "Should have installed new resource");
3206
3207        // Verify the file was installed at custom path
3208        let expected_path = project_dir.join("custom/location/resource.md");
3209        assert!(expected_path.exists(), "File not installed at custom path");
3210    }
3211
3212    #[tokio::test]
3213    async fn test_install_resource_local_missing_file() {
3214        let temp_dir = TempDir::new().unwrap();
3215        let project_dir = temp_dir.path();
3216        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3217
3218        // Create a locked resource pointing to non-existent file
3219        let mut entry = create_test_locked_resource("missing-test", true);
3220        entry.path = "/non/existent/file.md".to_string();
3221
3222        // Create install context
3223        let context = InstallContext::new(
3224            project_dir,
3225            &cache,
3226            false,
3227            false,
3228            None,
3229            None,
3230            None,
3231            None,
3232            None,
3233            None,
3234        );
3235
3236        // Try to install the resource
3237        let result = install_resource(&entry, "agents", &context).await;
3238        assert!(result.is_err());
3239        let error_msg = result.unwrap_err().to_string();
3240        assert!(error_msg.contains("Local file") && error_msg.contains("not found"));
3241    }
3242
    #[tokio::test]
    async fn test_install_resource_invalid_markdown_frontmatter() {
        let temp_dir = TempDir::new().unwrap();
        let project_dir = temp_dir.path();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create a markdown file with invalid frontmatter
        // (the stray `[` makes the YAML block unparseable).
        let local_file = temp_dir.path().join("invalid.md");
        std::fs::write(&local_file, "---\ninvalid: yaml: [\n---\nContent").unwrap();

        // Create a locked resource
        let mut entry = create_test_locked_resource("invalid-test", true);
        entry.path = local_file.to_string_lossy().to_string();

        // Create install context
        let context = InstallContext::new(
            project_dir,
            &cache,
            false,
            false,
            None,
            None,
            None,
            None,
            None,
            None,
        );

        // Install should now succeed even with invalid frontmatter (just emits a warning)
        let result = install_resource(&entry, "agents", &context).await;
        assert!(result.is_ok());
        let (installed, _checksum, _applied_patches) = result.unwrap();
        assert!(installed);

        // Verify the file was installed
        let dest_path = project_dir.join("agents/invalid-test.md");
        assert!(dest_path.exists());

        // Content should include the entire file since frontmatter was invalid
        // — nothing is stripped when the YAML cannot be parsed.
        let installed_content = std::fs::read_to_string(&dest_path).unwrap();
        assert!(installed_content.contains("---"));
        assert!(installed_content.contains("invalid: yaml:"));
        assert!(installed_content.contains("Content"));
    }
3287
3288    #[tokio::test]
3289    async fn test_install_resource_with_progress() {
3290        let temp_dir = TempDir::new().unwrap();
3291        let project_dir = temp_dir.path();
3292        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3293        let pb = ProgressBar::new(1);
3294
3295        // Create a local markdown file
3296        let local_file = temp_dir.path().join("test.md");
3297        std::fs::write(&local_file, "# Progress Test").unwrap();
3298
3299        // Create a locked resource
3300        let mut entry = create_test_locked_resource("progress-test", true);
3301        entry.path = local_file.to_string_lossy().to_string();
3302
3303        // Create install context
3304        let context = InstallContext::new(
3305            project_dir,
3306            &cache,
3307            false,
3308            false,
3309            None,
3310            None,
3311            None,
3312            None,
3313            None,
3314            None,
3315        );
3316
3317        // Install with progress
3318        let result = install_resource_with_progress(&entry, "agents", &context, &pb).await;
3319        assert!(result.is_ok());
3320
3321        // Verify installation
3322        let expected_path = project_dir.join("agents").join("progress-test.md");
3323        assert!(expected_path.exists());
3324    }
3325
3326    #[tokio::test]
3327    async fn test_install_resources_empty() {
3328        let temp_dir = TempDir::new().unwrap();
3329        let project_dir = temp_dir.path();
3330        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3331
3332        // Create empty lockfile and manifest
3333        let lockfile = LockFile::new();
3334        let manifest = Manifest::new();
3335
3336        let (count, _, _) = install_resources(
3337            ResourceFilter::All,
3338            &Arc::new(lockfile),
3339            &manifest,
3340            project_dir,
3341            cache,
3342            false,
3343            None,
3344            None,
3345            false, // verbose
3346        )
3347        .await
3348        .unwrap();
3349
3350        assert_eq!(count, 0, "Should install 0 resources from empty lockfile");
3351    }
3352
    #[tokio::test]
    async fn test_install_resources_multiple() {
        let temp_dir = TempDir::new().unwrap();
        let project_dir = temp_dir.path();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create test markdown files
        let file1 = temp_dir.path().join("agent.md");
        let file2 = temp_dir.path().join("snippet.md");
        let file3 = temp_dir.path().join("command.md");
        std::fs::write(&file1, "# Agent").unwrap();
        std::fs::write(&file2, "# Snippet").unwrap();
        std::fs::write(&file3, "# Command").unwrap();

        // Create lockfile with multiple resources
        let mut lockfile = LockFile::new();
        let mut agent = create_test_locked_resource("test-agent", true);
        agent.path = file1.to_string_lossy().to_string();
        agent.installed_at = ".claude/agents/test-agent.md".to_string();
        lockfile.agents.push(agent);

        // Snippets are routed through the agpm tool, so they install under
        // `.agpm/` rather than `.claude/`.
        let mut snippet = create_test_locked_resource("test-snippet", true);
        snippet.path = file2.to_string_lossy().to_string();
        snippet.resource_type = crate::core::ResourceType::Snippet;
        snippet.tool = Some("agpm".to_string()); // Snippets use agpm tool
        snippet.installed_at = ".agpm/snippets/test-snippet.md".to_string();
        lockfile.snippets.push(snippet);

        let mut command = create_test_locked_resource("test-command", true);
        command.path = file3.to_string_lossy().to_string();
        command.resource_type = crate::core::ResourceType::Command;
        command.installed_at = ".claude/commands/test-command.md".to_string();
        lockfile.commands.push(command);

        let manifest = Manifest::new();

        // Install everything in the lockfile in a single pass.
        let (count, _, _) = install_resources(
            ResourceFilter::All,
            &Arc::new(lockfile),
            &manifest,
            project_dir,
            cache,
            false,
            None,
            None,
            false, // verbose
        )
        .await
        .unwrap();

        assert_eq!(count, 3, "Should install 3 resources");

        // Verify all files were installed (using default directories)
        assert!(project_dir.join(".claude/agents/test-agent.md").exists());
        assert!(project_dir.join(".agpm/snippets/test-snippet.md").exists());
        assert!(project_dir.join(".claude/commands/test-command.md").exists());
    }
3410
    #[tokio::test]
    async fn test_install_updated_resources() {
        let temp_dir = TempDir::new().unwrap();
        let project_dir = temp_dir.path();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create test markdown files
        let file1 = temp_dir.path().join("agent.md");
        let file2 = temp_dir.path().join("snippet.md");
        std::fs::write(&file1, "# Updated Agent").unwrap();
        std::fs::write(&file2, "# Updated Snippet").unwrap();

        // Create lockfile with resources
        let mut lockfile = LockFile::new();
        let mut agent = create_test_locked_resource("test-agent", true);
        agent.path = file1.to_string_lossy().to_string();
        lockfile.agents.push(agent);

        // NOTE(review): this entry keeps the helper's default resource_type
        // (Agent) even though it is pushed to `lockfile.snippets` — confirm
        // whether installation keys off the collection or the field.
        let mut snippet = create_test_locked_resource("test-snippet", true);
        snippet.path = file2.to_string_lossy().to_string();
        lockfile.snippets.push(snippet);

        let manifest = Manifest::new();
        let lockfile = Arc::new(lockfile);

        // Define updates (only agent is updated)
        let updates = vec![(
            "test-agent".to_string(),
            None, // source
            "v1.0.0".to_string(),
            "v1.1.0".to_string(),
        )];

        // Create install context
        let context = InstallContext::new(
            project_dir,
            &cache,
            false,
            false,
            Some(&manifest),
            Some(&lockfile),
            None,
            None,
            None,
            None,
        );

        let count = install_updated_resources(
            &updates, &lockfile, &manifest, &context, None, false, // quiet
        )
        .await
        .unwrap();

        assert_eq!(count, 1, "Should install 1 updated resource");
        assert!(project_dir.join(".claude/agents/test-agent.md").exists());
        // NOTE(review): sibling tests install snippets under `.agpm/snippets/`,
        // so this negative check would pass even if the snippet *had* been
        // installed — consider also asserting on the `.agpm` path.
        assert!(!project_dir.join(".claude/snippets/test-snippet.md").exists()); // Not updated
    }
3468
3469    #[tokio::test]
3470    async fn test_install_updated_resources_quiet_mode() {
3471        let temp_dir = TempDir::new().unwrap();
3472        let project_dir = temp_dir.path();
3473        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3474
3475        // Create test markdown file
3476        let file = temp_dir.path().join("command.md");
3477        std::fs::write(&file, "# Command").unwrap();
3478
3479        // Create lockfile
3480        let mut lockfile = LockFile::new();
3481        let mut command = create_test_locked_resource("test-command", true);
3482        command.path = file.to_string_lossy().to_string();
3483        command.resource_type = crate::core::ResourceType::Command;
3484        lockfile.commands.push(command);
3485
3486        let manifest = Manifest::new();
3487        let lockfile = Arc::new(lockfile);
3488
3489        let updates = vec![(
3490            "test-command".to_string(),
3491            None, // source
3492            "v1.0.0".to_string(),
3493            "v2.0.0".to_string(),
3494        )];
3495
3496        // Create install context
3497        let context = InstallContext::new(
3498            project_dir,
3499            &cache,
3500            false,
3501            false,
3502            Some(&manifest),
3503            Some(&lockfile),
3504            None,
3505            None,
3506            None,
3507            None,
3508        );
3509
3510        let count = install_updated_resources(
3511            &updates, &lockfile, &manifest, &context, None, true, // quiet mode
3512        )
3513        .await
3514        .unwrap();
3515
3516        assert_eq!(count, 1);
3517        assert!(project_dir.join(".claude/commands/test-command.md").exists());
3518    }
3519
3520    #[tokio::test]
3521    async fn test_install_resource_for_parallel() {
3522        let temp_dir = TempDir::new().unwrap();
3523        let project_dir = temp_dir.path();
3524        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3525
3526        // Create a local markdown file
3527        let local_file = temp_dir.path().join("parallel.md");
3528        std::fs::write(&local_file, "# Parallel Test").unwrap();
3529
3530        // Create a locked resource
3531        let mut entry = create_test_locked_resource("parallel-test", true);
3532        entry.path = local_file.to_string_lossy().to_string();
3533
3534        // Create install context
3535        let context = InstallContext::new(
3536            project_dir,
3537            &cache,
3538            false,
3539            false,
3540            None,
3541            None,
3542            None,
3543            None,
3544            None,
3545            None,
3546        );
3547
3548        // Install using the parallel function
3549        let result = install_resource_for_parallel(&entry, "agents", &context).await;
3550        assert!(result.is_ok());
3551
3552        // Verify installation
3553        let expected_path = project_dir.join("agents").join("parallel-test.md");
3554        assert!(expected_path.exists());
3555    }
3556
3557    #[tokio::test]
3558    async fn test_install_resource_creates_nested_directories() {
3559        let temp_dir = TempDir::new().unwrap();
3560        let project_dir = temp_dir.path();
3561        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3562
3563        // Create a local markdown file
3564        let local_file = temp_dir.path().join("nested.md");
3565        std::fs::write(&local_file, "# Nested Test").unwrap();
3566
3567        // Create a locked resource with deeply nested path
3568        let mut entry = create_test_locked_resource("nested-test", true);
3569        entry.path = local_file.to_string_lossy().to_string();
3570        entry.installed_at = "very/deeply/nested/path/resource.md".to_string();
3571
3572        // Create install context
3573        let context = InstallContext::new(
3574            project_dir,
3575            &cache,
3576            false,
3577            false,
3578            None,
3579            None,
3580            None,
3581            None,
3582            None,
3583            None,
3584        );
3585
3586        // Install the resource
3587        let result = install_resource(&entry, "agents", &context).await;
3588        assert!(result.is_ok());
3589        let (installed, _checksum, _applied_patches) = result.unwrap();
3590        assert!(installed, "Should have installed new resource");
3591
3592        // Verify nested directories were created
3593        let expected_path = project_dir.join("very/deeply/nested/path/resource.md");
3594        assert!(expected_path.exists());
3595    }
3596
3597    #[tokio::test]
3598    async fn test_update_gitignore_creates_new_file() {
3599        let temp_dir = TempDir::new().unwrap();
3600        let project_dir = temp_dir.path();
3601
3602        // Create a lockfile with some resources
3603        let mut lockfile = LockFile::new();
3604
3605        // Add agent with installed path
3606        let mut agent = create_test_locked_resource("test-agent", true);
3607        agent.installed_at = ".claude/agents/test-agent.md".to_string();
3608        lockfile.agents.push(agent);
3609
3610        // Add snippet with installed path
3611        let mut snippet = create_test_locked_resource("test-snippet", true);
3612        snippet.installed_at = ".agpm/snippets/test-snippet.md".to_string();
3613        lockfile.snippets.push(snippet);
3614
3615        // Call update_gitignore
3616        let result = update_gitignore(&lockfile, project_dir, true);
3617        assert!(result.is_ok());
3618
3619        // Check that .gitignore was created
3620        let gitignore_path = project_dir.join(".gitignore");
3621        assert!(gitignore_path.exists(), "Gitignore file should be created");
3622
3623        // Check content
3624        let content = std::fs::read_to_string(&gitignore_path).unwrap();
3625        assert!(content.contains("AGPM managed entries"));
3626        assert!(content.contains(".claude/agents/test-agent.md"));
3627        assert!(content.contains(".agpm/snippets/test-snippet.md"));
3628    }
3629
3630    #[tokio::test]
3631    async fn test_update_gitignore_disabled() {
3632        let temp_dir = TempDir::new().unwrap();
3633        let project_dir = temp_dir.path();
3634
3635        let lockfile = LockFile::new();
3636
3637        // Call with disabled flag
3638        let result = update_gitignore(&lockfile, project_dir, false);
3639        assert!(result.is_ok());
3640
3641        // Check that .gitignore was NOT created
3642        let gitignore_path = project_dir.join(".gitignore");
3643        assert!(!gitignore_path.exists(), "Gitignore should not be created when disabled");
3644    }
3645
    #[tokio::test]
    async fn test_update_gitignore_preserves_user_entries() {
        let temp_dir = TempDir::new().unwrap();
        let project_dir = temp_dir.path();

        // Create .claude directory for resources
        let claude_dir = project_dir.join(".claude");
        ensure_dir(&claude_dir).unwrap();

        // Create existing gitignore with user entries at project root.
        // The `\n\` continuations strip the leading indentation, so every
        // entry in the written file starts at column 0.
        let gitignore_path = project_dir.join(".gitignore");
        let existing_content = "# User comment\n\
                               user-file.txt\n\
                               # AGPM managed entries - do not edit below this line\n\
                               .claude/agents/old-entry.md\n\
                               # End of AGPM managed entries\n";
        std::fs::write(&gitignore_path, existing_content).unwrap();

        // Create lockfile with new resources
        let mut lockfile = LockFile::new();
        let mut agent = create_test_locked_resource("new-agent", true);
        agent.installed_at = ".claude/agents/new-agent.md".to_string();
        lockfile.agents.push(agent);

        // Update gitignore
        let result = update_gitignore(&lockfile, project_dir, true);
        assert!(result.is_ok());

        // Check that user entries (outside the managed section) are preserved
        let updated_content = std::fs::read_to_string(&gitignore_path).unwrap();
        assert!(updated_content.contains("user-file.txt"));
        assert!(updated_content.contains("# User comment"));

        // Check that new entries are added
        assert!(updated_content.contains(".claude/agents/new-agent.md"));

        // Check that old managed entries are replaced, not appended to
        assert!(!updated_content.contains(".claude/agents/old-entry.md"));
    }
3685
3686    #[tokio::test]
3687    async fn test_update_gitignore_handles_external_paths() {
3688        let temp_dir = TempDir::new().unwrap();
3689        let project_dir = temp_dir.path();
3690
3691        let mut lockfile = LockFile::new();
3692
3693        // Add resource installed outside .claude
3694        let mut script = create_test_locked_resource("test-script", true);
3695        script.installed_at = "scripts/test.sh".to_string();
3696        lockfile.scripts.push(script);
3697
3698        // Add resource inside .claude
3699        let mut agent = create_test_locked_resource("test-agent", true);
3700        agent.installed_at = ".claude/agents/test.md".to_string();
3701        lockfile.agents.push(agent);
3702
3703        let result = update_gitignore(&lockfile, project_dir, true);
3704        assert!(result.is_ok());
3705
3706        let gitignore_path = project_dir.join(".gitignore");
3707        let content = std::fs::read_to_string(&gitignore_path).unwrap();
3708
3709        // External path should be as-is
3710        assert!(content.contains("scripts/test.sh"));
3711
3712        // Internal path should be as-is
3713        assert!(content.contains(".claude/agents/test.md"));
3714    }
3715
    #[tokio::test]
    async fn test_update_gitignore_migrates_ccpm_entries() {
        let temp_dir = TempDir::new().unwrap();
        let project_dir = temp_dir.path();

        // Create .claude directory
        tokio::fs::create_dir_all(project_dir.join(".claude/agents")).await.unwrap();

        // Create a gitignore with legacy CCPM markers, sandwiched between
        // user-owned entries on both sides.
        let gitignore_path = project_dir.join(".gitignore");
        let legacy_content = r#"# User's custom entries
temp/

# CCPM managed entries - do not edit below this line
.claude/agents/old-ccpm-agent.md
.claude/commands/old-ccpm-command.md
# End of CCPM managed entries

# More user entries
local-config.json
"#;
        tokio::fs::write(&gitignore_path, legacy_content).await.unwrap();

        // Create a new lockfile with AGPM entries
        let mut lockfile = LockFile::new();
        let mut agent = create_test_locked_resource("new-agent", true);
        agent.installed_at = ".claude/agents/new-agent.md".to_string();
        lockfile.agents.push(agent);

        // Update gitignore — expected to migrate the legacy section in place.
        let result = update_gitignore(&lockfile, project_dir, true);
        assert!(result.is_ok());

        // Read updated content
        let updated_content = tokio::fs::read_to_string(&gitignore_path).await.unwrap();

        // User entries before CCPM section should be preserved
        assert!(updated_content.contains("temp/"));

        // User entries after CCPM section should be preserved
        assert!(updated_content.contains("local-config.json"));

        // Should have AGPM markers now (not CCPM)
        assert!(updated_content.contains("# AGPM managed entries - do not edit below this line"));
        assert!(updated_content.contains("# End of AGPM managed entries"));

        // Old CCPM markers should be removed
        assert!(!updated_content.contains("# CCPM managed entries"));
        assert!(!updated_content.contains("# End of CCPM managed entries"));

        // Old CCPM entries should be removed
        assert!(!updated_content.contains("old-ccpm-agent.md"));
        assert!(!updated_content.contains("old-ccpm-command.md"));

        // New AGPM entries should be added
        assert!(updated_content.contains(".claude/agents/new-agent.md"));
    }
3773
3774    #[tokio::test]
3775    async fn test_install_updated_resources_not_found() {
3776        let temp_dir = TempDir::new().unwrap();
3777        let project_dir = temp_dir.path();
3778        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
3779
3780        let lockfile = Arc::new(LockFile::new());
3781        let manifest = Manifest::new();
3782
3783        // Try to update a resource that doesn't exist
3784        let updates = vec![(
3785            "non-existent".to_string(),
3786            None, // source
3787            "v1.0.0".to_string(),
3788            "v2.0.0".to_string(),
3789        )];
3790
3791        // Create install context
3792        let context = InstallContext::new(
3793            project_dir,
3794            &cache,
3795            false,
3796            false,
3797            Some(&manifest),
3798            Some(&lockfile),
3799            None,
3800            None,
3801            None,
3802            None,
3803        );
3804
3805        let count =
3806            install_updated_resources(&updates, &lockfile, &manifest, &context, None, false)
3807                .await
3808                .unwrap();
3809
3810        assert_eq!(count, 0, "Should install 0 resources when not found");
3811    }
3812}