// agpm_cli/installer/mod.rs
1//! Shared installation utilities for AGPM resources.
2//!
3//! This module provides common functionality for installing resources from
4//! lockfile entries to the project directory. It's shared between the install
5//! and update commands to avoid code duplication. The module includes both
6//! installation logic and automatic cleanup of removed or relocated artifacts.
7//!
8//! # SHA-Based Parallel Installation Architecture
9//!
10//! The installer uses SHA-based worktrees for optimal parallel resource installation:
11//! - **SHA-based worktrees**: Each unique commit gets one worktree for maximum deduplication
12//! - **Pre-resolved SHAs**: All versions resolved to SHAs before installation begins
13//! - **Concurrency control**: Direct parallelism control via --max-parallel flag
14//! - **Context-aware logging**: Each operation includes dependency name for debugging
15//! - **Efficient cleanup**: Worktrees are managed by the cache layer for reuse
16//! - **Pre-warming**: Worktrees created upfront to minimize installation latency
17//! - **Automatic artifact cleanup**: Removes old files when paths change or dependencies are removed
18//!
19//! # Installation Process
20//!
21//! 1. **SHA validation**: Ensures all resources have valid 40-character commit SHAs
22//! 2. **Worktree pre-warming**: Creates SHA-based worktrees for all unique commits
23//! 3. **Parallel processing**: Installs multiple resources concurrently using dedicated worktrees
24//! 4. **Content validation**: Validates markdown format and structure
25//! 5. **Atomic installation**: Files are written atomically to prevent corruption
26//! 6. **Progress tracking**: Real-time progress updates during parallel operations
27//! 7. **Artifact cleanup**: Automatically removes old files from previous installations when paths change
28//!
29//! # Artifact Cleanup (v0.3.18+)
30//!
31//! The module provides automatic cleanup of obsolete artifacts when:
32//! - **Dependencies are removed**: Files from removed dependencies are deleted
33//! - **Paths are relocated**: Old files are removed when `installed_at` paths change
34//! - **Structure changes**: Empty parent directories are cleaned up recursively
35//!
36//! The cleanup process:
37//! 1. Compares old and new lockfiles to identify removed artifacts
38//! 2. Removes files that exist in the old lockfile but not in the new one
39//! 3. Recursively removes empty parent directories up to `.claude/`
40//! 4. Reports the number of cleaned artifacts to the user
41//!
42//! See [`cleanup_removed_artifacts()`] for implementation details.
43//!
44//! # Performance Characteristics
45//!
46//! - **SHA-based deduplication**: Multiple refs to same commit share one worktree
47//! - **Parallel processing**: Multiple dependencies installed simultaneously
48//! - **Pre-warming optimization**: Worktrees created upfront to minimize latency
49//! - **Parallelism-controlled**: User controls concurrency via --max-parallel flag
50//! - **Atomic operations**: Fast, safe file installation with proper error handling
51//! - **Reduced disk usage**: No duplicate worktrees for identical commits
52//! - **Efficient cleanup**: Minimal overhead for artifact cleanup operations
53
54use crate::constants::{
55 FALLBACK_CORE_COUNT, MIN_PARALLELISM, PARALLELISM_CORE_MULTIPLIER, default_lock_timeout,
56};
57use crate::lockfile::ResourceId;
58use crate::utils::progress::{InstallationPhase, MultiPhaseProgress};
59use anyhow::Result;
60
// Submodules implementing the installation pipeline.
mod cleanup; // cleanup of removed/relocated artifacts (exports cleanup_removed_artifacts)
mod config_check; // configuration validation (exports ConfigValidation, validate_config)
mod context; // shared installation state (exports InstallContext)
pub mod project_lock; // project-level locking (exports ProjectLock)
mod resource; // single-file resource helpers (read, patch, render, write)
mod selective; // selective installation (exports install_updated_resources)
mod skills; // directory-based skill installation helpers

#[cfg(test)]
mod tests;

// Public API re-exported at the installer module root.
pub use cleanup::cleanup_removed_artifacts;
pub use config_check::{ConfigValidation, validate_config};
pub use context::InstallContext;
pub use project_lock::ProjectLock;
pub use selective::install_updated_resources;

// Internal helpers for file-based resource installation.
use resource::{
    apply_resource_patches, compute_file_checksum, read_source_content, render_resource_content,
    should_skip_installation, should_skip_trusted, validate_markdown_content,
    write_resource_to_disk,
};

// Internal helpers for directory-based skill installation.
use skills::{collect_skill_patches, compute_skill_directory_checksum, install_skill_directory};
/// Type alias for the per-resource result produced by parallel installation tasks.
///
/// This alias simplifies the return type of parallel installation functions
/// that must report either success details or an error together with the
/// identity of the resource that produced it. It was introduced in AGPM
/// v0.3.0 to resolve `clippy::type_complexity` warnings while keeping the
/// semantics of installation results explicit.
///
/// # Success Variant
///
/// `Ok((id, installed, file_checksum, context_checksum, applied_patches, token_count))`:
/// - `ResourceId`: identity of the resource that was processed
/// - `bool`: whether the resource was actually installed (`true`) or already up-to-date (`false`)
/// - `String`: SHA-256 checksum of the installed file content
/// - `Option<String>`: SHA-256 checksum of the template rendering inputs, or `None` for non-templated resources
/// - `AppliedPatches`: record of any patches applied during installation
/// - `Option<u64>`: approximate token count, or `None` for skills/directories
///
/// # Error Variant
///
/// `Err((id, error))`:
/// - `ResourceId`: identity of the resource that failed to install
/// - `anyhow::Error`: detailed error information for debugging
///
/// # Usage
///
/// Primarily used in parallel installation streams (e.g. `buffer_unordered`)
/// where each task yields one `InstallResult` and the collected results are
/// split into successes and failures. Keeping the `ResourceId` in both
/// variants preserves per-resource context for progress reporting and error
/// aggregation.
///
/// # Design Rationale
///
/// - **Clippy compliance**: Resolves `type_complexity` warnings for complex generic types
/// - **Code clarity**: Makes function signatures more readable and self-documenting
/// - **Error context**: Preserves resource identity when installation fails
/// - **Batch processing**: Enables efficient collection and processing of parallel results
type InstallResult = Result<
    (
        crate::lockfile::ResourceId,
        bool,
        String,
        Option<String>,
        crate::manifest::patches::AppliedPatches,
        Option<u64>, // approximate token count
    ),
    (crate::lockfile::ResourceId, anyhow::Error),
>;
159
/// Results from a successful installation operation.
///
/// This struct encapsulates all the data returned from installing resources,
/// providing a more readable and maintainable alternative to the complex
/// result tuple that previously triggered `clippy::type_complexity` warnings.
///
/// # Fields
///
/// - **installed_count**: Number of resources that were successfully installed
/// - **checksums**: File checksums for each installed resource (`ResourceId` -> SHA-256)
/// - **context_checksums**: Template context checksums (`ResourceId` -> SHA-256, `None` when no templating was used)
/// - **applied_patches**: Applied patch records for each resource (`ResourceId` -> `AppliedPatches`)
/// - **token_counts**: Approximate BPE token counts (`ResourceId` -> `Option<u64>`, `None` for skills/directories)
#[derive(Debug, Clone)]
pub struct InstallationResults {
    /// Number of resources that were successfully installed
    pub installed_count: usize,
    /// File checksums for each installed resource
    pub checksums: Vec<(crate::lockfile::ResourceId, String)>,
    /// Template context checksums for each resource (None if no templating used)
    pub context_checksums: Vec<(crate::lockfile::ResourceId, Option<String>)>,
    /// Applied patch information for each resource
    pub applied_patches:
        Vec<(crate::lockfile::ResourceId, crate::manifest::patches::AppliedPatches)>,
    /// Approximate BPE token counts for each resource (None for skills/directories)
    pub token_counts: Vec<(crate::lockfile::ResourceId, Option<u64>)>,
}
187
188impl InstallationResults {
189 /// Creates a new InstallationResults instance.
190 ///
191 /// # Arguments
192 ///
193 /// * `installed_count` - Number of successfully installed resources
194 /// * `checksums` - File checksums for each installed resource
195 /// * `context_checksums` - Template context checksums for each resource
196 /// * `applied_patches` - Applied patch information for each resource
197 /// * `token_counts` - Approximate BPE token counts for each resource
198 pub fn new(
199 installed_count: usize,
200 checksums: Vec<(crate::lockfile::ResourceId, String)>,
201 context_checksums: Vec<(crate::lockfile::ResourceId, Option<String>)>,
202 applied_patches: Vec<(
203 crate::lockfile::ResourceId,
204 crate::manifest::patches::AppliedPatches,
205 )>,
206 token_counts: Vec<(crate::lockfile::ResourceId, Option<u64>)>,
207 ) -> Self {
208 Self {
209 installed_count,
210 checksums,
211 context_checksums,
212 applied_patches,
213 token_counts,
214 }
215 }
216
217 /// Returns true if no resources were installed.
218 pub fn is_empty(&self) -> bool {
219 self.installed_count == 0
220 }
221
222 /// Returns the number of installed resources.
223 pub fn len(&self) -> usize {
224 self.installed_count
225 }
226}
227
228use futures::stream::{self, StreamExt};
229use std::path::Path;
230use std::sync::Arc;
231use tokio::sync::Mutex;
232
233use crate::cache::Cache;
234use crate::core::ResourceIterator;
235use crate::lockfile::{LockFile, LockedResource};
236use crate::manifest::Manifest;
237use indicatif::ProgressBar;
238use std::collections::HashSet;
239
240/// Install a single resource from a lock entry using worktrees for parallel safety.
241///
242/// This function installs a resource specified by a lockfile entry to the project
243/// directory. It uses Git worktrees through the cache layer to enable safe parallel
244/// operations without conflicts between concurrent installations.
245///
246/// # Arguments
247///
248/// * `entry` - The locked resource to install containing source and version info
249/// * `resource_dir` - The subdirectory name for this resource type (e.g., "agents")
250/// * `context` - Installation context containing project configuration and cache instance
251///
252/// # Returns
253///
254/// Returns `Ok((installed, file_checksum, context_checksum, applied_patches, token_count))` where:
255/// - `installed` is `true` if the resource was actually installed (new or updated),
256/// `false` if the resource already existed and was unchanged
257/// - `file_checksum` is the SHA-256 hash of the installed file content (after rendering)
258/// - `context_checksum` is the SHA-256 hash of the template rendering inputs, or None for non-templated resources
259/// - `applied_patches` contains information about any patches that were applied during installation
260/// - `token_count` is the approximate BPE token count of the content, or None for skills/directories
261///
262/// # Worktree Usage
263///
264/// For remote resources, this function:
265/// 1. Uses `cache.get_or_clone_source_worktree_with_context()` to get a worktree
266/// 2. Each dependency gets its own isolated worktree for parallel safety
267/// 3. Worktrees are automatically managed and reused by the cache layer
268/// 4. Context (dependency name) is provided for debugging parallel operations
269///
270/// # Installation Process
271///
272/// 1. **Path resolution**: Determines destination based on `installed_at` or defaults
273/// 2. **Repository access**: Gets worktree from cache (for remote) or validates local path
274/// 3. **Content validation**: Verifies markdown format and structure
275/// 4. **Atomic write**: Installs file atomically to prevent corruption
276///
277/// # Examples
278///
279/// ```rust,no_run
280/// use agpm_cli::installer::{install_resource, InstallContext};
281/// use agpm_cli::lockfile::LockedResourceBuilder;
282/// use agpm_cli::cache::Cache;
283/// use agpm_cli::core::ResourceType;
284/// use std::path::Path;
285///
286/// # async fn example() -> anyhow::Result<()> {
287/// let cache = Cache::new()?;
288/// let entry = LockedResourceBuilder::new(
289/// "example-agent".to_string(),
290/// "agents/example.md".to_string(),
291/// "sha256:...".to_string(),
292/// ".claude/agents/example.md".to_string(),
293/// ResourceType::Agent,
294/// )
295/// .source(Some("community".to_string()))
296/// .url(Some("https://github.com/example/repo.git".to_string()))
297/// .version(Some("v1.0.0".to_string()))
298/// .resolved_commit(Some("abc123".to_string()))
299/// .tool(Some("claude-code".to_string()))
300/// .build();
301///
302/// let context = InstallContext::builder(Path::new("."), &cache).build();
/// let (installed, checksum, _context_checksum, _patches, _token_count) = install_resource(&entry, "agents", &context).await?;
304/// if installed {
305/// println!("Resource was installed with checksum: {}", checksum);
306/// } else {
307/// println!("Resource already existed and was unchanged");
308/// }
309/// # Ok(())
310/// # }
311/// ```
312///
313/// # Error Handling
314///
315/// Returns an error if:
316/// - The source repository cannot be accessed or cloned
317/// - The specified file path doesn't exist in the repository
318/// - The file is not valid markdown format
319/// - File system operations fail (permissions, disk space)
320/// - Worktree creation fails due to Git issues
pub async fn install_resource(
    entry: &LockedResource,
    resource_dir: &str,
    context: &InstallContext<'_>,
) -> Result<(bool, String, Option<String>, crate::manifest::patches::AppliedPatches, Option<u64>)> {
    // Resolve the destination path. An explicit `installed_at` always wins;
    // otherwise fall back to `<project>/<resource_dir>/<name>[.md]`.
    // For skills, create directory path; for others, create file path.
    let dest_path = if entry.installed_at.is_empty() {
        if entry.resource_type == crate::core::ResourceType::Skill {
            // Skills are directories, don't add .md extension
            context.project_dir.join(resource_dir).join(&entry.name)
        } else {
            context.project_dir.join(resource_dir).join(format!("{}.md", entry.name))
        }
    } else {
        context.project_dir.join(&entry.installed_at)
    };

    // Fast path: trust lockfile checksums without recomputing.
    // This is safe when the manifest hash matches and all deps are immutable
    // (see `should_skip_trusted` for the exact conditions).
    if let Some(result) = should_skip_trusted(entry, &dest_path, context) {
        return Ok(result);
    }

    // Compute the checksum of whatever currently exists at the destination
    // (None if nothing is there). Checksums are hashed on a blocking thread
    // via spawn_blocking so file I/O doesn't stall the async runtime; the
    // trailing `.into()` wraps the resulting String in `Some(..)`.
    // For skills (directory-based resources), use a directory checksum.
    let existing_checksum = if entry.resource_type == crate::core::ResourceType::Skill {
        if dest_path.exists() && dest_path.is_dir() {
            let path = dest_path.clone();
            tokio::task::spawn_blocking(move || LockFile::compute_directory_checksum(&path))
                .await??
                .into()
        } else {
            None
        }
    } else if dest_path.exists() {
        let path = dest_path.clone();
        tokio::task::spawn_blocking(move || LockFile::compute_checksum(&path)).await??.into()
    } else {
        None
    };

    // Early-exit optimization: skip if nothing changed (Git dependencies only).
    // On skip, the helper returns the values to report for the unchanged file.
    if let Some((checksum, context_checksum, patches, token_count)) =
        should_skip_installation(entry, &dest_path, existing_checksum.as_ref(), context)
    {
        return Ok((false, checksum, context_checksum, patches, token_count));
    }

    // Local dependencies never take the early exit above; log that we are
    // doing full processing for them.
    if entry.is_local() {
        tracing::debug!(
            "Processing local dependency: {} (early-exit optimization skipped)",
            entry.name
        );
    }

    // Handle skill directory installation separately from regular files.
    let (actually_installed, file_checksum, context_checksum, applied_patches, token_count) =
        if entry.resource_type == crate::core::ResourceType::Skill {
            // For skills, skip content reading and go straight to directory
            // installation. Change detection compares the on-disk directory
            // checksum against the lockfile entry's checksum.
            let content_changed = existing_checksum.as_ref() != Some(&entry.checksum);
            let should_install = entry.install.unwrap_or(true);

            // Collect patches configured for this skill (applied by the
            // directory installer below).
            let applied_patches = collect_skill_patches(entry, context);

            let actually_installed = install_skill_directory(
                entry,
                &dest_path,
                &applied_patches,
                should_install,
                content_changed,
                context,
            )
            .await?;

            // Compute the directory checksum from source after installation;
            // if nothing was installed, keep the lockfile's recorded checksum.
            let dir_checksum = if actually_installed {
                compute_skill_directory_checksum(entry, context).await?
            } else {
                entry.checksum.clone()
            };

            // Skills report no context checksum and no token count.
            (actually_installed, dir_checksum, None, applied_patches, None)
        } else {
            // Regular file-based resources.
            // Read source content from the Git worktree or a local file.
            let content = read_source_content(entry, context).await?;

            // Validate markdown format before any transformation.
            validate_markdown_content(&content)?;

            // Apply patches first, so templating sees the patched content.
            let (patched_content, applied_patches) =
                apply_resource_patches(&content, entry, context)?;

            // Apply templating to markdown files; also yields the checksum of
            // the rendering inputs (None when no templating was performed).
            let (final_content, _templating_was_applied, context_checksum) =
                render_resource_content(&patched_content, entry, context).await?;

            // Count tokens of the final rendered content for lockfile storage
            // and threshold checking.
            let token_count = crate::tokens::count_tokens(&final_content);

            // Warn (but do not fail) when the configured token threshold is
            // exceeded. NOTE(review): `token_count as u64` / `threshold as
            // usize` casts assume a 64-bit target; confirm 32-bit behavior.
            if let Some(threshold) = context.token_warning_threshold {
                if token_count as u64 > threshold {
                    let formatted = crate::tokens::format_token_count(token_count);
                    let threshold_formatted = crate::tokens::format_token_count(threshold as usize);
                    tracing::warn!(
                        "Resource '{}' has ~{} tokens (threshold: {})",
                        entry.name,
                        formatted,
                        threshold_formatted
                    );
                }
            }

            // Checksum of the final (patched + rendered) content — this is
            // what gets recorded, not the raw source checksum.
            let file_checksum = compute_file_checksum(&final_content);

            // Determine if content differs from what is already on disk.
            let content_changed = existing_checksum.as_ref() != Some(&file_checksum);

            // Write to disk if needed (honors `install = false` entries and
            // unchanged content; see write_resource_to_disk).
            let should_install = entry.install.unwrap_or(true);
            let actually_installed = write_resource_to_disk(
                &dest_path,
                &final_content,
                should_install,
                content_changed,
                context,
            )
            .await?;

            (
                actually_installed,
                file_checksum,
                context_checksum,
                applied_patches,
                Some(token_count as u64),
            )
        };

    Ok((actually_installed, file_checksum, context_checksum, applied_patches, token_count))
}
465
466/// Install a single resource with progress bar updates for user feedback.
467///
468/// This function wraps [`install_resource`] with progress bar integration to provide
469/// real-time feedback during resource installation. It updates the progress bar
470/// message before delegating to the core installation logic.
471///
472/// # Arguments
473///
474/// * `entry` - The locked resource containing installation metadata
475/// * `project_dir` - Root project directory for installation target
476/// * `resource_dir` - Subdirectory name for this resource type (e.g., "agents")
477/// * `cache` - Cache instance for Git repository and worktree management
478/// * `force_refresh` - Whether to force refresh of cached repositories
479/// * `pb` - Progress bar to update with installation status
480///
481/// # Returns
482///
483/// Returns a tuple of:
484/// - `bool`: Whether the resource was actually installed (`true` for new/updated, `false` for unchanged)
485/// - `String`: SHA-256 checksum of the installed file content
486/// - `Option<String>`: SHA-256 checksum of the template rendering inputs, or None for non-templated resources
487/// - `AppliedPatches`: Information about any patches that were applied during installation
488///
489/// # Progress Integration
490///
491/// The function automatically sets the progress bar message to indicate which
492/// resource is currently being installed. This provides users with real-time
493/// feedback about installation progress.
494///
495/// # Examples
496///
497/// ```rust,no_run
498/// use agpm_cli::installer::{install_resource_with_progress, InstallContext};
499/// use agpm_cli::lockfile::{LockedResource, LockedResourceBuilder};
500/// use agpm_cli::cache::Cache;
501/// use agpm_cli::core::ResourceType;
502/// use indicatif::ProgressBar;
503/// use std::path::Path;
504///
505/// # async fn example() -> anyhow::Result<()> {
506/// let cache = Cache::new()?;
507/// let pb = ProgressBar::new(1);
508/// let entry = LockedResourceBuilder::new(
509/// "example-agent".to_string(),
510/// "agents/example.md".to_string(),
511/// "sha256:...".to_string(),
512/// ".claude/agents/example.md".to_string(),
513/// ResourceType::Agent,
514/// )
515/// .source(Some("community".to_string()))
516/// .url(Some("https://github.com/example/repo.git".to_string()))
517/// .version(Some("v1.0.0".to_string()))
518/// .resolved_commit(Some("abc123".to_string()))
519/// .tool(Some("claude-code".to_string()))
520/// .build();
521///
522/// let context = InstallContext::builder(Path::new("."), &cache).build();
523/// let (installed, checksum, _old_checksum, _patches, _token_count) = install_resource_with_progress(
524/// &entry,
525/// "agents",
526/// &context,
527/// &pb
528/// ).await?;
529///
530/// pb.inc(1);
531/// # Ok(())
532/// # }
533/// ```
534///
535/// # Errors
536///
537/// Returns the same errors as [`install_resource`], including:
538/// - Repository access failures
539/// - File system operation errors
540/// - Invalid markdown content
541/// - Git worktree creation failures
542pub async fn install_resource_with_progress(
543 entry: &LockedResource,
544 resource_dir: &str,
545 context: &InstallContext<'_>,
546 pb: &ProgressBar,
547) -> Result<(bool, String, Option<String>, crate::manifest::patches::AppliedPatches, Option<u64>)> {
548 pb.set_message(format!("Installing {}", entry.name));
549 install_resource(entry, resource_dir, context).await
550}
551
/// Install a single resource in a thread-safe manner for parallel execution.
///
/// Crate-private helper used by the parallel installation pipeline. It is a
/// pure delegation to [`install_resource`] — it adds no behavior of its own —
/// and exists so parallel stream code has a stable, narrowly-scoped entry
/// point. Returns the same 5-tuple as [`install_resource`].
pub(crate) async fn install_resource_for_parallel(
    entry: &LockedResource,
    resource_dir: &str,
    context: &InstallContext<'_>,
) -> Result<(bool, String, Option<String>, crate::manifest::patches::AppliedPatches, Option<u64>)> {
    install_resource(entry, resource_dir, context).await
}
564
565/// Filtering options for resource installation operations.
566///
567/// This enum controls which resources are processed during installation,
568/// enabling both full installations and selective updates. The filter
569/// determines which entries from the lockfile are actually installed.
570///
571/// # Use Cases
572///
573/// - **Full installations**: Install all resources defined in lockfile
574/// - **Selective updates**: Install only resources that have been updated
575/// - **Performance optimization**: Avoid reinstalling unchanged resources
576/// - **Incremental deployments**: Update only what has changed
577///
578/// # Variants
579///
580/// ## All Resources
581/// [`ResourceFilter::All`] processes every resource entry in the lockfile,
582/// regardless of whether it has changed. This is used by the install command
583/// for complete environment setup.
584///
585/// ## Updated Resources Only
586/// [`ResourceFilter::Updated`] processes only resources that have version
587/// changes, as tracked by the update command. This enables efficient
588/// incremental updates without full reinstallation.
589///
590/// # Examples
591///
592/// Install all resources:
593/// ```rust,no_run
594/// use agpm_cli::installer::ResourceFilter;
595///
596/// let filter = ResourceFilter::All;
597/// // This will install every resource in the lockfile
598/// ```
599///
600/// Install only updated resources:
601/// ```rust,no_run
602/// use agpm_cli::installer::ResourceFilter;
603///
604/// let updates = vec![
605/// ("agent1".to_string(), None, "v1.0.0".to_string(), "v1.1.0".to_string()),
606/// ("tool2".to_string(), Some("community".to_string()), "v2.1.0".to_string(), "v2.2.0".to_string()),
607/// ];
608/// let filter = ResourceFilter::Updated(updates);
609/// // This will install only agent1 and tool2
610/// ```
611///
612/// # Update Tuple Format
613///
614/// For [`ResourceFilter::Updated`], each tuple contains:
/// - `name`: Resource name as defined in the manifest
/// - `source`: Source repository name (`None` for local resources)
/// - `old_version`: Previous version (for logging and tracking)
/// - `new_version`: New version to install
618///
619/// The old version is primarily used for user feedback and logging,
620/// while the new version determines what gets installed.
pub enum ResourceFilter {
    /// Install all resources from the lockfile.
    ///
    /// Processes every resource entry in the lockfile, installing or
    /// updating each one regardless of whether it has changed since the
    /// last installation. Used by the install command for full setup.
    All,

    /// Install only specific updated resources.
    ///
    /// Processes only the resources named in the update list, enabling
    /// efficient incremental updates. Each tuple contains:
    /// - Resource name
    /// - Source name (`None` for local resources)
    /// - Old version (for tracking/logging only)
    /// - New version (to install)
    Updated(Vec<(String, Option<String>, String, String)>),
}
639
// NOTE(review): The overview below documents the public `install_resources`
// entry point (defined elsewhere in this module), NOT the private helper that
// follows. It was previously written as a `///` doc comment immediately above
// `collect_install_entries`, which misattributed the entire guide (and its
// doctests) to the wrong function in rustdoc. Converted to a regular comment
// here; consider moving it onto `install_resources` itself — TODO confirm.
//
// Resource installation function supporting multiple progress configurations.
//
// This function consolidates all resource installation patterns into a single, flexible
// interface that can handle both full installations and selective updates with different
// progress reporting mechanisms. It represents the modernized installation architecture
// introduced in AGPM v0.3.0.
//
// # Architecture Benefits
//
// - **Single API**: Single function handles install and update commands
// - **Flexible progress**: Supports dynamic, simple, and quiet progress modes
// - **Selective installation**: Can install all resources or just updated ones
// - **Optimal concurrency**: Leverages worktree-based parallel operations
// - **Cache efficiency**: Integrates with instance-level caching systems
//
// # Parameters
//
// * `filter` - Determines which resources to install (`ResourceFilter::All` or `ResourceFilter::Updated`)
// * `lockfile` - The lockfile containing all resource definitions to install
// * `manifest` - The project manifest providing configuration and target directories
// * `project_dir` - Root directory where resources should be installed
// * `cache` - Cache instance for Git repository and worktree management
// * `force_refresh` - Whether to force refresh of cached repositories
// * `max_concurrency` - Optional limit on concurrent operations (None = unlimited)
// * `progress` - Optional multi-phase progress manager (`MultiPhaseProgress`)
//
// # Progress Reporting
//
// Progress is reported through the optional `MultiPhaseProgress` parameter:
// - **Enabled**: Pass `Some(progress)` for multi-phase progress with live updates
// - **Disabled**: Pass `None` for quiet operation (scripts and automation)
//
// # Installation Process
//
// 1. **Resource filtering**: Collects entries based on filter criteria
// 2. **Cache warming**: Pre-creates worktrees for all unique repositories
// 3. **Parallel installation**: Processes resources with configured concurrency
// 4. **Progress coordination**: Updates progress based on configuration
// 5. **Error aggregation**: Collects and reports any installation failures
//
// # Concurrency Behavior
//
// The function implements advanced parallel processing:
// - **Pre-warming phase**: Creates all needed worktrees upfront for maximum parallelism
// - **Parallel execution**: Each resource installed in its own async task
// - **Concurrency control**: `max_concurrency` limits simultaneous operations
// - **Thread safety**: Progress updates are atomic and thread-safe
//
// # Returns
//
// Returns a tuple of:
// - The number of resources that were actually installed (new or updated content).
//   Resources that already exist with identical content are not counted.
// - A vector of (`resource_name`, checksum) pairs for all processed resources
//
// # Errors
//
// Returns an error if any resource installation fails. The error includes details
// about all failed installations with specific error messages for debugging.
//
// # Examples
//
// Install all resources with progress tracking:
// ```rust,no_run
// use agpm_cli::installer::{install_resources, ResourceFilter};
// use agpm_cli::utils::progress::MultiPhaseProgress;
// use agpm_cli::lockfile::LockFile;
// use agpm_cli::manifest::Manifest;
// use agpm_cli::cache::Cache;
// use std::sync::Arc;
// use std::path::Path;
//
// # async fn example() -> anyhow::Result<()> {
// # let lockfile = Arc::new(LockFile::default());
// # let manifest = Manifest::default();
// # let project_dir = Path::new(".");
// # let cache = Cache::new()?;
// let progress = Arc::new(MultiPhaseProgress::new(true));
//
// let results = install_resources(
//     ResourceFilter::All,
//     &lockfile,
//     &manifest,
//     &project_dir,
//     cache,
//     false,
//     Some(8), // Limit to 8 concurrent operations
//     Some(progress),
//     false, // verbose
//     None,  // old_lockfile
//     false, // trust_lockfile_checksums
//     None,  // token_warning_threshold
// ).await?;
//
// println!("Installed {} resources", results.installed_count);
// # Ok(())
// # }
// ```
//
// Install resources quietly (for automation):
// ```rust,no_run
// use agpm_cli::installer::{install_resources, ResourceFilter};
// use agpm_cli::lockfile::LockFile;
// use agpm_cli::manifest::Manifest;
// use agpm_cli::cache::Cache;
// use std::path::Path;
// use std::sync::Arc;
//
// # async fn example() -> anyhow::Result<()> {
// # let lockfile = Arc::new(LockFile::default());
// # let manifest = Manifest::default();
// # let project_dir = Path::new(".");
// # let cache = Cache::new()?;
// let updates = vec![("agent1".to_string(), None, "v1.0".to_string(), "v1.1".to_string())];
//
// let results = install_resources(
//     ResourceFilter::Updated(updates),
//     &lockfile,
//     &manifest,
//     &project_dir,
//     cache,
//     false,
//     None, // Unlimited concurrency
//     None, // No progress output
//     false, // verbose
//     None,  // old_lockfile
//     false, // trust_lockfile_checksums
//     None,  // token_warning_threshold
// ).await?;
//
// println!("Updated {} resources", results.installed_count);
// # Ok(())
// # }
// ```

/// Collect entries to install based on filter criteria.
///
/// Returns a sorted vector of (`LockedResource`, target-directory) tuples.
/// Sorting by resource type, then name, ensures deterministic processing
/// order for consistent context checksums.
fn collect_install_entries(
    filter: &ResourceFilter,
    lockfile: &LockFile,
    manifest: &Manifest,
) -> Vec<(LockedResource, String)> {
    let all_entries: Vec<(LockedResource, String)> = match filter {
        ResourceFilter::All => {
            // Use existing ResourceIterator logic for all entries.
            ResourceIterator::collect_all_entries(lockfile, manifest)
                .into_iter()
                .map(|(entry, dir)| (entry.clone(), dir.into_owned()))
                .collect()
        }
        ResourceFilter::Updated(updates) => {
            // Collect only the updated entries; versions in the tuple are
            // ignored here — the lockfile entry is the source of truth.
            let mut entries = Vec::new();
            for (name, source, _, _) in updates {
                if let Some((resource_type, entry)) =
                    ResourceIterator::find_resource_by_name_and_source(
                        lockfile,
                        name,
                        source.as_deref(),
                    )
                {
                    // Get the artifact configuration path for this tool.
                    let tool = entry.tool.as_deref().unwrap_or("claude-code");
                    // System invariant: resource type validated during manifest
                    // parsing. If this fails, skip the entry with a warning.
                    let Some(artifact_path) =
                        manifest.get_artifact_resource_path(tool, resource_type)
                    else {
                        tracing::warn!(
                            name = %name,
                            tool = %tool,
                            resource_type = %resource_type,
                            "Skipping resource: tool does not support this resource type"
                        );
                        continue;
                    };
                    let target_dir = artifact_path.display().to_string();
                    entries.push((entry.clone(), target_dir));
                }
            }
            entries
        }
    };

    if all_entries.is_empty() {
        return Vec::new();
    }

    // Sort entries for deterministic processing order.
    let mut sorted_entries = all_entries;
    sorted_entries.sort_by(|(a, _), (b, _)| {
        a.resource_type.cmp(&b.resource_type).then_with(|| a.name.cmp(&b.name))
    });

    sorted_entries
}
837
838/// Pre-warm cache by creating all needed worktrees upfront.
839///
840/// Creates worktrees for all unique (source, url, sha) combinations to enable
841/// parallel installation without worktree creation bottlenecks.
842async fn pre_warm_worktrees(
843 entries: &[(LockedResource, String)],
844 cache: &Cache,
845 filter: &ResourceFilter,
846 max_concurrency: usize,
847) {
848 let mut unique_worktrees = HashSet::new();
849
850 // Collect unique worktrees
851 for (entry, _) in entries {
852 if let Some(source_name) = &entry.source
853 && let Some(url) = &entry.url
854 {
855 // Only pre-warm if we have a valid SHA
856 if let Some(sha) = entry.resolved_commit.as_ref().filter(|commit| {
857 commit.len() == 40 && commit.chars().all(|c| c.is_ascii_hexdigit())
858 }) {
859 unique_worktrees.insert((source_name.clone(), url.clone(), sha.clone()));
860 }
861 }
862 }
863
864 if unique_worktrees.is_empty() {
865 return;
866 }
867
868 let context = match filter {
869 ResourceFilter::All => "pre-warm",
870 ResourceFilter::Updated(_) => "update-pre-warm",
871 };
872
873 let total = unique_worktrees.len();
874
875 tracing::debug!(
876 "Starting worktree pre-warming for {} worktrees with concurrency {}",
877 total,
878 max_concurrency
879 );
880
881 // Use stream with buffer_unordered to limit concurrency
882 stream::iter(unique_worktrees)
883 .map(|(source, url, sha)| {
884 let cache = cache.clone();
885
886 async move {
887 // Format display: source@sha[8]
888 let display_name = format!("{}@{}", source, &sha[..8]);
889
890 tracing::trace!("Pre-warming worktree: {}", display_name);
891
892 // Create or get worktree
893 let start = std::time::Instant::now();
894 cache.get_or_create_worktree_for_sha(&source, &url, &sha, Some(context)).await.ok(); // Ignore errors during pre-warming
895 let elapsed = start.elapsed();
896 tracing::trace!("Worktree {} took {:?}", display_name, elapsed);
897 }
898 })
899 .buffer_unordered(max_concurrency)
900 .collect::<Vec<_>>()
901 .await;
902
903 tracing::debug!("Completed worktree pre-warming");
904}
905
/// Execute parallel installation with progress tracking.
///
/// Processes all entries concurrently with active progress tracking.
/// Returns a vector of per-resource installation results in entry order
/// (`buffered` preserves input ordering, unlike `buffer_unordered`).
///
/// Errors do not abort the stream: every entry is attempted, and each failure
/// is returned as an `Err((resource_id, error))` element for the caller
/// (`process_install_results`) to aggregate.
///
/// # Arguments
///
/// * `entries` - Sorted `(resource, target_directory)` pairs to install
/// * `project_dir` - Project root that installed files are written under
/// * `cache` - Cache handle cloned into each per-resource task
/// * `manifest` / `lockfile` - Passed through to each per-resource `InstallContext`
/// * `force_refresh` / `verbose` - Forwarded to the install context
/// * `max_concurrency` - Upper bound on in-flight installs; `None` means unbounded
/// * `progress` - Optional multi-phase progress reporter for per-resource updates
/// * `old_lockfile` - Previous lockfile, forwarded to the install context
/// * `trust_lockfile_checksums` - Forwarded to the install context
/// * `token_warning_threshold` - Forwarded to the install context
#[allow(clippy::too_many_arguments)]
async fn execute_parallel_installation(
    entries: Vec<(LockedResource, String)>,
    project_dir: &Path,
    cache: &Cache,
    manifest: &Manifest,
    lockfile: &Arc<LockFile>,
    force_refresh: bool,
    verbose: bool,
    max_concurrency: Option<usize>,
    progress: Option<Arc<MultiPhaseProgress>>,
    old_lockfile: Option<&LockFile>,
    trust_lockfile_checksums: bool,
    token_warning_threshold: Option<u64>,
) -> Vec<InstallResult> {
    // Create thread-safe progress tracking.
    // `installed_count` counts every completed attempt (success or failure) so the
    // progress display reflects overall completion; `type_counts` only counts
    // resources actually written to disk.
    let installed_count = Arc::new(Mutex::new(0));
    let type_counts =
        Arc::new(Mutex::new(std::collections::HashMap::<crate::core::ResourceType, usize>::new()));
    // `None` → usize::MAX (unbounded); `.max(1)` guards against a zero limit,
    // which would stall the buffered stream.
    let concurrency = max_concurrency.unwrap_or(usize::MAX).max(1);

    let total = entries.len();

    // Process installations in parallel with active tracking
    stream::iter(entries)
        .map(|(entry, resource_dir)| {
            // Clone shared handles into the task; each future must be 'static-ish
            // with respect to per-entry data.
            let project_dir = project_dir.to_path_buf();
            let installed_count = Arc::clone(&installed_count);
            let type_counts = Arc::clone(&type_counts);
            let cache = cache.clone();
            let progress = progress.clone();
            let entry_type = entry.resource_type;
            async move {
                // Signal that this resource is starting
                if let Some(ref pm) = progress {
                    pm.mark_resource_active(&entry);
                }

                let install_context = InstallContext::with_common_options_and_trust(
                    &project_dir,
                    &cache,
                    Some(manifest),
                    Some(lockfile),
                    force_refresh,
                    verbose,
                    old_lockfile,
                    trust_lockfile_checksums,
                    token_warning_threshold,
                );

                let res =
                    install_resource_for_parallel(&entry, &resource_dir, &install_context).await;

                // Handle result and track completion.
                // All lock acquisitions below are bounded by default_lock_timeout()
                // so a wedged lock surfaces as a loud error instead of hanging the
                // whole installation.
                match res {
                    Ok((actually_installed, file_checksum, context_checksum, applied_patches, token_count)) => {
                        // Always increment the counter (regardless of whether file was written)
                        let timeout = default_lock_timeout();
                        let mut count = match tokio::time::timeout(timeout, installed_count.lock()).await {
                            Ok(guard) => guard,
                            Err(_) => {
                                eprintln!("[DEADLOCK] Timeout waiting for installed_count lock after {:?}", timeout);
                                return Err((entry.id(), anyhow::anyhow!("Timeout waiting for installed_count lock after {:?} - possible deadlock", timeout)));
                            }
                        };
                        *count += 1;

                        // Track by type for summary (only count those actually written to disk).
                        // Note: the type_counts lock is acquired while the count guard is
                        // still held; lock order is always installed_count → type_counts.
                        if actually_installed {
                            let mut type_guard = match tokio::time::timeout(timeout, type_counts.lock()).await {
                                Ok(guard) => guard,
                                Err(_) => {
                                    eprintln!("[DEADLOCK] Timeout waiting for type_counts lock after {:?}", timeout);
                                    return Err((entry.id(), anyhow::anyhow!("Timeout waiting for type_counts lock after {:?} - possible deadlock", timeout)));
                                }
                            };
                            *type_guard.entry(entry_type).or_insert(0) += 1;
                        }

                        // Signal completion and update counter
                        if let Some(ref pm) = progress {
                            pm.mark_resource_complete(&entry, *count, total);
                        }

                        Ok((
                            entry.id(),
                            actually_installed,
                            file_checksum,
                            context_checksum,
                            applied_patches,
                            token_count,
                        ))
                    }
                    Err(err) => {
                        // On error, still increment counter and clear the slot
                        let timeout = default_lock_timeout();
                        let mut count = match tokio::time::timeout(timeout, installed_count.lock()).await {
                            Ok(guard) => guard,
                            Err(_) => {
                                eprintln!("[DEADLOCK] Timeout waiting for installed_count lock after {:?}", timeout);
                                return Err((entry.id(), anyhow::anyhow!("Timeout waiting for installed_count lock after {:?} - possible deadlock", timeout)));
                            }
                        };
                        *count += 1;

                        // Clear the slot for this failed resource
                        if let Some(ref pm) = progress {
                            pm.mark_resource_complete(&entry, *count, total);
                        }

                        Err((entry.id(), err))
                    }
                }
            }
        })
        .buffered(concurrency)
        .collect()
        .await
}
1029
1030/// Process installation results and aggregate checksums.
1031///
1032/// Aggregates installation results, handles errors with detailed context,
1033/// and returns structured results for lockfile updates.
1034fn process_install_results(
1035 results: Vec<InstallResult>,
1036 progress: Option<Arc<MultiPhaseProgress>>,
1037) -> Result<InstallationResults> {
1038 // Handle errors and collect checksums, context checksums, applied patches, and token counts
1039 let mut errors = Vec::new();
1040 let mut checksums = Vec::new();
1041 let mut context_checksums = Vec::new();
1042 let mut applied_patches_list = Vec::new();
1043 let mut token_counts = Vec::new();
1044
1045 for result in results {
1046 match result {
1047 Ok((id, _installed, file_checksum, context_checksum, applied_patches, token_count)) => {
1048 checksums.push((id.clone(), file_checksum));
1049 context_checksums.push((id.clone(), context_checksum));
1050 applied_patches_list.push((id.clone(), applied_patches));
1051 token_counts.push((id, token_count));
1052 }
1053 Err((id, error)) => {
1054 errors.push((id, error));
1055 }
1056 }
1057 }
1058
1059 // Complete installation phase
1060 if let Some(ref pm) = progress {
1061 if !errors.is_empty() {
1062 pm.complete_phase_with_window(Some(&format!(
1063 "Failed to install {} resources",
1064 errors.len()
1065 )));
1066 } else {
1067 let installed_count = checksums.len();
1068 if installed_count > 0 {
1069 pm.complete_phase_with_window(Some(&format!(
1070 "Installed {installed_count} resources"
1071 )));
1072 }
1073 }
1074 }
1075
1076 // Handle errors with detailed context
1077 if !errors.is_empty() {
1078 // Deduplicate errors by ResourceId - same resource may fail multiple times
1079 // if multiple parents depend on it
1080 let mut unique_errors: std::collections::HashMap<ResourceId, anyhow::Error> =
1081 std::collections::HashMap::new();
1082 for (id, error) in errors {
1083 unique_errors.entry(id).or_insert(error);
1084 }
1085
1086 // Format each error - use enhanced formatting for template errors
1087 let error_msgs: Vec<String> = unique_errors
1088 .into_iter()
1089 .map(|(id, error)| {
1090 // Check if this is a TemplateError by walking the error chain
1091 let mut current_error: &dyn std::error::Error = error.as_ref();
1092 loop {
1093 if let Some(template_error) =
1094 current_error.downcast_ref::<crate::templating::TemplateError>()
1095 {
1096 // Found a TemplateError - use its detailed formatting
1097 return format!(
1098 " {}:\n{}",
1099 id, // Use full ResourceId Display (shows variants)
1100 template_error.format_with_context()
1101 );
1102 }
1103
1104 // Move to the next error in the chain
1105 match current_error.source() {
1106 Some(source) => current_error = source,
1107 None => break,
1108 }
1109 }
1110
1111 // Not a template error - use alternate formatting to show full error chain
1112 format!(" {}: {:#}", id, error) // Use full ResourceId Display + full error chain
1113 })
1114 .collect();
1115
1116 // Return the formatted errors without wrapping context
1117 return Err(anyhow::anyhow!(
1118 "Installation incomplete: {} resource(s) could not be set up\n{}",
1119 error_msgs.len(),
1120 error_msgs.join("\n\n")
1121 ));
1122 }
1123
1124 let installed_count = checksums.len();
1125 Ok(InstallationResults::new(
1126 installed_count,
1127 checksums,
1128 context_checksums,
1129 applied_patches_list,
1130 token_counts,
1131 ))
1132}
1133
/// Install resources selected by `filter` into `project_dir`.
///
/// Entry point shared by the install and update commands. Pipeline:
/// 1. Collect and deterministically sort the entries to install.
/// 2. Pre-warm SHA-based worktrees for every unique commit.
/// 3. Start the installation progress phase with an active-tracking window.
/// 4. Install all entries in parallel via [`execute_parallel_installation`].
/// 5. Aggregate per-resource results into [`InstallationResults`].
///
/// Returns empty results immediately when the filter selects no entries.
///
/// NOTE(review): when `max_concurrency` is `None`, the core-count heuristic
/// below bounds only pre-warming and the progress window; the install stream
/// itself receives the original `None` and runs unbounded — confirm this
/// asymmetry is intended.
///
/// # Errors
///
/// Returns an error listing every resource that failed to install
/// (see [`process_install_results`]).
#[allow(clippy::too_many_arguments)]
pub async fn install_resources(
    filter: ResourceFilter,
    lockfile: &Arc<LockFile>,
    manifest: &Manifest,
    project_dir: &Path,
    cache: Cache,
    force_refresh: bool,
    max_concurrency: Option<usize>,
    progress: Option<Arc<MultiPhaseProgress>>,
    verbose: bool,
    old_lockfile: Option<&LockFile>,
    trust_lockfile_checksums: bool,
    token_warning_threshold: Option<u64>,
) -> Result<InstallationResults> {
    // 1. Collect entries to install
    let all_entries = collect_install_entries(&filter, lockfile, manifest);
    if all_entries.is_empty() {
        return Ok(InstallationResults::new(0, Vec::new(), Vec::new(), Vec::new(), Vec::new()));
    }

    let total = all_entries.len();

    // Calculate optimal window size (fallback rarely used since caller usually provides value)
    let concurrency = max_concurrency.unwrap_or_else(|| {
        let cores = std::thread::available_parallelism()
            .map(std::num::NonZero::get)
            .unwrap_or(FALLBACK_CORE_COUNT);
        std::cmp::max(MIN_PARALLELISM, cores * PARALLELISM_CORE_MULTIPLIER)
    });
    let window_size =
        crate::utils::progress::MultiPhaseProgress::calculate_window_size(concurrency);

    // 2. Pre-warm worktrees
    pre_warm_worktrees(&all_entries, &cache, &filter, concurrency).await;

    // 3. Start installation phase with active window tracking
    if let Some(ref pm) = progress {
        pm.start_phase_with_active_tracking(
            InstallationPhase::InstallingResources,
            total,
            window_size,
        );
    }

    // 4. Execute parallel installation
    let results = execute_parallel_installation(
        all_entries,
        project_dir,
        &cache,
        manifest,
        lockfile,
        force_refresh,
        verbose,
        max_concurrency,
        progress.clone(),
        old_lockfile,
        trust_lockfile_checksums,
        token_warning_threshold,
    )
    .await;

    // 5. Process results and aggregate checksums
    process_install_results(results, progress)
}
1199
1200/// Finalize installation by configuring hooks, MCP servers, and updating lockfiles.
1201///
1202/// This function performs the final steps shared by install and update commands after
1203/// resources are installed. It executes multiple operations in sequence, with each
1204/// step building on the previous.
1205///
1206/// # Process Steps
1207///
1208/// 1. **Hook Configuration** - Configures Claude Code hooks from source files
1209/// 2. **MCP Server Setup** - Groups and configures MCP servers by tool type
1210/// 3. **Patch Application** - Applies and tracks project/private patches
1211/// 4. **Artifact Cleanup** - Removes old files from previous installations
1212/// 5. **Lockfile Saving** - Writes main lockfile with checksums (unless --no-lock)
1213/// 6. **Private Lockfile** - Saves private patches to separate file
1214///
1215/// # Arguments
1216///
1217/// * `lockfile` - Mutable lockfile to update with applied patches
1218/// * `manifest` - Project manifest for configuration
1219/// * `project_dir` - Project root directory
1220/// * `cache` - Cache instance for Git operations
1221/// * `old_lockfile` - Optional previous lockfile for artifact cleanup
1222/// * `quiet` - Whether to suppress output messages
1223/// * `no_lock` - Whether to skip lockfile saving (development mode)
1224///
1225/// # Returns
1226///
1227/// Returns `(hook_count, server_count)` tuple:
1228/// - `hook_count`: Number of hooks configured (regardless of changed status)
1229/// - `server_count`: Number of MCP servers configured (regardless of changed status)
1230///
1231/// # Errors
1232///
1233/// Returns an error if:
1234/// - **Hook configuration fails**: Invalid hook source files or permission errors
1235/// - **MCP handler not found**: Tool type has no registered MCP handler
1236/// - **Tool not configured**: Tool missing from manifest `[default-tools]` section
1237/// - **Lockfile save fails**: Permission denied or disk full
1238///
1239/// # Examples
1240///
1241/// ```rust,no_run
1242/// # use agpm_cli::installer::finalize_installation;
1243/// # use agpm_cli::lockfile::LockFile;
1244/// # use agpm_cli::manifest::Manifest;
1245/// # use agpm_cli::cache::Cache;
1246/// # use std::path::Path;
1247/// # async fn example() -> anyhow::Result<()> {
1248/// let mut lockfile = LockFile::default();
1249/// let manifest = Manifest::default();
1250/// let project_dir = Path::new(".");
1251/// let cache = Cache::new()?;
1252///
1253/// let (hooks, servers) = finalize_installation(
1254/// &mut lockfile,
1255/// &manifest,
1256/// project_dir,
1257/// &cache,
1258/// None, // no old lockfile (fresh install)
1259/// false, // not quiet
1260/// false, // create lockfile
1261/// ).await?;
1262///
1263/// println!("Configured {} hooks and {} servers", hooks, servers);
1264/// # Ok(())
1265/// # }
1266/// ```
1267///
1268/// # Implementation Notes
1269///
1270/// - Hooks are configured by reading directly from source files (no copying)
1271/// - MCP servers are grouped by tool type for batch configuration
1272/// - Patch tracking: project patches stored in lockfile, private in separate file
1273/// - Artifact cleanup only runs if old lockfile exists (update scenario)
1274/// - Private lockfile automatically deleted if empty
1275pub async fn finalize_installation(
1276 lockfile: &mut LockFile,
1277 manifest: &Manifest,
1278 project_dir: &Path,
1279 cache: &Cache,
1280 old_lockfile: Option<&LockFile>,
1281 quiet: bool,
1282 no_lock: bool,
1283) -> Result<(usize, usize)> {
1284 use anyhow::Context;
1285
1286 let mut hook_count = 0;
1287 let mut server_count = 0;
1288
1289 // Handle hooks if present
1290 if !lockfile.hooks.is_empty() {
1291 // Configure hooks directly from source files (no copying)
1292 let hooks_changed = crate::hooks::install_hooks(lockfile, project_dir, cache).await?;
1293 hook_count = lockfile.hooks.len();
1294
1295 // Always show hooks configuration feedback with changed count
1296 if !quiet {
1297 if hook_count == 1 {
1298 if hooks_changed == 1 {
1299 println!("✓ Configured 1 hook (1 changed)");
1300 } else {
1301 println!("✓ Configured 1 hook ({hooks_changed} changed)");
1302 }
1303 } else {
1304 println!("✓ Configured {hook_count} hooks ({hooks_changed} changed)");
1305 }
1306 }
1307 }
1308
1309 // Handle MCP servers if present - group by artifact type
1310 if !lockfile.mcp_servers.is_empty() {
1311 use crate::mcp::handlers::McpHandler;
1312 use std::collections::HashMap;
1313
1314 // Group MCP servers by artifact type
1315 let mut servers_by_type: HashMap<String, Vec<crate::lockfile::LockedResource>> =
1316 HashMap::new();
1317 {
1318 // Scope to limit the immutable borrow of lockfile
1319 for server in &lockfile.mcp_servers {
1320 let tool = server.tool.clone().unwrap_or_else(|| "claude-code".to_string());
1321 servers_by_type.entry(tool).or_default().push(server.clone());
1322 }
1323 }
1324
1325 // Collect all applied patches to update lockfile after iteration
1326 let mut all_mcp_patches: Vec<(String, crate::manifest::patches::AppliedPatches)> =
1327 Vec::new();
1328 // Track total changed MCP servers
1329 let mut total_mcp_changed = 0;
1330
1331 // Configure MCP servers for each artifact type using appropriate handler
1332 for (artifact_type, servers) in servers_by_type {
1333 if let Some(handler) = crate::mcp::handlers::get_mcp_handler(&artifact_type) {
1334 // Get artifact base directory - must be properly configured
1335 let artifact_base = manifest
1336 .get_tool_config(&artifact_type)
1337 .map(|c| &c.path)
1338 .ok_or_else(|| {
1339 anyhow::anyhow!(
1340 "Tool '{}' is not configured. Please define it in [default-tools] section.",
1341 artifact_type
1342 )
1343 })?;
1344 let artifact_base = project_dir.join(artifact_base);
1345
1346 // Configure MCP servers by reading directly from source (no file copying)
1347 let server_entries = servers.clone();
1348
1349 // Collect applied patches and changed count
1350 let (applied_patches_list, changed_count) = handler
1351 .configure_mcp_servers(
1352 project_dir,
1353 &artifact_base,
1354 &server_entries,
1355 cache,
1356 manifest,
1357 )
1358 .await
1359 .with_context(|| {
1360 format!(
1361 "Failed to configure MCP servers for artifact type '{}'",
1362 artifact_type
1363 )
1364 })?;
1365
1366 // Collect patches for later application
1367 all_mcp_patches.extend(applied_patches_list);
1368 total_mcp_changed += changed_count;
1369
1370 server_count += servers.len();
1371 }
1372 }
1373
1374 // Update lockfile with all collected applied patches
1375 for (name, applied_patches) in all_mcp_patches {
1376 lockfile.update_resource_applied_patches(&name, &applied_patches);
1377 }
1378
1379 // Use the actual changed count from MCP handlers
1380 let mcp_servers_changed = total_mcp_changed;
1381
1382 if server_count > 0 && !quiet {
1383 if server_count == 1 {
1384 if mcp_servers_changed == 1 {
1385 println!("✓ Configured 1 MCP server (1 changed)");
1386 } else {
1387 println!("✓ Configured 1 MCP server ({mcp_servers_changed} changed)");
1388 }
1389 } else {
1390 println!("✓ Configured {server_count} MCP servers ({mcp_servers_changed} changed)");
1391 }
1392 }
1393 }
1394
1395 // Clean up removed or moved artifacts if old lockfile provided
1396 if let Some(old) = old_lockfile {
1397 if let Ok(removed) = cleanup_removed_artifacts(old, lockfile, project_dir).await {
1398 if !removed.is_empty() && !quiet {
1399 println!("✓ Cleaned up {} moved or removed artifact(s)", removed.len());
1400 }
1401 }
1402 }
1403
1404 if !no_lock {
1405 // Split lockfile into public and private parts
1406 let (public_lock, private_lock) = lockfile.split_by_privacy();
1407
1408 // Save public lockfile (team-shared)
1409 public_lock.save(&project_dir.join("agpm.lock")).with_context(|| {
1410 format!("Failed to save lockfile to {}", project_dir.join("agpm.lock").display())
1411 })?;
1412
1413 // Save private lockfile (user-specific, automatically deletes if empty)
1414 private_lock.save(project_dir).with_context(|| "Failed to save private lockfile")?;
1415 }
1416
1417 Ok((hook_count, server_count))
1418}
1419
1420/// Find parent resources that depend on the given resource.
1421///
1422/// This function searches through the lockfile to find resources that list
1423/// the given resource name in their `dependencies` field. This is useful for
1424/// error reporting to show which resources depend on a failing resource.
1425///
1426/// # Arguments
1427///
1428/// * `lockfile` - The lockfile to search
1429/// * `resource_name` - The canonical name of the resource to find parents for
1430///
1431/// # Returns
1432///
1433/// A vector of parent resource names (manifest aliases if available, otherwise
1434/// canonical names) that directly depend on the given resource.
1435///
1436/// # Examples
1437///
1438/// ```rust,no_run
1439/// use agpm_cli::lockfile::LockFile;
1440/// use agpm_cli::installer::find_parent_resources;
1441///
1442/// let lockfile = LockFile::default();
1443/// let parents = find_parent_resources(&lockfile, "agents/helper");
1444/// if !parents.is_empty() {
1445/// println!("Resource is required by: {}", parents.join(", "));
1446/// }
1447/// ```
1448pub fn find_parent_resources(lockfile: &LockFile, resource_name: &str) -> Vec<String> {
1449 use crate::core::ResourceIterator;
1450
1451 let mut parents = Vec::new();
1452
1453 // Iterate through all resources in the lockfile
1454 for (entry, _dir) in
1455 ResourceIterator::collect_all_entries(lockfile, &crate::manifest::Manifest::default())
1456 {
1457 // Check if this resource depends on the target resource
1458 if entry.dependencies.iter().any(|dep| dep == resource_name) {
1459 // Use manifest_alias if available (user-facing name), otherwise canonical name
1460 let parent_name = entry.manifest_alias.as_ref().unwrap_or(&entry.name).clone();
1461 parents.push(parent_name);
1462 }
1463 }
1464
1465 parents
1466}