apicurio_cli/commands/lock.rs

use anyhow::{Context, Result};
use sha2::{Digest, Sha256};
use std::{
    collections::{HashMap, HashSet},
    path::PathBuf,
};

use crate::{
    config::{load_global_config, load_repo_config},
    constants::{APICURIO_CONFIG, APICURIO_LOCK},
    dependency::Dependency,
    lockfile::{resolve_output_path, LockFile, LockedDependency},
    registry::RegistryClient,
};

/// Represents a dependency to be resolved (either direct or transitive)
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct DependencyToResolve {
    group_id: String,
    artifact_id: String,
    version_req: String, // For direct deps, a semver requirement; for transitive deps, an exact version
    registry: String,
    output_path: Option<String>, // None for transitive deps
    is_transitive: bool,
    depth: u32,
}

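/// Entry point for the `lock` command: loads the repository and global
/// configuration, checks whether the existing lock file is still valid, and
/// otherwise resolves every direct (and, when enabled, transitive) dependency
/// before writing the updated lock file.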
pub async fn run() -> Result<()> {
    // 1) load repo + global + merge registries
    let config_path = PathBuf::from(APICURIO_CONFIG);
    let config_content = std::fs::read_to_string(&config_path)
        .with_context(|| format!("reading config from {}", config_path.display()))?;
    let repo_cfg = load_repo_config(&config_path)?;
    let global_cfg = load_global_config()?;
    let registries = repo_cfg.merge_registries(global_cfg)?;

    // Compute config hash for lock integrity
    let config_hash = LockFile::compute_config_hash(&config_content, &repo_cfg.dependencies);

    let mut clients = HashMap::new();
    for reg in &registries {
        clients.insert(reg.name.clone(), RegistryClient::new(reg)?);
    }

    // 2) Check if existing lock is up-to-date with enhanced validation
    let lock_path = PathBuf::from(APICURIO_LOCK);
    let existing_lock = if let Ok(existing_lock) = LockFile::load(&lock_path) {
        // First, quick check: is config hash the same?
        if existing_lock.is_compatible_with_config(&config_hash) {
            // Second, check modification time if available
            if existing_lock
                .is_newer_than_config(&config_path)
                .unwrap_or(false)
            {
                // Third, verify all dependencies can still be resolved
                if verify_lock_is_still_valid(&existing_lock, &clients).await? {
                    println!("🔒 Lock file already up-to-date");
                    return Ok(());
                } else {
                    println!("🔓 Lock file outdated: some dependencies are no longer available");
                }
            } else {
                println!("🔓 Lock file outdated: config file has been modified");
            }
        } else {
            println!("🔓 Lock file outdated: config hash changed");
        }
        Some(existing_lock)
    } else {
        None
    };

    // 3) Build initial set of dependencies to resolve
    let mut dependencies_to_resolve = Vec::new();

    // Add direct dependencies from config
    for dep_cfg in &repo_cfg.dependencies {
        let dep = Dependency::from_config(dep_cfg)?;
        dependencies_to_resolve.push(DependencyToResolve {
            group_id: dep.group_id.clone(),
            artifact_id: dep.artifact_id.clone(),
            version_req: dep_cfg.version.clone(),
            registry: dep.registry.clone(),
            output_path: Some(dep.output_path.clone()),
            is_transitive: false,
            depth: 0,
        });
    }

    // 4) Resolve all dependencies including transitive references
    let mut resolved_dependencies = HashMap::new();
    let mut processed = HashSet::new();

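    // Depth-first resolution over a work stack: direct dependencies are pushed
    // first, and transitive references discovered along the way are pushed as
    // they are found. The `processed` set is keyed by registry:group:artifact,
    // so each artifact is resolved at most once.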
    while let Some(dep_to_resolve) = dependencies_to_resolve.pop() {
        let key = format!(
            "{}:{}:{}",
            dep_to_resolve.registry, dep_to_resolve.group_id, dep_to_resolve.artifact_id
        );

        // Skip if already processed
        if processed.contains(&key) {
            continue;
        }
        processed.insert(key.clone());

        // Skip if depth exceeds maximum
        if dep_to_resolve.depth > repo_cfg.reference_resolution.max_depth {
            eprintln!(
                "Warning: Skipping dependency {} at depth {} (exceeds max depth {})",
                key, dep_to_resolve.depth, repo_cfg.reference_resolution.max_depth
            );
            continue;
        }

        let client = &clients[&dep_to_resolve.registry];

        // Resolve version
        let resolved_version = if dep_to_resolve.is_transitive {
            // For transitive deps, version_req is already exact
            semver::Version::parse(&dep_to_resolve.version_req)?
        } else {
            // For direct deps, resolve semver range
            let dep = Dependency {
                name: format!("{}/{}", dep_to_resolve.group_id, dep_to_resolve.artifact_id),
                group_id: dep_to_resolve.group_id.clone(),
                artifact_id: dep_to_resolve.artifact_id.clone(),
                req: semver::VersionReq::parse(&dep_to_resolve.version_req)?,
                registry: dep_to_resolve.registry.clone(),
                output_path: dep_to_resolve.output_path.clone().unwrap_or_default(),
            };

            let all_versions = client
                .list_versions(&dep.group_id, &dep.artifact_id)
                .await
                .with_context(|| {
                    format!("listing versions for {}/{}", dep.group_id, dep.artifact_id)
                })?;

            let selected = all_versions
                .iter()
                .filter(|v| dep.req.matches(v))
                .max()
                .with_context(|| {
                    format!(
                        "no version matching '{}' for dependency '{}'",
                        dep_to_resolve.version_req, dep.name
                    )
                })?;
            selected.clone()
        };

        // Download content for hashing
        let data = client
            .download(
                &dep_to_resolve.group_id,
                &dep_to_resolve.artifact_id,
                &resolved_version,
            )
            .await
            .with_context(|| {
                format!(
                    "downloading content for {}:{} v{}",
                    dep_to_resolve.group_id, dep_to_resolve.artifact_id, resolved_version
                )
            })?;

        // Compute SHA256
        let sha256 = {
            let mut hasher = Sha256::new();
            hasher.update(&data);
            hex::encode(hasher.finalize())
        };

        // Determine output path
        let output_path = if let Some(path) = dep_to_resolve.output_path {
            Some(path)
        } else {
            // Generate path for transitive dependency using pattern and overrides
            let metadata = client
                .get_artifact_metadata(&dep_to_resolve.group_id, &dep_to_resolve.artifact_id)
                .await?;
            resolve_output_path(
                &repo_cfg.reference_resolution.output_pattern,
                &repo_cfg.reference_resolution.output_overrides,
                &dep_to_resolve.registry,
                &dep_to_resolve.group_id,
                &dep_to_resolve.artifact_id,
                &resolved_version.to_string(),
                &metadata.artifact_type,
            )
        };

        // Skip this dependency if it's mapped to null (excluded from resolution)
        let output_path = match output_path {
            Some(path) => path,
            None => {
                println!(
                    "  ⏭️  Skipping transitive dependency {}:{} (mapped to null)",
                    dep_to_resolve.group_id, dep_to_resolve.artifact_id
                );
                continue; // Skip to next dependency
            }
        };

        // Create locked dependency
        let locked_dep = LockedDependency {
            name: if dep_to_resolve.is_transitive {
                format!("{}/{}", dep_to_resolve.group_id, dep_to_resolve.artifact_id)
            } else {
                // Find the original name from config
                repo_cfg
                    .dependencies
                    .iter()
                    .find(|cfg| {
                        let dep = Dependency::from_config(cfg).unwrap();
                        dep.group_id == dep_to_resolve.group_id
                            && dep.artifact_id == dep_to_resolve.artifact_id
                    })
                    .map(|cfg| cfg.name.clone())
                    .unwrap_or_else(|| {
                        format!("{}/{}", dep_to_resolve.group_id, dep_to_resolve.artifact_id)
                    })
            },
            registry: dep_to_resolve.registry.clone(),
            resolved_version: resolved_version.to_string(),
            download_url: client.get_download_url(
                &dep_to_resolve.group_id,
                &dep_to_resolve.artifact_id,
                &resolved_version,
            ),
            sha256,
            output_path,
            group_id: dep_to_resolve.group_id.clone(),
            artifact_id: dep_to_resolve.artifact_id.clone(),
            version_spec: dep_to_resolve.version_req.clone(),
            is_transitive: dep_to_resolve.is_transitive,
        };

        resolved_dependencies.insert(key, locked_dep);

        // Determine if reference resolution should be enabled for this dependency
        let should_resolve_references = if dep_to_resolve.is_transitive {
            // For transitive dependencies, always use global setting
            repo_cfg.reference_resolution.enabled
        } else {
            // For direct dependencies, check per-dependency override first
            let original_dep_config = repo_cfg.dependencies.iter().find(|cfg| {
                let dep = Dependency::from_config(cfg).unwrap();
                dep.group_id == dep_to_resolve.group_id
                    && dep.artifact_id == dep_to_resolve.artifact_id
            });

            match original_dep_config.and_then(|cfg| cfg.resolve_references) {
                Some(override_setting) => override_setting,
                None => repo_cfg.reference_resolution.enabled,
            }
        };

        // If reference resolution is enabled, get version references
        if should_resolve_references
            && dep_to_resolve.depth < repo_cfg.reference_resolution.max_depth
        {
            match client
                .get_version_references(
                    &dep_to_resolve.group_id,
                    &dep_to_resolve.artifact_id,
                    &resolved_version,
                    None,
                )
                .await
            {
                Ok(references) => {
                    for reference in references {
                        // Use "default" as the group_id if the reference doesn't specify one
                        let ref_group_id = reference.group_id.as_deref().unwrap_or("default");

                        let ref_key = format!(
                            "{}:{}:{}",
                            dep_to_resolve.registry, ref_group_id, reference.artifact_id
                        );

                        // Only add if not already processed or in queue
                        if !processed.contains(&ref_key)
                            && !dependencies_to_resolve.iter().any(|d| {
                                format!("{}:{}:{}", d.registry, d.group_id, d.artifact_id)
                                    == ref_key
                            })
                        {
                            dependencies_to_resolve.push(DependencyToResolve {
                                group_id: ref_group_id.to_string(),
                                artifact_id: reference.artifact_id,
                                version_req: reference.version, // References use exact versions
                                registry: dep_to_resolve.registry.clone(), // Use same registry as parent
                                output_path: None, // Will be generated using pattern
                                is_transitive: true,
                                depth: dep_to_resolve.depth + 1,
                            });
                        }
                    }
                }
                Err(e) => {
                    eprintln!(
                        "Warning: Failed to get version references for {}:{} v{}: {}",
                        dep_to_resolve.group_id, dep_to_resolve.artifact_id, resolved_version, e
                    );
                }
            }
        }
    }

    // Convert resolved dependencies to vector
    let mut new_locks: Vec<LockedDependency> = resolved_dependencies.into_values().collect();

    // Sort to ensure consistent ordering (direct deps first, then alphabetical)
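    // This also keeps the lockfile deterministic, so repeated lock runs produce stable diffs.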
    new_locks.sort_by(|a, b| match (a.is_transitive, b.is_transitive) {
        (false, true) => std::cmp::Ordering::Less,
        (true, false) => std::cmp::Ordering::Greater,
        _ => a.name.cmp(&b.name),
    });

    // 5) Create new lockfile with metadata including config modification time
    let config_modified = LockFile::get_config_modification_time(&config_path).ok();
    let lf = LockFile::with_config_modified(new_locks, config_hash, config_modified);

    // 6) Clean up old output paths if they changed
    if let Some(ref old_lock) = existing_lock {
        cleanup_changed_output_paths(&old_lock.locked_dependencies, &lf.locked_dependencies)?;
    }

    lf.save(&lock_path)
        .with_context(|| format!("writing {}", lock_path.display()))?;
    println!("🔒 Updated {}", lock_path.display());

    Ok(())
}

/// Verify that an existing lock file can still be resolved with the same versions
/// This performs a more lightweight check than re-resolving all dependencies
async fn verify_lock_is_still_valid(
    lock: &LockFile,
    clients: &HashMap<String, RegistryClient>,
) -> Result<bool> {
    // Quick optimization: if the lockfile is very recent (< 5 minutes),
    // trust it without checking registries
    if let Ok(generated_nanos) = lock.generated_at.parse::<i64>() {
        let now_nanos = chrono::Utc::now().timestamp_nanos_opt().unwrap_or(0);

        // If lockfile was generated within the last 5 minutes, trust it
        let five_minutes_nanos = 5 * 60 * 1_000_000_000i64; // 5 minutes in nanoseconds
        if now_nanos.saturating_sub(generated_nanos) < five_minutes_nanos {
            return Ok(true);
        }
    }

    // Otherwise, verify each dependency can still be resolved
    for locked_dep in &lock.locked_dependencies {
        let client = match clients.get(&locked_dep.registry) {
            Some(c) => c,
            None => {
                eprintln!(
                    "Warning: Registry '{}' is no longer configured",
                    locked_dep.registry
                );
                return Ok(false);
            }
        };

        // Check if the exact version is still available
        match client
            .list_versions(&locked_dep.group_id, &locked_dep.artifact_id)
            .await
        {
            Ok(versions) => {
                if !versions
                    .iter()
                    .any(|v| v.to_string() == locked_dep.resolved_version)
                {
                    eprintln!(
                        "Warning: Version '{}' of '{}:{}' is no longer available",
                        locked_dep.resolved_version, locked_dep.group_id, locked_dep.artifact_id
                    );
                    return Ok(false);
                }
            }
            Err(e) => {
                eprintln!(
                    "Warning: Failed to check availability of '{}:{}': {}",
                    locked_dep.group_id, locked_dep.artifact_id, e
                );
                // On network errors, etc., we'll be conservative and re-generate
                return Ok(false);
            }
        }
    }
    Ok(true)
}

/// Clean up old output files when their paths change during locking
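/// Files belonging to dependencies that were removed entirely are deleted as
/// well, and any empty parent directories left behind are pruned.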
fn cleanup_changed_output_paths(
    old_dependencies: &[LockedDependency],
    new_dependencies: &[LockedDependency],
) -> Result<()> {
    use std::collections::HashMap;

    // Create a map of dependency name to output path for old and new dependencies
    let old_paths: HashMap<&str, &str> = old_dependencies
        .iter()
        .map(|dep| (dep.name.as_str(), dep.output_path.as_str()))
        .collect();

    let new_paths: HashMap<&str, &str> = new_dependencies
        .iter()
        .map(|dep| (dep.name.as_str(), dep.output_path.as_str()))
        .collect();

    // Check for dependencies with changed output paths
    for (dep_name, old_path) in &old_paths {
        if let Some(new_path) = new_paths.get(dep_name) {
            // If the dependency still exists but the output path changed
            if old_path != new_path {
                let old_file = PathBuf::from(old_path);
                if old_file.exists() {
                    match std::fs::remove_file(&old_file) {
                        Ok(()) => {
                            println!("🗑️  Removed old output file: {old_path}");
                        }
                        Err(e) => {
                            eprintln!(
                                "Warning: Failed to remove old output file '{old_path}': {e}"
                            );
                        }
                    }

                    // Also try to remove empty parent directories
                    if let Some(parent) = old_file.parent() {
                        let _ = remove_empty_parent_dirs(parent);
                    }
                }
            }
        } else {
            // Dependency was removed entirely - clean up its output file
            let old_file = PathBuf::from(old_path);
            if old_file.exists() {
                match std::fs::remove_file(&old_file) {
                    Ok(()) => {
                        println!(
                            "🗑️  Removed output file for removed dependency '{dep_name}': {old_path}"
                        );
                    }
                    Err(e) => {
                        eprintln!(
                            "Warning: Failed to remove output file for removed dependency '{dep_name}': {e}"
                        );
                    }
                }

                // Also try to remove empty parent directories
                if let Some(parent) = old_file.parent() {
                    let _ = remove_empty_parent_dirs(parent);
                }
            }
        }
    }

    Ok(())
}

/// Recursively remove empty parent directories up to the current working directory
fn remove_empty_parent_dirs(dir: &std::path::Path) -> Result<()> {
    // Don't try to remove the current working directory or root
    let cwd = std::env::current_dir().unwrap_or_default();
    if dir == cwd || dir.parent().is_none() {
        return Ok(());
    }

    // Only remove if the directory is empty
    if let Ok(mut entries) = std::fs::read_dir(dir) {
        if entries.next().is_none() {
            // Directory is empty, try to remove it
            match std::fs::remove_dir(dir) {
                Ok(()) => {
                    println!("🗑️  Removed empty directory: {}", dir.display());
                    // Recursively try to remove parent directories
                    if let Some(parent) = dir.parent() {
                        let _ = remove_empty_parent_dirs(parent);
                    }
                }
                Err(_) => {
                    // Ignore errors when removing directories (might not have permissions, etc.)
                }
            }
        }
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use tokio;

    #[test]
    fn test_verify_lock_is_still_valid_with_missing_registry() {
        // Create lockfile with old timestamp to bypass recent optimization
        let mut lock = LockFile::new(vec![], "test_hash".to_string());
        lock.generated_at = "1000000000000000000".to_string(); // Very old timestamp
        lock.locked_dependencies.push(LockedDependency {
            name: "test_dep".to_string(),
            registry: "missing_registry".to_string(),
            resolved_version: "1.0.0".to_string(),
            download_url: "https://example.com/test".to_string(),
            sha256: "test_hash".to_string(),
            output_path: "./protos".to_string(),
            group_id: "com.example".to_string(),
            artifact_id: "test".to_string(),
            version_spec: "^1.0".to_string(),
            is_transitive: false,
        });

        let clients = HashMap::new(); // Empty clients map

        let rt = tokio::runtime::Runtime::new().unwrap();
        let result = rt.block_on(verify_lock_is_still_valid(&lock, &clients));

        assert!(result.is_ok());
        assert!(
            !result.unwrap(),
            "Should return false when registry is missing"
        );
    }

    #[test]
    fn test_cleanup_changed_output_paths() {
        use std::fs;
        use tempfile::TempDir;

        // Create a temporary directory for testing
        let temp_dir = TempDir::new().unwrap();
        let temp_path = temp_dir.path();

        // Create old and new file paths
        let old_path = temp_path.join("old").join("types.proto");
        let new_path = temp_path.join("new").join("types.proto");

        // Create the old file and its parent directory
        fs::create_dir_all(old_path.parent().unwrap()).unwrap();
        fs::write(&old_path, "old content").unwrap();

        // Create old and new dependencies with different output paths
        let old_deps = vec![LockedDependency {
            name: "test_dep".to_string(),
            registry: "local".to_string(),
            resolved_version: "1.0.0".to_string(),
            download_url: "http://localhost/test".to_string(),
            sha256: "test_hash".to_string(),
            output_path: old_path.to_string_lossy().to_string(),
            group_id: "com.example".to_string(),
            artifact_id: "test".to_string(),
            version_spec: "^1.0".to_string(),
            is_transitive: false,
        }];

        let new_deps = vec![LockedDependency {
            name: "test_dep".to_string(),
            registry: "local".to_string(),
            resolved_version: "1.0.0".to_string(),
            download_url: "http://localhost/test".to_string(),
            sha256: "test_hash".to_string(),
            output_path: new_path.to_string_lossy().to_string(),
            group_id: "com.example".to_string(),
            artifact_id: "test".to_string(),
            version_spec: "^1.0".to_string(),
            is_transitive: false,
        }];

        // Verify old file exists before cleanup
        assert!(old_path.exists());

        // Run cleanup
        cleanup_changed_output_paths(&old_deps, &new_deps).unwrap();

        // Verify old file was removed
        assert!(!old_path.exists());

        // Verify old directory was also removed since it's empty
        assert!(!old_path.parent().unwrap().exists());
    }

    #[test]
    fn test_cleanup_removed_dependency() {
        use std::fs;
        use tempfile::TempDir;

        // Create a temporary directory for testing
        let temp_dir = TempDir::new().unwrap();
        let temp_path = temp_dir.path();

        // Create old file path
        let old_path = temp_path.join("removed").join("types.proto");

        // Create the old file and its parent directory
        fs::create_dir_all(old_path.parent().unwrap()).unwrap();
        fs::write(&old_path, "old content").unwrap();

        // Create old dependency that will be removed
        let old_deps = vec![LockedDependency {
            name: "removed_dep".to_string(),
            registry: "local".to_string(),
            resolved_version: "1.0.0".to_string(),
            download_url: "http://localhost/test".to_string(),
            sha256: "test_hash".to_string(),
            output_path: old_path.to_string_lossy().to_string(),
            group_id: "com.example".to_string(),
            artifact_id: "test".to_string(),
            version_spec: "^1.0".to_string(),
            is_transitive: false,
        }];

        let new_deps = vec![]; // Empty - dependency removed

        // Verify old file exists before cleanup
        assert!(old_path.exists());

        // Run cleanup
        cleanup_changed_output_paths(&old_deps, &new_deps).unwrap();

        // Verify old file was removed
        assert!(!old_path.exists());

        // Verify old directory was also removed since it's empty
        assert!(!old_path.parent().unwrap().exists());
    }

    #[test]
    fn test_cleanup_unchanged_output_paths() {
        use std::fs;
        use tempfile::TempDir;

        // Create a temporary directory for testing
        let temp_dir = TempDir::new().unwrap();
        let temp_path = temp_dir.path();

        // Create file path that won't change
        let file_path = temp_path.join("unchanged").join("types.proto");

        // Create the file and its parent directory
        fs::create_dir_all(file_path.parent().unwrap()).unwrap();
        fs::write(&file_path, "content").unwrap();

        // Create dependencies with same output path
        let deps = vec![LockedDependency {
            name: "unchanged_dep".to_string(),
            registry: "local".to_string(),
            resolved_version: "1.0.0".to_string(),
            download_url: "http://localhost/test".to_string(),
            sha256: "test_hash".to_string(),
            output_path: file_path.to_string_lossy().to_string(),
            group_id: "com.example".to_string(),
            artifact_id: "test".to_string(),
            version_spec: "^1.0".to_string(),
            is_transitive: false,
        }];

        // Verify file exists before cleanup
        assert!(file_path.exists());

        // Run cleanup with same old and new deps
        cleanup_changed_output_paths(&deps, &deps).unwrap();

        // Verify file still exists (unchanged)
        assert!(file_path.exists());
    }
}