uv_cache/lib.rs

use std::fmt::{Display, Formatter};
use std::io;
use std::io::Write;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;

use rustc_hash::FxHashMap;
use tracing::{debug, trace, warn};

use uv_cache_info::Timestamp;
use uv_fs::{LockedFile, LockedFileError, LockedFileMode, Simplified, cachedir, directories};
use uv_normalize::PackageName;
use uv_pypi_types::ResolutionMetadata;

pub use crate::by_timestamp::CachedByTimestamp;
#[cfg(feature = "clap")]
pub use crate::cli::CacheArgs;
use crate::removal::Remover;
pub use crate::removal::{Removal, rm_rf};
pub use crate::wheel::WheelCache;
use crate::wheel::WheelCacheKind;
pub use archive::ArchiveId;

mod archive;
mod by_timestamp;
#[cfg(feature = "clap")]
mod cli;
mod removal;
mod wheel;

/// The version of the archive bucket.
///
/// Must be kept in-sync with the version in [`CacheBucket::to_str`].
pub const ARCHIVE_VERSION: u8 = 0;

/// An error initializing or locking a cache entry or shard.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error(transparent)]
    Io(#[from] io::Error),
    #[error("Failed to initialize cache at `{}`", _0.user_display())]
    Init(PathBuf, #[source] io::Error),
    #[error("Could not make the path absolute")]
    Absolute(#[source] io::Error),
    #[error("Could not acquire lock")]
    Acquire(#[from] LockedFileError),
}

/// A [`CacheEntry`] which may or may not exist yet.
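///
/// # Example
///
/// A minimal sketch of composing entry paths (the cache root shown is hypothetical):
///
/// ```
/// use std::path::Path;
///
/// use uv_cache::CacheEntry;
///
/// let entry = CacheEntry::new("/tmp/uv-cache/simple-v20/pypi", "flask.rkyv");
/// assert_eq!(entry.dir(), Path::new("/tmp/uv-cache/simple-v20/pypi"));
///
/// // Derive a sibling entry in the same directory.
/// let sibling = entry.with_file("pandas.rkyv");
/// assert_eq!(sibling.path(), Path::new("/tmp/uv-cache/simple-v20/pypi/pandas.rkyv"));
/// ```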
#[derive(Debug, Clone)]
pub struct CacheEntry(PathBuf);

impl CacheEntry {
    /// Create a new [`CacheEntry`] from a directory and a file name.
    pub fn new(dir: impl Into<PathBuf>, file: impl AsRef<Path>) -> Self {
        Self(dir.into().join(file))
    }

    /// Create a new [`CacheEntry`] from a path.
    pub fn from_path(path: impl Into<PathBuf>) -> Self {
        Self(path.into())
    }

    /// Return the cache entry's parent directory.
    pub fn shard(&self) -> CacheShard {
        CacheShard(self.dir().to_path_buf())
    }

    /// Convert the [`CacheEntry`] into a [`PathBuf`].
    #[inline]
    pub fn into_path_buf(self) -> PathBuf {
        self.0
    }

    /// Return the path to the [`CacheEntry`].
    #[inline]
    pub fn path(&self) -> &Path {
        &self.0
    }

    /// Return the cache entry's parent directory.
    #[inline]
    pub fn dir(&self) -> &Path {
        self.0.parent().expect("Cache entry has no parent")
    }

    /// Create a new [`CacheEntry`] with the given file name.
    #[must_use]
    pub fn with_file(&self, file: impl AsRef<Path>) -> Self {
        Self(self.dir().join(file))
    }

    /// Acquire the [`CacheEntry`] as an exclusive lock.
    pub async fn lock(&self) -> Result<LockedFile, Error> {
        fs_err::create_dir_all(self.dir())?;
        Ok(LockedFile::acquire(
            self.path(),
            LockedFileMode::Exclusive,
            self.path().display(),
        )
        .await?)
    }
}

impl AsRef<Path> for CacheEntry {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}

/// A subdirectory within the cache.
#[derive(Debug, Clone)]
pub struct CacheShard(PathBuf);

impl CacheShard {
    /// Return a [`CacheEntry`] within this shard.
    pub fn entry(&self, file: impl AsRef<Path>) -> CacheEntry {
        CacheEntry::new(&self.0, file)
    }

    /// Return a [`CacheShard`] within this shard.
    #[must_use]
    pub fn shard(&self, dir: impl AsRef<Path>) -> Self {
        Self(self.0.join(dir.as_ref()))
    }

    /// Acquire the cache shard as an exclusive lock.
    pub async fn lock(&self) -> Result<LockedFile, Error> {
        fs_err::create_dir_all(self.as_ref())?;
        Ok(LockedFile::acquire(
            self.join(".lock"),
            LockedFileMode::Exclusive,
            self.display(),
        )
        .await?)
    }

    /// Return the [`CacheShard`] as a [`PathBuf`].
    pub fn into_path_buf(self) -> PathBuf {
        self.0
    }
}

impl AsRef<Path> for CacheShard {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}

impl Deref for CacheShard {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// The main cache abstraction.
///
/// While the cache is active, it holds a read (shared) lock that prevents cache cleaning.
#[derive(Debug, Clone)]
pub struct Cache {
    /// The cache directory.
    root: PathBuf,
    /// The refresh strategy to use when reading from the cache.
    refresh: Refresh,
    /// A temporary cache directory, if the user requested `--no-cache`.
    ///
    /// Included to ensure that the temporary directory exists for the length of the operation, but
    /// is dropped at the end as appropriate.
    temp_dir: Option<Arc<tempfile::TempDir>>,
    /// Ensure that `uv cache` operations don't remove items from the cache that are used by another
    /// uv process.
    lock_file: Option<Arc<LockedFile>>,
}

impl Cache {
    /// A persistent cache directory at `root`.
    pub fn from_path(root: impl Into<PathBuf>) -> Self {
        Self {
            root: root.into(),
            refresh: Refresh::None(Timestamp::now()),
            temp_dir: None,
            lock_file: None,
        }
    }

    /// Create a temporary cache directory.
    pub fn temp() -> Result<Self, io::Error> {
        let temp_dir = tempfile::tempdir()?;
        Ok(Self {
            root: temp_dir.path().to_path_buf(),
            refresh: Refresh::None(Timestamp::now()),
            temp_dir: Some(Arc::new(temp_dir)),
            lock_file: None,
        })
    }

    /// Set the [`Refresh`] policy for the cache.
    #[must_use]
    pub fn with_refresh(self, refresh: Refresh) -> Self {
        Self { refresh, ..self }
    }

    /// Acquire a lock that allows removing entries from the cache.
    pub async fn with_exclusive_lock(self) -> Result<Self, LockedFileError> {
        let Self {
            root,
            refresh,
            temp_dir,
            lock_file,
        } = self;

        // Release the existing lock to avoid deadlocks from a cloned cache.
        if let Some(lock_file) = lock_file {
            drop(
                Arc::try_unwrap(lock_file).expect(
                    "cloning the cache before acquiring an exclusive lock causes a deadlock",
                ),
            );
        }
        let lock_file = LockedFile::acquire(
            root.join(".lock"),
            LockedFileMode::Exclusive,
            root.simplified_display(),
        )
        .await?;

        Ok(Self {
            root,
            refresh,
            temp_dir,
            lock_file: Some(Arc::new(lock_file)),
        })
    }

    /// Acquire a lock that allows removing entries from the cache, if available.
    ///
    /// If the lock is not immediately available, returns [`Err`] with self.
    pub fn with_exclusive_lock_no_wait(self) -> Result<Self, Self> {
        let Self {
            root,
            refresh,
            temp_dir,
            lock_file,
        } = self;

        match LockedFile::acquire_no_wait(
            root.join(".lock"),
            LockedFileMode::Exclusive,
            root.simplified_display(),
        ) {
            Some(lock_file) => Ok(Self {
                root,
                refresh,
                temp_dir,
                lock_file: Some(Arc::new(lock_file)),
            }),
            None => Err(Self {
                root,
                refresh,
                temp_dir,
                lock_file,
            }),
        }
    }

    /// Return the root of the cache.
    pub fn root(&self) -> &Path {
        &self.root
    }

    /// Return the [`Refresh`] policy for the cache.
    pub fn refresh(&self) -> &Refresh {
        &self.refresh
    }

    /// The folder for a specific cache bucket.
    pub fn bucket(&self, cache_bucket: CacheBucket) -> PathBuf {
        self.root.join(cache_bucket.to_str())
    }

    /// Compute a shard in the cache.
    pub fn shard(&self, cache_bucket: CacheBucket, dir: impl AsRef<Path>) -> CacheShard {
        CacheShard(self.bucket(cache_bucket).join(dir.as_ref()))
    }

    /// Compute an entry in the cache.
    pub fn entry(
        &self,
        cache_bucket: CacheBucket,
        dir: impl AsRef<Path>,
        file: impl AsRef<Path>,
    ) -> CacheEntry {
        CacheEntry::new(self.bucket(cache_bucket).join(dir), file)
    }

    /// Return the path to an archive in the cache.
    pub fn archive(&self, id: &ArchiveId) -> PathBuf {
        self.bucket(CacheBucket::Archive).join(id)
    }

    /// Create a temporary directory to be used as a Python virtual environment.
    pub fn venv_dir(&self) -> io::Result<tempfile::TempDir> {
        fs_err::create_dir_all(self.bucket(CacheBucket::Builds))?;
        tempfile::tempdir_in(self.bucket(CacheBucket::Builds))
    }

    /// Create a temporary directory to be used for executing PEP 517 source distribution builds.
    pub fn build_dir(&self) -> io::Result<tempfile::TempDir> {
        fs_err::create_dir_all(self.bucket(CacheBucket::Builds))?;
        tempfile::tempdir_in(self.bucket(CacheBucket::Builds))
    }

    /// Returns `true` if a cache entry must be revalidated given the [`Refresh`] policy.
    pub fn must_revalidate_package(&self, package: &PackageName) -> bool {
        match &self.refresh {
            Refresh::None(_) => false,
            Refresh::All(_) => true,
            Refresh::Packages(packages, _, _) => packages.contains(package),
        }
    }

    /// Returns `true` if a cache entry must be revalidated given the [`Refresh`] policy.
    pub fn must_revalidate_path(&self, path: &Path) -> bool {
        match &self.refresh {
            Refresh::None(_) => false,
            Refresh::All(_) => true,
            Refresh::Packages(_, paths, _) => paths
                .iter()
                .any(|target| same_file::is_same_file(path, target).unwrap_or(false)),
        }
    }

    /// Returns the [`Freshness`] for a cache entry, validating it against the [`Refresh`] policy.
    ///
    /// A cache entry is considered fresh if it was created after the cache itself was
    /// initialized, or if the [`Refresh`] policy does not require revalidation.
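    ///
    /// A minimal sketch (the cache root and entry names are hypothetical):
    ///
    /// ```no_run
    /// use uv_cache::{Cache, CacheBucket};
    ///
    /// # fn example() -> std::io::Result<()> {
    /// let cache = Cache::from_path("/tmp/uv-cache");
    /// let entry = cache.entry(CacheBucket::Simple, "pypi", "flask.rkyv");
    /// // Under the default `Refresh::None` policy, entries are always considered fresh.
    /// assert!(cache.freshness(&entry, None, None)?.is_fresh());
    /// # Ok(())
    /// # }
    /// ```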
    pub fn freshness(
        &self,
        entry: &CacheEntry,
        package: Option<&PackageName>,
        path: Option<&Path>,
    ) -> io::Result<Freshness> {
        // Grab the cutoff timestamp, if it's relevant.
        let timestamp = match &self.refresh {
            Refresh::None(_) => return Ok(Freshness::Fresh),
            Refresh::All(timestamp) => timestamp,
            Refresh::Packages(packages, paths, timestamp) => {
                if package.is_none_or(|package| packages.contains(package))
                    || path.is_some_and(|path| {
                        paths
                            .iter()
                            .any(|target| same_file::is_same_file(path, target).unwrap_or(false))
                    })
                {
                    timestamp
                } else {
                    return Ok(Freshness::Fresh);
                }
            }
        };

        match fs_err::metadata(entry.path()) {
            Ok(metadata) => {
                if Timestamp::from_metadata(&metadata) >= *timestamp {
                    Ok(Freshness::Fresh)
                } else {
                    Ok(Freshness::Stale)
                }
            }
            Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(Freshness::Missing),
            Err(err) => Err(err),
        }
    }

    /// Persist a temporary directory to the artifact store, returning its unique ID.
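    ///
    /// A minimal sketch (the staged directory and target names are hypothetical):
    ///
    /// ```no_run
    /// use std::path::Path;
    ///
    /// use uv_cache::{Cache, CacheBucket};
    ///
    /// # async fn example(cache: &Cache, staged: &Path) -> std::io::Result<()> {
    /// // `staged` holds an unpacked wheel; move it into the archive bucket and leave a
    /// // link to it at the bucket-specific location.
    /// let link = cache.entry(CacheBucket::Wheels, "pypi/flask", "flask-3.0.0-py3-none-any");
    /// let id = cache.persist(staged, link.path()).await?;
    /// println!("stored as {}", cache.archive(&id).display());
    /// # Ok(())
    /// # }
    /// ```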
    pub async fn persist(
        &self,
        temp_dir: impl AsRef<Path>,
        path: impl AsRef<Path>,
    ) -> io::Result<ArchiveId> {
        // Create a unique ID for the artifact.
        // TODO(charlie): Support content-addressed persistence via SHAs.
        let id = ArchiveId::new();

        // Move the temporary directory into the directory store.
        let archive_entry = self.entry(CacheBucket::Archive, "", &id);
        fs_err::create_dir_all(archive_entry.dir())?;
        uv_fs::rename_with_retry(temp_dir.as_ref(), archive_entry.path()).await?;

        // Create a symlink to the directory store.
        fs_err::create_dir_all(path.as_ref().parent().expect("Cache entry to have parent"))?;
        self.create_link(&id, path.as_ref())?;

        Ok(id)
    }

    /// Returns `true` if the [`Cache`] is temporary.
    pub fn is_temporary(&self) -> bool {
        self.temp_dir.is_some()
    }

    /// Populate the cache scaffold.
    fn create_base_files(root: &PathBuf) -> io::Result<()> {
        // Create the cache directory, if it doesn't exist.
        fs_err::create_dir_all(root)?;

        // Add the CACHEDIR.TAG.
        cachedir::ensure_tag(root)?;

        // Add the .gitignore.
        match fs_err::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(root.join(".gitignore"))
        {
            Ok(mut file) => file.write_all(b"*")?,
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => (),
            Err(err) => return Err(err),
        }

        // Add an empty .gitignore to the build bucket, to ensure that the cache's own .gitignore
        // doesn't interfere with source distribution builds. Build backends (like hatchling) will
        // traverse upwards to look for .gitignore files.
        fs_err::create_dir_all(root.join(CacheBucket::SourceDistributions.to_str()))?;
        match fs_err::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(
                root.join(CacheBucket::SourceDistributions.to_str())
                    .join(".gitignore"),
            ) {
            Ok(_) => {}
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => (),
            Err(err) => return Err(err),
        }

        // Add a phony .git, if it doesn't exist, to ensure that the cache isn't considered to be
        // part of a Git repository. (Some packages will include Git metadata, like a hash, in the
        // built version if they're in a Git repository, but the cache should be viewed as an
        // isolated store.)
        // We have to put this below the .gitignore: otherwise, if the build backend uses the Rust
        // `ignore` crate, it will walk up to the top-level .gitignore and ignore its Python source
        // files.
        let phony_git = root
            .join(CacheBucket::SourceDistributions.to_str())
            .join(".git");
        match fs_err::OpenOptions::new()
            .create(true)
            .write(true)
            .open(&phony_git)
        {
            Ok(_) => {}
            // Handle read-only caches including sandboxed environments.
            Err(err) if err.kind() == io::ErrorKind::ReadOnlyFilesystem => {
                if !phony_git.exists() {
                    return Err(err);
                }
            }
            Err(err) => return Err(err),
        }

        Ok(())
    }

    /// Initialize the [`Cache`].
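    ///
    /// A minimal sketch (the cache root is hypothetical):
    ///
    /// ```no_run
    /// use uv_cache::{Cache, Error};
    ///
    /// # async fn example() -> Result<(), Error> {
    /// // Creates the scaffold (`CACHEDIR.TAG`, `.gitignore`, ...) and takes a shared lock.
    /// let cache = Cache::from_path("/tmp/uv-cache").init().await?;
    /// # Ok(())
    /// # }
    /// ```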
    pub async fn init(self) -> Result<Self, Error> {
        let root = &self.root;

        Self::create_base_files(root).map_err(|err| Error::Init(root.clone(), err))?;

        // Block cache removal operations from interfering.
        let lock_file = match LockedFile::acquire(
            root.join(".lock"),
            LockedFileMode::Shared,
            root.simplified_display(),
        )
        .await
        {
            Ok(lock_file) => Some(Arc::new(lock_file)),
            Err(err)
                if err
                    .as_io_error()
                    .is_some_and(|err| err.kind() == io::ErrorKind::Unsupported) =>
            {
                warn!(
                    "Shared locking is not supported by the current platform or filesystem, \
                        reduced parallel process safety with `uv cache clean` and `uv cache prune`."
                );
                None
            }
            Err(err) => return Err(err.into()),
        };

        Ok(Self {
            root: std::path::absolute(root).map_err(Error::Absolute)?,
            lock_file,
            ..self
        })
    }

    /// Initialize the [`Cache`], assuming that there are no other uv processes running.
    pub fn init_no_wait(self) -> Result<Option<Self>, Error> {
        let root = &self.root;

        Self::create_base_files(root).map_err(|err| Error::Init(root.clone(), err))?;

        // Block cache removal operations from interfering.
        let Some(lock_file) = LockedFile::acquire_no_wait(
            root.join(".lock"),
            LockedFileMode::Shared,
            root.simplified_display(),
        ) else {
            return Ok(None);
        };
        Ok(Some(Self {
            root: std::path::absolute(root).map_err(Error::Absolute)?,
            lock_file: Some(Arc::new(lock_file)),
            ..self
        }))
    }

    /// Clear the cache, removing all entries.
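    ///
    /// A minimal sketch (the no-op reporter is illustrative):
    ///
    /// ```no_run
    /// use uv_cache::{Cache, CleanReporter};
    ///
    /// struct Silent;
    ///
    /// impl CleanReporter for Silent {
    ///     fn on_clean(&self) {}
    ///     fn on_complete(&self) {}
    /// }
    ///
    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
    /// let cache = Cache::from_path("/tmp/uv-cache").init().await?;
    /// let _removal = cache.clear(Box::new(Silent))?;
    /// # Ok(())
    /// # }
    /// ```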
    pub fn clear(self, reporter: Box<dyn CleanReporter>) -> Result<Removal, io::Error> {
        // Remove everything but `.lock`, since Windows does not allow removal of a locked file.
        let mut removal = Remover::new(reporter).rm_rf(&self.root, true)?;
        let Self {
            root, lock_file, ..
        } = self;

        // Remove the `.lock` file, unlocking it first
        if let Some(lock) = lock_file {
            drop(lock);
            fs_err::remove_file(root.join(".lock"))?;
        }
        removal.num_files += 1;

        // Remove the root directory
        match fs_err::remove_dir(root) {
            Ok(()) => {
                removal.num_dirs += 1;
            }
            // On Windows, when `--force` is used, the `.lock` file can exist and be unremovable,
            // so we make this non-fatal
            Err(err) if err.kind() == io::ErrorKind::DirectoryNotEmpty => {
                trace!("Failed to remove root cache directory: not empty");
            }
            Err(err) => return Err(err),
        }

        Ok(removal)
    }

    /// Remove a package from the cache.
    ///
    /// Returns the number of entries removed from the cache.
    pub fn remove(&self, name: &PackageName) -> io::Result<Removal> {
        // Collect the set of referenced archives.
        let references = self.find_archive_references()?;

        // Remove any entries for the package from the cache.
        let mut summary = Removal::default();
        for bucket in CacheBucket::iter() {
            summary += bucket.remove(self, name)?;
        }

        // Remove any archives that are no longer referenced.
        for (target, references) in references {
            if references.iter().all(|path| !path.exists()) {
                debug!("Removing dangling cache entry: {}", target.display());
                summary += rm_rf(target)?;
            }
        }

        Ok(summary)
    }

    /// Run the garbage collector on the cache, removing any dangling entries.
    pub fn prune(&self, ci: bool) -> Result<Removal, io::Error> {
        let mut summary = Removal::default();

        // First, remove any top-level directories that are unused. These typically represent
        // outdated cache buckets (e.g., `wheels-v0`, when latest is `wheels-v1`).
        for entry in fs_err::read_dir(&self.root)? {
            let entry = entry?;
            let metadata = entry.metadata()?;

            if entry.file_name() == "CACHEDIR.TAG"
                || entry.file_name() == ".gitignore"
                || entry.file_name() == ".git"
                || entry.file_name() == ".lock"
            {
                continue;
            }

            if metadata.is_dir() {
                // If the directory is not a cache bucket, remove it.
                if CacheBucket::iter().all(|bucket| entry.file_name() != bucket.to_str()) {
                    let path = entry.path();
                    debug!("Removing dangling cache bucket: {}", path.display());
                    summary += rm_rf(path)?;
                }
            } else {
                // If the file is not a marker file, remove it.
                let path = entry.path();
                debug!("Removing dangling cache bucket: {}", path.display());
                summary += rm_rf(path)?;
            }
        }

        // Second, remove any cached environments. These are never referenced by symlinks, so we can
        // remove them directly.
        match fs_err::read_dir(self.bucket(CacheBucket::Environments)) {
            Ok(entries) => {
                for entry in entries {
                    let entry = entry?;
                    let path = fs_err::canonicalize(entry.path())?;
                    debug!("Removing dangling cache environment: {}", path.display());
                    summary += rm_rf(path)?;
                }
            }
            Err(err) if err.kind() == io::ErrorKind::NotFound => (),
            Err(err) => return Err(err),
        }

        // Third, if enabled, remove all unzipped wheels, leaving only the wheel archives.
        if ci {
            // Remove the entire pre-built wheel cache, since every entry is an unzipped wheel.
            match fs_err::read_dir(self.bucket(CacheBucket::Wheels)) {
                Ok(entries) => {
                    for entry in entries {
                        let entry = entry?;
                        let path = fs_err::canonicalize(entry.path())?;
                        if path.is_dir() {
                            debug!("Removing unzipped wheel entry: {}", path.display());
                            summary += rm_rf(path)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (),
                Err(err) => return Err(err),
            }

            for entry in walkdir::WalkDir::new(self.bucket(CacheBucket::SourceDistributions)) {
                let entry = entry?;

                // If the directory contains a `metadata.msgpack`, then it's a built wheel revision.
                if !entry.file_type().is_dir() {
                    continue;
                }

                if !entry.path().join("metadata.msgpack").exists() {
                    continue;
                }

                // Remove everything except the built wheel archive and the metadata.
                for entry in fs_err::read_dir(entry.path())? {
                    let entry = entry?;
                    let path = entry.path();

                    // Retain the resolved metadata (`metadata.msgpack`).
                    if path
                        .file_name()
                        .is_some_and(|file_name| file_name == "metadata.msgpack")
                    {
                        continue;
                    }

                    // Retain any built wheel archives.
                    if path
                        .extension()
                        .is_some_and(|ext| ext.eq_ignore_ascii_case("whl"))
                    {
                        continue;
                    }

                    debug!("Removing unzipped built wheel entry: {}", path.display());
                    summary += rm_rf(path)?;
                }
            }
        }

        // Fourth, remove any unused archives (by searching for archives that are not symlinked).
        let references = self.find_archive_references()?;

        match fs_err::read_dir(self.bucket(CacheBucket::Archive)) {
            Ok(entries) => {
                for entry in entries {
                    let entry = entry?;
                    let path = fs_err::canonicalize(entry.path())?;
                    if !references.contains_key(&path) {
                        debug!("Removing dangling cache archive: {}", path.display());
                        summary += rm_rf(path)?;
                    }
                }
            }
            Err(err) if err.kind() == io::ErrorKind::NotFound => (),
            Err(err) => return Err(err),
        }

        Ok(summary)
    }

    /// Find all references to entries in the archive bucket.
    ///
    /// Archive entries are often referenced by symlinks in other cache buckets. This method
    /// searches for all such references.
    ///
    /// Returns a map from archive path to paths that reference it.
    fn find_archive_references(&self) -> Result<FxHashMap<PathBuf, Vec<PathBuf>>, io::Error> {
        let mut references = FxHashMap::<PathBuf, Vec<PathBuf>>::default();
        for bucket in [CacheBucket::SourceDistributions, CacheBucket::Wheels] {
            let bucket_path = self.bucket(bucket);
            if bucket_path.is_dir() {
                let walker = walkdir::WalkDir::new(&bucket_path).into_iter();
                for entry in walker.filter_entry(|entry| {
                    !(
                        // As an optimization, ignore any `.lock`, `.whl`, `.msgpack`, `.rev`, or
                        // `.http` files, along with the `src` directory, which represents the
                        // unpacked source distribution.
                        entry.file_name() == "src"
                            || entry.file_name() == ".lock"
                            || entry.file_name() == ".gitignore"
                            || entry.path().extension().is_some_and(|ext| {
                                ext.eq_ignore_ascii_case("lock")
                                    || ext.eq_ignore_ascii_case("whl")
                                    || ext.eq_ignore_ascii_case("http")
                                    || ext.eq_ignore_ascii_case("rev")
                                    || ext.eq_ignore_ascii_case("msgpack")
                            })
                    )
                }) {
                    let entry = entry?;

                    // On Unix, archive references use symlinks.
                    if cfg!(unix) {
                        if !entry.file_type().is_symlink() {
                            continue;
                        }
                    }

                    // On Windows, archive references are files containing structured data.
                    if cfg!(windows) {
                        if !entry.file_type().is_file() {
                            continue;
                        }
                    }

                    if let Ok(target) = self.resolve_link(entry.path()) {
                        references
                            .entry(target)
                            .or_default()
                            .push(entry.path().to_path_buf());
                    }
                }
            }
        }
        Ok(references)
    }

    /// Create a link to a directory in the archive bucket.
    ///
    /// On Windows, we write structured data ([`Link`]) to a file containing the archive ID and
    /// version. On Unix, we create a symlink to the target directory.
    #[cfg(windows)]
    pub fn create_link(&self, id: &ArchiveId, dst: impl AsRef<Path>) -> io::Result<()> {
        // Serialize the link.
        let link = Link::new(id.clone());
        let contents = link.to_string();

        // First, attempt to create a file at the location, but fail if it already exists.
        match fs_err::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(dst.as_ref())
        {
            Ok(mut file) => {
                // Write the target path to the file.
                file.write_all(contents.as_bytes())?;
                Ok(())
            }
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => {
                // Write to a temporary file, then move it into place.
                let temp_dir = tempfile::tempdir_in(dst.as_ref().parent().unwrap())?;
                let temp_file = temp_dir.path().join("link");
                fs_err::write(&temp_file, contents.as_bytes())?;

                // Move the symlink into the target location.
                fs_err::rename(&temp_file, dst.as_ref())?;

                Ok(())
            }
            Err(err) => Err(err),
        }
    }

    /// Resolve an archive link, returning the fully-resolved path.
    ///
    /// Returns an error if the link target does not exist.
    #[cfg(windows)]
    pub fn resolve_link(&self, path: impl AsRef<Path>) -> io::Result<PathBuf> {
        // Deserialize the link.
        let contents = fs_err::read_to_string(path.as_ref())?;
        let link = Link::from_str(&contents)?;

        // Ignore stale links.
        if link.version != ARCHIVE_VERSION {
            return Err(io::Error::new(
                io::ErrorKind::NotFound,
                "The link target does not exist.",
            ));
        }

        // Reconstruct the path.
        let path = self.archive(&link.id);
        path.canonicalize()
    }

    /// Create a link to a directory in the archive bucket.
    ///
    /// On Windows, we write structured data ([`Link`]) to a file containing the archive ID and
    /// version. On Unix, we create a symlink to the target directory.
    #[cfg(unix)]
    pub fn create_link(&self, id: &ArchiveId, dst: impl AsRef<Path>) -> io::Result<()> {
        // Construct the link target.
        let src = self.archive(id);
        let dst = dst.as_ref();

        // Attempt to create the symlink directly.
        match fs_err::os::unix::fs::symlink(&src, dst) {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::AlreadyExists => {
                // Create a symlink, using a temporary file to ensure atomicity.
                let temp_dir = tempfile::tempdir_in(dst.parent().unwrap())?;
                let temp_file = temp_dir.path().join("link");
                fs_err::os::unix::fs::symlink(&src, &temp_file)?;

                // Move the symlink into the target location.
                fs_err::rename(&temp_file, dst)?;

                Ok(())
            }
            Err(err) => Err(err),
        }
    }

    /// Resolve an archive link, returning the fully-resolved path.
    ///
    /// Returns an error if the link target does not exist.
    #[cfg(unix)]
    pub fn resolve_link(&self, path: impl AsRef<Path>) -> io::Result<PathBuf> {
        path.as_ref().canonicalize()
    }
}

/// A link to an archive (an unzipped wheel) that exists in the local cache.
#[derive(Debug, Clone)]
#[allow(unused)]
struct Link {
    /// The unique ID of the entry in the archive bucket.
    id: ArchiveId,
    /// The version of the archive bucket.
    version: u8,
}

#[allow(unused)]
impl Link {
    /// Create a new [`Link`] with the given ID.
    fn new(id: ArchiveId) -> Self {
        Self {
            id,
            version: ARCHIVE_VERSION,
        }
    }
}

impl Display for Link {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "archive-v{}/{}", self.version, self.id)
    }
}

impl FromStr for Link {
    type Err = io::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut parts = s.splitn(2, '/');
        let version = parts
            .next()
            .filter(|s| !s.is_empty())
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "missing version"))?;
        let id = parts
            .next()
            .filter(|s| !s.is_empty())
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "missing ID"))?;

        // Parse the archive version from `archive-v{version}/{id}`.
        let version = version
            .strip_prefix("archive-v")
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "missing version prefix"))?;
        let version = u8::from_str(version).map_err(|err| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!("failed to parse version: {err}"),
            )
        })?;

        // Parse the ID from `archive-v{version}/{id}`.
        let id = ArchiveId::from_str(id).map_err(|err| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!("failed to parse ID: {err}"),
            )
        })?;

        Ok(Self { id, version })
    }
}

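/// A callback interface for reporting progress while the cache is being cleaned.
///
/// A minimal sketch of an implementation (the counting behavior is illustrative):
///
/// ```
/// use std::sync::atomic::{AtomicU64, Ordering};
///
/// use uv_cache::CleanReporter;
///
/// struct Counter(AtomicU64);
///
/// impl CleanReporter for Counter {
///     fn on_clean(&self) {
///         self.0.fetch_add(1, Ordering::Relaxed);
///     }
///
///     fn on_complete(&self) {
///         println!("removed {} entries", self.0.load(Ordering::Relaxed));
///     }
/// }
/// ```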
pub trait CleanReporter: Send + Sync {
    /// Called after one file or directory is removed.
    fn on_clean(&self);

    /// Called after all files and directories are removed.
    fn on_complete(&self);
}

/// The different kinds of data in the cache are stored in different buckets, which in our case
/// are subdirectories of the cache root.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum CacheBucket {
    /// Wheels (excluding built wheels), alongside their metadata and cache policy.
    ///
    /// There are three kinds of cache entries: wheel metadata and policy as `MsgPack` files, the
    /// wheels themselves, and the unzipped wheel archives. If a wheel file is over an in-memory
    /// size threshold, we first download the zip file into the cache, then unzip it into a
    /// directory with the same name (exclusive of the `.whl` extension).
    ///
    /// Cache structure:
    ///  * `wheel-metadata-v0/pypi/foo/{foo-1.0.0-py3-none-any.msgpack, foo-1.0.0-py3-none-any.whl}`
    ///  * `wheel-metadata-v0/<digest(index-url)>/foo/{foo-1.0.0-py3-none-any.msgpack, foo-1.0.0-py3-none-any.whl}`
    ///  * `wheel-metadata-v0/url/<digest(url)>/foo/{foo-1.0.0-py3-none-any.msgpack, foo-1.0.0-py3-none-any.whl}`
    ///
    /// See `uv_client::RegistryClient::wheel_metadata` for information on how wheel metadata
    /// is fetched.
    ///
    /// # Example
    ///
    /// Consider the following `requirements.in`:
    /// ```text
    /// # pypi wheel
    /// pandas
    /// # url wheel
    /// flask @ https://files.pythonhosted.org/packages/36/42/015c23096649b908c809c69388a805a571a3bea44362fe87e33fc3afa01f/flask-3.0.0-py3-none-any.whl
    /// ```
    ///
    /// When we run `pip compile`, it only fetches and caches the metadata (and cache policy); it
    /// doesn't need the actual wheels yet:
    /// ```text
    /// wheel-v0
    /// ├── pypi
    /// │   ...
    /// │   ├── pandas
    /// │   │   └── pandas-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.msgpack
    /// │   ...
    /// └── url
    ///     └── 4b8be67c801a7ecb
    ///         └── flask
    ///             └── flask-3.0.0-py3-none-any.msgpack
    /// ```
    ///
    /// We get the following `requirements.txt` from `pip compile`:
    ///
    /// ```text
    /// [...]
    /// flask @ https://files.pythonhosted.org/packages/36/42/015c23096649b908c809c69388a805a571a3bea44362fe87e33fc3afa01f/flask-3.0.0-py3-none-any.whl
    /// [...]
    /// pandas==2.1.3
    /// [...]
    /// ```
    ///
    /// If we run `pip sync` on `requirements.txt` on a different machine, it also fetches the
    /// wheels:
    ///
    /// TODO(konstin): This is still wrong, we need to store the cache policy too!
    /// ```text
    /// wheel-v0
    /// ├── pypi
    /// │   ...
    /// │   ├── pandas
    /// │   │   ├── pandas-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
    /// │   │   ├── pandas-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64
    /// │   ...
    /// └── url
    ///     └── 4b8be67c801a7ecb
    ///         └── flask
    ///             └── flask-3.0.0-py3-none-any.whl
    ///                 ├── flask
    ///                 │   └── ...
    ///                 └── flask-3.0.0.dist-info
    ///                     └── ...
    /// ```
    ///
    /// If we run first `pip compile` and then `pip sync` on the same machine, we get both:
    ///
    /// ```text
    /// wheels-v0
    /// ├── pypi
    /// │   ├── ...
    /// │   ├── pandas
    /// │   │   ├── pandas-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.msgpack
    /// │   │   ├── pandas-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
    /// │   │   └── pandas-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64
    /// │   │       ├── pandas
    /// │   │       │   ├── ...
    /// │   │       ├── pandas-2.1.3.dist-info
    /// │   │       │   ├── ...
    /// │   │       └── pandas.libs
    /// │   ├── ...
    /// └── url
    ///     └── 4b8be67c801a7ecb
    ///         └── flask
    ///             ├── flask-3.0.0-py3-none-any.msgpack
    ///             ├── flask-3.0.0-py3-none-any.whl
    ///             └── flask-3.0.0-py3-none-any
    ///                 ├── flask
    ///                 │   └── ...
    ///                 └── flask-3.0.0.dist-info
    ///                     └── ...
    /// ```
    Wheels,
    /// Source distributions, wheels built from source distributions, their extracted metadata, and the
    /// cache policy of the source distribution.
    ///
    /// The structure is similar to that of the `Wheels` bucket, except we have an additional layer
    /// for the source distribution filename, and the metadata is at the source distribution
    /// level, not at the wheel level.
    ///
    /// TODO(konstin): The cache policy should be on the source distribution level, the metadata we
    /// can put next to the wheels as in the `Wheels` bucket.
    ///
    /// The unzipped source distribution is stored in a directory matching the source distribution
    /// archive name.
    ///
    /// Source distributions are built into zipped wheel files (as PEP 517 specifies) and unzipped
    /// lazily before installing. So when resolving, we only build the wheel and store the archive
    /// file in the cache; when installing, we unpack it under the same name (exclusive of the
    /// `.whl` extension). You may find a mix of wheel archive zip files and unzipped wheel
    /// directories in the cache.
    ///
    /// Cache structure:
    ///  * `built-wheels-v0/pypi/foo/34a17436ed1e9669/{manifest.msgpack, metadata.msgpack, foo-1.0.0.zip, foo-1.0.0-py3-none-any.whl, ...other wheels}`
    ///  * `built-wheels-v0/<digest(index-url)>/foo/foo-1.0.0.zip/{manifest.msgpack, metadata.msgpack, foo-1.0.0-py3-none-any.whl, ...other wheels}`
    ///  * `built-wheels-v0/url/<digest(url)>/foo/foo-1.0.0.zip/{manifest.msgpack, metadata.msgpack, foo-1.0.0-py3-none-any.whl, ...other wheels}`
    ///  * `built-wheels-v0/git/<digest(url)>/<git sha>/foo/foo-1.0.0.zip/{metadata.msgpack, foo-1.0.0-py3-none-any.whl, ...other wheels}`
    ///
    /// But the URL filename does not need to be a valid source dist filename
    /// (<https://github.com/search?q=path%3A**%2Frequirements.txt+master.zip&type=code>),
    /// so it could also be the following, and we have to accept any string as a filename:
    ///  * `built-wheels-v0/url/<sha256(url)>/master.zip/metadata.msgpack`
    ///
    /// # Example
    ///
    /// The following requirements:
    /// ```text
    /// # git source dist
    /// pydantic-extra-types @ git+https://github.com/pydantic/pydantic-extra-types.git
    /// # pypi source dist
    /// django_allauth==0.51.0
    /// # url source dist
    /// werkzeug @ https://files.pythonhosted.org/packages/0d/cc/ff1904eb5eb4b455e442834dabf9427331ac0fa02853bf83db817a7dd53d/werkzeug-3.0.1.tar.gz
    /// ```
    ///
    /// ...may be cached as:
    /// ```text
    /// built-wheels-v4/
    /// ├── git
    /// │   └── 2122faf3e081fb7a
    /// │       └── 7a2d650a4a7b4d04
    /// │           ├── metadata.msgpack
    /// │           └── pydantic_extra_types-2.9.0-py3-none-any.whl
    /// ├── pypi
    /// │   └── django-allauth
    /// │       └── 0.51.0
    /// │           ├── 0gH-_fwv8tdJ7JwwjJsUc
    /// │           │   ├── django-allauth-0.51.0.tar.gz
    /// │           │   │   └── [UNZIPPED CONTENTS]
    /// │           │   ├── django_allauth-0.51.0-py3-none-any.whl
    /// │           │   └── metadata.msgpack
    /// │           └── revision.http
    /// └── url
    ///     └── 6781bd6440ae72c2
    ///         ├── APYY01rbIfpAo_ij9sCY6
    ///         │   ├── metadata.msgpack
    ///         │   ├── werkzeug-3.0.1-py3-none-any.whl
    ///         │   └── werkzeug-3.0.1.tar.gz
    ///         │       └── [UNZIPPED CONTENTS]
    ///         └── revision.http
    /// ```
    ///
    /// Structurally, the `manifest.msgpack` is empty, and only contains the caching information
    /// needed to invalidate the cache. The `metadata.msgpack` contains the metadata of the source
    /// distribution.
    SourceDistributions,
    /// Flat index responses, a format very similar to the simple metadata API.
    ///
    /// Cache structure:
    ///  * `flat-index-v0/index/<digest(flat_index_url)>.msgpack`
    ///
    /// The response is stored as `Vec<File>`.
    FlatIndex,
    /// Git repositories.
    Git,
    /// Information about an interpreter at a path.
    ///
    /// To avoid caching pyenv shims (bash scripts which may redirect to a new Python version
    /// without the shim itself changing), we only cache when the path equals `sys.executable`,
    /// i.e., when the path we're running is the Python executable itself and not a shim.
    ///
    /// Cache structure: `interpreter-v0/<digest(path)>.msgpack`
    ///
    /// # Example
    ///
    /// The contents of each of the `MsgPack` files have a timestamp field in Unix time, the [PEP 508]
    /// markers, and some information from the `sys`/`sysconfig` modules.
    ///
    /// ```json
    /// {
    ///   "timestamp": 1698047994491,
    ///   "data": {
    ///     "markers": {
    ///       "implementation_name": "cpython",
    ///       "implementation_version": "3.12.0",
    ///       "os_name": "posix",
    ///       "platform_machine": "x86_64",
    ///       "platform_python_implementation": "CPython",
    ///       "platform_release": "6.5.0-13-generic",
    ///       "platform_system": "Linux",
    ///       "platform_version": "#13-Ubuntu SMP PREEMPT_DYNAMIC Fri Nov  3 12:16:05 UTC 2023",
    ///       "python_full_version": "3.12.0",
    ///       "python_version": "3.12",
    ///       "sys_platform": "linux"
    ///     },
    ///     "base_exec_prefix": "/home/ferris/.pyenv/versions/3.12.0",
    ///     "base_prefix": "/home/ferris/.pyenv/versions/3.12.0",
    ///     "sys_executable": "/home/ferris/projects/uv/.venv/bin/python"
    ///   }
    /// }
    /// ```
    ///
    /// [PEP 508]: https://peps.python.org/pep-0508/#environment-markers
    Interpreter,
    /// Index responses through the simple metadata API.
    ///
    /// Cache structure:
    ///  * `simple-v0/pypi/<package_name>.rkyv`
    ///  * `simple-v0/<digest(index_url)>/<package_name>.rkyv`
    ///
    /// The response is parsed into `uv_client::SimpleDetailMetadata` before storage.
    Simple,
    /// A cache of unzipped wheels, stored as directories. This is used internally within the cache.
    /// When other buckets need to store directories, they should persist them to
    /// [`CacheBucket::Archive`], and then symlink them into the appropriate bucket. This ensures
    /// that cache entries can be atomically replaced and removed, as storing directories in the
    /// other buckets directly would make atomic operations impossible.
    Archive,
    /// Ephemeral virtual environments used to execute PEP 517 builds and other operations.
    Builds,
    /// Reusable virtual environments used to invoke Python tools.
    Environments,
    /// Cached Python downloads.
    Python,
    /// Downloaded tool binaries (e.g., Ruff).
    Binaries,
}

impl CacheBucket {
    fn to_str(self) -> &'static str {
        match self {
            // Note that when bumping this, you'll also need to bump it
            // in `crates/uv/tests/it/cache_prune.rs`.
            Self::SourceDistributions => "sdists-v9",
            Self::FlatIndex => "flat-index-v2",
            Self::Git => "git-v0",
            Self::Interpreter => "interpreter-v4",
            // Note that when bumping this, you'll also need to bump it
            // in `crates/uv/tests/it/cache_clean.rs`.
            Self::Simple => "simple-v20",
            // Note that when bumping this, you'll also need to bump it
            // in `crates/uv/tests/it/cache_prune.rs`.
            Self::Wheels => "wheels-v6",
            // Note that when bumping this, you'll also need to bump
            // `ARCHIVE_VERSION` in `crates/uv-cache/src/lib.rs`.
            Self::Archive => "archive-v0",
            Self::Builds => "builds-v0",
            Self::Environments => "environments-v2",
            Self::Python => "python-v0",
            Self::Binaries => "binaries-v0",
        }
    }

    /// Remove a package from the cache bucket.
    ///
    /// Returns the number of entries removed from the cache.
    fn remove(self, cache: &Cache, name: &PackageName) -> Result<Removal, io::Error> {
        /// Returns `true` if the [`Path`] represents a built wheel for the given package.
        fn is_match(path: &Path, name: &PackageName) -> bool {
            let Ok(metadata) = fs_err::read(path.join("metadata.msgpack")) else {
                return false;
            };
            let Ok(metadata) = rmp_serde::from_slice::<ResolutionMetadata>(&metadata) else {
                return false;
            };
            metadata.name == *name
        }

        let mut summary = Removal::default();
        match self {
            Self::Wheels => {
                // For `pypi` wheels, we expect a directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Pypi);
                summary += rm_rf(root.join(name.to_string()))?;

                // For alternate indices, we expect a directory for every index (under an `index`
                // subdirectory), followed by a directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Index);
                for directory in directories(root)? {
                    summary += rm_rf(directory.join(name.to_string()))?;
                }

                // For direct URLs, we expect a directory for every URL, followed by a
                // directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Url);
                for directory in directories(root)? {
                    summary += rm_rf(directory.join(name.to_string()))?;
                }
            }
            Self::SourceDistributions => {
                // For `pypi` source distributions, we expect a directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Pypi);
                summary += rm_rf(root.join(name.to_string()))?;

                // For alternate indices, we expect a directory for every index (under an `index`
                // subdirectory), followed by a directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Index);
                for directory in directories(root)? {
                    summary += rm_rf(directory.join(name.to_string()))?;
                }

                // For direct URLs, we expect a directory for every URL, followed by a
                // directory per version. To determine whether the URL is relevant, we need to
                // search for a wheel matching the package name.
                let root = cache.bucket(self).join(WheelCacheKind::Url);
                for url in directories(root)? {
                    if directories(&url)?.any(|version| is_match(&version, name)) {
                        summary += rm_rf(url)?;
                    }
                }

                // For local dependencies, we expect a directory for every path, followed by a
                // directory per version. To determine whether the path is relevant, we need to
                // search for a wheel matching the package name.
                let root = cache.bucket(self).join(WheelCacheKind::Path);
                for path in directories(root)? {
                    if directories(&path)?.any(|version| is_match(&version, name)) {
                        summary += rm_rf(path)?;
                    }
                }

                // For Git dependencies, we expect a directory for every repository, followed by a
                // directory for every SHA. To determine whether the SHA is relevant, we need to
                // search for a wheel matching the package name.
                let root = cache.bucket(self).join(WheelCacheKind::Git);
                for repository in directories(root)? {
                    for sha in directories(repository)? {
                        if is_match(&sha, name) {
                            summary += rm_rf(sha)?;
                        }
                    }
                }
            }
            Self::Simple => {
                // For `pypi` packages, we expect an rkyv file per package, indexed by name.
                let root = cache.bucket(self).join(WheelCacheKind::Pypi);
                summary += rm_rf(root.join(format!("{name}.rkyv")))?;

                // For alternate indices, we expect a directory for every index (under an `index`
                // subdirectory), followed by a directory per package (indexed by name).
                let root = cache.bucket(self).join(WheelCacheKind::Index);
                for directory in directories(root)? {
                    summary += rm_rf(directory.join(format!("{name}.rkyv")))?;
                }
            }
            Self::FlatIndex => {
                // We can't know if the flat index includes a package, so we just remove the entire
                // cache entry.
                let root = cache.bucket(self);
                summary += rm_rf(root)?;
            }
            Self::Git
            | Self::Interpreter
            | Self::Archive
            | Self::Builds
            | Self::Environments
            | Self::Python
            | Self::Binaries => {
                // Nothing to do.
            }
        }
        Ok(summary)
    }

    /// Return an iterator over all cache buckets.
    pub fn iter() -> impl Iterator<Item = Self> {
        // Every bucket must be listed here: `prune` treats any top-level directory that
        // doesn't match a known bucket as dangling and removes it.
        [
            Self::Wheels,
            Self::SourceDistributions,
            Self::FlatIndex,
            Self::Git,
            Self::Interpreter,
            Self::Simple,
            Self::Archive,
            Self::Builds,
            Self::Environments,
            Self::Python,
            Self::Binaries,
        ]
        .iter()
        .copied()
    }
}

impl Display for CacheBucket {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.to_str())
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Freshness {
    /// The cache entry is fresh according to the [`Refresh`] policy.
    Fresh,
    /// The cache entry is stale according to the [`Refresh`] policy.
    Stale,
    /// The cache entry does not exist.
    Missing,
}

impl Freshness {
    pub const fn is_fresh(self) -> bool {
        matches!(self, Self::Fresh)
    }

    pub const fn is_stale(self) -> bool {
        matches!(self, Self::Stale)
    }
}

/// A refresh policy for cache entries.
#[derive(Debug, Clone)]
pub enum Refresh {
    /// Don't refresh any entries.
    None(Timestamp),
    /// Refresh entries linked to the given packages, if created before the given timestamp.
    Packages(Vec<PackageName>, Vec<Box<Path>>, Timestamp),
    /// Refresh all entries created before the given timestamp.
    All(Timestamp),
}

impl Refresh {
    /// Determine the refresh strategy to use based on the command-line arguments.
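    ///
    /// A minimal sketch (flag values are illustrative):
    ///
    /// ```
    /// use uv_cache::Refresh;
    ///
    /// // A bare `--refresh` corresponds to `Some(true)` here.
    /// let policy = Refresh::from_args(Some(true), vec![]);
    /// assert!(!policy.is_none());
    /// ```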
    pub fn from_args(refresh: Option<bool>, refresh_package: Vec<PackageName>) -> Self {
        let timestamp = Timestamp::now();
        match refresh {
            Some(true) => Self::All(timestamp),
            Some(false) => Self::None(timestamp),
            None => {
                if refresh_package.is_empty() {
                    Self::None(timestamp)
                } else {
                    Self::Packages(refresh_package, vec![], timestamp)
                }
            }
        }
    }

    /// Return the [`Timestamp`] associated with the refresh policy.
    pub fn timestamp(&self) -> Timestamp {
        match self {
            Self::None(timestamp) => *timestamp,
            Self::Packages(.., timestamp) => *timestamp,
            Self::All(timestamp) => *timestamp,
        }
    }

    /// Returns `true` if no entries should be refreshed.
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None(_))
    }

    /// Combine two [`Refresh`] policies, taking the "max" of the two policies.
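    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use uv_cache::Refresh;
    /// use uv_cache_info::Timestamp;
    ///
    /// let none = Refresh::None(Timestamp::now());
    /// let all = Refresh::All(Timestamp::now());
    /// // `All` dominates `None`; the later timestamp is kept.
    /// assert!(!none.combine(all).is_none());
    /// ```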
    #[must_use]
    pub fn combine(self, other: Self) -> Self {
        match (self, other) {
            // If the policy is `None`, return the existing refresh policy.
            // Take the `max` of the two timestamps.
            (Self::None(t1), Self::None(t2)) => Self::None(t1.max(t2)),
            (Self::None(t1), Self::All(t2)) => Self::All(t1.max(t2)),
            (Self::None(t1), Self::Packages(packages, paths, t2)) => {
                Self::Packages(packages, paths, t1.max(t2))
            }

            // If the policy is `All`, refresh all packages.
            (Self::All(t1), Self::None(t2) | Self::All(t2) | Self::Packages(.., t2)) => {
                Self::All(t1.max(t2))
            }

            // If the policy is `Packages`, take the "max" of the two policies.
            (Self::Packages(packages, paths, t1), Self::None(t2)) => {
                Self::Packages(packages, paths, t1.max(t2))
            }
            (Self::Packages(.., t1), Self::All(t2)) => Self::All(t1.max(t2)),
            (Self::Packages(packages1, paths1, t1), Self::Packages(packages2, paths2, t2)) => {
                Self::Packages(
                    packages1.into_iter().chain(packages2).collect(),
                    paths1.into_iter().chain(paths2).collect(),
                    t1.max(t2),
                )
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use crate::ArchiveId;

    use super::Link;

    #[test]
    fn test_link_round_trip() {
        let id = ArchiveId::new();
        let link = Link::new(id);
        let s = link.to_string();
        let parsed = Link::from_str(&s).unwrap();
        assert_eq!(link.id, parsed.id);
        assert_eq!(link.version, parsed.version);
    }

    #[test]
    fn test_link_deserialize() {
        assert!(Link::from_str("archive-v0/foo").is_ok());
        assert!(Link::from_str("archive/foo").is_err());
        assert!(Link::from_str("v1/foo").is_err());
        assert!(Link::from_str("archive-v0/").is_err());
    }
}