ferrous_actions/cache_cargo_home.rs

use crate::action_paths::get_action_cache_dir;
use crate::actions::cache::Entry as CacheEntry;
use crate::actions::core;
use crate::agnostic_path::AgnosticPath;
use crate::delta::{render_list as render_delta_list, Action as DeltaAction};
use crate::dir_tree::match_relative_paths;
use crate::fingerprinting::{fingerprint_path_with_ignores, Fingerprint, Ignores};
use crate::hasher::Blake3 as Blake3Hasher;
use crate::input_manager::{self, Input};
use crate::job::Job;
use crate::node::os::homedir;
use crate::node::path::Path;
use crate::{actions, error, info, node, notice, safe_encoding, warning, Error};
use chrono::{DateTime, Utc};
use lazy_static::lazy_static;
use rustup_toolchain_manifest::HashValue;
use serde::{Deserialize, Serialize};
use simple_path_match::{PathMatch, PathMatchBuilder};
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::hash::Hash as _;
use std::str::FromStr;
use strum::{Display, EnumIter, EnumString, IntoEnumIterator, IntoStaticStr};

const ATIMES_SUPPORTED_KEY: &str = "ACCESS_TIMES_SUPPORTED";
const DEFAULT_CROSS_OS_SHARING: CrossPlatformSharing = CrossPlatformSharing::None;
const SCOPE_HASH_KEY: &str = "SCOPE_HASH";

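// Cargo home location: honour the `CARGO_HOME` environment variable when set,
// falling back to `.cargo` under the user's home directory.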
lazy_static! {
    static ref CARGO_HOME: String = {
        node::process::get_env()
            .get("CARGO_HOME")
            .map(String::as_str)
            .map_or_else(|| homedir().join(".cargo"), Path::from)
            .to_string()
    };
}

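/// How broadly cache entries may be shared across operating systems
/// (parsed from the `Input::CrossPlatformSharing` action input).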
#[derive(Clone, Copy, Debug, EnumString)]
enum CrossPlatformSharing {
    #[strum(serialize = "none")]
    None,

    #[strum(serialize = "unix-like")]
    UnixLike,

    #[strum(serialize = "all")]
    All,
}

impl CrossPlatformSharing {
    pub fn current_platform(self) -> Cow<'static, str> {
        match self {
            CrossPlatformSharing::All => "any".into(),
            CrossPlatformSharing::None => node::os::platform().into(),
            CrossPlatformSharing::UnixLike => {
                let platform = node::os::platform();
                match platform.as_str() {
                    "aix" | "darwin" | "freebsd" | "linux" | "openbsd" | "sunos" => "unix-like".into(),
                    _ => platform.into(),
                }
            }
        }
    }
}

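/// A set of fingerprinted entries that is saved and restored as a single
/// cache key, together with the key it was restored from (if any).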
#[derive(Clone, Debug, Serialize, Deserialize)]
struct Group {
    restore_key: Option<String>,
    entries: BTreeMap<AgnosticPath, Fingerprint>,
}

impl Group {
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    pub fn last_modified(&self) -> Option<DateTime<Utc>> {
        let mut result = None;
        for fingerprint in self.entries.values() {
            result = match (result, fingerprint.modified()) {
                (None, modified) | (modified, None) => modified,
                (Some(a), Some(b)) => Some(std::cmp::max(a, b)),
            };
        }
        result
    }
}

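/// In-memory index of one cached cargo home folder: its type, its on-disk
/// root, and the fingerprinted groups found beneath it.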
#[derive(Clone, Debug, Serialize, Deserialize)]
struct Cache {
    cache_type: CacheType,
    root: BTreeMap<AgnosticPath, Group>,
    root_path: String,
}

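/// Identity of a group by name: its path plus the number and a hash of the
/// names of its entries (not their contents). Used when building cache keys.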
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
struct GroupIdentifier {
    path: AgnosticPath,
    num_entries: usize,
    entries_hash: HashValue,
}

impl Cache {
    pub async fn new(cache_type: CacheType) -> Result<Cache, Error> {
        let sources = HashMap::new();
        Self::new_with_sources(cache_type, sources).await
    }

    async fn new_with_sources(
        cache_type: CacheType,
        mut sources: HashMap<AgnosticPath, String>,
    ) -> Result<Cache, Error> {
        // Delete derived content at any paths where we are about to build the cache
        for delete_path in find_additional_delete_paths(cache_type).await? {
            if delete_path.exists().await {
                info!("Pruning redundant cache element: {}", delete_path);
                actions::io::rm_rf(&delete_path).await?;
            }
        }
        let grouping_depth = cache_type.grouping_depth();
        let entry_depth = cache_type.entry_depth();
        assert!(
            grouping_depth <= entry_depth,
            "Cannot group at a higher depth than individual cache entries"
        );
        let top_depth_glob = depth_to_match(grouping_depth)?;
        let folder_path = find_path(cache_type);
        let top_depth_paths = match_relative_paths(&folder_path, &top_depth_glob, true).await?;
        let entry_depth_relative = entry_depth - grouping_depth;
        let mut map = BTreeMap::new();
        for group in top_depth_paths {
            let group_path = folder_path.join(&group);
            map.insert(
                AgnosticPath::from(&group),
                Group {
                    restore_key: sources.remove(&AgnosticPath::from(&group)),
                    entries: Self::build_group(cache_type, &group_path, entry_depth_relative).await?,
                },
            );
        }
        if !sources.is_empty() {
            error!("One or more restored cache keys did not map to a path: {:#?}", sources);
        }
        Ok(Cache {
            cache_type,
            root: map,
            root_path: folder_path.to_string(),
        })
    }

    fn build_group_identifier(&self, group_path: &AgnosticPath) -> GroupIdentifier {
        let group = self
            .root
            .get(group_path)
            .unwrap_or_else(|| panic!("Unknown group: {}", group_path));
        let mut hasher = Blake3Hasher::default();
        group.entries.len().hash(&mut hasher);
        group.entries.keys().for_each(|k| k.hash(&mut hasher));
        GroupIdentifier {
            path: group_path.clone(),
            num_entries: group.entries.len(),
            entries_hash: hasher.hash_value(),
        }
    }

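    /// Restores this cache type from the actions cache: deletes any existing
    /// on-disk folder, fetches the dependency list saved for this job and
    /// scope, restores each group it names, then rebuilds the index from disk.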
    pub async fn restore_from_env(
        cache_type: CacheType,
        scope: &HashValue,
        cross_platform_sharing: CrossPlatformSharing,
    ) -> Result<Cache, Error> {
        use crate::access_times::revert_folder;
        use itertools::Itertools as _;

        let job = Job::from_env()?;

        // Delete existing cache
        let folder_path = find_path(cache_type);
        if folder_path.exists().await {
            warning!(
                concat!(
                    "Cache action will delete the existing contents of {} and any derived information. ",
                    "To avoid this warning, place this action earlier or delete this folder before running the action."
                ),
                folder_path
            );
            actions::io::rm_rf(&folder_path).await?;
        }

        let entry = build_cache_entry_dependencies(cache_type, scope, &job)?;
        let restore_key = entry.restore().await.map_err(Error::Js)?;
        let mut restore_keys = HashMap::new();
        if let Some(restore_key) = restore_key {
            info!(
                "Located dependencies list for {} in cache using key {}.",
                cache_type.friendly_name(),
                restore_key
            );
            let dep_file_path = dependency_file_path(cache_type, scope, &job)?;
            let groups: Vec<GroupIdentifier> = {
                let file_contents = node::fs::read_file(&dep_file_path).await?;
                postcard::from_bytes(&file_contents)?
            };
            let group_list_string = groups.iter().map(|g| &g.path).join(", ");
            info!(
                "The following groups will be restored for cache type {}: {}",
                cache_type.friendly_name(),
                group_list_string
            );
            for group in &groups {
                let entry = Self::group_identifier_to_cache_entry(cache_type, group, cross_platform_sharing);
                if let Some(name) = entry.restore().await? {
                    info!("Restored cache key: {}", name);
                    restore_keys.insert(group.path.clone(), name);
                } else {
                    info!(
                        "Failed to find {} cache entry for {}",
                        cache_type.friendly_name(),
                        group.path
                    );
                }
            }
        } else {
            info!("No existing dependency list for {} found.", cache_type.friendly_name());
        }
        // Ensure we at least have an empty folder
        node::fs::create_dir_all(&folder_path).await?;
        // Revert access times
        revert_folder(&folder_path).await?;
        Self::new_with_sources(cache_type, restore_keys).await
    }

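    /// Diffs this cache against `old` and saves what changed: the dependency
    /// list when group membership differs, plus each new or modified group
    /// (modified groups are skipped while still inside `min_recache_interval`).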
    pub async fn save_changes(
        &self,
        old: &Cache,
        scope_hash: &HashValue,
        min_recache_interval: &chrono::Duration,
        cross_platform_sharing: CrossPlatformSharing,
    ) -> Result<(), Error> {
        let job = Job::from_env()?;
        let dep_file_path = dependency_file_path(self.cache_type, scope_hash, &job)?;
        let old_groups = if dep_file_path.exists().await {
            let file_contents = node::fs::read_file(&dep_file_path).await?;
            postcard::from_bytes(&file_contents)?
        } else {
            Vec::new()
        };
        let new_groups = self.group_identifiers();
        let group_list_delta = Self::compare_group_lists(&old_groups, &new_groups);
        if group_list_delta.is_empty() {
            info!("{} dependency list is unchanged.", self.cache_type.friendly_name());
        } else {
            info!("{} dependency list changed:", self.cache_type.friendly_name());
            info!("{}", render_delta_list(&group_list_delta));
            let serialized_groups = postcard::to_stdvec(&new_groups)?;
            {
                let parent = dep_file_path.parent();
                node::fs::create_dir_all(&parent).await?;
            }
            node::fs::write_file(&dep_file_path, &serialized_groups).await?;
            let dependencies_entry = build_cache_entry_dependencies(self.cache_type, scope_hash, &job)?;
            dependencies_entry.save().await?;
            info!("{} dependency list was successfully saved.", self.cache_type.friendly_name());
        }

        for (path, group) in &self.root {
            let (attempt_save, old_restore_key) = if let Some(old_group) = old.root.get(path) {
                let group_delta = Self::compare_groups(&old_group.entries, &group.entries);
                let attempt_save = if group_delta.is_empty() {
                    // The group's content is unchanged
                    false
                } else {
                    // The modification time is dubious because we cannot track when file deletions
                    // occur, and modification times may be preserved from some sort of archive.
                    // It should work fine for changes to Git repos, however, which are our main
                    // concern.
                    let old_modification = old_group.last_modified().unwrap_or_default();
                    // Be robust against our delta being negative.
                    let modification_delta = chrono::Utc::now() - old_modification;
                    let modification_delta = std::cmp::max(chrono::Duration::zero(), modification_delta);

                    let interval_is_sufficient = modification_delta > *min_recache_interval;
                    if interval_is_sufficient {
                        info!("Cached {} group {} has changed:", self.cache_type.friendly_name(), path);
                        info!("{}", render_delta_list(&group_delta));
                        true
                    } else {
                        use humantime::format_duration;
                        info!(
                            "Cached {} group {} outdated by {}, but not updating cache since minimum recache interval is {}.",
                            self.cache_type.friendly_name(),
                            path,
                            format_duration(modification_delta.to_std()?),
                            format_duration(min_recache_interval.to_std()?),
                        );
                        false
                    }
                };
                (attempt_save, old_group.restore_key.as_deref())
            } else {
                // The group did not previously exist in the cache
                (true, None)
            };

            if attempt_save {
                let identifier = self.build_group_identifier(path);
                let entry = Self::group_identifier_to_cache_entry(self.cache_type, &identifier, cross_platform_sharing);
                info!(
                    "Saving modified {} cache group {}",
                    self.cache_type.friendly_name(),
                    path
                );
                if entry.save_if_update(old_restore_key).await?.is_some() {
                    info!(
                        "{} cache group {} saved successfully.",
                        self.cache_type.friendly_name(),
                        path
                    );
                } else {
                    info!(
                        concat!(
                            "It looks like the changed {} cache group {} already exists. ",
                            "Not saving our version this time around because we can't be certain it's a useful update. "
                        ),
                        self.cache_type.friendly_name(),
                        path
                    );
                }
            }
        }
        Ok(())
    }

    async fn build_entry(cache_type: CacheType, entry_path: &Path) -> Result<Fingerprint, Error> {
        let ignores = cache_type.ignores();
        fingerprint_path_with_ignores(entry_path, &ignores).await
    }

    async fn build_group(
        cache_type: CacheType,
        group_path: &Path,
        entry_level: usize,
    ) -> Result<BTreeMap<AgnosticPath, Fingerprint>, Error> {
        let entry_level_glob = depth_to_match(entry_level)?;
        let entry_level_paths = match_relative_paths(group_path, &entry_level_glob, true).await?;
        let mut map = BTreeMap::new();
        for path in entry_level_paths {
            let entry_path = group_path.join(&path);
            map.insert(
                AgnosticPath::from(&path),
                Self::build_entry(cache_type, &entry_path).await?,
            );
        }
        Ok(map)
    }

    fn group_identifiers(&self) -> Vec<GroupIdentifier> {
        self.root
            .keys()
            .map(|group_path| self.build_group_identifier(group_path))
            .collect()
    }

    fn compare_group_lists<'a>(
        from: &'a [GroupIdentifier],
        to: &'a [GroupIdentifier],
    ) -> Vec<(&'a AgnosticPath, DeltaAction)> {
        use itertools::{EitherOrBoth, Itertools as _};
        let from_iter = from.iter();
        let to_iter = to.iter();
        let merged = from_iter.merge_join_by(to_iter, |left, right| left.path.cmp(&right.path));
        merged
            .filter_map(|element| match element {
                EitherOrBoth::Left(left) => Some((&left.path, DeltaAction::Removed)),
                EitherOrBoth::Right(right) => Some((&right.path, DeltaAction::Added)),
                EitherOrBoth::Both(left, right) => (left != right).then_some((&right.path, DeltaAction::Changed)),
            })
            .collect()
    }

    fn compare_groups<'a>(
        from: &'a BTreeMap<AgnosticPath, Fingerprint>,
        to: &'a BTreeMap<AgnosticPath, Fingerprint>,
    ) -> Vec<(&'a AgnosticPath, DeltaAction)> {
        use itertools::{EitherOrBoth, Itertools as _};
        let from_iter = from.iter();
        let to_iter = to.iter();
        let merged = from_iter.merge_join_by(to_iter, |left, right| left.0.cmp(right.0));
        merged
            .filter_map(|element| match element {
                EitherOrBoth::Left(left) => Some((left.0, DeltaAction::Removed)),
                EitherOrBoth::Right(right) => Some((right.0, DeltaAction::Added)),
                EitherOrBoth::Both(left, right) => {
                    (left.1.content_hash() != right.1.content_hash()).then_some((right.0, DeltaAction::Changed))
                }
            })
            .collect()
    }

    fn group_identifier_to_cache_entry(
        cache_type: CacheType,
        group_id: &GroupIdentifier,
        cross_platform_sharing: CrossPlatformSharing,
    ) -> CacheEntry {
        use crate::cache_key_builder::{Attribute, CacheKeyBuilder};

        let name = format!("{} (content)", cache_type.friendly_name());
        let mut builder = CacheKeyBuilder::new(&name);
        builder.add_key_data(group_id);
        builder.set_attribute(Attribute::Path, group_id.path.to_string());
        builder.set_attribute(Attribute::NumEntries, group_id.num_entries.to_string());
        let entries_hash = {
            let lsb: &[u8] = group_id.entries_hash.as_ref();
            let lsb = &lsb[..std::cmp::min(8, lsb.len())];
            safe_encoding::encode(lsb)
        };
        builder.set_attribute(Attribute::EntriesHash, entries_hash);

        let sharing_platform = cross_platform_sharing.current_platform();
        let origin_platform = node::os::platform();
        if sharing_platform != origin_platform {
            builder.set_attribute(Attribute::OriginPlatform, origin_platform);
        }
        builder.set_key_attribute(Attribute::Platform, sharing_platform.to_string());

        let mut entry = builder.into_entry();
        let root_path = find_path(cache_type);
        let path = root_path.join(&group_id.path);
        entry.path(path);
        entry
    }

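    /// Deletes from `right` (and from disk under `right_path`) every entry
    /// whose access time is unchanged from `left`, i.e. restored but never read.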
    async fn prune_unused_entries(
        left: &BTreeMap<AgnosticPath, Fingerprint>,
        right: &mut BTreeMap<AgnosticPath, Fingerprint>,
        right_path: &Path,
    ) -> Result<(), Error> {
        use itertools::{EitherOrBoth, Itertools as _};
        let from_iter = left.iter();
        let to_iter = right.iter();
        let merged = from_iter.merge_join_by(to_iter, |left, right| left.0.cmp(right.0));
        let to_prune: Vec<&AgnosticPath> = merged
            .filter_map(|element| match element {
                EitherOrBoth::Left(_) | EitherOrBoth::Right(_) => None,
                EitherOrBoth::Both(left, right) => (left.1.accessed() == right.1.accessed()).then_some(left.0),
            })
            .collect();

        for element_path in to_prune {
            let path = right_path.join(element_path);
            info!("Pruning unused cache element at {}", path);
            actions::io::rm_rf(&path).await?;
            right.remove(element_path);
        }
        Ok(())
    }

    pub async fn prune_unused(&mut self, old: &Cache) -> Result<(), Error> {
        use itertools::{EitherOrBoth, Itertools as _};
        let root_path = Path::from(&self.root_path);
        let from_iter = old.root.iter();
        let to_iter = self.root.iter_mut();
        let merged = from_iter.merge_join_by(to_iter, |left, right| left.0.cmp(right.0));
        for element in merged {
            match element {
                EitherOrBoth::Left(_) | EitherOrBoth::Right(_) => {}
                EitherOrBoth::Both(left, right) => {
                    let entry_path = root_path.join(right.0);
                    Self::prune_unused_entries(&left.1.entries, &mut right.1.entries, &entry_path).await?;
                }
            }
        }
        self.root.retain(|k, v| {
            let keep = !v.is_empty();
            if !keep {
                info!("Removing empty cache group: {}", k);
            }
            keep
        });
        Ok(())
    }

    pub fn get_root_path(&self) -> Path {
        Path::from(&self.root_path)
    }
}

fn find_cargo_home() -> Path {
    Path::from(CARGO_HOME.as_str())
}

fn find_path(cache_type: CacheType) -> Path {
    find_cargo_home().join(cache_type.relative_path())
}

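/// Builds a matcher for relative paths exactly `depth` components deep
/// ("." for depth zero, otherwise "*" repeated `depth` times, joined by "/").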
fn depth_to_match(depth: usize) -> Result<PathMatch, Error> {
    use itertools::Itertools as _;

    let pattern = if depth == 0 {
        ".".into()
    } else {
        std::iter::repeat("*").take(depth).join("/")
    };
    Ok(PathMatch::from_pattern(&pattern, &node::path::separator())?)
}

async fn find_additional_delete_paths(cache_type: CacheType) -> Result<Vec<Path>, Error> {
    let mut path_match_builder = PathMatchBuilder::new(&node::path::separator());
    cache_type.add_additional_delete_paths(&mut path_match_builder)?;
    let path_matcher = path_match_builder.build()?;
    let home_path = find_cargo_home();
    let result = if home_path.exists().await {
        match_relative_paths(&home_path, &path_matcher, false).await?
    } else {
        Vec::new()
    };
    Ok(result)
}

fn cached_folder_info_path(cache_type: CacheType) -> Result<Path, Error> {
    let file_name = format!("{}.postcard", cache_type.short_name());
    Ok(get_action_cache_dir()?.join("cached-folder-info").join(&file_name))
}

fn dependency_files_dir() -> Result<Path, Error> {
    Ok(get_action_cache_dir()?.join("dependency-data"))
}

#[derive(
    Debug, Clone, Copy, EnumIter, EnumString, Eq, Hash, PartialEq, IntoStaticStr, Display, Serialize, Deserialize,
)]
enum CacheType {
    #[strum(serialize = "indices")]
    Indices,

    #[strum(serialize = "crates")]
    Crates,

    #[strum(serialize = "git-repos")]
    GitRepos,
}

impl CacheType {
    fn short_name(&self) -> Cow<str> {
        let name: &str = self.into();
        name.into()
    }

    fn friendly_name(&self) -> Cow<str> {
        match *self {
            CacheType::Indices => "registry indices",
            CacheType::Crates => "crate files",
            CacheType::GitRepos => "Git repositories",
        }
        .into()
    }

    fn relative_path(self) -> Path {
        match self {
            CacheType::Indices => Path::from("registry").join("index"),
            CacheType::Crates => Path::from("registry").join("cache"),
            CacheType::GitRepos => Path::from("git").join("db"),
        }
    }

    fn add_additional_delete_paths(self, match_builder: &mut PathMatchBuilder) -> Result<(), Error> {
        // These are paths we should delete both when restoring the cache and
        // before saving it. This is primarily because we want to see which
        // cached items are accessed: leaving derived information around can
        // mean cached items never have their content read, leading to items
        // repeatedly being evicted and then restored.
        match self {
            CacheType::Indices => {
                match_builder.add_pattern("registry/index/*/.cache")?;
            }
            CacheType::Crates => {
                match_builder.add_pattern("registry/src")?;
            }
            CacheType::GitRepos => {
                match_builder.add_pattern("git/checkouts")?;
            }
        }
        Ok(())
    }

    fn ignores(self) -> Ignores {
        // Depths are relative to the entry
        let mut ignores = Ignores::default();
        match self {
            CacheType::Indices => {
                ignores.add(1, ".last-updated");
            }
            CacheType::Crates | CacheType::GitRepos => {}
        }
        ignores
    }

    #[allow(clippy::unused_self)]
    fn grouping_depth(self) -> usize {
        1
    }

    fn entry_depth(self) -> usize {
        match self {
            CacheType::Indices | CacheType::GitRepos => 1,
            CacheType::Crates => {
                // This means we can prune individual crate files within an index
                2
            }
        }
    }

    fn default_min_recache_interval(self) -> chrono::Duration {
        match self {
            CacheType::Indices => chrono::Duration::days(2),
            _ => chrono::Duration::zero(),
        }
    }

    fn min_recache_input(self) -> input_manager::Input {
        match self {
            CacheType::Indices => input_manager::Input::MinRecacheIndices,
            CacheType::GitRepos => input_manager::Input::MinRecacheGitRepos,
            CacheType::Crates => input_manager::Input::MinRecacheCrates,
        }
    }
}

fn get_cross_platform_sharing(input_manager: &input_manager::Manager) -> Result<CrossPlatformSharing, Error> {
    Ok(if let Some(value) = input_manager.get(Input::CrossPlatformSharing) {
        CrossPlatformSharing::from_str(value).map_err(|_| Error::ParseCrossPlatformSharing(value.to_string()))?
    } else {
        DEFAULT_CROSS_OS_SHARING
    })
}

fn get_types_to_cache(input_manager: &input_manager::Manager) -> Result<Vec<CacheType>, Error> {
    let mut result = HashSet::new();
    if let Some(types) = input_manager.get(Input::CacheOnly) {
        let types = types.split_whitespace();
        for cache_type in types {
            let cache_type =
                CacheType::from_str(cache_type).map_err(|_| Error::ParseCacheableItem(cache_type.to_string()))?;
            result.insert(cache_type);
        }
    } else {
        result.extend(CacheType::iter());
    }
    Ok(result.into_iter().collect())
}

fn get_min_recache_interval(
    input_manager: &input_manager::Manager,
    cache_type: CacheType,
) -> Result<chrono::Duration, Error> {
    let result = if let Some(duration) = input_manager.get(cache_type.min_recache_input()) {
        let duration = humantime::parse_duration(duration)?;
        chrono::Duration::from_std(duration)?
    } else {
        cache_type.default_min_recache_interval()
    };
    Ok(result)
}

#[derive(Debug, Serialize, Deserialize)]
struct CachedFolderInfo {
    path: String,
    fingerprint: Fingerprint,
}

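/// Where the serialized dependency list for this (scope, cache type, job)
/// combination lives on disk; the file name is a hash of all three.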
fn dependency_file_path(cache_type: CacheType, scope: &HashValue, job: &Job) -> Result<Path, Error> {
    let dependency_dir = dependency_files_dir()?;
    let mut hasher = Blake3Hasher::default();
    scope.hash(&mut hasher);
    cache_type.hash(&mut hasher);
    job.hash(&mut hasher);
    let file_name = format!("{}.postcard", hasher.hash_value());
    Ok(dependency_dir.join(&file_name))
}

fn build_cache_entry_dependencies(cache_type: CacheType, scope: &HashValue, job: &Job) -> Result<CacheEntry, Error> {
    use crate::cache_key_builder::{Attribute, CacheKeyBuilder};
    let name = format!("{} (dependencies)", cache_type.friendly_name());
    let mut key_builder = CacheKeyBuilder::new(&name);
    key_builder.add_key_data(scope);
    key_builder.set_key_attribute(Attribute::Workflow, job.get_workflow().to_string());
    key_builder.set_key_attribute(Attribute::Job, job.get_job_id().to_string());
    if let Some(properties) = job.matrix_properties_as_string() {
        key_builder.set_key_attribute(Attribute::Matrix, properties);
    }
    let mut cache_entry = key_builder.into_entry();
    let path = dependency_file_path(cache_type, scope, job)?;
    cache_entry.path(path);
    Ok(cache_entry)
}

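/// Entry point for the restore phase: probes access-time support, derives the
/// scope hash, restores each selected cache type, and records the resulting
/// folder state for the save phase to diff against.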
pub async fn restore_cargo_cache(input_manager: &input_manager::Manager) -> Result<(), Error> {
    use crate::access_times::supports_atime;
    use crate::cargo_lock_hashing::hash_cargo_lock_files;

    info!("Checking to see if filesystem supports access times...");
    let atimes_supported = supports_atime().await?;
    if atimes_supported {
        info!(concat!(
            "File access times supported. Hooray! ",
            "These will be used to intelligently decide what can be dropped from within cached cargo home items."
        ));
    } else {
        notice!(concat!(
            "File access times not supported - cannot perform intelligent cache pruning. ",
            "Likely this platform is Windows. ",
            "The hash of all Cargo.lock files found in this folder will be used as part of the key for cached cargo home entries. ",
            "This means certain caches will be rebuilt from scratch whenever a Cargo.lock file changes. ",
            "This is to avoid cache entries growing monotonically. ",
            "Note that enabling file access times on Windows is generally a bad idea since Microsoft never implemented relatime semantics."
        ));
    }
    core::save_state(ATIMES_SUPPORTED_KEY, serde_json::to_string(&atimes_supported)?);

    let scope_hash = if atimes_supported {
        // We can't use the empty array because it will encode to an empty string, which
        // doesn't play well with `save_state`.
        HashValue::from_bytes(&[42u8])
    } else {
        let cwd = node::process::cwd();
        let lock_hash = hash_cargo_lock_files(&cwd).await?;
        HashValue::from_bytes(&lock_hash.bytes)
    };
    core::save_state(SCOPE_HASH_KEY, safe_encoding::encode(&scope_hash));

    let cross_platform_sharing = get_cross_platform_sharing(input_manager)?;
    let cached_types = get_types_to_cache(input_manager)?;
    for cache_type in cached_types {
        core::start_group(cache_type.friendly_name().to_string());
        // Mark as used to avoid spurious warnings (we only use this when we save the entries)
        let _ = get_min_recache_interval(input_manager, cache_type)?;

        // Build the cache
        let cache = Cache::restore_from_env(cache_type, &scope_hash, cross_platform_sharing).await?;
        let serialized_cache = postcard::to_stdvec(&cache)?;
        let cached_info_path = cached_folder_info_path(cache_type)?;
        {
            let parent = cached_info_path.parent();
            node::fs::create_dir_all(&parent).await?;
        }
        node::fs::write_file(&cached_info_path, &serialized_cache).await?;
        core::end_group();
    }
    Ok(())
}

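/// Entry point for the save phase: reloads the state recorded at restore time,
/// prunes unused items where access times are available, and saves any changed
/// groups back to the cache.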
pub async fn save_cargo_cache(input_manager: &input_manager::Manager) -> Result<(), Error> {
    let scope_hash = core::get_state(SCOPE_HASH_KEY).expect("Failed to find scope ID hash");
    let scope_hash = safe_encoding::decode(&scope_hash).expect("Failed to decode scope ID hash");
    let scope_hash = HashValue::from_bytes(&scope_hash);

    let atimes_supported = core::get_state(ATIMES_SUPPORTED_KEY).expect("Failed to find access times support flag");
    let atimes_supported: bool = serde_json::de::from_str(&atimes_supported)?;

    let cross_platform_sharing = get_cross_platform_sharing(input_manager)?;
    let cached_types = get_types_to_cache(input_manager)?;
    for cache_type in cached_types {
        core::start_group(cache_type.friendly_name().to_string());
        // Delete items that should never make it into the cache
        for delete_path in find_additional_delete_paths(cache_type).await? {
            if delete_path.exists().await {
                info!("Pruning redundant cache element: {}", delete_path);
                actions::io::rm_rf(&delete_path).await?;
            }
        }

        // Restore the old cache
        let cache_old: Cache = {
            let cached_info_path = cached_folder_info_path(cache_type)?;
            let cache_serialized = node::fs::read_file(&cached_info_path).await?;
            postcard::from_bytes(&cache_serialized)?
        };

        // Construct the new cache
        let mut cache = Cache::new(cache_type).await?;

        // Check the path to the cached items hasn't changed
        if cache.get_root_path() != cache_old.get_root_path() {
            use wasm_bindgen::JsError;
            let error = JsError::new(&format!(
                "Path to cache changed from {} to {}. Perhaps CARGO_HOME changed?",
                cache_old.get_root_path(),
                cache.get_root_path()
            ));
            return Err(Error::Js(error.into()));
        }

        // Prune unused items (if we have access time support)
        if atimes_supported {
            cache.prune_unused(&cache_old).await?;
        }

        // Save groups to cache if they have changed
        let min_recache_interval = get_min_recache_interval(input_manager, cache_type)?;
        cache
            .save_changes(&cache_old, &scope_hash, &min_recache_interval, cross_platform_sharing)
            .await?;
        core::end_group();
    }
    Ok(())
}
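
// A minimal sketch of the input-parsing behaviour, relying only on the strum
// derives declared above (`EnumString` on `CrossPlatformSharing` and
// `CacheType`); nothing here touches the filesystem or the actions cache.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_cross_platform_sharing_values() {
        // The serializations declared on the enum: "none", "unix-like", "all".
        assert!(matches!(
            CrossPlatformSharing::from_str("unix-like"),
            Ok(CrossPlatformSharing::UnixLike)
        ));
        // Unknown values fail to parse; `get_cross_platform_sharing` surfaces
        // this as `Error::ParseCrossPlatformSharing`.
        assert!(CrossPlatformSharing::from_str("windows-only").is_err());
    }

    #[test]
    fn parses_cache_type_names() {
        // The serializations declared on the enum: "indices", "crates", "git-repos".
        assert!(matches!(CacheType::from_str("git-repos"), Ok(CacheType::GitRepos)));
        assert!(CacheType::from_str("bogus").is_err());
    }
}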