Skip to main content

ant_node/upgrade/
release_cache.rs

1//! Disk cache for GitHub release metadata.
2//!
3//! When multiple ant-node instances run on the same machine, each would
4//! otherwise poll the GitHub API independently.  `ReleaseCache` stores the
5//! most recent API response on disk with a configurable TTL so that only the
6//! first node to hit a stale cache actually contacts GitHub.
7
8use crate::error::{Error, Result};
9use crate::upgrade::monitor::{Asset, GitHubRelease};
10use fs2::FileExt;
11use serde::{Deserialize, Serialize};
12use std::fs::{self, File};
13use std::io::Write;
14use std::path::PathBuf;
15use std::time::{Duration, SystemTime, UNIX_EPOCH};
16use tracing::debug;
17
/// On-disk cache for GitHub release metadata.
///
/// Holds only configuration (a directory path and a TTL); all shared state
/// lives on disk, so clones are independent handles onto the same cache files.
#[derive(Clone)]
pub struct ReleaseCache {
    /// Directory that holds the cache file and its lock.
    cache_dir: PathBuf,
    /// How long a cached response is considered fresh.
    ttl: Duration,
}
26
/// Serialized container written to disk.
///
/// This is the JSON schema of `releases.json`; field names are part of the
/// on-disk format and must not be renamed without a migration.
#[derive(Serialize, Deserialize)]
struct CachedReleases {
    /// The GitHub repo these releases belong to (e.g. "owner/repo").
    /// Checked on read so one machine can cache multiple repos' data safely.
    repo: String,
    /// When the releases were fetched (seconds since UNIX epoch).
    /// Compared against the configured TTL to decide freshness.
    fetched_at_epoch_secs: u64,
    /// The cached release objects.
    releases: Vec<CachedRelease>,
}
37
/// Serialized mirror of [`GitHubRelease`].
///
/// Kept separate from the live type so this crate controls the on-disk
/// format independently of the upstream struct's serde behavior.
#[derive(Serialize, Deserialize)]
struct CachedRelease {
    /// Git tag of the release, e.g. "v1.2.0".
    tag_name: String,
    /// Human-readable release title.
    name: String,
    /// Release notes body.
    body: String,
    /// Whether GitHub marks this release as a prerelease.
    prerelease: bool,
    /// Downloadable artifacts attached to the release.
    assets: Vec<CachedAsset>,
}
47
/// Serialized mirror of [`Asset`].
#[derive(Serialize, Deserialize)]
struct CachedAsset {
    /// Asset file name, e.g. "ant-node-x86_64-linux.tar.gz".
    name: String,
    /// Direct download URL for the asset.
    browser_download_url: String,
}
54
55// ---------------------------------------------------------------------------
56// Conversions
57// ---------------------------------------------------------------------------
58
59impl From<&GitHubRelease> for CachedRelease {
60    fn from(r: &GitHubRelease) -> Self {
61        Self {
62            tag_name: r.tag_name.clone(),
63            name: r.name.clone(),
64            body: r.body.clone(),
65            prerelease: r.prerelease,
66            assets: r.assets.iter().map(CachedAsset::from).collect(),
67        }
68    }
69}
70
71impl From<CachedRelease> for GitHubRelease {
72    fn from(c: CachedRelease) -> Self {
73        Self {
74            tag_name: c.tag_name,
75            name: c.name,
76            body: c.body,
77            prerelease: c.prerelease,
78            assets: c.assets.into_iter().map(Asset::from).collect(),
79        }
80    }
81}
82
83impl From<&Asset> for CachedAsset {
84    fn from(a: &Asset) -> Self {
85        Self {
86            name: a.name.clone(),
87            browser_download_url: a.browser_download_url.clone(),
88        }
89    }
90}
91
92impl From<CachedAsset> for Asset {
93    fn from(c: CachedAsset) -> Self {
94        Self {
95            name: c.name,
96            browser_download_url: c.browser_download_url,
97        }
98    }
99}
100
101// ---------------------------------------------------------------------------
102// ReleaseCache implementation
103// ---------------------------------------------------------------------------
104
105impl ReleaseCache {
106    /// Create a new release cache backed by the given directory.
107    #[must_use]
108    pub fn new(cache_dir: PathBuf, ttl: Duration) -> Self {
109        Self { cache_dir, ttl }
110    }
111
112    /// Return the cached releases if the cache file exists, belongs to the
113    /// same repo, and has not expired.  Returns `None` on any error (missing,
114    /// corrupted, expired, wrong repo) — callers should fall back to the
115    /// network in that case.
116    #[must_use]
117    pub fn read_if_valid(&self, repo: &str) -> Option<Vec<GitHubRelease>> {
118        let data = fs::read_to_string(self.cache_file()).ok()?;
119        let cached: CachedReleases = serde_json::from_str(&data).ok()?;
120
121        if cached.repo != repo {
122            debug!(
123                "Release cache repo mismatch: cached={}, wanted={}",
124                cached.repo, repo
125            );
126            return None;
127        }
128
129        let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs();
130        let age_secs = now.saturating_sub(cached.fetched_at_epoch_secs);
131        if age_secs >= self.ttl.as_secs() {
132            debug!(
133                "Release cache expired (age={}s, ttl={}s)",
134                age_secs,
135                self.ttl.as_secs()
136            );
137            return None;
138        }
139
140        Some(
141            cached
142                .releases
143                .into_iter()
144                .map(GitHubRelease::from)
145                .collect(),
146        )
147    }
148
149    /// Acquire the exclusive cache lock, re-check the cache, and return
150    /// valid cached releases if another node populated them while we waited.
151    ///
152    /// Returns `Ok(Some(releases))` if a valid cache was found under the
153    /// lock, or `Ok(None)` if the cache is still stale/missing and the
154    /// caller should fetch from the network.  The returned
155    /// The returned lock guard must be held until after writing the fresh
156    /// data so that other nodes block rather than all hitting the API.
157    ///
158    /// **Note:** `lock_exclusive()` blocks the calling thread.  Callers in
159    /// async contexts should wrap this in `tokio::task::spawn_blocking`.
160    ///
161    /// # Errors
162    ///
163    /// Returns an error if the lock file cannot be created or acquired.
164    pub fn lock_and_recheck(
165        &self,
166        repo: &str,
167    ) -> Result<(ReleaseCacheLockGuard, Option<Vec<GitHubRelease>>)> {
168        let lock_path = self.lock_file();
169        let lock = File::create(&lock_path)
170            .map_err(|e| Error::Upgrade(format!("Failed to create release cache lock: {e}")))?;
171        lock.lock_exclusive()
172            .map_err(|e| Error::Upgrade(format!("Failed to acquire release cache lock: {e}")))?;
173
174        let cached = self.read_if_valid(repo);
175        Ok((ReleaseCacheLockGuard { _file: lock }, cached))
176    }
177
178    /// Write releases to the cache, using an exclusive file lock to
179    /// coordinate with other nodes on the same machine.
180    ///
181    /// The write is atomic: data goes to a temp file first, then is renamed
182    /// over the cache file.
183    ///
184    /// # Errors
185    ///
186    /// Returns an error if the lock cannot be acquired or the file cannot be
187    /// written.
188    pub fn write(&self, repo: &str, releases: &[GitHubRelease]) -> Result<()> {
189        let lock_path = self.lock_file();
190        let lock = File::create(&lock_path)
191            .map_err(|e| Error::Upgrade(format!("Failed to create release cache lock: {e}")))?;
192        lock.lock_exclusive()
193            .map_err(|e| Error::Upgrade(format!("Failed to acquire release cache lock: {e}")))?;
194
195        let result = self.write_inner(repo, releases);
196
197        drop(lock); // Dropping the file releases the exclusive lock
198        result
199    }
200
201    /// Write releases to the cache while the caller already holds the
202    /// lock guard.  The guard is consumed to ensure the lock is released
203    /// after writing.
204    ///
205    /// # Errors
206    ///
207    /// Returns an error if the file cannot be written.
208    pub fn write_under_lock(
209        &self,
210        _guard: ReleaseCacheLockGuard,
211        repo: &str,
212        releases: &[GitHubRelease],
213    ) -> Result<()> {
214        self.write_inner(repo, releases)
215    }
216
217    // -- private helpers -----------------------------------------------------
218
219    fn write_inner(&self, repo: &str, releases: &[GitHubRelease]) -> Result<()> {
220        let now = SystemTime::now()
221            .duration_since(UNIX_EPOCH)
222            .map_err(|e| Error::Upgrade(format!("System clock error: {e}")))?
223            .as_secs();
224
225        let cached = CachedReleases {
226            repo: repo.to_string(),
227            fetched_at_epoch_secs: now,
228            releases: releases.iter().map(CachedRelease::from).collect(),
229        };
230
231        let json = serde_json::to_string(&cached)
232            .map_err(|e| Error::Upgrade(format!("Failed to serialize release cache: {e}")))?;
233
234        // Write to temp file then rename into place.
235        // Remove dest first on Windows where rename fails if it exists.
236        let tmp_path = self.cache_dir.join("releases.json.tmp");
237        {
238            let mut f = File::create(&tmp_path)?;
239            f.write_all(json.as_bytes())?;
240            f.sync_all()?;
241        }
242        let cache_file = self.cache_file();
243        let _ = fs::remove_file(&cache_file);
244        fs::rename(&tmp_path, &cache_file)?;
245
246        debug!("Wrote release cache ({} releases)", releases.len());
247        Ok(())
248    }
249
250    fn cache_file(&self) -> PathBuf {
251        self.cache_dir.join("releases.json")
252    }
253
254    fn lock_file(&self) -> PathBuf {
255        self.cache_dir.join("releases.lock")
256    }
257}
258
/// RAII guard that holds an exclusive release cache lock.
///
/// The underlying file lock is released when this guard is dropped.
pub struct ReleaseCacheLockGuard {
    // Keeps the locked lock-file handle alive; closing the file on drop
    // releases the exclusive lock taken via `fs2::FileExt::lock_exclusive`.
    _file: File,
}
265
266// ---------------------------------------------------------------------------
267// Tests
268// ---------------------------------------------------------------------------
269
#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// One release carrying a single asset, shared by every test below.
    fn sample_releases() -> Vec<GitHubRelease> {
        let asset = Asset {
            name: "ant-node-x86_64-linux.tar.gz".to_string(),
            browser_download_url: "https://example.com/bin".to_string(),
        };
        vec![GitHubRelease {
            tag_name: "v1.2.0".to_string(),
            name: "Release 1.2.0".to_string(),
            body: "Notes".to_string(),
            prerelease: false,
            assets: vec![asset],
        }]
    }

    /// Build a cache rooted in the given temp dir with the given TTL.
    fn cache_with_ttl(dir: &TempDir, ttl_secs: u64) -> ReleaseCache {
        ReleaseCache::new(dir.path().to_path_buf(), Duration::from_secs(ttl_secs))
    }

    #[test]
    fn test_write_read_roundtrip() {
        let dir = TempDir::new().unwrap();
        let cache = cache_with_ttl(&dir, 300);

        cache.write("owner/repo", &sample_releases()).unwrap();
        let loaded = cache.read_if_valid("owner/repo").unwrap();

        assert_eq!(loaded.len(), 1);
        assert_eq!(loaded[0].tag_name, "v1.2.0");
        assert_eq!(loaded[0].assets.len(), 1);
        assert_eq!(loaded[0].assets[0].name, "ant-node-x86_64-linux.tar.gz");
    }

    #[test]
    fn test_ttl_expiry_returns_none() {
        let dir = TempDir::new().unwrap();
        // A zero TTL means anything written is already expired on read.
        let cache = cache_with_ttl(&dir, 0);

        cache.write("owner/repo", &sample_releases()).unwrap();

        assert!(cache.read_if_valid("owner/repo").is_none());
    }

    #[test]
    fn test_wrong_repo_returns_none() {
        let dir = TempDir::new().unwrap();
        let cache = cache_with_ttl(&dir, 300);

        cache.write("owner/repo", &sample_releases()).unwrap();

        assert!(cache.read_if_valid("other/repo").is_none());
    }

    #[test]
    fn test_corrupted_file_returns_none() {
        let dir = TempDir::new().unwrap();
        let cache = cache_with_ttl(&dir, 300);

        fs::write(cache.cache_file(), "not valid json!!!").unwrap();

        assert!(cache.read_if_valid("owner/repo").is_none());
    }

    #[test]
    fn test_missing_file_returns_none() {
        let dir = TempDir::new().unwrap();
        let cache = cache_with_ttl(&dir, 300);

        assert!(cache.read_if_valid("owner/repo").is_none());
    }
}