use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(not(test))]
use std::sync::OnceLock;
use std::time::{SystemTime, UNIX_EPOCH};
static TMP_COUNTER: AtomicU64 = AtomicU64::new(0);
#[cfg(not(test))]
static SWEEP_DONE: OnceLock<()> = OnceLock::new();
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
const CACHE_TTL_SECS: u64 = 60;
const GH_FETCH_LIMIT: usize = 500;
/// State of a GitHub pull request as reported by `gh pr list --json state`.
///
/// Serialized in UPPERCASE ("OPEN", "MERGED", "CLOSED") to match the
/// GitHub CLI's JSON output.
#[non_exhaustive]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "UPPERCASE")]
pub enum PrState {
    Open,
    Merged,
    Closed,
    /// Catch-all: any state string this build does not recognize
    /// deserializes here via `#[serde(other)]` instead of failing.
    #[serde(other)]
    Other,
}
/// On-disk JSON layout of one persisted PR snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct CacheFile {
    /// Unix timestamp (seconds) at which the snapshot was fetched;
    /// compared against `CACHE_TTL_SECS` on load.
    fetched_at: u64,
    /// Repository path the snapshot belongs to. Informational only:
    /// lookup is keyed by the hashed path embedded in the file name,
    /// not by this field.
    repo: String,
    /// Branch name (`headRefName`) -> PR state.
    prs: HashMap<String, PrState>,
}
/// In-memory map of branch name -> PR state for a single repository.
///
/// `Default` yields an empty cache, used when fetching fails.
#[derive(Debug, Default, Clone)]
pub struct PrCache {
    // Keyed by the PR's head branch name (headRefName).
    map: HashMap<String, PrState>,
}
impl PrCache {
    /// Look up the PR state recorded for `branch`, if any.
    pub fn state(&self, branch: &str) -> Option<&PrState> {
        self.map.get(branch)
    }

    /// Build a cache from a fresh on-disk snapshot for `repo`, if one exists
    /// and has not expired.
    pub fn from_disk(repo: &Path) -> Option<Self> {
        let map = load_from_disk(repo)?;
        Some(Self { map })
    }

    /// Fetch PR states via `gh`, persist them, and return the result.
    /// Returns an empty cache when the fetch fails.
    pub fn fetch_and_persist(repo: &Path) -> Self {
        let Some(map) = fetch_from_gh(repo) else {
            return Self::default();
        };
        write_to_disk(repo, &map);
        Self { map }
    }

    /// Prefer a fresh disk snapshot unless `no_cache` is set; otherwise
    /// (or when the snapshot is missing/stale) fetch and persist anew.
    pub fn load_or_fetch(repo: &Path, no_cache: bool) -> Self {
        if no_cache {
            return Self::fetch_and_persist(repo);
        }
        Self::from_disk(repo).unwrap_or_else(|| Self::fetch_and_persist(repo))
    }
}
/// Stable 16-hex-char identifier for a repository path.
///
/// Canonicalizes the path when possible so different spellings of the same
/// directory share one cache file; falls back to the literal path otherwise.
fn repo_hash(repo: &Path) -> String {
    let resolved = match repo.canonicalize() {
        Ok(p) => p,
        Err(_) => repo.to_path_buf(),
    };
    let digest = Sha256::digest(resolved.to_string_lossy().as_bytes());
    hex_short(&digest[..8])
}
/// Render `bytes` as lowercase hex, two characters per byte.
fn hex_short(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}
/// Compute the on-disk cache file path for `repo`:
/// `<cache_dir>/gw/pr-status-<hash>.json`.
///
/// Returns `None` when no user cache directory is available. Test builds may
/// redirect the base directory via `GW_TEST_CACHE_DIR`.
fn cache_path_for(repo: &Path) -> Option<PathBuf> {
    let file_name = format!("pr-status-{}.json", repo_hash(repo));
    #[cfg(test)]
    if let Ok(dir) = std::env::var("GW_TEST_CACHE_DIR") {
        return Some(PathBuf::from(dir).join("gw").join(file_name));
    }
    dirs::cache_dir().map(|base| base.join("gw").join(file_name))
}
/// One entry of `gh pr list --json headRefName,state` output.
#[derive(Debug, Deserialize)]
struct GhPr {
    /// Head branch the PR was opened from; becomes the cache map key.
    #[serde(rename = "headRefName")]
    head_ref_name: String,
    state: PrState,
}
/// Fetch PR states for `repo` via the GitHub CLI.
///
/// Returns `None` when `gh` is unavailable, the command fails, or its
/// output cannot be parsed. Test builds can short-circuit via the
/// `GW_TEST_GH_FAIL` / `GW_TEST_GH_JSON` environment variables.
fn fetch_from_gh(repo: &Path) -> Option<HashMap<String, PrState>> {
    // Shared between the test-injection path and the real `gh` path so the
    // branch -> state conversion cannot drift between them.
    fn to_map(prs: Vec<GhPr>) -> HashMap<String, PrState> {
        prs.into_iter()
            .map(|pr| (pr.head_ref_name, pr.state))
            .collect()
    }
    #[cfg(test)]
    {
        if std::env::var("GW_TEST_GH_FAIL").ok().as_deref() == Some("1") {
            return None;
        }
        if let Ok(json) = std::env::var("GW_TEST_GH_JSON") {
            let prs: Vec<GhPr> = serde_json::from_str(json.trim()).ok()?;
            return Some(to_map(prs));
        }
    }
    if !crate::git::has_command("gh") {
        return None;
    }
    let limit = GH_FETCH_LIMIT.to_string();
    let result = crate::git::run_command(
        &[
            "gh",
            "pr",
            "list",
            "--state",
            "all",
            "--json",
            "headRefName,state",
            "--limit",
            &limit,
        ],
        Some(repo),
        false,
        true,
    )
    .ok()?;
    if result.returncode != 0 {
        return None;
    }
    let prs: Vec<GhPr> = serde_json::from_str(result.stdout.trim()).ok()?;
    Some(to_map(prs))
}
/// Current Unix time in whole seconds, or `None` if the system clock is
/// set before the epoch.
fn now_secs() -> Option<u64> {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(d) => Some(d.as_secs()),
        Err(_) => None,
    }
}
/// Read the cached PR map for `repo` from disk.
///
/// Returns `None` when the file is missing, unparseable, stamped in the
/// future (clock skew), or older than `CACHE_TTL_SECS`.
fn load_from_disk(repo: &Path) -> Option<HashMap<String, PrState>> {
    let path = cache_path_for(repo)?;
    let raw = std::fs::read_to_string(&path).ok()?;
    let file: CacheFile = serde_json::from_str(&raw).ok()?;
    let now = now_secs()?;
    // checked_sub rejects future timestamps (clock went backwards) in the
    // same step that computes the age.
    let age = now.checked_sub(file.fetched_at)?;
    (age <= CACHE_TTL_SECS).then_some(file.prs)
}
/// Delete abandoned temp files (`pr-status-*.tmp.*`) in `parent` whose
/// mtime is older than `cutoff`.
///
/// Best-effort: unreadable directories, metadata failures, and remove
/// failures are all ignored. Final `.json` caches never match the pattern
/// and are left alone.
fn sweep_orphans(parent: &Path, cutoff: SystemTime) {
    let entries = match std::fs::read_dir(parent) {
        Ok(e) => e,
        Err(_) => return,
    };
    for entry in entries.flatten() {
        let name = entry.file_name();
        let name = name.to_string_lossy();
        if !name.starts_with("pr-status-") || !name.contains(".tmp.") {
            continue;
        }
        // Only remove files we can positively date as pre-cutoff; if the
        // mtime is unreadable, leave the file in place.
        let stale = entry
            .metadata()
            .and_then(|meta| meta.modified())
            .map(|modified| modified < cutoff)
            .unwrap_or(false);
        if stale {
            let _ = std::fs::remove_file(entry.path());
        }
    }
}
/// Atomically persist `prs` for `repo` under the user cache directory.
///
/// Best-effort: every failure path returns silently, since the cache is
/// purely an optimization. Publication is write-temp-then-rename so readers
/// never observe a partially written file.
fn write_to_disk(repo: &Path, prs: &HashMap<String, PrState>) {
    let Some(path) = cache_path_for(repo) else {
        return;
    };
    if let Some(parent) = path.parent() {
        let _ = std::fs::create_dir_all(parent);
    }
    let now_instant = SystemTime::now();
    let dur = match now_instant.duration_since(UNIX_EPOCH) {
        Ok(d) => d,
        // Clock set before the Unix epoch: nothing sane to stamp, skip.
        Err(_) => return,
    };
    let now = dur.as_secs();
    // Sub-second nanos feed the temp-file name below for uniqueness.
    let nanos = dur.subsec_nanos();
    let file = CacheFile {
        fetched_at: now,
        repo: repo.to_string_lossy().into_owned(),
        prs: prs.clone(),
    };
    let Ok(json) = serde_json::to_string(&file) else {
        return;
    };
    // Sweep orphaned temp files at most once per process in production
    // (gated by SWEEP_DONE); always in tests so the sweep is exercised.
    #[cfg(not(test))]
    let do_sweep = SWEEP_DONE.set(()).is_ok();
    #[cfg(test)]
    let do_sweep = true;
    if do_sweep {
        if let Some(parent) = path.parent() {
            // Temp files older than 60s are presumed abandoned by a
            // crashed/killed writer.
            let cutoff = SystemTime::now()
                .checked_sub(std::time::Duration::from_secs(60))
                .unwrap_or_else(SystemTime::now);
            sweep_orphans(parent, cutoff);
        }
    }
    // pid + subsecond nanos + per-process counter make concurrent writers'
    // temp names collision-free.
    let counter = TMP_COUNTER.fetch_add(1, Ordering::Relaxed);
    let tmp = path.with_file_name(format!(
        "{}.tmp.{}.{}.{}",
        path.file_stem().unwrap_or_default().to_string_lossy(),
        std::process::id(),
        nanos,
        counter,
    ));
    if std::fs::write(&tmp, &json).is_err() {
        let _ = std::fs::remove_file(&tmp);
        return;
    }
    // Publish atomically via rename. std::fs::rename over an existing file
    // can fail on some platforms (e.g. Windows), so remove the target and
    // retry once; if that also fails, clean up the temp file.
    if std::fs::rename(&tmp, &path).is_err() {
        let _ = std::fs::remove_file(&path);
        if std::fs::rename(&tmp, &path).is_err() {
            let _ = std::fs::remove_file(&tmp);
        }
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests. Process environment is global state shared across test
    //! threads, so every env-mutating test serializes through `ENV_LOCK`
    //! and restores variables via `EnvGuard`.
    use super::*;
    use std::path::PathBuf;
    use std::sync::{Mutex, MutexGuard};

    // Serializes env-mutating tests. Poisoning is deliberately ignored so
    // one failed test does not cascade into every later one.
    static ENV_LOCK: Mutex<()> = Mutex::new(());

    fn env_lock() -> MutexGuard<'static, ()> {
        ENV_LOCK.lock().unwrap_or_else(|e| e.into_inner())
    }

    #[test]
    fn now_secs_returns_some_on_normal_system() {
        assert!(now_secs().is_some());
    }

    #[test]
    fn repo_hash_is_stable_and_short() {
        let p = PathBuf::from("/tmp/some-repo-that-does-not-exist-xyz");
        let h1 = repo_hash(&p);
        let h2 = repo_hash(&p);
        assert_eq!(h1, h2);
        // 8 digest bytes -> 16 hex characters.
        assert_eq!(h1.len(), 16);
    }

    #[test]
    fn repo_hash_differs_per_path() {
        let a = repo_hash(&PathBuf::from("/tmp/repo-a-xyz"));
        let b = repo_hash(&PathBuf::from("/tmp/repo-b-xyz"));
        assert_ne!(a, b);
    }

    #[test]
    fn cache_path_contains_repo_hash() {
        let p = PathBuf::from("/tmp/repo-xyz");
        let cp = cache_path_for(&p).expect("cache dir available");
        let s = cp.to_string_lossy();
        assert!(s.contains("gw"));
        assert!(s.contains("pr-status-"));
        assert!(s.ends_with(".json"));
    }

    #[test]
    fn fetch_parses_gh_json_from_env() {
        let _g = env_lock();
        // NOTE(review): GW_TEST_GH_FAIL is captured but never cleared, so a
        // pre-set GW_TEST_GH_FAIL=1 in the ambient environment would make
        // this test fail — consider remove_var after capture.
        let _env = EnvGuard::capture(&["GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        std::env::set_var(
            "GW_TEST_GH_JSON",
            r#"[{"headRefName":"feat/foo","state":"OPEN"},{"headRefName":"fix/bar","state":"MERGED"}]"#,
        );
        let prs = fetch_from_gh(std::path::Path::new(".")).expect("parsed");
        assert_eq!(prs.get("feat/foo"), Some(&PrState::Open));
        assert_eq!(prs.get("fix/bar"), Some(&PrState::Merged));
    }

    #[test]
    fn fetch_returns_none_on_forced_failure() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        std::env::set_var("GW_TEST_GH_FAIL", "1");
        let result = fetch_from_gh(std::path::Path::new("."));
        assert!(result.is_none());
    }

    use tempfile::tempdir;

    // Snapshots a set of env vars on creation and restores them on drop,
    // so tests cannot leak environment state into each other.
    struct EnvGuard {
        saved: Vec<(&'static str, Option<std::ffi::OsString>)>,
    }

    impl EnvGuard {
        fn capture(keys: &[&'static str]) -> Self {
            let saved = keys.iter().map(|k| (*k, std::env::var_os(k))).collect();
            Self { saved }
        }
    }

    impl Drop for EnvGuard {
        fn drop(&mut self) {
            for (k, v) in self.saved.drain(..) {
                match v {
                    Some(val) => std::env::set_var(k, val),
                    None => std::env::remove_var(k),
                }
            }
        }
    }

    // Points the cache at `dir` (via GW_TEST_CACHE_DIR) for the duration
    // of `f`, restoring the previous value afterwards.
    fn with_cache_dir<F: FnOnce()>(dir: &std::path::Path, f: F) {
        let _g = EnvGuard::capture(&["GW_TEST_CACHE_DIR"]);
        std::env::set_var("GW_TEST_CACHE_DIR", dir);
        f();
    }

    #[test]
    fn load_from_disk_returns_fresh_entry() {
        let _g = env_lock();
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            let now = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap()
                .as_secs();
            let file = CacheFile {
                fetched_at: now,
                repo: repo.to_string_lossy().into_owned(),
                prs: [("feat/a".to_string(), PrState::Open)]
                    .into_iter()
                    .collect(),
            };
            std::fs::write(&path, serde_json::to_string(&file).unwrap()).unwrap();
            let loaded = load_from_disk(repo).expect("fresh cache");
            assert_eq!(loaded.get("feat/a"), Some(&PrState::Open));
        });
    }

    #[test]
    fn load_from_disk_rejects_expired_entry() {
        let _g = env_lock();
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-expired-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            // fetched_at of 0 is far more than CACHE_TTL_SECS in the past.
            let file = CacheFile {
                fetched_at: 0,
                repo: repo.to_string_lossy().into_owned(),
                prs: HashMap::new(),
            };
            std::fs::write(&path, serde_json::to_string(&file).unwrap()).unwrap();
            assert!(load_from_disk(repo).is_none());
        });
    }

    #[test]
    fn load_from_disk_rejects_future_entry() {
        let _g = env_lock();
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-future-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            // A timestamp ahead of "now" signals clock skew; must be rejected.
            let far_future = now_secs().unwrap() + 9999;
            let file = CacheFile {
                fetched_at: far_future,
                repo: repo.to_string_lossy().into_owned(),
                prs: HashMap::new(),
            };
            std::fs::write(&path, serde_json::to_string(&file).unwrap()).unwrap();
            assert!(load_from_disk(repo).is_none());
        });
    }

    #[test]
    fn load_from_disk_rejects_corrupt_file() {
        let _g = env_lock();
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-corrupt-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            std::fs::write(&path, "not json").unwrap();
            assert!(load_from_disk(repo).is_none());
        });
    }

    #[test]
    fn load_or_fetch_uses_disk_when_fresh() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_CACHE_DIR", "GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-disk-hit-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            let file = CacheFile {
                fetched_at: now_secs().unwrap(),
                repo: repo.to_string_lossy().into_owned(),
                prs: [("feat/cached".to_string(), PrState::Merged)]
                    .into_iter()
                    .collect(),
            };
            std::fs::write(&path, serde_json::to_string(&file).unwrap()).unwrap();
            // gh is forced to fail, so the result can only come from disk.
            std::env::set_var("GW_TEST_GH_FAIL", "1");
            let cache = PrCache::load_or_fetch(repo, false);
            assert_eq!(cache.state("feat/cached"), Some(&PrState::Merged));
        });
    }

    #[test]
    fn load_or_fetch_bypasses_disk_when_no_cache_true() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_CACHE_DIR", "GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-bypass-xyz");
            let path = cache_path_for(repo).unwrap();
            std::fs::create_dir_all(path.parent().unwrap()).unwrap();
            let file = CacheFile {
                fetched_at: now_secs().unwrap(),
                repo: repo.to_string_lossy().into_owned(),
                prs: [("feat/old".to_string(), PrState::Open)]
                    .into_iter()
                    .collect(),
            };
            std::fs::write(&path, serde_json::to_string(&file).unwrap()).unwrap();
            std::env::set_var(
                "GW_TEST_GH_JSON",
                r#"[{"headRefName":"feat/new","state":"OPEN"}]"#,
            );
            // no_cache=true: fresh fetch wins despite a valid disk entry.
            let cache = PrCache::load_or_fetch(repo, true);
            assert_eq!(cache.state("feat/new"), Some(&PrState::Open));
            assert_eq!(cache.state("feat/old"), None);
        });
    }

    #[test]
    fn load_or_fetch_empty_when_gh_fails_and_no_cache_file() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_CACHE_DIR", "GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-empty-xyz");
            std::env::set_var("GW_TEST_GH_FAIL", "1");
            let cache = PrCache::load_or_fetch(repo, false);
            assert!(cache.state("anything").is_none());
        });
    }

    #[test]
    fn write_to_disk_cleans_up_tmp_file() {
        let _g = env_lock();
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-atomic-xyz");
            let mut prs = HashMap::new();
            prs.insert("feat/x".to_string(), PrState::Open);
            write_to_disk(repo, &prs);
            let final_path = cache_path_for(repo).unwrap();
            assert!(final_path.exists(), "final cache file exists");
            // After a successful write the temp file must have been renamed
            // away — nothing matching ".tmp." may remain.
            let parent = final_path.parent().unwrap();
            let entries: Vec<_> = std::fs::read_dir(parent).unwrap().flatten().collect();
            for entry in &entries {
                let name = entry.file_name();
                let name_str = name.to_string_lossy();
                assert!(
                    !name_str.contains(".tmp."),
                    "no tmp file should remain: {}",
                    name_str
                );
            }
        });
    }

    #[test]
    fn from_disk_and_fetch_and_persist_split() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_CACHE_DIR", "GW_TEST_GH_FAIL", "GW_TEST_GH_JSON"]);
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-split-xyz");
            assert!(PrCache::from_disk(repo).is_none());
            // Failed fetch yields an empty cache and persists nothing.
            std::env::set_var("GW_TEST_GH_FAIL", "1");
            let empty = PrCache::fetch_and_persist(repo);
            assert!(empty.state("anything").is_none());
            std::env::remove_var("GW_TEST_GH_FAIL");
            std::env::set_var(
                "GW_TEST_GH_JSON",
                r#"[{"headRefName":"main","state":"OPEN"}]"#,
            );
            // Successful fetch persists, so a later from_disk round-trips it.
            let _ = PrCache::fetch_and_persist(repo);
            let loaded = PrCache::from_disk(repo).expect("written to disk");
            assert_eq!(loaded.state("main"), Some(&PrState::Open));
        });
    }

    #[cfg(unix)]
    #[test]
    fn write_to_disk_sweeps_old_orphan_tmp_files() {
        let _g = env_lock();
        let _env = EnvGuard::capture(&["GW_TEST_CACHE_DIR"]);
        let dir = tempdir().unwrap();
        with_cache_dir(dir.path(), || {
            let repo = std::path::Path::new("/tmp/repo-sweep-xyz");
            let final_path = cache_path_for(repo).unwrap();
            let parent = final_path.parent().unwrap();
            std::fs::create_dir_all(parent).unwrap();
            let orphan = parent.join("pr-status-orphan.tmp.99999.123456789.0");
            std::fs::write(&orphan, "stale").unwrap();
            {
                // Backdate the orphan's mtime to the epoch via utimes(2) so
                // it falls before the sweep cutoff (unix-only; no stdlib way
                // to set mtime on this toolchain).
                use std::ffi::CString;
                let c_path = CString::new(orphan.to_string_lossy().as_bytes()).unwrap();
                let times = [libc::timeval {
                    tv_sec: 0,
                    tv_usec: 0,
                }; 2];
                unsafe { libc::utimes(c_path.as_ptr(), times.as_ptr()) };
            }
            // A just-created tmp file matches the pattern but is too new to sweep.
            let fresh_tmp = parent.join("pr-status-fresh.tmp.123.456.0");
            std::fs::write(&fresh_tmp, "fresh").unwrap();
            let mut prs = HashMap::new();
            prs.insert("feat/sweep".to_string(), PrState::Open);
            write_to_disk(repo, &prs);
            assert!(
                !orphan.exists(),
                "old orphan tmp file should have been swept"
            );
            assert!(fresh_tmp.exists(), "fresh tmp file should not be swept");
            assert!(final_path.exists(), "final cache file should exist");
        });
    }

    #[test]
    fn pr_state_variants_are_handled() {
        // Compile-time exhaustiveness check: adding a PrState variant
        // without updating this match is a build error.
        fn _must_handle(s: &PrState) {
            match s {
                PrState::Open => {}
                PrState::Merged => {}
                PrState::Closed => {}
                PrState::Other => {}
            }
        }
    }
}