use std::fs;
use std::time::{SystemTime, UNIX_EPOCH};
use serde::{Deserialize, Serialize};
use crate::contexts::merge_readiness::domain::cache::{CachePort, CacheState};
use super::tmp_cache_dir;
/// Fallback freshness window in seconds, used when the
/// `MERGE_READY_STALE_TTL` env var is unset or unparsable.
const DEFAULT_STALE_TTL_SECS: u64 = 5;
/// On-disk JSON shape of one cache entry.
///
/// Field names are part of the serialized format — renaming them would
/// invalidate (fail to parse) every previously written cache file.
#[derive(Serialize, Deserialize)]
struct StateJson {
// Unix timestamp (seconds) captured when the entry was written.
fetched_at_secs: u64,
// The cached payload; presumably the merge-readiness command output — confirm with callers of `write`.
output: String,
}
/// Zero-sized adapter that exposes this module's file-based cache
/// through the domain-level `CachePort` trait.
pub struct CacheStore;
impl CachePort for CacheStore {
    /// Look up the cached entry for `repo_id` and translate the
    /// module-private lookup result into the domain's `CacheState`.
    fn check(&self, repo_id: &str) -> CacheState {
        let raw = check_raw(repo_id);
        match raw {
            RawCacheStatus::Miss => CacheState::Miss,
            RawCacheStatus::Stale(output) => CacheState::Stale(output),
            RawCacheStatus::Fresh(output) => CacheState::Fresh(output),
        }
    }
}
/// Module-private cache-lookup outcome; mirrors the domain's
/// `CacheState` so the file-level logic stays decoupled from it.
enum RawCacheStatus {
// Entry exists and its age is within the TTL.
Fresh(String),
// Entry exists but its age exceeds the TTL.
Stale(String),
// No file, unreadable file, or unparsable JSON.
Miss,
}
/// Read and classify the on-disk cache entry for `repo_id`.
///
/// A missing, unreadable, or unparsable file is a `Miss`. Otherwise the
/// entry is `Fresh` while its age is within the TTL and `Stale` after.
/// A clock that reads earlier than the stored timestamp yields age 0
/// (saturating subtraction), i.e. the entry counts as fresh.
fn check_raw(repo_id: &str) -> RawCacheStatus {
    let path = cache_path(repo_id);
    // Collapse "file readable" and "JSON parsable" into one Option.
    let parsed = fs::read_to_string(&path)
        .ok()
        .and_then(|raw| serde_json::from_str::<StateJson>(&raw).ok());
    match parsed {
        None => RawCacheStatus::Miss,
        Some(state) => {
            let age = now_secs().saturating_sub(state.fetched_at_secs);
            if age > stale_ttl_secs() {
                RawCacheStatus::Stale(state.output)
            } else {
                RawCacheStatus::Fresh(state.output)
            }
        }
    }
}
/// Persist `output` for `repo_id`, stamped with the current time.
///
/// Best-effort: every I/O and serialization failure is silently
/// ignored, leaving whatever entry (if any) was there before — a
/// failed cache write must never break the caller.
///
/// The payload is first written to a process-id-suffixed temp file and
/// then renamed over the final path, so concurrent readers never see a
/// half-written entry.
pub fn write(repo_id: &str, output: &str) {
    let final_path = cache_path(repo_id);
    if let Some(dir) = final_path.parent() {
        let _ = fs::create_dir_all(dir);
    }
    let state = StateJson {
        fetched_at_secs: now_secs(),
        output: output.to_owned(),
    };
    let payload = match serde_json::to_string(&state) {
        Ok(json) => json,
        Err(_) => return,
    };
    // PID suffix keeps concurrent writers from clobbering each other's
    // temp file before the atomic rename.
    let scratch = final_path.with_extension(format!("tmp.{}", std::process::id()));
    if fs::write(&scratch, &payload).is_err() {
        return;
    }
    if fs::rename(&scratch, &final_path).is_err() {
        // Rename failed: don't leave the orphaned temp file behind.
        let _ = fs::remove_file(&scratch);
    }
}
/// Build the on-disk path for `repo_id`'s cache entry.
///
/// `repo_id` may contain characters that are unsafe in a file name
/// (`/`, `\`, `..` components — git repo ids are typically
/// `owner/repo`). Previously the id was interpolated verbatim, so such
/// ids produced nested directories or could escape the cache directory
/// entirely. Every byte outside `[A-Za-z0-9._-]` is now percent-encoded
/// so each entry is always a single file directly inside the cache dir.
fn cache_path(repo_id: &str) -> std::path::PathBuf {
    tmp_cache_dir::cache_dir().join(format!("{}.json", sanitize_repo_id(repo_id)))
}

/// Percent-encode file-name-unsafe bytes of `id`. `%` itself is
/// encoded too, so the mapping is injective (no key collisions).
fn sanitize_repo_id(id: &str) -> String {
    let mut safe = String::with_capacity(id.len());
    for byte in id.bytes() {
        match byte {
            b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' | b'-' | b'_' | b'.' => {
                safe.push(byte as char);
            }
            _ => safe.push_str(&format!("%{byte:02X}")),
        }
    }
    safe
}
/// Seconds since the Unix epoch, or 0 if the system clock reads
/// earlier than the epoch (so callers never see an error).
fn now_secs() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
fn stale_ttl_secs() -> u64 {
std::env::var("MERGE_READY_STALE_TTL")
.ok()
.and_then(|v| v.parse().ok())
.unwrap_or(DEFAULT_STALE_TTL_SECS)
}