use crate::constants::{default_lock_timeout, pending_state_timeout};
use crate::core::error::AgpmError;
use crate::core::file_error::{FileOperation, FileResultExt};
use crate::git::GitRepo;
use crate::git::command_builder::GitCommand;
use crate::utils::fs;
use crate::utils::security::validate_path_security;
use anyhow::{Context, Result};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::fs as async_fs;
use tokio::sync::{Mutex, MutexGuard, RwLock};
/// Locks `mutex`, giving up after the configured default lock timeout.
///
/// On success the guard is returned; on timeout a deadlock warning naming
/// the mutex is printed to stderr and an error is returned instead of
/// blocking forever.
async fn acquire_mutex_with_timeout<'a, T>(
    mutex: &'a Mutex<T>,
    name: &str,
) -> Result<MutexGuard<'a, T>> {
    let timeout = default_lock_timeout();
    if let Ok(guard) = tokio::time::timeout(timeout, mutex.lock()).await {
        return Ok(guard);
    }
    eprintln!("[DEADLOCK] Timeout waiting for mutex '{}' after {:?}", name, timeout);
    anyhow::bail!(
        "Timeout waiting for mutex '{}' after {:?} - possible deadlock",
        name,
        timeout
    )
}
/// Acquires a read guard on `rwlock`, bounded by the default lock timeout.
///
/// Mirrors [`acquire_mutex_with_timeout`]: a timeout is reported as a
/// probable deadlock rather than waiting indefinitely.
async fn acquire_rwlock_read_with_timeout<'a, T>(
    rwlock: &'a RwLock<T>,
    name: &str,
) -> Result<tokio::sync::RwLockReadGuard<'a, T>> {
    let timeout = default_lock_timeout();
    if let Ok(guard) = tokio::time::timeout(timeout, rwlock.read()).await {
        return Ok(guard);
    }
    eprintln!("[DEADLOCK] Timeout waiting for RwLock read '{}' after {:?}", name, timeout);
    anyhow::bail!(
        "Timeout waiting for RwLock read '{}' after {:?} - possible deadlock",
        name,
        timeout
    )
}
/// Acquires a write guard on `rwlock`, bounded by the default lock timeout.
///
/// Mirrors [`acquire_mutex_with_timeout`]: a timeout is reported as a
/// probable deadlock rather than waiting indefinitely.
async fn acquire_rwlock_write_with_timeout<'a, T>(
    rwlock: &'a RwLock<T>,
    name: &str,
) -> Result<tokio::sync::RwLockWriteGuard<'a, T>> {
    let timeout = default_lock_timeout();
    if let Ok(guard) = tokio::time::timeout(timeout, rwlock.write()).await {
        return Ok(guard);
    }
    eprintln!("[DEADLOCK] Timeout waiting for RwLock write '{}' after {:?}", name, timeout);
    anyhow::bail!(
        "Timeout waiting for RwLock write '{}' after {:?} - possible deadlock",
        name,
        timeout
    )
}
/// In-memory lifecycle state of a cached worktree, keyed by cache key.
#[derive(Debug, Clone)]
enum WorktreeState {
    /// Some task is currently creating this worktree; other tasks wait on
    /// the shared `Notify` handle and are woken when creation completes
    /// (or is abandoned).
    Pending(Arc<tokio::sync::Notify>),
    /// The worktree was created and lives at this path (existence on disk
    /// is re-checked by readers before reuse).
    Ready(PathBuf),
}
fn extract_notify_handle(
cache: &DashMap<String, WorktreeState>,
key: &str,
) -> Option<Arc<tokio::sync::Notify>> {
cache.get(key).and_then(|entry| {
if let WorktreeState::Pending(n) = entry.value() {
Some(n.clone())
} else {
None
}
})
}
/// Persistent registry of worktrees known to the cache, serialized as JSON
/// under the cache's `worktrees/` directory.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct WorktreeRegistry {
    // Records keyed by the worktree cache key (see record_worktree_usage).
    entries: HashMap<String, WorktreeRecord>,
}
/// One registry entry describing a worktree on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct WorktreeRecord {
    // Source identifier/URL the worktree was created from.
    source: String,
    // Version key the worktree was recorded under (short SHA at call sites here).
    version: String,
    // Absolute path of the worktree directory.
    path: PathBuf,
    // Unix timestamp (seconds) of the last recorded use.
    last_used: u64,
}
impl WorktreeRegistry {
    /// Loads the registry from `path`.
    ///
    /// A missing file yields an empty registry silently; unreadable or
    /// unparseable files also yield an empty registry but log a warning so
    /// the loss of state is visible. (Previously a corrupt file was
    /// discarded silently via `unwrap_or_default`, inconsistent with the
    /// warning emitted for read errors.)
    fn load(path: &Path) -> Self {
        match std::fs::read(path) {
            Ok(data) => serde_json::from_slice(&data).unwrap_or_else(|err| {
                tracing::warn!(
                    "Failed to parse worktree registry at {}: {}",
                    path.display(),
                    err
                );
                Self::default()
            }),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Self::default(),
            Err(err) => {
                tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
                Self::default()
            }
        }
    }

    /// Inserts or refreshes the record for `key`, stamping it with the
    /// current Unix time (clamped to 0 if the clock is before the epoch).
    fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_else(|_| Duration::from_secs(0))
            .as_secs();
        self.entries.insert(
            key,
            WorktreeRecord {
                source,
                version,
                path,
                last_used: timestamp,
            },
        );
    }

    /// Removes the record whose path equals `target`.
    ///
    /// Returns `true` if a record was removed.
    fn remove_by_path(&mut self, target: &Path) -> bool {
        let key = self
            .entries
            .iter()
            .find_map(|(k, record)| (record.path == target).then(|| k.clone()));
        match key {
            Some(k) => {
                self.entries.remove(&k);
                true
            }
            None => false,
        }
    }

    /// Returns the source recorded for the worktree at `target`, if any.
    fn get_source_by_path(&self, target: &Path) -> Option<String> {
        self.entries
            .values()
            .find(|record| record.path == target)
            .map(|record| record.source.clone())
    }

    /// Writes the registry as pretty-printed JSON to `path`, creating
    /// parent directories as needed.
    async fn persist(&self, path: &Path) -> Result<()> {
        if let Some(parent) = path.parent() {
            async_fs::create_dir_all(parent).await?;
        }
        let data = serde_json::to_vec_pretty(self)?;
        async_fs::write(path, data).await?;
        Ok(())
    }
}
pub mod lock;
pub use lock::CacheLock;
/// Git repository cache: bare clones under `sources/` and SHA-pinned
/// worktrees under `worktrees/`, plus in-process coordination state that is
/// shared between clones of this handle.
pub struct Cache {
    // Root directory of the cache on disk.
    dir: PathBuf,
    // Per-cache-key worktree state (Pending/Ready), shared across clones so
    // concurrent tasks coordinate worktree creation.
    worktree_cache: Arc<DashMap<String, WorktreeState>>,
    // Per-path mutexes — not used in the code visible in this file;
    // presumably serializes fetches elsewhere. TODO(review): confirm.
    fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,
    // Bare repos already fetched during this process, so repeated resolves
    // skip redundant network fetches.
    fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,
    // Persistent registry mirroring the worktrees on disk.
    worktree_registry: Arc<Mutex<WorktreeRegistry>>,
}
impl Clone for Cache {
fn clone(&self) -> Self {
Self {
dir: self.dir.clone(),
worktree_cache: Arc::clone(&self.worktree_cache),
fetch_locks: Arc::clone(&self.fetch_locks),
fetched_repos: Arc::clone(&self.fetched_repos),
worktree_registry: Arc::clone(&self.worktree_registry),
}
}
}
impl Cache {
/// Location of the persisted worktree registry inside `cache_dir`.
fn registry_path_for(cache_dir: &Path) -> PathBuf {
    let mut path = cache_dir.join("worktrees");
    path.push(".state.json");
    path
}
fn registry_path(&self) -> PathBuf {
Self::registry_path_for(&self.dir)
}
/// Records (or refreshes) the registry entry for a worktree and persists
/// the registry to disk, all under the registry mutex.
async fn record_worktree_usage(
    &self,
    registry_key: &str,
    source_name: &str,
    version_key: &str,
    worktree_path: &Path,
) -> Result<()> {
    let mut registry =
        acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
    let key = registry_key.to_string();
    let source = source_name.to_string();
    let version = version_key.to_string();
    registry.update(key, source, version, worktree_path.to_path_buf());
    registry.persist(&self.registry_path()).await?;
    Ok(())
}
/// Drops the registry entry for `worktree_path` (if present); the registry
/// is only rewritten to disk when something actually changed.
async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
    let mut registry =
        acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
    let removed = registry.remove_by_path(worktree_path);
    if !removed {
        return Ok(());
    }
    registry.persist(&self.registry_path()).await?;
    Ok(())
}
/// Best-effort tuning of a freshly cloned repo's git config for faster
/// transfers. Each `git config` failure is deliberately discarded: these
/// settings are optimizations, never required for correctness.
async fn configure_connection_pooling(path: &Path) -> Result<()> {
    let settings: [(&str, &str); 3] = [
        ("http.version", "HTTP/2"),
        ("http.postBuffer", "524288000"),
        ("core.compression", "0"),
    ];
    for (key, value) in settings {
        let outcome = GitCommand::new()
            .args(["config", key, value])
            .current_dir(path)
            .execute_success()
            .await;
        // Ignore failures; see doc comment above.
        let _ = outcome;
    }
    Ok(())
}
/// Creates a cache rooted at the user's configured cache directory,
/// loading any previously persisted worktree registry.
///
/// # Errors
/// Returns an error if the cache directory cannot be determined.
pub fn new() -> Result<Self> {
    let dir = crate::config::get_cache_dir()?;
    let registry_path = Self::registry_path_for(&dir);
    // Fixed: the borrow here had been mangled to `®istry_path`
    // (an HTML-entity corruption of `&registry_path`), which does not compile.
    let registry = WorktreeRegistry::load(&registry_path);
    Ok(Self {
        dir,
        worktree_cache: Arc::new(DashMap::new()),
        fetch_locks: Arc::new(DashMap::new()),
        fetched_repos: Arc::new(RwLock::new(HashSet::new())),
        worktree_registry: Arc::new(Mutex::new(registry)),
    })
}
/// Creates a cache rooted at an explicit directory (used by tests and
/// callers that manage their own cache location), loading any previously
/// persisted worktree registry.
pub fn with_dir(dir: PathBuf) -> Result<Self> {
    let registry_path = Self::registry_path_for(&dir);
    // Fixed: the borrow here had been mangled to `®istry_path`
    // (an HTML-entity corruption of `&registry_path`), which does not compile.
    let registry = WorktreeRegistry::load(&registry_path);
    Ok(Self {
        dir,
        worktree_cache: Arc::new(DashMap::new()),
        fetch_locks: Arc::new(DashMap::new()),
        fetched_repos: Arc::new(RwLock::new(HashSet::new())),
        worktree_registry: Arc::new(Mutex::new(registry)),
    })
}
/// Creates the cache root directory if it does not already exist.
/// Safe to call repeatedly.
pub async fn ensure_cache_dir(&self) -> Result<()> {
    if self.dir.exists() {
        return Ok(());
    }
    async_fs::create_dir_all(&self.dir).await.with_file_context(
        FileOperation::CreateDir,
        &self.dir,
        "creating cache directory",
        "cache::ensure_cache_dir",
    )?;
    Ok(())
}
/// Returns the root directory of this cache.
#[must_use]
pub fn cache_dir(&self) -> &Path {
    &self.dir
}
/// Computes the on-disk path a worktree for (`url`, `sha`) would occupy,
/// without creating anything. The directory name embeds the repository
/// owner, name, and up to the first 8 characters of the SHA.
///
/// # Errors
/// Fails when `url` cannot be parsed as a Git URL.
pub fn get_worktree_path(&self, url: &str, sha: &str) -> Result<PathBuf> {
    let (owner, repo) =
        crate::git::parse_git_url(url).map_err(|e| anyhow::anyhow!("Invalid Git URL: {e}"))?;
    let prefix_len = 8.min(sha.len());
    let sha_short = &sha[..prefix_len];
    let dir_name = format!("{owner}_{repo}_{sha_short}");
    Ok(self.dir.join("worktrees").join(dir_name))
}
/// Ensures the named source is available locally and returns its path.
///
/// Thin public wrapper around the private implementation; see
/// `get_or_clone_source_impl` for the clone/fetch/local-path logic.
pub async fn get_or_clone_source(
    &self,
    name: &str,
    url: &str,
    version: Option<&str>,
) -> Result<PathBuf> {
    self.get_or_clone_source_impl(name, url, version).await
}
/// Removes a single worktree from disk and from the persistent registry.
///
/// If the registry knows which source the worktree came from, the owning
/// bare repository is asked to detach the worktree first (under an
/// exclusive file lock) so git's internal bookkeeping stays consistent;
/// any directory still present afterwards is deleted directly.
pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
    if !worktree_path.exists() {
        return Ok(());
    }
    // Look up the originating source, holding the registry mutex only for
    // the lookup (the record itself is removed at the end).
    let source_url = {
        let registry =
            acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
        registry.get_source_by_path(worktree_path)
    };
    if let Some(url) = source_url {
        if let Ok((owner, repo)) = crate::git::parse_git_url(&url) {
            let bare_repo_path = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
            if bare_repo_path.exists() {
                // Exclusive lock: worktree removal mutates the bare repo's
                // worktree metadata.
                let bare_repo_worktree_lock_name = format!("bare-worktree-{owner}_{repo}");
                let _bare_worktree_lock =
                    CacheLock::acquire(&self.dir, &bare_repo_worktree_lock_name).await?;
                let repo = GitRepo::new(&bare_repo_path);
                // Best effort: fall through to plain directory removal below.
                let _ = repo.remove_worktree(worktree_path).await;
            }
        }
    }
    // Remove whatever git left behind (or the whole directory when the
    // source was unknown).
    if worktree_path.exists() {
        tokio::fs::remove_dir_all(worktree_path).await.with_file_context(
            FileOperation::Write,
            worktree_path,
            "removing worktree directory",
            "cache::cleanup_worktree",
        )?;
    }
    self.remove_worktree_record_by_path(worktree_path).await?;
    Ok(())
}
/// Deletes the entire `worktrees/` directory, prunes stale worktree
/// metadata from every bare repository under `sources/`, and clears the
/// persistent registry.
pub async fn cleanup_all_worktrees(&self) -> Result<()> {
    let worktrees_dir = self.dir.join("worktrees");
    if !worktrees_dir.exists() {
        return Ok(());
    }
    tokio::fs::remove_dir_all(&worktrees_dir).await.with_file_context(
        FileOperation::Write,
        &worktrees_dir,
        "cleaning up worktrees directory",
        "cache_module",
    )?;
    // The bare repos still reference the deleted worktrees; prune each so
    // git does not complain about missing checkouts later. Failures are
    // ignored (best effort).
    let sources_dir = self.dir.join("sources");
    if sources_dir.exists() {
        let mut entries = tokio::fs::read_dir(&sources_dir).await.with_file_context(
            FileOperation::Read,
            &sources_dir,
            "reading sources directory",
            "cache_module",
        )?;
        while let Some(entry) = entries.next_entry().await? {
            let path = entry.path();
            if path.extension().and_then(|s| s.to_str()) == Some("git") {
                let bare_repo = GitRepo::new(&path);
                bare_repo.prune_worktrees().await.ok();
            }
        }
    }
    // Finally wipe the persistent registry (scoped so the mutex guard is
    // dropped before returning).
    {
        let mut registry =
            acquire_mutex_with_timeout(&self.worktree_registry, "worktree_registry").await?;
        if !registry.entries.is_empty() {
            registry.entries.clear();
            registry.persist(&self.registry_path()).await?;
        }
    }
    Ok(())
}
/// Returns a checked-out worktree for `url` pinned at commit `sha`,
/// creating the bare clone and/or the worktree on first use.
///
/// Concurrency model: a per-key [`WorktreeState`] entry in `worktree_cache`
/// ensures only one task creates a given worktree; other tasks wait on its
/// `Notify` handle, with a timeout fallback that takes over ownership if
/// the creator appears stuck. Cross-process races are guarded by file
/// locks (`CacheLock`).
///
/// # Errors
/// Fails when `sha` is not a full 40-character hex SHA, or when cloning or
/// worktree creation fails or times out.
#[allow(clippy::too_many_lines)]
pub async fn get_or_create_worktree_for_sha(
    &self,
    name: &str,
    url: &str,
    sha: &str,
    context: Option<&str>,
) -> Result<PathBuf> {
    // Require a full-length SHA so worktree directory names are stable.
    if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err(anyhow::anyhow!(
            "Invalid SHA format: expected 40 hex characters, got '{sha}'"
        ));
    }
    let is_local_path = crate::utils::is_local_path(url);
    if is_local_path {
        // Local sources are used in place; no worktree is created.
        return self.get_or_clone_source(name, url, None).await;
    }
    self.ensure_cache_dir().await?;
    let (owner, repo) =
        crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
    let bare_repo_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
    let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");
    let sha_short = &sha[..8];
    // Hash the cache dir into the key so distinct cache roots in one
    // process never collide.
    let cache_dir_hash = {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::new();
        self.dir.hash(&mut hasher);
        // Fix: zero-pad to the full 16 hex digits. Plain `{:x}` can emit
        // fewer than 8 characters for small hash values, making the
        // `[..8]` slice below panic.
        format!("{:016x}", hasher.finish())[..8].to_string()
    };
    let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");
    let pending_timeout = pending_state_timeout();
    // Claim the cache entry: either reuse a Ready worktree, wait on a
    // Pending one, or become the creator (break out of the loop).
    loop {
        match self.worktree_cache.entry(cache_key.clone()) {
            dashmap::mapref::entry::Entry::Occupied(entry) => {
                match entry.get() {
                    WorktreeState::Ready(cached_path) if cached_path.exists() => {
                        let cached_path = cached_path.clone();
                        // Release the DashMap guard before any await.
                        drop(entry);
                        self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
                            .await?;
                        if let Some(ctx) = context {
                            tracing::debug!(
                                target: "git",
                                "({}) Reusing SHA-based worktree for {} @ {}",
                                ctx,
                                url.split('/').next_back().unwrap_or(url),
                                sha_short
                            );
                        }
                        return Ok(cached_path);
                    }
                    WorktreeState::Ready(_cached_path) => {
                        // Marked ready but missing on disk: become the
                        // creator and recreate it.
                        let notify = Arc::new(tokio::sync::Notify::new());
                        drop(entry);
                        self.worktree_cache
                            .insert(cache_key.clone(), WorktreeState::Pending(notify));
                        break;
                    }
                    WorktreeState::Pending(existing_notify) => {
                        let existing_notify = existing_notify.clone();
                        let notified_future = existing_notify.notified();
                        drop(entry);
                        if let Some(ctx) = context {
                            tracing::debug!(
                                target: "git",
                                "({}) Waiting for SHA worktree creation for {} @ {}",
                                ctx,
                                url.split('/').next_back().unwrap_or(url),
                                sha_short
                            );
                        }
                        tokio::select! {
                            _ = notified_future => {
                                // Creator finished (or failed); re-inspect
                                // the entry from the top.
                                continue;
                            }
                            _ = tokio::time::sleep(pending_timeout) => {
                                // Creator looks stuck: take over ownership
                                // and wake any other waiters so they re-loop
                                // onto our new Pending entry.
                                let our_notify = Arc::new(tokio::sync::Notify::new());
                                self.worktree_cache
                                    .insert(cache_key.clone(), WorktreeState::Pending(our_notify));
                                existing_notify.notify_waiters();
                                tracing::warn!(
                                    target: "git",
                                    "Timeout waiting for worktree creation for {} @ {} - taking ownership",
                                    url.split('/').next_back().unwrap_or(url),
                                    sha_short
                                );
                                break;
                            }
                        }
                    }
                }
            }
            dashmap::mapref::entry::Entry::Vacant(entry) => {
                // First task for this key: register as creator.
                let notify = Arc::new(tokio::sync::Notify::new());
                entry.insert(WorktreeState::Pending(notify));
                break;
            }
        }
    }
    // We are the creator from here on. On any error the Pending entry must
    // be removed and waiters woken, hence the result wrapper below.
    let worktree_cache = self.worktree_cache.clone();
    let cache_key_for_cleanup = cache_key.clone();
    let result: Result<PathBuf> = async {
        tracing::debug!(
            target: "git::worktree",
            "Starting worktree creation for {} @ {} (cache_key={})",
            url.split('/').next_back().unwrap_or(url),
            sha_short,
            cache_key
        );
        // Step 1: make sure the bare clone exists (double-checked under a
        // file lock to coordinate with other processes).
        if !bare_repo_dir.exists() {
            tracing::debug!(
                target: "git",
                "Bare repo does not exist, acquiring lock to clone: {}",
                bare_repo_dir.display()
            );
            let bare_repo_lock = CacheLock::acquire(&self.dir, &bare_repo_lock_name).await?;
            if !bare_repo_dir.exists() {
                if let Some(parent) = bare_repo_dir.parent() {
                    tokio::fs::create_dir_all(parent).await.with_file_context(
                        FileOperation::CreateDir,
                        parent,
                        "creating cache parent directory",
                        "cache_module",
                    )?;
                }
                if let Some(ctx) = context {
                    tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
                } else {
                    tracing::debug!("📦 Cloning repository {url} to cache...");
                }
                tokio::time::timeout(
                    crate::constants::GIT_CLONE_TIMEOUT,
                    GitRepo::clone_bare_with_context(url, &bare_repo_dir, context),
                )
                .await
                .map_err(|_| {
                    anyhow::anyhow!(
                        "Git clone operation timed out after {:?} for {}",
                        crate::constants::GIT_CLONE_TIMEOUT,
                        url
                    )
                })??;
                Self::configure_connection_pooling(&bare_repo_dir).await.ok();
                // A fresh clone is up to date; skip the next fetch.
                acquire_rwlock_write_with_timeout(&self.fetched_repos, "fetched_repos")
                    .await?
                    .insert(bare_repo_dir.clone());
            }
            drop(bare_repo_lock);
        }
        let bare_repo = GitRepo::new(&bare_repo_dir);
        let worktree_path =
            self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));
        // Step 2: create the worktree, guarded by a per-worktree file lock.
        let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
        let _worktree_lock = CacheLock::acquire(&self.dir, &worktree_lock_name).await?;
        if worktree_path.exists() {
            // Another process created it while we waited on the lock.
            let notify_to_wake = extract_notify_handle(&self.worktree_cache, &cache_key);
            self.worktree_cache
                .insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
            if let Some(n) = notify_to_wake {
                n.notify_waiters();
            }
            self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
            return Ok(worktree_path);
        }
        if let Some(ctx) = context {
            tracing::debug!(
                target: "git",
                "({}) Creating SHA-based worktree: {} @ {}",
                ctx,
                url.split('/').next_back().unwrap_or(url),
                sha_short
            );
        }
        // Shared lock: multiple worktrees may be added to the same bare
        // repo concurrently, but not while one is being removed.
        let bare_repo_worktree_lock_name = format!("bare-worktree-{owner}_{repo}");
        let _bare_worktree_lock =
            CacheLock::acquire_shared(&self.dir, &bare_repo_worktree_lock_name).await?;
        let worktree_result = tokio::time::timeout(
            crate::constants::GIT_WORKTREE_TIMEOUT,
            bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context),
        )
        .await
        .map_err(|_| {
            anyhow::anyhow!(
                "Git worktree creation timed out after {:?} for {} @ {}",
                crate::constants::GIT_WORKTREE_TIMEOUT,
                url,
                sha_short
            )
        })?;
        match worktree_result {
            Ok(_) => {
                // Sanity checks: git reported success, verify the layout.
                if !worktree_path.exists() {
                    return Err(anyhow::anyhow!(
                        "Worktree directory does not exist: {}",
                        worktree_path.display()
                    ));
                }
                let git_file = worktree_path.join(".git");
                if !git_file.exists() {
                    return Err(anyhow::anyhow!(
                        "Worktree .git file does not exist: {}",
                        git_file.display()
                    ));
                }
                drop(_bare_worktree_lock);
                // Best-effort directory fsync (non-Windows) so the new
                // worktree survives a crash; failures are ignored.
                #[cfg(not(windows))]
                {
                    let worktree_path_clone = worktree_path.clone();
                    let bare_worktrees_dir = bare_repo_dir.join("worktrees");
                    let bare_worktrees_exists = bare_worktrees_dir.exists();
                    let _ = tokio::task::spawn_blocking(move || {
                        if let Ok(dir) = std::fs::File::open(&worktree_path_clone) {
                            let _ = dir.sync_all();
                        }
                        if bare_worktrees_exists {
                            if let Ok(dir) = std::fs::File::open(&bare_worktrees_dir) {
                                let _ = dir.sync_all();
                            }
                        }
                    })
                    .await;
                    tracing::debug!(
                        target: "git::worktree",
                        "Worktree fsync completed for {} @ {}",
                        worktree_path.display(),
                        &sha[..8]
                    );
                }
                // Publish the result and wake waiters.
                let notify_to_wake = extract_notify_handle(&self.worktree_cache, &cache_key);
                self.worktree_cache
                    .insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
                if let Some(n) = notify_to_wake {
                    n.notify_waiters();
                }
                self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
                Ok(worktree_path)
            }
            Err(e) => Err(e),
        }
    }
    .await;
    match result {
        Ok(path) => Ok(path),
        Err(e) => {
            // Creation failed: clear the Pending entry and wake waiters so
            // they can retry (or fail) instead of hanging.
            let notify = extract_notify_handle(&worktree_cache, &cache_key_for_cleanup);
            worktree_cache.remove(&cache_key_for_cleanup);
            if let Some(n) = notify {
                n.notify_waiters();
            }
            Err(e)
        }
    }
}
/// Resolves a source to a local directory.
///
/// Local paths are canonicalized, security-checked, and returned as-is
/// (versions are ignored with a warning). Remote URLs get a bare clone
/// under `sources/`; an existing clone is fetched at most once per process
/// (tracked in `fetched_repos`), and fetch failures are downgraded to
/// warnings so offline use keeps working.
async fn get_or_clone_source_impl(
    &self,
    name: &str,
    url: &str,
    version: Option<&str>,
) -> Result<PathBuf> {
    let is_local_path = crate::utils::is_local_path(url);
    if is_local_path {
        let resolved_path = crate::utils::platform::resolve_path(url)?;
        let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
            .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;
        validate_path_security(&canonical_path, true)?;
        // Versions make no sense for a plain directory; warn unless the
        // caller explicitly asked for "local".
        if let Some(ver) = version
            && ver != "local"
        {
            eprintln!("Warning: Version constraints are ignored for local paths");
        }
        return Ok(canonical_path);
    }
    self.ensure_cache_dir().await?;
    // Per-source file lock serializes clone/fetch across processes.
    let _lock = CacheLock::acquire(&self.dir, name)
        .await
        .with_context(|| format!("Failed to acquire lock for source: {name}"))?;
    let (owner, repo) =
        crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
    let source_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
    if let Some(parent) = source_dir.parent() {
        tokio::fs::create_dir_all(parent).await.with_file_context(
            FileOperation::CreateDir,
            parent,
            "creating cache directory",
            "cache_module",
        )?;
    }
    if source_dir.exists() {
        if crate::utils::is_git_url(url) {
            // Fetch at most once per process per repo.
            let already_fetched = {
                let fetched =
                    acquire_rwlock_read_with_timeout(&self.fetched_repos, "fetched_repos")
                        .await?;
                fetched.contains(&source_dir)
            };
            if already_fetched {
                tracing::debug!(
                    target: "agpm::cache",
                    "Skipping fetch for {} (already fetched in this command)",
                    name
                );
            } else {
                tracing::debug!(
                    target: "agpm::cache",
                    "Fetching updates for {} from {}",
                    name,
                    url
                );
                let repo = crate::git::GitRepo::new(&source_dir);
                // Fetch failure is non-fatal: keep using the stale clone.
                if let Err(e) = repo.fetch(None).await {
                    tracing::warn!(
                        target: "agpm::cache",
                        "Failed to fetch updates for {}: {}",
                        name,
                        e
                    );
                } else {
                    let mut fetched =
                        acquire_rwlock_write_with_timeout(&self.fetched_repos, "fetched_repos")
                            .await?;
                    fetched.insert(source_dir.clone());
                    tracing::debug!(
                        target: "agpm::cache",
                        "Successfully fetched updates for {}",
                        name
                    );
                }
            }
        } else {
            tracing::debug!(
                target: "agpm::cache",
                "Skipping fetch for local path: {}",
                url
            );
        }
    } else {
        self.clone_source(url, &source_dir).await?;
    }
    Ok(source_dir)
}
/// Clones `url` as a bare repository into `target`.
///
/// Under `cfg(test)` the cloned directory's contents are logged to help
/// debug fixture setup.
async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
    tracing::debug!("📦 Cloning {} to cache...", url);
    GitRepo::clone_bare(url, target)
        .await
        .with_context(|| format!("Failed to clone repository from {url}"))?;
    if cfg!(test) {
        if let Ok(entries) = std::fs::read_dir(target) {
            tracing::debug!(
                target: "agpm::cache",
                "Cloned bare repo to {}, contents:",
                target.display()
            );
            for entry in entries.flatten() {
                tracing::debug!(
                    target: "agpm::cache",
                    "  - {}",
                    entry.path().display()
                );
            }
        }
    }
    Ok(())
}
/// Copies a resource file from a cached source directory to `target_path`.
///
/// Convenience wrapper over `copy_resource_with_output` with console
/// output suppressed.
pub async fn copy_resource(
    &self,
    source_dir: &Path,
    source_path: &str,
    target_path: &Path,
) -> Result<()> {
    self.copy_resource_with_output(source_dir, source_path, target_path, false).await
}
/// Copies `source_path` (relative to `source_dir`) to `target_path`,
/// creating parent directories as needed and optionally printing a
/// success line.
///
/// # Errors
/// Returns `AgpmError::ResourceFileNotFound` when the source file is
/// missing, or an I/O error for directory creation / copy failures.
pub async fn copy_resource_with_output(
    &self,
    source_dir: &Path,
    source_path: &str,
    target_path: &Path,
    show_output: bool,
) -> Result<()> {
    let source_file = source_dir.join(source_path);
    if !source_file.exists() {
        // Report the source's directory name so the error identifies
        // which cached source was missing the file.
        let source_name = source_dir
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("unknown")
            .to_string();
        return Err(AgpmError::ResourceFileNotFound {
            path: source_path.to_string(),
            source_name,
        }
        .into());
    }
    if let Some(parent) = target_path.parent() {
        async_fs::create_dir_all(parent)
            .await
            .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
    }
    async_fs::copy(&source_file, target_path).await.with_context(|| {
        format!("Failed to copy {} to {}", source_file.display(), target_path.display())
    })?;
    if show_output {
        println!("  ✅ Installed {}", target_path.display());
    }
    Ok(())
}
/// Removes top-level cache subdirectories whose names are not listed in
/// `active_sources`, returning how many were removed. Plain files at the
/// cache root are left untouched.
///
/// # Errors
/// Fails if the cache directory cannot be read or a directory cannot be
/// removed.
pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
    self.ensure_cache_dir().await?;
    let mut removed_count = 0;
    let mut entries = async_fs::read_dir(&self.dir)
        .await
        .with_context(|| "Failed to read cache directory")?;
    while let Some(entry) =
        entries.next_entry().await.with_context(|| "Failed to read directory entry")?
    {
        let path = entry.path();
        if !path.is_dir() {
            continue;
        }
        let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
        // Compare as &str instead of allocating a String per entry
        // (previously `contains(&dir_name.to_string())`).
        if !active_sources.iter().any(|s| s == dir_name) {
            println!("🗑️ Removing unused cache: {dir_name}");
            async_fs::remove_dir_all(&path).await.with_context(|| {
                format!("Failed to remove cache directory: {}", path.display())
            })?;
            removed_count += 1;
        }
    }
    Ok(removed_count)
}
/// Total size in bytes of everything under the cache directory; 0 when the
/// directory does not exist.
pub async fn get_cache_size(&self) -> Result<u64> {
    if !self.dir.exists() {
        return Ok(0);
    }
    Ok(fs::get_directory_size(&self.dir).await?)
}
/// Returns the cache root path (same value as `cache_dir`).
#[must_use]
pub fn get_cache_location(&self) -> &Path {
    &self.dir
}
/// Deletes the entire cache directory tree if it exists; a missing cache
/// directory is treated as already cleared.
pub async fn clear_all(&self) -> Result<()> {
    if !self.dir.exists() {
        return Ok(());
    }
    async_fs::remove_dir_all(&self.dir).await.with_context(|| "Failed to clear cache")?;
    println!("🗑️ Cleared all cache");
    Ok(())
}
}
#[cfg(test)]
mod tests {
    // Unit tests for the filesystem-level Cache operations. Git-backed
    // behavior (clone/fetch/worktrees) is not exercised here — these tests
    // only touch the local-directory code paths.
    use super::*;
    use tempfile::TempDir;

    // ensure_cache_dir creates the root directory on first call.
    #[tokio::test]
    async fn test_cache_dir_creation() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
    }

    // get_cache_location reports the directory passed to with_dir.
    #[tokio::test]
    async fn test_cache_location() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        let location = cache.get_cache_location();
        assert_eq!(location, temp_dir.path());
    }

    // An empty cache directory reports zero bytes.
    #[tokio::test]
    async fn test_cache_size_empty() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }

    // Size equals the byte length of the written content ("test content" = 12).
    #[tokio::test]
    async fn test_cache_size_with_content() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let test_file = temp_dir.path().join("test.txt");
        std::fs::write(&test_file, "test content").unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert!(size > 0);
        assert_eq!(size, 12);
    }

    // Nothing to remove when the cache holds no subdirectories.
    #[tokio::test]
    async fn test_clean_unused_empty_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 0);
    }

    // Only directories absent from the active list are removed.
    #[tokio::test]
    async fn test_clean_unused_removes_correct_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let active_dir = temp_dir.path().join("active");
        let unused_dir = temp_dir.path().join("unused");
        let another_unused = temp_dir.path().join("another_unused");
        std::fs::create_dir_all(&active_dir).unwrap();
        std::fs::create_dir_all(&unused_dir).unwrap();
        std::fs::create_dir_all(&another_unused).unwrap();
        std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
        std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
        std::fs::write(another_unused.join("file.txt"), "remove").unwrap();
        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 2);
        assert!(active_dir.exists());
        assert!(!unused_dir.exists());
        assert!(!another_unused.exists());
    }

    // clear_all deletes the cache root itself, not just its contents.
    #[tokio::test]
    async fn test_clear_all_removes_entire_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let subdir = temp_dir.path().join("subdir");
        std::fs::create_dir_all(&subdir).unwrap();
        std::fs::write(subdir.join("file.txt"), "content").unwrap();
        assert!(temp_dir.path().exists());
        assert!(subdir.exists());
        cache.clear_all().await.unwrap();
        assert!(!temp_dir.path().exists());
    }

    // Basic copy from a source dir to a destination file.
    #[tokio::test]
    async fn test_copy_resource() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let source_file = source_dir.join("resource.md");
        std::fs::write(&source_file, "# Test Resource\nContent").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();
        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Test Resource\nContent");
    }

    // The relative source path may contain subdirectories.
    #[tokio::test]
    async fn test_copy_resource_nested_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        let nested_dir = source_dir.join("nested").join("path");
        std::fs::create_dir_all(&nested_dir).unwrap();
        let source_file = nested_dir.join("resource.md");
        std::fs::write(&source_file, "# Nested Resource").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();
        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Nested Resource");
    }

    // A missing source file errors out and must not create the destination.
    #[tokio::test]
    async fn test_copy_resource_invalid_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let dest = temp_dir.path().join("dest.md");
        let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;
        assert!(result.is_err());
        assert!(!dest.exists());
    }

    // Repeated ensure_cache_dir calls must not disturb existing contents.
    #[tokio::test]
    async fn test_ensure_cache_dir_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        std::fs::write(cache_dir.join("test.txt"), "content").unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        assert!(cache_dir.join("test.txt").exists());
    }

    // Destination parent directories are created on demand.
    #[tokio::test]
    async fn test_copy_resource_creates_parent_directories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();
        let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }

    // The show_output flag only changes console output, not the copy result.
    #[tokio::test]
    async fn test_copy_resource_with_output_flag() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();
        let dest1 = temp_dir.path().join("dest1.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
        assert!(dest1.exists());
        let dest2 = temp_dir.path().join("dest2.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
        assert!(dest2.exists());
    }

    // A cache directory that was never created reports zero size.
    #[tokio::test]
    async fn test_cache_size_nonexistent_dir() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }

    // clear_all on a missing cache directory is a no-op, not an error.
    #[tokio::test]
    async fn test_clear_all_nonexistent_cache() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();
        cache.clear_all().await.unwrap();
    }

    // clean_unused removes unlisted directories but leaves plain files alone.
    #[tokio::test]
    async fn test_clean_unused_with_files_and_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();
        std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();
        let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();
        assert_eq!(removed, 1);
        assert!(temp_dir.path().join("keep").exists());
        assert!(!temp_dir.path().join("remove").exists());
        assert!(temp_dir.path().join("file.txt").exists());
    }

    // Copying over an existing destination replaces its contents.
    #[tokio::test]
    async fn test_copy_resource_overwrites_existing() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "new content").unwrap();
        let dest = temp_dir.path().join("dest.md");
        std::fs::write(&dest, "old content").unwrap();
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
    }

    // File names with spaces and punctuation are handled as-is.
    #[tokio::test]
    async fn test_copy_resource_special_characters() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let special_name = "file with spaces & special-chars.md";
        std::fs::write(source_dir.join(special_name), "content").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }

    // get_cache_location is stable across calls.
    #[tokio::test]
    async fn test_cache_location_consistency() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("my_cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        let loc1 = cache.get_cache_location();
        let loc2 = cache.get_cache_location();
        assert_eq!(loc1, loc2);
        assert_eq!(loc1, cache_dir.as_path());
    }

    // An empty active list removes every subdirectory.
    #[tokio::test]
    async fn test_clean_unused_empty_active_list() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();
        let removed = cache.clean_unused(&[]).await.unwrap();
        assert_eq!(removed, 2);
        assert!(!temp_dir.path().join("source1").exists());
        assert!(!temp_dir.path().join("source2").exists());
    }

    // Relative paths with a single subdirectory component work.
    #[tokio::test]
    async fn test_copy_resource_with_relative_paths() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        let sub_dir = source_dir.join("agents");
        std::fs::create_dir_all(&sub_dir).unwrap();
        std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();
        let dest = temp_dir.path().join("my-agent.md");
        cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
    }

    // Sizes are summed recursively: 5 + 10 + 3 = 18 bytes.
    #[tokio::test]
    async fn test_cache_size_with_subdirectories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let sub1 = temp_dir.path().join("sub1");
        let sub2 = sub1.join("sub2");
        std::fs::create_dir_all(&sub2).unwrap();
        std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap();
        std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap();
        std::fs::write(sub2.join("file3.txt"), "abc").unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 18);
    }
}