use crate::core::error::AgpmError;
use crate::git::GitRepo;
use crate::git::command_builder::GitCommand;
use crate::utils::fs;
use crate::utils::security::validate_path_security;
use anyhow::{Context, Result};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::fs as async_fs;
use tokio::sync::{Mutex, RwLock};
/// In-process lifecycle state of a worktree entry in `Cache::worktree_cache`.
#[derive(Debug, Clone)]
enum WorktreeState {
    /// A task has reserved this cache key and is still creating the worktree.
    Pending,
    /// The worktree exists; the payload is its directory path.
    Ready(PathBuf),
}
/// On-disk registry of materialized worktrees, serialized as JSON
/// (persisted at `<cache>/worktrees/.state.json`, see `Cache::registry_path_for`).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct WorktreeRegistry {
    // Keyed by the same cache key used in memory: "{cache_dir_hash}:{owner}_{repo}:{sha}".
    entries: HashMap<String, WorktreeRecord>,
}
/// One persisted worktree entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct WorktreeRecord {
    /// Source name the worktree was created for.
    source: String,
    /// Version identifier (call sites in this file pass the short SHA).
    version: String,
    /// Directory of the worktree.
    path: PathBuf,
    /// Seconds since the Unix epoch when this record was last written.
    last_used: u64,
}
impl WorktreeRegistry {
    /// Loads the registry from `path`, falling back to an empty registry on
    /// a missing file, unreadable file, or malformed JSON.
    fn load(path: &Path) -> Self {
        let bytes = match std::fs::read(path) {
            Ok(bytes) => bytes,
            // A missing registry is the normal first-run case; stay quiet.
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Self::default(),
            Err(err) => {
                tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
                return Self::default();
            }
        };
        // Corrupt JSON degrades to an empty registry rather than failing.
        serde_json::from_slice(&bytes).unwrap_or_default()
    }
    /// Inserts or replaces the record for `key`, stamping it with the
    /// current Unix time.
    fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
        // Clock-before-epoch is treated as timestamp 0.
        let last_used = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_or(0, |elapsed| elapsed.as_secs());
        let record = WorktreeRecord {
            source,
            version,
            path,
            last_used,
        };
        self.entries.insert(key, record);
    }
    /// Removes the first record whose path equals `target`; returns whether
    /// anything was removed.
    fn remove_by_path(&mut self, target: &Path) -> bool {
        let matching_key = self
            .entries
            .iter()
            .find(|(_, record)| record.path == target)
            .map(|(key, _)| key.clone());
        match matching_key {
            Some(key) => {
                self.entries.remove(&key);
                true
            }
            None => false,
        }
    }
    /// Writes the registry as pretty-printed JSON, creating parent
    /// directories as needed.
    async fn persist(&self, path: &Path) -> Result<()> {
        if let Some(parent) = path.parent() {
            async_fs::create_dir_all(parent).await?;
        }
        let serialized = serde_json::to_vec_pretty(self)?;
        async_fs::write(path, serialized).await?;
        Ok(())
    }
}
pub mod lock;
pub use lock::CacheLock;
/// Git-backed resource cache: bare clones live under `sources/`, SHA-pinned
/// worktrees under `worktrees/`, cross-process lock files under `.locks`.
pub struct Cache {
    /// Root directory of the cache.
    dir: PathBuf,
    /// Worktree states keyed by `"{cache_dir_hash}:{owner}_{repo}:{sha}"`;
    /// `Pending` entries act as in-process reservations.
    worktree_cache: Arc<RwLock<HashMap<String, WorktreeState>>>,
    /// Per-bare-repo async mutexes serializing fetches within this process.
    fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,
    /// Bare repos already fetched during this command; used to skip refetching.
    fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,
    /// Registry mirrored to disk at `worktrees/.state.json`.
    worktree_registry: Arc<Mutex<WorktreeRegistry>>,
}
// Cloning a `Cache` shares all interior state: the Arc'd maps are reference
// copies, so clones observe each other's fetches and worktree reservations.
// NOTE(review): every field is `Clone`, so `#[derive(Clone)]` would be
// equivalent; the manual impl only makes the shared-state intent explicit.
impl Clone for Cache {
    fn clone(&self) -> Self {
        Self {
            dir: self.dir.clone(),
            worktree_cache: Arc::clone(&self.worktree_cache),
            fetch_locks: Arc::clone(&self.fetch_locks),
            fetched_repos: Arc::clone(&self.fetched_repos),
            worktree_registry: Arc::clone(&self.worktree_registry),
        }
    }
}
impl Cache {
/// Location of the persisted worktree registry for a cache rooted at
/// `cache_dir`: `<cache_dir>/worktrees/.state.json`.
fn registry_path_for(cache_dir: &Path) -> PathBuf {
    let mut state_file = cache_dir.to_path_buf();
    state_file.push("worktrees");
    state_file.push(".state.json");
    state_file
}
/// Registry file path for this cache instance.
fn registry_path(&self) -> PathBuf {
    Self::registry_path_for(&self.dir)
}
/// Verifies that a freshly created worktree is fully checked out by polling
/// `git diff-index --quiet HEAD` with exponential backoff.
///
/// `sha` is only used for log/error messages here; the worktree itself was
/// created pinned to that commit by the caller.
///
/// # Errors
/// Returns an error if the working tree still differs from `HEAD` after all
/// retry attempts (checkout incomplete or repository inaccessible).
async fn verify_worktree_accessible(worktree_path: &Path, sha: &str) -> Result<()> {
    use tokio_retry::Retry;
    use tokio_retry::strategy::{ExponentialBackoff, jitter};
    // Up to 10 attempts: 50ms base, exponential growth capped at 2s, jittered.
    let retry_strategy = ExponentialBackoff::from_millis(50)
        .max_delay(std::time::Duration::from_secs(2))
        .take(10)
        .map(jitter);
    let worktree_path = worktree_path.to_path_buf();
    // Defensive slice: `&sha[..8]` would panic on a SHA shorter than 8 chars
    // (callers validate 40-char SHAs today, but don't rely on it here).
    let sha_short = sha.get(..8).unwrap_or(sha);
    tracing::debug!(
        target: "git::worktree",
        "Verifying worktree at {} for SHA {}",
        worktree_path.display(),
        sha_short
    );
    Retry::spawn(retry_strategy, || async {
        // `diff-index --quiet HEAD` exits non-zero while the checkout is
        // still being written out.
        crate::git::command_builder::GitCommand::new()
            .args(["diff-index", "--quiet", "HEAD"])
            .current_dir(&worktree_path)
            .execute_success()
            .await
            .map_err(|_| "Working tree doesn't match HEAD (checkout incomplete)".to_string())?;
        tracing::debug!(
            target: "git::worktree",
            "Worktree verification passed for {}",
            worktree_path.display()
        );
        Ok::<(), String>(())
    })
    .await
    .map_err(|e| {
        anyhow::anyhow!(
            "Worktree not fully initialized after retries: {} @ {} - {}",
            worktree_path.display(),
            sha_short,
            e
        )
    })
}
/// Records (or refreshes) a worktree entry in the registry and persists the
/// registry to disk.
async fn record_worktree_usage(
    &self,
    registry_key: &str,
    source_name: &str,
    version_key: &str,
    worktree_path: &Path,
) -> Result<()> {
    // Compute the target file before taking the lock; persist while holding
    // it so updates and writes stay ordered.
    let registry_file = self.registry_path();
    let mut registry = self.worktree_registry.lock().await;
    registry.update(
        registry_key.to_owned(),
        source_name.to_owned(),
        version_key.to_owned(),
        worktree_path.to_owned(),
    );
    registry.persist(&registry_file).await
}
/// Drops the registry record whose path matches `worktree_path`, persisting
/// only when something was actually removed.
async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
    let mut registry = self.worktree_registry.lock().await;
    let removed = registry.remove_by_path(worktree_path);
    if removed {
        registry.persist(&self.registry_path()).await?;
    }
    Ok(())
}
/// Applies best-effort git transfer tuning to a freshly cloned repository.
/// Failures are ignored: these settings are optimizations, not requirements.
async fn configure_connection_pooling(path: &Path) -> Result<()> {
    let settings: [(&str, &str); 3] = [
        ("http.version", "HTTP/2"),
        ("http.postBuffer", "524288000"),
        ("core.compression", "0"),
    ];
    for (key, value) in settings {
        // Ignore per-key failures; a repo with default config still works.
        let _ = GitCommand::new()
            .args(["config", key, value])
            .current_dir(path)
            .execute_success()
            .await;
    }
    Ok(())
}
/// Creates a cache rooted at the platform default cache directory.
///
/// # Errors
/// Returns an error if the default cache directory cannot be determined.
pub fn new() -> Result<Self> {
    // Delegate to `with_dir` so the construction logic (registry load,
    // shared-state setup) lives in exactly one place.
    Self::with_dir(crate::config::get_cache_dir()?)
}
/// Creates a cache rooted at an explicit directory (used by tests and
/// custom configurations). Loads any previously persisted worktree registry.
pub fn with_dir(dir: PathBuf) -> Result<Self> {
    let registry = WorktreeRegistry::load(&Self::registry_path_for(&dir));
    Ok(Self {
        worktree_cache: Arc::new(RwLock::new(HashMap::new())),
        fetch_locks: Arc::new(DashMap::new()),
        fetched_repos: Arc::new(RwLock::new(HashSet::new())),
        worktree_registry: Arc::new(Mutex::new(registry)),
        dir,
    })
}
/// Ensures the cache root directory exists.
///
/// `create_dir_all` is already idempotent, so the previous `exists()`
/// pre-check was redundant and racy (TOCTOU between check and create);
/// call it unconditionally instead.
///
/// # Errors
/// Returns an error if the directory cannot be created.
pub async fn ensure_cache_dir(&self) -> Result<()> {
    async_fs::create_dir_all(&self.dir).await.with_context(|| {
        format!("Failed to create cache directory at {}", self.dir.display())
    })?;
    Ok(())
}
/// Root directory of the cache (borrowed).
#[must_use]
pub fn cache_dir(&self) -> &Path {
    &self.dir
}
/// Computes the worktree directory for `url` at commit `sha` without
/// creating anything: `<cache>/worktrees/{owner}_{repo}_{sha8}`.
///
/// # Errors
/// Returns an error when `url` is not a parseable git URL.
pub fn get_worktree_path(&self, url: &str, sha: &str) -> Result<PathBuf> {
    let (owner, repo) = crate::git::parse_git_url(url)
        .map_err(|e| anyhow::anyhow!("Invalid Git URL: {e}"))?;
    // Short SHAs (< 8 chars) are used whole.
    let short_len = sha.len().min(8);
    let sha_short = &sha[..short_len];
    let dir_name = format!("{owner}_{repo}_{sha_short}");
    Ok(self.dir.join("worktrees").join(dir_name))
}
/// Returns the local directory for a source, cloning or refreshing its bare
/// repository as needed. Thin public wrapper over
/// `get_or_clone_source_impl`; `version` is only meaningful for remote URLs
/// (local paths warn and ignore it).
pub async fn get_or_clone_source(
    &self,
    name: &str,
    url: &str,
    version: Option<&str>,
) -> Result<PathBuf> {
    self.get_or_clone_source_impl(name, url, version).await
}
/// Deletes a single worktree directory and its registry record. A missing
/// directory is treated as already cleaned up.
pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
    if !worktree_path.exists() {
        return Ok(());
    }
    tokio::fs::remove_dir_all(worktree_path).await.with_context(|| {
        format!("Failed to remove worktree directory: {}", worktree_path.display())
    })?;
    self.remove_worktree_record_by_path(worktree_path).await
}
/// Removes every worktree, prunes stale worktree metadata from each bare
/// repository under `sources/`, and clears the persisted registry.
pub async fn cleanup_all_worktrees(&self) -> Result<()> {
    let worktrees_dir = self.dir.join("worktrees");
    if !worktrees_dir.exists() {
        return Ok(());
    }
    tokio::fs::remove_dir_all(&worktrees_dir)
        .await
        .with_context(|| "Failed to clean up worktrees")?;
    // Bare repos still reference the deleted worktrees; prune that metadata
    // (best-effort) so future `git worktree add` calls don't trip over it.
    let sources_dir = self.dir.join("sources");
    if sources_dir.exists() {
        let mut dir_stream = tokio::fs::read_dir(&sources_dir).await?;
        while let Some(entry) = dir_stream.next_entry().await? {
            let entry_path = entry.path();
            let is_bare_repo = entry_path.extension().and_then(|s| s.to_str()) == Some("git");
            if is_bare_repo {
                GitRepo::new(&entry_path).prune_worktrees().await.ok();
            }
        }
    }
    // Finally drop all registry records and persist the empty state.
    let mut registry = self.worktree_registry.lock().await;
    if !registry.entries.is_empty() {
        registry.entries.clear();
        registry.persist(&self.registry_path()).await?;
    }
    Ok(())
}
/// Returns a worktree for `url` pinned at the exact commit `sha`, creating
/// the bare clone and/or the worktree on first use.
///
/// Concurrency protocol: readers poll `worktree_cache`; a creator inserts a
/// `Pending` reservation under the write lock, then builds the worktree
/// guarded by cross-process `CacheLock`s, and finally flips the entry to
/// `Ready`. Local-path sources bypass worktrees entirely.
///
/// # Errors
/// Fails on an invalid SHA, git clone/fetch/worktree failures, or lock
/// acquisition problems.
///
/// NOTE(review): if any `?` between the `Pending` insertion and the final
/// `Ready`/remove transitions fails (lock acquisition, registry persist),
/// the `Pending` entry is never cleared and concurrent callers of the same
/// key will poll forever — consider a drop guard. TODO confirm.
#[allow(clippy::too_many_lines)]
pub async fn get_or_create_worktree_for_sha(
    &self,
    name: &str,
    url: &str,
    sha: &str,
    context: Option<&str>,
) -> Result<PathBuf> {
    // Require a full 40-hex-char commit id; short refs are rejected here.
    if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err(anyhow::anyhow!(
            "Invalid SHA format: expected 40 hex characters, got '{sha}'"
        ));
    }
    // Local directories are used in place — no bare clone, no worktree.
    let is_local_path = crate::utils::is_local_path(url);
    if is_local_path {
        return self.get_or_clone_source(name, url, None).await;
    }
    self.ensure_cache_dir().await?;
    let (owner, repo) =
        crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
    // Safe slice: sha was validated to be exactly 40 chars above.
    let sha_short = &sha[..8];
    // Hash the cache root into the key so caches at different roots (tests)
    // sharing this process don't collide in the in-memory map.
    let cache_dir_hash = {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};
        let mut hasher = DefaultHasher::new();
        self.dir.hash(&mut hasher);
        format!("{:x}", hasher.finish())[..8].to_string()
    };
    let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");
    // Phase 1 (read): fast path for Ready entries; poll while another task
    // holds a Pending reservation for the same key.
    let mut should_create_worktree = false;
    while !should_create_worktree {
        {
            let cache_read = self.worktree_cache.read().await;
            match cache_read.get(&cache_key) {
                Some(WorktreeState::Ready(cached_path)) => {
                    if cached_path.exists() {
                        let cached_path = cached_path.clone();
                        drop(cache_read);
                        self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
                            .await?;
                        if let Some(ctx) = context {
                            tracing::debug!(
                                target: "git",
                                "({}) Reusing SHA-based worktree for {} @ {}",
                                ctx,
                                url.split('/').next_back().unwrap_or(url),
                                sha_short
                            );
                        }
                        return Ok(cached_path);
                    }
                    // Ready entry but directory vanished — recreate it.
                    should_create_worktree = true;
                }
                Some(WorktreeState::Pending) => {
                    if let Some(ctx) = context {
                        tracing::debug!(
                            target: "git",
                            "({}) Waiting for SHA worktree creation for {} @ {}",
                            ctx,
                            url.split('/').next_back().unwrap_or(url),
                            sha_short
                        );
                    }
                    drop(cache_read);
                    tokio::time::sleep(Duration::from_millis(100)).await;
                }
                None => {
                    should_create_worktree = true;
                }
            }
        }
    }
    // Phase 2 (write): re-check under the write lock, then reserve the key
    // as Pending so only one task in this process builds the worktree.
    let mut reservation_successful = false;
    while !reservation_successful {
        let mut cache_write = self.worktree_cache.write().await;
        match cache_write.get(&cache_key) {
            Some(WorktreeState::Ready(cached_path)) if cached_path.exists() => {
                return Ok(cached_path.clone());
            }
            Some(WorktreeState::Pending) => {
                drop(cache_write);
                tokio::time::sleep(Duration::from_millis(50)).await;
            }
            _ => {
                cache_write.insert(cache_key.clone(), WorktreeState::Pending);
                reservation_successful = true;
            }
        }
    }
    // Ensure the bare repository exists and is up to date (cross-process
    // CacheLock guards the clone; the hybrid lock guards the fetch).
    let bare_repo_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
    if bare_repo_dir.exists() {
        self.fetch_with_hybrid_lock(&bare_repo_dir, context).await?;
    } else {
        let lock_name = format!("{owner}_{repo}");
        let _lock = CacheLock::acquire(&self.dir, &lock_name).await?;
        if let Some(parent) = bare_repo_dir.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }
        // Re-check after acquiring the lock: another process may have cloned.
        if !bare_repo_dir.exists() {
            if let Some(ctx) = context {
                tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
            } else {
                tracing::debug!("📦 Cloning repository {url} to cache...");
            }
            GitRepo::clone_bare_with_context(url, &bare_repo_dir, context).await?;
            Self::configure_connection_pooling(&bare_repo_dir).await.ok();
        }
    }
    let bare_repo = GitRepo::new(&bare_repo_dir);
    let worktree_path = self.dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));
    let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
    let _worktree_lock = CacheLock::acquire(&self.dir, &worktree_lock_name).await?;
    // Another process may have created the worktree while we waited.
    if worktree_path.exists() {
        let mut cache_write = self.worktree_cache.write().await;
        cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
        self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
        return Ok(worktree_path);
    }
    // NOTE(review): this condition is always true here — the branch above
    // returns when the path exists — so the prune runs unconditionally.
    if !worktree_path.exists() {
        let _ = bare_repo.prune_worktrees().await;
    }
    if let Some(ctx) = context {
        tracing::debug!(
            target: "git",
            "({}) Creating SHA-based worktree: {} @ {}",
            ctx,
            url.split('/').next_back().unwrap_or(url),
            sha_short
        );
    }
    // Serialize worktree creation against other mutations of the bare repo.
    let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");
    let _bare_repo_lock = CacheLock::acquire(&self.dir, &bare_repo_lock_name).await?;
    let worktree_result =
        bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context).await;
    match worktree_result {
        Ok(_) => {
            // Wait until the checkout is fully written before publishing.
            Self::verify_worktree_accessible(&worktree_path, sha).await?;
            let mut cache_write = self.worktree_cache.write().await;
            cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
            self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
            Ok(worktree_path)
        }
        Err(e) => {
            // Roll back the Pending reservation so other tasks can retry.
            let mut cache_write = self.worktree_cache.write().await;
            cache_write.remove(&cache_key);
            Err(e)
        }
    }
}
/// Resolves a source to a local directory: local paths are canonicalized and
/// security-checked; remote URLs are cloned bare into `sources/` (or fetched
/// if already cloned, at most once per command run).
///
/// # Errors
/// Fails on inaccessible local paths, lock acquisition failure, directory
/// creation failure, or clone failure. Fetch failures are logged but do not
/// fail the call (the cached clone is still usable offline).
async fn get_or_clone_source_impl(
    &self,
    name: &str,
    url: &str,
    version: Option<&str>,
) -> Result<PathBuf> {
    let is_local_path = crate::utils::is_local_path(url);
    if is_local_path {
        // Local sources are used in place; no cache entry is created.
        let resolved_path = crate::utils::platform::resolve_path(url)?;
        let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
            .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;
        validate_path_security(&canonical_path, true)?;
        if let Some(ver) = version
            && ver != "local"
        {
            eprintln!("Warning: Version constraints are ignored for local paths");
        }
        return Ok(canonical_path);
    }
    self.ensure_cache_dir().await?;
    // Cross-process lock keyed by source name, held for the rest of the fn.
    let _lock = CacheLock::acquire(&self.dir, name)
        .await
        .with_context(|| format!("Failed to acquire lock for source: {name}"))?;
    let (owner, repo) =
        crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
    let source_dir = self.dir.join("sources").join(format!("{owner}_{repo}.git"));
    if let Some(parent) = source_dir.parent() {
        tokio::fs::create_dir_all(parent).await.with_context(|| {
            format!("Failed to create cache directory: {}", parent.display())
        })?;
    }
    if source_dir.exists() {
        if crate::utils::is_git_url(url) {
            // Fetch at most once per command run (tracked in fetched_repos).
            let already_fetched = {
                let fetched = self.fetched_repos.read().await;
                fetched.contains(&source_dir)
            };
            if already_fetched {
                tracing::debug!(
                    target: "agpm::cache",
                    "Skipping fetch for {} (already fetched in this command)",
                    name
                );
            } else {
                tracing::debug!(
                    target: "agpm::cache",
                    "Fetching updates for {} from {}",
                    name,
                    url
                );
                let repo = crate::git::GitRepo::new(&source_dir);
                // Best-effort: a failed fetch (e.g. offline) keeps the
                // existing clone usable and is only logged.
                if let Err(e) = repo.fetch(None).await {
                    tracing::warn!(
                        target: "agpm::cache",
                        "Failed to fetch updates for {}: {}",
                        name,
                        e
                    );
                } else {
                    let mut fetched = self.fetched_repos.write().await;
                    fetched.insert(source_dir.clone());
                    tracing::debug!(
                        target: "agpm::cache",
                        "Successfully fetched updates for {}",
                        name
                    );
                }
            }
        } else {
            tracing::debug!(
                target: "agpm::cache",
                "Skipping fetch for local path: {}",
                url
            );
        }
    } else {
        self.clone_source(url, &source_dir).await?;
    }
    Ok(source_dir)
}
/// Clones `url` as a bare repository into `target`. In test builds the
/// cloned directory contents are logged for debugging.
async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
    tracing::debug!("📦 Cloning {} to cache...", url);
    GitRepo::clone_bare(url, target)
        .await
        .with_context(|| format!("Failed to clone repository from {url}"))?;
    if cfg!(test) {
        if let Ok(entries) = std::fs::read_dir(target) {
            tracing::debug!(
                target: "agpm::cache",
                "Cloned bare repo to {}, contents:",
                target.display()
            );
            for entry in entries.flatten() {
                tracing::debug!(
                    target: "agpm::cache",
                    " - {}",
                    entry.path().display()
                );
            }
        }
    }
    Ok(())
}
/// Copies `source_path` (relative to `source_dir`) to `target_path` without
/// printing progress; see `copy_resource_with_output`.
pub async fn copy_resource(
    &self,
    source_dir: &Path,
    source_path: &str,
    target_path: &Path,
) -> Result<()> {
    self.copy_resource_with_output(source_dir, source_path, target_path, false).await
}
/// Copies a file out of a source checkout into the project, creating parent
/// directories and optionally printing a confirmation line.
///
/// # Errors
/// Returns `AgpmError::ResourceFileNotFound` when the source file does not
/// exist, or an I/O error from directory creation / copying.
pub async fn copy_resource_with_output(
    &self,
    source_dir: &Path,
    source_path: &str,
    target_path: &Path,
    show_output: bool,
) -> Result<()> {
    let source_file = source_dir.join(source_path);
    if !source_file.exists() {
        // Report the source directory's name so the error identifies which
        // source was missing the resource.
        let source_name = source_dir
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("unknown")
            .to_string();
        return Err(AgpmError::ResourceFileNotFound {
            path: source_path.to_string(),
            source_name,
        }
        .into());
    }
    if let Some(parent) = target_path.parent() {
        async_fs::create_dir_all(parent)
            .await
            .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
    }
    async_fs::copy(&source_file, target_path).await.with_context(|| {
        format!("Failed to copy {} to {}", source_file.display(), target_path.display())
    })?;
    if show_output {
        println!("  ✅ Installed {}", target_path.display());
    }
    Ok(())
}
/// Removes top-level cache subdirectories whose names are not listed in
/// `active_sources`; returns how many were removed. Plain files at the cache
/// root are left alone.
///
/// NOTE(review): this considers *every* directory at the cache root,
/// including `sources/` and `worktrees/`, unless the caller lists them in
/// `active_sources` — confirm callers account for that.
///
/// # Errors
/// Returns an error if the cache directory cannot be read or a directory
/// cannot be removed.
pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
    self.ensure_cache_dir().await?;
    let mut removed_count = 0;
    let mut entries = async_fs::read_dir(&self.dir)
        .await
        .with_context(|| "Failed to read cache directory")?;
    while let Some(entry) =
        entries.next_entry().await.with_context(|| "Failed to read directory entry")?
    {
        let path = entry.path();
        if !path.is_dir() {
            continue;
        }
        let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
        // Compare as &str to avoid allocating a String for every entry
        // (the old `contains(&dir_name.to_string())` allocated per dir).
        if !active_sources.iter().any(|s| s == dir_name) {
            println!("🗑️  Removing unused cache: {dir_name}");
            async_fs::remove_dir_all(&path).await.with_context(|| {
                format!("Failed to remove cache directory: {}", path.display())
            })?;
            removed_count += 1;
        }
    }
    Ok(removed_count)
}
/// Total size in bytes of everything under the cache root; zero when the
/// cache directory does not exist yet.
pub async fn get_cache_size(&self) -> Result<u64> {
    if !self.dir.exists() {
        return Ok(0);
    }
    Ok(fs::get_directory_size(&self.dir).await?)
}
/// Root directory of the cache.
///
/// Alias of [`Self::cache_dir`], kept for backward compatibility; delegate
/// so the two accessors can never drift apart.
#[must_use]
pub fn get_cache_location(&self) -> &Path {
    self.cache_dir()
}
/// Deletes the entire cache directory tree. A nonexistent cache is a no-op.
pub async fn clear_all(&self) -> Result<()> {
    if !self.dir.exists() {
        return Ok(());
    }
    async_fs::remove_dir_all(&self.dir).await.with_context(|| "Failed to clear cache")?;
    println!("🗑️  Cleared all cache");
    Ok(())
}
/// Fetches updates for a bare repository under a two-level lock: an
/// in-process async mutex (per repo path) plus a cross-process exclusive
/// file lock under `.locks/`. Skips the fetch entirely when this repo was
/// already fetched during the current command.
///
/// NOTE(review): `lock_exclusive()` is a *blocking* call made on the async
/// executor thread while the tokio mutex is held — it may stall the worker
/// if another process holds the lock long. Consider `spawn_blocking` or
/// fs4's try/async variants. TODO confirm this is acceptable here.
/// The file lock is released implicitly when `std_file` is dropped at the
/// end of the function.
async fn fetch_with_hybrid_lock(
    &self,
    bare_repo_path: &Path,
    context: Option<&str>,
) -> Result<()> {
    use fs4::fs_std::FileExt;
    // In-process serialization: one mutex per bare repo path.
    let memory_lock = self
        .fetch_locks
        .entry(bare_repo_path.to_path_buf())
        .or_insert_with(|| Arc::new(Mutex::new(())))
        .clone();
    let _memory_guard = memory_lock.lock().await;
    // Derive a filesystem-safe lock file name from the repo directory name.
    let safe_name = bare_repo_path
        .file_name()
        .and_then(|s| s.to_str())
        .unwrap_or("unknown")
        .replace(['/', '\\', ':'], "_");
    let lock_path = self.dir.join(".locks").join(format!("{safe_name}.fetch.lock"));
    if let Some(parent) = lock_path.parent() {
        tokio::fs::create_dir_all(parent).await?;
    }
    let lock_file = tokio::fs::OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(false)
        .open(&lock_path)
        .await?;
    let std_file = lock_file.into_std().await;
    if let Some(ctx) = context {
        tracing::debug!(
            target: "agpm::git",
            "({}) Acquiring file lock for {}",
            ctx,
            bare_repo_path.display()
        );
    }
    // Cross-process serialization (blocking; see NOTE above).
    std_file.lock_exclusive()?;
    if let Some(ctx) = context {
        tracing::debug!(
            target: "agpm::git",
            "({}) Acquired file lock for {}",
            ctx,
            bare_repo_path.display()
        );
    }
    // Check the per-command fetched set only after both locks are held, so
    // a concurrent fetch that just finished is observed.
    let already_fetched = {
        let fetched = self.fetched_repos.read().await;
        let is_fetched = fetched.contains(bare_repo_path);
        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Checking if already fetched: {} - Result: {} (total fetched: {}, hashset addr: {:p})",
                ctx,
                bare_repo_path.display(),
                is_fetched,
                fetched.len(),
                &raw const *fetched
            );
        }
        is_fetched
    };
    if already_fetched {
        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Skipping fetch (already fetched in this command): {}",
                ctx,
                bare_repo_path.display()
            );
        }
        return Ok(());
    }
    let repo = GitRepo::new(bare_repo_path);
    if let Some(ctx) = context {
        tracing::debug!(
            target: "agpm::git",
            "({}) Fetching updates for {}",
            ctx,
            bare_repo_path.display()
        );
    }
    repo.fetch(None).await?;
    // Record success so later calls in this command skip the network.
    {
        let mut fetched = self.fetched_repos.write().await;
        fetched.insert(bare_repo_path.to_path_buf());
        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Marked as fetched: {} (total fetched: {}, hashset addr: {:p})",
                ctx,
                bare_repo_path.display(),
                fetched.len(),
                &raw const *fetched
            );
        }
    }
    Ok(())
}
}
// Unit tests for `Cache` using temp directories; git-dependent paths
// (clone/fetch/worktrees) are exercised elsewhere in integration tests.
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    // ensure_cache_dir creates the root directory on first use.
    #[tokio::test]
    async fn test_cache_dir_creation() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
    }
    // get_cache_location reports the configured root.
    #[tokio::test]
    async fn test_cache_location() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        let location = cache.get_cache_location();
        assert_eq!(location, temp_dir.path());
    }
    #[tokio::test]
    async fn test_cache_size_empty() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }
    // Size equals the byte length of the single written file.
    #[tokio::test]
    async fn test_cache_size_with_content() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let test_file = temp_dir.path().join("test.txt");
        std::fs::write(&test_file, "test content").unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert!(size > 0);
        assert_eq!(size, 12); } // "test content" is 12 bytes
    #[tokio::test]
    async fn test_clean_unused_empty_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 0);
    }
    // Only directories not named in active_sources are removed.
    #[tokio::test]
    async fn test_clean_unused_removes_correct_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let active_dir = temp_dir.path().join("active");
        let unused_dir = temp_dir.path().join("unused");
        let another_unused = temp_dir.path().join("another_unused");
        std::fs::create_dir_all(&active_dir).unwrap();
        std::fs::create_dir_all(&unused_dir).unwrap();
        std::fs::create_dir_all(&another_unused).unwrap();
        std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
        std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
        std::fs::write(another_unused.join("file.txt"), "remove").unwrap();
        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 2);
        assert!(active_dir.exists());
        assert!(!unused_dir.exists());
        assert!(!another_unused.exists());
    }
    // clear_all removes the cache root itself, not just its contents.
    #[tokio::test]
    async fn test_clear_all_removes_entire_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let subdir = temp_dir.path().join("subdir");
        std::fs::create_dir_all(&subdir).unwrap();
        std::fs::write(subdir.join("file.txt"), "content").unwrap();
        assert!(temp_dir.path().exists());
        assert!(subdir.exists());
        cache.clear_all().await.unwrap();
        assert!(!temp_dir.path().exists());
    }
    #[tokio::test]
    async fn test_copy_resource() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let source_file = source_dir.join("resource.md");
        std::fs::write(&source_file, "# Test Resource\nContent").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();
        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Test Resource\nContent");
    }
    // source_path may contain nested relative components.
    #[tokio::test]
    async fn test_copy_resource_nested_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        let nested_dir = source_dir.join("nested").join("path");
        std::fs::create_dir_all(&nested_dir).unwrap();
        let source_file = nested_dir.join("resource.md");
        std::fs::write(&source_file, "# Nested Resource").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();
        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Nested Resource");
    }
    // A missing source file errors out without creating the destination.
    #[tokio::test]
    async fn test_copy_resource_invalid_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let dest = temp_dir.path().join("dest.md");
        let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;
        assert!(result.is_err());
        assert!(!dest.exists());
    }
    // Repeated ensure_cache_dir calls neither fail nor wipe existing content.
    #[tokio::test]
    async fn test_ensure_cache_dir_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        std::fs::write(cache_dir.join("test.txt"), "content").unwrap();
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        assert!(cache_dir.join("test.txt").exists());
    }
    #[tokio::test]
    async fn test_copy_resource_creates_parent_directories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();
        let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }
    // show_output only affects stdout; the copy succeeds either way.
    #[tokio::test]
    async fn test_copy_resource_with_output_flag() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();
        let dest1 = temp_dir.path().join("dest1.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
        assert!(dest1.exists());
        let dest2 = temp_dir.path().join("dest2.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
        assert!(dest2.exists());
    }
    #[tokio::test]
    async fn test_cache_size_nonexistent_dir() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }
    // clear_all on a nonexistent cache root is a silent no-op.
    #[tokio::test]
    async fn test_clear_all_nonexistent_cache() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();
        cache.clear_all().await.unwrap();
    }
    // Plain files at the cache root are never removed by clean_unused.
    #[tokio::test]
    async fn test_clean_unused_with_files_and_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();
        std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();
        let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();
        assert_eq!(removed, 1);
        assert!(temp_dir.path().join("keep").exists());
        assert!(!temp_dir.path().join("remove").exists());
        assert!(temp_dir.path().join("file.txt").exists());
    }
    #[tokio::test]
    async fn test_copy_resource_overwrites_existing() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "new content").unwrap();
        let dest = temp_dir.path().join("dest.md");
        std::fs::write(&dest, "old content").unwrap();
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
    }
    #[tokio::test]
    async fn test_copy_resource_special_characters() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let special_name = "file with spaces & special-chars.md";
        std::fs::write(source_dir.join(special_name), "content").unwrap();
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }
    #[tokio::test]
    async fn test_cache_location_consistency() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("my_cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        let loc1 = cache.get_cache_location();
        let loc2 = cache.get_cache_location();
        assert_eq!(loc1, loc2);
        assert_eq!(loc1, cache_dir.as_path());
    }
    // With no active sources, every directory is eligible for removal.
    #[tokio::test]
    async fn test_clean_unused_empty_active_list() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();
        let removed = cache.clean_unused(&[]).await.unwrap();
        assert_eq!(removed, 2);
        assert!(!temp_dir.path().join("source1").exists());
        assert!(!temp_dir.path().join("source2").exists());
    }
    #[tokio::test]
    async fn test_copy_resource_with_relative_paths() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();
        let source_dir = temp_dir.path().join("source");
        let sub_dir = source_dir.join("agents");
        std::fs::create_dir_all(&sub_dir).unwrap();
        std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();
        let dest = temp_dir.path().join("my-agent.md");
        cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();
        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
    }
    // Size accounting recurses into nested subdirectories.
    #[tokio::test]
    async fn test_cache_size_with_subdirectories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        cache.ensure_cache_dir().await.unwrap();
        let sub1 = temp_dir.path().join("sub1");
        let sub2 = sub1.join("sub2");
        std::fs::create_dir_all(&sub2).unwrap();
        std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap(); std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap(); std::fs::write(sub2.join("file3.txt"), "abc").unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 18); } // 5 + 10 + 3 bytes across all levels
}