mod storage;
pub mod types;
pub use storage::RecoveryStorage;
pub use types::{
generate_buffer_id, path_hash, ChunkMeta, ChunkedRecoveryData, ChunkedRecoveryIndex,
InplaceWriteRecovery, RecoveryChunk, RecoveryEntry, RecoveryMetadata, RecoveryResult,
SessionInfo, MAX_CHUNK_SIZE,
};
use std::collections::HashMap;
use std::io;
use std::path::{Path, PathBuf};
use std::time::Instant;
/// Tunable settings for the crash-recovery subsystem.
#[derive(Debug, Clone)]
pub struct RecoveryConfig {
    /// Master switch; when `false`, most service operations become no-ops.
    pub enabled: bool,
    /// Entries older than this many seconds are removed by `cleanup_old`.
    pub max_recovery_age_secs: u64,
}
impl Default for RecoveryConfig {
    /// Recovery enabled, with stale entries kept for at most one week.
    fn default() -> Self {
        // Seven days expressed in seconds.
        let one_week_secs: u64 = 7 * 24 * 60 * 60;
        Self {
            enabled: true,
            max_recovery_age_secs: one_week_secs,
        }
    }
}
/// Coordinates crash-recovery persistence for open buffers: session
/// lock lifecycle, per-buffer recovery snapshots, and cleanup.
#[derive(Debug)]
pub struct RecoveryService {
    /// Backing store that owns the on-disk recovery files.
    storage: RecoveryStorage,
    /// Behavior switches (enabled flag, retention age).
    config: RecoveryConfig,
    /// Last save instant per buffer id. NOTE(review): currently write-only
    /// bookkeeping — it is inserted/removed but never read in this file.
    last_save_times: HashMap<String, Instant>,
    /// True between a successful `start_session` and `end_session`.
    session_started: bool,
}
impl RecoveryService {
    /// Creates a service over the default storage location with the default
    /// configuration.
    ///
    /// # Errors
    /// Propagates any I/O error from `RecoveryStorage::new`.
    pub fn new() -> io::Result<Self> {
        Ok(Self {
            storage: RecoveryStorage::new()?,
            config: RecoveryConfig::default(),
            last_save_times: HashMap::new(),
            session_started: false,
        })
    }

    /// Same as [`RecoveryService::new`] but with a caller-supplied config.
    ///
    /// # Errors
    /// Propagates any I/O error from `RecoveryStorage::new`.
    pub fn with_config(config: RecoveryConfig) -> io::Result<Self> {
        Ok(Self {
            storage: RecoveryStorage::new()?,
            config,
            last_save_times: HashMap::new(),
            session_started: false,
        })
    }

    /// Creates a service whose recovery files live under `storage_dir`.
    /// Infallible — presumably `with_dir` defers directory creation to
    /// first use; confirm in the storage module.
    pub fn with_storage_dir(storage_dir: PathBuf) -> Self {
        Self {
            storage: RecoveryStorage::with_dir(storage_dir),
            config: RecoveryConfig::default(),
            last_save_times: HashMap::new(),
            session_started: false,
        }
    }

    /// Combines a caller-supplied config with a custom storage directory.
    pub fn with_config_and_dir(config: RecoveryConfig, storage_dir: PathBuf) -> Self {
        Self {
            storage: RecoveryStorage::with_dir(storage_dir),
            config,
            last_save_times: HashMap::new(),
            session_started: false,
        }
    }

    /// Whether recovery is enabled in the current configuration.
    pub fn is_enabled(&self) -> bool {
        self.config.enabled
    }

    /// Read-only access to the underlying storage.
    pub fn storage(&self) -> &RecoveryStorage {
        &self.storage
    }

    /// Returns `true` when a previous session appears to have crashed
    /// (per `detect_crash`) *and* left at least one recovery entry behind.
    /// Always `false` when recovery is disabled.
    pub fn should_offer_recovery(&self) -> io::Result<bool> {
        if !self.config.enabled {
            return Ok(false);
        }
        if self.storage.detect_crash()? {
            let entries = self.storage.list_entries()?;
            return Ok(!entries.is_empty());
        }
        Ok(false)
    }

    /// Marks the start of an editing session by creating the session lock.
    /// No-op when recovery is disabled.
    pub fn start_session(&mut self) -> io::Result<()> {
        if !self.config.enabled {
            return Ok(());
        }
        self.storage.create_session_lock()?;
        self.session_started = true;
        tracing::info!("Recovery session started");
        Ok(())
    }

    /// Ends the session cleanly: removes all recovery files, then drops the
    /// session lock. No-op when disabled or when no session was started.
    // NOTE(review): if `cleanup_all` fails, the lock is left in place, so the
    // next launch will treat this as a crash — presumably intentional.
    pub fn end_session(&mut self) -> io::Result<()> {
        if !self.config.enabled || !self.session_started {
            return Ok(());
        }
        let cleaned = self.storage.cleanup_all()?;
        tracing::info!("Cleaned up {} recovery files", cleaned);
        self.storage.remove_session_lock()?;
        self.session_started = false;
        tracing::info!("Recovery session ended");
        Ok(())
    }

    /// Refreshes the session lock so crash detection can distinguish a live
    /// session from a dead one. No-op unless enabled and started.
    pub fn heartbeat(&self) -> io::Result<()> {
        if self.config.enabled && self.session_started {
            self.storage.update_session_lock()?;
        }
        Ok(())
    }

    /// Whether an auto-recovery save should run now. The decision is purely
    /// `enabled && recovery_pending`; `_buffer_id` is currently unused
    /// (kept, presumably, for future per-buffer throttling via
    /// `last_save_times`).
    pub fn needs_auto_recovery_save(&self, _buffer_id: &str, recovery_pending: bool) -> bool {
        if !self.config.enabled {
            return false;
        }
        recovery_pending
    }

    /// Stable identifier for a buffer, derived from its path (or a
    /// path-less scheme for unnamed buffers) by the storage layer.
    pub fn get_buffer_id(&self, path: Option<&Path>) -> String {
        self.storage.get_buffer_id(path)
    }

    /// Persists `chunks` as the recovery snapshot for `buffer_id` and
    /// records the save time. No-op when recovery is disabled.
    ///
    /// * `original_file_size` — size of the on-disk original (0 for a new,
    ///   never-saved buffer; see `load_recovery`).
    /// * `final_size` — size of the buffer content after edits.
    ///
    /// # Errors
    /// Propagates I/O errors from the storage layer.
    #[allow(clippy::too_many_arguments)]
    pub fn save_buffer(
        &mut self,
        buffer_id: &str,
        chunks: Vec<RecoveryChunk>,
        original_path: Option<&Path>,
        buffer_name: Option<&str>,
        line_count: Option<usize>,
        original_file_size: usize,
        final_size: usize,
    ) -> io::Result<()> {
        if !self.config.enabled {
            return Ok(());
        }
        self.storage.save_recovery(
            buffer_id,
            chunks,
            original_path,
            buffer_name,
            line_count,
            original_file_size,
            final_size,
        )?;
        self.last_save_times
            .insert(buffer_id.to_string(), Instant::now());
        tracing::trace!(
            "Saved recovery for buffer {} (original: {} bytes, final: {} bytes)",
            buffer_id,
            original_file_size,
            final_size
        );
        Ok(())
    }

    /// Deletes the recovery snapshot (and save-time entry) for one buffer.
    /// No-op when recovery is disabled.
    pub fn delete_buffer_recovery(&mut self, buffer_id: &str) -> io::Result<()> {
        if !self.config.enabled {
            return Ok(());
        }
        self.storage.delete_recovery(buffer_id)?;
        self.last_save_times.remove(buffer_id);
        tracing::debug!("Deleted recovery for buffer {}", buffer_id);
        Ok(())
    }

    /// Lists all recovery entries currently on disk.
    pub fn list_recoverable(&self) -> io::Result<Vec<RecoveryEntry>> {
        self.storage.list_entries()
    }

    /// Loads a recovery entry and classifies the outcome:
    ///
    /// * Entry taken against an existing original (`original_file_size > 0`):
    ///   the original must still exist and be unmodified; the stored chunks
    ///   are returned for the caller to apply (`RecoveredChunks`).
    /// * Entry for a new buffer (size 0): expects exactly one chunk at
    ///   offset 0 holding the full content (`Recovered`).
    /// * Anything else yields `OriginalFileModified` or `Corrupted`.
    ///
    /// # Errors
    /// I/O errors reading chunk data, or `NotFound` when chunk content is
    /// missing from storage.
    pub fn load_recovery(&self, entry: &RecoveryEntry) -> io::Result<RecoveryResult> {
        if entry.metadata.original_file_size > 0 {
            if let Some(ref original_path) = entry.metadata.original_path {
                // Modification check runs before the existence check;
                // NOTE(review): assumes `original_file_modified()` handles a
                // missing file sensibly — confirm in types.rs.
                if entry.original_file_modified() {
                    return Ok(RecoveryResult::OriginalFileModified {
                        id: entry.id.clone(),
                        original_path: original_path.clone(),
                    });
                }
                if !original_path.exists() {
                    return Ok(RecoveryResult::Corrupted {
                        id: entry.id.clone(),
                        reason: format!(
                            "Original file not found: {}. Recovery requires the original file.",
                            original_path.display()
                        ),
                    });
                }
                let chunked_data =
                    self.storage
                        .read_chunked_content(&entry.id)?
                        .ok_or_else(|| {
                            io::Error::new(io::ErrorKind::NotFound, "Chunk content not found")
                        })?;
                return Ok(RecoveryResult::RecoveredChunks {
                    original_path: original_path.clone(),
                    chunks: chunked_data.chunks,
                });
            } else {
                // A nonzero original size with no recorded path is
                // inconsistent metadata.
                return Ok(RecoveryResult::Corrupted {
                    id: entry.id.clone(),
                    reason: "Recovery entry requires original file but path is not set".to_string(),
                });
            }
        }
        // New-buffer path: the whole content must be one chunk at offset 0.
        let chunked_data = self
            .storage
            .read_chunked_content(&entry.id)?
            .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "Chunk content not found"))?;
        if chunked_data.chunks.len() == 1 && chunked_data.chunks[0].offset == 0 {
            Ok(RecoveryResult::Recovered {
                original_path: entry.metadata.original_path.clone(),
                content: chunked_data.chunks[0].content.clone(),
            })
        } else {
            Ok(RecoveryResult::Corrupted {
                id: entry.id.clone(),
                reason: "Invalid recovery format: expected single chunk for new buffer".to_string(),
            })
        }
    }

    /// Reconstructs full buffer content by applying the stored chunks on
    /// top of `original_file`, bypassing the checks in `load_recovery`.
    ///
    /// # Errors
    /// Propagates I/O errors from chunk reconstruction.
    pub fn load_recovery_with_original(
        &self,
        entry: &RecoveryEntry,
        original_file: &Path,
    ) -> io::Result<RecoveryResult> {
        let content = self
            .storage
            .reconstruct_from_chunks(&entry.id, original_file)?;
        Ok(RecoveryResult::Recovered {
            original_path: Some(original_file.to_path_buf()),
            content,
        })
    }

    /// Loads a recovery entry and, on a full `Recovered` result, deletes
    /// the underlying recovery file.
    // NOTE(review): `RecoveredChunks` entries are deliberately kept —
    // presumably the caller must still apply them before they can be
    // discarded; confirm against call sites.
    pub fn accept_recovery(&mut self, entry: &RecoveryEntry) -> io::Result<RecoveryResult> {
        let result = self.load_recovery(entry)?;
        if matches!(result, RecoveryResult::Recovered { .. }) {
            self.storage.delete_recovery(&entry.id)?;
        }
        Ok(result)
    }

    /// Permanently discards a single recovery entry.
    pub fn discard_recovery(&mut self, entry: &RecoveryEntry) -> io::Result<()> {
        self.storage.delete_recovery(&entry.id)
    }

    /// Discards every recovery entry; returns how many were removed.
    pub fn discard_all_recovery(&mut self) -> io::Result<usize> {
        self.storage.cleanup_all()
    }

    /// Deletes entries older than `max_recovery_age_secs`; returns how many
    /// were removed. Individual delete failures are silently skipped (not
    /// counted). Returns 0 when recovery is disabled.
    pub fn cleanup_old(&self) -> io::Result<usize> {
        if !self.config.enabled {
            return Ok(0);
        }
        let entries = self.storage.list_entries()?;
        let mut cleaned = 0;
        for entry in entries {
            if entry.age_seconds() > self.config.max_recovery_age_secs
                && self.storage.delete_recovery(&entry.id).is_ok()
            {
                cleaned += 1;
            }
        }
        if cleaned > 0 {
            tracing::info!("Cleaned up {} old recovery files", cleaned);
        }
        Ok(cleaned)
    }

    /// Removes orphaned recovery artifacts (delegated to storage).
    pub fn cleanup_orphans(&self) -> io::Result<usize> {
        self.storage.cleanup_orphans()
    }
}
impl Default for RecoveryService {
    /// Tries the fallible constructor first; if that fails, falls back to a
    /// disabled-storage-free service built from default components.
    fn default() -> Self {
        match Self::new() {
            Ok(service) => service,
            Err(_) => Self {
                storage: RecoveryStorage::default(),
                config: RecoveryConfig::default(),
                last_save_times: HashMap::new(),
                session_started: false,
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Builds a service whose storage lives in a throwaway temp directory.
    fn create_test_service() -> (RecoveryService, TempDir) {
        let dir = TempDir::new().unwrap();
        let service = RecoveryService {
            storage: RecoveryStorage::with_dir(dir.path().to_path_buf()),
            config: RecoveryConfig::default(),
            last_save_times: HashMap::new(),
            session_started: false,
        };
        (service, dir)
    }

    #[test]
    fn test_session_lifecycle() {
        let (mut svc, _tmp) = create_test_service();

        svc.start_session().unwrap();
        assert!(svc.session_started);

        svc.end_session().unwrap();
        assert!(!svc.session_started);
    }

    #[test]
    fn test_save_and_recover() {
        let (mut svc, _tmp) = create_test_service();
        svc.start_session().unwrap();

        let body = b"Test content for recovery";
        let file = Path::new("/test/file.txt");
        let buffer_id = svc.get_buffer_id(Some(file));

        svc.save_buffer(
            &buffer_id,
            vec![RecoveryChunk::new(0, 0, body.to_vec())],
            Some(file),
            None,
            Some(1),
            0,
            body.len(),
        )
        .unwrap();

        let recoverable = svc.list_recoverable().unwrap();
        assert_eq!(recoverable.len(), 1);

        match svc.load_recovery(&recoverable[0]).unwrap() {
            RecoveryResult::Recovered {
                original_path,
                content: loaded,
            } => {
                assert_eq!(original_path, Some(file.to_path_buf()));
                assert_eq!(loaded, body);
            }
            _ => panic!("Expected Recovered result"),
        }
    }

    #[test]
    fn test_needs_auto_recovery_save() {
        let (svc, _tmp) = create_test_service();
        let buffer_id = "test-buffer";

        // Only the `recovery_pending` flag drives the decision.
        assert!(!svc.needs_auto_recovery_save(buffer_id, false));
        assert!(svc.needs_auto_recovery_save(buffer_id, true));
        assert!(!svc.needs_auto_recovery_save(buffer_id, false));
    }

    #[test]
    fn test_disabled_service() {
        let (mut svc, _tmp) = create_test_service();
        svc.config.enabled = false;

        // A disabled service never requests saves and save_buffer is a no-op.
        assert!(!svc.needs_auto_recovery_save("test", true));
        svc.save_buffer(
            "test",
            vec![RecoveryChunk::new(0, 0, b"content".to_vec())],
            None,
            None,
            None,
            0,
            7,
        )
        .unwrap();
    }

    #[test]
    fn test_load_recovery_returns_chunks_for_large_files() {
        use std::fs;

        let (mut svc, tmp) = create_test_service();
        svc.start_session().unwrap();

        // Persist a real original file so the chunk-based path is taken.
        let original_content = b"Hello, this is the original content!";
        let original_path = tmp.path().join("original.txt");
        fs::write(&original_path, original_content).unwrap();

        let buffer_id = svc.get_buffer_id(Some(&original_path));
        svc.save_buffer(
            &buffer_id,
            vec![RecoveryChunk::new(0, 0, b"PREFIX: ".to_vec())],
            Some(&original_path),
            None,
            Some(1),
            original_content.len(),
            original_content.len() + 8,
        )
        .unwrap();

        let recoverable = svc.list_recoverable().unwrap();
        assert_eq!(recoverable.len(), 1);

        match svc.load_recovery(&recoverable[0]).unwrap() {
            RecoveryResult::RecoveredChunks {
                original_path: path,
                chunks,
            } => {
                assert_eq!(path, original_path);
                assert_eq!(chunks.len(), 1);
                assert_eq!(chunks[0].offset, 0);
                assert_eq!(chunks[0].original_len, 0);
                assert_eq!(chunks[0].content, b"PREFIX: ");
            }
            other => panic!("Expected RecoveredChunks result, got {:?}", other),
        }
    }
}