use crate::document_text_storage::DocumentTextStorage;
use crate::error::ShardexError;
use crate::error_handling::{
BackupManager, BackupRetentionPolicy, RecoveryConfig, RecoveryStrategy, TextStorageHealth,
TextStorageHealthMonitor, TextStorageRecoveryManager,
};
use crate::identifiers::DocumentId;
use crate::monitoring::PerformanceMonitor;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tempfile::TempDir;
#[tokio::test]
async fn test_error_handling_system_integration() {
    // End-to-end smoke test: health monitoring, recovery handling, and data
    // integrity exercised together against one temp directory.
    let work_dir = TempDir::new().unwrap();
    let backups_path = work_dir.path().join("backups");

    // Seed a storage instance with a single document.
    let mut text_store = DocumentTextStorage::create(&work_dir, 10 * 1024 * 1024).unwrap();
    let document = DocumentId::new();
    let sample = "This is a test document for error handling verification.";
    text_store.store_text(document, sample).unwrap();
    let shared_store = Arc::new(text_store);

    // Health monitor wired to a shared performance monitor; a fresh store
    // with one document must report healthy.
    let perf = Arc::new(PerformanceMonitor::new());
    let mut monitor = TextStorageHealthMonitor::new(
        Arc::clone(&shared_store),
        Duration::from_secs(1),
        Some(Arc::clone(&perf)),
    );
    let status = monitor.check_health().await.unwrap();
    assert!(matches!(status, TextStorageHealth::Healthy));
    monitor.report_health_metrics().await;

    // Recovery manager over a second, independent storage instance.
    let config = RecoveryConfig {
        max_recovery_attempts: 2,
        backup_before_recovery: true,
        recovery_strategy: RecoveryStrategy::Conservative,
    };
    let recovery_store =
        DocumentTextStorage::create(work_dir.path().join("recovery"), 10 * 1024 * 1024).unwrap();
    let guarded_store = Arc::new(Mutex::new(recovery_store));
    let mut recovery = TextStorageRecoveryManager::new(
        guarded_store,
        backups_path,
        config,
        Some(Arc::clone(&perf)),
    )
    .unwrap();

    // An injected corruption error is expected to route to manual
    // intervention with non-empty guidance.
    let injected = ShardexError::text_corruption("Test corruption for recovery");
    let outcome = recovery.attempt_recovery(&injected).await.unwrap();
    if let crate::error_handling::RecoveryResult::RequiresManualIntervention {
        reason,
        suggested_actions,
    } = outcome
    {
        assert!(!reason.is_empty());
        assert!(!suggested_actions.is_empty());
    } else {
        panic!("Expected manual intervention for test corruption");
    }

    // The original document must still round-trip after the recovery attempt.
    let round_trip = shared_store.get_text(document).unwrap();
    assert_eq!(round_trip, sample);
    println!("Error handling system integration test passed");
}
#[tokio::test]
async fn test_health_monitoring_scenarios() {
    let temp_dir = TempDir::new().unwrap();

    // Scenario 1: a freshly created, empty storage reports healthy.
    {
        let empty_store = Arc::new(
            DocumentTextStorage::create(temp_dir.path().join("empty"), 1024 * 1024).unwrap(),
        );
        let mut checker =
            TextStorageHealthMonitor::new(empty_store, Duration::from_millis(100), None);
        assert!(matches!(
            checker.check_health().await.unwrap(),
            TextStorageHealth::Healthy
        ));
    }

    // Scenario 2: a storage populated with many documents stays healthy,
    // for both a regular check and a forced re-check.
    {
        let mut populated =
            DocumentTextStorage::create(temp_dir.path().join("with_data"), 1024 * 1024).unwrap();
        for n in 0..100 {
            let text = format!("Test document number {} with various content lengths.", n);
            populated.store_text(DocumentId::new(), &text).unwrap();
        }
        let mut checker =
            TextStorageHealthMonitor::new(Arc::new(populated), Duration::from_millis(100), None);
        assert!(matches!(
            checker.check_health().await.unwrap(),
            TextStorageHealth::Healthy
        ));
        assert!(matches!(
            checker.force_check().await.unwrap(),
            TextStorageHealth::Healthy
        ));
    }
}
#[tokio::test]
async fn test_backup_restore_system() {
    let temp_dir = TempDir::new().unwrap();
    let backup_root = temp_dir.path().join("backups");

    // Manager keeping at most 5 uncompressed backups for up to an hour.
    let policy = BackupRetentionPolicy {
        max_backups: 5,
        max_age: Duration::from_secs(3600),
        compression_enabled: false,
    };
    let manager = BackupManager::new(backup_root.clone(), policy).unwrap();

    // A named backup keeps the caller-supplied id and honors the
    // compression setting.
    let named = manager
        .create_backup(Some("test_backup".to_string()))
        .await
        .unwrap();
    assert_eq!(named.id, "test_backup");
    assert!(!named.compression_used);

    // Emergency backups get an auto-generated, prefixed id.
    let emergency = manager.create_emergency_backup().await.unwrap();
    assert!(emergency.id.starts_with("emergency_"));

    let listed = manager.list_backups().await.unwrap();
    println!("Found {} backups after creating test_backup + emergency", listed.len());

    // Restoring must report the restored id and the implicit emergency
    // backup taken of the pre-restore state.
    let restored = manager.restore_from_backup("test_backup").await.unwrap();
    assert_eq!(restored.backup_id, "test_backup");
    assert!(restored.emergency_backup_id.starts_with("emergency_"));

    let remaining = manager.list_backups().await.unwrap();
    println!("Found {} backups after restore", remaining.len());
    assert!(
        !remaining.is_empty(),
        "Should have at least some backups after restore"
    );
}
#[tokio::test]
async fn test_error_metrics_integration() {
let temp_dir = TempDir::new().unwrap();
let mut storage = DocumentTextStorage::create(&temp_dir, 1000).unwrap();
let doc_id = DocumentId::new();
let large_text = "A".repeat(1500); let result = storage.store_text(doc_id, &large_text);
match result {
Err(error) => {
storage.report_error_metrics(&error, "store_text");
match error {
ShardexError::DocumentTooLarge { size, max_size } => {
assert_eq!(size, 1500);
assert_eq!(max_size, 1000);
}
_ => panic!("Expected DocumentTooLarge error"),
}
}
Ok(_) => panic!("Expected size limit error"),
}
let small_text = "Small text that fits.";
storage.store_text(doc_id, small_text).unwrap();
storage.report_operation_metrics("store_text", Duration::from_millis(10), small_text.len());
let nonexistent_doc = DocumentId::new();
let result = storage.get_text(nonexistent_doc);
match result {
Err(error) => {
storage.report_error_metrics(&error, "get_text");
match error {
ShardexError::DocumentTextNotFound { document_id } => {
assert_eq!(document_id, nonexistent_doc.to_string());
}
_ => panic!("Expected DocumentTextNotFound error"),
}
}
Ok(_) => panic!("Expected not found error"),
}
}
#[tokio::test]
async fn test_storage_validation_methods() {
    let temp_dir = TempDir::new().unwrap();
    let mut store = DocumentTextStorage::create(&temp_dir, 1024 * 1024).unwrap();

    // Every validation pass succeeds on a brand-new storage.
    store.validate_headers().unwrap();
    store.validate_file_sizes().unwrap();
    store.verify_checksums().unwrap();

    // Store one document and re-run the structural validations.
    let doc = DocumentId::new();
    let body = "Validation test document with reasonable length.";
    store.store_text(doc, body).unwrap();
    store.validate_headers().unwrap();
    store.validate_file_sizes().unwrap();

    // The single entry must be addressable and internally consistent.
    assert_eq!(store.get_entry_count(), 1);
    let entry = store.get_entry_at_index(0).unwrap();
    assert_eq!(entry.document_id, doc);
    store.validate_entry_data_region(&entry).unwrap();

    // Reading by raw offset/length must round-trip the stored text.
    let raw = store
        .read_text_at_offset_public(entry.text_offset, entry.text_length)
        .unwrap();
    assert_eq!(raw, body);

    // Reloading from the backing files must preserve the document.
    let (_index_path, _data_path) = store.get_file_paths();
    store.reload_from_files().await.unwrap();
    assert_eq!(store.get_text(doc).unwrap(), body);
}
#[tokio::test]
async fn test_recovery_manager_scenarios() {
    let temp_dir = TempDir::new().unwrap();
    let backup_dir = temp_dir.path().join("recovery_backups");
    let storage = DocumentTextStorage::create(&temp_dir, 1024 * 1024).unwrap();
    let mut manager = TextStorageRecoveryManager::new(
        Arc::new(Mutex::new(storage)),
        backup_dir,
        RecoveryConfig::default(),
        None,
    )
    .unwrap();

    // A spread of error categories the recovery manager must classify
    // without panicking.
    let errors = vec![
        ShardexError::text_corruption("Index file corruption"),
        ShardexError::Io(std::io::Error::new(
            std::io::ErrorKind::PermissionDenied,
            "Access denied",
        )),
        ShardexError::invalid_range(10, 20, 15),
        ShardexError::document_too_large(1_000_000, 500_000),
    ];
    for err in errors {
        let outcome = manager.attempt_recovery(&err).await.unwrap();
        // Manual-intervention outcomes must carry usable guidance; every
        // other outcome (NotRecoverable, etc.) is acceptable as-is.
        if let crate::error_handling::RecoveryResult::RequiresManualIntervention {
            reason,
            suggested_actions,
        } = outcome
        {
            assert!(!reason.is_empty());
            assert!(!suggested_actions.is_empty());
        }
    }
}
#[tokio::test]
async fn test_health_monitor_performance_integration() {
    let temp_dir = TempDir::new().unwrap();
    let store = Arc::new(DocumentTextStorage::create(&temp_dir, 1024 * 1024).unwrap());
    let perf = Arc::new(PerformanceMonitor::new());
    let mut monitor =
        TextStorageHealthMonitor::new(store, Duration::from_millis(50), Some(perf.clone()));

    // Repeated check/report cycles should all observe a healthy store.
    for _ in 0..5 {
        assert!(matches!(
            monitor.check_health().await.unwrap(),
            TextStorageHealth::Healthy
        ));
        monitor.report_health_metrics().await;
        tokio::time::sleep(Duration::from_millis(10)).await;
    }

    // The shared performance monitor should have accumulated some uptime.
    let stats = perf.get_detailed_stats().await;
    assert!(stats.uptime > Duration::ZERO);
}
#[tokio::test]
async fn test_error_handling_stress() {
    let temp_dir = TempDir::new().unwrap();
    let backup_dir = temp_dir.path().join("stress_backups");
    let storage = Arc::new(DocumentTextStorage::create(&temp_dir, 1000).unwrap());

    // Health monitor behind an async mutex so a spawned task can drive it.
    let monitor = Arc::new(tokio::sync::Mutex::new(TextStorageHealthMonitor::new(
        Arc::clone(&storage),
        Duration::from_millis(100),
        None,
    )));

    // Tight retention: at most 3 backups, 60-second age cap.
    let policy = BackupRetentionPolicy {
        max_backups: 3,
        max_age: Duration::from_secs(60),
        compression_enabled: false,
    };
    let backups = Arc::new(BackupManager::new(backup_dir, policy).unwrap());

    // Task 1: hammer the health check.
    let monitor_task = {
        let monitor = Arc::clone(&monitor);
        tokio::spawn(async move {
            for _ in 0..10 {
                let mut guard = monitor.lock().await;
                let _health = guard.check_health().await.unwrap();
                tokio::time::sleep(Duration::from_millis(50)).await;
            }
        })
    };

    // Task 2: create backups concurrently with the health checks.
    let backup_task = {
        let backups = Arc::clone(&backups);
        tokio::spawn(async move {
            for i in 0..5 {
                let backup_name = format!("stress_backup_{}", i);
                let _backup = backups.create_backup(Some(backup_name)).await.unwrap();
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        })
    };

    monitor_task.await.unwrap();
    backup_task.await.unwrap();

    // Retention must have been enforced and the store must still be healthy.
    assert!(backups.list_backups().await.unwrap().len() <= 3);
    assert!(matches!(
        monitor.lock().await.check_health().await.unwrap(),
        TextStorageHealth::Healthy
    ));
}
#[cfg(test)]
mod recovery_system_tests {
    use super::*;

    // Building RecoveryConfig via struct-update syntax must preserve the
    // explicitly overridden fields for each strategy variant.
    #[test]
    fn test_recovery_strategy_configuration() {
        let conservative = RecoveryConfig {
            recovery_strategy: RecoveryStrategy::Conservative,
            ..RecoveryConfig::default()
        };
        let aggressive = RecoveryConfig {
            recovery_strategy: RecoveryStrategy::Aggressive,
            max_recovery_attempts: 5,
            ..RecoveryConfig::default()
        };
        let interactive = RecoveryConfig {
            recovery_strategy: RecoveryStrategy::Interactive,
            backup_before_recovery: true,
            ..RecoveryConfig::default()
        };

        assert_eq!(conservative.recovery_strategy, RecoveryStrategy::Conservative);
        assert_eq!(aggressive.recovery_strategy, RecoveryStrategy::Aggressive);
        assert_eq!(aggressive.max_recovery_attempts, 5);
        assert_eq!(interactive.recovery_strategy, RecoveryStrategy::Interactive);
        assert!(interactive.backup_before_recovery);
    }

    // Creating more backups than max_backups allows must trigger pruning,
    // both immediately and again after the age limit has elapsed.
    #[tokio::test]
    async fn test_backup_retention_enforcement() {
        let temp_dir = TempDir::new().unwrap();
        let policy = BackupRetentionPolicy {
            max_backups: 2,
            max_age: Duration::from_secs(1),
            compression_enabled: false,
        };
        let manager =
            BackupManager::new(temp_dir.path().join("retention_test"), policy).unwrap();

        // Four backups against a limit of two.
        for i in 0..4 {
            manager
                .create_backup(Some(format!("retention_test_{}", i)))
                .await
                .unwrap();
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
        let after_creation = manager.list_backups().await.unwrap();
        assert!(
            after_creation.len() <= 2,
            "Should enforce max_backups limit of 2, found {}",
            after_creation.len()
        );

        // Let everything age past max_age, then create once more to
        // trigger the age-based cleanup path.
        tokio::time::sleep(Duration::from_secs(2)).await;
        manager
            .create_backup(Some("trigger_cleanup".to_string()))
            .await
            .unwrap();
        assert!(
            manager.list_backups().await.unwrap().len() <= 2,
            "Should maintain max_backups after age cleanup"
        );
    }
}