use crate::core::types::{TrackingError, TrackingResult};
use std::fmt;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// Errors that can occur during export operations, carrying structured
/// context for diagnostics and recovery decisions.
#[derive(Debug, Clone)]
pub enum ExportError {
    /// A shard failed during parallel processing.
    ParallelProcessingError {
        shard_index: usize,
        thread_id: String,
        error_message: String,
        partial_results: Option<Vec<u8>>,
    },
    /// A resource limit (memory, disk, CPU, ...) was exceeded.
    ResourceLimitExceeded {
        resource_type: ResourceType,
        limit: u64,
        actual: u64,
        suggested_action: String,
    },
    /// Exported data failed a validation check.
    DataQualityError {
        validation_type: ValidationType,
        expected: String,
        actual: String,
        affected_records: usize,
    },
    /// A performance metric exceeded its configured threshold.
    PerformanceThresholdExceeded {
        metric: PerformanceMetric,
        threshold: f64,
        actual: f64,
        stage: ExportStage,
    },
    /// Concurrent operations conflicted with each other.
    ConcurrencyConflict {
        operation: String,
        conflict_type: ConflictType,
        retry_count: usize,
    },
    /// Exported data was found to be corrupted.
    DataCorruption {
        corruption_type: CorruptionType,
        affected_data: String,
        recovery_possible: bool,
    },
    /// Not enough memory or disk space is available for the export.
    InsufficientResources {
        required_memory: usize,
        available_memory: usize,
        required_disk: usize,
        available_disk: usize,
    },
    /// The export was interrupted before completion.
    ExportInterrupted {
        stage: ExportStage,
        progress_percentage: f64,
        partial_output_path: Option<String>,
    },
}

/// Kinds of resources whose limits can be exceeded.
#[derive(Debug, Clone, PartialEq)]
pub enum ResourceType {
    Memory,
    Disk,
    CPU,
    FileHandles,
    ThreadPool,
}

/// Kinds of validation performed on exported data.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ValidationType {
    JsonStructure,
    DataIntegrity,
    AllocationCount,
    FileSize,
    Encoding,
}

/// Performance metrics tracked during export.
#[derive(Debug, Clone, PartialEq)]
pub enum PerformanceMetric {
    ExportTime,
    MemoryUsage,
    ThroughputRate,
    ErrorRate,
    ResponseTime,
}

/// Stages of the export pipeline.
#[derive(Debug, Clone, PartialEq)]
pub enum ExportStage {
    Initialization,
    DataLocalization,
    ParallelProcessing,
    Writing,
    Validation,
    Finalization,
}

/// Kinds of concurrency conflicts.
#[derive(Debug, Clone, PartialEq)]
pub enum ConflictType {
    LockContention,
    DataRace,
    ResourceContention,
    ThreadPoolExhaustion,
}

/// Kinds of data corruption.
#[derive(Debug, Clone, PartialEq)]
pub enum CorruptionType {
    IncompleteData,
    InvalidFormat,
    ChecksumMismatch,
    StructuralDamage,
}

impl fmt::Display for ExportError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ExportError::ParallelProcessingError {
                shard_index,
                thread_id,
                error_message,
                ..
            } => {
                write!(f, "parallel processing error - shard {shard_index} (thread {thread_id}): {error_message}")
            }
            ExportError::ResourceLimitExceeded {
                resource_type,
                limit,
                actual,
                suggested_action,
            } => {
                write!(f, "resource limit exceeded - {resource_type:?}: limit {limit}, actual {actual}. suggested action: {suggested_action}")
            }
            ExportError::DataQualityError {
                validation_type,
                expected,
                actual,
                affected_records,
            } => {
                write!(f, "data quality error - {validation_type:?}: expected {expected}, actual {actual}, affected records {affected_records}")
            }
            ExportError::PerformanceThresholdExceeded {
                metric,
                threshold,
                actual,
                stage,
            } => {
                write!(f, "performance threshold exceeded - {metric:?} in {stage:?}: threshold {threshold}, actual {actual}")
            }
            ExportError::ConcurrencyConflict {
                operation,
                conflict_type,
                retry_count,
            } => {
                write!(f, "concurrency conflict - operation {operation}, type {conflict_type:?}, retry count {retry_count}")
            }
            ExportError::DataCorruption {
                corruption_type,
                affected_data,
                recovery_possible,
            } => {
                write!(f, "data corruption - type {corruption_type:?}, affected data {affected_data}, recovery possible: {recovery_possible}")
            }
            ExportError::InsufficientResources {
                required_memory,
                available_memory,
                required_disk,
                available_disk,
            } => {
                write!(f, "insufficient resources - required memory {required_memory}MB, available {available_memory}MB, required disk {required_disk}MB, available {available_disk}MB")
            }
            ExportError::ExportInterrupted {
                stage,
                progress_percentage,
                partial_output_path,
            } => {
                write!(f, "export interrupted - stage {stage:?}, progress {progress_percentage:.1}%, partial output: {partial_output_path:?}")
            }
        }
    }
}

impl std::error::Error for ExportError {}

/// Errors that can occur while validating exported files.
#[derive(Debug, Clone)]
pub enum ValidationError {
    /// The file to validate could not be accessed.
    FileAccessError {
        file_path: String,
        error: String,
    },
    /// The file contents could not be parsed as JSON.
    JsonParsingError {
        file_path: String,
        error: String,
    },
    /// Validation did not finish within the allowed time.
    TimeoutError {
        file_path: String,
        timeout_duration: std::time::Duration,
    },
    /// Validation was cancelled before completion.
    CancelledError {
        file_path: String,
        reason: String,
    },
    /// The validation configuration is invalid.
    ConfigurationError {
        error: String,
    },
    /// An unexpected internal error occurred.
    InternalError {
        error: String,
    },
}

impl fmt::Display for ValidationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ValidationError::FileAccessError { file_path, error } => {
                write!(f, "file access error for {}: {}", file_path, error)
            }
            ValidationError::JsonParsingError { file_path, error } => {
                write!(f, "JSON parsing error in {}: {}", file_path, error)
            }
            ValidationError::TimeoutError {
                file_path,
                timeout_duration,
            } => {
                write!(
                    f,
                    "validation timeout for {} after {:?}",
                    file_path, timeout_duration
                )
            }
            ValidationError::CancelledError { file_path, reason } => {
                write!(f, "validation cancelled for {}: {}", file_path, reason)
            }
            ValidationError::ConfigurationError { error } => {
                write!(f, "validation configuration error: {}", error)
            }
            ValidationError::InternalError { error } => {
                write!(f, "internal validation error: {}", error)
            }
        }
    }
}

impl std::error::Error for ValidationError {}

impl From<ValidationError> for TrackingError {
    fn from(error: ValidationError) -> Self {
        TrackingError::ExportError(error.to_string())
    }
}

impl From<ExportError> for TrackingError {
    fn from(error: ExportError) -> Self {
        TrackingError::ExportError(error.to_string())
    }
}

/// Logger that records export operations, performance metrics, and error
/// statistics, and can summarize them as a `PerformanceReport`.
#[derive(Debug)]
pub struct PerformanceLogger {
    log_level: LogLevel,
    metrics_collector: Arc<MetricsCollector>,
    error_stats: Arc<ErrorStatistics>,
    start_time: Instant,
}

/// Log verbosity levels, ordered from most to least severe.
#[derive(Debug, Clone, PartialEq)]
pub enum LogLevel {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}

/// Thread-safe counters for operation and memory metrics.
#[derive(Debug)]
pub struct MetricsCollector {
    total_operations: AtomicUsize,
    successful_operations: AtomicUsize,
    failed_operations: AtomicUsize,
    total_processing_time_ms: AtomicUsize,
    peak_memory_usage: AtomicUsize,
    current_memory_usage: AtomicUsize,
}

/// Thread-safe counters for each category of export error.
#[derive(Debug)]
pub struct ErrorStatistics {
    parallel_processing_errors: AtomicUsize,
    resource_limit_errors: AtomicUsize,
    data_quality_errors: AtomicUsize,
    performance_threshold_errors: AtomicUsize,
    concurrency_conflict_errors: AtomicUsize,
    data_corruption_errors: AtomicUsize,
    insufficient_resources_errors: AtomicUsize,
    export_interrupted_errors: AtomicUsize,
}

impl PerformanceLogger {
    /// Creates a new logger with the given log level.
    pub fn new(log_level: LogLevel) -> Self {
        Self {
            log_level,
            metrics_collector: Arc::new(MetricsCollector::new()),
            error_stats: Arc::new(ErrorStatistics::new()),
            start_time: Instant::now(),
        }
    }

    /// Logs the start of an operation and increments the operation counter.
    pub fn log_operation_start(&self, operation: &str, details: &str) {
        if self.should_log(LogLevel::Info) {
            println!(
                "🚀 [{}] start operation: {} - {}",
                self.format_timestamp(),
                operation,
                details
            );
        }
        self.metrics_collector
            .total_operations
            .fetch_add(1, Ordering::Relaxed);
    }

    /// Logs a successful operation and records its duration.
    pub fn log_operation_success(&self, operation: &str, duration: Duration, details: &str) {
        if self.should_log(LogLevel::Info) {
            println!(
                "✅ [{}] operation success: {} ({:?}) - {}",
                self.format_timestamp(),
                operation,
                duration,
                details
            );
        }
        self.metrics_collector
            .successful_operations
            .fetch_add(1, Ordering::Relaxed);
        self.metrics_collector
            .total_processing_time_ms
            .fetch_add(duration.as_millis() as usize, Ordering::Relaxed);
    }

    /// Logs a failed operation and updates the error statistics.
    pub fn log_operation_failure(&self, operation: &str, error: &ExportError, duration: Duration) {
        if self.should_log(LogLevel::Error) {
            println!(
                "❌ [{}] operation failure: {} ({:?}) - {}",
                self.format_timestamp(),
                operation,
                duration,
                error
            );
        }
        self.metrics_collector
            .failed_operations
            .fetch_add(1, Ordering::Relaxed);
        self.update_error_statistics(error);
    }

    /// Logs a performance metric and warns if it exceeds the optional threshold.
    pub fn log_performance_metric(
        &self,
        metric: PerformanceMetric,
        value: f64,
        threshold: Option<f64>,
    ) {
        if self.should_log(LogLevel::Debug) {
            let threshold_info = if let Some(t) = threshold {
                format!(" (threshold: {t})")
            } else {
                String::new()
            };
            println!(
                "📊 [{}] performance metric - {metric:?}: {value}{threshold_info}",
                self.format_timestamp()
            );
        }

        if let Some(threshold) = threshold {
            if value > threshold {
                let error = ExportError::PerformanceThresholdExceeded {
                    metric,
                    threshold,
                    actual: value,
                    // The exact stage is not known here, so report the parallel
                    // processing stage.
                    stage: ExportStage::ParallelProcessing,
                };
                self.log_warning(&format!("performance threshold exceeded: {error}"));
            }
        }
    }

    /// Logs current and peak memory usage and updates the stored values.
    pub fn log_memory_usage(&self, current_usage: usize, peak_usage: usize) {
        if self.should_log(LogLevel::Debug) {
            println!(
                "💾 [{}] memory usage - current: {:.2}MB, peak: {:.2}MB",
                self.format_timestamp(),
                current_usage as f64 / 1024.0 / 1024.0,
                peak_usage as f64 / 1024.0 / 1024.0
            );
        }

        self.metrics_collector
            .current_memory_usage
            .store(current_usage, Ordering::Relaxed);

        let current_peak = self
            .metrics_collector
            .peak_memory_usage
            .load(Ordering::Relaxed);
        if peak_usage > current_peak {
            self.metrics_collector
                .peak_memory_usage
                .store(peak_usage, Ordering::Relaxed);
        }
    }

    /// Logs a warning message.
    pub fn log_warning(&self, message: &str) {
        if self.should_log(LogLevel::Warn) {
            println!("⚠️ [{}] warning: {}", self.format_timestamp(), message);
        }
    }

    /// Logs a debug message.
    pub fn log_debug(&self, message: &str) {
        if self.should_log(LogLevel::Debug) {
            println!("🔍 [{}] debug: {}", self.format_timestamp(), message);
        }
    }

    /// Logs an export error and updates the error statistics.
    pub fn log_error(&self, error: &ExportError) {
        if self.should_log(LogLevel::Error) {
            println!("💥 [{}] error: {}", self.format_timestamp(), error);
        }
        self.update_error_statistics(error);
    }

    /// Builds a `PerformanceReport` from the metrics collected so far.
    pub fn generate_performance_report(&self) -> PerformanceReport {
        let total_time = self.start_time.elapsed();
        let total_ops = self
            .metrics_collector
            .total_operations
            .load(Ordering::Relaxed);
        let successful_ops = self
            .metrics_collector
            .successful_operations
            .load(Ordering::Relaxed);
        let failed_ops = self
            .metrics_collector
            .failed_operations
            .load(Ordering::Relaxed);
        let total_processing_time = self
            .metrics_collector
            .total_processing_time_ms
            .load(Ordering::Relaxed);
        let peak_memory = self
            .metrics_collector
            .peak_memory_usage
            .load(Ordering::Relaxed);
        let current_memory = self
            .metrics_collector
            .current_memory_usage
            .load(Ordering::Relaxed);

        let success_rate = if total_ops > 0 {
            (successful_ops as f64 / total_ops as f64) * 100.0
        } else {
            0.0
        };

        let avg_processing_time = if successful_ops > 0 {
            total_processing_time as f64 / successful_ops as f64
        } else {
            0.0
        };

        PerformanceReport {
            total_runtime: total_time,
            total_operations: total_ops,
            successful_operations: successful_ops,
            failed_operations: failed_ops,
            success_rate,
            average_processing_time_ms: avg_processing_time,
            peak_memory_usage_mb: peak_memory as f64 / 1024.0 / 1024.0,
            current_memory_usage_mb: current_memory as f64 / 1024.0 / 1024.0,
            error_breakdown: self.get_error_breakdown(),
        }
    }

    fn should_log(&self, level: LogLevel) -> bool {
        match (&self.log_level, &level) {
            (LogLevel::Error, LogLevel::Error) => true,
            (LogLevel::Warn, LogLevel::Error | LogLevel::Warn) => true,
            (LogLevel::Info, LogLevel::Error | LogLevel::Warn | LogLevel::Info) => true,
            (
                LogLevel::Debug,
                LogLevel::Error | LogLevel::Warn | LogLevel::Info | LogLevel::Debug,
            ) => true,
            (LogLevel::Trace, _) => true,
            _ => false,
        }
    }

    fn format_timestamp(&self) -> String {
        let elapsed = self.start_time.elapsed();
        format!("{:>8.3}s", elapsed.as_secs_f64())
    }

    fn update_error_statistics(&self, error: &ExportError) {
        match error {
            ExportError::ParallelProcessingError { .. } => {
                self.error_stats
                    .parallel_processing_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ResourceLimitExceeded { .. } => {
                self.error_stats
                    .resource_limit_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::DataQualityError { .. } => {
                self.error_stats
                    .data_quality_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::PerformanceThresholdExceeded { .. } => {
                self.error_stats
                    .performance_threshold_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ConcurrencyConflict { .. } => {
                self.error_stats
                    .concurrency_conflict_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::DataCorruption { .. } => {
                self.error_stats
                    .data_corruption_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::InsufficientResources { .. } => {
                self.error_stats
                    .insufficient_resources_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ExportInterrupted { .. } => {
                self.error_stats
                    .export_interrupted_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
        }
    }

    fn get_error_breakdown(&self) -> ErrorBreakdown {
        ErrorBreakdown {
            parallel_processing_errors: self
                .error_stats
                .parallel_processing_errors
                .load(Ordering::Relaxed),
            resource_limit_errors: self
                .error_stats
                .resource_limit_errors
                .load(Ordering::Relaxed),
            data_quality_errors: self.error_stats.data_quality_errors.load(Ordering::Relaxed),
            performance_threshold_errors: self
                .error_stats
                .performance_threshold_errors
                .load(Ordering::Relaxed),
            concurrency_conflict_errors: self
                .error_stats
                .concurrency_conflict_errors
                .load(Ordering::Relaxed),
            data_corruption_errors: self
                .error_stats
                .data_corruption_errors
                .load(Ordering::Relaxed),
            insufficient_resources_errors: self
                .error_stats
                .insufficient_resources_errors
                .load(Ordering::Relaxed),
            export_interrupted_errors: self
                .error_stats
                .export_interrupted_errors
                .load(Ordering::Relaxed),
        }
    }
}

impl MetricsCollector {
    fn new() -> Self {
        Self {
            total_operations: AtomicUsize::new(0),
            successful_operations: AtomicUsize::new(0),
            failed_operations: AtomicUsize::new(0),
            total_processing_time_ms: AtomicUsize::new(0),
            peak_memory_usage: AtomicUsize::new(0),
            current_memory_usage: AtomicUsize::new(0),
        }
    }
}

impl ErrorStatistics {
    fn new() -> Self {
        Self {
            parallel_processing_errors: AtomicUsize::new(0),
            resource_limit_errors: AtomicUsize::new(0),
            data_quality_errors: AtomicUsize::new(0),
            performance_threshold_errors: AtomicUsize::new(0),
            concurrency_conflict_errors: AtomicUsize::new(0),
            data_corruption_errors: AtomicUsize::new(0),
            insufficient_resources_errors: AtomicUsize::new(0),
            export_interrupted_errors: AtomicUsize::new(0),
        }
    }
}

/// Summary of operations, timing, memory usage, and errors collected by a
/// `PerformanceLogger`.
#[derive(Debug, Clone)]
pub struct PerformanceReport {
    pub total_runtime: Duration,
    pub total_operations: usize,
    pub successful_operations: usize,
    pub failed_operations: usize,
    pub success_rate: f64,
    pub average_processing_time_ms: f64,
    pub peak_memory_usage_mb: f64,
    pub current_memory_usage_mb: f64,
    pub error_breakdown: ErrorBreakdown,
}

/// Per-category error counts.
#[derive(Debug, Clone)]
pub struct ErrorBreakdown {
    pub parallel_processing_errors: usize,
    pub resource_limit_errors: usize,
    pub data_quality_errors: usize,
    pub performance_threshold_errors: usize,
    pub concurrency_conflict_errors: usize,
    pub data_corruption_errors: usize,
    pub insufficient_resources_errors: usize,
    pub export_interrupted_errors: usize,
}

impl PerformanceReport {
    /// Prints the report to stdout.
    pub fn print_detailed_report(&self) {
        println!("\n📊 detailed performance report");
        println!("================");

        println!("⏱️ runtime: {:?}", self.total_runtime);
        println!("🔢 total operations: {}", self.total_operations);
        println!(
            "✅ successful operations: {} ({:.1}%)",
            self.successful_operations, self.success_rate
        );
        println!("❌ failed operations: {}", self.failed_operations);
        println!(
            "⚡ average processing time: {:.2}ms",
            self.average_processing_time_ms
        );
        println!("💾 peak memory usage: {:.2}MB", self.peak_memory_usage_mb);
        println!(
            "💾 current memory usage: {:.2}MB",
            self.current_memory_usage_mb
        );

        println!("\n🚨 error breakdown:");
        println!(
            "  parallel processing errors: {}",
            self.error_breakdown.parallel_processing_errors
        );
        println!(
            "  resource limit errors: {}",
            self.error_breakdown.resource_limit_errors
        );
        println!(
            "  data quality errors: {}",
            self.error_breakdown.data_quality_errors
        );
        println!(
            "  performance threshold errors: {}",
            self.error_breakdown.performance_threshold_errors
        );
        println!(
            "  concurrency conflict errors: {}",
            self.error_breakdown.concurrency_conflict_errors
        );
        println!(
            "  data corruption errors: {}",
            self.error_breakdown.data_corruption_errors
        );
        println!(
            "  insufficient resources errors: {}",
            self.error_breakdown.insufficient_resources_errors
        );
        println!(
            "  export interrupted errors: {}",
            self.error_breakdown.export_interrupted_errors
        );
    }
}

/// Monitors memory, disk, and CPU usage against configured limits.
#[derive(Debug)]
pub struct ResourceMonitor {
    memory_limit: usize,
    disk_limit: usize,
    cpu_limit: f64,
}

impl ResourceMonitor {
    /// Creates a monitor with memory and disk limits given in MB and a CPU usage limit.
    pub fn new(memory_limit_mb: usize, disk_limit_mb: usize, cpu_limit_percent: f64) -> Self {
        Self {
            memory_limit: memory_limit_mb * 1024 * 1024,
            disk_limit: disk_limit_mb * 1024 * 1024,
            cpu_limit: cpu_limit_percent,
        }
    }

    /// Checks current resource usage against the configured limits, returning a
    /// `ResourceLimitExceeded` error if any limit is exceeded.
    pub fn check_resource_usage(&self) -> TrackingResult<ResourceUsage> {
        let memory_usage = self.get_memory_usage()?;
        let disk_usage = self.get_disk_usage()?;
        let cpu_usage = self.get_cpu_usage()?;

        if memory_usage > self.memory_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::Memory,
                limit: self.memory_limit as u64,
                actual: memory_usage as u64,
                suggested_action: "reduce parallelism or enable streaming processing".to_string(),
            }
            .into());
        }

        if disk_usage > self.disk_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::Disk,
                limit: self.disk_limit as u64,
                actual: disk_usage as u64,
                suggested_action: "clean up temporary files or select other output location"
                    .to_string(),
            }
            .into());
        }

        if cpu_usage > self.cpu_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::CPU,
                limit: (self.cpu_limit * 100.0) as u64,
                actual: (cpu_usage * 100.0) as u64,
                suggested_action: "reduce thread count or lower processing priority".to_string(),
            }
            .into());
        }

        Ok(ResourceUsage {
            memory_usage,
            disk_usage,
            cpu_usage,
            memory_limit: self.memory_limit,
            disk_limit: self.disk_limit,
            cpu_limit: self.cpu_limit,
        })
    }

    fn get_memory_usage(&self) -> TrackingResult<usize> {
        // Simplified placeholder; a full implementation would query the operating system.
        Ok(0)
    }

    fn get_disk_usage(&self) -> TrackingResult<usize> {
        // Simplified placeholder; a full implementation would query the operating system.
        Ok(0)
    }

    fn get_cpu_usage(&self) -> TrackingResult<f64> {
        // Simplified placeholder; a full implementation would query the operating system.
        Ok(0.0)
    }
}

/// Snapshot of current resource usage together with the configured limits.
#[derive(Debug, Clone)]
pub struct ResourceUsage {
    pub memory_usage: usize,
    pub disk_usage: usize,
    pub cpu_usage: f64,
    pub memory_limit: usize,
    pub disk_limit: usize,
    pub cpu_limit: f64,
}

impl ResourceUsage {
    /// Memory usage as a percentage of the memory limit (0 if no limit is set).
    pub fn memory_usage_percentage(&self) -> f64 {
        if self.memory_limit > 0 {
            (self.memory_usage as f64 / self.memory_limit as f64) * 100.0
        } else {
            0.0
        }
    }

    /// Disk usage as a percentage of the disk limit (0 if no limit is set).
    pub fn disk_usage_percentage(&self) -> f64 {
        if self.disk_limit > 0 {
            (self.disk_usage as f64 / self.disk_limit as f64) * 100.0
        } else {
            0.0
        }
    }

    /// CPU usage as a percentage.
    pub fn cpu_usage_percentage(&self) -> f64 {
        self.cpu_usage * 100.0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_export_error_display() {
        let error = ExportError::ParallelProcessingError {
            shard_index: 5,
            thread_id: "thread-1".to_string(),
            error_message: "serialization failed".to_string(),
            partial_results: None,
        };

        let display = format!("{error}");
        assert!(display.contains("parallel processing error"));
        assert!(display.contains("shard 5"));
        assert!(display.contains("thread-1"));
    }
992
993 #[test]
994 fn test_performance_logger() {
995 let logger = PerformanceLogger::new(LogLevel::Info);
996
997 logger.log_operation_start("test operation", "test details");
998 logger.log_operation_success("test operation", Duration::from_millis(100), "success");
999
1000 let report = logger.generate_performance_report();
1001 assert_eq!(report.total_operations, 1);
1002 assert_eq!(report.successful_operations, 1);
1003 assert_eq!(report.failed_operations, 0);
1004 assert_eq!(report.success_rate, 100.0);
1005 }
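
    // Additional sketch test: a logged failure should be reflected in both the
    // failure counter and the per-category breakdown of the generated report.
    #[test]
    fn test_performance_logger_failure_statistics() {
        let logger = PerformanceLogger::new(LogLevel::Error);

        let error = ExportError::ResourceLimitExceeded {
            resource_type: ResourceType::Memory,
            limit: 1024,
            actual: 2048,
            suggested_action: "reduce parallelism".to_string(),
        };
        logger.log_operation_failure("export", &error, Duration::from_millis(5));

        let report = logger.generate_performance_report();
        assert_eq!(report.failed_operations, 1);
        assert_eq!(report.successful_operations, 0);
        assert_eq!(report.error_breakdown.resource_limit_errors, 1);
    }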

    #[test]
    fn test_resource_monitor() {
        let monitor = ResourceMonitor::new(1024, 2048, 80.0);

        let result = monitor.check_resource_usage();
        assert!(result.is_ok());

        let usage = result.unwrap();
        assert_eq!(usage.memory_limit, 1024 * 1024 * 1024);
        assert_eq!(usage.disk_limit, 2048 * 1024 * 1024);
        assert_eq!(usage.cpu_limit, 80.0);
    }
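
    // Additional sketch test: verifies the level-filtering rules implemented
    // by `should_log` for a Warn-level logger.
    #[test]
    fn test_log_level_filtering() {
        let logger = PerformanceLogger::new(LogLevel::Warn);

        assert!(logger.should_log(LogLevel::Error));
        assert!(logger.should_log(LogLevel::Warn));
        assert!(!logger.should_log(LogLevel::Info));
        assert!(!logger.should_log(LogLevel::Debug));
        assert!(!logger.should_log(LogLevel::Trace));
    }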

    #[test]
    fn test_resource_usage_percentages() {
        let usage = ResourceUsage {
            memory_usage: 512 * 1024 * 1024,
            disk_usage: 1024 * 1024 * 1024,
            cpu_usage: 0.6,
            memory_limit: 1024 * 1024 * 1024,
            disk_limit: 2048 * 1024 * 1024,
            cpu_limit: 0.8,
        };

        assert_eq!(usage.memory_usage_percentage(), 50.0);
        assert_eq!(usage.disk_usage_percentage(), 50.0);
        assert_eq!(usage.cpu_usage_percentage(), 60.0);
    }
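
    // Additional sketch test: zero limits should yield 0% usage rather than
    // dividing by zero.
    #[test]
    fn test_resource_usage_zero_limits() {
        let usage = ResourceUsage {
            memory_usage: 10,
            disk_usage: 10,
            cpu_usage: 0.0,
            memory_limit: 0,
            disk_limit: 0,
            cpu_limit: 0.0,
        };

        assert_eq!(usage.memory_usage_percentage(), 0.0);
        assert_eq!(usage.disk_usage_percentage(), 0.0);
        assert_eq!(usage.cpu_usage_percentage(), 0.0);
    }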
}