memscope_rs/export/error_handling.rs

//! Enhanced error handling and logging system.
//!
//! This module provides specialized error handling, logging, and recovery mechanisms for export systems,
//! ensuring detailed error information and appropriate recovery strategies in various failure scenarios.
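//!
//! A minimal usage sketch (it assumes this module is publicly reachable as
//! `memscope_rs::export::error_handling`; adjust the path to your crate layout):
//!
//! ```rust,ignore
//! use memscope_rs::export::error_handling::{LogLevel, PerformanceLogger, ResourceMonitor};
//! use std::time::Duration;
//!
//! // log an export operation and collect metrics
//! let logger = PerformanceLogger::new(LogLevel::Info);
//! logger.log_operation_start("json_export", "exporting allocation data");
//! logger.log_operation_success("json_export", Duration::from_millis(250), "1000 records written");
//!
//! // check resource limits before continuing (1024 MB memory, 2048 MB disk, 80% CPU)
//! let monitor = ResourceMonitor::new(1024, 2048, 80.0);
//! let usage = monitor.check_resource_usage().expect("resource check failed");
//! println!("memory at {:.1}% of limit", usage.memory_usage_percentage());
//!
//! logger.generate_performance_report().print_detailed_report();
//! ```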

use crate::core::types::{TrackingError, TrackingResult};
use std::fmt;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// export system error type
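///
/// A minimal sketch of constructing and surfacing one of these errors (module path
/// assumed; the field values are illustrative only):
///
/// ```rust,ignore
/// use memscope_rs::core::types::TrackingError;
/// use memscope_rs::export::error_handling::{ExportError, ResourceType};
///
/// let err = ExportError::ResourceLimitExceeded {
///     resource_type: ResourceType::Memory,
///     limit: 1024,
///     actual: 2048,
///     suggested_action: "enable streaming processing".to_string(),
/// };
///
/// // human-readable message via Display
/// println!("{err}");
///
/// // errors convert into the crate-wide TrackingError for propagation
/// let tracking: TrackingError = err.into();
/// ```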
#[derive(Debug, Clone)]
pub enum ExportError {
    /// parallel processing error
    ParallelProcessingError {
        /// shard index
        shard_index: usize,
        /// thread id
        thread_id: String,
        /// error message
        error_message: String,
        /// partial results
        partial_results: Option<Vec<u8>>,
    },
    /// resource limit exceeded error
    ResourceLimitExceeded {
        /// resource type
        resource_type: ResourceType,
        /// limit
        limit: u64,
        /// actual
        actual: u64,
        /// suggested action
        suggested_action: String,
    },
    /// data quality error
    DataQualityError {
        /// validation type
        validation_type: ValidationType,
        /// expected value
        expected: String,
        /// actual value
        actual: String,
        /// affected records
        affected_records: usize,
    },
    /// performance threshold exceeded error
    PerformanceThresholdExceeded {
        /// metric
        metric: PerformanceMetric,
        /// threshold
        threshold: f64,
        /// actual
        actual: f64,
        /// stage
        stage: ExportStage,
    },
    /// concurrency conflict error
    ConcurrencyConflict {
        /// operation
        operation: String,
        /// conflict type
        conflict_type: ConflictType,
        /// retry count
        retry_count: usize,
    },
    /// data corruption error
    DataCorruption {
        /// corruption type
        corruption_type: CorruptionType,
        /// affected data
        affected_data: String,
        /// recovery possible
        recovery_possible: bool,
    },
    /// insufficient resources error
    InsufficientResources {
        /// required memory
        required_memory: usize,
        /// available memory
        available_memory: usize,
        /// required disk
        required_disk: usize,
        /// available disk
        available_disk: usize,
    },
    /// export interrupted error
    ExportInterrupted {
        /// export stage
        stage: ExportStage,
        /// progress percentage
        progress_percentage: f64,
        /// partial output path
        partial_output_path: Option<String>,
    },
}

/// resource type enum
#[derive(Debug, Clone, PartialEq)]
pub enum ResourceType {
    /// memory
    Memory,
    /// disk
    Disk,
    /// cpu
    CPU,
    /// file handles
    FileHandles,
    /// thread pool
    ThreadPool,
}

/// validation type enum
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ValidationType {
    /// json structure
    JsonStructure,
    /// data integrity
    DataIntegrity,
    /// allocation count
    AllocationCount,
    /// file size
    FileSize,
    /// encoding
    Encoding,
}

/// performance metric enum
#[derive(Debug, Clone, PartialEq)]
pub enum PerformanceMetric {
    /// export time
    ExportTime,
    /// memory usage
    MemoryUsage,
    /// throughput rate
    ThroughputRate,
    /// error rate
    ErrorRate,
    /// response time
    ResponseTime,
}

/// export stage enum
#[derive(Debug, Clone, PartialEq)]
pub enum ExportStage {
    /// initialization
    Initialization,
    /// data localization
    DataLocalization,
    /// parallel processing
    ParallelProcessing,
    /// writing
    Writing,
    /// validation
    Validation,
    /// finalization
    Finalization,
}

/// concurrency conflict type
#[derive(Debug, Clone, PartialEq)]
pub enum ConflictType {
    /// lock contention
    LockContention,
    /// data race
    DataRace,
    /// resource contention
    ResourceContention,
    /// thread pool exhaustion
    ThreadPoolExhaustion,
}

/// corruption type enum
#[derive(Debug, Clone, PartialEq)]
pub enum CorruptionType {
    /// incomplete data
    IncompleteData,
    /// invalid format
    InvalidFormat,
    /// checksum mismatch
    ChecksumMismatch,
    /// structural damage
    StructuralDamage,
}

impl fmt::Display for ExportError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ExportError::ParallelProcessingError {
                shard_index,
                thread_id,
                error_message,
                ..
            } => {
                write!(f, "parallel processing error - shard {shard_index} (thread {thread_id}): {error_message}")
            }
            ExportError::ResourceLimitExceeded {
                resource_type,
                limit,
                actual,
                suggested_action,
            } => {
                write!(f, "resource limit exceeded - {resource_type:?}: limit {limit}, actual {actual}. suggested action: {suggested_action}")
            }
            ExportError::DataQualityError {
                validation_type,
                expected,
                actual,
                affected_records,
            } => {
                write!(f, "data quality error - {validation_type:?}: expected {expected}, actual {actual}, affected records {affected_records}")
            }
            ExportError::PerformanceThresholdExceeded {
                metric,
                threshold,
                actual,
                stage,
            } => {
                write!(f, "performance threshold exceeded - {metric:?} in {stage:?}: threshold {threshold}, actual {actual}")
            }
            ExportError::ConcurrencyConflict {
                operation,
                conflict_type,
                retry_count,
            } => {
                write!(f, "concurrency conflict - operation {operation}, type {conflict_type:?}, retry count {retry_count}")
            }
            ExportError::DataCorruption {
                corruption_type,
                affected_data,
                recovery_possible,
            } => {
                write!(f, "data corruption - type {corruption_type:?}, affected data {affected_data}, recovery possible: {recovery_possible}")
            }
            ExportError::InsufficientResources {
                required_memory,
                available_memory,
                required_disk,
                available_disk,
            } => {
                write!(f, "insufficient resources - required memory {required_memory}MB, available {available_memory}MB, required disk {required_disk}MB, available {available_disk}MB")
            }
            ExportError::ExportInterrupted {
                stage,
                progress_percentage,
                partial_output_path,
            } => {
                write!(f, "export interrupted - stage {stage:?}, progress {progress_percentage:.1}%, partial output: {partial_output_path:?}")
            }
        }
    }
}

impl std::error::Error for ExportError {}

/// Validation-specific error types (separate from export errors)
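///
/// A brief illustrative sketch (module path assumed, values are placeholders):
///
/// ```rust,ignore
/// use memscope_rs::export::error_handling::ValidationError;
/// use std::time::Duration;
///
/// let err = ValidationError::TimeoutError {
///     file_path: "output/snapshot.json".to_string(),
///     timeout_duration: Duration::from_secs(30),
/// };
/// assert!(err.to_string().contains("validation timeout"));
/// ```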
#[derive(Debug, Clone)]
pub enum ValidationError {
    /// File access error during validation
    FileAccessError {
        /// File path that couldn't be accessed
        file_path: String,
        /// Underlying error message
        error: String,
    },
    /// JSON parsing error during validation
    JsonParsingError {
        /// File path with invalid JSON
        file_path: String,
        /// JSON error details
        error: String,
    },
    /// Validation timeout error
    TimeoutError {
        /// File path that timed out
        file_path: String,
        /// Timeout duration
        timeout_duration: std::time::Duration,
    },
    /// Validation was cancelled
    CancelledError {
        /// File path being validated
        file_path: String,
        /// Cancellation reason
        reason: String,
    },
    /// Configuration error
    ConfigurationError {
        /// Configuration error details
        error: String,
    },
    /// Internal validation error
    InternalError {
        /// Internal error details
        error: String,
    },
}

impl fmt::Display for ValidationError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ValidationError::FileAccessError { file_path, error } => {
                write!(f, "file access error for {}: {}", file_path, error)
            }
            ValidationError::JsonParsingError { file_path, error } => {
                write!(f, "JSON parsing error in {}: {}", file_path, error)
            }
            ValidationError::TimeoutError {
                file_path,
                timeout_duration,
            } => {
                write!(
                    f,
                    "validation timeout for {} after {:?}",
                    file_path, timeout_duration
                )
            }
            ValidationError::CancelledError { file_path, reason } => {
                write!(f, "validation cancelled for {}: {}", file_path, reason)
            }
            ValidationError::ConfigurationError { error } => {
                write!(f, "validation configuration error: {}", error)
            }
            ValidationError::InternalError { error } => {
                write!(f, "internal validation error: {}", error)
            }
        }
    }
}

impl std::error::Error for ValidationError {}

impl From<ValidationError> for TrackingError {
    fn from(error: ValidationError) -> Self {
        TrackingError::ExportError(error.to_string())
    }
}

impl From<ExportError> for TrackingError {
    fn from(error: ExportError) -> Self {
        TrackingError::ExportError(error.to_string())
    }
}

/// performance logger
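///
/// A minimal usage sketch (module path assumed; output goes to stdout via `println!`):
///
/// ```rust,ignore
/// use memscope_rs::export::error_handling::{ExportError, ExportStage, LogLevel, PerformanceLogger};
/// use std::time::Duration;
///
/// let logger = PerformanceLogger::new(LogLevel::Debug);
/// logger.log_operation_start("write_shard", "shard 0 of 4");
///
/// // record a failure so that it shows up in the error breakdown
/// let err = ExportError::ExportInterrupted {
///     stage: ExportStage::Writing,
///     progress_percentage: 42.0,
///     partial_output_path: None,
/// };
/// logger.log_operation_failure("write_shard", &err, Duration::from_millis(10));
///
/// let report = logger.generate_performance_report();
/// assert_eq!(report.failed_operations, 1);
/// assert_eq!(report.error_breakdown.export_interrupted_errors, 1);
/// ```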
#[derive(Debug)]
pub struct PerformanceLogger {
    /// log level
    log_level: LogLevel,
    /// performance metrics collector
    metrics_collector: Arc<MetricsCollector>,
    /// error statistics
    error_stats: Arc<ErrorStatistics>,
    /// start time
    start_time: Instant,
}

/// log level
#[derive(Debug, Clone, PartialEq)]
pub enum LogLevel {
    /// error
    Error,
    /// warn
    Warn,
    /// info
    Info,
    /// debug
    Debug,
    /// trace
    Trace,
}

/// metrics collector
#[derive(Debug)]
pub struct MetricsCollector {
    /// total operations
    total_operations: AtomicUsize,
    /// successful operations
    successful_operations: AtomicUsize,
    /// failed operations
    failed_operations: AtomicUsize,
    /// total processing time (milliseconds)
    total_processing_time_ms: AtomicUsize,
    /// peak memory usage (bytes)
    peak_memory_usage: AtomicUsize,
    /// current memory usage (bytes)
    current_memory_usage: AtomicUsize,
}

/// error statistics
#[derive(Debug)]
pub struct ErrorStatistics {
    /// parallel processing errors
    parallel_processing_errors: AtomicUsize,
    /// resource limit errors
    resource_limit_errors: AtomicUsize,
    /// data quality errors
    data_quality_errors: AtomicUsize,
    /// performance threshold errors
    performance_threshold_errors: AtomicUsize,
    /// concurrency conflict errors
    concurrency_conflict_errors: AtomicUsize,
    /// data corruption errors
    data_corruption_errors: AtomicUsize,
    /// insufficient resources errors
    insufficient_resources_errors: AtomicUsize,
    /// export interrupted errors
    export_interrupted_errors: AtomicUsize,
}

impl PerformanceLogger {
    /// Create new performance logger
    pub fn new(log_level: LogLevel) -> Self {
        Self {
            log_level,
            metrics_collector: Arc::new(MetricsCollector::new()),
            error_stats: Arc::new(ErrorStatistics::new()),
            start_time: Instant::now(),
        }
    }

    /// record operation start
    pub fn log_operation_start(&self, operation: &str, details: &str) {
        if self.should_log(LogLevel::Info) {
            println!(
                "πŸš€ [{}] start operation: {} - {}",
                self.format_timestamp(),
                operation,
                details
            );
        }
        self.metrics_collector
            .total_operations
            .fetch_add(1, Ordering::Relaxed);
    }

    /// record operation success
    pub fn log_operation_success(&self, operation: &str, duration: Duration, details: &str) {
        if self.should_log(LogLevel::Info) {
            println!(
                "βœ… [{}] operation success: {} ({:?}) - {}",
                self.format_timestamp(),
                operation,
                duration,
                details
            );
        }
        self.metrics_collector
            .successful_operations
            .fetch_add(1, Ordering::Relaxed);
        self.metrics_collector
            .total_processing_time_ms
            .fetch_add(duration.as_millis() as usize, Ordering::Relaxed);
    }

    /// record operation failure
    pub fn log_operation_failure(&self, operation: &str, error: &ExportError, duration: Duration) {
        if self.should_log(LogLevel::Error) {
            println!(
                "❌ [{}] operation failure: {} ({:?}) - {}",
                self.format_timestamp(),
                operation,
                duration,
                error
            );
        }
        self.metrics_collector
            .failed_operations
            .fetch_add(1, Ordering::Relaxed);
        self.update_error_statistics(error);
    }

    /// record performance metric
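    ///
    /// When `threshold` is given and `value` exceeds it, a warning is emitted; a brief
    /// sketch (module path assumed):
    ///
    /// ```rust,ignore
    /// use memscope_rs::export::error_handling::{LogLevel, PerformanceLogger, PerformanceMetric};
    ///
    /// let logger = PerformanceLogger::new(LogLevel::Debug);
    /// // 1500 ms export time against a 1000 ms threshold triggers the warning path
    /// logger.log_performance_metric(PerformanceMetric::ExportTime, 1500.0, Some(1000.0));
    /// // no threshold: the value is only logged at debug level
    /// logger.log_performance_metric(PerformanceMetric::ThroughputRate, 250.0, None);
    /// ```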
    pub fn log_performance_metric(
        &self,
        metric: PerformanceMetric,
        value: f64,
        threshold: Option<f64>,
    ) {
        if self.should_log(LogLevel::Debug) {
            let threshold_info = if let Some(t) = threshold {
                format!(" (threshold: {t})")
            } else {
                String::new()
            };
            println!(
                "πŸ“Š [{}] performance metric - {metric:?}: {value}{threshold_info}",
                self.format_timestamp()
            );
        }

        // check if exceeded threshold
        if let Some(threshold) = threshold {
            if value > threshold {
                let error = ExportError::PerformanceThresholdExceeded {
                    metric,
                    threshold,
                    actual: value,
                    stage: ExportStage::ParallelProcessing, // default stage
                };
                self.log_warning(&format!("performance threshold exceeded: {error}"));
            }
        }
    }

    /// record memory usage
    pub fn log_memory_usage(&self, current_usage: usize, peak_usage: usize) {
        if self.should_log(LogLevel::Debug) {
            println!(
                "πŸ’Ύ [{}] memory usage - current: {:.2}MB, peak: {:.2}MB",
                self.format_timestamp(),
                current_usage as f64 / 1024.0 / 1024.0,
                peak_usage as f64 / 1024.0 / 1024.0
            );
        }

        self.metrics_collector
            .current_memory_usage
            .store(current_usage, Ordering::Relaxed);

        // update peak memory usage
        let current_peak = self
            .metrics_collector
            .peak_memory_usage
            .load(Ordering::Relaxed);
        if peak_usage > current_peak {
            self.metrics_collector
                .peak_memory_usage
                .store(peak_usage, Ordering::Relaxed);
        }
    }

    /// record warning
    pub fn log_warning(&self, message: &str) {
        if self.should_log(LogLevel::Warn) {
            println!("⚠️ [{}] warning: {}", self.format_timestamp(), message);
        }
    }

    /// record debug
    pub fn log_debug(&self, message: &str) {
        if self.should_log(LogLevel::Debug) {
            println!("πŸ” [{}] debug: {}", self.format_timestamp(), message);
        }
    }

    /// record error
    pub fn log_error(&self, error: &ExportError) {
        if self.should_log(LogLevel::Error) {
            println!("πŸ’₯ [{}] error: {}", self.format_timestamp(), error);
        }
        self.update_error_statistics(error);
    }

    /// generate performance report
    pub fn generate_performance_report(&self) -> PerformanceReport {
        let total_time = self.start_time.elapsed();
        let total_ops = self
            .metrics_collector
            .total_operations
            .load(Ordering::Relaxed);
        let successful_ops = self
            .metrics_collector
            .successful_operations
            .load(Ordering::Relaxed);
        let failed_ops = self
            .metrics_collector
            .failed_operations
            .load(Ordering::Relaxed);
        let total_processing_time = self
            .metrics_collector
            .total_processing_time_ms
            .load(Ordering::Relaxed);
        let peak_memory = self
            .metrics_collector
            .peak_memory_usage
            .load(Ordering::Relaxed);
        let current_memory = self
            .metrics_collector
            .current_memory_usage
            .load(Ordering::Relaxed);

        let success_rate = if total_ops > 0 {
            (successful_ops as f64 / total_ops as f64) * 100.0
        } else {
            0.0
        };

        let avg_processing_time = if successful_ops > 0 {
            total_processing_time as f64 / successful_ops as f64
        } else {
            0.0
        };

        PerformanceReport {
            total_runtime: total_time,
            total_operations: total_ops,
            successful_operations: successful_ops,
            failed_operations: failed_ops,
            success_rate,
            average_processing_time_ms: avg_processing_time,
            peak_memory_usage_mb: peak_memory as f64 / 1024.0 / 1024.0,
            current_memory_usage_mb: current_memory as f64 / 1024.0 / 1024.0,
            error_breakdown: self.get_error_breakdown(),
        }
    }

    /// check if should log
    fn should_log(&self, level: LogLevel) -> bool {
        match (&self.log_level, &level) {
            (LogLevel::Error, LogLevel::Error) => true,
            (LogLevel::Warn, LogLevel::Error | LogLevel::Warn) => true,
            (LogLevel::Info, LogLevel::Error | LogLevel::Warn | LogLevel::Info) => true,
            (
                LogLevel::Debug,
                LogLevel::Error | LogLevel::Warn | LogLevel::Info | LogLevel::Debug,
            ) => true,
            (LogLevel::Trace, _) => true,
            _ => false,
        }
    }

    /// format timestamp
    fn format_timestamp(&self) -> String {
        let elapsed = self.start_time.elapsed();
        format!("{:>8.3}s", elapsed.as_secs_f64())
    }

    /// update error statistics
    fn update_error_statistics(&self, error: &ExportError) {
        match error {
            ExportError::ParallelProcessingError { .. } => {
                self.error_stats
                    .parallel_processing_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ResourceLimitExceeded { .. } => {
                self.error_stats
                    .resource_limit_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::DataQualityError { .. } => {
                self.error_stats
                    .data_quality_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::PerformanceThresholdExceeded { .. } => {
                self.error_stats
                    .performance_threshold_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ConcurrencyConflict { .. } => {
                self.error_stats
                    .concurrency_conflict_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::DataCorruption { .. } => {
                self.error_stats
                    .data_corruption_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::InsufficientResources { .. } => {
                self.error_stats
                    .insufficient_resources_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
            ExportError::ExportInterrupted { .. } => {
                self.error_stats
                    .export_interrupted_errors
                    .fetch_add(1, Ordering::Relaxed);
            }
        }
    }

    /// get error breakdown
    fn get_error_breakdown(&self) -> ErrorBreakdown {
        ErrorBreakdown {
            parallel_processing_errors: self
                .error_stats
                .parallel_processing_errors
                .load(Ordering::Relaxed),
            resource_limit_errors: self
                .error_stats
                .resource_limit_errors
                .load(Ordering::Relaxed),
            data_quality_errors: self.error_stats.data_quality_errors.load(Ordering::Relaxed),
            performance_threshold_errors: self
                .error_stats
                .performance_threshold_errors
                .load(Ordering::Relaxed),
            concurrency_conflict_errors: self
                .error_stats
                .concurrency_conflict_errors
                .load(Ordering::Relaxed),
            data_corruption_errors: self
                .error_stats
                .data_corruption_errors
                .load(Ordering::Relaxed),
            insufficient_resources_errors: self
                .error_stats
                .insufficient_resources_errors
                .load(Ordering::Relaxed),
            export_interrupted_errors: self
                .error_stats
                .export_interrupted_errors
                .load(Ordering::Relaxed),
        }
    }
}

impl MetricsCollector {
    fn new() -> Self {
        Self {
            total_operations: AtomicUsize::new(0),
            successful_operations: AtomicUsize::new(0),
            failed_operations: AtomicUsize::new(0),
            total_processing_time_ms: AtomicUsize::new(0),
            peak_memory_usage: AtomicUsize::new(0),
            current_memory_usage: AtomicUsize::new(0),
        }
    }
}

impl ErrorStatistics {
    fn new() -> Self {
        Self {
            parallel_processing_errors: AtomicUsize::new(0),
            resource_limit_errors: AtomicUsize::new(0),
            data_quality_errors: AtomicUsize::new(0),
            performance_threshold_errors: AtomicUsize::new(0),
            concurrency_conflict_errors: AtomicUsize::new(0),
            data_corruption_errors: AtomicUsize::new(0),
            insufficient_resources_errors: AtomicUsize::new(0),
            export_interrupted_errors: AtomicUsize::new(0),
        }
    }
}

/// performance report
#[derive(Debug, Clone)]
pub struct PerformanceReport {
    /// total runtime
    pub total_runtime: Duration,
    /// total operations
    pub total_operations: usize,
    /// successful operations
    pub successful_operations: usize,
    /// failed operations
    pub failed_operations: usize,
    /// success rate
    pub success_rate: f64,
    /// average processing time (ms)
    pub average_processing_time_ms: f64,
    /// peak memory usage (MB)
    pub peak_memory_usage_mb: f64,
    /// current memory usage (MB)
    pub current_memory_usage_mb: f64,
    /// error breakdown
    pub error_breakdown: ErrorBreakdown,
}

/// error breakdown
#[derive(Debug, Clone)]
pub struct ErrorBreakdown {
    /// parallel processing errors
    pub parallel_processing_errors: usize,
    /// resource limit errors
    pub resource_limit_errors: usize,
    /// data quality errors
    pub data_quality_errors: usize,
    /// performance threshold errors
    pub performance_threshold_errors: usize,
    /// concurrency conflict errors
    pub concurrency_conflict_errors: usize,
    /// data corruption errors
    pub data_corruption_errors: usize,
    /// insufficient resources errors
    pub insufficient_resources_errors: usize,
    /// export interrupted errors
    pub export_interrupted_errors: usize,
}

impl PerformanceReport {
    /// print detailed report
    pub fn print_detailed_report(&self) {
        println!("\nπŸ“ˆ detailed performance report");
        println!("================");

        println!("⏱️ runtime: {:?}", self.total_runtime);
        println!("πŸ”’ total operations: {}", self.total_operations);
        println!(
            "βœ… successful operations: {} ({:.1}%)",
            self.successful_operations, self.success_rate
        );
        println!("❌ failed operations: {}", self.failed_operations);
        println!(
            "⚑ average processing time: {:.2}ms",
            self.average_processing_time_ms
        );
        println!("πŸ’Ύ peak memory usage: {:.2}MB", self.peak_memory_usage_mb);
        println!(
            "πŸ’Ύ current memory usage: {:.2}MB",
            self.current_memory_usage_mb
        );

        println!("\n🚨 error breakdown:");
        println!(
            "   parallel processing errors: {}",
            self.error_breakdown.parallel_processing_errors
        );
        println!(
            "   resource limit errors: {}",
            self.error_breakdown.resource_limit_errors
        );
        println!(
            "   data quality errors: {}",
            self.error_breakdown.data_quality_errors
        );
        println!(
            "   performance threshold errors: {}",
            self.error_breakdown.performance_threshold_errors
        );
        println!(
            "   concurrency conflict errors: {}",
            self.error_breakdown.concurrency_conflict_errors
        );
        println!(
            "   data corruption errors: {}",
            self.error_breakdown.data_corruption_errors
        );
        println!(
            "   insufficient resources errors: {}",
            self.error_breakdown.insufficient_resources_errors
        );
        println!(
            "   export interrupted errors: {}",
            self.error_breakdown.export_interrupted_errors
        );
    }
}

/// resource monitor
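///
/// A minimal sketch of pre-flight resource checking (module path assumed; the current
/// usage getters are placeholders, so the check only fails once they report values
/// above the configured limits):
///
/// ```rust,ignore
/// use memscope_rs::export::error_handling::ResourceMonitor;
///
/// // limits: 1024 MB memory, 2048 MB disk, 80% CPU
/// let monitor = ResourceMonitor::new(1024, 2048, 80.0);
/// match monitor.check_resource_usage() {
///     Ok(usage) => println!("memory at {:.1}% of limit", usage.memory_usage_percentage()),
///     Err(e) => eprintln!("aborting export: {e:?}"),
/// }
/// ```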
#[derive(Debug)]
pub struct ResourceMonitor {
    /// memory limit (bytes)
    memory_limit: usize,
    /// disk space limit (bytes)
    disk_limit: usize,
    /// CPU usage limit (percentage)
    cpu_limit: f64,
}

impl ResourceMonitor {
    /// create new resource monitor
    pub fn new(memory_limit_mb: usize, disk_limit_mb: usize, cpu_limit_percent: f64) -> Self {
        Self {
            memory_limit: memory_limit_mb * 1024 * 1024,
            disk_limit: disk_limit_mb * 1024 * 1024,
            cpu_limit: cpu_limit_percent,
        }
    }

    /// check resource usage
    pub fn check_resource_usage(&self) -> TrackingResult<ResourceUsage> {
        let memory_usage = self.get_memory_usage()?;
        let disk_usage = self.get_disk_usage()?;
        let cpu_usage = self.get_cpu_usage()?;

        // check if exceeded limits
        if memory_usage > self.memory_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::Memory,
                limit: self.memory_limit as u64,
                actual: memory_usage as u64,
                suggested_action: "reduce parallelism or enable streaming processing".to_string(),
            }
            .into());
        }

        if disk_usage > self.disk_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::Disk,
                limit: self.disk_limit as u64,
                actual: disk_usage as u64,
                suggested_action: "clean up temporary files or select other output location"
                    .to_string(),
            }
            .into());
        }

        if cpu_usage > self.cpu_limit {
            return Err(ExportError::ResourceLimitExceeded {
                resource_type: ResourceType::CPU,
                limit: (self.cpu_limit * 100.0) as u64,
                actual: (cpu_usage * 100.0) as u64,
                suggested_action: "reduce thread count or lower processing priority".to_string(),
            }
            .into());
        }

        Ok(ResourceUsage {
            memory_usage,
            disk_usage,
            cpu_usage,
            memory_limit: self.memory_limit,
            disk_limit: self.disk_limit,
            cpu_limit: self.cpu_limit,
        })
    }

    /// get memory usage (simplified implementation)
    fn get_memory_usage(&self) -> TrackingResult<usize> {
        // in actual implementation, this should call system API to get real memory usage
        // here use simplified implementation
        Ok(0) // placeholder implementation
    }

    /// get disk usage (simplified implementation)
    fn get_disk_usage(&self) -> TrackingResult<usize> {
        // in actual implementation, this should call system API to get real disk usage
        Ok(0) // placeholder implementation
    }

    /// get CPU usage (simplified implementation)
    fn get_cpu_usage(&self) -> TrackingResult<f64> {
        // in actual implementation, this should call system API to get real CPU usage
        Ok(0.0) // placeholder implementation
    }
}

/// resource usage
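///
/// A brief sketch of reading utilization from a snapshot (module path assumed;
/// `cpu_usage` and `cpu_limit` are treated as fractions here, matching
/// `cpu_usage_percentage` and the unit tests below):
///
/// ```rust,ignore
/// use memscope_rs::export::error_handling::ResourceUsage;
///
/// let usage = ResourceUsage {
///     memory_usage: 256 * 1024 * 1024,
///     disk_usage: 512 * 1024 * 1024,
///     cpu_usage: 0.25,
///     memory_limit: 1024 * 1024 * 1024,
///     disk_limit: 2048 * 1024 * 1024,
///     cpu_limit: 0.8,
/// };
/// assert_eq!(usage.memory_usage_percentage(), 25.0);
/// assert_eq!(usage.disk_usage_percentage(), 25.0);
/// assert_eq!(usage.cpu_usage_percentage(), 25.0);
/// ```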
#[derive(Debug, Clone)]
pub struct ResourceUsage {
    /// Current memory usage in bytes
    pub memory_usage: usize,
    /// Current disk usage in bytes
    pub disk_usage: usize,
    /// Current CPU usage as a fraction (0.0-1.0); see `cpu_usage_percentage`
    pub cpu_usage: f64,
    /// Memory limit in bytes
    pub memory_limit: usize,
    /// Disk limit in bytes
    pub disk_limit: usize,
    /// CPU limit as percentage (0.0-100.0)
    pub cpu_limit: f64,
}

impl ResourceUsage {
    /// get memory usage percentage
    pub fn memory_usage_percentage(&self) -> f64 {
        if self.memory_limit > 0 {
            (self.memory_usage as f64 / self.memory_limit as f64) * 100.0
        } else {
            0.0
        }
    }

    /// get disk usage percentage
    pub fn disk_usage_percentage(&self) -> f64 {
        if self.disk_limit > 0 {
            (self.disk_usage as f64 / self.disk_limit as f64) * 100.0
        } else {
            0.0
        }
    }

    /// get CPU usage percentage
    pub fn cpu_usage_percentage(&self) -> f64 {
        self.cpu_usage * 100.0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_export_error_display() {
        let error = ExportError::ParallelProcessingError {
            shard_index: 5,
            thread_id: "thread-1".to_string(),
            error_message: "serialization failed".to_string(),
            partial_results: None,
        };

        let display = format!("{error}");
        assert!(display.contains("parallel processing error"));
        assert!(display.contains("shard 5"));
        assert!(display.contains("thread-1"));
    }

    #[test]
    fn test_performance_logger() {
        let logger = PerformanceLogger::new(LogLevel::Info);

        logger.log_operation_start("test operation", "test details");
        logger.log_operation_success("test operation", Duration::from_millis(100), "success");

        let report = logger.generate_performance_report();
        assert_eq!(report.total_operations, 1);
        assert_eq!(report.successful_operations, 1);
        assert_eq!(report.failed_operations, 0);
        assert_eq!(report.success_rate, 100.0);
    }

    #[test]
    fn test_resource_monitor() {
        let monitor = ResourceMonitor::new(1024, 2048, 80.0);

        // test resource check (using simplified implementation, should always succeed)
        let result = monitor.check_resource_usage();
        assert!(result.is_ok());

        let usage = result.unwrap();
        assert_eq!(usage.memory_limit, 1024 * 1024 * 1024);
        assert_eq!(usage.disk_limit, 2048 * 1024 * 1024);
        assert_eq!(usage.cpu_limit, 80.0);
    }

    #[test]
    fn test_resource_usage_percentages() {
        let usage = ResourceUsage {
            memory_usage: 512 * 1024 * 1024,  // 512MB
            disk_usage: 1024 * 1024 * 1024,   // 1GB
            cpu_usage: 0.6,                   // 60%
            memory_limit: 1024 * 1024 * 1024, // 1GB
            disk_limit: 2048 * 1024 * 1024,   // 2GB
            cpu_limit: 0.8,                   // 80%
        };

        assert_eq!(usage.memory_usage_percentage(), 50.0);
        assert_eq!(usage.disk_usage_percentage(), 50.0);
        assert_eq!(usage.cpu_usage_percentage(), 60.0);
    }
}