// unity_asset_binary/performance.rs

1//! Performance Optimization and Monitoring
2//!
3//! This module provides performance monitoring, optimization utilities,
4//! and memory management improvements for Unity asset parsing.
5
6use serde::{Deserialize, Serialize};
7use std::sync::Arc;
8use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
9use std::time::{Duration, Instant};
10
/// Process-wide performance metrics collector, lazily initialized on first use.
///
/// Shared as an `Arc` so `get_global_metrics` can hand out owning handles.
/// NOTE(review): `std::sync::LazyLock` could replace `once_cell` if MSRV >= 1.80.
static GLOBAL_METRICS: once_cell::sync::Lazy<Arc<PerformanceMetrics>> =
    once_cell::sync::Lazy::new(|| Arc::new(PerformanceMetrics::new()));
14
/// Performance metrics for Unity asset parsing operations.
///
/// All counters are atomics updated with `Ordering::Relaxed`, so every method
/// takes `&self` and the struct can be shared freely across threads. Individual
/// counters are consistent, but a multi-field snapshot (see `get_stats`) is not
/// taken as a single atomic cut.
#[derive(Debug)]
pub struct PerformanceMetrics {
    /// Total bytes processed
    pub bytes_processed: AtomicU64,
    /// Total parsing time in nanoseconds
    pub total_parse_time_ns: AtomicU64,
    /// Number of files processed
    pub files_processed: AtomicUsize,
    /// Number of objects parsed
    pub objects_parsed: AtomicUsize,
    /// Peak memory usage in bytes (running maximum of recorded samples)
    pub peak_memory_bytes: AtomicU64,
    /// Number of cache hits
    pub cache_hits: AtomicUsize,
    /// Number of cache misses
    pub cache_misses: AtomicUsize,
}
33
34impl PerformanceMetrics {
35    /// Create new performance metrics
36    pub fn new() -> Self {
37        Self {
38            bytes_processed: AtomicU64::new(0),
39            total_parse_time_ns: AtomicU64::new(0),
40            files_processed: AtomicUsize::new(0),
41            objects_parsed: AtomicUsize::new(0),
42            peak_memory_bytes: AtomicU64::new(0),
43            cache_hits: AtomicUsize::new(0),
44            cache_misses: AtomicUsize::new(0),
45        }
46    }
47
48    /// Record bytes processed
49    pub fn record_bytes(&self, bytes: u64) {
50        self.bytes_processed.fetch_add(bytes, Ordering::Relaxed);
51    }
52
53    /// Record parsing time
54    pub fn record_parse_time(&self, duration: Duration) {
55        self.total_parse_time_ns
56            .fetch_add(duration.as_nanos() as u64, Ordering::Relaxed);
57    }
58
59    /// Record file processed
60    pub fn record_file(&self) {
61        self.files_processed.fetch_add(1, Ordering::Relaxed);
62    }
63
64    /// Record object parsed
65    pub fn record_object(&self) {
66        self.objects_parsed.fetch_add(1, Ordering::Relaxed);
67    }
68
69    /// Record memory usage
70    pub fn record_memory(&self, bytes: u64) {
71        let current = self.peak_memory_bytes.load(Ordering::Relaxed);
72        if bytes > current {
73            self.peak_memory_bytes.store(bytes, Ordering::Relaxed);
74        }
75    }
76
77    /// Record cache hit
78    pub fn record_cache_hit(&self) {
79        self.cache_hits.fetch_add(1, Ordering::Relaxed);
80    }
81
82    /// Record cache miss
83    pub fn record_cache_miss(&self) {
84        self.cache_misses.fetch_add(1, Ordering::Relaxed);
85    }
86
87    /// Get current statistics
88    pub fn get_stats(&self) -> PerformanceStats {
89        let bytes = self.bytes_processed.load(Ordering::Relaxed);
90        let time_ns = self.total_parse_time_ns.load(Ordering::Relaxed);
91        let files = self.files_processed.load(Ordering::Relaxed);
92        let objects = self.objects_parsed.load(Ordering::Relaxed);
93        let memory = self.peak_memory_bytes.load(Ordering::Relaxed);
94        let hits = self.cache_hits.load(Ordering::Relaxed);
95        let misses = self.cache_misses.load(Ordering::Relaxed);
96
97        PerformanceStats {
98            bytes_processed: bytes,
99            total_parse_time: Duration::from_nanos(time_ns),
100            files_processed: files,
101            objects_parsed: objects,
102            peak_memory_bytes: memory,
103            throughput_mbps: if time_ns > 0 {
104                (bytes as f64 / 1_048_576.0) / (time_ns as f64 / 1_000_000_000.0)
105            } else {
106                0.0
107            },
108            objects_per_second: if time_ns > 0 {
109                objects as f64 / (time_ns as f64 / 1_000_000_000.0)
110            } else {
111                0.0
112            },
113            cache_hit_rate: if hits + misses > 0 {
114                hits as f64 / (hits + misses) as f64
115            } else {
116                0.0
117            },
118        }
119    }
120
121    /// Reset all metrics
122    pub fn reset(&self) {
123        self.bytes_processed.store(0, Ordering::Relaxed);
124        self.total_parse_time_ns.store(0, Ordering::Relaxed);
125        self.files_processed.store(0, Ordering::Relaxed);
126        self.objects_parsed.store(0, Ordering::Relaxed);
127        self.peak_memory_bytes.store(0, Ordering::Relaxed);
128        self.cache_hits.store(0, Ordering::Relaxed);
129        self.cache_misses.store(0, Ordering::Relaxed);
130    }
131}
132
133impl Default for PerformanceMetrics {
134    fn default() -> Self {
135        Self::new()
136    }
137}
138
/// Point-in-time snapshot of the performance metrics, with derived rates.
///
/// Produced by `PerformanceMetrics::get_stats`. Derived rates
/// (`throughput_mbps`, `objects_per_second`, `cache_hit_rate`) are computed at
/// snapshot time and are 0.0 when their denominator is zero.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceStats {
    /// Total bytes processed.
    pub bytes_processed: u64,
    /// Cumulative parsing time across all recorded operations.
    pub total_parse_time: Duration,
    /// Number of files processed.
    pub files_processed: usize,
    /// Number of objects parsed.
    pub objects_parsed: usize,
    /// Peak observed memory usage in bytes.
    pub peak_memory_bytes: u64,
    /// Throughput in mebibytes per second (bytes / total parse time).
    pub throughput_mbps: f64,
    /// Objects parsed per second of total parse time.
    pub objects_per_second: f64,
    /// Cache hits divided by total cache lookups, in [0.0, 1.0].
    pub cache_hit_rate: f64,
}
151
/// Performance timer for measuring operations.
///
/// Created via `PerformanceTimer::start`; consumed by `finish` /
/// `finish_with_bytes`, which record the elapsed time into the global metrics.
pub struct PerformanceTimer {
    // Instant captured when the timer was started.
    start: Instant,
    // Label of the operation being timed. Currently never read (the leading
    // underscore silences the dead-field warning) — presumably kept for future
    // logging/tracing; TODO confirm before removing.
    _operation: String,
}
157
158impl PerformanceTimer {
159    /// Start timing an operation
160    pub fn start(operation: impl Into<String>) -> Self {
161        Self {
162            start: Instant::now(),
163            _operation: operation.into(),
164        }
165    }
166
167    /// Finish timing and record the result
168    pub fn finish(self) -> Duration {
169        let duration = self.start.elapsed();
170        GLOBAL_METRICS.record_parse_time(duration);
171        duration
172    }
173
174    /// Finish timing with byte count
175    pub fn finish_with_bytes(self, bytes: u64) -> Duration {
176        let duration = self.start.elapsed();
177        GLOBAL_METRICS.record_parse_time(duration);
178        GLOBAL_METRICS.record_bytes(bytes);
179        duration
180    }
181}
182
/// Memory pool for reducing allocations.
///
/// Items handed out via `get` are returned to the pool when their `PooledItem`
/// guard is dropped. The pool grows without bound, and returned items are NOT
/// reset — callers must clear reused items themselves.
pub struct MemoryPool<T> {
    // Recycled items awaiting reuse; mutex makes the pool thread-safe.
    pool: std::sync::Mutex<Vec<T>>,
    // Constructor invoked whenever the pool has no item to reuse.
    factory: Box<dyn Fn() -> T + Send + Sync>,
}
188
189impl<T> MemoryPool<T> {
190    /// Create a new memory pool
191    pub fn new<F>(factory: F) -> Self
192    where
193        F: Fn() -> T + Send + Sync + 'static,
194    {
195        Self {
196            pool: std::sync::Mutex::new(Vec::new()),
197            factory: Box::new(factory),
198        }
199    }
200
201    /// Get an item from the pool or create a new one
202    pub fn get(&self) -> PooledItem<'_, T> {
203        let item = {
204            let mut pool = self.pool.lock().unwrap();
205            pool.pop().unwrap_or_else(|| (self.factory)())
206        };
207        PooledItem {
208            item: Some(item),
209            pool: &self.pool,
210        }
211    }
212
213    /// Get the current pool size
214    pub fn size(&self) -> usize {
215        self.pool.lock().unwrap().len()
216    }
217}
218
/// An item borrowed from a memory pool.
///
/// Derefs to the inner `T`; on drop, the item is pushed back into the
/// originating pool's free list for reuse.
pub struct PooledItem<'a, T> {
    // Always `Some` while the guard is alive; taken out in `Drop`.
    item: Option<T>,
    // Free list of the pool this item is returned to.
    pool: &'a std::sync::Mutex<Vec<T>>,
}
224
225impl<'a, T> std::ops::Deref for PooledItem<'a, T> {
226    type Target = T;
227
228    fn deref(&self) -> &Self::Target {
229        self.item.as_ref().unwrap()
230    }
231}
232
233impl<'a, T> std::ops::DerefMut for PooledItem<'a, T> {
234    fn deref_mut(&mut self) -> &mut Self::Target {
235        self.item.as_mut().unwrap()
236    }
237}
238
239impl<'a, T> Drop for PooledItem<'a, T> {
240    fn drop(&mut self) {
241        if let Some(item) = self.item.take() {
242            let mut pool = self.pool.lock().unwrap();
243            pool.push(item);
244        }
245    }
246}
247
/// Buffer pool for byte vectors (a [`MemoryPool`] specialised to `Vec<u8>`).
pub type BufferPool = MemoryPool<Vec<u8>>;
250
/// Process-global buffer pool; fresh buffers start with 8 KiB of capacity.
static BUFFER_POOL: once_cell::sync::Lazy<BufferPool> =
    once_cell::sync::Lazy::new(|| BufferPool::new(|| Vec::with_capacity(8192)));
254
255/// Get a buffer from the global pool
256pub fn get_buffer() -> PooledItem<'static, Vec<u8>> {
257    let mut buffer = BUFFER_POOL.get();
258    buffer.clear(); // Clear but keep capacity
259    buffer
260}
261
/// Performance optimization settings.
///
/// See the `Default` impl for the baseline configuration (pools and
/// parallelism enabled, 64 KiB I/O buffers, 100 MiB cache cap).
#[derive(Debug, Clone)]
pub struct OptimizationSettings {
    /// Enable memory pooling
    pub use_memory_pools: bool,
    /// Enable parallel processing
    pub use_parallel_processing: bool,
    /// Maximum number of threads to use
    pub max_threads: usize,
    /// Buffer size in bytes for I/O operations
    pub io_buffer_size: usize,
    /// Enable compression caching
    pub cache_decompressed_data: bool,
    /// Maximum cache size in bytes
    pub max_cache_size: usize,
}
278
279impl Default for OptimizationSettings {
280    fn default() -> Self {
281        Self {
282            use_memory_pools: true,
283            use_parallel_processing: true,
284            max_threads: num_cpus::get(),
285            io_buffer_size: 64 * 1024, // 64KB
286            cache_decompressed_data: true,
287            max_cache_size: 100 * 1024 * 1024, // 100MB
288        }
289    }
290}
291
/// Get a shared handle to the process-global metrics collector.
pub fn get_global_metrics() -> Arc<PerformanceMetrics> {
    GLOBAL_METRICS.clone()
}

/// Get a snapshot of the current global performance statistics.
pub fn get_performance_stats() -> PerformanceStats {
    GLOBAL_METRICS.get_stats()
}

/// Reset all global performance counters to zero.
pub fn reset_performance_metrics() {
    GLOBAL_METRICS.reset();
}

/// Record that a file was processed (global counter).
pub fn record_file_processed() {
    GLOBAL_METRICS.record_file();
}

/// Record that an object was parsed (global counter).
pub fn record_object_parsed() {
    GLOBAL_METRICS.record_object();
}

/// Record a memory-usage sample; the global peak keeps the maximum seen.
pub fn record_memory_usage(bytes: u64) {
    GLOBAL_METRICS.record_memory(bytes);
}

/// Record a cache hit (global counter).
pub fn record_cache_hit() {
    GLOBAL_METRICS.record_cache_hit();
}

/// Record a cache miss (global counter).
pub fn record_cache_miss() {
    GLOBAL_METRICS.record_cache_miss();
}
331
#[cfg(test)]
mod tests {
    //! Unit tests. NOTE(review): `test_performance_timer` and
    //! `test_buffer_pool` touch process-global state (GLOBAL_METRICS /
    //! BUFFER_POOL). They stay deterministic only because no other test in
    //! this module uses the same global concurrently — revisit if more tests
    //! touching these globals are added, since the harness runs tests in
    //! parallel.
    use super::*;
    use std::thread;
    use std::time::Duration;

    // Exercises the counters and derived rates on a private collector
    // (no global state involved).
    #[test]
    fn test_performance_metrics() {
        let metrics = PerformanceMetrics::new();

        metrics.record_bytes(1024);
        metrics.record_parse_time(Duration::from_millis(100));
        metrics.record_file();
        metrics.record_object();

        let stats = metrics.get_stats();
        assert_eq!(stats.bytes_processed, 1024);
        assert_eq!(stats.files_processed, 1);
        assert_eq!(stats.objects_parsed, 1);
        // Non-zero bytes over non-zero time must yield a positive rate.
        assert!(stats.throughput_mbps > 0.0);
    }

    // Timing is sleep-based: only a lower bound is asserted, so scheduler
    // jitter cannot make this flake.
    #[test]
    fn test_performance_timer() {
        let timer = PerformanceTimer::start("test_operation");
        thread::sleep(Duration::from_millis(10));
        let duration = timer.finish();
        assert!(duration >= Duration::from_millis(10));
    }

    #[test]
    fn test_memory_pool() {
        let pool = MemoryPool::new(|| Vec::<u8>::with_capacity(1024));

        {
            let mut item1 = pool.get();
            item1.push(42);
            assert_eq!(item1.len(), 1);
        } // item1 is returned to pool here

        {
            let mut item2 = pool.get();
            // The pool does not reset returned items, so the reused vector
            // may still contain the 42 pushed above — clear before asserting.
            item2.clear(); // Clear it for our test
            assert_eq!(item2.len(), 0);
            // Capacity survives the round trip through the pool.
            assert!(item2.capacity() >= 1024);
        }
    }

    #[test]
    fn test_buffer_pool() {
        let buffer1 = get_buffer();
        let capacity1 = buffer1.capacity();
        drop(buffer1);

        let buffer2 = get_buffer();
        let capacity2 = buffer2.capacity();

        // Should reuse the same buffer — equal capacity is the observable
        // proxy for reuse (get_buffer clears contents but keeps capacity).
        assert_eq!(capacity1, capacity2);
    }

    #[test]
    fn test_optimization_settings() {
        let settings = OptimizationSettings::default();
        assert!(settings.use_memory_pools);
        assert!(settings.use_parallel_processing);
        assert!(settings.max_threads > 0);
        assert!(settings.io_buffer_size > 0);
    }
}