// actix_web_csp/monitoring/perf.rs

use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::time::{Duration, Instant};

/// Lock-free performance counters for the CSP middleware hot paths.
///
/// Every field is an atomic, so samples can be recorded through a shared
/// reference (`&self`) from multiple threads without locking.
#[derive(Debug)]
pub struct PerformanceMetrics {
    // Count of header-generation timing samples recorded.
    header_generation_samples: AtomicUsize,
    // Sum of all header-generation sample durations, in nanoseconds.
    header_generation_total_ns: AtomicU64,
    // Smallest sample seen; starts at u64::MAX as a "no samples" sentinel.
    header_generation_min_ns: AtomicU64,
    // Largest sample seen, in nanoseconds.
    header_generation_max_ns: AtomicU64,

    // Count and running total (ns) of policy-hash timing samples.
    policy_hash_samples: AtomicUsize,
    policy_hash_total_ns: AtomicU64,

    // NOTE(review): despite the "_ratio" suffix these hold raw hit/miss
    // COUNTS; the ratio is derived in `cache_hit_rate()`.
    cache_hit_ratio: AtomicUsize,
    cache_miss_ratio: AtomicUsize,

    // Incremented when a header-generation sample exceeds 1 ms
    // (see `record_header_generation`).
    memory_pressure_events: AtomicUsize,
    // Never incremented in this file — presumably driven by another
    // module; confirm before removing.
    gc_events: AtomicUsize,
}

21impl Default for PerformanceMetrics {
22    fn default() -> Self {
23        Self {
24            header_generation_samples: AtomicUsize::new(0),
25            header_generation_total_ns: AtomicU64::new(0),
26            header_generation_min_ns: AtomicU64::new(u64::MAX),
27            header_generation_max_ns: AtomicU64::new(0),
28
29            policy_hash_samples: AtomicUsize::new(0),
30            policy_hash_total_ns: AtomicU64::new(0),
31
32            cache_hit_ratio: AtomicUsize::new(0),
33            cache_miss_ratio: AtomicUsize::new(0),
34
35            memory_pressure_events: AtomicUsize::new(0),
36            gc_events: AtomicUsize::new(0),
37        }
38    }
39}
40
41impl PerformanceMetrics {
42    pub fn new() -> Self {
43        Self::default()
44    }
45
46    pub fn record_header_generation(&self, duration: Duration) {
47        let ns = duration.as_nanos() as u64;
48
49        self.header_generation_samples
50            .fetch_add(1, Ordering::Relaxed);
51        self.header_generation_total_ns
52            .fetch_add(ns, Ordering::Relaxed);
53
54        loop {
55            let current_min = self.header_generation_min_ns.load(Ordering::Relaxed);
56            if ns >= current_min
57                || self
58                    .header_generation_min_ns
59                    .compare_exchange_weak(current_min, ns, Ordering::Relaxed, Ordering::Relaxed)
60                    .is_ok()
61            {
62                break;
63            }
64        }
65
66        loop {
67            let current_max = self.header_generation_max_ns.load(Ordering::Relaxed);
68            if ns <= current_max
69                || self
70                    .header_generation_max_ns
71                    .compare_exchange_weak(current_max, ns, Ordering::Relaxed, Ordering::Relaxed)
72                    .is_ok()
73            {
74                break;
75            }
76        }
77
78        if ns > 1_000_000 {
79            self.memory_pressure_events.fetch_add(1, Ordering::Relaxed);
80        }
81    }
82
83    pub fn record_policy_hash(&self, duration: Duration) {
84        let ns = duration.as_nanos() as u64;
85
86        self.policy_hash_samples.fetch_add(1, Ordering::Relaxed);
87        self.policy_hash_total_ns.fetch_add(ns, Ordering::Relaxed);
88    }
89
90    pub fn record_cache_hit(&self) {
91        self.cache_hit_ratio.fetch_add(1, Ordering::Relaxed);
92    }
93
94    pub fn record_cache_miss(&self) {
95        self.cache_miss_ratio.fetch_add(1, Ordering::Relaxed);
96    }
97
98    pub fn avg_header_generation_ns(&self) -> f64 {
99        let samples = self.header_generation_samples.load(Ordering::Relaxed);
100        if samples == 0 {
101            0.0
102        } else {
103            self.header_generation_total_ns.load(Ordering::Relaxed) as f64 / samples as f64
104        }
105    }
106
107    pub fn avg_policy_hash_ns(&self) -> f64 {
108        let samples = self.policy_hash_samples.load(Ordering::Relaxed);
109        if samples == 0 {
110            0.0
111        } else {
112            self.policy_hash_total_ns.load(Ordering::Relaxed) as f64 / samples as f64
113        }
114    }
115
116    pub fn cache_hit_rate(&self) -> f64 {
117        let hits = self.cache_hit_ratio.load(Ordering::Relaxed);
118        let misses = self.cache_miss_ratio.load(Ordering::Relaxed);
119        let total = hits + misses;
120
121        if total == 0 {
122            0.0
123        } else {
124            hits as f64 / total as f64
125        }
126    }
127
128    pub fn min_header_generation_ns(&self) -> u64 {
129        let min = self.header_generation_min_ns.load(Ordering::Relaxed);
130        if min == u64::MAX {
131            0
132        } else {
133            min
134        }
135    }
136
137    pub fn max_header_generation_ns(&self) -> u64 {
138        self.header_generation_max_ns.load(Ordering::Relaxed)
139    }
140
141    pub fn reset(&self) {
142        self.header_generation_samples.store(0, Ordering::Relaxed);
143        self.header_generation_total_ns.store(0, Ordering::Relaxed);
144        self.header_generation_min_ns
145            .store(u64::MAX, Ordering::Relaxed);
146        self.header_generation_max_ns.store(0, Ordering::Relaxed);
147
148        self.policy_hash_samples.store(0, Ordering::Relaxed);
149        self.policy_hash_total_ns.store(0, Ordering::Relaxed);
150
151        self.cache_hit_ratio.store(0, Ordering::Relaxed);
152        self.cache_miss_ratio.store(0, Ordering::Relaxed);
153
154        self.memory_pressure_events.store(0, Ordering::Relaxed);
155        self.gc_events.store(0, Ordering::Relaxed);
156    }
157}
158
/// Simple monotonic stopwatch: captures an `Instant` at construction and
/// reports time elapsed since then.
#[derive(Debug)]
pub struct PerformanceTimer {
    // Creation time; baseline for `elapsed()`.
    start: Instant,
}

164impl PerformanceTimer {
165    pub fn new() -> Self {
166        Self {
167            start: Instant::now(),
168        }
169    }
170
171    pub fn elapsed(&self) -> Duration {
172        self.start.elapsed()
173    }
174}
175
176impl Default for PerformanceTimer {
177    fn default() -> Self {
178        Self::new()
179    }
180}
181
/// LRU cache that grows its own capacity when the observed hit rate
/// stays low (see `maybe_resize` in the impl).
pub struct AdaptiveCache<K, V> {
    // Backing store from the third-party `lru` crate.
    cache: lru::LruCache<K, V>,
    // Raw hit/miss counters. NOTE(review): atomics are used even though
    // the accessors take `&mut self`; presumably reserved for future
    // shared-reference use — confirm, otherwise plain usize would do.
    hit_count: AtomicUsize,
    miss_count: AtomicUsize,
    // When the capacity last changed; throttles resizing to once a minute.
    last_resize: Instant,
    // A resize is considered every `resize_threshold` total requests.
    resize_threshold: usize,
}

190impl<K: std::hash::Hash + Eq, V> AdaptiveCache<K, V> {
191    pub fn new(capacity: std::num::NonZeroUsize) -> Self {
192        Self {
193            cache: lru::LruCache::new(capacity),
194            hit_count: AtomicUsize::new(0),
195            miss_count: AtomicUsize::new(0),
196            last_resize: Instant::now(),
197            resize_threshold: 1000,
198        }
199    }
200
201    pub fn get(&mut self, key: &K) -> Option<&V> {
202        let is_hit = self.cache.contains(key);
203        if is_hit {
204            self.hit_count.fetch_add(1, Ordering::Relaxed);
205            self.cache.get(key)
206        } else {
207            self.miss_count.fetch_add(1, Ordering::Relaxed);
208            self.maybe_resize();
209            None
210        }
211    }
212
213    pub fn put(&mut self, key: K, value: V) -> Option<V> {
214        self.cache.put(key, value)
215    }
216
217    pub fn hit_rate(&self) -> f64 {
218        let hits = self.hit_count.load(Ordering::Relaxed);
219        let misses = self.miss_count.load(Ordering::Relaxed);
220        let total = hits + misses;
221
222        if total == 0 {
223            0.0
224        } else {
225            hits as f64 / total as f64
226        }
227    }
228
229    fn maybe_resize(&mut self) {
230        let total_requests =
231            self.hit_count.load(Ordering::Relaxed) + self.miss_count.load(Ordering::Relaxed);
232
233        if total_requests % self.resize_threshold == 0
234            && self.last_resize.elapsed() > Duration::from_secs(60)
235        {
236            let hit_rate = self.hit_rate();
237            if hit_rate < 0.7 && self.cache.cap().get() < 512 {
238                let new_cap = (self.cache.cap().get() * 2).min(512);
239                if let Some(new_capacity) = std::num::NonZeroUsize::new(new_cap) {
240                    self.cache.resize(new_capacity);
241                    self.last_resize = Instant::now();
242                }
243            }
244        }
245    }
246
247    pub fn clear(&mut self) {
248        self.cache.clear();
249        self.hit_count.store(0, Ordering::Relaxed);
250        self.miss_count.store(0, Ordering::Relaxed);
251    }
252}