1use crate::core::atomic_stats::AtomicPerformanceCounters;
7use parking_lot::{Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
8use std::time::{Duration, Instant};
9
/// A `parking_lot::Mutex` wrapper that records lock-acquisition wait time
/// and contention events into a set of atomic performance counters.
#[derive(Debug)]
pub struct OptimizedMutex<T> {
    // The wrapped mutex protecting the data.
    inner: Mutex<T>,
    // Updated on every acquisition attempt; see `lock`/`try_lock`.
    counters: AtomicPerformanceCounters,
}
16
17impl<T> OptimizedMutex<T> {
18 pub fn new(data: T) -> Self {
20 Self {
21 inner: Mutex::new(data),
22 counters: AtomicPerformanceCounters::new(),
23 }
24 }
25
26 pub fn lock(&self) -> OptimizedMutexGuard<'_, T> {
28 let start = Instant::now();
29
30 if let Some(guard) = self.inner.try_lock() {
32 let wait_time = start.elapsed();
33 self.counters.record_lock_acquisition(wait_time);
34 return OptimizedMutexGuard {
35 guard,
36 _counters: &self.counters,
37 };
38 }
39
40 self.counters.record_lock_contention();
42
43 let guard = self.inner.lock();
45 let wait_time = start.elapsed();
46 self.counters.record_lock_acquisition(wait_time);
47
48 OptimizedMutexGuard {
49 guard,
50 _counters: &self.counters,
51 }
52 }
53
54 pub fn try_lock(&self) -> Option<OptimizedMutexGuard<'_, T>> {
56 let start = Instant::now();
57
58 if let Some(guard) = self.inner.try_lock() {
59 let wait_time = start.elapsed();
60 self.counters.record_lock_acquisition(wait_time);
61 Some(OptimizedMutexGuard {
62 guard,
63 _counters: &self.counters,
64 })
65 } else {
66 self.counters.record_lock_contention();
67 None
68 }
69 }
70
71 pub fn try_lock_for(&self, timeout: Duration) -> Option<OptimizedMutexGuard<'_, T>> {
73 let start = Instant::now();
74
75 if let Some(guard) = self.inner.try_lock_for(timeout) {
76 let wait_time = start.elapsed();
77 self.counters.record_lock_acquisition(wait_time);
78 Some(OptimizedMutexGuard {
79 guard,
80 _counters: &self.counters,
81 })
82 } else {
83 self.counters.record_lock_contention();
84 None
85 }
86 }
87
88 pub fn performance_stats(&self) -> crate::core::atomic_stats::PerformanceSnapshot {
90 self.counters.snapshot()
91 }
92}
93
94pub struct OptimizedMutexGuard<'a, T> {
96 guard: MutexGuard<'a, T>,
97 _counters: &'a AtomicPerformanceCounters,
98}
99
100impl<'a, T> std::ops::Deref for OptimizedMutexGuard<'a, T> {
101 type Target = T;
102
103 fn deref(&self) -> &Self::Target {
104 &self.guard
105 }
106}
107
108impl<'a, T> std::ops::DerefMut for OptimizedMutexGuard<'a, T> {
109 fn deref_mut(&mut self) -> &mut Self::Target {
110 &mut self.guard
111 }
112}
113
/// A `parking_lot::RwLock` wrapper that records lock-acquisition wait time
/// and contention events into a set of atomic performance counters.
#[derive(Debug)]
pub struct OptimizedRwLock<T> {
    // The wrapped reader-writer lock protecting the data.
    inner: RwLock<T>,
    // Updated on every read/write acquisition attempt.
    counters: AtomicPerformanceCounters,
}
120
121impl<T> OptimizedRwLock<T> {
122 pub fn new(data: T) -> Self {
124 Self {
125 inner: RwLock::new(data),
126 counters: AtomicPerformanceCounters::new(),
127 }
128 }
129
130 pub fn read(&self) -> OptimizedRwLockReadGuard<'_, T> {
132 let start = Instant::now();
133
134 if let Some(guard) = self.inner.try_read() {
136 let wait_time = start.elapsed();
137 self.counters.record_lock_acquisition(wait_time);
138 return OptimizedRwLockReadGuard {
139 guard,
140 _counters: &self.counters,
141 };
142 }
143
144 self.counters.record_lock_contention();
146
147 let guard = self.inner.read();
149 let wait_time = start.elapsed();
150 self.counters.record_lock_acquisition(wait_time);
151
152 OptimizedRwLockReadGuard {
153 guard,
154 _counters: &self.counters,
155 }
156 }
157
158 pub fn write(&self) -> OptimizedRwLockWriteGuard<'_, T> {
160 let start = Instant::now();
161
162 if let Some(guard) = self.inner.try_write() {
164 let wait_time = start.elapsed();
165 self.counters.record_lock_acquisition(wait_time);
166 return OptimizedRwLockWriteGuard {
167 guard,
168 _counters: &self.counters,
169 };
170 }
171
172 self.counters.record_lock_contention();
174
175 let guard = self.inner.write();
177 let wait_time = start.elapsed();
178 self.counters.record_lock_acquisition(wait_time);
179
180 OptimizedRwLockWriteGuard {
181 guard,
182 _counters: &self.counters,
183 }
184 }
185
186 pub fn try_read(&self) -> Option<OptimizedRwLockReadGuard<'_, T>> {
188 let start = Instant::now();
189
190 if let Some(guard) = self.inner.try_read() {
191 let wait_time = start.elapsed();
192 self.counters.record_lock_acquisition(wait_time);
193 Some(OptimizedRwLockReadGuard {
194 guard,
195 _counters: &self.counters,
196 })
197 } else {
198 self.counters.record_lock_contention();
199 None
200 }
201 }
202
203 pub fn try_write(&self) -> Option<OptimizedRwLockWriteGuard<'_, T>> {
205 let start = Instant::now();
206
207 if let Some(guard) = self.inner.try_write() {
208 let wait_time = start.elapsed();
209 self.counters.record_lock_acquisition(wait_time);
210 Some(OptimizedRwLockWriteGuard {
211 guard,
212 _counters: &self.counters,
213 })
214 } else {
215 self.counters.record_lock_contention();
216 None
217 }
218 }
219
220 pub fn performance_stats(&self) -> crate::core::atomic_stats::PerformanceSnapshot {
222 self.counters.snapshot()
223 }
224}
225
226pub struct OptimizedRwLockReadGuard<'a, T> {
228 guard: RwLockReadGuard<'a, T>,
229 _counters: &'a AtomicPerformanceCounters,
230}
231
232impl<'a, T> std::ops::Deref for OptimizedRwLockReadGuard<'a, T> {
233 type Target = T;
234
235 fn deref(&self) -> &Self::Target {
236 &self.guard
237 }
238}
239
240pub struct OptimizedRwLockWriteGuard<'a, T> {
242 guard: RwLockWriteGuard<'a, T>,
243 _counters: &'a AtomicPerformanceCounters,
244}
245
246impl<'a, T> std::ops::Deref for OptimizedRwLockWriteGuard<'a, T> {
247 type Target = T;
248
249 fn deref(&self) -> &Self::Target {
250 &self.guard
251 }
252}
253
254impl<'a, T> std::ops::DerefMut for OptimizedRwLockWriteGuard<'a, T> {
255 fn deref_mut(&mut self) -> &mut Self::Target {
256 &mut self.guard
257 }
258}
259
/// A lock-free `u64` counter backed by an `AtomicU64` with relaxed ordering.
///
/// All arithmetic wraps on overflow/underflow, matching the wrapping
/// semantics of the underlying atomic `fetch_add`/`fetch_sub`. Relaxed
/// ordering is used throughout: the counter value itself is atomic, but it
/// does not synchronize other memory.
#[derive(Debug)]
pub struct LockFreeCounter {
    value: std::sync::atomic::AtomicU64,
}

impl LockFreeCounter {
    /// Creates a counter starting at `initial_value`.
    pub fn new(initial_value: u64) -> Self {
        Self {
            value: std::sync::atomic::AtomicU64::new(initial_value),
        }
    }

    /// Atomically adds 1 and returns the new value (wraps at `u64::MAX`).
    pub fn increment(&self) -> u64 {
        // `fetch_add` wraps and returns the *previous* value; derive the new
        // value with `wrapping_add` so a debug build cannot panic on overflow
        // (plain `+ 1` would panic when the previous value was `u64::MAX`).
        self.value
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed)
            .wrapping_add(1)
    }

    /// Atomically subtracts 1 and returns the new value (wraps below zero).
    pub fn decrement(&self) -> u64 {
        // `wrapping_sub` keeps this consistent with the atomic's wrapping
        // behavior; plain `- 1` would panic in debug builds when the previous
        // value was 0 (reachable under concurrent mixed inc/dec workloads).
        self.value
            .fetch_sub(1, std::sync::atomic::Ordering::Relaxed)
            .wrapping_sub(1)
    }

    /// Atomically adds `value` and returns the new value (wrapping).
    pub fn add(&self, value: u64) -> u64 {
        self.value
            .fetch_add(value, std::sync::atomic::Ordering::Relaxed)
            .wrapping_add(value)
    }

    /// Atomically subtracts `value` and returns the new value (wrapping).
    pub fn sub(&self, value: u64) -> u64 {
        self.value
            .fetch_sub(value, std::sync::atomic::Ordering::Relaxed)
            .wrapping_sub(value)
    }

    /// Returns the current value.
    pub fn get(&self) -> u64 {
        self.value.load(std::sync::atomic::Ordering::Relaxed)
    }

    /// Overwrites the counter with `value`.
    pub fn set(&self, value: u64) {
        self.value
            .store(value, std::sync::atomic::Ordering::Relaxed);
    }

    /// Atomically replaces the value with `new` if it currently equals
    /// `current`.
    ///
    /// Returns `Ok(previous)` on success or `Err(actual)` (the value that
    /// caused the mismatch) on failure.
    pub fn compare_and_swap(&self, current: u64, new: u64) -> Result<u64, u64> {
        self.value.compare_exchange(
            current,
            new,
            std::sync::atomic::Ordering::Relaxed,
            std::sync::atomic::Ordering::Relaxed,
        )
    }
}

impl Default for LockFreeCounter {
    /// A counter starting at zero.
    fn default() -> Self {
        Self::new(0)
    }
}
329
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    // Single-threaded smoke test: lock, read, mutate, re-read.
    #[test]
    fn test_optimized_mutex_basic_functionality() {
        let mutex = OptimizedMutex::new(42);

        {
            let guard = mutex.lock();
            assert_eq!(*guard, 42);
        }

        {
            let mut guard = mutex.lock();
            *guard = 100;
        }

        let guard = mutex.lock();
        assert_eq!(*guard, 100);
    }

    // try_lock succeeds on a free mutex and is refused while a guard is held.
    #[test]
    fn test_optimized_mutex_try_lock() {
        let mutex = OptimizedMutex::new(42);

        // First guard is dropped by `unwrap()`'s temporary going out of scope
        // at the end of the statement, so the lock is free again afterwards.
        let guard1 = mutex.try_lock();
        assert!(guard1.is_some());
        assert_eq!(*guard1.unwrap(), 42);

        // Hold the lock, then confirm a second try_lock fails.
        let _guard1 = mutex.lock();
        let guard2 = mutex.try_lock();
        assert!(guard2.is_none());
    }

    // Single-threaded smoke test for read then write then read.
    #[test]
    fn test_optimized_rwlock_basic_functionality() {
        let rwlock = OptimizedRwLock::new(42);

        {
            let read_guard = rwlock.read();
            assert_eq!(*read_guard, 42);
        }

        {
            let mut write_guard = rwlock.write();
            *write_guard = 100;
        }

        let read_guard = rwlock.read();
        assert_eq!(*read_guard, 100);
    }

    // A reader arriving while a writer holds the lock must block until the
    // writer finishes, then observe the writer's final value.
    //
    // NOTE(review): timing-based — assumes the writer thread acquires the
    // write lock within its 10 ms head start. On a heavily loaded machine the
    // reader could win the race, observe 0, and fail. Consider a barrier.
    #[test]
    fn test_optimized_rwlock_writer_exclusivity() {
        let rwlock = Arc::new(OptimizedRwLock::new(0));
        let rwlock_clone = Arc::clone(&rwlock);

        let writer_handle = thread::spawn(move || {
            let mut write_guard = rwlock_clone.write();
            *write_guard = 100;
            // Hold the write lock long enough for the main-thread read below
            // to arrive while it is still held.
            thread::sleep(Duration::from_millis(50));
            *write_guard = 200;
        });

        // Give the writer a head start so it grabs the lock first.
        thread::sleep(Duration::from_millis(10));

        // Blocks until the writer drops its guard.
        let read_guard = rwlock.read();

        assert_eq!(*read_guard, 200);

        writer_handle.join().unwrap();
    }

    // try_read/try_write succeed when compatible and fail while a write
    // guard is held.
    #[test]
    fn test_optimized_rwlock_try_operations() {
        let rwlock = OptimizedRwLock::new(42);

        // Uncontended try_read succeeds.
        let read_guard = rwlock.try_read();
        assert!(read_guard.is_some());
        if let Some(guard) = read_guard {
            assert_eq!(*guard, 42);
        }

        // The read guard above has been dropped, so try_write succeeds.
        let write_guard = rwlock.try_write();
        assert!(write_guard.is_some());

        // While the write guard is held, both try_read and try_write fail.
        let try_read = rwlock.try_read();
        assert!(try_read.is_none());

        let try_write = rwlock.try_write();
        assert!(try_write.is_none());
    }

    // Each mutating operation returns the post-operation value.
    #[test]
    fn test_lock_free_counter_basic_operations() {
        let counter = LockFreeCounter::new(0);

        assert_eq!(counter.get(), 0);

        assert_eq!(counter.increment(), 1);
        assert_eq!(counter.get(), 1);

        assert_eq!(counter.add(5), 6);
        assert_eq!(counter.get(), 6);

        assert_eq!(counter.decrement(), 5);
        assert_eq!(counter.get(), 5);

        assert_eq!(counter.sub(3), 2);
        assert_eq!(counter.get(), 2);
    }

    // 10 threads x 1000 increments each; no updates may be lost.
    #[test]
    fn test_lock_free_counter_concurrent_increments() {
        let counter = Arc::new(LockFreeCounter::new(0));
        let mut handles = Vec::new();

        for _ in 0..10 {
            let counter_clone = Arc::clone(&counter);
            let handle = thread::spawn(move || {
                for _ in 0..1000 {
                    counter_clone.increment();
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert_eq!(counter.get(), 10000);
    }

    // 5 incrementing, 3 decrementing, and 2 adding threads run concurrently.
    // Net effect: 5*1000 - 3*500 + 2*100*10 = 5500.
    //
    // NOTE(review): interleaving may let decrements run while the counter is
    // 0, wrapping the u64 transiently below zero — confirm `decrement`
    // tolerates that (a debug build could panic on underflow).
    #[test]
    fn test_lock_free_counter_mixed_operations() {
        let counter = Arc::new(LockFreeCounter::new(0));
        let mut handles = Vec::new();

        for _ in 0..5 {
            let counter_clone = Arc::clone(&counter);
            let handle = thread::spawn(move || {
                for _ in 0..1000 {
                    counter_clone.increment();
                }
            });
            handles.push(handle);
        }

        for _ in 0..3 {
            let counter_clone = Arc::clone(&counter);
            let handle = thread::spawn(move || {
                for _ in 0..500 {
                    counter_clone.decrement();
                }
            });
            handles.push(handle);
        }

        for _ in 0..2 {
            let counter_clone = Arc::clone(&counter);
            let handle = thread::spawn(move || {
                for _ in 0..100 {
                    counter_clone.add(10);
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        assert_eq!(counter.get(), 5500);
    }

    // CAS returns Ok(previous) on a match and Err(actual) on a mismatch,
    // leaving the value unchanged in the failure case.
    #[test]
    fn test_lock_free_counter_compare_and_swap() {
        let counter = LockFreeCounter::new(42);

        // Expected value matches: swap succeeds.
        let result = counter.compare_and_swap(42, 100);
        assert_eq!(result, Ok(42));
        assert_eq!(counter.get(), 100);

        // Stale expected value: swap fails and the value is untouched.
        let result = counter.compare_and_swap(42, 200);
        assert_eq!(result, Err(100));
        assert_eq!(counter.get(), 100);
    }

    // `set` overwrites unconditionally, including near the u64 upper bound.
    #[test]
    fn test_lock_free_counter_set_operation() {
        let counter = LockFreeCounter::new(0);

        counter.set(12345);
        assert_eq!(counter.get(), 12345);

        counter.set(u64::MAX - 1);
        assert_eq!(counter.get(), u64::MAX - 1);
    }

    // Default::default() starts at zero and behaves like new(0).
    #[test]
    fn test_lock_free_counter_default() {
        let counter = LockFreeCounter::default();
        assert_eq!(counter.get(), 0);

        counter.increment();
        assert_eq!(counter.get(), 1);
    }

    // 20 threads hammer one mutex; the final count must be exact and the
    // whole run should complete quickly.
    //
    // NOTE(review): the 1-second wall-clock bound is load-sensitive and may
    // flake on slow or busy CI machines.
    #[test]
    fn test_lock_contention_performance() {
        let mutex = Arc::new(OptimizedMutex::new(0));
        let mut handles = Vec::new();

        let start = std::time::Instant::now();

        for _ in 0..20 {
            let mutex_clone = Arc::clone(&mutex);
            let handle = thread::spawn(move || {
                for _ in 0..100 {
                    let mut guard = mutex_clone.lock();
                    *guard += 1;
                    // Brief spin while holding the lock to induce contention.
                    std::hint::spin_loop();
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        let duration = start.elapsed();

        assert_eq!(*mutex.lock(), 2000);
        assert!(duration < Duration::from_secs(1));
    }

    // Staggered readers overlap with one writer; after everything joins the
    // writer's value must be visible.
    //
    // NOTE(review): timing-based and only checks the final value, not any
    // actual fairness/ordering property despite the test name.
    #[test]
    fn test_rwlock_reader_writer_fairness() {
        let rwlock = Arc::new(OptimizedRwLock::new(0));
        let rwlock_clone = Arc::clone(&rwlock);

        // Readers start 10 ms apart and each hold the lock ~50 ms.
        let mut reader_handles = Vec::new();
        for i in 0..5 {
            let rwlock_clone = Arc::clone(&rwlock);
            let handle = thread::spawn(move || {
                thread::sleep(Duration::from_millis(i * 10));
                let _guard = rwlock_clone.read();
                thread::sleep(Duration::from_millis(50));
            });
            reader_handles.push(handle);
        }

        // Let some readers get going, then request the write lock.
        thread::sleep(Duration::from_millis(20));
        let writer_handle = thread::spawn(move || {
            let mut guard = rwlock_clone.write();
            *guard = 42;
        });

        for handle in reader_handles {
            handle.join().unwrap();
        }
        writer_handle.join().unwrap();

        let guard = rwlock.read();
        assert_eq!(*guard, 42);
    }
}