// layout_audit/analysis/false_sharing.rs

use crate::types::{
    AtomicMember, CacheLineSpanningWarning, FalseSharingAnalysis, FalseSharingWarning, StructLayout,
};
use std::collections::{BTreeMap, HashSet};
/// Type-name fragments that identify atomics and lock/sync primitives across
/// Rust, C, C++, and common ecosystem crates. Detection is a plain substring
/// test, so each entry is kept as specific as practical.
const ATOMIC_PATTERNS: &[&str] = &[
    // Rust standard-library atomics (fully qualified paths)
    "std::sync::atomic::Atomic",
    "core::sync::atomic::Atomic",
    // Rust standard-library sync primitives (built on internal atomics)
    "std::sync::Mutex",
    "std::sync::RwLock",
    "std::sync::Condvar",
    "std::sync::Once",
    "std::sync::OnceLock",
    "std::sync::Barrier",
    // C++ std::atomic across libstdc++/libc++ name manglings
    "std::atomic<",
    "std::__1::atomic<",
    "std::__cxx11::atomic<",
    // C11 _Atomic qualifier — both `_Atomic T` and `_Atomic(T)` spellings
    "_Atomic ",
    "_Atomic(",
    // parking_lot crate
    "parking_lot::Mutex",
    "parking_lot::RwLock",
    "parking_lot::Once",
    "parking_lot::Condvar",
    "parking_lot::ReentrantMutex",
    "parking_lot::FairMutex",
    "parking_lot::RawMutex",
    "parking_lot::RawRwLock",
    // crossbeam crates
    "crossbeam::atomic::AtomicCell",
    "crossbeam_utils::atomic::AtomicCell",
    "crossbeam_epoch::Atomic",
    // atomic_refcell crate
    "atomic_refcell::AtomicRefCell",
    // tokio sync primitives
    "tokio::sync::Mutex",
    "tokio::sync::RwLock",
    "tokio::sync::Semaphore",
    "tokio::sync::Notify",
    "tokio::sync::Barrier",
    "tokio::sync::OnceCell",
    // arc_swap crate
    "arc_swap::ArcSwap",
    "arc_swap::ArcSwapOption",
    "arc_swap::ArcSwapAny",
];

/// Returns `true` if `type_name` contains any known atomic/sync-primitive
/// pattern from [`ATOMIC_PATTERNS`].
fn is_atomic_type_by_name(type_name: &str) -> bool {
    for pattern in ATOMIC_PATTERNS {
        if type_name.contains(pattern) {
            return true;
        }
    }
    false
}
55
56/// Analyzes a struct layout for potential false sharing issues.
57///
58/// # Panics
59/// Panics if `cache_line_size` is 0.
60pub fn analyze_false_sharing(layout: &StructLayout, cache_line_size: u32) -> FalseSharingAnalysis {
61    assert!(cache_line_size > 0, "cache_line_size must be > 0");
62    let cache_line_size_u64 = cache_line_size as u64;
63
64    let atomic_members: Vec<AtomicMember> = layout
65        .members
66        .iter()
67        // Use DWARF-detected is_atomic flag OR fall back to string pattern matching
68        .filter(|m| m.is_atomic || is_atomic_type_by_name(&m.type_name))
69        .filter_map(|m| {
70            let offset = m.offset?;
71            let size = m.size?;
72            if size == 0 {
73                return None;
74            }
75            let cache_line = offset / cache_line_size_u64;
76            // Use checked arithmetic to handle malformed DWARF with extreme offsets
77            let Some(end_offset) = offset.checked_add(size).and_then(|v| v.checked_sub(1)) else {
78                return None; // Skip member with overflowing offset+size
79            };
80            let end_cache_line = end_offset / cache_line_size_u64;
81            let spans_cache_lines = end_cache_line > cache_line;
82
83            Some(AtomicMember {
84                name: m.name.clone(),
85                type_name: m.type_name.clone(),
86                offset,
87                size,
88                cache_line,
89                end_cache_line,
90                spans_cache_lines,
91            })
92        })
93        .collect();
94
95    if atomic_members.is_empty() {
96        return FalseSharingAnalysis::default();
97    }
98
99    // Generate spanning warnings for atomics that cross cache line boundaries
100    let spanning_warnings: Vec<CacheLineSpanningWarning> = atomic_members
101        .iter()
102        .filter(|m| m.spans_cache_lines)
103        .map(|m| CacheLineSpanningWarning {
104            member: m.name.clone(),
105            type_name: m.type_name.clone(),
106            offset: m.offset,
107            size: m.size,
108            start_cache_line: m.cache_line,
109            end_cache_line: m.end_cache_line,
110            lines_spanned: m.end_cache_line - m.cache_line + 1,
111        })
112        .collect();
113
114    if atomic_members.len() < 2 {
115        return FalseSharingAnalysis { atomic_members, warnings: Vec::new(), spanning_warnings };
116    }
117
118    // Group atomics by all cache lines they touch (not just start)
119    // Use BTreeMap for deterministic iteration order (ascending by cache_line)
120    let mut by_cache_line: BTreeMap<u64, Vec<&AtomicMember>> = BTreeMap::new();
121    for member in &atomic_members {
122        for cache_line in member.cache_line..=member.end_cache_line {
123            by_cache_line.entry(cache_line).or_default().push(member);
124        }
125    }
126
127    let mut warnings = Vec::new();
128    let mut seen_pairs: std::collections::HashSet<(&str, &str)> = std::collections::HashSet::new();
129
130    for (cache_line, members) in &by_cache_line {
131        if members.len() < 2 {
132            continue;
133        }
134
135        for i in 0..members.len() {
136            for j in (i + 1)..members.len() {
137                let a = members[i];
138                let b = members[j];
139
140                // Ensure consistent ordering and deduplicate
141                let (first, second) = if a.offset <= b.offset { (a, b) } else { (b, a) };
142
143                let pair_key = (first.name.as_str(), second.name.as_str());
144                if seen_pairs.contains(&pair_key) {
145                    continue;
146                }
147                seen_pairs.insert(pair_key);
148
149                // gap_bytes = second.offset - (first.offset + first.size)
150                // Negative = overlap, Zero = adjacent, Positive = gap
151                let first_end = first.offset.saturating_add(first.size);
152                // Safe conversion: cap values at i64::MAX before cast to avoid sign bit issues
153                let second_offset_i64 = second.offset.min(i64::MAX as u64) as i64;
154                let first_end_i64 = first_end.min(i64::MAX as u64) as i64;
155                let gap_bytes = second_offset_i64.saturating_sub(first_end_i64);
156
157                warnings.push(FalseSharingWarning {
158                    member_a: first.name.clone(),
159                    member_b: second.name.clone(),
160                    cache_line: *cache_line,
161                    gap_bytes,
162                });
163            }
164        }
165    }
166
167    // Sort by (cache_line, member_a, member_b) without cloning strings
168    warnings.sort_by(|a, b| {
169        a.cache_line
170            .cmp(&b.cache_line)
171            .then_with(|| a.member_a.cmp(&b.member_a))
172            .then_with(|| a.member_b.cmp(&b.member_b))
173    });
174
175    FalseSharingAnalysis { atomic_members, warnings, spanning_warnings }
176}
177
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::MemberLayout;

    /// The most common atomic type name used throughout these tests.
    const ATOMIC_U64: &str = "std::sync::atomic::AtomicU64";

    /// Wraps `members` in a 128-byte, 8-aligned struct ready for analysis.
    fn make_layout_with_members(members: Vec<MemberLayout>) -> StructLayout {
        let mut layout = StructLayout::new("TestStruct".to_string(), 128, Some(8));
        layout.members = members;
        layout
    }

    /// Shorthand for a member with a known offset and size.
    fn member(name: &str, type_name: &str, offset: u64, size: u64) -> MemberLayout {
        MemberLayout::new(name.to_string(), type_name.to_string(), Some(offset), Some(size))
    }

    #[test]
    fn test_two_atomics_same_cache_line() {
        let layout = make_layout_with_members(vec![
            member("counter", ATOMIC_U64, 0, 8),
            member("flag", "std::sync::atomic::AtomicBool", 8, 1),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
        let warning = &analysis.warnings[0];
        assert_eq!(warning.cache_line, 0);
        assert_eq!(warning.member_a, "counter");
        assert_eq!(warning.member_b, "flag");
        assert_eq!(warning.gap_bytes, 0); // Adjacent
    }

    #[test]
    fn test_two_atomics_different_cache_lines() {
        let layout = make_layout_with_members(vec![
            member("counter1", ATOMIC_U64, 0, 8),
            member("counter2", ATOMIC_U64, 64, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert!(analysis.warnings.is_empty());
    }

    #[test]
    fn test_three_atomics_same_cache_line() {
        let layout = make_layout_with_members(vec![
            member("a", ATOMIC_U64, 0, 8),
            member("b", ATOMIC_U64, 8, 8),
            member("c", ATOMIC_U64, 16, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 3);
        assert_eq!(analysis.warnings.len(), 3); // (a,b), (a,c), (b,c)
    }

    #[test]
    fn test_non_atomic_ignored() {
        let layout = make_layout_with_members(vec![
            member("counter", ATOMIC_U64, 0, 8),
            member("data", "u64", 8, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 1);
        assert!(analysis.warnings.is_empty());
    }

    #[test]
    fn test_cpp_atomic_detection() {
        let layout = make_layout_with_members(vec![
            member("a", "std::atomic<int>", 0, 4),
            member("b", "std::atomic<int>", 4, 4),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    #[test]
    fn test_c11_atomic_detection() {
        let layout = make_layout_with_members(vec![
            member("a", "_Atomic int", 0, 4),
            member("b", "_Atomic int", 4, 4),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    #[test]
    fn test_parking_lot_detection() {
        let layout = make_layout_with_members(vec![
            member("lock1", "parking_lot::Mutex<T>", 0, 8),
            member("lock2", "parking_lot::RwLock<T>", 8, 16),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    // New tests for P2

    #[test]
    fn test_std_sync_mutex_detection() {
        let layout = make_layout_with_members(vec![
            member("lock1", "std::sync::Mutex<i32>", 0, 16),
            member("lock2", "std::sync::RwLock<i32>", 16, 24),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    #[test]
    fn test_single_atomic_no_warnings() {
        let layout = make_layout_with_members(vec![member("counter", ATOMIC_U64, 0, 8)]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 1);
        assert!(analysis.warnings.is_empty());
        assert!(analysis.spanning_warnings.is_empty());
    }

    #[test]
    fn test_zero_size_atomic_ignored() {
        let layout = make_layout_with_members(vec![
            member("counter", ATOMIC_U64, 0, 8),
            // hypothetical zero-sized atomic
            member("zst", "std::sync::atomic::AtomicUnit", 8, 0),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 1);
        assert!(analysis.warnings.is_empty());
    }

    #[test]
    fn test_c11_atomic_parenthesized() {
        let layout = make_layout_with_members(vec![
            member("a", "_Atomic(int)", 0, 4),
            member("b", "_Atomic(int)", 4, 4),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    #[test]
    fn test_tokio_sync_detection() {
        let layout = make_layout_with_members(vec![
            member("lock1", "tokio::sync::Mutex<i32>", 0, 16),
            member("lock2", "tokio::sync::RwLock<i32>", 16, 24),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        assert_eq!(analysis.warnings.len(), 1);
    }

    // Tests for P3: cache line spanning

    #[test]
    fn test_atomic_spanning_cache_lines() {
        // An atomic at offset 60 with size 8 spans bytes 60-67, crossing the 64-byte boundary
        let layout = make_layout_with_members(vec![member("spanning", ATOMIC_U64, 60, 8)]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 1);
        let m = &analysis.atomic_members[0];
        assert!(m.spans_cache_lines);
        assert_eq!(m.cache_line, 0);
        assert_eq!(m.end_cache_line, 1);

        assert_eq!(analysis.spanning_warnings.len(), 1);
        assert_eq!(analysis.spanning_warnings[0].member, "spanning");
        assert_eq!(analysis.spanning_warnings[0].lines_spanned, 2);
    }

    #[test]
    fn test_atomic_not_spanning() {
        // An atomic at offset 0 with size 8 stays within cache line 0
        let layout = make_layout_with_members(vec![member("aligned", ATOMIC_U64, 0, 8)]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 1);
        assert!(!analysis.atomic_members[0].spans_cache_lines);
        assert!(analysis.spanning_warnings.is_empty());
    }

    #[test]
    fn test_gap_bytes_calculation() {
        let layout = make_layout_with_members(vec![
            member("a", ATOMIC_U64, 0, 8),
            // 8-byte gap between a (ends at 8) and b (starts at 16)
            member("b", ATOMIC_U64, 16, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.warnings.len(), 1);
        assert_eq!(analysis.warnings[0].gap_bytes, 8); // Positive gap
    }

    #[test]
    fn test_gap_bytes_adjacent() {
        let layout = make_layout_with_members(vec![
            member("a", ATOMIC_U64, 0, 8),
            // Adjacent: a ends at 8, b starts at 8
            member("b", ATOMIC_U64, 8, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.warnings.len(), 1);
        assert_eq!(analysis.warnings[0].gap_bytes, 0); // Zero = adjacent
    }

    #[test]
    fn test_spanning_atomic_shares_with_both_lines() {
        // Atomic at offset 60-67 spans cache lines 0 and 1
        // Another atomic at offset 70 is on cache line 1
        // They should produce a warning for cache line 1
        let layout = make_layout_with_members(vec![
            member("spanning", ATOMIC_U64, 60, 8),
            member("other", ATOMIC_U64, 70, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        // One warning for the pair on cache line 1
        assert_eq!(analysis.warnings.len(), 1);
        assert_eq!(analysis.warnings[0].cache_line, 1);
    }

    // Coverage tests for edge cases and newly fixed paths

    #[test]
    fn test_no_atomics_returns_default() {
        // Layout with only non-atomic members should return empty analysis
        let layout = make_layout_with_members(vec![
            member("x", "u64", 0, 8),
            member("y", "u64", 8, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert!(analysis.atomic_members.is_empty());
        assert!(analysis.warnings.is_empty());
        assert!(analysis.spanning_warnings.is_empty());
    }

    #[test]
    fn test_overflow_offset_skipped() {
        // Atomic with offset near u64::MAX that would overflow when adding size
        // Should be skipped gracefully, not panic
        let layout = make_layout_with_members(vec![
            member("normal", ATOMIC_U64, 0, 8),
            // offset + size (8) would overflow
            member("overflow", ATOMIC_U64, u64::MAX - 3, 8),
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        // Only the normal atomic should be included; overflow one is skipped
        assert_eq!(analysis.atomic_members.len(), 1);
        assert_eq!(analysis.atomic_members[0].name, "normal");
        assert!(analysis.warnings.is_empty());
    }

    #[test]
    fn test_duplicate_pair_dedupe_deterministic() {
        // Two atomics that both span cache lines 0 and 1
        // The pair should appear only ONCE, and always with the lowest cache_line (0)
        // This tests both the dedupe logic AND the BTreeMap determinism fix
        let layout = make_layout_with_members(vec![
            member("spanning_a", ATOMIC_U64, 60, 8), // spans cache lines 0 and 1
            member("spanning_b", ATOMIC_U64, 62, 8), // also spans cache lines 0 and 1
        ]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert_eq!(analysis.atomic_members.len(), 2);
        // Only ONE warning for this pair (dedupe worked)
        assert_eq!(analysis.warnings.len(), 1);
        // Should always be cache_line 0 (BTreeMap iteration is deterministic, ascending)
        assert_eq!(analysis.warnings[0].cache_line, 0);
        assert_eq!(analysis.warnings[0].member_a, "spanning_a");
        assert_eq!(analysis.warnings[0].member_b, "spanning_b");
    }

    #[test]
    fn test_empty_layout_returns_default() {
        // Layout with no members at all
        let layout = make_layout_with_members(vec![]);

        let analysis = analyze_false_sharing(&layout, 64);

        assert!(analysis.atomic_members.is_empty());
        assert!(analysis.warnings.is_empty());
        assert!(analysis.spanning_warnings.is_empty());
    }
}