// Listing: padlock_core/src/analysis/false_sharing.rs
1// padlock-core/src/analysis/false_sharing.rs
2
3use crate::ir::{AccessPattern, SharingConflict, StructLayout};
4
5/// Return all groups of fields that share a cache line.
6/// Any cache line with two or more fields is a potential false-sharing hazard.
7pub fn find_sharing_conflicts(layout: &StructLayout) -> Vec<SharingConflict> {
8    let line = layout.arch.cache_line_size;
9    if line == 0 || layout.fields.is_empty() {
10        return Vec::new();
11    }
12
13    let mut buckets: std::collections::BTreeMap<usize, Vec<String>> =
14        std::collections::BTreeMap::new();
15    for field in &layout.fields {
16        if matches!(field.access, AccessPattern::Padding) {
17            continue;
18        }
19        let cl = field.offset / line;
20        buckets.entry(cl).or_default().push(field.name.clone());
21    }
22
23    buckets
24        .into_iter()
25        .filter(|(_, fields)| fields.len() > 1)
26        .map(|(cache_line, fields)| SharingConflict { fields, cache_line })
27        .collect()
28}
29
30/// Return `true` if any cache line contains two or more `Concurrent` fields
31/// with *different* lock guards — a confirmed false-sharing hazard.
32///
33/// ## Heuristic tightening
34///
35/// The type-name heuristic assigns each field's own name as its guard, so two
36/// `AtomicU64` fields always receive different guards and would naively trigger
37/// this check. However, two purely-atomic fields sharing a cache line is a
38/// performance concern (cache-line bouncing) rather than *false sharing* in the
39/// classical lock-based sense. To avoid noisy findings from the heuristic, we
40/// only flag a conflict when **at least one** of the two fields has
41/// `is_atomic: false` (i.e. it is a mutex/lock type, or was explicitly
42/// annotated as lock-protected data).
43///
44/// Explicit guard annotations (`GUARDED_BY`, `#[lock_protected_by]`, etc.) always
45/// set `is_atomic: false`, so annotated conflicts are always reported.
46pub fn has_false_sharing(layout: &StructLayout) -> bool {
47    let line = layout.arch.cache_line_size;
48    if line == 0 {
49        return false;
50    }
51
52    let concurrent: Vec<(usize, Option<&str>, bool)> = layout
53        .fields
54        .iter()
55        .filter_map(|f| {
56            if let AccessPattern::Concurrent { guard, is_atomic } = &f.access {
57                Some((f.offset / line, guard.as_deref(), *is_atomic))
58            } else {
59                None
60            }
61        })
62        .collect();
63
64    for i in 0..concurrent.len() {
65        for j in (i + 1)..concurrent.len() {
66            let (cl_a, guard_a, atomic_a) = concurrent[i];
67            let (cl_b, guard_b, atomic_b) = concurrent[j];
68            if cl_a == cl_b && guard_a != guard_b {
69                // Skip if both fields are purely atomic with no lock involvement —
70                // that pattern is handled by the locality analysis, not false sharing.
71                if atomic_a && atomic_b {
72                    continue;
73                }
74                return true;
75            }
76        }
77    }
78    false
79}
80
81// ── tests ─────────────────────────────────────────────────────────────────────
82
#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch::X86_64_SYSV;
    use crate::ir::{Field, StructLayout, TypeInfo};

    /// Build a minimal 128-byte layout around `fields`, targeting the
    /// x86-64 SysV ABI (64-byte cache lines).
    fn make_layout(fields: Vec<Field>) -> StructLayout {
        StructLayout {
            name: "T".into(),
            total_size: 128,
            align: 8,
            fields,
            source_file: None,
            source_line: None,
            arch: &X86_64_SYSV,
            is_packed: false,
            is_union: false,
            is_repr_rust: false,
            suppressed_findings: Vec::new(),
        }
    }

    /// An 8-byte lock-protected field (`is_atomic: false`) guarded by `guard`.
    fn concurrent(name: &str, offset: usize, guard: &str) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(guard.into()),
                is_atomic: false,
            },
        }
    }

    /// An 8-byte atomic field; the heuristic uses the field's own name as its
    /// guard, so every atomic gets a distinct guard.
    fn atomic(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "AtomicU64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(name.into()),
                is_atomic: true,
            },
        }
    }

    /// An 8-byte field with no known access pattern.
    fn plain(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Unknown,
        }
    }

    #[test]
    fn two_fields_on_same_line_is_conflict() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        let conflicts = find_sharing_conflicts(&layout);
        assert_eq!(conflicts.len(), 1);
        assert_eq!(conflicts[0].cache_line, 0);
    }

    #[test]
    fn fields_on_different_lines_no_conflict() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 64)]);
        assert!(find_sharing_conflicts(&layout).is_empty());
    }

    #[test]
    fn has_false_sharing_when_different_guards_same_line() {
        let layout = make_layout(vec![
            concurrent("readers", 0, "lock_a"),
            concurrent("writers", 8, "lock_b"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_same_guard() {
        let layout = make_layout(vec![concurrent("a", 0, "mu"), concurrent("b", 8, "mu")]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_all_unknown() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_different_lines() {
        let layout = make_layout(vec![
            concurrent("a", 0, "lock_a"),
            concurrent("b", 64, "lock_b"),
        ]);
        assert!(!has_false_sharing(&layout));
    }

    // Heuristic tightening: two pure atomics sharing a cache line is cache-line
    // bouncing (a locality concern), not classical false sharing.
    #[test]
    fn no_false_sharing_for_two_pure_atomics_same_line() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    // A mutex-protected field paired with an atomic on the same line IS false sharing.
    #[test]
    fn false_sharing_for_atomic_and_mutex_same_line() {
        let layout = make_layout(vec![
            atomic("hot_counter", 0),
            concurrent("protected_data", 8, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Three fields: two atomics (same line) plus one mutex-protected — the mutex
    // conflicts with both atomics, so false sharing should be detected.
    #[test]
    fn false_sharing_detected_with_mixed_atomics_and_mutex() {
        let layout = make_layout(vec![
            atomic("reads", 0),
            atomic("writes", 8),
            concurrent("state", 16, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Two atomics on *different* cache lines never conflict — even though each
    // carries a distinct heuristic guard, they don't share a line at all.
    #[test]
    fn no_false_sharing_for_pure_atomics_on_different_lines() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 64)]);
        assert!(!has_false_sharing(&layout));
    }
}