Skip to main content

padlock_core/analysis/
false_sharing.rs

1// padlock-core/src/analysis/false_sharing.rs
2
3use crate::ir::{AccessPattern, SharingConflict, StructLayout};
4
/// Normalise a guard name for comparison.
///
/// Strips language-specific prefixes that refer to the same object:
/// - `self.mu` → `mu`  (Rust / Python)
/// - `this->mu` → `mu` (C++ member)
/// - `this.mu` → `mu`  (Go/Java style)
/// - Leading `&` / `*` dereference operators, whether they appear before the
///   receiver prefix (`&self.mu`) or after it (`self.&mu`) — both normalise
///   to `mu`.
///
/// Only the outermost layer of each prefix is stripped so that deeply qualified
/// names (e.g. `self.inner.mu`) still compare differently from `self.outer.mu`.
pub fn normalize_guard(guard: &str) -> &str {
    // Drop dereference operators *before* the receiver-prefix strip so that
    // `&self.mu` and `self.mu` agree. The previous order stripped the prefix
    // first, leaving `&self.mu` as `self.mu` — unequal to `mu` and a source of
    // spurious false-sharing findings.
    let s = guard.trim_start_matches(['&', '*']);
    let s = s
        .strip_prefix("self.")
        .or_else(|| s.strip_prefix("this->"))
        .or_else(|| s.strip_prefix("this."))
        .unwrap_or(s);
    // Also drop operators that followed the prefix (e.g. `self.&mu`),
    // preserving the original behaviour for those inputs.
    s.trim_start_matches(['&', '*'])
}
23
24/// Return all groups of fields that share a cache line.
25/// Any cache line with two or more fields is a potential false-sharing hazard.
26pub fn find_sharing_conflicts(layout: &StructLayout) -> Vec<SharingConflict> {
27    let line = layout.arch.cache_line_size;
28    if line == 0 || layout.fields.is_empty() {
29        return Vec::new();
30    }
31
32    let mut buckets: std::collections::BTreeMap<usize, Vec<String>> =
33        std::collections::BTreeMap::new();
34    for field in &layout.fields {
35        if matches!(field.access, AccessPattern::Padding) {
36            continue;
37        }
38        let cl = field.offset / line;
39        buckets.entry(cl).or_default().push(field.name.clone());
40    }
41
42    buckets
43        .into_iter()
44        .filter(|(_, fields)| fields.len() > 1)
45        .map(|(cache_line, fields)| SharingConflict { fields, cache_line })
46        .collect()
47}
48
49/// Return `true` if any cache line contains two or more `Concurrent` fields
50/// with *different* lock guards — a confirmed false-sharing hazard.
51///
52/// ## Heuristic tightening
53///
54/// The type-name heuristic assigns each field's own name as its guard, so two
55/// `AtomicU64` fields always receive different guards and would naively trigger
56/// this check. However, two purely-atomic fields sharing a cache line is a
57/// performance concern (cache-line bouncing) rather than *false sharing* in the
58/// classical lock-based sense. To avoid noisy findings from the heuristic, we
59/// only flag a conflict when **at least one** of the two fields has
60/// `is_atomic: false` (i.e. it is a mutex/lock type, or was explicitly
61/// annotated as lock-protected data).
62///
63/// Explicit guard annotations (`GUARDED_BY`, `#[lock_protected_by]`, etc.) always
64/// set `is_atomic: false`, so annotated conflicts are always reported.
65pub fn has_false_sharing(layout: &StructLayout) -> bool {
66    let line = layout.arch.cache_line_size;
67    if line == 0 {
68        return false;
69    }
70
71    let concurrent: Vec<(usize, Option<&str>, bool)> = layout
72        .fields
73        .iter()
74        .filter_map(|f| {
75            if let AccessPattern::Concurrent {
76                guard, is_atomic, ..
77            } = &f.access
78            {
79                Some((f.offset / line, guard.as_deref(), *is_atomic))
80            } else {
81                None
82            }
83        })
84        .collect();
85
86    for i in 0..concurrent.len() {
87        for j in (i + 1)..concurrent.len() {
88            let (cl_a, guard_a, atomic_a) = concurrent[i];
89            let (cl_b, guard_b, atomic_b) = concurrent[j];
90            if cl_a == cl_b && guard_a.map(normalize_guard) != guard_b.map(normalize_guard) {
91                // Skip if both fields are purely atomic with no lock involvement —
92                // that pattern is handled by the locality analysis, not false sharing.
93                if atomic_a && atomic_b {
94                    continue;
95                }
96                return true;
97            }
98        }
99    }
100    false
101}
102
103// ── tests ─────────────────────────────────────────────────────────────────────
104
#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch::X86_64_SYSV;
    use crate::ir::{Field, StructLayout, TypeInfo};

    // Wrap `fields` in a minimal layout on the x86-64 SysV target.
    // NOTE(review): the offsets below (0/8 = same line, 64 = next line)
    // assume a 64-byte cache line — confirm against `X86_64_SYSV`.
    fn make_layout(fields: Vec<Field>) -> StructLayout {
        StructLayout {
            name: "T".into(),
            total_size: 128,
            align: 8,
            fields,
            source_file: None,
            source_line: None,
            arch: &X86_64_SYSV,
            is_packed: false,
            is_union: false,
            is_repr_rust: false,
            suppressed_findings: Vec::new(),
        }
    }

    // Lock-protected (non-atomic) 8-byte field guarded by `guard`.
    fn concurrent(name: &str, offset: usize, guard: &str) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(guard.into()),
                is_atomic: false,
                is_annotated: false,
            },
        }
    }

    // Purely atomic 8-byte field. Mirrors the type-name heuristic, which
    // assigns the field's own name as its guard (so two atomics always have
    // different guards).
    fn atomic(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "AtomicU64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(name.into()),
                is_atomic: true,
                is_annotated: false,
            },
        }
    }

    // 8-byte field with no known access pattern — counted by
    // `find_sharing_conflicts` but ignored by `has_false_sharing`.
    fn plain(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Unknown,
        }
    }

    #[test]
    fn two_fields_on_same_line_is_conflict() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        let conflicts = find_sharing_conflicts(&layout);
        assert_eq!(conflicts.len(), 1);
        assert_eq!(conflicts[0].cache_line, 0);
    }

    #[test]
    fn fields_on_different_lines_no_conflict() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 64)]);
        assert!(find_sharing_conflicts(&layout).is_empty());
    }

    #[test]
    fn has_false_sharing_when_different_guards_same_line() {
        let layout = make_layout(vec![
            concurrent("readers", 0, "lock_a"),
            concurrent("writers", 8, "lock_b"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_same_guard() {
        let layout = make_layout(vec![concurrent("a", 0, "mu"), concurrent("b", 8, "mu")]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_all_unknown() {
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_different_lines() {
        let layout = make_layout(vec![
            concurrent("a", 0, "lock_a"),
            concurrent("b", 64, "lock_b"),
        ]);
        assert!(!has_false_sharing(&layout));
    }

    // Heuristic tightening: two pure atomics sharing a cache line is cache-line
    // bouncing (a locality concern), not classical false sharing.
    #[test]
    fn no_false_sharing_for_two_pure_atomics_same_line() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    // A mutex-protected field paired with an atomic on the same line IS false sharing.
    #[test]
    fn false_sharing_for_atomic_and_mutex_same_line() {
        let layout = make_layout(vec![
            atomic("hot_counter", 0),
            concurrent("protected_data", 8, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Three fields: two atomics (same line) plus one mutex-protected — the mutex
    // conflicts with both atomics, so false sharing should be detected.
    #[test]
    fn false_sharing_detected_with_mixed_atomics_and_mutex() {
        let layout = make_layout(vec![
            atomic("reads", 0),
            atomic("writes", 8),
            concurrent("state", 16, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Two pure atomics on *different* cache lines (offsets 0 and 64) — no line
    // is shared at all, so nothing can be flagged regardless of guards.
    #[test]
    fn no_false_sharing_for_pure_atomics_on_different_lines() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 64)]);
        assert!(!has_false_sharing(&layout));
    }

    // ── normalize_guard ───────────────────────────────────────────────────────

    #[test]
    fn normalize_strips_self_prefix() {
        assert_eq!(normalize_guard("self.mu"), "mu");
    }

    #[test]
    fn normalize_strips_this_arrow_prefix() {
        assert_eq!(normalize_guard("this->mu"), "mu");
    }

    #[test]
    fn normalize_strips_this_dot_prefix() {
        assert_eq!(normalize_guard("this.mu"), "mu");
    }

    #[test]
    fn normalize_strips_leading_ampersand() {
        assert_eq!(normalize_guard("&mu"), "mu");
    }

    #[test]
    fn normalize_strips_leading_star() {
        assert_eq!(normalize_guard("*mu"), "mu");
    }

    #[test]
    fn normalize_no_change_for_plain_name() {
        assert_eq!(normalize_guard("mu"), "mu");
    }

    // Guards with different receiver prefixes but the same base name should NOT
    // trigger false sharing.
    #[test]
    fn no_false_sharing_when_guards_differ_only_by_self_prefix() {
        // "self.mu" and "mu" normalise to the same base name.
        let layout = make_layout(vec![
            concurrent("readers", 0, "self.mu"),
            concurrent("writers", 8, "mu"),
        ]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_guards_differ_only_by_this_arrow_prefix() {
        let layout = make_layout(vec![
            concurrent("readers", 0, "this->lock"),
            concurrent("writers", 8, "lock"),
        ]);
        assert!(!has_false_sharing(&layout));
    }
}
319}