// padlock-core/src/analysis/false_sharing.rs
2
use crate::ir::{AccessPattern, SharingConflict, StructLayout};
4
/// Normalise a guard name for comparison.
///
/// Strips language-specific prefixes that refer to the same object:
/// - Leading `&` / `*` dereference operators (e.g. `&self.mu`, `*mu`)
/// - `self.mu` → `mu`  (Rust / Python)
/// - `this->mu` → `mu` (C++ member)
/// - `this.mu` → `mu`  (Go/Java style)
///
/// Dereference operators are trimmed *before* the receiver prefix is removed,
/// so combined forms such as `&self.mu` and `*this->mu` also normalise to the
/// bare member name. Only the outermost receiver prefix is stripped so that
/// deeply qualified names (e.g. `self.inner.mu`) still compare differently
/// from `self.outer.mu`.
pub fn normalize_guard(guard: &str) -> &str {
    // The dereference sits *outside* the receiver (`&self.mu`, `*this->mu`),
    // so trim it first — otherwise the prefix match below would never fire
    // and `&self.mu` would wrongly compare unequal to `mu`.
    let s = guard.trim_start_matches(['&', '*']);
    s.strip_prefix("self.")
        .or_else(|| s.strip_prefix("this->"))
        .or_else(|| s.strip_prefix("this."))
        .unwrap_or(s)
}
23
24/// Return all groups of fields that share a cache line.
25/// Any cache line with two or more fields is a potential false-sharing hazard.
26pub fn find_sharing_conflicts(layout: &StructLayout) -> Vec<SharingConflict> {
27    let line = layout.arch.cache_line_size;
28    if line == 0 || layout.fields.is_empty() {
29        return Vec::new();
30    }
31
32    let mut buckets: std::collections::BTreeMap<usize, Vec<String>> =
33        std::collections::BTreeMap::new();
34    for field in &layout.fields {
35        if matches!(field.access, AccessPattern::Padding) {
36            continue;
37        }
38        let cl = field.offset / line;
39        buckets.entry(cl).or_default().push(field.name.clone());
40    }
41
42    buckets
43        .into_iter()
44        .filter(|(_, fields)| fields.len() > 1)
45        .map(|(cache_line, fields)| SharingConflict { fields, cache_line })
46        .collect()
47}
48
49/// Return `true` if any cache line contains two or more `Concurrent` fields
50/// with *different* lock guards — a confirmed false-sharing hazard.
51///
52/// ## Heuristic tightening
53///
54/// The type-name heuristic assigns each field's own name as its guard, so two
55/// `AtomicU64` fields always receive different guards and would naively trigger
56/// this check. However, two purely-atomic fields sharing a cache line is a
57/// performance concern (cache-line bouncing) rather than *false sharing* in the
58/// classical lock-based sense. To avoid noisy findings from the heuristic, we
59/// only flag a conflict when **at least one** of the two fields has
60/// `is_atomic: false` (i.e. it is a mutex/lock type, or was explicitly
61/// annotated as lock-protected data).
62///
63/// Explicit guard annotations (`GUARDED_BY`, `#[lock_protected_by]`, etc.) always
64/// set `is_atomic: false`, so annotated conflicts are always reported.
65pub fn has_false_sharing(layout: &StructLayout) -> bool {
66    let line = layout.arch.cache_line_size;
67    if line == 0 {
68        return false;
69    }
70
71    let concurrent: Vec<(usize, Option<&str>, bool)> = layout
72        .fields
73        .iter()
74        .filter_map(|f| {
75            if let AccessPattern::Concurrent {
76                guard, is_atomic, ..
77            } = &f.access
78            {
79                Some((f.offset / line, guard.as_deref(), *is_atomic))
80            } else {
81                None
82            }
83        })
84        .collect();
85
86    for i in 0..concurrent.len() {
87        for j in (i + 1)..concurrent.len() {
88            let (cl_a, guard_a, atomic_a) = concurrent[i];
89            let (cl_b, guard_b, atomic_b) = concurrent[j];
90            if cl_a == cl_b && guard_a.map(normalize_guard) != guard_b.map(normalize_guard) {
91                // Skip if both fields are purely atomic with no lock involvement —
92                // that pattern is handled by the locality analysis, not false sharing.
93                if atomic_a && atomic_b {
94                    continue;
95                }
96                return true;
97            }
98        }
99    }
100    false
101}
102
// ── tests ─────────────────────────────────────────────────────────────────────
104
#[cfg(test)]
mod tests {
    use super::*;
    use crate::arch::X86_64_SYSV;
    use crate::ir::{Field, StructLayout, TypeInfo};

    /// Wrap `fields` in a minimal x86-64 SysV layout (64-byte cache lines
    /// per `X86_64_SYSV` — TODO confirm against `crate::arch`).
    fn make_layout(fields: Vec<Field>) -> StructLayout {
        StructLayout {
            name: "T".into(),
            total_size: 128,
            align: 8,
            fields,
            source_file: None,
            source_line: None,
            arch: &X86_64_SYSV,
            is_packed: false,
            is_union: false,
            is_repr_rust: false,
            suppressed_findings: Vec::new(),
            uncertain_fields: Vec::new(),
        }
    }

    /// Lock-protected (non-atomic) 8-byte `u64` field guarded by `guard`.
    fn concurrent(name: &str, offset: usize, guard: &str) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(guard.into()),
                is_atomic: false,
                is_annotated: false,
            },
        }
    }

    /// Purely atomic 8-byte field. Mirrors the type-name heuristic: the
    /// field's own name doubles as its guard, so two atomics always carry
    /// different guards.
    fn atomic(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "AtomicU64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Concurrent {
                guard: Some(name.into()),
                is_atomic: true,
                is_annotated: false,
            },
        }
    }

    /// 8-byte field with `AccessPattern::Unknown` — never treated as
    /// `Concurrent`, but still counted by `find_sharing_conflicts`.
    fn plain(name: &str, offset: usize) -> Field {
        Field {
            name: name.into(),
            ty: TypeInfo::Primitive {
                name: "u64".into(),
                size: 8,
                align: 8,
            },
            offset,
            size: 8,
            align: 8,
            source_file: None,
            source_line: None,
            access: AccessPattern::Unknown,
        }
    }

    // ── find_sharing_conflicts ────────────────────────────────────────────────

    #[test]
    fn two_fields_on_same_line_is_conflict() {
        // Offsets 0 and 8 both land on cache line 0 (line size 64).
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        let conflicts = find_sharing_conflicts(&layout);
        assert_eq!(conflicts.len(), 1);
        assert_eq!(conflicts[0].cache_line, 0);
    }

    #[test]
    fn fields_on_different_lines_no_conflict() {
        // Offset 64 starts cache line 1, so each line holds one field.
        let layout = make_layout(vec![plain("a", 0), plain("b", 64)]);
        assert!(find_sharing_conflicts(&layout).is_empty());
    }

    // ── has_false_sharing ─────────────────────────────────────────────────────

    #[test]
    fn has_false_sharing_when_different_guards_same_line() {
        let layout = make_layout(vec![
            concurrent("readers", 0, "lock_a"),
            concurrent("writers", 8, "lock_b"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_same_guard() {
        // Same guard on the same line is intentional co-location, not a hazard.
        let layout = make_layout(vec![concurrent("a", 0, "mu"), concurrent("b", 8, "mu")]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_all_unknown() {
        // Non-Concurrent fields are ignored entirely by has_false_sharing.
        let layout = make_layout(vec![plain("a", 0), plain("b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_different_lines() {
        let layout = make_layout(vec![
            concurrent("a", 0, "lock_a"),
            concurrent("b", 64, "lock_b"),
        ]);
        assert!(!has_false_sharing(&layout));
    }

    // Heuristic tightening: two pure atomics sharing a cache line is cache-line
    // bouncing (a locality concern), not classical false sharing.
    #[test]
    fn no_false_sharing_for_two_pure_atomics_same_line() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 8)]);
        assert!(!has_false_sharing(&layout));
    }

    // A mutex-protected field paired with an atomic on the same line IS false sharing.
    #[test]
    fn false_sharing_for_atomic_and_mutex_same_line() {
        let layout = make_layout(vec![
            atomic("hot_counter", 0),
            concurrent("protected_data", 8, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Three fields: two atomics (same line) plus one mutex-protected — the mutex
    // conflicts with both atomics, so false sharing should be detected.
    #[test]
    fn false_sharing_detected_with_mixed_atomics_and_mutex() {
        let layout = make_layout(vec![
            atomic("reads", 0),
            atomic("writes", 8),
            concurrent("state", 16, "mu"),
        ]);
        assert!(has_false_sharing(&layout));
    }

    // Two atomics on *different* cache lines (offsets 0 and 64) — no shared
    // line at all, so no false sharing regardless of the guard heuristic.
    #[test]
    fn no_false_sharing_for_pure_atomics_on_different_lines() {
        let layout = make_layout(vec![atomic("counter_a", 0), atomic("counter_b", 64)]);
        assert!(!has_false_sharing(&layout));
    }

    // ── normalize_guard ───────────────────────────────────────────────────────

    #[test]
    fn normalize_strips_self_prefix() {
        assert_eq!(normalize_guard("self.mu"), "mu");
    }

    #[test]
    fn normalize_strips_this_arrow_prefix() {
        assert_eq!(normalize_guard("this->mu"), "mu");
    }

    #[test]
    fn normalize_strips_this_dot_prefix() {
        assert_eq!(normalize_guard("this.mu"), "mu");
    }

    #[test]
    fn normalize_strips_leading_ampersand() {
        assert_eq!(normalize_guard("&mu"), "mu");
    }

    #[test]
    fn normalize_strips_leading_star() {
        assert_eq!(normalize_guard("*mu"), "mu");
    }

    #[test]
    fn normalize_no_change_for_plain_name() {
        assert_eq!(normalize_guard("mu"), "mu");
    }

    // Guards with different receiver prefixes but the same base name should NOT
    // trigger false sharing.
    #[test]
    fn no_false_sharing_when_guards_differ_only_by_self_prefix() {
        // "self.mu" and "mu" normalise to the same base name.
        let layout = make_layout(vec![
            concurrent("readers", 0, "self.mu"),
            concurrent("writers", 8, "mu"),
        ]);
        assert!(!has_false_sharing(&layout));
    }

    #[test]
    fn no_false_sharing_when_guards_differ_only_by_this_arrow_prefix() {
        let layout = make_layout(vec![
            concurrent("readers", 0, "this->lock"),
            concurrent("writers", 8, "lock"),
        ]);
        assert!(!has_false_sharing(&layout));
    }
}