// memscope_rs/analysis/relation_inference/slice_detector.rs
1//! Slice detection — allocation pointing into the middle of another.
2//!
3//! A Slice relationship exists when allocation A's pointer falls strictly
4//! inside allocation B's address range (not at the start), and A is small
5//! enough to be a slice metadata (fat pointer).
6//!
7//! # False Positive Mitigation
8//!
9//! Slice detection requires `type_kind == FatPtr` to reduce false positives.
10//! This ensures only fat pointer types (&[T], &str, etc.) are considered
11//! as potential slice metadata, filtering out struct fields that happen
12//! to point into other allocations.
13
14use crate::analysis::relation_inference::{InferenceRecord, RangeMap, Relation, RelationEdge};
15use crate::analysis::unsafe_inference::TypeKind;
16
/// Minimum address accepted as a valid heap pointer.
/// `detect_slice` rejects any record whose `ptr` is below this threshold
/// (0x1000 is the conventional first-page boundary, which is typically
/// unmapped — presumably to filter out null and small-integer values that
/// merely look like pointers; TODO confirm against allocator assumptions).
const MIN_VALID_POINTER: usize = 0x1000;

/// Maximum size for a slice allocation to be considered a Slice relationship.
/// Fat pointers (&[T], &str) are 16 bytes; we allow up to 256 to cover
/// small wrapper structs while filtering out large struct false positives.
const MAX_SLICE_SIZE: usize = 256;
23
24/// Detect Slice relationships for all allocations.
25///
26/// A Slice edge A → B is created when:
27/// 1. A's pointer falls strictly inside B's range: `B.start < A.ptr < B.end`
28/// 2. A's memory range fits within B: `A.ptr + A.size <= B.end`
29/// 3. A is small enough: `A.size <= MAX_SLICE_SIZE`
30/// 4. A is inferred as FatPtr type (reduces false positives)
31///
32/// # Arguments
33///
34/// * `records` - All inference records with allocation metadata.
35/// * `allocations` - Original allocations list for address range lookup.
36/// * `range_map` - Pre-built RangeMap for O(log n) address lookups.
37pub fn detect_slice(
38    records: &[InferenceRecord],
39    allocations: &[crate::snapshot::types::ActiveAllocation],
40    range_map: &RangeMap,
41) -> Vec<RelationEdge> {
42    let mut relations = Vec::new();
43
44    for record in records {
45        // Type constraint: only FatPtr types can be slice metadata
46        // This filters out struct fields that happen to point into other allocations
47        if record.type_kind != TypeKind::FatPtr {
48            continue;
49        }
50
51        if record.size > MAX_SLICE_SIZE {
52            continue;
53        }
54
55        let ptr = record.ptr;
56        if ptr == 0 || ptr < MIN_VALID_POINTER {
57            continue;
58        }
59
60        let Some(target_id) = range_map.find_containing(ptr) else {
61            continue;
62        };
63
64        if target_id == record.id {
65            continue;
66        }
67
68        let target = &allocations[target_id];
69        let target_start = match target.ptr {
70            Some(p) => p,
71            None => continue, // Skip allocations without heap pointer
72        };
73        let target_end = target_start.saturating_add(target.size);
74
75        if ptr == target_start {
76            continue;
77        }
78
79        let slice_end = ptr.saturating_add(record.size);
80        if slice_end > target_end {
81            continue;
82        }
83
84        relations.push(RelationEdge {
85            from: record.id,
86            to: target_id,
87            relation: Relation::Slice,
88        });
89    }
90
91    relations
92}
93
#[cfg(test)]
mod tests {
    use super::*;
    use crate::analysis::unsafe_inference::TypeKind;

    /// Build a minimal heap-owner allocation at `ptr` with `size` bytes.
    fn make_alloc(ptr: usize, size: usize) -> crate::snapshot::types::ActiveAllocation {
        crate::snapshot::types::ActiveAllocation {
            ptr: Some(ptr),
            kind: crate::core::types::TrackKind::HeapOwner { ptr, size },
            size,
            allocated_at: 0,
            var_name: None,
            type_name: None,
            thread_id: 0,
            call_stack_hash: None,
            module_path: None,
            stack_ptr: None,
        }
    }

    /// Build an inference record pre-classified as FatPtr (the kind
    /// detect_slice considers as potential slice metadata).
    fn make_record(id: usize, ptr: usize, size: usize) -> InferenceRecord {
        InferenceRecord {
            id,
            ptr,
            size,
            memory: None,
            type_kind: TypeKind::FatPtr,
            confidence: 100,
            call_stack_hash: None,
            alloc_time: 0,
            stack_ptr: None,
        }
    }

    #[test]
    fn test_slice_size_boundary_256() {
        // A 256-byte allocation pointing into a buffer should be detected.
        let slice_ptr: usize = 0x10050;
        let allocs = vec![
            make_alloc(0x5000, 256), // exactly at MAX_SLICE_SIZE boundary
            make_alloc(0x10000, 0x2000),
        ];
        let range_map = RangeMap::new(&allocs);
        let records = vec![make_record(0, slice_ptr, 256)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert_eq!(edges.len(), 1);

        // A 257-byte allocation should NOT be detected (exceeds threshold).
        // BUGFIX: build the RangeMap from allocs2 — the original test reused
        // `range_map` (built from `allocs`), passing mismatched arguments.
        let allocs2 = vec![make_alloc(0x5000, 257), make_alloc(0x10000, 0x2000)];
        let range_map2 = RangeMap::new(&allocs2);
        let records2 = vec![make_record(0, slice_ptr, 257)];
        let edges2 = detect_slice(&records2, &allocs2, &range_map2);
        assert!(edges2.is_empty());
    }

    #[test]
    fn test_slice_detection_basic() {
        // Scenario: a small allocation (fat pointer metadata) at 0x5000
        // points into the middle of a large buffer at 0x10000.
        // All allocations are non-overlapping — realistic heap layout.
        let slice_ptr: usize = 0x10050; // Inside the buffer, not at start
        let allocs = vec![
            make_alloc(0x5000, 16),      // slice metadata (alloc[0])
            make_alloc(0x10000, 0x2000), // large buffer (alloc[1])
        ];
        let range_map = RangeMap::new(&allocs);

        // Record[0] represents the slice: ptr=0x10050 (inside alloc[1])
        let records = vec![make_record(0, slice_ptr, 16)];
        let edges = detect_slice(&records, &allocs, &range_map);

        assert_eq!(edges.len(), 1);
        assert_eq!(edges[0].from, 0); // slice →
        assert_eq!(edges[0].to, 1); // ... buffer
        assert_eq!(edges[0].relation, Relation::Slice);
    }

    #[test]
    fn test_slice_too_large() {
        // 1024 bytes exceeds MAX_SLICE_SIZE → no edge regardless of layout.
        let allocs = vec![make_alloc(0x1000, 1024), make_alloc(0x5000, 4096)];
        let range_map = RangeMap::new(&allocs);

        let records = vec![make_record(0, 0x1000, 1024)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert!(edges.is_empty());
    }

    #[test]
    fn test_slice_at_target_start_is_not_slice() {
        // Slice pointer at the exact start of the target buffer → should be Owner, not Slice.
        // Non-overlapping: alloc[0] at 0x5000 (metadata), alloc[1] at 0x10000 (buffer).
        let allocs = vec![make_alloc(0x5000, 16), make_alloc(0x10000, 0x2000)];
        let range_map = RangeMap::new(&allocs);

        // Slice pointer equals the target's start address.
        let records = vec![make_record(0, 0x10000, 16)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert!(edges.is_empty()); // Points to start → Owner, not Slice
    }

    #[test]
    fn test_slice_overflowing_target() {
        // Slice extends beyond the target's end → should not match.
        // slice_ptr=0x100F0, size=256 → end=0x101F0
        // target alloc at 0x10000, size=0x100 → end=0x10100
        // 0x101F0 > 0x10100 → overflows
        let slice_ptr: usize = 0x100F0;
        let allocs = vec![
            make_alloc(0x5000, 256),    // slice metadata (alloc[0])
            make_alloc(0x10000, 0x100), // target buffer (alloc[1])
        ];
        let range_map = RangeMap::new(&allocs);

        let records = vec![make_record(0, slice_ptr, 256)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert!(edges.is_empty()); // Slice extends past target end
    }

    #[test]
    fn test_slice_no_containing_allocation() {
        // Pointer 0x1000 falls inside no allocation → no edge.
        let allocs = vec![make_alloc(0x9000, 100)];
        let range_map = RangeMap::new(&allocs);

        let records = vec![make_record(0, 0x1000, 16)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert!(edges.is_empty());
    }

    #[test]
    fn test_slice_self_reference_skipped() {
        // Record[0]'s ptr (0x5010) falls inside alloc[0] itself (0x5000..0x5064).
        // Self-reference should be skipped.
        let allocs = vec![make_alloc(0x5000, 100)];
        let range_map = RangeMap::new(&allocs);

        // ptr=0x5010 is inside alloc[0] (0x5000..0x5064)
        let records = vec![make_record(0, 0x5010, 16)];
        let edges = detect_slice(&records, &allocs, &range_map);
        assert!(edges.is_empty()); // target_id == record.id → skipped
    }

    #[test]
    fn test_slice_multiple_candidates() {
        // Realistic heap layout: two large buffers and two small slice metadatas.
        // alloc[0]: 0x10000..0x12000 (buffer A, 8KB)
        // alloc[1]: 0x20000..0x22000 (buffer B, 8KB)
        // alloc[2]: 0x5000 (slice A metadata, 16 bytes)
        // alloc[3]: 0x6000 (slice B metadata, 16 bytes)
        // alloc[4]: 0x7000 (unrelated small alloc)
        //
        // record[2] at 0x5000 has ptr=0x10050 (inside buffer A) → Slice → alloc[0]
        // record[3] at 0x6000 has ptr=0x20080 (inside buffer B) → Slice → alloc[1]
        // record[4] at 0x7000 has ptr=0x9000 (no containing alloc) → no edge
        let allocs = vec![
            make_alloc(0x10000, 0x2000), // buffer A (id=0)
            make_alloc(0x20000, 0x2000), // buffer B (id=1)
            make_alloc(0x5000, 16),      // slice A metadata (id=2)
            make_alloc(0x6000, 16),      // slice B metadata (id=3)
            make_alloc(0x7000, 16),      // unrelated (id=4)
        ];
        let range_map = RangeMap::new(&allocs);

        // We need records whose ptr fields point INTO the buffers.
        // record[2]: metadata at 0x5000, but its ptr field is 0x10050 (inside buffer A)
        // record[3]: metadata at 0x6000, but its ptr field is 0x20080 (inside buffer B)
        let records = vec![
            make_record(2, 0x10050, 16), // slice into buffer A
            make_record(3, 0x20080, 16), // slice into buffer B
            make_record(4, 0x9000, 16),  // no containing allocation
        ];

        let edges = detect_slice(&records, &allocs, &range_map);
        assert_eq!(edges.len(), 2);

        // Verify the edges point to the right targets.
        let mut targets: Vec<(usize, usize)> = edges.iter().map(|e| (e.from, e.to)).collect();
        targets.sort();
        assert_eq!(targets, vec![(2, 0), (3, 1)]);
    }
}
272}