Skip to main content

ringkernel_procint/models/
partial_order.rs

1//! Partial order structures for concurrent activity analysis.
2//!
3//! Represents precedence relationships between activities using bit-packed matrices.
4
5use super::{ActivityId, HybridTimestamp};
6use rkyv::{Archive, Deserialize, Serialize};
7
/// GPU-compatible partial order trace (256 bytes).
///
/// Contains a 16x16 bit precedence matrix representing ordering
/// relationships between up to 16 distinct activities of a single case.
/// `#[repr(C, align(256))]` fixes the layout so instances can be copied
/// into device buffers verbatim; a compile-time assertion elsewhere in
/// this file pins the size at exactly 256 bytes.
#[derive(Debug, Clone, Copy, Archive, Serialize, Deserialize)]
#[repr(C, align(256))]
pub struct GpuPartialOrderTrace {
    /// Trace identifier.
    pub trace_id: u64,
    /// Case identifier.
    pub case_id: u64,
    /// Number of events in the trace.
    pub event_count: u32,
    /// Number of unique activities (at most 16; see `add_activity`).
    pub activity_count: u32,
    /// Start timestamp of the whole trace.
    pub start_time: HybridTimestamp,
    /// End timestamp of the whole trace.
    pub end_time: HybridTimestamp,
    /// Maximum concurrency level (order-theoretic width).
    pub max_width: u32,
    /// Flag bits. Only 0x01 (= transitive closure computed) is set by the
    /// code in this file; has_cycles / is_total_order bits are reserved.
    pub flags: u32,
    /// Precedence matrix (16x16 bits = 32 bytes).
    /// Row i bit j set means activity i precedes activity j.
    pub precedence_matrix: [u16; 16],
    /// Activity ID mapping (index -> activity ID), up to 16 activities.
    /// Unused slots hold `u32::MAX` (see `Default`).
    pub activity_ids: [u32; 16],
    /// Activity start times (relative to trace start, in SECONDS), up to 16 activities.
    /// Using seconds instead of ms to avoid u16 overflow for long-running activities.
    pub activity_start_secs: [u16; 16],
    /// Activity durations (in SECONDS), up to 16 activities.
    /// Using seconds instead of ms to avoid u16 overflow for long-running activities.
    pub activity_duration_secs: [u16; 16],
    /// Reserved padding to reach exactly 256 bytes.
    pub _reserved: [u8; 32],
}
44
45impl Default for GpuPartialOrderTrace {
46    fn default() -> Self {
47        Self {
48            trace_id: 0,
49            case_id: 0,
50            event_count: 0,
51            activity_count: 0,
52            start_time: HybridTimestamp::default(),
53            end_time: HybridTimestamp::default(),
54            max_width: 0,
55            flags: 0,
56            precedence_matrix: [0; 16],
57            activity_ids: [u32::MAX; 16],
58            activity_start_secs: [0; 16],
59            activity_duration_secs: [0; 16],
60            _reserved: [0; 32],
61        }
62    }
63}
64
// Compile-time layout check: 8+8+4+4+16+16+4+4+32+64+32+32+32 = 256 bytes,
// matching the #[repr(C, align(256))] declaration above. Fails the build
// if a field edit changes the GPU-visible size.
const _: () = assert!(std::mem::size_of::<GpuPartialOrderTrace>() == 256);
67
impl GpuPartialOrderTrace {
    /// Create a new, empty partial order trace for the given identifiers.
    pub fn new(trace_id: u64, case_id: u64) -> Self {
        let mut trace = Self::default();
        trace.trace_id = trace_id;
        trace.case_id = case_id;
        trace
    }
77
78    /// Set precedence: i precedes j.
79    pub fn set_precedence(&mut self, i: usize, j: usize) {
80        if i < 16 && j < 16 {
81            self.precedence_matrix[i] |= 1 << j;
82        }
83    }
84
85    /// Check if i precedes j.
86    pub fn precedes(&self, i: usize, j: usize) -> bool {
87        if i < 16 && j < 16 {
88            (self.precedence_matrix[i] & (1 << j)) != 0
89        } else {
90            false
91        }
92    }
93
94    /// Check if i and j are concurrent (neither precedes the other).
95    pub fn is_concurrent(&self, i: usize, j: usize) -> bool {
96        !self.precedes(i, j) && !self.precedes(j, i)
97    }
98
99    /// Add an activity and return its index.
100    pub fn add_activity(&mut self, activity_id: ActivityId) -> Option<usize> {
101        // Check if already exists
102        for i in 0..self.activity_count as usize {
103            if self.activity_ids[i] == activity_id {
104                return Some(i);
105            }
106        }
107
108        // Add new (max 16 activities)
109        if (self.activity_count as usize) < 16 {
110            let idx = self.activity_count as usize;
111            self.activity_ids[idx] = activity_id;
112            self.activity_count += 1;
113            Some(idx)
114        } else {
115            None
116        }
117    }
118
119    /// Get activity index by ID.
120    pub fn get_activity_index(&self, activity_id: ActivityId) -> Option<usize> {
121        (0..self.activity_count as usize).find(|&i| self.activity_ids[i] == activity_id)
122    }
123
124    /// Compute transitive closure using Floyd-Warshall.
125    pub fn compute_transitive_closure(&mut self) {
126        let n = self.activity_count as usize;
127        for k in 0..n {
128            for i in 0..n {
129                for j in 0..n {
130                    if self.precedes(i, k) && self.precedes(k, j) {
131                        self.set_precedence(i, j);
132                    }
133                }
134            }
135        }
136        self.flags |= 0x01; // Mark as having transitive closure
137    }
138
139    /// Check if has cycles (after transitive closure, if i precedes i).
140    pub fn has_cycles(&self) -> bool {
141        for i in 0..self.activity_count as usize {
142            if self.precedes(i, i) {
143                return true;
144            }
145        }
146        false
147    }
148
149    /// Check if this is a total order (all pairs are ordered).
150    pub fn is_total_order(&self) -> bool {
151        let n = self.activity_count as usize;
152        for i in 0..n {
153            for j in (i + 1)..n {
154                if !self.precedes(i, j) && !self.precedes(j, i) {
155                    return false;
156                }
157            }
158        }
159        true
160    }
161
162    /// Compute width (maximum antichain size = maximum concurrency).
163    pub fn compute_width(&mut self) -> u32 {
164        // Simple greedy approach: find largest set of mutually concurrent activities
165        let n = self.activity_count as usize;
166        let mut max_width = 1u32;
167
168        // For each activity, count how many are concurrent with it
169        for i in 0..n {
170            let mut concurrent_count = 1;
171            for j in 0..n {
172                if i != j && self.is_concurrent(i, j) {
173                    concurrent_count += 1;
174                }
175            }
176            max_width = max_width.max(concurrent_count);
177        }
178
179        self.max_width = max_width;
180        max_width
181    }
182
183    /// Get concurrent activities with given activity.
184    pub fn concurrent_with(&self, activity_idx: usize) -> Vec<usize> {
185        let n = self.activity_count as usize;
186        (0..n)
187            .filter(|&j| j != activity_idx && self.is_concurrent(activity_idx, j))
188            .collect()
189    }
190}
191
/// Conflict matrix for tracking conflicts across multiple traces.
///
/// Accumulates, over many partial-order traces, how often each pair of
/// activities appeared in the same trace (co-occurrence) and how often
/// that pair was concurrent (conflict). The ratio of the two drives the
/// conflict-relation test.
#[derive(Debug, Clone)]
pub struct ConflictMatrix {
    /// Alphabet size (number of unique activities); both count matrices
    /// are `alphabet_size` x `alphabet_size`.
    pub alphabet_size: usize,
    /// Number of traces analyzed (incremented by `update`).
    pub trace_count: u32,
    /// Conflict threshold for detecting conflict relationship
    /// (defaults to 0.1 in `new`).
    pub conflict_threshold: f32,
    /// Conflict counts: conflicts[i][j] = times i and j were concurrent.
    /// Kept symmetric by `update`.
    pub conflict_counts: Vec<Vec<u32>>,
    /// Co-occurrence counts: cooccurrence[i][j] = times i and j appeared together.
    /// Kept symmetric by `update`.
    pub cooccurrence_counts: Vec<Vec<u32>>,
}
206
impl ConflictMatrix {
    /// Create an empty conflict matrix for `alphabet_size` activities.
    ///
    /// The conflict threshold defaults to 0.1 (10% of co-occurrences).
    pub fn new(alphabet_size: usize) -> Self {
        // Both count matrices start as identical zero squares.
        let zeros = vec![vec![0u32; alphabet_size]; alphabet_size];
        Self {
            alphabet_size,
            trace_count: 0,
            conflict_threshold: 0.1,
            conflict_counts: zeros.clone(),
            cooccurrence_counts: zeros,
        }
    }
218
219    /// Update from a partial order trace.
220    pub fn update(&mut self, trace: &GpuPartialOrderTrace) {
221        let n = trace.activity_count as usize;
222
223        for i in 0..n {
224            for j in (i + 1)..n {
225                let ai = trace.activity_ids[i] as usize;
226                let aj = trace.activity_ids[j] as usize;
227
228                if ai < self.alphabet_size && aj < self.alphabet_size {
229                    // Both appear together
230                    self.cooccurrence_counts[ai][aj] += 1;
231                    self.cooccurrence_counts[aj][ai] += 1;
232
233                    // If concurrent, they're in conflict
234                    if trace.is_concurrent(i, j) {
235                        self.conflict_counts[ai][aj] += 1;
236                        self.conflict_counts[aj][ai] += 1;
237                    }
238                }
239            }
240        }
241
242        self.trace_count += 1;
243    }
244
245    /// Get conflict ratio between two activities.
246    pub fn conflict_ratio(&self, i: usize, j: usize) -> f32 {
247        let cooc = self.cooccurrence_counts[i][j];
248        if cooc > 0 {
249            self.conflict_counts[i][j] as f32 / cooc as f32
250        } else {
251            0.0
252        }
253    }
254
255    /// Check if two activities are in conflict relation.
256    pub fn are_in_conflict(&self, i: usize, j: usize) -> bool {
257        self.conflict_ratio(i, j) >= self.conflict_threshold
258    }
259}
260
/// Interval event for partial order derivation.
///
/// An activity instance with an explicit duration `[start_time, end_time)`.
/// Precedence between interval events is derived from whether one interval
/// ends before the other starts (see the `impl` below).
#[derive(Debug, Clone, Copy, Archive, Serialize, Deserialize)]
#[repr(C)]
pub struct IntervalEvent {
    /// Event identifier.
    pub event_id: u64,
    /// Activity identifier.
    pub activity_id: u32,
    /// Explicit padding — presumably to keep the following timestamps
    /// 8-byte aligned; confirm against HybridTimestamp's layout.
    pub _padding: u32,
    /// Start timestamp.
    pub start_time: HybridTimestamp,
    /// End timestamp.
    pub end_time: HybridTimestamp,
}
276
277impl IntervalEvent {
278    /// Check if this event precedes another (ends before other starts).
279    pub fn precedes(&self, other: &IntervalEvent) -> bool {
280        self.end_time.physical_ms <= other.start_time.physical_ms
281    }
282
283    /// Check if this event overlaps with another.
284    pub fn overlaps(&self, other: &IntervalEvent) -> bool {
285        self.start_time.physical_ms < other.end_time.physical_ms
286            && other.start_time.physical_ms < self.end_time.physical_ms
287    }
288
289    /// Check if concurrent (overlaps but neither strictly precedes).
290    pub fn is_concurrent(&self, other: &IntervalEvent) -> bool {
291        !self.precedes(other) && !other.precedes(self)
292    }
293}
294
#[cfg(test)]
mod tests {
    use super::*;

    /// The GPU struct must be exactly 256 bytes (mirrors the const assert).
    #[test]
    fn test_partial_order_size() {
        assert_eq!(std::mem::size_of::<GpuPartialOrderTrace>(), 256);
    }

    /// Direct edges, transitive closure, cycle check, and total-order check
    /// on a simple chain A -> B -> C.
    #[test]
    fn test_precedence_operations() {
        let mut po = GpuPartialOrderTrace::new(1, 100);

        po.add_activity(10);
        po.add_activity(20);
        po.add_activity(30);

        po.set_precedence(0, 1); // A precedes B
        po.set_precedence(1, 2); // B precedes C

        assert!(po.precedes(0, 1));
        assert!(po.precedes(1, 2));
        assert!(!po.precedes(0, 2)); // Not yet transitive

        po.compute_transitive_closure();
        assert!(po.precedes(0, 2)); // Now transitive

        assert!(!po.has_cycles());
        assert!(po.is_total_order());
    }

    /// Two unordered predecessors of the same activity are concurrent.
    #[test]
    fn test_concurrent_detection() {
        let mut po = GpuPartialOrderTrace::new(1, 100);

        po.add_activity(10);
        po.add_activity(20);
        po.add_activity(30);

        po.set_precedence(0, 2); // A precedes C
        po.set_precedence(1, 2); // B precedes C
                                 // A and B are concurrent (neither precedes the other)

        assert!(po.is_concurrent(0, 1));
        assert!(!po.is_concurrent(0, 2));
    }

    /// Interval semantics: touching endpoints mean precedence, a genuine
    /// overlap means concurrency.
    #[test]
    fn test_interval_events() {
        let e1 = IntervalEvent {
            event_id: 1,
            activity_id: 1,
            _padding: 0,
            start_time: HybridTimestamp::new(0, 0),
            end_time: HybridTimestamp::new(100, 0),
        };

        let e2 = IntervalEvent {
            event_id: 2,
            activity_id: 2,
            _padding: 0,
            start_time: HybridTimestamp::new(100, 0),
            end_time: HybridTimestamp::new(200, 0),
        };

        let e3 = IntervalEvent {
            event_id: 3,
            activity_id: 3,
            _padding: 0,
            start_time: HybridTimestamp::new(50, 0),
            end_time: HybridTimestamp::new(150, 0),
        };

        assert!(e1.precedes(&e2));
        assert!(!e1.precedes(&e3)); // They overlap
        assert!(e1.is_concurrent(&e3));
    }
}