ringkernel_procint/models/partial_order.rs

use super::{ActivityId, HybridTimestamp};
use rkyv::{Archive, Deserialize, Serialize};

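/// Fixed-size, 256-byte partial-order trace in a GPU-friendly layout.
///
/// Holds up to 16 activities per trace. `precedence_matrix[i]` is a 16-bit
/// row mask in which bit `j` records that the activity in slot `i` precedes
/// the activity in slot `j`; unused slots in `activity_ids` are `u32::MAX`.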
#[derive(Debug, Clone, Copy, Archive, Serialize, Deserialize)]
#[repr(C, align(256))]
pub struct GpuPartialOrderTrace {
    pub trace_id: u64,
    pub case_id: u64,
    pub event_count: u32,
    pub activity_count: u32,
    pub start_time: HybridTimestamp,
    pub end_time: HybridTimestamp,
    pub max_width: u32,
    pub flags: u32,
    pub precedence_matrix: [u16; 16],
    pub activity_ids: [u32; 16],
    pub activity_start_secs: [u16; 16],
    pub activity_duration_secs: [u16; 16],
    pub _reserved: [u8; 32],
}

impl Default for GpuPartialOrderTrace {
    fn default() -> Self {
        Self {
            trace_id: 0,
            case_id: 0,
            event_count: 0,
            activity_count: 0,
            start_time: HybridTimestamp::default(),
            end_time: HybridTimestamp::default(),
            max_width: 0,
            flags: 0,
            precedence_matrix: [0; 16],
            activity_ids: [u32::MAX; 16],
            activity_start_secs: [0; 16],
            activity_duration_secs: [0; 16],
            _reserved: [0; 32],
        }
    }
}

// Compile-time guarantee that the struct keeps its exact 256-byte layout.
const _: () = assert!(std::mem::size_of::<GpuPartialOrderTrace>() == 256);

impl GpuPartialOrderTrace {
    pub fn new(trace_id: u64, case_id: u64) -> Self {
        Self {
            trace_id,
            case_id,
            ..Default::default()
        }
    }

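    /// Records that the activity in slot `i` precedes the activity in slot `j`.
    /// Indices outside the 16-slot capacity are ignored.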
    pub fn set_precedence(&mut self, i: usize, j: usize) {
        if i < 16 && j < 16 {
            self.precedence_matrix[i] |= 1 << j;
        }
    }

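    /// Returns `true` if the activity in slot `i` precedes the activity in slot `j`.
    /// Out-of-range indices yield `false`.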
    pub fn precedes(&self, i: usize, j: usize) -> bool {
        if i < 16 && j < 16 {
            (self.precedence_matrix[i] & (1 << j)) != 0
        } else {
            false
        }
    }

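    /// Two activities are concurrent when neither precedes the other.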
    pub fn is_concurrent(&self, i: usize, j: usize) -> bool {
        !self.precedes(i, j) && !self.precedes(j, i)
    }

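    /// Adds an activity and returns its slot index, reusing the existing slot
    /// if the activity is already present. Returns `None` once all 16 slots are used.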
    pub fn add_activity(&mut self, activity_id: ActivityId) -> Option<usize> {
        for i in 0..self.activity_count as usize {
            if self.activity_ids[i] == activity_id {
                return Some(i);
            }
        }

        if (self.activity_count as usize) < 16 {
            let idx = self.activity_count as usize;
            self.activity_ids[idx] = activity_id;
            self.activity_count += 1;
            Some(idx)
        } else {
            None
        }
    }

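    /// Returns the slot index of `activity_id`, if it has been added to this trace.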
    pub fn get_activity_index(&self, activity_id: ActivityId) -> Option<usize> {
        (0..self.activity_count as usize).find(|&i| self.activity_ids[i] == activity_id)
    }

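    /// Computes the transitive closure of the precedence relation with a
    /// Floyd-Warshall-style triple loop over the occupied slots, then sets
    /// bit 0 of `flags` after completion.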
    pub fn compute_transitive_closure(&mut self) {
        let n = self.activity_count as usize;
        for k in 0..n {
            for i in 0..n {
                for j in 0..n {
                    if self.precedes(i, k) && self.precedes(k, j) {
                        self.set_precedence(i, j);
                    }
                }
            }
        }
        self.flags |= 0x01;
    }

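    /// After the transitive closure, a cycle shows up as an activity that
    /// precedes itself.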
    pub fn has_cycles(&self) -> bool {
        for i in 0..self.activity_count as usize {
            if self.precedes(i, i) {
                return true;
            }
        }
        false
    }

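    /// Returns `true` if every pair of activities is ordered in one direction
    /// or the other, i.e. the trace contains no concurrency.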
    pub fn is_total_order(&self) -> bool {
        let n = self.activity_count as usize;
        for i in 0..n {
            for j in (i + 1)..n {
                if !self.precedes(i, j) && !self.precedes(j, i) {
                    return false;
                }
            }
        }
        true
    }

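    /// Computes and stores `max_width` as the largest concurrency degree of any
    /// single activity plus one. This is a cheap upper bound on the true width
    /// of the partial order rather than an exact maximum-antichain computation.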
    pub fn compute_width(&mut self) -> u32 {
        let n = self.activity_count as usize;
        let mut max_width = 1u32;

        for i in 0..n {
            let mut concurrent_count = 1;
            for j in 0..n {
                if i != j && self.is_concurrent(i, j) {
                    concurrent_count += 1;
                }
            }
            max_width = max_width.max(concurrent_count);
        }

        self.max_width = max_width;
        max_width
    }

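    /// Returns the slot indices of all activities concurrent with `activity_idx`.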
    pub fn concurrent_with(&self, activity_idx: usize) -> Vec<usize> {
        let n = self.activity_count as usize;
        (0..n)
            .filter(|&j| j != activity_idx && self.is_concurrent(activity_idx, j))
            .collect()
    }
}

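/// Pairwise concurrency statistics accumulated over many traces.
///
/// `cooccurrence_counts[a][b]` counts traces in which activities `a` and `b`
/// both appear; `conflict_counts[a][b]` counts those in which the pair was
/// observed as concurrent.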
#[derive(Debug, Clone)]
pub struct ConflictMatrix {
    pub alphabet_size: usize,
    pub trace_count: u32,
    pub conflict_threshold: f32,
    pub conflict_counts: Vec<Vec<u32>>,
    pub cooccurrence_counts: Vec<Vec<u32>>,
}

impl ConflictMatrix {
    pub fn new(alphabet_size: usize) -> Self {
        Self {
            alphabet_size,
            trace_count: 0,
            conflict_threshold: 0.1,
            conflict_counts: vec![vec![0; alphabet_size]; alphabet_size],
            cooccurrence_counts: vec![vec![0; alphabet_size]; alphabet_size],
        }
    }

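    /// Folds one trace into the statistics: every pair of activities in the
    /// trace counts as a co-occurrence, and concurrent pairs additionally
    /// count as conflicts.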
    pub fn update(&mut self, trace: &GpuPartialOrderTrace) {
        let n = trace.activity_count as usize;

        for i in 0..n {
            for j in (i + 1)..n {
                let ai = trace.activity_ids[i] as usize;
                let aj = trace.activity_ids[j] as usize;

                if ai < self.alphabet_size && aj < self.alphabet_size {
                    self.cooccurrence_counts[ai][aj] += 1;
                    self.cooccurrence_counts[aj][ai] += 1;

                    if trace.is_concurrent(i, j) {
                        self.conflict_counts[ai][aj] += 1;
                        self.conflict_counts[aj][ai] += 1;
                    }
                }
            }
        }

        self.trace_count += 1;
    }

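    /// Fraction of co-occurrences of activities `i` and `j` in which the two
    /// were concurrent; `0.0` if they never co-occurred.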
    pub fn conflict_ratio(&self, i: usize, j: usize) -> f32 {
        let cooc = self.cooccurrence_counts[i][j];
        if cooc > 0 {
            self.conflict_counts[i][j] as f32 / cooc as f32
        } else {
            0.0
        }
    }

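    /// Returns `true` once the conflict ratio reaches `conflict_threshold`.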
    pub fn are_in_conflict(&self, i: usize, j: usize) -> bool {
        self.conflict_ratio(i, j) >= self.conflict_threshold
    }
}

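/// An event with a duration, represented as a start/end timestamp interval.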
#[derive(Debug, Clone, Copy, Archive, Serialize, Deserialize)]
#[repr(C)]
pub struct IntervalEvent {
    pub event_id: u64,
    pub activity_id: u32,
    pub _padding: u32,
    pub start_time: HybridTimestamp,
    pub end_time: HybridTimestamp,
}

impl IntervalEvent {
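    /// `self` precedes `other` if it ends no later than `other` starts,
    /// compared on the physical clock component (`physical_ms`).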
    pub fn precedes(&self, other: &IntervalEvent) -> bool {
        self.end_time.physical_ms <= other.start_time.physical_ms
    }

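    /// Returns `true` if the two intervals strictly overlap in physical time.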
    pub fn overlaps(&self, other: &IntervalEvent) -> bool {
        self.start_time.physical_ms < other.end_time.physical_ms
            && other.start_time.physical_ms < self.end_time.physical_ms
    }

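    /// Interval events are concurrent when neither precedes the other.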
    pub fn is_concurrent(&self, other: &IntervalEvent) -> bool {
        !self.precedes(other) && !other.precedes(self)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_partial_order_size() {
        assert_eq!(std::mem::size_of::<GpuPartialOrderTrace>(), 256);
    }

    #[test]
    fn test_precedence_operations() {
        let mut po = GpuPartialOrderTrace::new(1, 100);

        po.add_activity(10);
        po.add_activity(20);
        po.add_activity(30);

        po.set_precedence(0, 1);
        po.set_precedence(1, 2);

        assert!(po.precedes(0, 1));
        assert!(po.precedes(1, 2));
        assert!(!po.precedes(0, 2));

        po.compute_transitive_closure();

        assert!(po.precedes(0, 2));
        assert!(!po.has_cycles());
        assert!(po.is_total_order());
    }

    #[test]
    fn test_concurrent_detection() {
        let mut po = GpuPartialOrderTrace::new(1, 100);

        po.add_activity(10);
        po.add_activity(20);
        po.add_activity(30);

        po.set_precedence(0, 2);
        po.set_precedence(1, 2);

        assert!(po.is_concurrent(0, 1));
        assert!(!po.is_concurrent(0, 2));
    }

    #[test]
    fn test_interval_events() {
        let e1 = IntervalEvent {
            event_id: 1,
            activity_id: 1,
            _padding: 0,
            start_time: HybridTimestamp::new(0, 0),
            end_time: HybridTimestamp::new(100, 0),
        };

        let e2 = IntervalEvent {
            event_id: 2,
            activity_id: 2,
            _padding: 0,
            start_time: HybridTimestamp::new(100, 0),
            end_time: HybridTimestamp::new(200, 0),
        };

        let e3 = IntervalEvent {
            event_id: 3,
            activity_id: 3,
            _padding: 0,
            start_time: HybridTimestamp::new(50, 0),
            end_time: HybridTimestamp::new(150, 0),
        };

        assert!(e1.precedes(&e2));
        assert!(!e1.precedes(&e3));
        assert!(e1.is_concurrent(&e3));
    }
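
    // Added sketch (not part of the original test set): exercises compute_width
    // and ConflictMatrix on a small diamond-shaped trace. Assumes ActivityId is
    // an integer alias, as the tests above already do.
    #[test]
    fn test_width_and_conflict_matrix() {
        let mut po = GpuPartialOrderTrace::new(2, 200);

        // Diamond shape: 0 -> {1, 2} -> 3, with activities 1 and 2 concurrent.
        po.add_activity(0);
        po.add_activity(1);
        po.add_activity(2);
        po.add_activity(3);
        po.set_precedence(0, 1);
        po.set_precedence(0, 2);
        po.set_precedence(1, 3);
        po.set_precedence(2, 3);
        po.compute_transitive_closure();

        assert!(po.is_concurrent(1, 2));
        assert_eq!(po.compute_width(), 2);

        // Fold the trace into a conflict matrix; only the concurrent pair
        // (1, 2) should cross the default conflict threshold.
        let mut matrix = ConflictMatrix::new(4);
        matrix.update(&po);
        assert!(matrix.are_in_conflict(1, 2));
        assert!(!matrix.are_in_conflict(0, 3));
    }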
}