//! Execution path exploration (`loom/rt/path.rs`).

1use crate::rt::{execution, object, thread, MAX_ATOMIC_HISTORY, MAX_THREADS};
2
3#[cfg(feature = "checkpoint")]
4use serde::{Deserialize, Serialize};
5
/// An execution path
///
/// Records every branch point (thread schedule, atomic load, spurious wakeup)
/// taken by the current execution, and is stepped between executions to drive
/// a depth-first search of the model's state space.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Path {
    /// Maximum number of thread preemptions to explore, if bounded.
    preemption_bound: Option<u8>,

    /// Current execution's position in the branches vec.
    ///
    /// When the execution starts, this is zero, but `branches` might not be
    /// empty.
    ///
    /// In order to perform an exhaustive search, the execution is seeded with a
    /// set of branches.
    pos: usize,

    /// List of all branches in the execution.
    ///
    /// A branch is of type `Schedule`, `Load`, or `Spurious`
    branches: object::Store<Entry>,

    /// If true, branches created from here on are candidates for further
    /// exploration (toggled by `explore_state` / `critical`).
    exploring: bool,

    /// If true, the user decided to skip the current execution branch. We do
    /// not do any further exploration here.
    skipping: bool,

    /// Value `exploring` is reset to at the start of each execution (`step`).
    exploring_on_start: bool,
}
36
/// A thread-scheduling branch point.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Schedule {
    /// Number of times the thread leading to this branch point has been
    /// pre-empted.
    preemptions: u8,

    /// The thread that was active first
    initial_active: Option<u8>,

    /// State of each thread
    threads: [Thread; MAX_THREADS],

    /// The previous schedule branch
    prev: Option<object::Ref<Schedule>>,

    /// If true, this branch may receive backtrack points and be advanced by
    /// `Path::step`; if false it is skipped during exploration.
    exploring: bool,
}
55
/// An atomic-load branch point: which prior store the load observes.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Load {
    /// All possible values (indices of stores the load may observe); only the
    /// first `len` entries are meaningful.
    values: [u8; MAX_ATOMIC_HISTORY],

    /// Current value
    pos: u8,

    /// Number of values in list
    len: u8,

    /// If true, `Path::step` may advance `pos` to explore the next value.
    exploring: bool,
}
70
/// A spurious-wakeup branch point.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Spurious {
    /// Whether the wakeup spuriously fires on this execution (explored
    /// `false` first, then `true`).
    spur: bool,
    /// If true, `Path::step` may flip `spur` to explore the other outcome.
    exploring: bool,
}
77
// Declares `Entry`, the sum type stored in `Path::branches`, with one variant
// per branch kind. The `objects!` macro (defined in `rt::object`) presumably
// also wires up the typed `downcast::<T>` used below — TODO confirm.
objects! {
    #[derive(Debug)]
    #[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
    Entry,
    Schedule(Schedule),
    Load(Load),
    Spurious(Spurious),
}
86
/// Exploration state of a single thread within a `Schedule` branch.
///
/// NOTE: variant order is part of the `checkpoint` serialization format; do
/// not reorder.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) enum Thread {
    /// The thread is currently disabled
    Disabled,

    /// The thread should not be explored
    Skip,

    /// The thread is in a yield state.
    Yield,

    /// The thread is waiting to be explored
    Pending,

    /// The thread is currently being explored
    Active,

    /// The thread has been explored
    Visited,
}
108
/// Asserts that the branch store has room for one more branch before a
/// `push`/`insert`, so hitting the configured maximum fails loudly instead of
/// reallocating past the model's budget.
macro_rules! assert_path_len {
    ($branches:expr) => {{
        assert!(
            // if we are panicking, we may be performing a branch due to a
            // `Drop` impl (e.g., for `Arc`, or for a user type that does an
            // atomic operation in its `Drop` impl). if that's the case,
            // asserting this again will double panic. therefore, short-circuit
            // the assertion if the thread is panicking.
            $branches.len() < $branches.capacity() || std::thread::panicking(),
            "Model exceeded maximum number of branches. This is often caused \
             by an algorithm requiring the processor to make progress, e.g. \
             spin locks.",
        );
    }};
}
124
impl Path {
    /// Create a new, blank, configured to branch at most `max_branches` times
    /// and at most `preemption_bound` thread preemptions.
    pub(crate) fn new(max_branches: usize, preemption_bound: Option<u8>, exploring: bool) -> Path {
        Path {
            preemption_bound,
            pos: 0,
            branches: object::Store::with_capacity(max_branches),
            exploring,
            skipping: false,
            exploring_on_start: exploring,
        }
    }

    /// Enter the exploring state: branches created from here on are candidates
    /// for further exploration.
    ///
    /// No-op while the current branch is being skipped (`skip_branch`);
    /// otherwise panics if already exploring.
    pub(crate) fn explore_state(&mut self) {
        if !self.skipping {
            assert!(!self.exploring, "not in critical state");
            self.exploring = true;
        }
    }

    /// Enter the critical state: branches created from here on are excluded
    /// from further exploration.
    ///
    /// No-op while the current branch is being skipped; otherwise panics if
    /// not currently exploring.
    pub(crate) fn critical(&mut self) {
        if !self.skipping {
            assert!(self.exploring, "not in exploring state");
            self.exploring = false;
        }
    }

    /// Skip the remainder of the current execution branch: disables exploring
    /// and suppresses the explore/critical assertions until the next `step`.
    pub(crate) fn skip_branch(&mut self) {
        self.exploring = false;
        self.skipping = true;
    }

    /// Grow the branch store so it can hold `max_branches` entries in total.
    pub(crate) fn set_max_branches(&mut self, max_branches: usize) {
        self.branches
            .reserve_exact(max_branches - self.branches.len());
    }

    /// Returns `true` if the execution has reached a point where the known path
    /// has been traversed and has reached a new branching point.
    pub(super) fn is_traversed(&self) -> bool {
        self.pos == self.branches.len()
    }

    /// Current position in the branch list.
    pub(super) fn pos(&self) -> usize {
        self.pos
    }

    /// Push a new atomic-load branch, seeded with the indices of the stores
    /// the load may observe.
    pub(super) fn push_load(&mut self, seed: &[u8]) {
        assert_path_len!(self.branches);

        // Insert a blank branch first; the seed values are copied in below.
        let load_ref = self.branches.insert(Load {
            values: [0; MAX_ATOMIC_HISTORY],
            pos: 0,
            len: 0,
            exploring: self.exploring,
        });

        let load = load_ref.get_mut(&mut self.branches);

        // Copy the seed in, bounds-checking both the value and the index
        // against the fixed-size history buffer.
        for (i, &store) in seed.iter().enumerate() {
            assert!(
                store < MAX_ATOMIC_HISTORY as u8,
                "[loom internal bug] store = {}; max = {}",
                store,
                MAX_ATOMIC_HISTORY
            );
            assert!(
                i < MAX_ATOMIC_HISTORY,
                "[loom internal bug] i = {}; max = {}",
                i,
                MAX_ATOMIC_HISTORY
            );

            load.values[i] = store;
            load.len += 1;
        }
    }

    /// Returns the atomic write to read
    ///
    /// The branch's current option (`load.pos`) is advanced by `step` between
    /// executions, not here.
    pub(super) fn branch_load(&mut self) -> usize {
        assert!(!self.is_traversed(), "[loom internal bug]");

        let load = object::Ref::from_usize(self.pos)
            .downcast::<Load>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        load.values[load.pos as usize] as usize
    }

    /// Branch on spurious notifications
    ///
    /// Returns `false` on first traversal; `step` flips `spur` so the `true`
    /// outcome is explored on a later execution.
    pub(super) fn branch_spurious(&mut self) -> bool {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            self.branches.insert(Spurious {
                spur: false,
                exploring: self.exploring,
            });
        }

        let spurious = object::Ref::from_usize(self.pos)
            .downcast::<Spurious>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches)
            .spur;

        self.pos += 1;
        spurious
    }

    /// Returns the thread identifier to schedule
    ///
    /// `seed` supplies the state of each runnable thread when a brand-new
    /// branch point is created; on replay the recorded branch is used instead.
    /// Returns `None` when no thread is active (deadlock or completion).
    pub(super) fn branch_thread(
        &mut self,
        execution_id: execution::Id,
        seed: impl ExactSizeIterator<Item = Thread>,
    ) -> Option<thread::Id> {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            // Find the last thread scheduling branch in the path
            let prev = self.last_schedule();

            // Entering a new exploration space.
            //
            // Initialize a new branch. The initial field values don't matter
            // as they will be updated below.
            let schedule_ref = self.branches.insert(Schedule {
                preemptions: 0,
                initial_active: None,
                threads: [Thread::Disabled; MAX_THREADS],
                prev,
                exploring: self.exploring,
            });

            // Get a reference to the branch in the object store.
            let schedule = schedule_ref.get_mut(&mut self.branches);

            assert!(seed.len() <= MAX_THREADS, "[loom internal bug]");

            // Currently active thread
            let mut active = None;

            for (i, v) in seed.enumerate() {
                // Initialize thread states
                schedule.threads[i] = v;

                if v.is_active() {
                    assert!(
                        active.is_none(),
                        "[loom internal bug] only one thread should start as active"
                    );
                    active = Some(i as u8);
                }
            }

            // Ensure at least one thread is active, otherwise toggle a yielded
            // thread.
            if active.is_none() {
                for (i, th) in schedule.threads.iter_mut().enumerate() {
                    if *th == Thread::Yield {
                        *th = Thread::Active;
                        active = Some(i as u8);
                        break;
                    }
                }
            }

            let mut initial_active = active;

            // If the active thread changed relative to the previous schedule
            // point, clear `initial_active`; `Schedule::preemptions` treats a
            // later deviation from `initial_active` as a preemption.
            if let Some(prev) = prev {
                if initial_active != prev.get(&self.branches).active_thread_index() {
                    initial_active = None;
                }
            }

            // Preemption count is carried forward from the previous schedule
            // point's current state.
            let preemptions = prev
                .map(|prev| prev.get(&self.branches).preemptions())
                .unwrap_or(0);

            debug_assert!(
                self.preemption_bound.is_none() || Some(preemptions) <= self.preemption_bound,
                "[loom internal bug] max = {:?}; curr = {}",
                self.preemption_bound,
                preemptions,
            );

            // Re-borrow the branch and store the computed values.
            let schedule = schedule_ref.get_mut(&mut self.branches);
            schedule.initial_active = initial_active;
            schedule.preemptions = preemptions;
        }

        // Replay (or continue from) the branch at the current position.
        let schedule = object::Ref::from_usize(self.pos)
            .downcast::<Schedule>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        schedule
            .threads
            .iter()
            .enumerate()
            .find(|&(_, th)| th.is_active())
            .map(|(i, _)| thread::Id::new(execution_id, i))
    }

    /// Request that `thread_id` be explored at (or before) branch `point`.
    ///
    /// Walks backwards from `point` to the nearest exploring `Schedule`
    /// branch, adds the backtrack point there, and — when a preemption bound
    /// is set — conservatively adds an additional backtrack point further up
    /// the chain (see comment below).
    pub(super) fn backtrack(&mut self, mut point: usize, thread_id: thread::Id) {
        // Find the closest exploring schedule branch at or before `point`.
        let schedule = loop {
            if let Some(schedule_ref) =
                object::Ref::from_usize(point).downcast::<Schedule>(&self.branches)
            {
                let schedule = schedule_ref.get_mut(&mut self.branches);

                if schedule.exploring {
                    schedule.backtrack(thread_id, self.preemption_bound);
                    break schedule;
                }
            }

            if point == 0 {
                // No exploring schedule branch exists before `point`.
                return;
            }

            point -= 1;
        };

        let mut curr = if let Some(curr) = schedule.prev {
            curr
        } else {
            return;
        };

        if self.preemption_bound.is_some() {
            // Walk the chain of schedule branches towards the start of the
            // execution.
            loop {
                // Preemption bounded DPOR requires conservatively adding
                // another backtrack point to cover cases missed by the bounds.
                if let Some(prev) = curr.get(&self.branches).prev {
                    let active_a = curr.get(&self.branches).active_thread_index();
                    let active_b = prev.get(&self.branches).active_thread_index();

                    // Stop at the first context switch (active thread changed
                    // between consecutive schedule points).
                    if active_a != active_b && curr.get_mut(&mut self.branches).exploring {
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                        return;
                    }

                    curr = prev;
                } else {
                    // NOTE(review): `&mut` here only needs a shared borrow (it
                    // coerces to `&`); behavior is unaffected.
                    if curr.get(&mut self.branches).exploring {
                        // This is the very first schedule
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                    }
                    return;
                }
            }
        }
    }

    /// Reset the path to prepare for the next exploration of the model.
    ///
    /// This function will also trim the object store, dropping any objects that
    /// are created in pruned sections of the path.
    ///
    /// Returns `false` when the entire state space has been explored.
    pub(super) fn step(&mut self) -> bool {
        // Reset the position to zero, the path will start traversing from the
        // beginning
        self.pos = 0;

        // Reset exploring / critical / skip
        self.exploring = self.exploring_on_start;
        self.skipping = false;

        // Set the final branch to try the next option. If all options have been
        // traversed, pop the final branch and try again w/ the one under it.
        //
        // This is depth-first tree traversal.
        //
        for last in (0..self.branches.len()).rev() {
            let last = object::Ref::from_usize(last);

            // Remove all objects that were created **after** this branch
            self.branches.truncate(last);

            if let Some(schedule_ref) = last.downcast::<Schedule>(&self.branches) {
                let schedule = schedule_ref.get_mut(&mut self.branches);

                if !schedule.exploring {
                    continue;
                }

                // Transition the active thread to visited.
                if let Some(thread) = schedule.threads.iter_mut().find(|th| th.is_active()) {
                    *thread = Thread::Visited;
                }

                // Find a pending thread and transition it to active
                let rem = schedule
                    .threads
                    .iter_mut()
                    .find(|th| th.is_pending())
                    .map(|th| {
                        *th = Thread::Active;
                    })
                    .is_some();

                if rem {
                    return true;
                }
            } else if let Some(load_ref) = last.downcast::<Load>(&self.branches) {
                let load = load_ref.get_mut(&mut self.branches);

                if !load.exploring {
                    continue;
                }

                // Advance to the next observable value, if any remain.
                load.pos += 1;

                if load.pos < load.len {
                    return true;
                }
            } else if let Some(spurious_ref) = last.downcast::<Spurious>(&self.branches) {
                let spurious = spurious_ref.get_mut(&mut self.branches);

                if !spurious.exploring {
                    continue;
                }

                // Explore the spurious-wakeup outcome exactly once.
                if !spurious.spur {
                    spurious.spur = true;
                    return true;
                }
            } else {
                unreachable!();
            }
        }

        // Every branch has been fully explored.
        false
    }

    /// Returns the most recently inserted `Schedule` branch, if any.
    fn last_schedule(&self) -> Option<object::Ref<Schedule>> {
        self.branches.iter_ref::<Schedule>().rev().next()
    }
}
473
474impl Schedule {
475    /// Returns the index of the currently active thread
476    fn active_thread_index(&self) -> Option<u8> {
477        self.threads
478            .iter()
479            .enumerate()
480            .find(|(_, th)| th.is_active())
481            .map(|(index, _)| index as u8)
482    }
483
484    /// Compute the number of preemptions for the current state of the branch
485    fn preemptions(&self) -> u8 {
486        if self.initial_active.is_some() && self.initial_active != self.active_thread_index() {
487            return self.preemptions + 1;
488        }
489
490        self.preemptions
491    }
492
493    fn backtrack(&mut self, thread_id: thread::Id, preemption_bound: Option<u8>) {
494        assert!(self.exploring);
495
496        if let Some(bound) = preemption_bound {
497            assert!(
498                self.preemptions <= bound,
499                "[loom internal bug] actual = {}, bound = {}",
500                self.preemptions,
501                bound
502            );
503
504            if self.preemptions == bound {
505                return;
506            }
507        }
508
509        let thread_id = thread_id.as_usize();
510
511        if thread_id >= self.threads.len() {
512            return;
513        }
514
515        if self.threads[thread_id].is_enabled() {
516            self.threads[thread_id].explore();
517        } else {
518            for th in &mut self.threads {
519                th.explore();
520            }
521        }
522    }
523}
524
525impl Thread {
526    fn explore(&mut self) {
527        if *self == Thread::Skip {
528            *self = Thread::Pending;
529        }
530    }
531
532    fn is_pending(&self) -> bool {
533        *self == Thread::Pending
534    }
535
536    fn is_active(&self) -> bool {
537        *self == Thread::Active
538    }
539
540    fn is_enabled(&self) -> bool {
541        !self.is_disabled()
542    }
543
544    fn is_disabled(&self) -> bool {
545        *self == Thread::Disabled
546    }
547}