1use crate::rt::{execution, object, thread, MAX_ATOMIC_HISTORY, MAX_THREADS};
2
3#[cfg(feature = "checkpoint")]
4use serde::{Deserialize, Serialize};
5
/// The full exploration path taken through one model execution, recorded as
/// a sequence of branch entries (thread schedules, atomic loads, spurious
/// wakeups). Replaying and mutating this path drives the search for new
/// executions.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Path {
    /// Maximum number of thread preemptions permitted along a schedule;
    /// `None` disables the bound.
    preemption_bound: Option<u8>,

    /// Current traversal position within `branches`.
    pos: usize,

    /// All branch points recorded so far.
    branches: object::Store<Entry>,

    /// When set, newly recorded branches are marked as eligible for
    /// exploration; toggled by `explore_state` / `critical`.
    exploring: bool,

    /// When set (by `skip_branch`), the exploring-state assertions are
    /// suppressed until the next `step` resets it.
    skipping: bool,

    /// Value that `exploring` is reset to at the start of each `step`.
    exploring_on_start: bool,
}
36
/// A scheduling branch point: records which thread slot runs next and the
/// per-slot exploration state used when backtracking.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Schedule {
    /// Number of preemptions accumulated before this branch point (copied
    /// from the previous schedule's `preemptions()` in `branch_thread`).
    preemptions: u8,

    /// Index of the thread that was initially active at this branch point.
    /// Set to `None` when it differs from the previous schedule's active
    /// thread (i.e. the switch here was forced, not a preemption choice).
    initial_active: Option<u8>,

    /// Exploration state for each thread slot.
    threads: [Thread; MAX_THREADS],

    /// The previous schedule branch along the path, if any.
    prev: Option<object::Ref<Schedule>>,

    /// Whether this branch participates in exploration / backtracking.
    exploring: bool,
}
55
/// An atomic-load branch point: the set of candidate stores the load may
/// observe, and which one is observed on the current iteration.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Load {
    /// Candidate store indices seeded by `push_load`; only the first `len`
    /// entries are meaningful.
    values: [u8; MAX_ATOMIC_HISTORY],

    /// Index into `values` of the store returned on this iteration;
    /// advanced by `step` to explore the next candidate.
    pos: u8,

    /// Number of seeded candidates in `values`.
    len: u8,

    /// Whether this branch participates in exploration.
    exploring: bool,
}
70
/// A spurious-wakeup branch point: first explored as "no spurious wakeup"
/// (`spur == false`), then flipped to `true` by `step` on a later iteration.
#[derive(Debug)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) struct Spurious {
    /// Whether the wakeup is spurious on the current iteration.
    spur: bool,
    /// Whether this branch participates in exploration.
    exploring: bool,
}
77
// NOTE(review): `objects!` is a project-local macro (defined elsewhere);
// assumed to generate the `Entry` enum with one variant per branch type and
// the downcast plumbing used by `object::Ref` — confirm against the macro
// definition.
objects! {
    #[derive(Debug)]
    #[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
    Entry,
    Schedule(Schedule),
    Load(Load),
    Spurious(Spurious),
}
86
/// Exploration state of a single thread slot within a `Schedule`.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
pub(crate) enum Thread {
    /// The thread cannot be scheduled at this branch point (also the value
    /// slots are initialized to in `branch_thread`).
    Disabled,

    /// Runnable but not currently queued; `Thread::explore` promotes it to
    /// `Pending` when the slot is picked for backtracking.
    Skip,

    /// The thread yielded; `branch_thread` promotes a `Yield` slot to
    /// `Active` when no seeded thread is active.
    Yield,

    /// Queued for exploration; `step` promotes one pending slot to `Active`
    /// to produce the next execution.
    Pending,

    /// The thread scheduled to run at this branch point.
    Active,

    /// Was active here on a previous iteration; this choice is exhausted.
    Visited,
}
108
/// Asserts that the branch store still has spare capacity, i.e. the model
/// has not exceeded the configured maximum number of branches. The check is
/// bypassed while already panicking so unwinding does not trigger a second
/// panic.
macro_rules! assert_path_len {
    ($branches:expr) => {{
        assert!(
            $branches.len() < $branches.capacity() || std::thread::panicking(),
            "Model exceeded maximum number of branches. This is often caused \
             by an algorithm requiring the processor to make progress, e.g. \
             spin locks.",
        );
    }};
}
124
impl Path {
    /// Creates a new, empty `Path` with capacity for `max_branches` branch
    /// entries. `exploring` seeds both the current exploring flag and the
    /// value it resets to at the start of each `step`.
    pub(crate) fn new(max_branches: usize, preemption_bound: Option<u8>, exploring: bool) -> Path {
        Path {
            preemption_bound,
            pos: 0,
            branches: object::Store::with_capacity(max_branches),
            exploring,
            skipping: false,
            exploring_on_start: exploring,
        }
    }

    /// Enters the exploring state: branches recorded from now on are marked
    /// eligible for exploration. Must currently be in the critical
    /// (non-exploring) state — a no-op while `skipping`.
    pub(crate) fn explore_state(&mut self) {
        if !self.skipping {
            assert!(!self.exploring, "not in critical state");
            self.exploring = true;
        }
    }

    /// Leaves the exploring state and enters the critical state. Must
    /// currently be exploring — a no-op while `skipping`.
    pub(crate) fn critical(&mut self) {
        if !self.skipping {
            assert!(self.exploring, "not in exploring state");
            self.exploring = false;
        }
    }

    /// Stops exploring and suppresses the exploring-state assertions until
    /// the next `step` resets both flags.
    pub(crate) fn skip_branch(&mut self) {
        self.exploring = false;
        self.skipping = true;
    }

    /// Ensures the branch store can hold at least `max_branches` entries in
    /// total (reserves the difference beyond the current length).
    pub(crate) fn set_max_branches(&mut self, max_branches: usize) {
        self.branches
            .reserve_exact(max_branches - self.branches.len());
    }

    /// `true` when the traversal position has reached the end of the
    /// recorded branches, i.e. new branches are being recorded rather than
    /// replayed.
    pub(super) fn is_traversed(&self) -> bool {
        self.pos == self.branches.len()
    }

    /// Current traversal position within the recorded branches.
    pub(super) fn pos(&self) -> usize {
        self.pos
    }

    /// Records a new atomic-load branch seeded with the candidate store
    /// indices in `seed`.
    ///
    /// # Panics
    ///
    /// Panics if the branch store is full, or if `seed` contains a value at
    /// (or has a length reaching) `MAX_ATOMIC_HISTORY`.
    pub(super) fn push_load(&mut self, seed: &[u8]) {
        assert_path_len!(self.branches);

        let load_ref = self.branches.insert(Load {
            values: [0; MAX_ATOMIC_HISTORY],
            pos: 0,
            len: 0,
            exploring: self.exploring,
        });

        let load = load_ref.get_mut(&mut self.branches);

        for (i, &store) in seed.iter().enumerate() {
            assert!(
                store < MAX_ATOMIC_HISTORY as u8,
                "[loom internal bug] store = {}; max = {}",
                store,
                MAX_ATOMIC_HISTORY
            );
            assert!(
                i < MAX_ATOMIC_HISTORY,
                "[loom internal bug] i = {}; max = {}",
                i,
                MAX_ATOMIC_HISTORY
            );

            load.values[i] = store;
            load.len += 1;
        }
    }

    /// Consumes the atomic-load branch at the current position and returns
    /// the selected candidate (an index previously seeded via `push_load`).
    ///
    /// # Panics
    ///
    /// Panics if the path is already fully traversed, or if the entry at the
    /// current position is not a `Load` (non-deterministic model).
    pub(super) fn branch_load(&mut self) -> usize {
        assert!(!self.is_traversed(), "[loom internal bug]");

        let load = object::Ref::from_usize(self.pos)
            .downcast::<Load>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        load.values[load.pos as usize] as usize
    }

    /// Consumes a spurious-wakeup branch at the current position — recording
    /// a fresh one (initially non-spurious) when at the end of the path —
    /// and returns whether the wakeup is spurious on this iteration.
    pub(super) fn branch_spurious(&mut self) -> bool {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            self.branches.insert(Spurious {
                spur: false,
                exploring: self.exploring,
            });
        }

        let spurious = object::Ref::from_usize(self.pos)
            .downcast::<Spurious>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches)
            .spur;

        self.pos += 1;
        spurious
    }

    /// Consumes a schedule branch at the current position and returns the
    /// thread to run next, or `None` when no slot is active (all threads
    /// blocked).
    ///
    /// When at the end of the path, first records a new `Schedule` from
    /// `seed`: each seeded slot keeps its state; if none is `Active`, the
    /// first `Yield` slot is promoted. `initial_active` and the accumulated
    /// preemption count are derived from the previous schedule.
    pub(super) fn branch_thread(
        &mut self,
        execution_id: execution::Id,
        seed: impl ExactSizeIterator<Item = Thread>,
    ) -> Option<thread::Id> {
        if self.is_traversed() {
            assert_path_len!(self.branches);

            let prev = self.last_schedule();

            let schedule_ref = self.branches.insert(Schedule {
                preemptions: 0,
                initial_active: None,
                threads: [Thread::Disabled; MAX_THREADS],
                prev,
                exploring: self.exploring,
            });

            let schedule = schedule_ref.get_mut(&mut self.branches);

            assert!(seed.len() <= MAX_THREADS, "[loom internal bug]");

            let mut active = None;

            for (i, v) in seed.enumerate() {
                schedule.threads[i] = v;

                if v.is_active() {
                    assert!(
                        active.is_none(),
                        "[loom internal bug] only one thread should start as active"
                    );
                    active = Some(i as u8);
                }
            }

            // No seeded thread is active: promote the first yielded thread.
            if active.is_none() {
                for (i, th) in schedule.threads.iter_mut().enumerate() {
                    if *th == Thread::Yield {
                        *th = Thread::Active;
                        active = Some(i as u8);
                        break;
                    }
                }
            }

            let mut initial_active = active;

            // If the active thread changed relative to the previous branch
            // point, the switch here was forced — clear `initial_active` so
            // it is not counted as a preemption choice.
            if let Some(prev) = prev {
                if initial_active != prev.get(&self.branches).active_thread_index() {
                    initial_active = None;
                }
            }

            let preemptions = prev
                .map(|prev| prev.get(&self.branches).preemptions())
                .unwrap_or(0);

            debug_assert!(
                self.preemption_bound.is_none() || Some(preemptions) <= self.preemption_bound,
                "[loom internal bug] max = {:?}; curr = {}",
                self.preemption_bound,
                preemptions,
            );

            // Re-borrow: the earlier `schedule` borrow ended when `prev` was
            // read above.
            let schedule = schedule_ref.get_mut(&mut self.branches);
            schedule.initial_active = initial_active;
            schedule.preemptions = preemptions;
        }

        let schedule = object::Ref::from_usize(self.pos)
            .downcast::<Schedule>(&self.branches)
            .expect("Reached unexpected exploration state. Is the model fully deterministic?")
            .get(&self.branches);

        self.pos += 1;

        schedule
            .threads
            .iter()
            .enumerate()
            .find(|&(_, th)| th.is_active())
            .map(|(i, _)| thread::Id::new(execution_id, i))
    }

    /// Marks `thread_id` for exploration at the schedule branch at `point`
    /// (scanning backwards to the nearest exploring schedule if `point` is
    /// not one). With preemption bounding enabled, additionally walks the
    /// schedule chain backwards and backtracks at the most recent
    /// preemption point (where the active thread changed), so the bound is
    /// spent where it matters.
    pub(super) fn backtrack(&mut self, mut point: usize, thread_id: thread::Id) {
        let schedule = loop {
            if let Some(schedule_ref) =
                object::Ref::from_usize(point).downcast::<Schedule>(&self.branches)
            {
                let schedule = schedule_ref.get_mut(&mut self.branches);

                if schedule.exploring {
                    schedule.backtrack(thread_id, self.preemption_bound);
                    break schedule;
                }
            }

            if point == 0 {
                return;
            }

            point -= 1;
        };

        let mut curr = if let Some(curr) = schedule.prev {
            curr
        } else {
            return;
        };

        if self.preemption_bound.is_some() {
            loop {
                if let Some(prev) = curr.get(&self.branches).prev {
                    let active_a = curr.get(&self.branches).active_thread_index();
                    let active_b = prev.get(&self.branches).active_thread_index();

                    // `curr` is a preemption point: the active thread differs
                    // from the branch before it.
                    if active_a != active_b && curr.get_mut(&mut self.branches).exploring {
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                        return;
                    }

                    curr = prev;
                } else {
                    // Reached the first schedule in the chain.
                    if curr.get(&mut self.branches).exploring {
                        curr.get_mut(&mut self.branches)
                            .backtrack(thread_id, self.preemption_bound);
                    }
                    return;
                }
            }
        }
    }

    /// Resets traversal state and advances the path to the next execution to
    /// explore. Walks the recorded branches from the end, mutating the last
    /// branch that still has an unexplored option and truncating everything
    /// after it. Returns `false` when the search space is exhausted.
    pub(super) fn step(&mut self) -> bool {
        self.pos = 0;

        self.exploring = self.exploring_on_start;
        self.skipping = false;

        for last in (0..self.branches.len()).rev() {
            let last = object::Ref::from_usize(last);

            // Drop branches recorded after this one.
            // NOTE(review): assumes `Store::truncate` keeps the `last` entry
            // itself — confirm against `object::Store`.
            self.branches.truncate(last);

            if let Some(schedule_ref) = last.downcast::<Schedule>(&self.branches) {
                let schedule = schedule_ref.get_mut(&mut self.branches);

                if !schedule.exploring {
                    continue;
                }

                // Retire the choice taken on the previous iteration.
                if let Some(thread) = schedule.threads.iter_mut().find(|th| th.is_active()) {
                    *thread = Thread::Visited;
                }

                // Activate the next pending thread, if any.
                let rem = schedule
                    .threads
                    .iter_mut()
                    .find(|th| th.is_pending())
                    .map(|th| {
                        *th = Thread::Active;
                    })
                    .is_some();

                if rem {
                    return true;
                }
            } else if let Some(load_ref) = last.downcast::<Load>(&self.branches) {
                let load = load_ref.get_mut(&mut self.branches);

                if !load.exploring {
                    continue;
                }

                // Advance to the next candidate store.
                load.pos += 1;

                if load.pos < load.len {
                    return true;
                }
            } else if let Some(spurious_ref) = last.downcast::<Spurious>(&self.branches) {
                let spurious = spurious_ref.get_mut(&mut self.branches);

                if !spurious.exploring {
                    continue;
                }

                // Explore the spurious-wakeup side of the branch next.
                if !spurious.spur {
                    spurious.spur = true;
                    return true;
                }
            } else {
                unreachable!();
            }
        }

        false
    }

    /// The most recently recorded schedule branch, if any.
    fn last_schedule(&self) -> Option<object::Ref<Schedule>> {
        self.branches.iter_ref::<Schedule>().rev().next()
    }
}
473
474impl Schedule {
475 fn active_thread_index(&self) -> Option<u8> {
477 self.threads
478 .iter()
479 .enumerate()
480 .find(|(_, th)| th.is_active())
481 .map(|(index, _)| index as u8)
482 }
483
484 fn preemptions(&self) -> u8 {
486 if self.initial_active.is_some() && self.initial_active != self.active_thread_index() {
487 return self.preemptions + 1;
488 }
489
490 self.preemptions
491 }
492
493 fn backtrack(&mut self, thread_id: thread::Id, preemption_bound: Option<u8>) {
494 assert!(self.exploring);
495
496 if let Some(bound) = preemption_bound {
497 assert!(
498 self.preemptions <= bound,
499 "[loom internal bug] actual = {}, bound = {}",
500 self.preemptions,
501 bound
502 );
503
504 if self.preemptions == bound {
505 return;
506 }
507 }
508
509 let thread_id = thread_id.as_usize();
510
511 if thread_id >= self.threads.len() {
512 return;
513 }
514
515 if self.threads[thread_id].is_enabled() {
516 self.threads[thread_id].explore();
517 } else {
518 for th in &mut self.threads {
519 th.explore();
520 }
521 }
522 }
523}
524
525impl Thread {
526 fn explore(&mut self) {
527 if *self == Thread::Skip {
528 *self = Thread::Pending;
529 }
530 }
531
532 fn is_pending(&self) -> bool {
533 *self == Thread::Pending
534 }
535
536 fn is_active(&self) -> bool {
537 *self == Thread::Active
538 }
539
540 fn is_enabled(&self) -> bool {
541 !self.is_disabled()
542 }
543
544 fn is_disabled(&self) -> bool {
545 *self == Thread::Disabled
546 }
547}