use crate::rwlock::{Locks, MutexGuardWrapper};
use lazy_static::lazy_static;
#[cfg(feature = "logging")]
use log::debug;
use parking_lot::{Mutex, RwLock};
use std::{
    cell::RefCell,
    collections::HashMap,
    ops::{Deref, DerefMut},
    sync::{atomic::AtomicU32, Arc},
    time::{Duration, Instant},
};

// One of these exists per serial group: it pairs an exclusive "serial" guard
// with a shared "parallel" counter (both provided by `crate::rwlock::Locks`).
pub(crate) struct UniqueReentrantMutex {
    locks: Locks,

    // Only actually used for tests
    #[allow(dead_code)]
    pub(crate) id: u32,
}

impl UniqueReentrantMutex {
    pub(crate) fn lock(&self) -> MutexGuardWrapper {
        self.locks.serial()
    }

    pub(crate) fn start_parallel(&self) {
        self.locks.start_parallel();
    }

    pub(crate) fn end_parallel(&self) {
        self.locks.end_parallel();
    }

    #[cfg(test)]
    pub fn parallel_count(&self) -> u32 {
        self.locks.parallel_count()
    }

    #[cfg(test)]
    pub fn is_locked(&self) -> bool {
        self.locks.is_locked()
    }
}
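
// A hedged usage sketch, added for illustration (not part of the original
// module): it shows how the serial guard and the parallel counter are meant
// to interact. It assumes the `Locks` type in `crate::rwlock` reports a live
// serial guard through `is_locked` and counts parallel holders.
#[cfg(test)]
mod unique_mutex_sketch {
    use super::UniqueReentrantMutex;

    #[test]
    fn serial_guard_then_parallel_counter() {
        let mutex = UniqueReentrantMutex::default();
        {
            // A serial section holds the exclusive guard for its whole scope.
            let _guard = mutex.lock();
            assert!(mutex.is_locked());
        }
        // Parallel sections bump a counter instead of holding the guard.
        mutex.start_parallel();
        assert_eq!(mutex.parallel_count(), 1);
        mutex.end_parallel();
        assert_eq!(mutex.parallel_count(), 0);
    }
}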

lazy_static! {
    // Global map from serial group name to its mutex; guarded by an RwLock so
    // that most tests only need a cheap shared read to look their group up.
    pub(crate) static ref LOCKS: Arc<RwLock<HashMap<String, UniqueReentrantMutex>>> =
        Arc::new(RwLock::new(HashMap::new()));
    // Maximum time to wait for a serial lock before giving up (see `set_max_wait`).
    static ref MAX_WAIT: Arc<Mutex<RefCell<Duration>>> =
        Arc::new(Mutex::new(RefCell::new(Duration::from_secs(60))));
    // Monotonically increasing source of IDs for `UniqueReentrantMutex::default`.
    static ref MUTEX_ID: Arc<AtomicU32> = Arc::new(AtomicU32::new(1));
}

impl Default for UniqueReentrantMutex {
    fn default() -> Self {
        Self {
            locks: Locks::new(),
            id: MUTEX_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst),
        }
    }
}

/// Sets the maximum amount of time the serial locks will wait to unlock.
/// By default, this is set to 60 seconds, which is almost always much longer than is needed.
/// The limit is deliberately set high to avoid hitting it accidentally, but it exists at all
/// so that a stuck lock times out rather than hanging forever.
///
/// However, if you have a *lot* of serial tests, 60 seconds might theoretically not be enough,
/// hence this method.
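///
/// # Example
///
/// A minimal sketch; the five-minute value is illustrative, and the path
/// assumes this function is re-exported at the crate root.
///
/// ```
/// serial_test::set_max_wait(std::time::Duration::from_secs(300));
/// ```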
pub fn set_max_wait(max_wait: Duration) {
    MAX_WAIT.lock().replace(max_wait);
}

pub(crate) fn wait_duration() -> Duration {
    *MAX_WAIT.lock().borrow()
}

pub(crate) fn check_new_key(name: &str) {
    let start = Instant::now();
    loop {
        #[cfg(feature = "logging")]
        {
            let duration = start.elapsed();
            debug!("Waiting for '{}' {:?}", name, duration);
        }
        // Fast path: check whether the key already exists.
        // Only a read lock is needed, which can be held concurrently with other readers.
        let try_unlock = LOCKS.try_read_recursive_for(Duration::from_secs(1));
        if let Some(unlock) = try_unlock {
            if unlock.deref().contains_key(name) {
                return;
            }
            drop(unlock); // release the read lock so that a writer (possibly us, below) can succeed
        } else {
            continue; // wasn't able to get read lock
        }

        // Slow path: the key doesn't exist yet, so take the write lock and insert it.
        // Checking under a read lock first mostly avoids multiple writers contending here.
        let try_lock = LOCKS.try_write_for(Duration::from_secs(1));

        if let Some(mut lock) = try_lock {
            lock.deref_mut().entry(name.to_string()).or_default();
            return;
        }

        // If try_write_for times out, go around the loop again.
        // Odds are another test was also waiting on the write lock and has now inserted the key.

        let duration = start.elapsed();
        if duration >= wait_duration() {
            panic!("check_new_key timed out!");
        }
    }
}
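
// A hedged sketch, added for illustration: how callers are expected to pair
// `check_new_key` with `LOCKS`. The group name "example_group" is hypothetical.
#[cfg(test)]
mod check_new_key_sketch {
    use super::{check_new_key, LOCKS};

    #[test]
    fn key_is_created_on_first_use() {
        check_new_key("example_group");
        // After the call, the named mutex exists and can be locked serially.
        let map = LOCKS.read_recursive();
        let _guard = map["example_group"].lock();
    }
}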