use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicUsize, Ordering};
use atomic_prim_traits::AtomicInt;
use primitive_traits::*;

// These are currently broken. We'll just not include them for now so
// we can get the 0.1.0 release out.
//
// #[cfg(feature="async")]
// mod future;
// #[cfg(feature="async")]
// pub use future::*;

/// Why an attempt to take a lease was refused.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Blocked {
    /// There are too many readers, try again in a moment.
    Readers,
    /// There is a writer. Maybe it won't be just a moment, who knows?
    Writer,
    /// We were beaten to the CAS by another thread; try again.
    LostRace,
}

/// An RWLock, but:
/// * Choose your atomic unsigned integer for storage:
///   * We steal the high bit for the writer flag.
///   * We count readers on the remaining bits.
/// * Bring your own synchronisation primitive:
///   * No looping or spinning inside the lock itself.
/// * Writers wait for the readers to drain before taking write access.
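///
/// A minimal usage sketch with the default `AtomicUsize` storage:
///
/// ```ignore
/// let lock: RWLease<u32> = RWLease::new(40);
/// {
///     let r = lock.read().expect("read lease");
///     assert_eq!(*r, 40);
/// } // the read lease is released here
/// let drain = lock.write().expect("no other writer");
/// let mut w = drain.upgrade().expect("no readers left");
/// *w += 2;
/// assert_eq!(*w, 42);
/// ```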
#[derive(Debug)]
pub struct RWLease<T, A=AtomicUsize>
where A: AtomicInt, A::Prim: AddSign {
    pub(crate) atomic: A,
    pub(crate) value: UnsafeCell<T>,
}

impl<T, A> RWLease<T, A>
where A: AtomicInt, A::Prim: AddSign {

    pub fn new(value: T) -> RWLease<T, A> {
        RWLease { atomic: A::default(), value: UnsafeCell::new(value) }
    }

    #[cfg(test)]
    pub(crate) fn new_with_state(state: A::Prim, value: T) -> RWLease<T, A> {
        RWLease { atomic: AtomicInt::new(state), value: UnsafeCell::new(value) }
    }

    /// Attempt to take a read lease by CAS or explain why we couldn't.
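    ///
    /// The lock never loops for you, so `Blocked::LostRace` just means the
    /// CAS raced another thread. A hedged retry sketch (a real program would
    /// back off or park rather than spin):
    ///
    /// ```ignore
    /// let lock: RWLease<u32> = RWLease::new(7);
    /// let guard = loop {
    ///     match lock.read() {
    ///         Ok(guard) => break guard,
    ///         Err(Blocked::LostRace) => continue, // raced another reader, retry
    ///         Err(blocked) => panic!("blocked: {:?}", blocked),
    ///     }
    /// };
    /// assert_eq!(*guard, 7);
    /// ```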
    pub fn read(&self) -> Result<ReadGuard<T, A>, Blocked> {
        self.poll_read()?;
        Ok(ReadGuard::new(&self))
    }

    /// Attempt to start a write by setting the writer bit. On success you get
    /// a [`DrainGuard`], which you can upgrade to a [`WriteGuard`] once the
    /// readers have drained away.
    pub fn write<'a>(&'a self) -> Result<DrainGuard<'a, T, A>, Blocked> {
        self.poll_write_mark().map(|ready| DrainGuard::new(&self, ready))
    }

    /// Consume the lock and return the value it was protecting.
    pub fn into_inner(self) -> T {
        self.value.into_inner()
    }

    // pub(crate) internals

    /// Try to acquire a read lease with a single CAS, or report why we couldn't.
    pub(crate) fn poll_read(&self) -> Result<(), Blocked> {
        // The writer flag lives in the high bit; the remaining bits count readers.
        let mask = <<A::Prim as AddSign>::Signed as Integer>::MIN.drop_sign();
        let current = self.atomic.load(Ordering::SeqCst);
        if current < <A::Prim as Integer>::MAX { // avoid overflow on the next line
            let new = current + <A::Prim as Integer>::ONE;
            if new < mask {
                // Hot path, if we assume writes and read saturation are
                // rare. I would like to remove the CAS from here, but
                // until we have saturating addition or more complex
                // atomic ops, that doesn't seem possible.
                if self.atomic.compare_exchange_weak(
                    current, new, Ordering::SeqCst, Ordering::SeqCst
                ).is_ok() {
                    Ok(())
                } else {
                    Err(Blocked::LostRace)
                }
            } else if (current & mask) != mask {
                Err(Blocked::Readers)
            } else {
                Err(Blocked::Writer)
            }
        } else {
            Err(Blocked::Writer)
        }
    }

    /// Set the writer bit. `Ok(true)` means there were no readers and write
    /// access is immediate; `Ok(false)` means we must wait for the readers to
    /// drain; `Err(Blocked::Writer)` means another writer already holds the bit.
    pub(crate) fn poll_write_mark(&self) -> Result<bool, Blocked> {
        let mask = <<A::Prim as AddSign>::Signed as Integer>::MIN.drop_sign();
        let ret = self.atomic.fetch_or(mask, Ordering::SeqCst);

        if ret == <A::Prim as Integer>::ZERO { Ok(true) } // No readers
        else if (ret & mask) != mask { Ok(false) } // We'll have to wait for some readers
        else { Err(Blocked::Writer) }
    }

    /// Check whether every reader has drained away, leaving only our writer bit.
    pub(crate) fn poll_write_upgrade(&self) -> bool {
        let drained = <<A::Prim as AddSign>::Signed as Integer>::MIN.drop_sign();
        drained == self.atomic.load(Ordering::SeqCst)
    }

    /// Release a read lease by decrementing the reader count. Returns the
    /// previous value of the atomic.
    pub(crate) fn done_reading(&self) -> <A as AtomicInt>::Prim {
        let one = <<A as AtomicInt>::Prim as Integer>::ONE;
        self.atomic.fetch_sub(one, Ordering::SeqCst)
    }

    /// Release the write lease by clearing the writer bit.
    pub(crate) fn done_writing(&self) {
        let mask = !<<A::Prim as AddSign>::Signed as Integer>::MIN.drop_sign();
        self.atomic.fetch_and(mask, Ordering::SeqCst);
    }

}

// Safety: as with std's RwLock, sharing the lock across threads requires
// `T: Send`, since a writer on another thread gets `&mut T` through the guard.
unsafe impl<T: Send, A> Send for RWLease<T, A> where A: AtomicInt + Send, A::Prim: AddSign {}
unsafe impl<T: Send + Sync, A> Sync for RWLease<T, A> where A: AtomicInt + Sync, A::Prim: AddSign {}

/// The DrainGuard represents waiting for the readers to release their
/// leases so we can take a write lease.
#[derive(Debug)]
pub struct DrainGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    pub(crate) lease: Option<&'a RWLease<T, A>>,
    pub(crate) ready: bool,
}

impl<'a, T, A> DrainGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {

    pub(crate) fn new(lease: &'a RWLease<T, A>, ready: bool) -> DrainGuard<'a, T, A> {
        DrainGuard { lease: Some(lease), ready }
    }

    /// Attempts to upgrade to a WriteGuard. If readers still hold leases,
    /// returns `self` so you can try again later.
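    ///
    /// A hedged sketch of the drain dance (a real program would park or back
    /// off between attempts instead of spinning):
    ///
    /// ```ignore
    /// let lock: RWLease<u32> = RWLease::new(1);
    /// let mut drain = lock.write().expect("no other writer");
    /// let mut guard = loop {
    ///     match drain.upgrade() {
    ///         Ok(guard) => break guard,
    ///         Err(again) => drain = again, // readers still present, try later
    ///     }
    /// };
    /// *guard += 1;
    /// ```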
    pub fn upgrade(mut self) -> Result<WriteGuard<'a, T, A>, DrainGuard<'a, T, A>> {
        if self.ready {
            return self.lease.take().map(|lease| WriteGuard::new(lease)).ok_or(self);
        }
        if let Some(lease) = self.lease.take() {
            if lease.poll_write_upgrade() {
                return Ok(WriteGuard::new(lease));
            }
        }
        Err(self)
    }
}

impl<'a, T, A> Drop for DrainGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    fn drop(&mut self) {
        if let Some(lease) = self.lease.take() {
            let mask = !<<A::Prim as AddSign>::Signed as Integer>::MIN.drop_sign();
            lease.atomic.fetch_and(mask, Ordering::SeqCst);
        }
    }
}

/// This guard signifies read access. When it drops, it will release the read lock.
#[derive(Debug)]
pub struct ReadGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    pub(crate) lease: Option<&'a RWLease<T, A>>, 
}

impl<'a, T, A> ReadGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    pub(crate) fn new(lease: &'a RWLease<T, A>) -> ReadGuard<'a, T, A> {
        ReadGuard { lease: Some(lease) }
    }
}

impl<'a, T, A> Deref for ReadGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lease.unwrap().value.get() }
    }
}

impl<'a, T, A> Drop for ReadGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    fn drop(&mut self) {
        if let Some(lease) = self.lease.take() {
            lease.done_reading();
        }
    }
}

/// This guard signifies write access. When it drops, it will release the write lock.
#[derive(Debug)]
pub struct WriteGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    pub(crate) lease: &'a RWLease<T, A>,
}

impl<'a, T, A> WriteGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    fn new(lease: &'a RWLease<T, A>) -> WriteGuard<'a, T, A> {
        WriteGuard { lease }
    }
}

impl<'a, T, A> Deref for WriteGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &*self.lease.value.get() }
    }
}

impl<'a, T, A> DerefMut for WriteGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.lease.value.get() }
    }
}

impl<'a, T, A> Drop for WriteGuard<'a, T, A>
where A: 'a + AtomicInt, A::Prim: AddSign, T: 'a {
    fn drop(&mut self) {
        self.lease.done_writing();
    }
}

#[cfg(test)]
mod tests {
    use crate::*;
    use std::sync::atomic::AtomicU8;

    #[test]
    fn solo_reading() {
        let rw: RWLease<usize, AtomicUsize> = RWLease::new(123);
        let r = rw.read().expect("read guard");
        assert_eq!(*r, 123);
    }

    #[test]
    fn read_with_writer() {
        // writer bit set, no readers
        let rw: RWLease<u8, AtomicU8> = RWLease::new_with_state(128, 123);
        assert_eq!(rw.read().unwrap_err(), Blocked::Writer);
    }

    #[test]
    fn read_all_ones() {
        // writer bit set and all reader bits set
        let rw: RWLease<u8, AtomicU8> = RWLease::new_with_state(255, 123);
        assert_eq!(rw.read().unwrap_err(), Blocked::Writer);
    }

    #[test]
    fn read_with_max_readers() {
        // all reader bits set, no writer
        let rw: RWLease<u8, AtomicU8> = RWLease::new_with_state(127, 123);
        assert_eq!(rw.read().unwrap_err(), Blocked::Readers);
    }

    #[test]
    fn solo_writing() {
        let rw: RWLease<usize> = RWLease::new(123);
        {
            let d = rw.write().expect("drain guard");
            let mut w = d.upgrade().expect("write guard");
            assert_eq!(*w, 123);
            *w = 124;
            assert_eq!(*w, 124);
            assert_eq!(rw.read().unwrap_err(), Blocked::Writer);
        }
        let r = rw.read().expect("read guard");
        assert_eq!(*r, 124);
    }

}