// MIT/Apache2 License

//! Implementation of a basic spin-based RwLock
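//!
//! All lock operations simply spin until they can proceed; there is no queueing
//! or fairness guarantee between readers, writers, and upgradable readers.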

#![no_std]
#![warn(clippy::pedantic)]

use core::hint::spin_loop;
use core::sync::atomic::{AtomicUsize, Ordering};
use lock_api::{
    GuardSend, RawRwLock, RawRwLockDowngrade, RawRwLockUpgrade, RwLock as LARwLock,
    RwLockReadGuard as LARwLockReadGuard, RwLockUpgradableReadGuard as LARwLockUpgradableReadGuard,
    RwLockWriteGuard as LARwLockWriteGuard,
};

/// Raw spin-based reader-writer lock, meant to be used through the `lock_api`
/// wrappers (see the type aliases at the bottom of this module).
pub struct RawRwSpinlock(AtomicUsize);

// Flag bits packed into the lock's atomic state: the two low bits mark an
// exclusive writer and an upgradable reader, and the remaining bits count
// active readers (each reader adds `READER` to the state).
const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1 << 0;

unsafe impl RawRwLock for RawRwSpinlock {
    const INIT: RawRwSpinlock = RawRwSpinlock(AtomicUsize::new(0));

    type GuardMarker = GuardSend;

    fn lock_shared(&self) {
        while !self.try_lock_shared() {
            spin_loop();
        }
    }

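    // Optimistically bump the reader count, then roll it back if a writer or
    // upgradable reader currently holds the lock.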
    fn try_lock_shared(&self) -> bool {
        let value = self.0.fetch_add(READER, Ordering::Acquire);

        if value & (WRITER | UPGRADED) != 0 {
            self.0.fetch_sub(READER, Ordering::Relaxed);
            false
        } else {
            true
        }
    }

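    // The exclusive lock can only be taken when the state is exactly 0, i.e.
    // no readers, writers, or upgradable readers are active.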
    fn try_lock_exclusive(&self) -> bool {
        self.0
            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

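    // Spin, retrying the weak compare-exchange until the state can be swapped
    // from 0 (fully unlocked) to WRITER.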
    fn lock_exclusive(&self) {
        loop {
            match self
                .0
                .compare_exchange_weak(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
            {
                Ok(_) => return,
                Err(_) => spin_loop(),
            }
        }
    }

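    // Drop one reader from the count.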
    unsafe fn unlock_shared(&self) {
        self.0.fetch_sub(READER, Ordering::Release);
    }

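    // Clear both WRITER and UPGRADED: a failed `try_lock_upgradable` may have
    // left a stray UPGRADED bit behind while this writer held the lock.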
    unsafe fn unlock_exclusive(&self) {
        self.0.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}

unsafe impl RawRwLockUpgrade for RawRwSpinlock {
    fn lock_upgradable(&self) {
        while !self.try_lock_upgradable() {
            spin_loop();
        }
    }

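    // Set the UPGRADED bit and succeed only if no writer or other upgradable
    // reader was present. A stray bit set here while a writer holds the lock
    // is cleared later by `unlock_exclusive`.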
    fn try_lock_upgradable(&self) -> bool {
        self.0.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0
    }

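    // Upgrading succeeds only when this upgradable lock is the sole holder
    // (the state is exactly UPGRADED); the bit is swapped for WRITER.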
    unsafe fn try_upgrade(&self) -> bool {
        self.0
            .compare_exchange(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

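    // Spin until any remaining readers have released, then swap UPGRADED for
    // WRITER.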
    unsafe fn upgrade(&self) {
        loop {
            match self.0.compare_exchange_weak(
                UPGRADED,
                WRITER,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(_) => spin_loop(),
            }
        }
    }

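    // Clear the UPGRADED bit; any readers already holding the lock are
    // unaffected.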
    unsafe fn unlock_upgradable(&self) {
        self.0.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

unsafe impl RawRwLockDowngrade for RawRwSpinlock {
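    // Register this thread as a reader before releasing the writer bits, so no
    // other writer can slip in between the two steps.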
    unsafe fn downgrade(&self) {
        self.0.fetch_add(READER, Ordering::Acquire);
        self.unlock_exclusive();
    }
}

/// A read-write lock that uses a spinlock internally.
pub type RwLock<T> = LARwLock<RawRwSpinlock, T>;
/// A read guard for the read-write lock.
pub type RwLockReadGuard<'a, T> = LARwLockReadGuard<'a, RawRwSpinlock, T>;
/// A write guard for the read-write lock.
pub type RwLockWriteGuard<'a, T> = LARwLockWriteGuard<'a, RawRwSpinlock, T>;
/// An upgradable read guard for the read-write lock.
pub type RwLockUpgradableReadGuard<'a, T> = LARwLockUpgradableReadGuard<'a, RawRwSpinlock, T>; 

#[test]
fn basics() {
    let rwlock = RwLock::new(8);
    assert_eq!(*rwlock.read(), 8);
    *rwlock.write() = 7;
    assert_eq!(*rwlock.read(), 7);
}
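
// Sketch of a test for the upgradable-read path: take an upgradable read lock,
// upgrade it to a write lock, mutate, then downgrade back to a plain read lock
// through lock_api's guard associated functions.
#[test]
fn upgrade_and_downgrade() {
    let rwlock = RwLock::new(1);

    // An upgradable read lock can observe the value but blocks writers.
    let upgradable = rwlock.upgradable_read();
    assert_eq!(*upgradable, 1);

    // Upgrading waits for any remaining readers, then grants write access.
    let mut writer = RwLockUpgradableReadGuard::upgrade(upgradable);
    *writer = 2;

    // Downgrading hands the lock back to readers without fully releasing it.
    let reader = RwLockWriteGuard::downgrade(writer);
    assert_eq!(*reader, 2);
}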