use std::marker;
use std::cell::UnsafeCell;
use std::fmt;
use std::mem;
use std::ptr;
use std::ops::{Deref, DerefMut};

// For now, treat this as an arch-independent constant.
const CACHE_LINE: usize = 32;
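// With the `[usize; CACHE_LINE]` buffer below, this pads values to
// 32 * size_of::<usize>() bytes: 256 bytes on 64-bit targets and 128 bytes on
// 32-bit targets, comfortably covering the common 64- and 128-byte cachelines.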

#[cfg_attr(feature = "nightly",
           repr(simd))]
#[derive(Debug)]
struct Padding(u64, u64, u64, u64);

/// Pad `T` to the length of a cacheline.
///
/// Sometimes concurrent programming requires a piece of data to be padded out
/// to the size of a cacheline to avoid "false sharing": cachelines being
/// invalidated due to unrelated concurrent activity. Use the `CachePadded` type
/// when you want a value to occupy its own cacheline and thus *avoid* false
/// sharing.
///
/// At the moment, cache lines are assumed to be `32 * size_of::<usize>()`
/// bytes on all architectures.
///
/// **Warning**: the wrapped data is never dropped; move out using `ptr::read`
/// if you need to run dtors.
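///
/// # Examples
///
/// A sketch of the intended use (hypothetical snippet; assumes `CachePadded`
/// is in scope):
///
/// ```ignore
/// use std::sync::atomic::{AtomicUsize, Ordering};
///
/// // Each counter occupies its own cacheline, so hammering on `a` from one
/// // thread does not invalidate the line holding `b`.
/// let a = CachePadded::new(AtomicUsize::new(0));
/// let b = CachePadded::new(AtomicUsize::new(0));
/// a.fetch_add(1, Ordering::SeqCst);
/// assert_eq!(a.load(Ordering::SeqCst), 1);
/// assert_eq!(b.load(Ordering::SeqCst), 0);
/// ```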
pub struct CachePadded<T> {
    data: UnsafeCell<[usize; CACHE_LINE]>,
    _marker: ([Padding; 0], marker::PhantomData<T>),
}

impl<T> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CachePadded {{ ... }}")
    }
}

unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

#[cfg(not(feature = "nightly"))]
macro_rules! declare_zeros_valid {
    () => {
        /// Types for which mem::zeroed() is safe.
        ///
        /// If a type `T: ZerosValid`, then a sequence of zeros the size of `T` must be
        /// a valid member of the type `T`.
        pub unsafe trait ZerosValid {}
    }
}

#[cfg(feature = "nightly")]
macro_rules! declare_zeros_valid {
    () => {
        /// Types for which mem::zeroed() is safe.
        ///
        /// If a type `T: ZerosValid`, then a sequence of zeros the size of `T` must be
        /// a valid member of the type `T`.
        pub unsafe auto trait ZerosValid {}
    }
}

declare_zeros_valid!();

macro_rules! zeros_valid { ($( $T:ty )*) => ($(
    unsafe impl ZerosValid for $T {}
)*)}

zeros_valid!(u8 u16 u32 u64 usize);
zeros_valid!(i8 i16 i32 i64 isize);

unsafe impl ZerosValid for ::std::sync::atomic::AtomicUsize {}
unsafe impl<T> ZerosValid for ::std::sync::atomic::AtomicPtr<T> {}
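
// Sketch of a user-side impl (hypothetical `Stats` type): any type whose
// all-zeros bit pattern is a valid value can opt in the same way.
//
//     struct Stats { hits: u64, misses: u64 }
//     unsafe impl ZerosValid for Stats {}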

impl<T: ZerosValid> CachePadded<T> {
    /// An equivalent of `mem::zeroed()` (a plain `fn` without the "nightly" feature).
    #[cfg(not(feature = "nightly"))]
    pub fn zeroed() -> CachePadded<T> {
        CachePadded {
            data: UnsafeCell::new([0; CACHE_LINE]),
            _marker: ([], marker::PhantomData),
        }
    }

    /// A `const fn` equivalent of `mem::zeroed()`.
    #[cfg(feature = "nightly")]
    pub const fn zeroed() -> CachePadded<T> {
        CachePadded {
            data: UnsafeCell::new([0; CACHE_LINE]),
            _marker: ([], marker::PhantomData),
        }
    }
}
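
// Usage sketch (nightly only, hypothetical): because `zeroed` is a `const fn`
// there, a padded atomic can be placed directly in a `static`:
//
//     static COUNTER: CachePadded<AtomicUsize> = CachePadded::zeroed();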

/// Assert that `T` fits within a `CachePadded<T>` in both size and alignment.
#[inline]
fn assert_valid<T>() {
    assert!(mem::size_of::<T>() <= mem::size_of::<CachePadded<T>>());
    assert!(mem::align_of::<T>() <= mem::align_of::<CachePadded<T>>());
}

impl<T> CachePadded<T> {
    /// Wrap `t` with cacheline padding.
    ///
    /// **Warning**: the wrapped data is never dropped; move out using
    /// `ptr::read` if you need to run dtors.
    pub fn new(t: T) -> CachePadded<T> {
        assert_valid::<T>();
        let ret = CachePadded {
            data: UnsafeCell::new([0; CACHE_LINE]),
            _marker: ([], marker::PhantomData),
        };
        unsafe {
            let p = ret.data.get() as *mut T;
            ptr::write(p, t);
        }
        ret
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;
    fn deref(&self) -> &T {
        assert_valid::<T>();
        unsafe { &*(self.data.get() as *const T) }
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        assert_valid::<T>();
        unsafe { &mut *(self.data.get() as *mut T) }
    }
}

// FIXME: support Drop by pulling out a version usable for statics
/*
impl<T> Drop for CachePadded<T> {
    fn drop(&mut self) {
        assert_valid::<T>();
        unsafe {
            let p = self.data.get() as *mut T;
            mem::drop(ptr::read(p));
        }
    }
}
*/
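
// A sketch of the move-out pattern the warnings above describe: `CachePadded`
// never drops the inner `T`, so a caller that needs the destructor to run can
// read the value back out and let it drop normally. Hypothetical helper, not
// part of the public API.
#[allow(dead_code)]
fn take_inner<T>(padded: CachePadded<T>) -> T {
    assert_valid::<T>();
    // Sound because the buffer holds a valid `T` (written by `new`, or
    // zero-initialized via `zeroed`), and dropping `padded` afterwards will
    // not touch it again (no `Drop` impl).
    unsafe { ptr::read(padded.data.get() as *const T) }
}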

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn cache_padded_store_u64() {
        let x: CachePadded<u64> = CachePadded::new(17);
        assert_eq!(*x, 17);
    }

    #[test]
    fn cache_padded_store_pair() {
        let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37));
        assert_eq!(x.0, 17);
        assert_eq!(x.1, 37);
    }
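
    #[test]
    fn cache_padded_zeroed_atomic() {
        use std::sync::atomic::{AtomicUsize, Ordering};

        // `AtomicUsize: ZerosValid`, so the all-zeros cacheline produced by
        // `zeroed` is a valid starting value.
        let x: CachePadded<AtomicUsize> = CachePadded::zeroed();
        assert_eq!(x.load(Ordering::Relaxed), 0);
        x.store(5, Ordering::Relaxed);
        assert_eq!(x.load(Ordering::Relaxed), 5);
    }

    #[test]
    fn cache_padded_size_and_align() {
        use std::mem;

        // Sanity-check the invariant behind `assert_valid`: the wrapper is one
        // full `[usize; CACHE_LINE]` buffer.
        assert_eq!(mem::size_of::<CachePadded<u64>>(),
                   CACHE_LINE * mem::size_of::<usize>());
        assert!(mem::align_of::<u64>() <= mem::align_of::<CachePadded<u64>>());
    }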
}