//! Project utility types and helpers.
extern crate time;

use std::ops::{Deref, DerefMut};
use std::sync::atomic;

/// Wrap a value in `WrappedAlign64Type` to align it to a 64-byte boundary
/// (e.g. to keep it on its own cache line and avoid false sharing).
#[repr(align(64))]
pub struct WrappedAlign64Type<T>(pub T);

impl<T> Default for WrappedAlign64Type<T>
where
    T: Default,
{
    fn default() -> Self {
        WrappedAlign64Type(T::default())
    }
}

impl<T> Deref for WrappedAlign64Type<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.0
    }
}

impl<T> DerefMut for WrappedAlign64Type<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.0
    }
}

impl<T> From<T> for WrappedAlign64Type<T> {
    fn from(x: T) -> Self {
        WrappedAlign64Type(x)
    }
}

impl<T> WrappedAlign64Type<T> {
    /// Get a raw pointer to the inner value.
    #[inline]
    pub fn as_ptr(&self) -> *const T {
        &self.0
    }

    /// Get a mutable raw pointer to the inner value.
    ///
    /// Note that this takes `&self` and casts away constness; callers must
    /// ensure that writes through the returned pointer are properly synchronized.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.as_ptr() as *mut _
    }

    /// Get a shared reference to the inner value.
    #[inline]
    pub fn get(&self) -> &T {
        &self.0
    }

    /// Get a mutable reference to the inner value.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.0
    }
}
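
// A minimal usage sketch (not part of the original API surface): the test below
// only demonstrates how `WrappedAlign64Type` is meant to be used, e.g. keeping a
// counter in its own 64-byte-aligned slot; the module and test names are made up
// for illustration.
#[cfg(test)]
mod wrapped_align64_example {
    use super::WrappedAlign64Type;
    use std::mem;

    #[test]
    fn wraps_and_derefs() {
        let mut counter: WrappedAlign64Type<u64> = WrappedAlign64Type::from(0u64);
        *counter += 1; // DerefMut forwards to the inner u64.
        assert_eq!(*counter.get(), 1);
        // The wrapper guarantees 64-byte alignment of the whole struct.
        assert_eq!(mem::align_of::<WrappedAlign64Type<u64>>(), 64);
    }
}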

/// Return the current time in microseconds, backed by `time::precise_time_ns()`
/// (a monotonic, high-resolution counter with an unspecified epoch, not a unix
/// timestamp).
pub fn get_cur_microseconds_time() -> i64 {
    (time::precise_time_ns() / 1_000) as i64
}
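
// A minimal sketch (not from the original project): because the helper above is
// monotonic rather than wall-clock based, its natural use is measuring elapsed
// time; the module and test names are made up for illustration.
#[cfg(test)]
mod microseconds_time_example {
    use super::get_cur_microseconds_time;

    #[test]
    fn elapsed_time_is_non_negative() {
        let start = get_cur_microseconds_time();
        let end = get_cur_microseconds_time();
        assert!(end >= start);
    }
}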

/// x86_64-only atomic helpers built on `std::intrinsics`; compiling this module
/// requires a nightly toolchain with `#![feature(core_intrinsics)]` enabled at
/// the crate root.
#[cfg(target_arch = "x86_64")]
mod atomic_x86 {
    use std::cell::Cell;
    use std::intrinsics;
    use std::mem;
    use std::ops::Add;

    /// Global thread-id counter; incremented atomically in `get_thread_id`.
    pub static mut GLOBAL_THREAD_ID: Cell<i64> = Cell::new(0);

    /// Return a unique ID for the current thread, assigned lazily on first call.
    pub fn get_thread_id() -> i64 {
        thread_local! {
            static THREAD_ID: Cell<i64> = Cell::new(-1);
        }
        THREAD_ID.with(|tid| {
            if -1 == tid.get() {
                // First call on this thread: take the next id from the global counter.
                tid.set(unsafe { sync_fetch_and_add(GLOBAL_THREAD_ID.get_mut(), 1) });
            }
            tid.get()
        })
    }

    /// Like `__sync_add_and_fetch` in C: atomically add `src` to `*dst` and return the new value.
    pub unsafe fn sync_add_and_fetch<T>(dst: *mut T, src: T) -> T
    where
        T: Add<Output = T> + Copy,
    {
        intrinsics::atomic_xadd::<T>(dst, src) + src
    }

    /// Like `__sync_fetch_and_add` in C: atomically add `src` to `*dst` and return the previous value.
    pub unsafe fn sync_fetch_and_add<T>(dst: *mut T, src: T) -> T {
        intrinsics::atomic_xadd::<T>(dst, src)
    }

    /// Atomically load a raw pointer.
    pub unsafe fn atomic_load_raw_ptr<T>(ptr: *const *mut T) -> *mut T {
        intrinsics::atomic_load(ptr as *const usize) as *mut T
    }

    /// Atomically compare-and-swap a raw pointer; returns `(previous value, success)`.
    pub unsafe fn atomic_cxchg_raw_ptr<T>(
        ptr: *mut *mut T,
        old: *mut T,
        src: *mut T,
    ) -> (*mut T, bool) {
        mem::transmute(intrinsics::atomic_cxchg(
            ptr as *mut usize,
            old as usize,
            src as usize,
        ))
    }
}

#[cfg(target_arch = "x86_64")]
pub use self::atomic_x86::*;
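
// A minimal sketch (x86_64 + nightly only, not from the original project)
// showing how the helpers above are intended to be used: every thread gets a
// distinct id, and the `__sync_*`-style helpers behave like the GCC builtins.
// Module, test, and variable names are made up for illustration.
#[cfg(all(test, target_arch = "x86_64"))]
mod atomic_x86_example {
    use super::{atomic_cxchg_raw_ptr, atomic_load_raw_ptr, get_thread_id, sync_add_and_fetch};
    use std::thread;

    #[test]
    fn thread_ids_are_unique() {
        let handles: Vec<_> = (0..4).map(|_| thread::spawn(get_thread_id)).collect();
        let mut ids: Vec<i64> = handles.into_iter().map(|h| h.join().unwrap()).collect();
        ids.sort();
        ids.dedup();
        assert_eq!(ids.len(), 4); // no two threads share an id
    }

    #[test]
    fn add_and_fetch_returns_new_value() {
        let mut x: i64 = 40;
        // Like __sync_add_and_fetch: returns the value *after* the addition.
        let new = unsafe { sync_add_and_fetch(&mut x as *mut i64, 2) };
        assert_eq!(new, 42);
        assert_eq!(x, 42);
    }

    #[test]
    fn cas_swaps_matching_pointer() {
        let mut a: i32 = 1;
        let mut b: i32 = 2;
        let mut slot: *mut i32 = &mut a;
        // CAS succeeds because `slot` still holds the expected old pointer.
        let (prev, ok) =
            unsafe { atomic_cxchg_raw_ptr(&mut slot, &mut a as *mut i32, &mut b as *mut i32) };
        assert!(ok);
        assert_eq!(prev, &mut a as *mut i32);
        assert_eq!(unsafe { atomic_load_raw_ptr(&slot) }, &mut b as *mut i32);
    }
}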

/// Spin-loop hint: tell the processor that the current thread is busy-waiting.
/// Note that this does not yield to the OS scheduler (use
/// `std::thread::yield_now` for that).
#[inline]
pub fn pause() {
    atomic::spin_loop_hint();
}