use core::marker::PhantomData;
use core::ops;
pub mod delay;
pub mod hrtimer;

/// The number of nanoseconds per microsecond.
pub const NSEC_PER_USEC: i64 = bindings::NSEC_PER_USEC as i64;

/// The number of nanoseconds per millisecond.
pub const NSEC_PER_MSEC: i64 = bindings::NSEC_PER_MSEC as i64;

/// The number of nanoseconds per second.
pub const NSEC_PER_SEC: i64 = bindings::NSEC_PER_SEC as i64;

/// The kernel time unit of jiffies.
pub type Jiffies = crate::ffi::c_ulong;

/// The millisecond time unit.
pub type Msecs = crate::ffi::c_uint;
/// Converts milliseconds to jiffies.
#[inline]
pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
    // SAFETY: The `__msecs_to_jiffies` C function has no documented
    // preconditions; it is a pure conversion callable with any `msecs` value.
    unsafe { bindings::__msecs_to_jiffies(msecs) }
}
/// Trait for clock sources that can be read as a `ktime_t` timestamp.
///
/// # Safety
///
/// Implementers must ensure that `ID` is the clock id of the clock that
/// `ktime_get()` reads, and that `ktime_get()` returns a valid, non-negative
/// timestamp in nanoseconds for that clock ([`Instant`] relies on this).
pub unsafe trait ClockSource {
    /// The kernel clock id of this clock source (one of the `CLOCK_*` ids).
    const ID: bindings::clockid_t;

    /// Reads the current time of this clock source, in nanoseconds.
    fn ktime_get() -> bindings::ktime_t;
}
/// The monotonic clock (`CLOCK_MONOTONIC`).
pub struct Monotonic;

// SAFETY: `ID` is the id of the monotonic clock, which is the same clock that
// `ktime_get()` below reads.
unsafe impl ClockSource for Monotonic {
    const ID: bindings::clockid_t = bindings::CLOCK_MONOTONIC as bindings::clockid_t;

    fn ktime_get() -> bindings::ktime_t {
        // SAFETY: It is always safe to call `ktime_get()` outside of NMI context.
        unsafe { bindings::ktime_get() }
    }
}
/// The wall-clock time (`CLOCK_REALTIME`).
pub struct RealTime;

// SAFETY: `ID` is the id of the realtime clock, which is the same clock that
// `ktime_get_real()` below reads.
unsafe impl ClockSource for RealTime {
    const ID: bindings::clockid_t = bindings::CLOCK_REALTIME as bindings::clockid_t;

    fn ktime_get() -> bindings::ktime_t {
        // SAFETY: It is always safe to call `ktime_get_real()` outside of NMI
        // context.
        unsafe { bindings::ktime_get_real() }
    }
}
/// The boot-time clock (`CLOCK_BOOTTIME`).
pub struct BootTime;

// SAFETY: `ID` is the id of the boot-time clock, which is the same clock that
// `ktime_get_boottime()` below reads.
unsafe impl ClockSource for BootTime {
    const ID: bindings::clockid_t = bindings::CLOCK_BOOTTIME as bindings::clockid_t;

    fn ktime_get() -> bindings::ktime_t {
        // SAFETY: It is always safe to call `ktime_get_boottime()` outside of
        // NMI context.
        unsafe { bindings::ktime_get_boottime() }
    }
}
/// The TAI clock (`CLOCK_TAI`, international atomic time).
pub struct Tai;

// SAFETY: `ID` is the id of the TAI clock, which is the same clock that
// `ktime_get_clocktai()` below reads.
unsafe impl ClockSource for Tai {
    const ID: bindings::clockid_t = bindings::CLOCK_TAI as bindings::clockid_t;

    fn ktime_get() -> bindings::ktime_t {
        // SAFETY: It is always safe to call `ktime_get_clocktai()` outside of
        // NMI context.
        unsafe { bindings::ktime_get_clocktai() }
    }
}
/// A specific point in time, read from clock source `C`.
///
/// # Invariants
///
/// `inner` is a non-negative timestamp in nanoseconds; `from_ktime` and the
/// `Add`/`Sub` impls below check this (in debug / overflow-checked builds).
#[repr(transparent)]
#[derive(PartialEq, PartialOrd, Eq, Ord)]
pub struct Instant<C: ClockSource> {
    // Raw `ktime_t` timestamp, in nanoseconds.
    inner: bindings::ktime_t,
    // Zero-sized marker tying the instant to its clock source type.
    _c: PhantomData<C>,
}
impl<C: ClockSource> Clone for Instant<C> {
fn clone(&self) -> Self {
*self
}
}
impl<C: ClockSource> Copy for Instant<C> {}
impl<C: ClockSource> Instant<C> {
    /// Returns the current time as read from clock source `C`.
    #[inline]
    pub fn now() -> Self {
        Self {
            inner: C::ktime_get(),
            _c: PhantomData,
        }
    }

    /// Returns the duration that has elapsed since this instant was captured.
    #[inline]
    pub fn elapsed(&self) -> Delta {
        Self::now() - *self
    }

    /// Returns the raw timestamp in nanoseconds.
    #[inline]
    pub(crate) fn as_nanos(&self) -> i64 {
        self.inner
    }

    /// Creates an [`Instant`] from a raw `ktime_t` value.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ktime` is a valid, non-negative timestamp
    /// obtained from clock source `C`.
    #[inline]
    pub(crate) unsafe fn from_ktime(ktime: bindings::ktime_t) -> Self {
        // Cheap sanity check of the safety contract; debug builds only.
        debug_assert!(ktime >= 0);
        Self {
            inner: ktime,
            _c: PhantomData,
        }
    }
}
impl<C: ClockSource> ops::Sub for Instant<C> {
    type Output = Delta;

    /// Returns the signed span of time from `rhs` to `self`.
    #[inline]
    fn sub(self, rhs: Instant<C>) -> Delta {
        let nanos = self.inner - rhs.inner;
        Delta { nanos }
    }
}
/// Advances an [`Instant`] by a [`Delta`].
// Consistency fix: the generic clock-source parameter is named `C` everywhere
// else in this file; it was `T` here.
impl<C: ClockSource> ops::Add<Delta> for Instant<C> {
    type Output = Self;

    #[inline]
    fn add(self, rhs: Delta) -> Self::Output {
        let res = self.inner + rhs.nanos;
        // Uphold the "non-negative timestamp" invariant of `Instant` whenever
        // overflow checks are enabled.
        #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
        assert!(res >= 0);
        Self {
            inner: res,
            _c: PhantomData,
        }
    }
}
/// Moves an [`Instant`] back by a [`Delta`].
// Consistency fix: the generic clock-source parameter is named `C` everywhere
// else in this file; it was `T` here.
impl<C: ClockSource> ops::Sub<Delta> for Instant<C> {
    type Output = Self;

    #[inline]
    fn sub(self, rhs: Delta) -> Self::Output {
        let res = self.inner - rhs.nanos;
        // Uphold the "non-negative timestamp" invariant of `Instant` whenever
        // overflow checks are enabled.
        #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
        assert!(res >= 0);
        Self {
            inner: res,
            _c: PhantomData,
        }
    }
}
/// A span of time; may be negative.
#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Debug)]
pub struct Delta {
    // Signed length of the span, in nanoseconds.
    nanos: i64,
}
impl ops::Add for Delta {
    type Output = Self;

    /// Returns the sum of two time spans.
    #[inline]
    fn add(self, rhs: Self) -> Self {
        let nanos = self.nanos + rhs.nanos;
        Self { nanos }
    }
}
impl ops::AddAssign for Delta {
    /// Adds `rhs` to this time span in place.
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        // `Delta` is `Copy`; delegate to `Add` for a single source of truth.
        *self = *self + rhs;
    }
}
impl ops::Sub for Delta {
    type Output = Self;

    /// Returns the difference of two time spans.
    #[inline]
    fn sub(self, rhs: Self) -> Self::Output {
        let nanos = self.nanos - rhs.nanos;
        Self { nanos }
    }
}
impl ops::SubAssign for Delta {
    /// Subtracts `rhs` from this time span in place.
    #[inline]
    fn sub_assign(&mut self, rhs: Self) {
        // `Delta` is `Copy`; delegate to `Sub` for a single source of truth.
        *self = *self - rhs;
    }
}
impl ops::Mul<i64> for Delta {
    type Output = Self;

    /// Returns the time span scaled by `rhs`.
    #[inline]
    fn mul(self, rhs: i64) -> Self::Output {
        let nanos = self.nanos * rhs;
        Self { nanos }
    }
}
impl ops::MulAssign<i64> for Delta {
    /// Scales this time span by `rhs` in place.
    #[inline]
    fn mul_assign(&mut self, rhs: i64) {
        // `Delta` is `Copy`; delegate to `Mul` for a single source of truth.
        *self = *self * rhs;
    }
}
impl ops::Div for Delta {
    type Output = i64;

    /// Divides one time span by another, yielding the truncated quotient.
    #[inline]
    fn div(self, rhs: Self) -> Self::Output {
        // On 64-bit targets, native 64-bit division is fine.
        #[cfg(CONFIG_64BIT)]
        {
            self.nanos / rhs.nanos
        }
        // On 32-bit targets, the kernel requires its 64-bit division helper.
        #[cfg(not(CONFIG_64BIT))]
        {
            // SAFETY: `div64_s64` has no documented safety preconditions; it
            // only performs arithmetic on its arguments.
            unsafe { bindings::div64_s64(self.nanos, rhs.nanos) }
        }
    }
}
impl Delta {
    /// A span of time that is exactly zero nanoseconds long.
    pub const ZERO: Self = Self { nanos: 0 };

    /// Creates a new [`Delta`] from a number of nanoseconds.
    #[inline]
    pub const fn from_nanos(nanos: i64) -> Self {
        Self { nanos }
    }

    /// Creates a new [`Delta`] from a number of microseconds.
    ///
    /// Saturates at the `i64` limits instead of overflowing.
    #[inline]
    pub const fn from_micros(micros: i64) -> Self {
        Self {
            nanos: micros.saturating_mul(NSEC_PER_USEC),
        }
    }

    /// Creates a new [`Delta`] from a number of milliseconds.
    ///
    /// Saturates at the `i64` limits instead of overflowing.
    #[inline]
    pub const fn from_millis(millis: i64) -> Self {
        Self {
            nanos: millis.saturating_mul(NSEC_PER_MSEC),
        }
    }

    /// Creates a new [`Delta`] from a number of seconds.
    ///
    /// Saturates at the `i64` limits instead of overflowing.
    #[inline]
    pub const fn from_secs(secs: i64) -> Self {
        Self {
            nanos: secs.saturating_mul(NSEC_PER_SEC),
        }
    }

    /// Returns `true` if this span is zero nanoseconds long.
    #[inline]
    pub fn is_zero(self) -> bool {
        self.as_nanos() == 0
    }

    /// Returns `true` if this span is negative.
    #[inline]
    pub fn is_negative(self) -> bool {
        self.as_nanos() < 0
    }

    /// Returns the number of nanoseconds in this span.
    #[inline]
    pub const fn as_nanos(self) -> i64 {
        self.nanos
    }

    /// Returns the number of microseconds in this span, rounded up.
    ///
    /// The `saturating_add` keeps the round-up bias from wrapping when the
    /// span is near `i64::MAX`.
    #[inline]
    pub fn as_micros_ceil(self) -> i64 {
        #[cfg(CONFIG_64BIT)]
        {
            self.as_nanos().saturating_add(NSEC_PER_USEC - 1) / NSEC_PER_USEC
        }
        // On 32-bit targets, delegate the 64-bit division to a kernel helper.
        #[cfg(not(CONFIG_64BIT))]
        // SAFETY: `ktime_to_us` has no documented safety preconditions; it
        // only performs arithmetic on its argument.
        unsafe {
            bindings::ktime_to_us(self.as_nanos().saturating_add(NSEC_PER_USEC - 1))
        }
    }

    /// Returns the number of milliseconds in this span, truncated toward zero.
    #[inline]
    pub fn as_millis(self) -> i64 {
        #[cfg(CONFIG_64BIT)]
        {
            self.as_nanos() / NSEC_PER_MSEC
        }
        // On 32-bit targets, delegate the 64-bit division to a kernel helper.
        #[cfg(not(CONFIG_64BIT))]
        // SAFETY: `ktime_to_ms` has no documented safety preconditions; it
        // only performs arithmetic on its argument.
        unsafe {
            bindings::ktime_to_ms(self.as_nanos())
        }
    }

    /// Returns the remainder of this span divided by `dividend` nanoseconds.
    #[inline]
    pub fn rem_nanos(self, dividend: i32) -> Self {
        #[cfg(CONFIG_64BIT)]
        {
            Self {
                nanos: self.as_nanos() % i64::from(dividend),
            }
        }
        // On 32-bit targets, delegate the 64-bit division to a kernel helper.
        #[cfg(not(CONFIG_64BIT))]
        {
            let mut rem = 0;
            // SAFETY: `div_s64_rem` writes the remainder through the valid
            // `&mut rem` pointer; it has no other documented requirements.
            unsafe { bindings::div_s64_rem(self.as_nanos(), dividend, &mut rem) };
            Self {
                nanos: i64::from(rem),
            }
        }
    }
}