1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
use core::time::Duration;
/// Clamps a value into the inclusive range `[lower, upper]`.
///
/// Local stand-in for `Ord::clamp`, usable with `PartialOrd`-only types
/// such as floats. NOTE(review): unlike `Ord::clamp`, this does not
/// assert `lower <= upper`; when `lower > upper` the `lower` bound wins
/// because it is checked first. A NaN `self` fails both comparisons and
/// is returned unchanged.
pub(crate) trait Clamp {
    /// Returns `lower` if `self < lower`, `upper` if `upper < self`,
    /// otherwise `self` unchanged.
    fn clamp(self, lower: Self, upper: Self) -> Self;
}

/// Blanket implementation covering every `PartialOrd` type.
impl<T: PartialOrd> Clamp for T {
    fn clamp(self, lower: Self, upper: Self) -> Self {
        if self < lower {
            return lower;
        }
        if upper < self {
            return upper;
        }
        self
    }
}
/// Polyfill helpers mirroring `Duration::from_secs_f32` /
/// `Duration::as_secs_f32` — presumably for toolchains or contexts where
/// the inherent methods are unavailable (TODO confirm against crate MSRV).
pub(crate) trait DurationHelpers {
    /// Builds a `Duration` from a number of seconds given as `f32`.
    ///
    /// # Panics
    /// Panics if `secs` is negative, non-finite, or too large to be
    /// represented as a `Duration` (matching std's `from_secs_f32`).
    fn from_secs_f32_2(secs: f32) -> Self;
    /// Returns the duration as a fractional number of seconds.
    /// Precision is limited to f32's ~24-bit mantissa for large values.
    fn as_secs_f32_2(&self) -> f32;
}

/// Number of nanoseconds in one second.
const NANOS_PER_SEC: u32 = 1_000_000_000;

impl DurationHelpers for Duration {
    fn from_secs_f32_2(secs: f32) -> Self {
        let nanos = secs * (NANOS_PER_SEC as f32);
        // Mirror std's `Duration::from_secs_f32`: reject unrepresentable
        // inputs loudly. Previously a negative `secs` silently saturated
        // to zero in the `as u128` cast, and seconds beyond u64::MAX
        // silently truncated in the `as u64` cast.
        assert!(nanos.is_finite(), "duration is not finite");
        assert!(nanos >= 0.0, "duration is negative");
        let nanos = nanos as u128;
        let whole_secs = nanos / u128::from(NANOS_PER_SEC);
        assert!(
            whole_secs <= u128::from(u64::MAX),
            "duration overflows u64 seconds"
        );
        Duration::new(
            whole_secs as u64,
            // Remainder of a division by NANOS_PER_SEC always fits in u32.
            (nanos % u128::from(NANOS_PER_SEC)) as u32,
        )
    }

    fn as_secs_f32_2(&self) -> f32 {
        (self.as_secs() as f32)
            + (self.subsec_nanos() as f32) / (NANOS_PER_SEC as f32)
    }
}
/// Converts continuous movement (in arbitrary units) into a whole number
/// of discrete steps, carrying the fractional remainder across calls.
///
/// NOTE(review): the public identifier keeps the existing (misspelled)
/// name `CummulativeSteps` for caller compatibility.
#[derive(Debug, Clone, PartialEq)]
pub struct CummulativeSteps {
    // Scale factor: how many steps make up one unit of movement.
    steps_per_unit: f32,
    // Cumulative (fractional) step count since construction.
    steps: f32,
}

impl CummulativeSteps {
    /// Creates a tracker starting at zero accumulated steps.
    pub const fn new(steps_per_unit: f32) -> CummulativeSteps {
        CummulativeSteps {
            steps_per_unit,
            steps: 0.0,
        }
    }

    /// Raw accumulated step count, fractional part included.
    pub const fn real_location(&self) -> f32 {
        self.steps
    }

    /// Current steps-per-unit scale factor.
    pub const fn steps_per_unit(&self) -> f32 {
        self.steps_per_unit
    }

    /// Returns a copy of `self` with a different scale factor; the
    /// accumulated step count is carried over unchanged.
    pub const fn with_steps_per_unit(
        &self,
        steps_per_unit: f32,
    ) -> CummulativeSteps {
        CummulativeSteps {
            steps_per_unit,
            steps: self.steps,
        }
    }

    /// Advances the position by `delta` units and returns how many whole
    /// steps that movement produced (negative when moving backwards).
    /// The fractional remainder stays in `self.steps` for later calls.
    pub fn move_by(&mut self, delta: f32) -> i64 {
        // Snapshot the rounded position before moving, then round the
        // difference — operation order is kept exactly as before since
        // f32 rounding at half-step boundaries is order-sensitive.
        let before = self.steps.round();
        self.steps += delta * self.steps_per_unit;
        let emitted = (self.steps - before).round();
        emitted as i64
    }
}