// vminer_core/addr.rs

use crate::mask_range;

use super::mask;
use core::{
    fmt,
    ops::{Add, AddAssign, Sub, SubAssign},
};

/// A physical memory address.
#[derive(
    Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, bytemuck::Pod, bytemuck::Zeroable,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[repr(transparent)]
pub struct PhysicalAddress(pub u64);

impl fmt::LowerHex for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::UpperHex for PhysicalAddress {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl Add<u64> for PhysicalAddress {
    type Output = PhysicalAddress;

    #[inline]
    fn add(self, rhs: u64) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl AddAssign<u64> for PhysicalAddress {
    #[inline]
    fn add_assign(&mut self, rhs: u64) {
        self.0 += rhs;
    }
}

impl Add<i64> for PhysicalAddress {
    type Output = Self;

    #[inline]
    #[track_caller]
    fn add(self, rhs: i64) -> Self {
        let (res, o) = self.0.overflowing_add_signed(rhs);

        // Mirror built-in integer arithmetic: panic on overflow in debug
        // builds, wrap silently in release builds.
        if cfg!(debug_assertions) && o {
            panic!("attempt to add with overflow");
        }

        Self(res)
    }
}

impl AddAssign<i64> for PhysicalAddress {
    #[inline]
    fn add_assign(&mut self, rhs: i64) {
        *self = *self + rhs;
    }
}

/// Subtracting two physical addresses yields the signed distance between them.
impl Sub<PhysicalAddress> for PhysicalAddress {
    type Output = i64;

    #[inline]
    fn sub(self, rhs: PhysicalAddress) -> Self::Output {
        self.0.wrapping_sub(rhs.0) as i64
    }
}

impl Sub<u64> for PhysicalAddress {
    type Output = PhysicalAddress;

    #[inline]
    fn sub(self, rhs: u64) -> Self::Output {
        Self(self.0 - rhs)
    }
}

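// A minimal usage sketch of the arithmetic defined above (the address values
// are made up for illustration): `u64` offsets move the address forward,
// `i64` offsets may also move it backwards with the same debug-build overflow
// check as native integer addition, and subtracting two addresses yields a
// signed distance.
#[cfg(test)]
mod physical_address_examples {
    use super::*;

    #[test]
    fn offsets_and_distance() {
        let base = PhysicalAddress(0x1000);

        assert_eq!(base + 0x20u64, PhysicalAddress(0x1020));
        assert_eq!(base + (-0x800i64), PhysicalAddress(0x800));
        assert_eq!(base - 0x200u64, PhysicalAddress(0xe00));
        assert_eq!(PhysicalAddress(0x800) - base, -0x800i64);
    }
}
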
/// A virtual memory address.
#[derive(
    Debug,
    Default,
    Clone,
    Copy,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    bytemuck::Pod,
    bytemuck::Zeroable,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[repr(transparent)]
pub struct VirtualAddress(pub u64);

impl VirtualAddress {
    /// Returns `true` if the address is zero.
    #[inline]
    pub const fn is_null(self) -> bool {
        self.0 == 0
    }

    /// Returns `true` if the address lies in the higher (kernel) half.
    #[inline]
    pub const fn is_kernel(self) -> bool {
        (self.0 as i64) < 0
    }

    /// PML4 table index (bits 39..48)
    #[inline]
    pub const fn pml4e(self) -> u64 {
        (self.0 >> 39) & mask(9)
    }

    /// Page directory pointer table index (bits 30..39)
    #[inline]
    pub const fn pdpe(self) -> u64 {
        (self.0 >> 30) & mask(9)
    }

    /// Page directory index (bits 21..30)
    #[inline]
    pub const fn pde(self) -> u64 {
        (self.0 >> 21) & mask(9)
    }

    /// Page table index (bits 12..21)
    #[inline]
    pub const fn pte(self) -> u64 {
        (self.0 >> 12) & mask(9)
    }

    /// Offset within a normal page (4 KiB)
    #[inline]
    pub const fn page_offset(self) -> u64 {
        self.0 & mask(12)
    }

    /// Offset within a large page (2 MiB)
    #[inline]
    pub const fn large_page_offset(self) -> u64 {
        self.0 & mask(21)
    }

    /// Offset within a huge page (1 GiB)
    #[inline]
    pub const fn huge_page_offset(self) -> u64 {
        self.0 & mask(30)
    }
}

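// A minimal sketch of how the accessors above decompose an x86-64 virtual
// address under 4-level paging. It assumes `mask(n)` yields a value with the
// low `n` bits set (which is what the shift-and-mask pattern above relies on);
// the address itself is made up for illustration.
#[cfg(test)]
mod virtual_address_examples {
    use super::*;

    #[test]
    fn decompose_kernel_address() {
        let va = VirtualAddress(0xffff_8000_0012_3456);

        assert!(!va.is_null());
        assert!(va.is_kernel());
        assert_eq!(va.pml4e(), 0x100); // bits 39..48
        assert_eq!(va.pdpe(), 0x000); // bits 30..39
        assert_eq!(va.pde(), 0x000); // bits 21..30
        assert_eq!(va.pte(), 0x123); // bits 12..21
        assert_eq!(va.page_offset(), 0x456); // bits 0..12
    }
}
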
impl Add<u64> for VirtualAddress {
    type Output = VirtualAddress;

    #[inline]
    fn add(self, rhs: u64) -> Self::Output {
        Self(self.0 + rhs)
    }
}

impl Add<i32> for VirtualAddress {
    type Output = VirtualAddress;

    #[inline]
    fn add(self, rhs: i32) -> Self::Output {
        self + rhs as i64
    }
}

impl Add<u32> for VirtualAddress {
    type Output = VirtualAddress;

    #[inline]
    fn add(self, rhs: u32) -> Self::Output {
        Self(self.0 + rhs as u64)
    }
}

impl AddAssign<u64> for VirtualAddress {
    #[inline]
    fn add_assign(&mut self, rhs: u64) {
        self.0 += rhs;
    }
}

impl Add<i64> for VirtualAddress {
    type Output = VirtualAddress;

    #[inline]
    #[track_caller]
    fn add(self, rhs: i64) -> Self::Output {
        let (res, o) = self.0.overflowing_add_signed(rhs);

        // Mirror built-in integer arithmetic: panic on overflow in debug
        // builds, wrap silently in release builds.
        if cfg!(debug_assertions) && o {
            panic!("attempt to add with overflow");
        }

        Self(res)
    }
}

impl Sub<VirtualAddress> for VirtualAddress {
    type Output = i64;

    #[inline]
    fn sub(self, rhs: VirtualAddress) -> i64 {
        self.0.wrapping_sub(rhs.0) as i64
    }
}

impl Sub<u64> for VirtualAddress {
    type Output = Self;

    #[inline]
    #[track_caller]
    fn sub(self, rhs: u64) -> Self {
        Self(self.0 - rhs)
    }
}

impl SubAssign<u64> for VirtualAddress {
    #[inline]
    #[track_caller]
    fn sub_assign(&mut self, rhs: u64) {
        self.0 -= rhs;
    }
}

impl fmt::LowerHex for VirtualAddress {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl fmt::UpperHex for VirtualAddress {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.0.fmt(f)
    }
}

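// A small sketch of the mixed-width arithmetic above: `u32` and `i32` offsets
// are widened before being applied, and subtracting two virtual addresses
// gives a signed distance. The values are made up for illustration.
#[cfg(test)]
mod virtual_address_arith_examples {
    use super::*;

    #[test]
    fn mixed_width_offsets() {
        let base = VirtualAddress(0x7fff_0000_1000);

        assert_eq!(base + 8u32, VirtualAddress(0x7fff_0000_1008));
        assert_eq!(base + (-16i32), VirtualAddress(0x7fff_0000_0ff0));
        assert_eq!(VirtualAddress(0x7fff_0000_0ff0) - base, -16);
    }
}
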
/// A raw MMU (page-table) entry.
#[derive(Clone, Copy, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(transparent)]
pub struct MmuEntry(pub u64);

impl MmuEntry {
    /// Returns the bits selected by `mask_range(from, to)` as a physical address.
    #[inline]
    pub const fn take_bits(self, from: u32, to: u32) -> PhysicalAddress {
        PhysicalAddress(self.0 & mask_range(from, to))
    }
}

impl Sub<u64> for MmuEntry {
    type Output = Self;

    #[inline]
    fn sub(self, rhs: u64) -> Self {
        Self(self.0 - rhs)
    }
}

impl SubAssign<u64> for MmuEntry {
    #[inline]
    fn sub_assign(&mut self, rhs: u64) {
        self.0 -= rhs;
    }
}
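
// A hedged usage sketch for `MmuEntry::take_bits`. It assumes that
// `mask_range(from, to)` produces a mask with bits `from..to` set (upper
// bound exclusive); the entry value is made up to look like an x86-64
// page-table entry with flags in the low 12 bits and the frame address in
// bits 12..48.
#[cfg(test)]
mod mmu_entry_examples {
    use super::*;

    #[test]
    fn extract_frame_address() {
        let entry = MmuEntry(0x0000_0001_2345_6027);

        // Under the assumption above, only bits 12..48 (the frame address)
        // survive; the flag bits are dropped.
        assert_eq!(
            entry.take_bits(12, 48),
            PhysicalAddress(0x0000_0001_2345_6000)
        );
    }
}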