// vmread/rwlist.rs
1use std::marker::PhantomData;
2use smallvec::{SmallVec, smallvec};
3
/// A list of memory operations to be executed on its destruction
///
/// This provides a more efficient way of performing RW operations when the data is not needed
/// immediately. The operations get cached and executed when the object goes out of scope.
pub struct RWList<'a> {
    // Raw pointer to the process data inside the vmread C context; kept from
    // dangling for 'a by the `phantom` marker below.
    process: *const sys::ProcessData,
    // Virtual address translation entry point; 0 selects physical-address mode
    // (see `commit`, which picks MemReadMul/MemWriteMul vs the VMem variants).
    dir_base: u64,
    // Queued read operations; inline storage for up to 8 entries before
    // spilling to the heap.
    read_list: SmallVec<[sys::RWInfo; 8]>,
    // Queued write operations; same inline-storage strategy as `read_list`.
    write_list: SmallVec<[sys::RWInfo; 8]>,
    // Ties this list's lifetime to the `&'a sys::WinCtx` borrow taken in
    // `new`, so `process` stays valid while the list is alive.
    phantom: PhantomData<&'a u8>,
}
15
16impl<'a> RWList<'a> {
17    /// Create a new RWList instance
18    ///
19    /// # Arguments
20    ///
21    /// * `ctx` - vmread C context
22    /// * `dir_base` - virtual address translation entry point. 0 for physical address mode
23    pub fn new(ctx: &'a sys::WinCtx, dir_base: u64) -> RWList<'a> {
24        RWList {
25            process: &ctx.process,
26            dir_base: dir_base,
27            read_list: smallvec![],
28            write_list: smallvec![],
29            phantom: PhantomData,
30        }
31    }
32
33    /// Queue a write operation
34    ///
35    /// # Arguments
36    /// 
37    /// * `address` - address to write the data to
38    /// * `val` - reference to the value to be written
39    pub fn write<T>(&mut self, address: u64, val: &'a T) -> &mut Self {
40        self.write_list.push(sys::RWInfo {
41            local: val as *const T as u64,
42            remote: address,
43            size: std::mem::size_of::<T>() as u64
44        });
45        self
46    }
47
48    /// Queue an array write operation
49    ///
50    /// # Arguments
51    /// 
52    /// * `address` - address to write the data to
53    /// * `val` - reference to the slice to be written
54    pub fn write_arr<T>(&mut self, address: u64, val: &'a [T]) -> &mut Self {
55        self.write_list.push(sys::RWInfo {
56            local: val.as_ptr() as u64,
57            remote: address,
58            size: (std::mem::size_of::<T>() * val.len()) as u64
59        });
60        self
61    }
62
63    /// Queue a read operation
64    ///
65    /// # Arguments
66    /// 
67    /// * `address` - address to read the data from
68    /// * `val` - reference to the value to read the data into
69    pub fn read<T>(&mut self, address: u64, val: &'a mut T) -> &mut Self {
70        self.read_list.push(sys::RWInfo {
71            local: val as *mut T as u64,
72            remote: address,
73            size: std::mem::size_of::<T>() as u64
74        });
75        self
76    }
77
78    /// Queue an array read operation
79    ///
80    /// # Arguments
81    /// 
82    /// * `address` - address to read the data from
83    /// * `val` - reference to the slice to read the data into
84    pub fn read_arr<T>(&mut self, address: u64, val: &'a mut [T]) -> &mut Self {
85        self.read_list.push(sys::RWInfo {
86            local: val.as_mut_ptr() as u64,
87            remote: address,
88            size: (std::mem::size_of::<T>() * val.len()) as u64
89        });
90        self
91    }
92
93    /// Perform all cached memory operations
94    ///
95    /// Both read and write lists get iterated from the starting points and vmread C library gets invoked.
96    /// The lists then get truncated to the size of given starting points. The lists work like a
97    /// stack, with the latest elements having priority over the older elements.
98    ///
99    /// # Arguments
100    ///
101    /// * `read_start` - starting index for read operations
102    /// * `write_start` - starting index for write operations
103    pub fn commit(&mut self, read_start: usize, write_start: usize) -> (&mut Self, usize, usize) {
104        let mut done_rwlen : usize = 0;
105        let mut queued_rwlen : usize = 0;
106
107        if read_start < self.read_list.len() {
108            {
109                let read_list = &mut self.read_list[read_start..];
110                read_list.sort_unstable_by(|a, b| (a.remote & !0xfff).partial_cmp(&(b.remote & !0xfff)).unwrap());
111                queued_rwlen += read_list.iter().fold(0, |acc, a| acc + a.size) as usize;
112               
113                done_rwlen += unsafe {
114                    (if self.dir_base != 0 {
115                        sys::VMemReadMul(self.process, self.dir_base, read_list.as_mut_ptr(), read_list.len() as u64)
116                    } else {
117                        sys::MemReadMul(self.process, read_list.as_mut_ptr(), read_list.len() as u64)
118                    }) as usize
119                };
120            }
121
122            self.read_list.truncate(read_start);
123        }
124
125        if write_start < self.write_list.len() {
126            {
127                let write_list = &mut self.write_list[write_start..];
128                write_list.sort_unstable_by(|a, b| (a.remote & !0xfff).partial_cmp(&(b.remote & !0xfff)).unwrap());
129                queued_rwlen += write_list.iter().fold(0, |acc, a| acc + a.size) as usize;
130
131                done_rwlen += unsafe {
132                    (if self.dir_base != 0 {
133                        sys::VMemWriteMul(self.process, self.dir_base, write_list.as_mut_ptr(), write_list.len() as u64)
134                    } else {
135                        sys::MemWriteMul(self.process, write_list.as_mut_ptr(), write_list.len() as u64)
136                    }) as usize
137                } 
138            }
139            
140            self.write_list.truncate(write_start);
141
142        }
143
144        (self, queued_rwlen, done_rwlen)
145    }
146
147    /// Commit all RW operations in the list
148    pub fn commit_rw(&mut self) -> (&mut Self, usize, usize) {
149        self.commit(0, 0)
150    }
151
152    /// Commit only read operations in the list
153    pub fn commit_read(&mut self) -> (&mut Self, usize, usize) {
154        self.commit(0, self.write_list.len())
155    }
156
157    /// Commit only write operations in the list
158    pub fn commit_write(&mut self) -> (&mut Self, usize, usize) {
159        self.commit(self.read_list.len(), 0)
160    }
161
162}
163
164impl Drop for RWList<'_> {
165    fn drop(&mut self) {
166        self.commit_rw();
167    }
168}