1use std::marker::PhantomData;
2use smallvec::{SmallVec, smallvec};
3
/// A batch of queued remote-memory reads and writes. Entries accumulate
/// locally and are submitted in bulk through the `sys` FFI layer when
/// `commit*` is called (or on drop).
pub struct RWList<'a> {
    // Raw pointer to the target-process handle, borrowed from the
    // `sys::WinCtx` passed to `new`; kept valid for 'a via `phantom`.
    process: *const sys::ProcessData,
    // Address-space selector: a non-zero value routes commits through the
    // `VMem*Mul` calls, zero through the plain `Mem*Mul` calls (see `commit`).
    dir_base: u64,
    // Pending read descriptors; inline storage for up to 8 before spilling.
    read_list: SmallVec<[sys::RWInfo; 8]>,
    // Pending write descriptors; inline storage for up to 8 before spilling.
    write_list: SmallVec<[sys::RWInfo; 8]>,
    // Ties this struct to the lifetime of the borrowed context and of every
    // local buffer registered via `read`/`write` (their pointers are stored
    // as raw u64s in the lists above).
    phantom: PhantomData<&'a u8>,
}
15
16impl<'a> RWList<'a> {
17 pub fn new(ctx: &'a sys::WinCtx, dir_base: u64) -> RWList<'a> {
24 RWList {
25 process: &ctx.process,
26 dir_base: dir_base,
27 read_list: smallvec![],
28 write_list: smallvec![],
29 phantom: PhantomData,
30 }
31 }
32
33 pub fn write<T>(&mut self, address: u64, val: &'a T) -> &mut Self {
40 self.write_list.push(sys::RWInfo {
41 local: val as *const T as u64,
42 remote: address,
43 size: std::mem::size_of::<T>() as u64
44 });
45 self
46 }
47
48 pub fn write_arr<T>(&mut self, address: u64, val: &'a [T]) -> &mut Self {
55 self.write_list.push(sys::RWInfo {
56 local: val.as_ptr() as u64,
57 remote: address,
58 size: (std::mem::size_of::<T>() * val.len()) as u64
59 });
60 self
61 }
62
63 pub fn read<T>(&mut self, address: u64, val: &'a mut T) -> &mut Self {
70 self.read_list.push(sys::RWInfo {
71 local: val as *mut T as u64,
72 remote: address,
73 size: std::mem::size_of::<T>() as u64
74 });
75 self
76 }
77
78 pub fn read_arr<T>(&mut self, address: u64, val: &'a mut [T]) -> &mut Self {
85 self.read_list.push(sys::RWInfo {
86 local: val.as_mut_ptr() as u64,
87 remote: address,
88 size: (std::mem::size_of::<T>() * val.len()) as u64
89 });
90 self
91 }
92
93 pub fn commit(&mut self, read_start: usize, write_start: usize) -> (&mut Self, usize, usize) {
104 let mut done_rwlen : usize = 0;
105 let mut queued_rwlen : usize = 0;
106
107 if read_start < self.read_list.len() {
108 {
109 let read_list = &mut self.read_list[read_start..];
110 read_list.sort_unstable_by(|a, b| (a.remote & !0xfff).partial_cmp(&(b.remote & !0xfff)).unwrap());
111 queued_rwlen += read_list.iter().fold(0, |acc, a| acc + a.size) as usize;
112
113 done_rwlen += unsafe {
114 (if self.dir_base != 0 {
115 sys::VMemReadMul(self.process, self.dir_base, read_list.as_mut_ptr(), read_list.len() as u64)
116 } else {
117 sys::MemReadMul(self.process, read_list.as_mut_ptr(), read_list.len() as u64)
118 }) as usize
119 };
120 }
121
122 self.read_list.truncate(read_start);
123 }
124
125 if write_start < self.write_list.len() {
126 {
127 let write_list = &mut self.write_list[write_start..];
128 write_list.sort_unstable_by(|a, b| (a.remote & !0xfff).partial_cmp(&(b.remote & !0xfff)).unwrap());
129 queued_rwlen += write_list.iter().fold(0, |acc, a| acc + a.size) as usize;
130
131 done_rwlen += unsafe {
132 (if self.dir_base != 0 {
133 sys::VMemWriteMul(self.process, self.dir_base, write_list.as_mut_ptr(), write_list.len() as u64)
134 } else {
135 sys::MemWriteMul(self.process, write_list.as_mut_ptr(), write_list.len() as u64)
136 }) as usize
137 }
138 }
139
140 self.write_list.truncate(write_start);
141
142 }
143
144 (self, queued_rwlen, done_rwlen)
145 }
146
147 pub fn commit_rw(&mut self) -> (&mut Self, usize, usize) {
149 self.commit(0, 0)
150 }
151
152 pub fn commit_read(&mut self) -> (&mut Self, usize, usize) {
154 self.commit(0, self.write_list.len())
155 }
156
157 pub fn commit_write(&mut self) -> (&mut Self, usize, usize) {
159 self.commit(self.read_list.len(), 0)
160 }
161
162}
163
164impl Drop for RWList<'_> {
165 fn drop(&mut self) {
166 self.commit_rw();
167 }
168}