1use crate::memory::Memory;
6use crate::{TrapCode, Vm, get_reg, i16_from_u8s};
7use crate::{set_reg, u16_from_u8s};
8use hashmap_mem::MapHeader;
9use std::ptr;
10use swamp_vm_isa::MapIterator;
11
12impl Vm {
13 pub fn get_map_header(&self, header_reg: u8) -> *mut MapHeader {
14 self.get_ptr_from_reg(header_reg) as *mut MapHeader
15 }
16
17 pub fn get_map_header_mut(&self, addr: u32) -> *mut MapHeader {
18 self.memory.get_heap_ptr(addr as usize) as *mut MapHeader
19 }
20
21 pub fn get_map_header_const(&self, addr: u32) -> *const MapHeader {
22 self.memory.get_heap_ptr(addr as usize) as *const MapHeader
23 }
24
25 pub fn read_map_header(&self, header_reg: u8) -> (MapHeader, u32) {
26 let (map_ptr, map_addr) = self.get_ptr_and_addr_from_reg(header_reg);
27 unsafe { (*(map_ptr as *const MapHeader), map_addr) }
28 }
29
30 #[must_use]
31 pub fn read_map_header_from_heap(map_header_heap_addr: u32, heap: &Memory) -> MapHeader {
32 let ptr = heap
33 .get_heap_const_ptr(map_header_heap_addr as usize)
34 .cast::<MapHeader>();
35 unsafe { *ptr }
36 }
37
    /// Growth factor relating requested element count to reserved storage.
    /// NOTE(review): not referenced anywhere in this file — presumably used by
    /// callers elsewhere in the crate; confirm before removing.
    pub const ELEMENT_COUNT_FACTOR: f32 = 1.5;
39
40 #[allow(clippy::too_many_lines)]
41 fn get_or_reserve_entry(memory: &Memory, header: *mut MapHeader, key_ptr_addr: usize) -> u32 {
42 unsafe {
43 let key_ptr = memory.get_heap_ptr(key_ptr_addr);
44 let entry_ptr = hashmap_mem::get_or_reserve_entry(header as *mut u8, key_ptr);
45 if entry_ptr.is_null() {
46 return 0;
47 }
48
49 memory.get_heap_offset(entry_ptr)
50 }
51 }
52 pub fn execute_map_open_addressing_get_entry_location(
53 &mut self,
54 dst_entry_address: u8,
55 self_map_header_reg: u8,
56 key_source: u8,
57 ) {
58 let map_header_addr = get_reg!(self, self_map_header_reg);
59 let map_header_ptr = self.get_map_header_mut(map_header_addr);
60 let key_source_address = get_reg!(self, key_source) as usize;
61 #[cfg(feature = "debug_vm")]
62 if self.debug_operations_enabled {
63 unsafe {
64 eprintln!(
65 "-- map_get_or_reserve_entry start {map_header_addr:X}, key_source: {key_source_address:X} {:?}",
66 *map_header_ptr
67 );
68 }
69 }
70
71 let address_to_entry: u32;
72
73 unsafe {
74 address_to_entry =
75 Self::lookup_open_addressing(&self.memory, map_header_ptr, key_source_address);
76 }
77
78 if address_to_entry == 0 {
79 return self.internal_trap(TrapCode::MapEntryNotFound);
80 }
81
82 set_reg!(self, dst_entry_address, address_to_entry);
83 }
84
    #[inline]

    /// Initializes an open-addressing map header in place at the heap address
    /// held in `self_map_header_reg`.
    ///
    /// The logical element limit arrives as a 16-bit immediate split across two
    /// instruction bytes; key/value sizes come from registers, alignments as
    /// immediates.
    pub fn execute_map_open_addressing_init(
        &mut self,
        self_map_header_reg: u8,
        logical_limit_immediate_lower: u8,
        logical_limit_immediate_upper: u8,
        key_size_reg: u8,
        key_alignment: u8,
        value_size_reg: u8,
        value_alignment: u8,
    ) {
        let map_header_addr = get_reg!(self, self_map_header_reg);
        let map_header = self.memory.get_heap_ptr(map_header_addr as usize);
        // Reassemble the 16-bit logical limit from the two immediate bytes.
        let logical_limit =
            u16_from_u8s!(logical_limit_immediate_lower, logical_limit_immediate_upper);
        let key_size = get_reg!(self, key_size_reg);
        let value_size = get_reg!(self, value_size_reg);
        // `capacity` is only used for sanity checks here; `hashmap_mem::layout`
        // derives the actual bucket layout from `logical_limit` itself.
        let capacity = logical_limit.next_power_of_two();
        // In release builds `next_power_of_two` wraps to 0 on overflow; the
        // `is_power_of_two` assert below rejects that case (0 is not a power of two).
        debug_assert_ne!(capacity, 0);
        assert!(capacity.is_power_of_two());

        let (bucket_layout, map_init) = hashmap_mem::layout(
            key_size,
            key_alignment,
            value_size,
            value_alignment,
            logical_limit,
        );

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            let map_header_addr = get_reg!(self, self_map_header_reg);
            eprintln!(
                "map_init {map_header_addr:08X}: logical_limit:{logical_limit} capacity:{capacity}, key_size:{key_size}, key_alignment:{key_alignment}, value_size:{value_size}, value_alignment:{value_alignment}, bucket_size:{}, value_offset:{}",
                bucket_layout.bucket_size, bucket_layout.value_offset,
            );
        }

        // SAFETY: `map_header` points at heap storage assumed large enough for
        // the header plus the bucket array described by `map_init`.
        unsafe {
            hashmap_mem::init(map_header, &map_init);
        }
    }
131
132 pub fn execute_map_open_addressing_get_or_reserve_entry(
133 &mut self,
134 dst_entry_address: u8,
135 self_map_header_reg: u8,
136 key_source_ptr_reg: u8,
137 ) {
138 let map_header_addr = get_reg!(self, self_map_header_reg);
139 let map_header = self.get_map_header_mut(map_header_addr);
140
141 let key_source_address = get_reg!(self, key_source_ptr_reg) as usize;
142
143 #[cfg(feature = "debug_vm")]
144 if self.debug_operations_enabled {
145 eprintln!(
146 "-- map_get_or_reserve_entry start {map_header_addr:X}, key_source: {key_source_address:X} {map_header:?}",
147 );
148 }
149
150 let entry_address;
151
152 #[cfg(feature = "debug_vm")]
154 if self.debug_operations_enabled {
155 eprintln!("map_get_or_reserve_entry: it didn't exist, so try to find a new entry");
156 }
157 unsafe {
158 entry_address =
159 Self::get_or_reserve_entry(&self.memory, map_header, key_source_address);
160 if entry_address == 0 {
161 return self.internal_trap(TrapCode::MapEntryNotFoundAndCouldNotBeCreated);
162 }
163 }
164 set_reg!(self, dst_entry_address, entry_address);
167 }
168
169 pub fn execute_map_open_addressing_has(
170 &mut self,
171 dest_reg: u8,
172 self_const_map_header_reg: u8,
173 key_source_reg: u8,
174 ) {
175 let map_header_addr = get_reg!(self, self_const_map_header_reg) as usize;
176 let map_header_ptr = self.get_map_header_mut(map_header_addr as u32);
177 let key_source_address = get_reg!(self, key_source_reg) as usize;
178
179 unsafe {
180 let found = Self::has_open_addressing(&self.memory, map_header_ptr, key_source_address);
181
182 #[cfg(feature = "debug_vm")]
183 if self.debug_operations_enabled {
184 unsafe {
185 eprintln!(
186 "map.has returned: {found} for key pointer:{:X} map:{:?}",
187 key_source_address, *map_header_ptr
188 );
189 }
190 }
191
192 set_reg!(self, dest_reg, found);
193 }
194 }
195
196 pub fn execute_map_overwrite(&mut self, target_map_header_reg: u8, source_map_header_reg: u8) {
197 let target_map_header_addr = get_reg!(self, target_map_header_reg);
198 let target_map_header = self.get_map_header_mut(target_map_header_addr);
199
200 let source_map_header_addr = get_reg!(self, source_map_header_reg);
201 let source_map_header = self.get_map_header_const(source_map_header_addr);
202
203 let could_overwrite = unsafe {
204 hashmap_mem::overwrite(target_map_header as *mut u8, source_map_header as *const u8)
205 };
206 if !could_overwrite {
207 self.internal_trap(TrapCode::MapCouldNotBeCopied);
208 }
209 }
210
211 pub fn execute_map_open_addressing_remove(
212 &mut self,
213 self_map_header_reg: u8,
214 key_source_reg: u8,
215 ) {
216 let map_header_addr = get_reg!(self, self_map_header_reg);
217 let map_header = self.get_map_header_mut(map_header_addr);
218 let key_source_address = get_reg!(self, key_source_reg) as usize;
219
220 unsafe {
221 let found = Self::remove_open_addressing(&self.memory, map_header, key_source_address);
222 if !found {
223 self.internal_trap(TrapCode::MapEntryNotFoundForRemoval);
224 }
225 }
226 }
227
228 unsafe fn has_open_addressing(
232 memory: &Memory,
233 map_header: *mut MapHeader,
234 key_ptr_addr: usize,
235 ) -> bool {
236 unsafe {
237 let key_ptr = memory.get_heap_const_ptr(key_ptr_addr);
238 hashmap_mem::has(map_header as *mut u8, key_ptr)
239 }
240 }
241
242 unsafe fn lookup_open_addressing(
248 memory: &Memory,
249 map_header: *mut MapHeader,
250 key_ptr_addr: usize,
251 ) -> u32 {
252 unsafe {
253 let key_ptr = memory.get_heap_const_ptr(key_ptr_addr);
254
255 let value_ptr = hashmap_mem::lookup(map_header as *mut u8, key_ptr);
256
257 if value_ptr.is_null() {
258 0
259 } else {
260 memory.get_heap_offset(value_ptr)
261 }
262 }
263 }
264
265 unsafe fn remove_open_addressing(
270 memory: &Memory,
271 map_header_ptr: *mut MapHeader,
272 key_ptr_addr: usize,
273 ) -> bool {
274 unsafe {
275 let key_ptr = memory.get_heap_const_ptr(key_ptr_addr);
276
277 hashmap_mem::remove(map_header_ptr as *mut u8, key_ptr)
278 }
279 }
280
    /// Initializes a `MapIterator` (header address + cursor index 0) at the
    /// location referenced by `target_map_iterator_header_reg`, pointing at the
    /// map referenced by `map_header_reg`.
    pub(crate) fn execute_map_iter_init(
        &mut self,
        target_map_iterator_header_reg: u8,
        map_header_reg: u8,
    ) {
        let map_header_addr = get_reg!(self, map_header_reg);

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            let iter_addr = get_reg!(self, target_map_iterator_header_reg);
            let map_header = Self::read_map_header_from_heap(map_header_addr, &self.memory);
            // The secret code acts as a magic value guarding against iterating
            // over memory that is not an initialized map header.
            debug_assert_eq!(
                map_header.padding_and_secret_code,
                hashmap_mem::SECRET_CODE,
                "secret code is not the same"
            );
            eprintln!(
                "map_iter_init: iter_addr: {iter_addr:04X} map_header_addr:{map_header_addr:04X} key_size:{}, value_offset:{}, value_size:{} bucket_size: {}",
                map_header.key_size,
                map_header.value_offset,
                map_header.value_size,
                (map_header.bucket_size)
            );
        }
        // Cursor starts at bucket index 0; `execute_map_iter_next*` advances it.
        let map_iterator = MapIterator {
            map_header_frame_offset: map_header_addr,
            index: 0,
        };

        let map_iterator_mut_ptr =
            self.get_ptr_from_reg(target_map_iterator_header_reg) as *mut MapIterator;

        // SAFETY: the target register is assumed to reference writable storage
        // large enough (and aligned) for a `MapIterator`.
        unsafe {
            ptr::write(map_iterator_mut_ptr, map_iterator);
        }
    }
317
318 pub fn get_map_iterator_header_ptr_from_reg(&self, map_iterator_reg: u8) -> *mut MapIterator {
319 self.get_ptr_from_reg(map_iterator_reg) as *mut MapIterator
320 }
321
322 pub fn execute_map_iter_next_pair(
323 &mut self,
324 map_iterator_header_reg: u8,
325 target_key_reg: u8,
326 target_value_reg: u8,
327 branch_offset_lower: u8,
328 branch_offset_upper: u8,
329 ) {
330 let map_iterator = self.get_map_iterator_header_ptr_from_reg(map_iterator_header_reg);
331
332 unsafe {
333 let map_header_addr = (*map_iterator).map_header_frame_offset;
334 let map_header_ptr =
335 self.memory.get_heap_const_ptr(map_header_addr as usize) as *const MapHeader;
336 let map_header = &*map_header_ptr;
337 if map_header.padding_and_secret_code != hashmap_mem::SECRET_CODE {
338 return self.internal_trap(TrapCode::MemoryCorruption);
339 }
340 debug_assert_eq!(map_header.padding_and_secret_code, hashmap_mem::SECRET_CODE);
341
342 #[cfg(feature = "debug_vm")]
343 if self.debug_operations_enabled {
344 let iter_addr = get_reg!(self, map_iterator_header_reg);
345 let index = (*map_iterator).index;
346 eprintln!(
347 "map_iter_next: iter_addr: {iter_addr:04X} addr:{map_header_addr:04X} index:{index} len: {}, capacity: {}",
348 map_header.element_count, map_header.capacity
349 );
350 }
351
352 let index = (*map_iterator).index;
353
354 let (key_ptr, value_ptr, found_index) =
355 hashmap_mem::find_next_valid_entry(map_header_ptr as *mut u8, index as u16);
356 if !key_ptr.is_null() {
357 let key_offset = self.memory.get_heap_offset(key_ptr);
358 let value_offset = self.memory.get_heap_offset(value_ptr);
359
360 set_reg!(self, target_key_reg, key_offset);
361 set_reg!(self, target_value_reg, value_offset);
362 (*map_iterator).index = (found_index + 1) as u32;
363 } else {
364 let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);
366
367 #[cfg(feature = "debug_vm")]
368 {
369 if self.debug_operations_enabled {
370 eprintln!(
371 "map_iter_next_pair complete. jumping with offset {branch_offset}"
372 );
373 }
374 }
375
376 self.pc = (self.pc as i32 + branch_offset as i32) as usize;
377 }
378 }
379 }
380
381 pub fn execute_map_iter_next(
382 &mut self,
383 map_iterator_header_reg: u8,
384 target_value_reg: u8,
385 branch_offset_lower: u8,
386 branch_offset_upper: u8,
387 ) {
388 let map_iterator = self.get_map_iterator_header_ptr_from_reg(map_iterator_header_reg);
389
390 unsafe {
391 let map_header_addr = (*map_iterator).map_header_frame_offset;
392 let map_header_ptr =
393 self.memory.get_heap_const_ptr(map_header_addr as usize) as *const MapHeader;
394 let map_header = &*map_header_ptr;
395
396 #[cfg(feature = "debug_vm")]
397 if self.debug_operations_enabled {
398 let iter_addr = get_reg!(self, map_iterator_header_reg);
399 let index = (*map_iterator).index;
400 eprintln!(
401 "map_iter_next: iter_addr: {iter_addr:04X} addr:{map_header_addr:04X} index:{index} len: {}, capacity: {}",
402 map_header.element_count, map_header.capacity
403 );
404 }
405
406 let index = (*map_iterator).index;
407
408 let (_key_ptr, value_ptr, found_index) =
409 hashmap_mem::find_next_valid_entry(map_header_ptr as *mut u8, index as u16);
410 if !value_ptr.is_null() {
411 let value_offset = self.memory.get_heap_offset(value_ptr);
412
413 set_reg!(self, target_value_reg, value_offset);
414 (*map_iterator).index = (found_index + 1) as u32;
415 } else {
416 let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);
418
419 #[cfg(feature = "debug_vm")]
420 {
421 if self.debug_operations_enabled {
422 eprintln!(
423 "map_iter_next_pair complete. jumping with offset {branch_offset}"
424 );
425 }
426 }
427
428 self.pc = (self.pc as i32 + branch_offset as i32) as usize;
429 }
430 }
431 }
432}