swamp_vm/
vec.rs

/*
 * Copyright (c) Peter Bjorklund. All rights reserved. https://github.com/swamp/swamp
 * Licensed under the MIT License. See LICENSE in the project root for license information.
 */
use crate::memory::Memory;
use crate::set_reg;
use crate::{TrapCode, Vm, get_reg, i16_from_u8s, u16_from_u8s, u32_from_u8s};
use std::ptr;
use swamp_vm_isa::{
    VEC_HEADER_MAGIC_CODE, VEC_HEADER_PAYLOAD_OFFSET, VEC_HEADER_SIZE, VecHeader, VecIterator,
};

impl Vm {
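    /// Returns a mutable `VecIterator` pointer for the address held in the
    /// given register.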
    pub fn get_vec_iterator_header_ptr_from_reg(&self, vec_iterator_reg: u8) -> *mut VecIterator {
        self.get_ptr_from_reg(vec_iterator_reg) as *mut VecIterator
    }

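    /// Initializes a fixed-size array header in place at the address held in
    /// `target_vec_ptr_reg`. The capacity arrives as a 16-bit immediate and
    /// the element size as a 32-bit immediate, both split across operand
    /// bytes. Arrays cannot grow, so `element_count` is set to `capacity`
    /// immediately.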
    #[inline]
    pub fn execute_array_init(
        &mut self,
        target_vec_ptr_reg: u8,
        capacity_lower: u8,
        capacity_upper: u8,
        element_size_0: u8,
        element_size_1: u8,
        element_size_2: u8,
        element_size_3: u8,
    ) {
        let vec_addr = get_reg!(self, target_vec_ptr_reg);
        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(vec_addr as usize)
            .cast::<VecHeader>();
        let capacity = u16_from_u8s!(capacity_lower, capacity_upper);
        let element_size = u32_from_u8s!(
            element_size_0,
            element_size_1,
            element_size_2,
            element_size_3
        );
        unsafe {
            // Arrays are fixed-size, so they start out fully "occupied"
            (*mut_vec_ptr).element_count = capacity;
            (*mut_vec_ptr).capacity = capacity;
            (*mut_vec_ptr).element_size = element_size;
            (*mut_vec_ptr).padding = VEC_HEADER_MAGIC_CODE;
        }

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            eprintln!("array init element_size:{element_size} into vec_addr: {vec_addr:X}");
        }
    }

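    /// Compares two vectors for equality and writes a boolean into
    /// `bool_target_reg`. Vectors are equal when their element counts match
    /// and their payloads are byte-for-byte identical; this byte comparison
    /// assumes element types contain no uninitialized padding bytes.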
    #[inline]
    pub fn execute_vec_cmp(
        &mut self,
        bool_target_reg: u8,
        left_vec_ptr_reg: u8,
        right_vec_ptr_reg: u8,
    ) {
        let left_vec_addr = get_reg!(self, left_vec_ptr_reg);
        let right_vec_addr = get_reg!(self, right_vec_ptr_reg);

        let left_vec_ptr = self
            .memory
            .get_heap_const_ptr(left_vec_addr as usize)
            .cast::<VecHeader>();

        let right_vec_ptr = self
            .memory
            .get_heap_const_ptr(right_vec_addr as usize)
            .cast::<VecHeader>();

        unsafe {
            if (*left_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if (*right_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            // Different lengths can never be equal. Bail out here, since the
            // byte comparison below assumes both payloads have the same size.
            if (*left_vec_ptr).element_count != (*right_vec_ptr).element_count {
                set_reg!(self, bool_target_reg, 0);
                return;
            }

            let left_ptr = self
                .memory
                .get_heap_const_ptr(left_vec_addr as usize)
                .add(VEC_HEADER_PAYLOAD_OFFSET.0 as usize);

            let right_ptr = self
                .memory
                .get_heap_const_ptr(right_vec_addr as usize)
                .add(VEC_HEADER_PAYLOAD_OFFSET.0 as usize);

            let byte_size =
                ((*left_vec_ptr).element_count as usize) * (*left_vec_ptr).element_size as usize;
            let slice_a = std::slice::from_raw_parts(left_ptr, byte_size);
            let slice_b = std::slice::from_raw_parts(right_ptr, byte_size);

            set_reg!(self, bool_target_reg, slice_a == slice_b);
        }
    }

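    /// Copies an entire vector over another one. Traps on a corrupt header
    /// or when the source's element count exceeds the target's capacity.
    /// The copy starts two bytes into the header, which assumes `capacity`
    /// is the leading 16-bit field of `VecHeader`: the target keeps its own
    /// capacity and adopts the source's count, element size, magic code, and
    /// payload in one contiguous copy.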
    #[inline]
    pub fn execute_vec_copy(&mut self, target_vec_ptr_reg: u8, source_vec_ptr_reg: u8) {
        let target_vec_addr = get_reg!(self, target_vec_ptr_reg);
        let source_vec_addr = get_reg!(self, source_vec_ptr_reg);

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            eprintln!("vec_copy {target_vec_addr:X} <- {source_vec_addr:X}");
        }

        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(target_vec_addr as usize)
            .cast::<VecHeader>();

        let src_vec_ptr = self
            .memory
            .get_heap_const_ptr(source_vec_addr as usize)
            .cast::<VecHeader>();

        unsafe {
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if (*src_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            if (*mut_vec_ptr).capacity < (*src_vec_ptr).element_count {
                return self.internal_trap(TrapCode::VecOutOfCapacity {
                    encountered: (*src_vec_ptr).element_count,
                    capacity: (*mut_vec_ptr).capacity,
                });
            }

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                let target_capacity = (*mut_vec_ptr).capacity;
                let target_len = (*mut_vec_ptr).element_count;
                let target_elem_size = (*mut_vec_ptr).element_size;

                let source_capacity = (*src_vec_ptr).capacity;
                let source_len = (*src_vec_ptr).element_count;
                let source_elem_size = (*src_vec_ptr).element_size;

                eprintln!(
                    "vec_copy target capacity: {target_capacity} len: {target_len} elem_size: {target_elem_size}, source capacity: {source_capacity} len: {source_len} elem_size: {source_elem_size}"
                );
            }

            let target_capacity = (*mut_vec_ptr).capacity;

            // Copy everything after the 2-byte capacity field: the target
            // keeps its own capacity and takes over the rest of the header
            // plus the payload.
            let target_tail = (target_vec_addr + 2) as usize; // Skip capacity
            let target_raw = self.memory.get_heap_ptr(target_tail);
            let source_tail = (source_vec_addr + 2) as usize; // Skip capacity
            let source_raw = self.memory.get_heap_const_ptr(source_tail);

            let total_bytes_to_copy = (VEC_HEADER_SIZE.0 - 2)
                + ((*src_vec_ptr).element_count as u32) * (*src_vec_ptr).element_size;

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                eprintln!(
                    "vec_copy bytes_to_copy: {total_bytes_to_copy} to {target_tail:X} from {source_tail:X}"
                );
            }

            ptr::copy_nonoverlapping(source_raw, target_raw, total_bytes_to_copy as usize);

            debug_assert_eq!(
                (*mut_vec_ptr).element_count,
                (*src_vec_ptr).element_count,
                "element count differs"
            );
            debug_assert_eq!(
                (*mut_vec_ptr).capacity,
                target_capacity,
                "capacity has been modified"
            );
        }
    }

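    /// Copies the sub-range described by the range header in `range_reg`
    /// (min, max, inclusive flag) from the source vector into the target,
    /// replacing the target's contents and setting its element count to the
    /// range length. Reverse ranges, insufficient target capacity, and
    /// out-of-bounds source ranges all trap.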
    #[inline]
    pub fn execute_vec_copy_range(
        &mut self,
        target_vec_ptr_reg: u8,
        source_vec_ptr_reg: u8,
        range_reg: u8,
    ) {
        let target_vec_addr = get_reg!(self, target_vec_ptr_reg);
        let source_vec_addr = get_reg!(self, source_vec_ptr_reg);
        let range_header = self.range_header_from_reg(range_reg);

        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(target_vec_addr as usize)
            .cast::<VecHeader>();

        let src_vec_ptr = self
            .memory
            .get_heap_const_ptr(source_vec_addr as usize)
            .cast::<VecHeader>();

        unsafe {
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            if (*src_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            if range_header.max < range_header.min {
                return self.internal_trap(TrapCode::ReverseRangeNotAllowedHere);
            }

            let num_elements_to_copy = if range_header.inclusive {
                (range_header.max - range_header.min + 1) as u32
            } else {
                (range_header.max - range_header.min) as u32
            };
            let source_element_index = range_header.min as u32;
            let required_source_element_count = source_element_index + num_elements_to_copy;

            // Compare in u32 to avoid truncating the range length
            if ((*mut_vec_ptr).capacity as u32) < num_elements_to_copy {
                return self.internal_trap(TrapCode::VecOutOfCapacity {
                    encountered: num_elements_to_copy as u16,
                    capacity: (*mut_vec_ptr).capacity,
                });
            }

            if ((*src_vec_ptr).element_count as u32) < required_source_element_count {
                return self.internal_trap(TrapCode::VecBoundsFail {
                    encountered: required_source_element_count as usize,
                    element_count: (*src_vec_ptr).element_count as usize,
                });
            }

            let target_capacity = (*mut_vec_ptr).capacity;

            let target_payload = (target_vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0) as usize;
            let target_raw = self.memory.get_heap_ptr(target_payload);

            let source_slice_start = (source_vec_addr
                + VEC_HEADER_PAYLOAD_OFFSET.0
                + source_element_index * (*src_vec_ptr).element_size)
                as usize;
            let source_raw = self.memory.get_heap_const_ptr(source_slice_start);

            let total_bytes_to_copy = num_elements_to_copy * (*src_vec_ptr).element_size;

            // HACK: Use `ptr::copy` (memmove semantics) for now, since the
            // slice target is allowed to be the source vector itself and the
            // regions may therefore overlap.
            ptr::copy(source_raw, target_raw, total_bytes_to_copy as usize);
            (*mut_vec_ptr).element_count = num_elements_to_copy as u16;

            debug_assert_eq!(
                (*mut_vec_ptr).capacity,
                target_capacity,
                "capacity has been modified"
            );
        }
    }

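    /// Initializes an empty, growable vector header: zero elements, with
    /// capacity and element size taken from the instruction's immediate
    /// operand bytes, and the magic code stamped into the padding field for
    /// later corruption checks.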
    #[inline]
    pub fn execute_vec_init(
        &mut self,
        target_vec_ptr_reg: u8,
        capacity_lower: u8,
        capacity_upper: u8,
        element_size_0: u8,
        element_size_1: u8,
        element_size_2: u8,
        element_size_3: u8,
    ) {
        let vec_addr = get_reg!(self, target_vec_ptr_reg);
        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(vec_addr as usize)
            .cast::<VecHeader>();
        let capacity = u16_from_u8s!(capacity_lower, capacity_upper);
        let element_size = u32_from_u8s!(
            element_size_0,
            element_size_1,
            element_size_2,
            element_size_3
        );
        unsafe {
            // Zero the count explicitly rather than relying on fresh memory
            // being zeroed; reused heap memory may hold a stale count.
            (*mut_vec_ptr).element_count = 0;
            (*mut_vec_ptr).capacity = capacity;
            (*mut_vec_ptr).element_size = element_size;
            (*mut_vec_ptr).padding = VEC_HEADER_MAGIC_CODE;
        }
    }

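    /// Creates a `VecIterator` at the address held in
    /// `target_vec_iterator_header_reg`. The iterator records the heap
    /// address of the vec header and an index starting at zero;
    /// `execute_vec_iter_next` and `execute_vec_iter_next_pair` advance it.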
    #[inline]
    pub fn execute_vec_iter_init(
        &mut self,
        target_vec_iterator_header_reg: u8,
        vec_header_reg: u8,
    ) {
        let vec_header_addr = get_reg!(self, vec_header_reg);

        // Verify that the vec header is intact before iterating over it
        let vec_header_ptr = self
            .memory
            .get_heap_const_ptr(vec_header_addr as usize)
            .cast::<VecHeader>();
        let vec_header = unsafe { &*vec_header_ptr };

        if vec_header.padding != VEC_HEADER_MAGIC_CODE {
            return self.internal_trap(TrapCode::MemoryCorruption);
        }

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            let iter_addr = get_reg!(self, target_vec_iterator_header_reg);
            eprintln!(
                "vec_iter_init: iter_addr: {iter_addr:04X} vec_header_addr: {vec_header_addr:04X} element_size: {}",
                vec_header.element_size
            );
        }

        // Sanity-check that the element size is reasonable
        debug_assert!(vec_header.element_size > 0, "element size cannot be zero");

        let vec_iterator = VecIterator {
            vec_header_heap_ptr: vec_header_addr,
            index: 0,
        };

        let vec_iterator_mut_ptr =
            self.get_ptr_from_reg(target_vec_iterator_header_reg) as *mut VecIterator;

        unsafe {
            ptr::write(vec_iterator_mut_ptr, vec_iterator);
        }
    }

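    /// Advances a vec iterator one step. While elements remain, the current
    /// element's heap address is written to `target_variable` and the index
    /// is incremented; once exhausted, the signed 16-bit branch offset is
    /// applied to the program counter to exit the loop.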
    #[inline]
    pub fn execute_vec_iter_next(
        &mut self,
        vec_iterator_header_reg: u8,
        target_variable: u8,
        branch_offset_lower: u8,
        branch_offset_upper: u8,
    ) {
        let vec_iterator = self.get_vec_iterator_header_ptr_from_reg(vec_iterator_header_reg);

        unsafe {
            let vec_header_addr = (*vec_iterator).vec_header_heap_ptr;
            let vec_header_ptr = self
                .memory
                .get_heap_const_ptr(vec_header_addr as usize)
                .cast::<VecHeader>();
            let vec_header = &*vec_header_ptr;
            if vec_header.padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                let iter_addr = get_reg!(self, vec_iterator_header_reg);
                let index = (*vec_iterator).index;
                eprintln!(
                    "vec_iter_next: iter_addr: {iter_addr:04X} addr: {vec_header_addr:04X} index: {index} len: {}, capacity: {}, element_size: {}",
                    vec_header.element_count, vec_header.capacity, vec_header.element_size
                );
            }

            // Check whether we have reached the end
            if (*vec_iterator).index >= vec_header.element_count {
                // Iteration is complete: take the branch
                let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);

                #[cfg(feature = "debug_vm")]
                if self.debug_operations_enabled {
                    eprintln!("vec_iter_next complete. jumping with offset {branch_offset}");
                }

                self.pc = (self.pc as i32 + branch_offset as i32) as usize;

                return;
            }

            // Calculate the address of the current element
            let element_addr = (*vec_iterator).vec_header_heap_ptr
                + VEC_HEADER_PAYLOAD_OFFSET.0
                + (*vec_iterator).index as u32 * vec_header.element_size;

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                eprintln!(
                    "vec_iter_next: element_addr {element_addr:04X} to reg {target_variable}"
                );
            }

            set_reg!(self, target_variable, element_addr);

            (*vec_iterator).index += 1;
        }
    }

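    /// Key/value variant of `execute_vec_iter_next`: writes the current
    /// index to `target_key_reg` and the element's heap address to
    /// `target_value_reg`, or takes the branch once the iterator is
    /// exhausted.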
    #[inline]
    pub fn execute_vec_iter_next_pair(
        &mut self,
        vec_iterator_header_reg: u8,
        target_key_reg: u8,
        target_value_reg: u8,
        branch_offset_lower: u8,
        branch_offset_upper: u8,
    ) {
        let vec_iterator = self.get_vec_iterator_header_ptr_from_reg(vec_iterator_header_reg);

        unsafe {
            let vec_header_addr = (*vec_iterator).vec_header_heap_ptr;
            let vec_header_ptr = self
                .memory
                .get_heap_const_ptr(vec_header_addr as usize)
                .cast::<VecHeader>();
            let vec_header = &*vec_header_ptr;
            if vec_header.padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                let iter_addr = get_reg!(self, vec_iterator_header_reg);
                let index = (*vec_iterator).index;
                eprintln!(
                    "vec_iter_next_pair: iter_addr: {iter_addr:04X} addr: {vec_header_addr:04X} index: {index} len: {}, capacity: {}",
                    vec_header.element_count, vec_header.capacity
                );
            }

            // Check whether we have reached the end
            if (*vec_iterator).index >= vec_header.element_count {
                // Iteration is complete: take the branch
                let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);

                #[cfg(feature = "debug_vm")]
                if self.debug_operations_enabled {
                    eprintln!(
                        "vec_iter_next_pair complete. jumping with offset {branch_offset}"
                    );
                }

                self.pc = (self.pc as i32 + branch_offset as i32) as usize;

                return;
            }

            // Calculate the address of the current element
            let element_addr = (*vec_iterator).vec_header_heap_ptr
                + VEC_HEADER_PAYLOAD_OFFSET.0
                + (*vec_iterator).index as u32 * vec_header.element_size;

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                eprintln!(
                    "vec_iter_next_pair: element_addr {element_addr:04X} to reg {target_value_reg}"
                );
            }

            set_reg!(self, target_key_reg, (*vec_iterator).index);
            set_reg!(self, target_value_reg, element_addr);

            (*vec_iterator).index += 1;
        }
    }

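    /// Reads a `VecHeader` by value from the given heap offset.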
    pub fn vec_header_from_heap(heap: &Memory, heap_offset: u32) -> VecHeader {
        unsafe { *(heap.get_heap_const_ptr(heap_offset as usize) as *const VecHeader) }
    }

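    /// Reads a `VecHeader` by value from the address held in the given
    /// register.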
    pub fn read_vec_header_from_ptr_reg(&self, vec_header_ptr_reg: u8) -> VecHeader {
        let vec_header_const_ptr =
            self.get_const_ptr_from_reg(vec_header_ptr_reg) as *const VecHeader;
        unsafe { *vec_header_const_ptr }
    }

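    /// Returns a mutable `VecHeader` pointer for the address held in the
    /// given register.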
    pub fn get_vec_header_ptr_from_reg(&self, vec_header_ptr_reg: u8) -> *mut VecHeader {
        self.get_ptr_from_reg(vec_header_ptr_reg) as *mut VecHeader
    }

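    /// Subscript read: bounds-checks the index from `int_reg` against the
    /// element count and writes the element's heap address (not a copy of
    /// the value) into `element_target_reg`; the caller loads through that
    /// address.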
    #[inline]
    pub fn execute_vec_get(&mut self, element_target_reg: u8, vec_header_ptr_reg: u8, int_reg: u8) {
        let vec_addr = get_reg!(self, vec_header_ptr_reg);

        let vec_header = Self::vec_header_from_heap(&self.memory, vec_addr);
        let index = get_reg!(self, int_reg);
        if vec_header.padding != VEC_HEADER_MAGIC_CODE {
            return self.internal_trap(TrapCode::MemoryCorruption);
        }

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            eprintln!(
                "vec_get: vec_header_addr: {vec_addr:04X} index: {index} count: {}, capacity: {}",
                vec_header.element_count, vec_header.capacity
            );
        }

        if index >= vec_header.element_count as u32 {
            return self.internal_trap(TrapCode::VecBoundsFail {
                encountered: index as usize,
                element_count: vec_header.element_count as usize,
            });
        }

        let address_of_element =
            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + index * vec_header.element_size;

        set_reg!(self, element_target_reg, address_of_element);
    }

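    /// Subscript write. Currently a stub that only emits a debug trace; no
    /// store is performed.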
    #[inline]
    pub fn execute_vec_set(
        &mut self,
        _vec_header_ptr_reg: u8,
        _int_index_reg: u8,
        _item_ptr_reg: u8,
    ) {
        #[cfg(feature = "debug_vm")]
        {
            eprintln!("vec_set ");
        }
    }

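    /// Appends every element of the source vector to the end of the
    /// destination vector, trapping if the combined length would exceed the
    /// destination's capacity. `copy_nonoverlapping` is sound here because
    /// the write begins past the destination's existing elements, so the
    /// regions cannot overlap, even when a vector is extended with itself.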
    #[inline]
    pub fn execute_vec_extend(&mut self, destination_vec_reg: u8, src_other_vec: u8) {
        let vec_addr = get_reg!(self, destination_vec_reg);
        let src_addr = get_reg!(self, src_other_vec);

        let mut_vec_ptr = self.memory.get_heap_ptr(vec_addr as usize) as *mut VecHeader;
        let src_vec_ptr = self.memory.get_heap_const_ptr(src_addr as usize) as *const VecHeader;

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            unsafe {
                eprintln!(
                    "vec_extend: vec_addr: {vec_addr:08X}, header: {:?}",
                    (*mut_vec_ptr)
                );
            }
        }

        unsafe {
            // Validate both headers before trusting any of their fields
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if (*src_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            let target_len = (*mut_vec_ptr).element_count;
            let src_element_count_to_copy = (*src_vec_ptr).element_count;
            let total_len = target_len + src_element_count_to_copy;

            if total_len > (*mut_vec_ptr).capacity {
                return self.internal_trap(TrapCode::VecOutOfCapacity {
                    encountered: total_len,
                    capacity: (*mut_vec_ptr).capacity,
                });
            }

            let tail_of_target_addr = vec_addr
                + VEC_HEADER_PAYLOAD_OFFSET.0
                + target_len as u32 * (*mut_vec_ptr).element_size;
            let start_of_source_addr = src_addr + VEC_HEADER_PAYLOAD_OFFSET.0;

            let dest_ptr = self.memory.get_heap_ptr(tail_of_target_addr as usize);
            let src_ptr = self
                .memory
                .get_heap_const_ptr(start_of_source_addr as usize);
            let byte_count =
                src_element_count_to_copy as usize * (*mut_vec_ptr).element_size as usize;

            ptr::copy_nonoverlapping(src_ptr, dest_ptr, byte_count);
            (*mut_vec_ptr).element_count += src_element_count_to_copy;
        }
    }

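    /// Push that returns storage rather than copying a value in: reserves
    /// the next slot by bumping the element count and writes the slot's heap
    /// address to `destination_entry_addr_reg` so the caller can construct
    /// the element in place. Traps when the vector is full.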
    #[inline]
    pub fn execute_vec_push_addr(
        &mut self,
        destination_entry_addr_reg: u8,
        src_vec_header_ptr_reg: u8,
    ) {
        let vec_addr = get_reg!(self, src_vec_header_ptr_reg);

        let mut_vec_ptr = self.memory.get_heap_ptr(vec_addr as usize) as *mut VecHeader;

        #[cfg(feature = "debug_vm")]
        if self.debug_operations_enabled {
            unsafe {
                eprintln!(
                    "vec_push_addr: vec_addr: {vec_addr:08X}, header: {:?}",
                    (*mut_vec_ptr)
                );
            }
        }

        let len = unsafe { (*mut_vec_ptr).element_count };

        unsafe {
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            if len >= (*mut_vec_ptr).capacity {
                return self.internal_trap(TrapCode::VecOutOfCapacity {
                    encountered: len,
                    capacity: (*mut_vec_ptr).capacity,
                });
            }
            (*mut_vec_ptr).element_count += 1;
        }

        let address_of_new_element = unsafe {
            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + len as u32 * (*mut_vec_ptr).element_size
        };

        set_reg!(self, destination_entry_addr_reg, address_of_new_element);
    }

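    /// Pops the last element: decrements the element count and writes the
    /// popped element's heap address to `dst_reg`. The bytes are left in
    /// place, so the address stays readable until the slot is reused by a
    /// later push.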
    #[inline]
    pub fn execute_vec_pop(&mut self, dst_reg: u8, vec_header_ptr_reg: u8) {
        let vec_addr = get_reg!(self, vec_header_ptr_reg);
        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(vec_addr as usize)
            .cast::<VecHeader>();

        unsafe {
            let header = &mut *mut_vec_ptr;

            if header.padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            // Popping from an empty vector is a trap, not a no-op
            if header.element_count == 0 {
                return self.internal_trap(TrapCode::VecEmpty);
            }

            // Get the last element index
            let last_index = u32::from(header.element_count) - 1;

            // Calculate the address of the element being popped
            let address_of_element_to_pop =
                vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + last_index * header.element_size;

            header.element_count -= 1;

            set_reg!(self, dst_reg, address_of_element_to_pop);
        }
    }

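    /// Removes the element at the index held in `remove_index_reg`,
    /// preserving order by shifting every later element one slot to the
    /// left; cost is linear in the number of trailing elements.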
    #[inline]
    pub fn execute_vec_remove_index(&mut self, vec_header_ptr_reg: u8, remove_index_reg: u8) {
        let vec_addr = get_reg!(self, vec_header_ptr_reg);
        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(vec_addr as usize)
            .cast::<VecHeader>();

        let index = get_reg!(self, remove_index_reg);

        unsafe {
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if index >= u32::from((*mut_vec_ptr).element_count) {
                return self.internal_trap(TrapCode::VecBoundsFail {
                    encountered: index as usize,
                    element_count: (*mut_vec_ptr).element_count as usize,
                });
            }
        }

        let size_of_each_element = unsafe { (*mut_vec_ptr).element_size };
        let address_of_element_to_be_removed =
            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + index * size_of_each_element;

        unsafe {
            let header = &mut *mut_vec_ptr;
            let count = u32::from(header.element_count);

            // Shift the tail left by one element, unless the removed element
            // was the last one
            if index < count - 1 {
                let src_addr = address_of_element_to_be_removed + size_of_each_element;
                let dst_addr = address_of_element_to_be_removed;
                let elems_after = (count - index - 1) as usize;
                let bytes_to_move = elems_after * size_of_each_element as usize;

                let src_ptr = self.memory.get_heap_ptr(src_addr as usize).cast_const();
                let dst_ptr = self.memory.get_heap_ptr(dst_addr as usize);

                // The regions overlap, so this must be a memmove-style copy
                ptr::copy(src_ptr, dst_ptr, bytes_to_move);
            }

            header.element_count -= 1;
        }
    }
}
707}