// swamp_vm/vec.rs

/*
 * Copyright (c) Peter Bjorklund. All rights reserved. https://github.com/swamp/swamp
 * Licensed under the MIT License. See LICENSE in the project root for license information.
 */
use crate::memory::Memory;
use crate::set_reg;
use crate::{get_reg, i16_from_u8s, u16_from_u8s, u32_from_u8s, TrapCode, Vm};
use std::ptr;
use swamp_vm_types::{
    VecHeader, VecIterator, VEC_HEADER_MAGIC_CODE, VEC_HEADER_PAYLOAD_OFFSET, VEC_HEADER_SIZE,
};

impl Vm {
14    pub fn get_vec_iterator_header_ptr_from_reg(&self, vec_iterator_reg: u8) -> *mut VecIterator {
15        self.get_ptr_from_reg(vec_iterator_reg) as *mut VecIterator
16    }
17
18    #[inline]
19    pub fn execute_array_init(
20        &mut self,
21        target_vec_ptr_reg: u8,
22        capacity_lower: u8,
23        capacity_upper: u8,
24        element_size_0: u8,
25        element_size_1: u8,
26        element_size_2: u8,
27        element_size_3: u8,
28    ) {
29        let vec_addr = get_reg!(self, target_vec_ptr_reg);
30        let mut_vec_ptr = self
31            .memory
32            .get_heap_ptr(vec_addr as usize)
33            .cast::<VecHeader>();
34        let capacity = u16_from_u8s!(capacity_lower, capacity_upper);
35        let element_size = u32_from_u8s!(
36            element_size_0,
37            element_size_1,
38            element_size_2,
39            element_size_3
40        );
41        debug_assert_ne!(capacity, 0, "illegal capacity");
42        unsafe {
43            (*mut_vec_ptr).element_count = capacity;
44            (*mut_vec_ptr).capacity = capacity;
45            (*mut_vec_ptr).element_size = element_size;
46            (*mut_vec_ptr).padding = VEC_HEADER_MAGIC_CODE;
47        }
48
49        if self.debug_operations_enabled {
50            eprintln!("array init element_size:{element_size} into vec_addr: {vec_addr:X}");
51        }
52    }
53
54    #[inline]
55    pub fn execute_vec_cmp(
56        &mut self,
57        bool_target_reg: u8,
58        left_vec_ptr_reg: u8,
59        right_vec_ptr_reg: u8,
60    ) {
61        let left_vec_addr = get_reg!(self, left_vec_ptr_reg);
62        let right_vec_addr = get_reg!(self, right_vec_ptr_reg);
63
64        let left_vec_ptr = self
65            .memory
66            .get_heap_const_ptr(left_vec_addr as usize)
67            .cast::<VecHeader>();
68
69        let right_vec_ptr = self
70            .memory
71            .get_heap_const_ptr(right_vec_addr as usize)
72            .cast::<VecHeader>();
73
74        unsafe {
75            if (*left_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
76                return self.internal_trap(TrapCode::MemoryCorruption);
77            }
78            if (*left_vec_ptr).capacity == 0 {
79                eprintln!("TARGET IS NOT INITIALIZED");
80                return self.internal_trap(TrapCode::VecNeverInitialized);
81            }
82            if (*right_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
83                return self.internal_trap(TrapCode::MemoryCorruption);
84            }
85            if (*right_vec_ptr).capacity == 0 {
86                eprintln!("SOURCE IS NOT INITIALIZED");
87                return self.internal_trap(TrapCode::VecNeverInitialized);
88            }
89
90            if (*left_vec_ptr).element_count != (*right_vec_ptr).element_count {
91                set_reg!(self, bool_target_reg, 0);
92            }
93
94            let left_ptr = self
95                .memory
96                .get_heap_const_ptr(left_vec_addr as usize)
97                .add(VEC_HEADER_PAYLOAD_OFFSET.0 as usize);
98
99            let right_ptr = self
100                .memory
101                .get_heap_const_ptr(right_vec_addr as usize)
102                .add(VEC_HEADER_PAYLOAD_OFFSET.0 as usize);
103
104            let byte_size =
105                ((*left_vec_ptr).element_count as usize) * (*left_vec_ptr).element_size as usize;
106            let slice_a = std::slice::from_raw_parts(left_ptr, byte_size);
107            let slice_b = std::slice::from_raw_parts(right_ptr, byte_size);
108
109            set_reg!(self, bool_target_reg, slice_a == slice_b);
110        }
111    }
112
    /// Copies the full contents of the source vector into the target vector.
    ///
    /// Both headers are validated first: a wrong magic value traps with
    /// `MemoryCorruption`, a zero capacity traps with `VecNeverInitialized`,
    /// and a source whose element count exceeds the target's capacity traps
    /// with `VecOutOfCapacity`.
    #[inline]
    pub fn execute_vec_copy(&mut self, target_vec_ptr_reg: u8, source_vec_ptr_reg: u8) {
        let target_vec_addr = get_reg!(self, target_vec_ptr_reg);
        let source_vec_addr = get_reg!(self, source_vec_ptr_reg);

        let mut_vec_ptr = self
            .memory
            .get_heap_ptr(target_vec_addr as usize)
            .cast::<VecHeader>();

        let src_vec_ptr = self
            .memory
            .get_heap_const_ptr(source_vec_addr as usize)
            .cast::<VecHeader>();

        unsafe {
            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if (*mut_vec_ptr).capacity == 0 {
                eprintln!("TARGET IS NOT INITIALIZED");
                return self.internal_trap(TrapCode::VecNeverInitialized);
            }
            if (*src_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }
            if (*src_vec_ptr).capacity == 0 {
                eprintln!("SOURCE IS NOT INITIALIZED");
                return self.internal_trap(TrapCode::VecNeverInitialized);
            }

            if (*mut_vec_ptr).capacity < (*src_vec_ptr).element_count {
                return self.internal_trap(TrapCode::VecOutOfCapacity {
                    encountered: (*src_vec_ptr).element_count,
                    capacity: (*mut_vec_ptr).capacity,
                });
            }

            // Remember the target's capacity so the assert below can verify
            // the raw copy did not clobber it.
            let target_capacity = (*mut_vec_ptr).capacity;

            // The copy starts 2 bytes past the header start so the target
            // keeps its own capacity field. NOTE(review): this assumes
            // `capacity` is the first header field and is 2 bytes wide, and
            // that the payload begins immediately after the header
            // (VEC_HEADER_PAYLOAD_OFFSET == VEC_HEADER_SIZE) -- confirm
            // against VecHeader's declared layout.
            let target_tail = (target_vec_addr + 2) as usize; // Skip capacity
            let target_raw = self.memory.get_heap_ptr(target_tail);
            let source_tail = (source_vec_addr + 2) as usize; // Skip capacity
            let source_raw = self.memory.get_heap_const_ptr(source_tail);

            // Rest of the header (everything after capacity) plus the
            // occupied portion of the source payload.
            let total_bytes_to_copy = (VEC_HEADER_SIZE.0 - 2)
                + ((*src_vec_ptr).element_count as u32) * (*src_vec_ptr).element_size;

            ptr::copy_nonoverlapping(source_raw, target_raw, total_bytes_to_copy as usize);

            debug_assert_eq!(
                (*mut_vec_ptr).element_count,
                (*src_vec_ptr).element_count,
                "element count differs"
            );
            debug_assert_eq!(
                (*mut_vec_ptr).capacity,
                target_capacity,
                "capacity has been modified"
            );
        }
    }

177    #[inline]
178    pub fn execute_vec_copy_range(&mut self, target_vec_ptr_reg: u8, source_vec_ptr_reg: u8, range_reg: u8) {
179        let target_vec_addr = get_reg!(self, target_vec_ptr_reg);
180        let source_vec_addr = get_reg!(self, source_vec_ptr_reg);
181        let range_header = self.range_header_from_reg(range_reg);
182
183        let mut_vec_ptr = self
184            .memory
185            .get_heap_ptr(target_vec_addr as usize)
186            .cast::<VecHeader>();
187
188        let src_vec_ptr = self
189            .memory
190            .get_heap_const_ptr(source_vec_addr as usize)
191            .cast::<VecHeader>();
192
193        unsafe {
194            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
195                return self.internal_trap(TrapCode::MemoryCorruption);
196            }
197            if (*mut_vec_ptr).capacity == 0 {
198                eprintln!("TARGET IS NOT INITIALIZED");
199                return self.internal_trap(TrapCode::VecNeverInitialized);
200            }
201            if (*src_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
202                return self.internal_trap(TrapCode::MemoryCorruption);
203            }
204            if (*src_vec_ptr).capacity == 0 {
205                return self.internal_trap(TrapCode::VecNeverInitialized);
206            }
207
208            if range_header.max < range_header.min {
209                return self.internal_trap(TrapCode::ReverseRangeNotAllowedHere);
210            }
211
212
213            debug_assert!(range_header.max >= range_header.min);
214
215            let num_elements_to_copy = if range_header.inclusive { (range_header.max - range_header.min + 1) as u32 } else { (range_header.max - range_header.min) as u32 };
216            let source_element_index = range_header.min as u32;
217            let required_source_element_count = source_element_index + num_elements_to_copy;
218
219            if (*mut_vec_ptr).capacity < num_elements_to_copy as u16 {
220                return self.internal_trap(TrapCode::VecOutOfCapacity {
221                    encountered: (*src_vec_ptr).element_count,
222                    capacity: (*mut_vec_ptr).capacity,
223                });
224            }
225
226            if (*src_vec_ptr).element_count < required_source_element_count as u16 {
227                return self.internal_trap(TrapCode::VecBoundsFail {
228                    encountered: required_source_element_count as usize,
229                    element_count: (*src_vec_ptr).element_count as usize,
230                });
231            }
232
233            let target_capacity = (*mut_vec_ptr).capacity;
234
235            let target_payload = (target_vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0) as usize;
236            let target_raw = self.memory.get_heap_ptr(target_payload);
237
238            let source_slice_start = (source_vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + source_element_index * (*src_vec_ptr).element_size) as usize;
239            let source_raw = self.memory.get_heap_const_ptr(source_slice_start);
240
241            let total_bytes_to_copy = num_elements_to_copy * (*src_vec_ptr).element_size;
242
243            ptr::copy_nonoverlapping(source_raw, target_raw, total_bytes_to_copy as usize);
244            (*mut_vec_ptr).element_count = num_elements_to_copy as u16;
245
246            debug_assert_eq!(
247                (*mut_vec_ptr).capacity,
248                target_capacity,
249                "capacity has been modified"
250            );
251        }
252    }
253
254    #[inline]
255    pub fn execute_vec_init(
256        &mut self,
257        target_vec_ptr_reg: u8,
258        capacity_lower: u8,
259        capacity_upper: u8,
260        element_size_0: u8,
261        element_size_1: u8,
262        element_size_2: u8,
263        element_size_3: u8,
264    ) {
265        let vec_addr = get_reg!(self, target_vec_ptr_reg);
266        let mut_vec_ptr = self
267            .memory
268            .get_heap_ptr(vec_addr as usize)
269            .cast::<VecHeader>();
270        let capacity = u16_from_u8s!(capacity_lower, capacity_upper);
271        let element_size = u32_from_u8s!(
272            element_size_0,
273            element_size_1,
274            element_size_2,
275            element_size_3
276        );
277        debug_assert_ne!(capacity, 0, "illegal capacity");
278        unsafe {
279            //(*mut_vec_ptr).element_count = 0; // zero is default, so shouldn't be needed
280            (*mut_vec_ptr).capacity = capacity;
281            (*mut_vec_ptr).element_size = element_size;
282            (*mut_vec_ptr).padding = VEC_HEADER_MAGIC_CODE;
283        }
284    }
285
286    #[inline]
287    pub fn execute_vec_iter_init(
288        &mut self,
289        target_vec_iterator_header_reg: u8,
290        vec_header_reg: u8,
291    ) {
292        let vec_header_addr = get_reg!(self, vec_header_reg);
293
294        // Check that vec header is correct
295        let vec_header_ptr = self
296            .memory
297            .get_heap_const_ptr(vec_header_addr as usize)
298            .cast::<VecHeader>();
299        let vec_header = unsafe { &*vec_header_ptr };
300
301        if vec_header.padding != VEC_HEADER_MAGIC_CODE {
302            return self.internal_trap(TrapCode::MemoryCorruption);
303        }
304        if vec_header.capacity == 0 {
305            return self.internal_trap(TrapCode::VecNeverInitialized);
306        }
307
308        #[cfg(feature = "debug_vm")]
309        if self.debug_operations_enabled {
310            let iter_addr = get_reg!(self, target_vec_iterator_header_reg);
311            eprintln!(
312                "vec_iter_init: iter_addr: {iter_addr:04X} vec_header_addr:{vec_header_addr:04X} element_size: {}",
313                vec_header.element_size
314            );
315        }
316
317        // Assert that element_size is reasonable
318        debug_assert!(vec_header.element_size > 0, "Element size cannot be zero");
319
320        let vec_iterator = VecIterator {
321            vec_header_heap_ptr: vec_header_addr,
322            index: 0,
323        };
324
325        let vec_iterator_mut_ptr =
326            self.get_ptr_from_reg(target_vec_iterator_header_reg) as *mut VecIterator;
327
328        unsafe {
329            ptr::write(vec_iterator_mut_ptr, vec_iterator);
330        }
331    }
332
    /// Advances a vector iterator: writes the current element's heap address
    /// into `target_variable` and bumps the index, or -- when the iterator is
    /// exhausted -- branches by the signed 16-bit offset encoded in
    /// `branch_offset_lower`/`branch_offset_upper`.
    #[inline]
    pub fn execute_vec_iter_next(
        &mut self,
        vec_iterator_header_reg: u8,
        target_variable: u8,
        branch_offset_lower: u8,
        branch_offset_upper: u8,
    ) {
        let vec_iterator = self.get_vec_iterator_header_ptr_from_reg(vec_iterator_header_reg);

        unsafe {
            let vec_header_addr = (*vec_iterator).vec_header_heap_ptr;
            let vec_header_ptr = self
                .memory
                .get_heap_const_ptr(vec_header_addr as usize)
                .cast::<VecHeader>();
            let vec_header = &*vec_header_ptr;
            // Re-validate on every step: the header could have been corrupted
            // between iterations.
            if vec_header.padding != VEC_HEADER_MAGIC_CODE {
                return self.internal_trap(TrapCode::MemoryCorruption);
            }

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                let iter_addr = get_reg!(self, vec_iterator_header_reg);
                let index = (*vec_iterator).index;
                eprintln!(
                    "vec_iter_next: iter_addr: {iter_addr:04X} addr:{vec_header_addr:04X} index:{index} len: {}, capacity: {}, element_size: {}",
                    vec_header.element_count, vec_header.capacity, vec_header.element_size
                );
            }

            // Check if we've reached the end
            if (*vec_iterator).index >= vec_header.element_count {
                // Jump to the provided address if we're done
                let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);

                #[cfg(feature = "debug_vm")]
                {
                    if self.debug_operations_enabled {
                        eprintln!("vec_iter_next complete. jumping with offset {branch_offset}");
                    }
                }

                // Relative branch: offset may be negative, hence the i32 detour.
                self.pc = (self.pc as i32 + branch_offset as i32) as usize;

                return;
            }

            // Calculate the address of the current element
            let element_addr = (*vec_iterator).vec_header_heap_ptr
                + VEC_HEADER_PAYLOAD_OFFSET.0
                + (*vec_iterator).index as u32 * vec_header.element_size;

            #[cfg(feature = "debug_vm")]
            if self.debug_operations_enabled {
                eprintln!(
                    "vec_iter_next: element_addr {element_addr:04X} to reg {target_variable}"
                );
            }

            set_reg!(self, target_variable, element_addr);

            (*vec_iterator).index += 1;
        }
    }

399    #[inline]
400    pub fn execute_vec_iter_next_pair(
401        &mut self,
402        vec_iterator_header_reg: u8,
403        target_key_reg: u8,
404        target_value_reg: u8,
405        branch_offset_lower: u8,
406        branch_offset_upper: u8,
407    ) {
408        let vec_iterator = self.get_vec_iterator_header_ptr_from_reg(vec_iterator_header_reg);
409
410        unsafe {
411            let vec_header_addr = (*vec_iterator).vec_header_heap_ptr;
412            let vec_header_ptr =
413                self.memory.get_heap_const_ptr(vec_header_addr as usize) as *const VecHeader;
414            let vec_header = &*vec_header_ptr;
415            if vec_header.padding != VEC_HEADER_MAGIC_CODE {
416                return self.internal_trap(TrapCode::MemoryCorruption);
417            }
418            #[cfg(feature = "debug_vm")]
419            if self.debug_operations_enabled {
420                let iter_addr = get_reg!(self, vec_iterator_header_reg);
421                let index = (*vec_iterator).index;
422                eprintln!(
423                    "vec_iter_next: iter_addr: {iter_addr:04X} addr:{vec_header_addr:04X} index:{index} len: {}, capacity: {}",
424                    vec_header.element_count, vec_header.capacity
425                );
426            }
427
428            // Check if we've reached the end
429            if (*vec_iterator).index >= vec_header.element_count {
430                // Jump to the provided address if we're done
431                let branch_offset = i16_from_u8s!(branch_offset_lower, branch_offset_upper);
432
433                #[cfg(feature = "debug_vm")]
434                {
435                    if self.debug_operations_enabled {
436                        eprintln!(
437                            "vec_iter_next_pair complete. jumping with offset {branch_offset}"
438                        );
439                    }
440                }
441
442                self.pc = (self.pc as i32 + branch_offset as i32) as usize;
443
444                return;
445            }
446
447            // Calculate the address of the current element
448            let element_addr = (*vec_iterator).vec_header_heap_ptr
449                + VEC_HEADER_PAYLOAD_OFFSET.0
450                + (*vec_iterator).index as u32 * vec_header.element_size;
451
452            #[cfg(feature = "debug_vm")]
453            if self.debug_operations_enabled {
454                eprintln!(
455                    "vec_iter_next: element_addr {element_addr:04X} to reg {target_value_reg}"
456                );
457            }
458
459            set_reg!(self, target_key_reg, (*vec_iterator).index);
460            set_reg!(self, target_value_reg, element_addr);
461
462            (*vec_iterator).index += 1;
463        }
464    }
465
466    pub fn vec_header_from_heap(heap: &Memory, heap_offset: u32) -> VecHeader {
467        unsafe { *(heap.get_heap_const_ptr(heap_offset as usize) as *const VecHeader) }
468    }
469
470    pub fn read_vec_header_from_ptr_reg(&self, vec_header_ptr_reg: u8) -> VecHeader {
471        let vec_header_const_ptr =
472            self.get_const_ptr_from_reg(vec_header_ptr_reg) as *const VecHeader;
473        unsafe { *vec_header_const_ptr }
474    }
475
476    pub fn get_vec_header_ptr_from_reg(&self, vec_header_ptr_reg: u8) -> *mut VecHeader {
477        self.get_ptr_from_reg(vec_header_ptr_reg) as *mut VecHeader
478    }
479
480    #[inline]
481    pub fn execute_vec_get(&mut self, element_target_reg: u8, vec_header_ptr_reg: u8, int_reg: u8) {
482        let vec_addr = get_reg!(self, vec_header_ptr_reg);
483
484        let vec_header = Self::vec_header_from_heap(&self.memory, vec_addr);
485        let index = get_reg!(self, int_reg);
486        if vec_header.padding != VEC_HEADER_MAGIC_CODE {
487            return self.internal_trap(TrapCode::MemoryCorruption);
488        }
489
490        #[cfg(feature = "debug_vm")]
491        if self.debug_operations_enabled {
492            eprintln!(
493                "vec_get: vec_header_addr: {vec_addr:04X} index: {index} count: {}, capacity: {} ",
494                vec_header.element_count, vec_header.capacity
495            );
496        }
497
498        #[cfg(feature = "debug_vm")]
499        {
500            if self.debug_operations_enabled {
501                eprintln!(
502                    "vec_get {} {} (capacity: {}) ",
503                    index, vec_header.element_count, vec_header.capacity
504                );
505            }
506        }
507
508        if index >= vec_header.element_count as u32 {
509            return self.internal_trap(TrapCode::VecBoundsFail {
510                encountered: index as usize,
511                element_count: vec_header.element_count as usize,
512            });
513        }
514
515        let address_of_element =
516            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + index * vec_header.element_size;
517
518        set_reg!(self, element_target_reg, address_of_element);
519    }
520
    /// Placeholder for the `vec_set` opcode.
    ///
    /// NOTE(review): currently a no-op apart from the optional debug trace --
    /// none of the three registers are read. Presumably element writes happen
    /// elsewhere (e.g. through the address returned by `execute_vec_get`);
    /// confirm before relying on this opcode doing anything.
    #[inline]
    pub fn execute_vec_set(&mut self, vec_header_ptr_reg: u8, int_index_reg: u8, item_ptr_reg: u8) {
        #[cfg(feature = "debug_vm")]
        {
            eprintln!("vec_set ");
        }
    }

529    #[inline]
530    pub fn execute_vec_push_addr(
531        &mut self,
532        destination_entry_addr_reg: u8,
533        src_vec_header_ptr_reg: u8,
534    ) {
535        let vec_addr = get_reg!(self, src_vec_header_ptr_reg);
536
537        let mut_vec_ptr = self.memory.get_heap_ptr(vec_addr as usize) as *mut VecHeader;
538        #[cfg(feature = "debug_vm")]
539        if self.debug_operations_enabled {
540            unsafe {
541                eprintln!(
542                    "vec_push_addr: vec_addr: {vec_addr:08X}, payload_offset: {:?}",
543                    (*mut_vec_ptr)
544                );
545            }
546        }
547
548        let len = unsafe { (*mut_vec_ptr).element_count };
549
550        unsafe {
551            if (*mut_vec_ptr).padding != VEC_HEADER_MAGIC_CODE {
552                return self.internal_trap(TrapCode::MemoryCorruption);
553            }
554
555            if (*mut_vec_ptr).capacity == 0 {
556                return self.internal_trap(TrapCode::VecNeverInitialized);
557            }
558            if len >= (*mut_vec_ptr).capacity {
559                return self.internal_trap(TrapCode::VecOutOfCapacity {
560                    encountered: len,
561                    capacity: (*mut_vec_ptr).capacity,
562                });
563            }
564            (*mut_vec_ptr).element_count += 1;
565        }
566
567        let address_of_new_element = unsafe {
568            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + len as u32 * (*mut_vec_ptr).element_size
569        };
570
571        set_reg!(self, destination_entry_addr_reg, address_of_new_element);
572    }
573
574    #[inline]
575    pub fn execute_vec_pop(&mut self, dst_reg: u8, vec_header_ptr_reg: u8) {
576        let vec_addr = get_reg!(self, vec_header_ptr_reg);
577        let mut_vec_ptr = self
578            .memory
579            .get_heap_ptr(vec_addr as usize)
580            .cast::<VecHeader>();
581
582        unsafe {
583            let header = &mut *mut_vec_ptr;
584
585            // Check if vector is empty
586            if header.element_count == 0 {
587                return self.internal_trap(TrapCode::VecEmpty);
588            }
589            // Get the last element index
590            let last_index = u32::from(header.element_count) - 1;
591
592            // Calculate address of the element to be popped
593            let address_of_element_to_pop =
594                vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + last_index * header.element_size;
595
596            header.element_count -= 1;
597
598            set_reg!(self, dst_reg, address_of_element_to_pop);
599        }
600    }
601
602    #[inline]
603    pub fn execute_vec_remove_index(&mut self, vec_header_ptr_reg: u8, remove_index_reg: u8) {
604        let vec_addr = get_reg!(self, vec_header_ptr_reg);
605        let mut_vec_ptr = self
606            .memory
607            .get_heap_ptr(vec_addr as usize)
608            .cast::<VecHeader>();
609
610        let index = get_reg!(self, remove_index_reg);
611
612        unsafe {
613            if index >= u32::from((*mut_vec_ptr).element_count) {
614                return self.internal_trap(TrapCode::VecBoundsFail {
615                    encountered: index as usize,
616                    element_count: (*mut_vec_ptr).element_count as usize,
617                });
618            }
619        }
620
621        let size_of_each_element = unsafe { (*mut_vec_ptr).element_size };
622        let address_of_element_to_be_removed =
623            vec_addr + VEC_HEADER_PAYLOAD_OFFSET.0 + index * size_of_each_element;
624
625        unsafe {
626            let header = &mut *mut_vec_ptr;
627            let count = u32::from(header.element_count);
628
629            if index < count - 1 {
630                let src_addr = address_of_element_to_be_removed + size_of_each_element;
631                let dst_addr = address_of_element_to_be_removed;
632                let elems_after = (count - index - 1) as usize;
633                let bytes_to_move = elems_after * size_of_each_element as usize;
634
635                let src_ptr = self.memory.get_heap_ptr(src_addr as usize).cast_const();
636                let dst_ptr = self.memory.get_heap_ptr(dst_addr as usize);
637
638                // MemMove (copy *with* overlap)
639                ptr::copy(src_ptr, dst_ptr, bytes_to_move);
640            }
641
642            header.element_count -= 1;
643        }
644    }
}