hyperlight_host/mem/mgr.rs

/*
Copyright 2025 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use std::cmp::Ordering;

use hyperlight_common::flatbuffer_wrappers::function_call::{
    FunctionCall, validate_guest_function_call_buffer,
};
use hyperlight_common::flatbuffer_wrappers::function_types::ReturnValue;
use hyperlight_common::flatbuffer_wrappers::guest_error::GuestError;
use hyperlight_common::flatbuffer_wrappers::guest_log_data::GuestLogData;
use hyperlight_common::flatbuffer_wrappers::host_function_details::HostFunctionDetails;
use tracing::{Span, instrument};

use super::exe::ExeInfo;
use super::layout::SandboxMemoryLayout;
use super::memory_region::MemoryRegion;
#[cfg(feature = "init-paging")]
use super::memory_region::{DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegionType};
use super::ptr::{GuestPtr, RawPtr};
use super::ptr_offset::Offset;
use super::shared_mem::{ExclusiveSharedMemory, GuestSharedMemory, HostSharedMemory, SharedMemory};
use super::shared_mem_snapshot::SharedMemorySnapshot;
use crate::sandbox::SandboxConfiguration;
use crate::sandbox::uninitialized::GuestBlob;
use crate::{Result, log_then_return, new_error};

cfg_if::cfg_if! {
    if #[cfg(feature = "init-paging")] {
        /// Paging Flags
        ///
        /// See the following links explaining paging, also see paging-development-notes.md in docs:
        ///
        /// * Very basic description: https://stackoverflow.com/a/26945892
        /// * More in-depth descriptions: https://wiki.osdev.org/Paging
        pub(crate) const PAGE_PRESENT: u64 = 1; // Page is Present
        pub(crate) const PAGE_RW: u64 = 1 << 1; // Page is Read/Write (if not set, the page is read-only as long as the WP bit in CR0 is set to 1 - which it is in Hyperlight)
        pub(crate) const PAGE_USER: u64 = 1 << 2; // User/Supervisor (if this bit is set then the page is accessible by user mode code)
        pub(crate) const PAGE_NX: u64 = 1 << 63; // Execute Disable (if this bit is set then data in the page cannot be executed)
        // The amount of memory that can be mapped per page table: 512 PTEs x 4KiB pages = 2MiB
        pub(super) const AMOUNT_OF_MEMORY_PER_PT: usize = 0x200_000;
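        // Illustrative only (not used at runtime): a page-table entry is the page's
        // guest-physical address OR'd with these flags. For example, a writable,
        // user-accessible, non-executable 4KiB data page at 0x203000 would be encoded
        // as 0x203000 | PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX.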
    }
}

/// The size of stack guard cookies
pub(crate) const STACK_COOKIE_LEN: usize = 16;

/// A struct that is responsible for laying out and managing the memory
/// for a given `Sandbox`.
#[derive(Clone)]
pub(crate) struct SandboxMemoryManager<S> {
    /// Shared memory for the Sandbox
    pub(crate) shared_mem: S,
    /// The memory layout of the underlying shared memory
    pub(crate) layout: SandboxMemoryLayout,
    /// Pointer to the guest-memory address at which the guest binary is loaded
    pub(crate) load_addr: RawPtr,
    /// Offset for the execution entrypoint from `load_addr`
    pub(crate) entrypoint_offset: Offset,
    /// How many memory regions were mapped after sandbox creation
    pub(crate) mapped_rgns: u64,
    /// Stack cookie for stack guard verification
    pub(crate) stack_cookie: [u8; STACK_COOKIE_LEN],
    /// Buffer for accumulating guest abort messages
    pub(crate) abort_buffer: Vec<u8>,
}

impl<S> SandboxMemoryManager<S>
where
    S: SharedMemory,
{
    /// Create a new `SandboxMemoryManager` with the given parameters
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn new(
        layout: SandboxMemoryLayout,
        shared_mem: S,
        load_addr: RawPtr,
        entrypoint_offset: Offset,
        stack_cookie: [u8; STACK_COOKIE_LEN],
    ) -> Self {
        Self {
            layout,
            shared_mem,
            load_addr,
            entrypoint_offset,
            mapped_rgns: 0,
            stack_cookie,
            abort_buffer: Vec::new(),
        }
    }

    /// Get the stack cookie
    #[instrument(skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_stack_cookie(&self) -> &[u8; STACK_COOKIE_LEN] {
        &self.stack_cookie
    }

    /// Get mutable access to the abort buffer
    pub(crate) fn get_abort_buffer_mut(&mut self) -> &mut Vec<u8> {
        &mut self.abort_buffer
    }

    /// Get `SharedMemory` in `self` as a mutable reference
    #[cfg(any(gdb, test))]
    pub(crate) fn get_shared_mem_mut(&mut self) -> &mut S {
        &mut self.shared_mem
    }

    /// Set up the page tables for the hypervisor partition in this sandbox's
    /// shared memory, covering `mem_size` bytes of guest memory described by
    /// `regions`, and return the initial guest stack pointer (RSP).
    // TODO: This should perhaps happen earlier and use an
    // ExclusiveSharedMemory from the beginning.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    #[cfg(feature = "init-paging")]
    pub(crate) fn set_up_shared_memory(
        &mut self,
        mem_size: u64,
        regions: &mut [MemoryRegion],
    ) -> Result<u64> {
        let rsp: u64 = self.layout.get_top_of_user_stack_offset() as u64
            + SandboxMemoryLayout::BASE_ADDRESS as u64
            + self.layout.stack_size as u64
            // TODO: subtracting 0x28 was a requirement for MSVC. It should no longer be
            // necessary now, but, for some reason, without this, the `multiple_parameters`
            // test from `sandbox_host_tests` fails. We should investigate this further.
            // See issue #498 for more details.
            - 0x28;

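        // Build a four-level x86-64 page-table hierarchy directly in guest memory:
        // a single PML4 entry -> a single PDPT entry -> 512 PDEs -> one page table
        // per 2MiB of sandbox memory, identity-mapping the guest-physical address
        // space with per-region permissions.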
        self.shared_mem.with_exclusivity(|shared_mem| {
            // Create the PML4 table with only 1 PML4E
            shared_mem.write_u64(
                SandboxMemoryLayout::PML4_OFFSET,
                SandboxMemoryLayout::PDPT_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

            // Create the PDPT with only 1 PDPTE
            shared_mem.write_u64(
                SandboxMemoryLayout::PDPT_OFFSET,
                SandboxMemoryLayout::PD_GUEST_ADDRESS as u64 | PAGE_PRESENT | PAGE_RW,
            )?;

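            // Create the PD with 512 PDEs, each pointing at one 4KiB page table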
            for i in 0..512 {
                let offset = SandboxMemoryLayout::PD_OFFSET + (i * 8);
                let val_to_write: u64 = (SandboxMemoryLayout::PT_GUEST_ADDRESS as u64
                    + (i * 4096) as u64)
                    | PAGE_PRESENT
                    | PAGE_RW;
                shared_mem.write_u64(offset, val_to_write)?;
            }

            // We only need to create enough PTEs to map the amount of memory we have
            // We need one PT for every 2MB of memory that is mapped
            // We can use the memory size to calculate the number of PTs we need
            // We round up mem_size/2MB
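            // For example, an 8MiB sandbox needs 8MiB / 2MiB = 4 page tables, while a
            // 9MiB sandbox rounds up to 5.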

            let mem_size = usize::try_from(mem_size)?;

            let num_pages: usize = mem_size.div_ceil(AMOUNT_OF_MEMORY_PER_PT);

            // Create num_pages PTs with 512 PTEs each
            // Pre-allocate buffer for all page table entries to minimize shared memory writes
            let total_ptes = num_pages * 512;
            let mut pte_buffer = vec![0u64; total_ptes]; // Pre-allocate u64 buffer directly
            let mut cached_region_idx: Option<usize> = None; // Cache for optimized region lookup
            let mut pte_index = 0;

            for p in 0..num_pages {
                for i in 0..512 {
                    // Each PTE maps a 4KB page
                    let flags = match Self::get_page_flags(p, i, regions, &mut cached_region_idx) {
                        Ok(region_type) => match region_type {
                            // TODO: parse and load the exe according to its sections and set the
                            // correct flags per section rather than marking the entire binary as executable
                            MemoryRegionType::Code => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                            MemoryRegionType::InitData => self
                                .layout
                                .init_data_permissions
                                .map(|perm| perm.translate_flags())
                                .unwrap_or(DEFAULT_GUEST_BLOB_MEM_FLAGS.translate_flags()),
                            MemoryRegionType::Stack => PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX,
                            #[cfg(feature = "executable_heap")]
                            MemoryRegionType::Heap => PAGE_PRESENT | PAGE_RW | PAGE_USER,
                            #[cfg(not(feature = "executable_heap"))]
                            MemoryRegionType::Heap => PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX,
                            // The guard page is marked RW and User so that if it gets written to we can detect it in the host.
                            // If/when we implement an interrupt handler for page faults in the guest then we can remove this access and handle things properly there.
                            MemoryRegionType::GuardPage => {
                                PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_NX
                            }
                            MemoryRegionType::InputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                            MemoryRegionType::OutputData => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                            MemoryRegionType::Peb => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                            // Host Function Definitions are readonly in the guest
                            MemoryRegionType::HostFunctionDefinitions => PAGE_PRESENT | PAGE_NX,
                            MemoryRegionType::PageTables => PAGE_PRESENT | PAGE_RW | PAGE_NX,
                        },
                        // If there is an error then the address isn't mapped so mark it as not present
                        Err(_) => 0,
                    };
                    let val_to_write = ((p << 21) as u64 | (i << 12) as u64) | flags;
                    // Write u64 directly to buffer - more efficient than converting to bytes
                    pte_buffer[pte_index] = val_to_write.to_le();
                    pte_index += 1;
                }
            }

            // Write the entire PTE buffer to shared memory in a single operation
            // Convert u64 buffer to bytes for writing to shared memory
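            // SAFETY: `pte_buffer` is a live, properly aligned Vec<u64>, so viewing its
            // contents as `len() * 8` bytes is valid for the duration of this borrow.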
            let pte_bytes = unsafe {
                std::slice::from_raw_parts(pte_buffer.as_ptr() as *const u8, pte_buffer.len() * 8)
            };
            shared_mem.copy_from_slice(pte_bytes, SandboxMemoryLayout::PT_OFFSET)?;
            Ok::<(), crate::HyperlightError>(())
        })??;

        Ok(rsp)
    }

    /// Look up the `MemoryRegionType` for the page at page-table index `p`,
    /// entry `i`, caching the last matching region to speed up sequential lookups.
    #[cfg(feature = "init-paging")]
    fn get_page_flags(
        p: usize,
        i: usize,
        regions: &[MemoryRegion],
        cached_region_idx: &mut Option<usize>,
    ) -> Result<MemoryRegionType> {
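        // Guest-physical address covered by PTE `i` of page table `p`
        // (each PT spans 2MiB; each PTE maps a 4KiB page).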
        let addr = (p << 21) + (i << 12);

        // First check if we're still in the cached region
        if let Some(cached_idx) = *cached_region_idx
            && cached_idx < regions.len()
            && regions[cached_idx].guest_region.contains(&addr)
        {
            return Ok(regions[cached_idx].region_type);
        }

        // If not in cached region, try adjacent regions first (common for sequential access)
        if let Some(cached_idx) = *cached_region_idx {
            // Check next region
            if cached_idx + 1 < regions.len()
                && regions[cached_idx + 1].guest_region.contains(&addr)
            {
                *cached_region_idx = Some(cached_idx + 1);
                return Ok(regions[cached_idx + 1].region_type);
            }
        }

        // Fall back to binary search for non-sequential access
        let idx = regions.binary_search_by(|region| {
            if region.guest_region.contains(&addr) {
                std::cmp::Ordering::Equal
            } else if region.guest_region.start > addr {
                std::cmp::Ordering::Greater
            } else {
                std::cmp::Ordering::Less
            }
        });

        match idx {
            Ok(index) => {
                *cached_region_idx = Some(index);
                Ok(regions[index].region_type)
            }
            Err(_) => Err(new_error!("Could not find region for address: {}", addr)),
        }
    }

    /// Create a snapshot with the given mapped regions
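    ///
    /// Illustrative usage (a sketch, not a test; assumes a live manager `mgr`,
    /// a `sandbox_id`, and the sandbox's `mapped_regions`):
    ///
    /// ```ignore
    /// let snap = mgr.snapshot(sandbox_id, mapped_regions.clone())?;
    /// // ... guest execution mutates memory ...
    /// mgr.restore_snapshot(&snap)?;
    /// ```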
    pub(crate) fn snapshot(
        &mut self,
        sandbox_id: u64,
        mapped_regions: Vec<MemoryRegion>,
    ) -> Result<SharedMemorySnapshot> {
        SharedMemorySnapshot::new(&mut self.shared_mem, sandbox_id, mapped_regions)
    }

    /// Restore the sandbox's memory from the given snapshot.
    pub(crate) fn restore_snapshot(&mut self, snapshot: &SharedMemorySnapshot) -> Result<()> {
        if self.shared_mem.mem_size() != snapshot.mem_size() {
            return Err(new_error!(
                "Snapshot size does not match current memory size: {} != {}",
                self.shared_mem.mem_size(),
                snapshot.mem_size()
            ));
        }
        snapshot.restore_from_snapshot(&mut self.shared_mem)?;
        Ok(())
    }
}

impl SandboxMemoryManager<ExclusiveSharedMemory> {
    /// Load the binary represented by `exe_info` into memory, ensuring
    /// all necessary relocations are made prior to completing the load
    /// operation, then create a new `SharedMemory` to store the loaded
    /// binary and a `SandboxMemoryLayout` to describe the layout of that
    /// new `SharedMemory`.
    ///
    /// Returns a `SandboxMemoryManager` wrapping the following:
    ///
    /// - The newly-created `SharedMemory`
    /// - The `SandboxMemoryLayout` describing that `SharedMemory`
    /// - The offset to the entrypoint.
    ///
    /// The manager is returned together with the binary's `LoadInfo`.
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn load_guest_binary_into_memory(
        cfg: SandboxConfiguration,
        exe_info: ExeInfo,
        guest_blob: Option<&GuestBlob>,
    ) -> Result<(Self, super::exe::LoadInfo)> {
        let guest_blob_size = guest_blob.map(|b| b.data.len()).unwrap_or(0);
        let guest_blob_mem_flags = guest_blob.map(|b| b.permissions);

        let layout = SandboxMemoryLayout::new(
            cfg,
            exe_info.loaded_size(),
            usize::try_from(cfg.get_stack_size(&exe_info))?,
            usize::try_from(cfg.get_heap_size(&exe_info))?,
            guest_blob_size,
            guest_blob_mem_flags,
        )?;
        let mut shared_mem = ExclusiveSharedMemory::new(layout.get_memory_size()?)?;

        let load_addr: RawPtr = RawPtr::try_from(layout.get_guest_code_address())?;

        let entrypoint_offset = exe_info.entrypoint();

        let offset = layout.get_code_pointer_offset();

        {
            // write the code pointer to shared memory
            let load_addr_u64: u64 = load_addr.clone().into();
            shared_mem.write_u64(offset, load_addr_u64)?;
        }

        // The load method returns a LoadInfo which can also be a different type once the
        // `unwind_guest` feature is enabled.
        #[allow(clippy::let_unit_value)]
        let load_info = exe_info.load(
            load_addr.clone().try_into()?,
            &mut shared_mem.as_mut_slice()[layout.get_guest_code_offset()..],
        )?;

        let stack_cookie = rand::random::<[u8; STACK_COOKIE_LEN]>();
        let stack_offset = layout.get_top_of_user_stack_offset();
        shared_mem.copy_from_slice(&stack_cookie, stack_offset)?;

        Ok((
            Self::new(
                layout,
                shared_mem,
                load_addr,
                entrypoint_offset,
                stack_cookie,
            ),
            load_info,
        ))
    }

    /// Writes host function details to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_buffer_host_function_details(&mut self, buffer: &[u8]) -> Result<()> {
        let host_function_details = HostFunctionDetails::try_from(buffer).map_err(|e| {
            new_error!(
                "write_buffer_host_function_details: failed to convert buffer to HostFunctionDetails: {}",
                e
            )
        })?;

        let host_function_call_buffer: Vec<u8> = (&host_function_details).try_into().map_err(|_| {
            new_error!(
                "write_buffer_host_function_details: failed to convert HostFunctionDetails to Vec<u8>"
            )
        })?;

        let buffer_size = {
            let size_u64 = self
                .shared_mem
                .read_u64(self.layout.get_host_function_definitions_size_offset())?;
            usize::try_from(size_u64)
        }?;

        if host_function_call_buffer.len() > buffer_size {
            log_then_return!(
                "Host Function Details buffer is too big for the host_function_definitions buffer"
            );
        }

        self.shared_mem.copy_from_slice(
            host_function_call_buffer.as_slice(),
            self.layout.host_function_definitions_buffer_offset,
        )?;
        Ok(())
    }

    /// Write memory layout
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_memory_layout(&mut self) -> Result<()> {
        let mem_size = self.shared_mem.mem_size();
        self.layout.write(
            &mut self.shared_mem,
            SandboxMemoryLayout::BASE_ADDRESS,
            mem_size,
        )
    }

    /// Write init data
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_init_data(&mut self, user_memory: &[u8]) -> Result<()> {
        self.layout
            .write_init_data(&mut self.shared_mem, user_memory)?;
        Ok(())
    }

    /// Wraps `ExclusiveSharedMemory::build`
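    ///
    /// This consumes the exclusive memory and returns a host-facing manager and a
    /// guest-facing manager over the same underlying sandbox memory.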
    pub fn build(
        self,
    ) -> (
        SandboxMemoryManager<HostSharedMemory>,
        SandboxMemoryManager<GuestSharedMemory>,
    ) {
        let (hshm, gshm) = self.shared_mem.build();
        (
            SandboxMemoryManager {
                shared_mem: hshm,
                layout: self.layout,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                mapped_rgns: self.mapped_rgns,
                stack_cookie: self.stack_cookie,
                abort_buffer: self.abort_buffer,
            },
            SandboxMemoryManager {
                shared_mem: gshm,
                layout: self.layout,
                load_addr: self.load_addr.clone(),
                entrypoint_offset: self.entrypoint_offset,
                mapped_rgns: self.mapped_rgns,
                stack_cookie: self.stack_cookie,
                abort_buffer: Vec::new(), // Guest doesn't need abort buffer
            },
        )
    }
}

impl SandboxMemoryManager<HostSharedMemory> {
    /// Check the stack guard of the memory in `shared_mem`, using
    /// `layout` to calculate its location.
    ///
    /// Return `true` if `shared_mem` could be accessed properly and the
    /// guard matches the stack cookie. If it could be accessed properly and
    /// the guard doesn't match, return `false`. Otherwise, return
    /// a descriptive error.
    ///
    /// This method could be an associated function instead. See the
    /// documentation at the bottom of `set_stack_guard` for a description
    /// of why it isn't.
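    ///
    /// The cookie sits at the top of the guest's user stack, so a mismatch is a
    /// strong indication that the guest overflowed its stack.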
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn check_stack_guard(&self) -> Result<bool> {
        let expected = self.stack_cookie;
        let offset = self.layout.get_top_of_user_stack_offset();
        let actual: [u8; STACK_COOKIE_LEN] = self.shared_mem.read(offset)?;
        let cmp_res = expected.iter().cmp(actual.iter());
        Ok(cmp_res == Ordering::Equal)
    }

    /// Get the address of the dispatch function in memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_pointer_to_dispatch_function(&self) -> Result<u64> {
        let guest_dispatch_function_ptr = self
            .shared_mem
            .read::<u64>(self.layout.get_dispatch_function_pointer_offset())?;

        // This pointer is written by the guest library but is accessible to
        // the guest engine so we should bounds check it before we return it.

        let guest_ptr = GuestPtr::try_from(RawPtr::from(guest_dispatch_function_ptr))?;
        guest_ptr.absolute()
    }

    /// Reads a host function call from memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_host_function_call(&mut self) -> Result<FunctionCall> {
        self.shared_mem.try_pop_buffer_into::<FunctionCall>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Writes a function call result to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_response_from_host_method_call(&mut self, res: &ReturnValue) -> Result<()> {
        let function_call_ret_val_buffer = Vec::<u8>::try_from(res).map_err(|_| {
            new_error!(
                "write_response_from_host_method_call: failed to convert ReturnValue to Vec<u8>"
            )
        })?;
        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            function_call_ret_val_buffer.as_slice(),
        )
    }

    /// Writes a guest function call to memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn write_guest_function_call(&mut self, buffer: &[u8]) -> Result<()> {
        validate_guest_function_call_buffer(buffer).map_err(|e| {
            new_error!(
                "Guest function call buffer validation failed: {}",
                e.to_string()
            )
        })?;

        self.shared_mem.push_buffer(
            self.layout.input_data_buffer_offset,
            self.layout.sandbox_memory_config.get_input_data_size(),
            buffer,
        )
    }

    /// Reads a function call result from memory
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn get_guest_function_call_result(&mut self) -> Result<ReturnValue> {
        self.shared_mem.try_pop_buffer_into::<ReturnValue>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Read guest log data from the `SharedMemory` contained within `self`
    #[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
    pub(crate) fn read_guest_log_data(&mut self) -> Result<GuestLogData> {
        self.shared_mem.try_pop_buffer_into::<GuestLogData>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

    /// Get the guest error data
    pub(crate) fn get_guest_error(&mut self) -> Result<GuestError> {
        self.shared_mem.try_pop_buffer_into::<GuestError>(
            self.layout.output_data_buffer_offset,
            self.layout.sandbox_memory_config.get_output_data_size(),
        )
    }

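    /// Pop and discard anything left in the input and output buffers, leaving
    /// both empty.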
    pub(crate) fn clear_io_buffers(&mut self) {
        // Clear the output data buffer
        loop {
            let Ok(_) = self.shared_mem.try_pop_buffer_into::<Vec<u8>>(
                self.layout.output_data_buffer_offset,
                self.layout.sandbox_memory_config.get_output_data_size(),
            ) else {
                break;
            };
        }
        // Clear the input data buffer
        loop {
            let Ok(_) = self.shared_mem.try_pop_buffer_into::<Vec<u8>>(
                self.layout.input_data_buffer_offset,
                self.layout.sandbox_memory_config.get_input_data_size(),
            ) else {
                break;
            };
        }
    }
}