wry_bindgen/
batch.rs

//! Batching system for grouping multiple JS operations into single messages.
//!
//! This module provides the batching infrastructure that allows multiple
//! JS operations to be grouped together for efficient execution.

use alloc::boxed::Box;
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use core::any::Any;
use core::cell::{Ref, RefCell, RefMut};

use crate::encode::{BatchableResult, BinaryDecode};
use crate::ipc::DecodedData;
use crate::ipc::{EncodedData, IPCMessage, MessageType};
use crate::lazy::ThreadLocalKey;
use crate::runtime::WryIPC;
use crate::value::{JSIDX_OFFSET, JSIDX_RESERVED};

/// State for batching operations and object storage.
/// Every evaluation is a batch - it may just have one operation.
///
/// Uses a free-list strategy for heap ID allocation to stay in sync with the JS heap.
/// Also stores exported Rust structs and callback functions.
pub struct Runtime {
    /// The encoder accumulating batched operations
    encoder: EncodedData,
    /// Stack of freed IDs available for reuse
    free_ids: Vec<u64>,
    /// Next ID to allocate if free_ids is empty
    max_id: u64,
    /// A stack of ongoing function encodings with the IDs
    /// that need to be freed after each one is done
    ids_to_free: Vec<Vec<u64>>,
    /// Whether we're inside a batch() call
    is_batching: bool,
    /// Borrow stack pointer - uses indices 1-127, growing downward from JSIDX_OFFSET (128) to 1.
    /// Reset after each operation completes.
    borrow_stack_pointer: u64,
    /// Frame stack for nested operations - saves borrow stack pointers
    borrow_frame_stack: Vec<u64>,
    /// Count of IDs reserved as placeholders during the current batch.
    /// This is sent to JS so it can skip these IDs during nested callback allocations.
    reserved_placeholder_count: u32,
    /// Type cache for avoiding resending type definitions to JS
    type_cache: BTreeMap<Vec<u8>, u32>,
    /// Next type ID to assign
    next_type_id: u32,
    /// Exported Rust structs stored by handle
    objects: BTreeMap<u32, Box<dyn Any>>,
    /// Next handle to assign for exported objects
    next_object_handle: u32,
    /// The IPC layer used to communicate with the JS runtime
    ipc: WryIPC,
    /// The ID of the webview this runtime is associated with
    webview_id: u64,
    /// Thread locals associated with the runtime
    thread_locals: BTreeMap<ThreadLocalKey<'static>, Box<dyn Any>>,
}

impl Runtime {
    pub(crate) fn new(ipc: WryIPC, webview_id: u64) -> Self {
        Self {
            encoder: Self::new_encoder_for_evaluate(),
            free_ids: Vec::new(),
            // Start allocating heap IDs from JSIDX_RESERVED to match JS heap
            max_id: JSIDX_RESERVED,
            ids_to_free: Vec::new(),
            is_batching: false,
            // Borrow stack starts at JSIDX_OFFSET (128) and grows downward to 1
            borrow_stack_pointer: JSIDX_OFFSET,
            // Frame stack starts empty
            borrow_frame_stack: Vec::new(),
            // No reserved placeholders initially
            reserved_placeholder_count: 0,
            // Type cache starts empty
            type_cache: BTreeMap::new(),
            // Type IDs start at 0
            next_type_id: 0,
            // Object store starts empty
            objects: BTreeMap::new(),
            // Object handles start at 0
            next_object_handle: 0,
            ipc,
            webview_id,
            thread_locals: BTreeMap::new(),
        }
    }

    fn new_encoder_for_evaluate() -> EncodedData {
        let mut encoder = EncodedData::new();
        encoder.push_u8(MessageType::Evaluate as u8);
        encoder
    }

    /// Get the next heap ID for placeholder allocation.
    /// Uses free-list strategy: reuses freed IDs first, then allocates new ones.
    pub fn get_next_heap_id(&mut self) -> u64 {
        // Reuse a freed ID first so allocation stays in sync with the JS heap
        if let Some(id) = self.free_ids.pop() {
            return id;
        }
        let id = self.max_id;
        self.max_id += 1;
        id
    }

    /// Get the next heap ID for a batched return value placeholder.
    /// This also tracks the reserved placeholder count so JS can skip these IDs
    /// during nested callback allocations.
    pub fn get_next_placeholder_id(&mut self) -> u64 {
        let id = self.get_next_heap_id();
        if self.is_batching {
            self.reserved_placeholder_count += 1;
        }
        id
    }

    /// Get the next borrow ID from the borrow stack (indices 1-127).
    /// The borrow stack grows downward from JSIDX_OFFSET (128) toward 1.
    /// Panics if the borrow stack overflows (more than 127 borrowed refs in one operation).
    pub fn get_next_borrow_id(&mut self) -> u64 {
        if self.borrow_stack_pointer <= 1 {
            panic!("Borrow stack overflow: too many borrowed references in a single operation");
        }
        self.borrow_stack_pointer -= 1;
        self.borrow_stack_pointer
    }

    /// Push a borrow frame before a nested operation that may use borrowed refs.
    /// This saves the current borrow stack pointer so we can restore it later.
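    ///
    /// Frames pair with [`Self::pop_borrow_frame`]. A minimal sketch of the
    /// intended discipline (the `runtime` variable is hypothetical):
    ///
    /// ```ignore
    /// runtime.push_borrow_frame();
    /// let id = runtime.get_next_borrow_id(); // valid until the frame is popped
    /// // ... encode a nested operation that uses `id` ...
    /// runtime.pop_borrow_frame(); // restores the pointer; `id` may be reused
    /// ```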
    pub fn push_borrow_frame(&mut self) {
        self.borrow_frame_stack.push(self.borrow_stack_pointer);
    }

    /// Pop a borrow frame after a nested operation completes.
    /// This restores the borrow stack pointer to where it was before the nested operation.
    pub fn pop_borrow_frame(&mut self) {
        if let Some(saved_pointer) = self.borrow_frame_stack.pop() {
            self.borrow_stack_pointer = saved_pointer;
        } else {
            panic!("pop_borrow_frame called with empty frame stack");
        }
    }

    /// Release a heap ID back to the free-list and queue it for JS drop.
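    ///
    /// Returns `Some(id)` when the caller should immediately drop the JS-side
    /// reference, or `None` when the drop is deferred until the enclosing
    /// operation finishes (see `pop_and_release_ids`).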
    pub fn release_heap_id(&mut self, id: u64) -> Option<u64> {
        // Never release reserved IDs
        if id < JSIDX_RESERVED {
            unreachable!("Attempted to release reserved JS heap ID {id}");
        }

        debug_assert!(
            !self.free_ids.contains(&id) && !self.ids_to_free.iter().any(|ids| ids.contains(&id)),
            "Double-free detected for heap ID {id}"
        );
        match self.ids_to_free.last_mut() {
            Some(ids) => {
                ids.push(id);
                None
            }
            None => {
                self.free_ids.push(id);
                Some(id)
            }
        }
    }

    /// Take the message data and reset the batch for reuse.
    /// Includes any pending drops at the start of the message.
    /// Prepends the reserved placeholder count so JS can skip those IDs during nested allocations.
    pub(crate) fn take_message(&mut self) -> IPCMessage {
        let reserved_count = self.take_reserved_placeholder_count();
        let mut encoder = self.take_encoder();
        encoder.prepend_u32(reserved_count);
        IPCMessage::new(encoder.to_bytes())
    }

    pub(crate) fn is_empty(&self) -> bool {
        // 12 bytes for offsets + 1 byte for message type
        self.encoder.byte_len() <= 13
    }

    pub(crate) fn push_ids_to_free(&mut self) {
        self.ids_to_free.push(Vec::new());
    }

    pub(crate) fn pop_and_release_ids(&mut self) -> Vec<u64> {
        let mut to_free = Vec::new();
        if let Some(ids) = self.ids_to_free.pop() {
            for id in ids {
                if let Some(freed_id) = self.release_heap_id(id) {
                    to_free.push(freed_id);
                }
            }
        }
        to_free
    }

    pub(crate) fn set_batching(&mut self, batching: bool) {
        self.is_batching = batching;
    }

    pub(crate) fn is_batching(&self) -> bool {
        self.is_batching
    }

    /// Take and reset the reserved placeholder count.
    /// Called when building a message to send to JS.
    pub(crate) fn take_reserved_placeholder_count(&mut self) -> u32 {
        core::mem::take(&mut self.reserved_placeholder_count)
    }

    pub(crate) fn take_encoder(&mut self) -> EncodedData {
        core::mem::replace(&mut self.encoder, Self::new_encoder_for_evaluate())
    }

    pub(crate) fn extend_encoder(&mut self, other: &EncodedData) {
        // Manually extend to avoid adding an extra message type byte
        self.encoder.u8_buf.extend_from_slice(&other.u8_buf[1..]);
        self.encoder.u16_buf.extend_from_slice(&other.u16_buf);
        self.encoder.u32_buf.extend_from_slice(&other.u32_buf);
        self.encoder.str_buf.extend_from_slice(&other.str_buf);
    }

    /// Get or create a type ID for the given type definition bytes.
    /// Returns `(type_id, is_cached)` where `is_cached` is true if the type was already in the cache.
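    ///
    /// The first call for a given definition caches it and returns `(id, false)`;
    /// later calls with the same bytes return `(id, true)`, letting the caller
    /// skip resending the definition to JS.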
    pub(crate) fn get_or_create_type_id(&mut self, type_bytes: Vec<u8>) -> (u32, bool) {
        if let Some(&id) = self.type_cache.get(&type_bytes) {
            (id, true)
        } else {
            let id = self.next_type_id;
            self.next_type_id += 1;
            self.type_cache.insert(type_bytes, id);
            (id, false)
        }
    }

    /// Insert an exported object and return its handle.
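    ///
    /// A sketch of the handle round trip (`MyState` and its `counter` field are
    /// hypothetical):
    ///
    /// ```ignore
    /// let handle = runtime.insert_object(MyState::default());
    /// runtime.get_object_mut::<MyState>(handle).counter += 1;
    /// let state: MyState = runtime.remove_object(handle);
    /// ```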
    pub(crate) fn insert_object<T: 'static>(&mut self, obj: T) -> u32 {
        let handle = self.next_object_handle;
        self.next_object_handle = self.next_object_handle.wrapping_add(1);
        self.objects.insert(handle, Box::new(RefCell::new(obj)));
        handle
    }

    /// Take a thread-local variable, removing it from the runtime.
    pub(crate) fn take_thread_local<T: 'static>(&mut self, key: ThreadLocalKey<'static>) -> T {
        *self
            .thread_locals
            .remove(&key)
            .expect("thread local not found")
            .downcast::<T>()
            .expect("type mismatch")
    }

    /// Insert a thread-local variable.
    pub(crate) fn insert_thread_local<T: 'static>(
        &mut self,
        key: ThreadLocalKey<'static>,
        value: T,
    ) {
        self.thread_locals.insert(key, Box::new(value));
    }

    /// Check if a thread-local variable exists.
    pub(crate) fn has_thread_local(&self, key: ThreadLocalKey<'static>) -> bool {
        self.thread_locals.contains_key(&key)
    }

    /// Get a reference to an exported object.
    pub(crate) fn get_object<T: 'static>(&self, handle: u32) -> Ref<'_, T> {
        let boxed = self.objects.get(&handle).expect("invalid handle");
        let cell = boxed.downcast_ref::<RefCell<T>>().expect("type mismatch");
        cell.borrow()
    }

    /// Get a mutable reference to an exported object.
    pub(crate) fn get_object_mut<T: 'static>(&self, handle: u32) -> RefMut<'_, T> {
        let boxed = self.objects.get(&handle).expect("invalid handle");
        let cell = boxed.downcast_ref::<RefCell<T>>().expect("type mismatch");
        cell.borrow_mut()
    }

    /// Remove an exported object and return it.
    pub(crate) fn remove_object<T: 'static>(&mut self, handle: u32) -> T {
        let boxed = self.objects.remove(&handle).expect("invalid handle");
        let cell = boxed.downcast::<RefCell<T>>().expect("type mismatch");
        cell.into_inner()
    }

    /// Remove an exported object without returning it.
    pub(crate) fn remove_object_untyped(&mut self, handle: u32) -> Option<Box<dyn Any>> {
        self.objects.remove(&handle)
    }

    /// Get a reference to the IPC layer.
    pub(crate) fn ipc(&self) -> &WryIPC {
        &self.ipc
    }

    /// Get the webview ID associated with this runtime.
    pub(crate) fn webview_id(&self) -> u64 {
        self.webview_id
    }
}

thread_local! {
    /// Thread-local stack of runtime states - the last entry is the currently active runtime
    pub(crate) static RUNTIME: RefCell<Vec<Runtime>> = const { RefCell::new(Vec::new()) };
}

fn push_runtime(runtime: Runtime) {
    RUNTIME.with(|state| {
        state.borrow_mut().push(runtime);
    });
}

fn pop_runtime() -> Runtime {
    RUNTIME.with(|state| {
        state
            .borrow_mut()
            .pop()
            .expect("No runtime available to pop")
    })
}

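/// Run `run` with `runtime` pushed as the active runtime for this thread, then
/// pop it back off and return it alongside the closure's output. Runtimes nest:
/// `with_runtime` always sees the most recently pushed one.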
pub(crate) fn in_runtime<O>(runtime: Runtime, run: impl FnOnce() -> O) -> (Runtime, O) {
    push_runtime(runtime);
    let out = run();
    let runtime = pop_runtime();
    (runtime, out)
}

pub(crate) fn with_runtime<R>(f: impl FnOnce(&mut Runtime) -> R) -> R {
    RUNTIME.with(|state| {
        let mut state = state.borrow_mut();
        f(state.last_mut().expect("No runtime available"))
    })
}

/// Check if we're currently inside a batch() call.
pub fn is_batching() -> bool {
    with_runtime(|state| state.is_batching())
}

/// Queue a JS drop operation for a heap ID.
/// This is called when a `JsValue` is dropped.
pub(crate) fn queue_js_drop(id: u64) {
    debug_assert!(
        id >= JSIDX_RESERVED,
        "Attempted to drop reserved JS heap ID {id}"
    );

    let runtime_already_dropped = RUNTIME.with(|state| state.borrow().is_empty());
    // If the runtime has already been dropped, we don't need to drop the JS reference
    if runtime_already_dropped {
        return;
    }

    let id = with_runtime(|state| state.release_heap_id(id));
    if let Some(id) = id {
        crate::js_helpers::js_drop_heap_ref(id);
    }
}

/// Add an operation to the current batch.
pub(crate) fn add_operation(
    encoder: &mut EncodedData,
    fn_id: u32,
    add_args: impl FnOnce(&mut EncodedData),
) {
    encoder.push_u32(fn_id);
    add_args(encoder);
}

/// Core function for executing JavaScript calls.
///
/// For each call:
/// 1. Encode the operation into the current batch
/// 2. If the return value is needed immediately, flush the batch and return the result
/// 3. Otherwise, return a pending placeholder via `BatchableResult`
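///
/// A sketch of how a generated binding might call this (`FN_ALERT` and the
/// `push_str` encoding method are hypothetical):
///
/// ```ignore
/// pub fn alert(message: &str) {
///     run_js_sync::<()>(FN_ALERT, |args| {
///         args.push_str(message); // encode the argument into the batch
///     });
/// }
/// ```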
pub(crate) fn run_js_sync<R: BatchableResult>(
    fn_id: u32,
    add_args: impl FnOnce(&mut EncodedData),
) -> R {
    // Step 1: Encode the operation into the batch and get a placeholder for non-flush types.
    // We take the current encoder out of the thread-local state to avoid borrowing issues
    // and then put it back after adding the operation. Drops or other calls may happen while
    // we are encoding, but they should be queued after this operation.
    let mut batch = with_runtime(|state| {
        // Push a new operation into the batch
        state.push_ids_to_free();
        state.take_encoder()
    });
    add_operation(&mut batch, fn_id, add_args);

    // Check if any encoded argument requires immediate flush (e.g., stack-allocated callbacks)
    let needs_flush = batch.needs_flush;

    with_runtime(|state| {
        let encoded_during_op = core::mem::replace(&mut state.encoder, batch);
        state.extend_encoder(&encoded_during_op);
    });

    // Try to get a placeholder for opaque types that don't need a flush.
    // This also bumps the reserved placeholder count to keep heap IDs in sync.
    let get_placeholder = || with_runtime(|state| R::try_placeholder(state));

    // Must flush if: not batching, or if the operation requires immediate execution
    // (e.g., stack-allocated callbacks that must be invoked before returning)
    let result = if !is_batching() || needs_flush {
        flush_and_then(|mut data| {
            let response = get_placeholder()
                .unwrap_or_else(|| R::decode(&mut data).expect("Failed to decode return value"));
            assert!(
                data.is_empty(),
                "Extra data remaining after decoding response"
            );
            response
        })
    } else {
        get_placeholder().unwrap_or_else(|| flush_and_return::<R>())
    };

    // After running, free any queued IDs for this operation
    let ids = with_runtime(|state| state.pop_and_release_ids());
    for id in ids {
        crate::js_helpers::js_drop_heap_ref(id);
    }

    result
}

/// Flush the current batch and return the decoded result.
pub(crate) fn flush_and_return<R: BinaryDecode>() -> R {
    flush_and_then(|mut data| {
        let response = R::decode(&mut data).expect("Failed to decode return value");
        assert!(
            data.is_empty(),
            "Extra data remaining after decoding response"
        );
        response
    })
}

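/// Send the current batch to JS, then pump the runtime until the response for
/// this batch arrives and decode it with `then`.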
pub(crate) fn flush_and_then<R>(then: impl for<'a> Fn(DecodedData<'a>) -> R) -> R {
    use crate::runtime::WryBindgenEvent;

    let batch_msg = with_runtime(|state| state.take_message());

    // Send and wait for result
    with_runtime(|runtime| {
        (runtime.ipc().proxy)(WryBindgenEvent::ipc(runtime.webview_id(), batch_msg))
    });
    loop {
        if let Some(result) = crate::runtime::progress_js_with(&then) {
            return result;
        }
    }
}

/// Execute operations inside a batch. Operations that return opaque types (like `JsValue`)
/// will be batched and executed together. Operations that return non-opaque types will
/// flush the batch to get the actual result.
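///
/// A usage sketch, assuming generated bindings `js_create_element` and
/// `js_set_text` (both hypothetical):
///
/// ```ignore
/// batch(|| {
///     let div = js_create_element("div"); // returns an opaque JsValue: queued
///     js_set_text(&div, "hello");         // queued into the same IPC message
/// }); // the whole batch is flushed here, in one message
/// ```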
pub fn batch<R, F: FnOnce() -> R>(f: F) -> R {
    let currently_batching = is_batching();
    // Start batching
    with_runtime(|state| state.set_batching(true));

    // Execute the closure
    let result = f();

    if !currently_batching {
        // Flush any remaining batched operations
        force_flush();
    }

    // End batching
    with_runtime(|state| state.set_batching(currently_batching));

    result
}

/// Like `batch`, but async.
pub fn batch_async<'a, R, F: core::future::Future<Output = R> + 'a>(
    f: F,
) -> impl core::future::Future<Output = R> + 'a {
    let mut f = Box::pin(f);
    core::future::poll_fn(move |ctx| batch(|| f.as_mut().poll(ctx)))
}

/// Flush any pending batched operations to JS.
pub fn force_flush() {
    let has_pending = with_runtime(|state| !state.is_empty());
    if has_pending {
        flush_and_return::<()>();
    }
}