mlua_codemp_patch/state/util.rs

use std::os::raw::c_int;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::ptr;
use std::sync::Arc;

use crate::error::{Error, Result};
use crate::state::{ExtraData, RawLua};
use crate::util::{self, get_internal_metatable, WrappedFailure};

const WRAPPED_FAILURE_POOL_SIZE: usize = 64;

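// Temporarily swaps the `lua_State` pointer stored in `RawLua` for the given one and
// restores the previous pointer when the guard is dropped.
//
// A minimal usage sketch (illustrative; real call sites live elsewhere in the crate):
//     let _guard = StateGuard::new(&raw_lua, other_state);
//     // code here observes `other_state` through `raw_lua.state`
//     // the previous pointer is restored when `_guard` goes out of scope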
pub(super) struct StateGuard<'a>(&'a RawLua, *mut ffi::lua_State);

impl<'a> StateGuard<'a> {
    pub(super) fn new(inner: &'a RawLua, mut state: *mut ffi::lua_State) -> Self {
        state = inner.state.replace(state);
        Self(inner, state)
    }
}

impl<'a> Drop for StateGuard<'a> {
    fn drop(&mut self) {
        self.0.state.set(self.1);
    }
}

// An optimized version of `callback_error` that does not allocate `WrappedFailure` userdata
// and instead reuses unused values from previous calls (or allocates a new one).
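//
// A sketch of how a C callback trampoline might drive this helper (illustrative only;
// the trampoline below is an assumption, not an actual call site in this crate):
//
//     unsafe extern "C" fn trampoline(state: *mut ffi::lua_State) -> c_int {
//         callback_error_ext(state, ptr::null_mut(), |_extra, nargs| {
//             // convert the `nargs` Lua arguments, run the Rust callback,
//             // push its results and return their count
//             Ok(nargs)
//         })
//     }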
pub(super) unsafe fn callback_error_ext<F, R>(
    state: *mut ffi::lua_State,
    mut extra: *mut ExtraData,
    f: F,
) -> R
where
    F: FnOnce(*mut ExtraData, c_int) -> Result<R>,
{
    if extra.is_null() {
        extra = ExtraData::get(state);
    }

    let nargs = ffi::lua_gettop(state);

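    // A `WrappedFailure` slot reserved ahead of running the callback: either freshly
    // allocated userdata kept at the bottom of the stack (`New`) or an index of a
    // pooled value on the auxiliary ref thread (`Existing`).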
    enum PreallocatedFailure {
        New(*mut WrappedFailure),
        Existing(i32),
    }

    impl PreallocatedFailure {
        unsafe fn reserve(state: *mut ffi::lua_State, extra: *mut ExtraData) -> Self {
            match (*extra).wrapped_failure_pool.pop() {
                Some(index) => PreallocatedFailure::Existing(index),
                None => {
                    // We need to check the stack for Luau in case the callback is called from an interrupt
                    // See https://github.com/Roblox/luau/issues/446 and mlua #142 and #153
                    #[cfg(feature = "luau")]
                    ffi::lua_rawcheckstack(state, 2);
                    // Place it at the beginning of the stack
                    let ud = WrappedFailure::new_userdata(state);
                    ffi::lua_insert(state, 1);
                    PreallocatedFailure::New(ud)
                }
            }
        }

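        // Turn the reserved slot into a raw `WrappedFailure` pointer, leaving the
        // userdata as the only value on the callback stack.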
        unsafe fn r#use(&self, state: *mut ffi::lua_State, extra: *mut ExtraData) -> *mut WrappedFailure {
            let ref_thread = (*extra).ref_thread;
            match *self {
                PreallocatedFailure::New(ud) => {
                    ffi::lua_settop(state, 1);
                    ud
                }
                PreallocatedFailure::Existing(index) => {
                    ffi::lua_settop(state, 0);
                    #[cfg(feature = "luau")]
                    ffi::lua_rawcheckstack(state, 2);
                    ffi::lua_pushvalue(ref_thread, index);
                    ffi::lua_xmove(ref_thread, state, 1);
                    ffi::lua_pushnil(ref_thread);
                    ffi::lua_replace(ref_thread, index);
                    (*extra).ref_free.push(index);
                    ffi::lua_touserdata(state, -1) as *mut WrappedFailure
                }
            }
        }

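        // The callback succeeded, so the reserved slot is unused: return it to the
        // pool if there is room, otherwise drop it.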
        unsafe fn release(self, state: *mut ffi::lua_State, extra: *mut ExtraData) {
            let ref_thread = (*extra).ref_thread;
            match self {
                PreallocatedFailure::New(_) => {
                    if (*extra).wrapped_failure_pool.len() < WRAPPED_FAILURE_POOL_SIZE {
                        ffi::lua_rotate(state, 1, -1);
                        ffi::lua_xmove(state, ref_thread, 1);
                        let index = ref_stack_pop(extra);
                        (*extra).wrapped_failure_pool.push(index);
                    } else {
                        ffi::lua_remove(state, 1);
                    }
                }
                PreallocatedFailure::Existing(index) => {
                    if (*extra).wrapped_failure_pool.len() < WRAPPED_FAILURE_POOL_SIZE {
                        (*extra).wrapped_failure_pool.push(index);
                    } else {
                        ffi::lua_pushnil(ref_thread);
                        ffi::lua_replace(ref_thread, index);
                        (*extra).ref_free.push(index);
                    }
                }
            }
        }
    }

    // We cannot shadow Rust errors with Lua ones, so we need to reserve pre-allocated memory
    // to store a wrapped failure (error or panic) *before* we proceed.
    let prealloc_failure = PreallocatedFailure::reserve(state, extra);

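    // Three outcomes: the callback succeeded (return its value), it returned a Rust
    // error (wrap it together with a Lua traceback and raise it as a Lua error), or it
    // panicked (wrap the panic payload and raise it as a Lua error as well).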
    match catch_unwind(AssertUnwindSafe(|| f(extra, nargs))) {
        Ok(Ok(r)) => {
            // Return unused `WrappedFailure` to the pool
            prealloc_failure.release(state, extra);
            r
        }
        Ok(Err(err)) => {
            let wrapped_error = prealloc_failure.r#use(state, extra);

            // Build `CallbackError` with traceback
            let traceback = if ffi::lua_checkstack(state, ffi::LUA_TRACEBACK_STACK) != 0 {
                ffi::luaL_traceback(state, state, ptr::null(), 0);
                let traceback = util::to_string(state, -1);
                ffi::lua_pop(state, 1);
                traceback
            } else {
                "<not enough stack space for traceback>".to_string()
            };
            let cause = Arc::new(err);
            ptr::write(
                wrapped_error,
                WrappedFailure::Error(Error::CallbackError { traceback, cause }),
            );
            get_internal_metatable::<WrappedFailure>(state);
            ffi::lua_setmetatable(state, -2);

            ffi::lua_error(state)
        }
        Err(p) => {
            let wrapped_panic = prealloc_failure.r#use(state, extra);
            ptr::write(wrapped_panic, WrappedFailure::Panic(Some(p)));
            get_internal_metatable::<WrappedFailure>(state);
            ffi::lua_setmetatable(state, -2);
            ffi::lua_error(state)
        }
    }
}

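// Moves the value on top of the ref thread stack into a slot and returns the slot index,
// preferring a previously freed slot and growing the stack when needed.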
pub(super) unsafe fn ref_stack_pop(extra: *mut ExtraData) -> c_int {
    let extra = &mut *extra;
    if let Some(free) = extra.ref_free.pop() {
        ffi::lua_replace(extra.ref_thread, free);
        return free;
    }

    // Try to grow max stack size
    if extra.ref_stack_top >= extra.ref_stack_size {
        let mut inc = extra.ref_stack_size; // Try to double stack size
        while inc > 0 && ffi::lua_checkstack(extra.ref_thread, inc) == 0 {
            inc /= 2;
        }
        if inc == 0 {
            // Pop the item on top of the stack to avoid leaking it and to let destructors
            // run during unwinding.
            ffi::lua_pop(extra.ref_thread, 1);
            let top = extra.ref_stack_top;
            // It is a user error to create enough references to exhaust the Lua max stack size for
            // the ref thread.
            panic!("cannot create a Lua reference, out of auxiliary stack space (used {top} slots)");
        }
        extra.ref_stack_size += inc;
    }
    extra.ref_stack_top += 1;
    extra.ref_stack_top
}