ext_php_rs/zend/bailout_guard.rs
//! Provides cleanup guarantees for values that need to be dropped even when a PHP bailout occurs.
//!
//! When PHP triggers a bailout (via `exit()`, a fatal error, etc.), it uses `longjmp`, which
//! bypasses Rust's normal stack unwinding. This means destructors for stack-allocated values
//! won't run. `BailoutGuard` solves this by heap-allocating values and registering cleanup
//! callbacks that run when a bailout is caught.
//!
//! # Example
//!
//! ```ignore
//! use ext_php_rs::zend::BailoutGuard;
//!
//! #[php_function]
//! pub fn my_function(callback: ZendCallable) {
//!     // Wrap resources that MUST be cleaned up in a BailoutGuard
//!     let resource = BailoutGuard::new(ExpensiveResource::new());
//!
//!     // Use the resource (BailoutGuard implements Deref/DerefMut)
//!     resource.do_something();
//!
//!     // If the callback triggers exit(), the resource will still be cleaned up
//!     let _ = callback.try_call(vec![]);
//! }
//! ```

use std::cell::RefCell;
use std::ops::{Deref, DerefMut};

/// A cleanup entry: `(callback, active)`. The `active` flag is set to `false` when
/// the guard is dropped normally, so we don't double-drop.
type CleanupEntry = (Box<dyn FnOnce()>, bool);

thread_local! {
    /// Stack of cleanup callbacks to run when a bailout is caught.
    static CLEANUP_STACK: RefCell<Vec<CleanupEntry>> = const { RefCell::new(Vec::new()) };
}

/// A guard that ensures a value is dropped even if a PHP bailout occurs.
///
/// `BailoutGuard` heap-allocates the wrapped value and registers a cleanup callback.
/// If a bailout occurs, the cleanup runs before the bailout is re-triggered.
/// If the guard is dropped normally, the cleanup is cancelled and the value is dropped.
///
/// # Performance Note
///
/// This incurs a heap allocation. Only use it for values that absolutely must be
/// cleaned up (file handles, network connections, locks, etc.). For simple values,
/// the overhead isn't worth it.
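///
/// # Example
///
/// A minimal sketch, assuming a hypothetical `Connection` type that must not be
/// leaked even if PHP bails out:
///
/// ```ignore
/// // `Connection` is a stand-in resource type used only for this sketch.
/// let conn = BailoutGuard::new(Connection::open("app_db")?);
/// conn.query("SELECT 1")?; // Deref forwards calls to the wrapped value
/// // If a bailout happens before this scope ends, the connection is still dropped.
/// ```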
pub struct BailoutGuard<T> {
    /// Pointer to the heap-allocated value. Using a raw pointer because we need
    /// to pass it to the cleanup callback.
    value: *mut T,
    /// Index in the cleanup stack. Used to deactivate the cleanup on normal drop.
    index: usize,
}

// SAFETY: BailoutGuard can be sent between threads if T can.
// The cleanup stack is thread-local, so each thread has its own.
unsafe impl<T: Send> Send for BailoutGuard<T> {}

impl<T: 'static> BailoutGuard<T> {
    /// Creates a new `BailoutGuard` wrapping the given value.
    ///
    /// The value is heap-allocated and a cleanup callback is registered.
    /// If a bailout occurs, the value will be dropped. If this guard is
    /// dropped normally, the value is dropped and the cleanup is cancelled.
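    ///
    /// A small sketch (standard-library types only): the value lives on the heap,
    /// so moving the guard just moves a pointer and an index, and the registered
    /// cleanup keeps pointing at the same allocation.
    ///
    /// ```ignore
    /// let guard = BailoutGuard::new(vec![1, 2, 3]);
    /// let moved = guard;          // cheap move; the cleanup registration is unaffected
    /// assert_eq!(moved.len(), 3); // Deref to the wrapped Vec
    /// ```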
    pub fn new(value: T) -> Self {
        let boxed = Box::new(value);
        let ptr = Box::into_raw(boxed);

        let index = CLEANUP_STACK.with(|stack| {
            let mut stack = stack.borrow_mut();
            let idx = stack.len();
            let ptr_copy = ptr;
            // Register cleanup that drops the heap-allocated value
            stack.push((
                Box::new(move || {
                    // SAFETY: This only runs if bailout occurred and normal drop didn't.
                    // The pointer is valid because we heap-allocated it.
                    unsafe {
                        drop(Box::from_raw(ptr_copy));
                    }
                }),
                true, // active
            ));
            idx
        });

        Self { value: ptr, index }
    }

    /// Returns a reference to the wrapped value.
    #[inline]
    #[must_use]
    pub fn get(&self) -> &T {
        // SAFETY: The pointer is valid as long as self exists.
        unsafe { &*self.value }
    }

    /// Returns a mutable reference to the wrapped value.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: The pointer is valid as long as self exists, and we have &mut self.
        unsafe { &mut *self.value }
    }

    /// Consumes the guard and returns the wrapped value.
    ///
    /// The cleanup callback is cancelled.
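    ///
    /// A small sketch:
    ///
    /// ```ignore
    /// let guard = BailoutGuard::new(String::from("hello"));
    /// let s = guard.into_inner(); // cleanup cancelled; `s` is an ordinary String again
    /// assert_eq!(s, "hello");
    /// ```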
    #[must_use]
    pub fn into_inner(self) -> T {
        // Deactivate cleanup
        CLEANUP_STACK.with(|stack| {
            let mut stack = stack.borrow_mut();
            if self.index < stack.len() {
                stack[self.index].1 = false;
            }
        });

        // Take ownership of the value
        // SAFETY: We're consuming self, so no one else can access the pointer.
        let value = unsafe { *Box::from_raw(self.value) };

        // Prevent Drop from running (we've already handled cleanup)
        std::mem::forget(self);

        value
    }
}

impl<T> Deref for BailoutGuard<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: The pointer is valid as long as self exists.
        unsafe { &*self.value }
    }
}

impl<T> DerefMut for BailoutGuard<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: The pointer is valid as long as self exists, and we have &mut self.
        unsafe { &mut *self.value }
    }
}

impl<T> Drop for BailoutGuard<T> {
    fn drop(&mut self) {
        // Deactivate cleanup callback (we're dropping normally)
        CLEANUP_STACK.with(|stack| {
            let mut stack = stack.borrow_mut();
            if self.index < stack.len() {
                stack[self.index].1 = false;
            }
        });

        // Drop the heap-allocated value
        // SAFETY: We're in Drop, so no one else can access the pointer.
        unsafe {
            drop(Box::from_raw(self.value));
        }
    }
}

/// Runs all registered bailout cleanup callbacks.
///
/// This should be called after catching a bailout and before re-triggering it.
/// Only active cleanups (those whose guards haven't been dropped) are run.
///
/// # Note
///
/// This function is automatically called by the generated handler code when a
/// bailout is caught. You typically don't need to call this directly.
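///
/// # Example
///
/// A sketch of how a bailout-catching wrapper might use this. Here `try_catch`
/// and `bailout` stand in for whatever catch/re-raise mechanism the handler uses;
/// they are assumptions for illustration, not part of this module:
///
/// ```ignore
/// // Hypothetical wrapper; the names are illustrative.
/// if try_catch(|| call_user_code()).is_err() {
///     // A bailout was caught: drop everything still guarded, then re-raise it.
///     run_bailout_cleanups();
///     unsafe { bailout() };
/// }
/// ```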
#[doc(hidden)]
pub fn run_bailout_cleanups() {
    // Move the entries out of the thread-local first so the RefCell isn't
    // borrowed while the cleanups run (a cleanup could itself touch CLEANUP_STACK).
    let entries = CLEANUP_STACK.with(|stack| std::mem::take(&mut *stack.borrow_mut()));

    // Run all active cleanups in reverse order (LIFO)
    for (cleanup, active) in entries.into_iter().rev() {
        if active {
            cleanup();
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Creates a drop counter that increments the given `AtomicUsize` on drop.
    fn make_drop_counter(counter: Arc<AtomicUsize>) -> impl Drop + 'static {
        struct DropCounter(Arc<AtomicUsize>);
        impl Drop for DropCounter {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        DropCounter(counter)
    }

    #[test]
    fn test_normal_drop() {
        let drop_count = Arc::new(AtomicUsize::new(0));
        // Clear any leftover cleanup entries from previous tests
        CLEANUP_STACK.with(|stack| stack.borrow_mut().clear());

        {
            let _guard = BailoutGuard::new(make_drop_counter(Arc::clone(&drop_count)));
            assert_eq!(drop_count.load(Ordering::SeqCst), 0);
        }

        // Value should be dropped when the guard goes out of scope
        assert_eq!(drop_count.load(Ordering::SeqCst), 1);

        // Cleanup stack should be empty, or its entry should be deactivated
        CLEANUP_STACK.with(|stack| {
            assert!(stack.borrow().is_empty() || !stack.borrow()[0].1);
        });
    }

    #[test]
    fn test_bailout_cleanup() {
        let drop_count = Arc::new(AtomicUsize::new(0));
        // Clear any leftover cleanup entries from previous tests
        CLEANUP_STACK.with(|stack| stack.borrow_mut().clear());

        // Simulate what happens during a bailout:
        // 1. Guard is created
        // 2. Bailout occurs (longjmp) - the guard's Drop doesn't run
        // 3. run_bailout_cleanups() is called

        let guard = BailoutGuard::new(make_drop_counter(Arc::clone(&drop_count)));

        // Simulate the bailout - don't drop the guard normally
        std::mem::forget(guard);

        // Value hasn't been dropped yet
        assert_eq!(drop_count.load(Ordering::SeqCst), 0);

        // Run the bailout cleanups (simulating what the catch path does)
        run_bailout_cleanups();

        // Value should now be dropped
        assert_eq!(drop_count.load(Ordering::SeqCst), 1);
    }
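
    /// Illustrates the guarantee documented on `run_bailout_cleanups`: entries
    /// deactivated by a normal drop are skipped, so the value is not dropped twice.
    #[test]
    fn test_deactivated_cleanup_is_skipped() {
        let drop_count = Arc::new(AtomicUsize::new(0));
        // Clear any leftover cleanup entries from previous tests
        CLEANUP_STACK.with(|stack| stack.borrow_mut().clear());

        // Dropping the guard normally runs Drop, which deactivates its cleanup entry.
        drop(BailoutGuard::new(make_drop_counter(Arc::clone(&drop_count))));
        assert_eq!(drop_count.load(Ordering::SeqCst), 1);

        // Running the cleanups afterwards must not drop the value a second time.
        run_bailout_cleanups();
        assert_eq!(drop_count.load(Ordering::SeqCst), 1);
    }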

    #[test]
    fn test_into_inner() {
        let drop_count = Arc::new(AtomicUsize::new(0));
        // Clear any leftover cleanup entries from previous tests
        CLEANUP_STACK.with(|stack| stack.borrow_mut().clear());

        let guard = BailoutGuard::new(make_drop_counter(Arc::clone(&drop_count)));
        let value = guard.into_inner();

        // Value hasn't been dropped yet (we own it now)
        assert_eq!(drop_count.load(Ordering::SeqCst), 0);

        drop(value);

        // Now it's dropped
        assert_eq!(drop_count.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_deref() {
        let guard = BailoutGuard::new(String::from("hello"));
        assert_eq!(&*guard, "hello");
        assert_eq!(guard.len(), 5);
    }

    #[test]
    fn test_deref_mut() {
        let mut guard = BailoutGuard::new(String::from("hello"));
        guard.push_str(" world");
        assert_eq!(&*guard, "hello world");
    }
}