// stalloc/syncstalloc.rs

1use core::alloc::{GlobalAlloc, Layout};
2use core::fmt::{self, Debug, Formatter};
3use core::marker::PhantomData;
4use core::ops::Deref;
5use core::ptr::NonNull;
6
7extern crate std;
8use std::sync::{Mutex, MutexGuard};
9
10use crate::align::{Align, Alignment};
11use crate::{AllocChain, AllocError, ChainableAlloc, UnsafeStalloc};
12
/// A wrapper around `UnsafeStalloc` that is safe to create because it prevents data races using a Mutex.
/// In comparison to `UnsafeStalloc`, the mutex may cause a slight overhead.
///
/// Field 0 (`Mutex<()>`) guards field 1 (the `UnsafeStalloc`): `acquire_locked()`
/// takes the lock before handing out a reference to the inner allocator.
#[repr(C)]
pub struct SyncStalloc<const L: usize, const B: usize>(Mutex<()>, UnsafeStalloc<L, B>)
where
	Align<B>: Alignment;
19
/// A lock around `SyncStalloc`. Constructing this type is proof that the user holds an exclusive
/// lock on the inner `UnsafeStalloc`. When this falls out of scope, the `SyncStalloc` is unlocked.
///
/// This is effectively a reimplementation of `std::sync::MutexGuard`.
pub struct StallocGuard<'a, const L: usize, const B: usize>
where
	Align<B>: Alignment,
{
	// Held purely for its unlock-on-drop behavior; the `()` payload carries no data.
	_guard: MutexGuard<'a, ()>,
	// The allocator this guard grants access to (exposed via `Deref`).
	inner: &'a UnsafeStalloc<L, B>,
	// A raw pointer is `!Send` and `!Sync`, so the guard cannot cross threads —
	// mirroring `std::sync::MutexGuard`'s auto-trait behavior.
	_not_sync: PhantomData<*const ()>,
}
32
33impl<const L: usize, const B: usize> Deref for StallocGuard<'_, L, B>
34where
35	Align<B>: Alignment,
36{
37	type Target = UnsafeStalloc<L, B>;
38
39	fn deref(&self) -> &Self::Target {
40		self.inner
41	}
42}
43
impl<const L: usize, const B: usize> SyncStalloc<L, B>
where
	Align<B>: Alignment,
{
	/// Initializes a new empty `SyncStalloc` instance.
	///
	/// # Examples
	/// ```
	/// use stalloc::SyncStalloc;
	///
	/// let alloc = SyncStalloc::<200, 8>::new();
	/// ```
	#[must_use]
	pub const fn new() -> Self {
		// SAFETY: The `UnsafeStalloc` can only be accessed through `acquire_locked()`,
		// which guarantees that the mutex is locked before proceeding.
		Self(Mutex::new(()), unsafe { UnsafeStalloc::<L, B>::new() })
	}

	/// Checks if the allocator is completely out of memory.
	/// If this is false, then you are guaranteed to be able to allocate
	/// a layout with a size and alignment of `B` bytes.
	/// This runs in O(1).
	pub fn is_oom(&self) -> bool {
		self.acquire_locked().is_oom()
	}

	/// Checks if the allocator is empty.
	/// If this is true, then you are guaranteed to be able to allocate
	/// a layout with a size of `B * L` bytes and an alignment of `B` bytes.
	/// If this is false, then this is guaranteed to be impossible.
	/// This runs in O(1).
	pub fn is_empty(&self) -> bool {
		self.acquire_locked().is_empty()
	}

	/// Clears the allocator, invalidating every pointer previously handed out by it.
	///
	/// # Safety
	///
	/// Calling this function immediately invalidates all pointers into the allocator. Calling
	/// `deallocate_blocks()` with an invalidated pointer will result in the free list being corrupted.
	pub unsafe fn clear(&self) {
		// SAFETY: Upheld by the caller.
		unsafe { self.acquire_locked().clear() }
	}

	/// Tries to allocate `size` blocks. If the allocation succeeds, a pointer is returned. This function
	/// never allocates more than necessary.
	///
	/// # Safety
	///
	/// `size` must be nonzero, and `align` must be a power of 2 in the range `1..=2^29 / B`.
	///
	/// # Errors
	///
	/// Will return `AllocError` if the allocation was unsuccessful, in which case this function was a no-op.
	pub unsafe fn allocate_blocks(
		&self,
		size: usize,
		align: usize,
	) -> Result<NonNull<u8>, AllocError> {
		// SAFETY: Upheld by the caller.
		unsafe { self.acquire_locked().allocate_blocks(size, align) }
	}

	/// Deallocates a pointer.
	///
	/// # Safety
	///
	/// `ptr` must point to an allocation, and `size` must be the number of blocks
	/// in the allocation. That is, `size` is always in `1..=L`.
	pub unsafe fn deallocate_blocks(&self, ptr: NonNull<u8>, size: usize) {
		// SAFETY: Upheld by the caller.
		unsafe { self.acquire_locked().deallocate_blocks(ptr, size) }
	}

	/// Shrinks the allocation. This function always succeeds and never reallocates.
	///
	/// # Safety
	///
	/// `ptr` must point to a valid allocation of `old_size` blocks, and `new_size` must be in `1..old_size`.
	pub unsafe fn shrink_in_place(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) {
		// SAFETY: Upheld by the caller.
		unsafe {
			self.acquire_locked()
				.shrink_in_place(ptr, old_size, new_size);
		}
	}

	/// Tries to grow the current allocation in-place. If that isn't possible, this function is a no-op.
	///
	/// # Safety
	///
	/// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`.
	///
	/// # Errors
	///
	/// Will return `AllocError` if the grow was unsuccessful, in which case this function was a no-op.
	pub unsafe fn grow_in_place(
		&self,
		ptr: NonNull<u8>,
		old_size: usize,
		new_size: usize,
	) -> Result<(), AllocError> {
		// SAFETY: Upheld by the caller.
		unsafe { self.acquire_locked().grow_in_place(ptr, old_size, new_size) }
	}

	/// Tries to grow the current allocation in-place. If that isn't possible, the allocator grows by as much
	/// as it is able to, and the new length of the allocation is returned. The new length is guaranteed to be
	/// in the range `old_size..=new_size`.
	///
	/// # Safety
	///
	/// `ptr` must point to a valid allocation of `old_size` blocks. Also, `new_size > old_size`.
	pub unsafe fn grow_up_to(&self, ptr: NonNull<u8>, old_size: usize, new_size: usize) -> usize {
		// SAFETY: Upheld by the caller.
		unsafe { self.acquire_locked().grow_up_to(ptr, old_size, new_size) }
	}

	/// Acquires an exclusive lock for the allocator. This can be used to chain multiple
	/// operations on the allocator without having to repeatedly acquire locks for each one.
	///
	/// While the returned guard is alive, every other method on this `SyncStalloc`
	/// blocks (calling one on the *same* thread may deadlock, as `std::sync::Mutex`
	/// is not reentrant).
	///
	/// # Example
	/// ```
	/// use stalloc::SyncStalloc;
	///
	/// let alloc = SyncStalloc::<100, 4>::new();
	///
	/// let lock = alloc.acquire_locked();
	/// for _ in 0..20 {
	///     // make multiple allocations in a row
	///     unsafe { lock.allocate_blocks(5, 1) }.unwrap();
	/// }
	/// drop(lock); // until we drop the lock, all accesses to `alloc` will block
	///
	/// assert!(alloc.is_oom());
	/// ```
	pub fn acquire_locked(&self) -> StallocGuard<'_, L, B> {
		// SAFETY: if this Mutex is poisoned, it means that one of the allocator functions panicked,
		// which is already declared to be UB. Therefore, we can assume that this is never poisoned.
		StallocGuard {
			_guard: unsafe { self.0.lock().unwrap_unchecked() },
			inner: &self.1,
			_not_sync: PhantomData,
		}
	}
}
190
191impl<const L: usize, const B: usize> Default for SyncStalloc<L, B>
192where
193	Align<B>: Alignment,
194{
195	fn default() -> Self {
196		Self::new()
197	}
198}
199
200impl<const L: usize, const B: usize> Debug for SyncStalloc<L, B>
201where
202	Align<B>: Alignment,
203{
204	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
205		write!(f, "{:?}", self.acquire_locked().inner)
206	}
207}
208
209unsafe impl<const L: usize, const B: usize> GlobalAlloc for SyncStalloc<L, B>
210where
211	Align<B>: Alignment,
212{
213	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
214		// SAFETY: Upheld by the caller.
215		unsafe { self.acquire_locked().alloc(layout) }
216	}
217
218	unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
219		// SAFETY: Upheld by the caller.
220		unsafe { self.acquire_locked().alloc_zeroed(layout) }
221	}
222
223	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
224		// SAFETY: Upheld by the caller.
225		unsafe { self.acquire_locked().dealloc(ptr, layout) }
226	}
227
228	unsafe fn realloc(&self, ptr: *mut u8, old_layout: Layout, new_size: usize) -> *mut u8 {
229		// SAFETY: Upheld by the caller.
230		unsafe { self.acquire_locked().realloc(ptr, old_layout, new_size) }
231	}
232}
233
234#[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
235use crate::Allocator;
236
#[cfg(any(feature = "allocator-api", feature = "allocator-api2"))]
// Each method takes the lock, then forwards to `&UnsafeStalloc` (obtained via
// the guard's `Deref`), which carries the underlying `Allocator` impl.
unsafe impl<const L: usize, const B: usize> Allocator for &SyncStalloc<L, B>
where
	Align<B>: Alignment,
{
	fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let guard = self.acquire_locked();
		(&*guard).allocate(layout)
	}

	unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
		let guard = self.acquire_locked();
		// SAFETY: upheld by the caller of `Allocator::deallocate`.
		unsafe { (&*guard).deallocate(ptr, layout) }
	}

	fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
		let guard = self.acquire_locked();
		(&*guard).allocate_zeroed(layout)
	}

	unsafe fn grow(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout,
	) -> Result<NonNull<[u8]>, AllocError> {
		let guard = self.acquire_locked();
		// SAFETY: upheld by the caller of `Allocator::grow`.
		unsafe { (&*guard).grow(ptr, old_layout, new_layout) }
	}

	unsafe fn grow_zeroed(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout,
	) -> Result<NonNull<[u8]>, AllocError> {
		let guard = self.acquire_locked();
		// SAFETY: upheld by the caller of `Allocator::grow_zeroed`.
		unsafe { (&*guard).grow_zeroed(ptr, old_layout, new_layout) }
	}

	unsafe fn shrink(
		&self,
		ptr: NonNull<u8>,
		old_layout: Layout,
		new_layout: Layout,
	) -> Result<NonNull<[u8]>, AllocError> {
		let guard = self.acquire_locked();
		// SAFETY: upheld by the caller of `Allocator::shrink`.
		unsafe { (&*guard).shrink(ptr, old_layout, new_layout) }
	}

	fn by_ref(&self) -> &Self
	where
		Self: Sized,
	{
		// Matches the trait's provided default; kept explicit as in the original.
		self
	}
}
294
295unsafe impl<const L: usize, const B: usize> ChainableAlloc for SyncStalloc<L, B>
296where
297	Align<B>: Alignment,
298{
299	fn claims(&self, ptr: *mut u8, layout: Layout) -> bool {
300		self.1.claims(ptr, layout)
301	}
302}
303
304impl<const L: usize, const B: usize> SyncStalloc<L, B>
305where
306	Align<B>: Alignment,
307{
	/// Creates a new `AllocChain` containing this allocator and `next`.
	///
	/// Consumes `self`; the returned chain borrows `next`, so it cannot
	/// outlive the allocator it is chained with.
	pub const fn chain<T>(self, next: &T) -> AllocChain<'_, Self, T>
	where
		Self: Sized,
	{
		AllocChain::new(self, next)
	}
315}