// bulk_allocator/bulk_b/mod.rs
// Copyright 2020-2023 Shin Yoshida
//
// "LGPL-3.0-or-later OR Apache-2.0"
//
// This is part of rust-bulk-allocator
//
// rust-bulk-allocator is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rust-bulk-allocator is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rust-bulk-allocator. If not, see <http://www.gnu.org/licenses/>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod large_cache;
mod small_cache;

use self::large_cache::LargeCache;
use self::small_cache::SmallCache;
use crate::MEMORY_CHUNK_SIZE;
use std::alloc::{GlobalAlloc, Layout};
use std::cell::{Cell, UnsafeCell};
use std::mem::{align_of, size_of};
use std::ptr::{null_mut, NonNull};

/// Nullable link used by the intrusive linked lists: `None` plays the role of
/// a null pointer. (Thanks to the `NonNull` niche, `Option<NonNull<T>>` has
/// the same size and layout as `*mut T`.)
type Link<T> = Option<NonNull<T>>;

/// The alignment every cached allocation is rounded up to:
/// `align_of::<usize>()`, but never less than 8 bytes.
/// (`usize::max` is not a `const fn`, hence the explicit `if`.)
const ALIGN: usize = if 8 < align_of::<usize>() {
    align_of::<usize>()
} else {
    8
};

/// `BulkAlloc` is an implementation of [`GlobalAlloc`](`std::alloc::GlobalAlloc`)
/// holding memory cache.
/// This struct acquires bulk memories from the backend, and frees them on the
/// drop at once for the performance.
///
/// Each instance has 2 kinds of caches: some forward linked lists to store a specific sized
/// pointers, and one red black tree to store arbitrary sized memory block.
///
/// The balanced tree cache merges 2 holding pointers if they are placed next to each other.
/// `BulkAlloc` stores a pointer into the tree cache if the size is large enough
/// or if the pointer can be merged into another;
/// otherwise, it is stored into the linked list cache for the size.
///
/// # Safety
///
/// Instance drop releases all the memories acquired from the backend.
/// All the pointers allocated via the instance will be invalid after then.
/// Accessing such a pointer may lead to memory unsafety even if the pointer itself is not
/// deallocated.
///
/// # See also
///
/// - [`alloc`]
/// - [`dealloc`]
///
/// [`alloc`]: Self::alloc
/// [`dealloc`]: Self::dealloc
pub struct BulkAlloc<B>
where
    B: GlobalAlloc,
{
    // Red-black-tree cache for large or mergeable memory blocks.
    // `UnsafeCell` because `GlobalAlloc` methods take `&self` but must mutate it.
    large_cache: UnsafeCell<LargeCache>,
    // Forward-linked-list caches for small, fixed-size memory blocks.
    small_cache: UnsafeCell<SmallCache>,
    // Head of an intrusive list threading every chunk acquired from `backend`;
    // `drop` walks it to release all the chunks at once.
    to_free: Cell<Link<u8>>,
    // Backend allocator providing `MEMORY_CHUNK_SIZE`-byte chunks (and serving
    // requests too large or too aligned to cache).
    backend: B,
}

impl<B> BulkAlloc<B>
where
    B: GlobalAlloc,
{
    /// The max byte size that `BulkAlloc` can cache.
    ///
    /// The last `size_of::<Link<u8>>()` bytes of each `MEMORY_CHUNK_SIZE`-byte
    /// chunk are reserved for the `to_free` list link, so only the remainder
    /// is available to hand out or cache.
    pub const MAX_CACHE_SIZE: usize = MEMORY_CHUNK_SIZE - size_of::<Link<u8>>();
}

95impl<B> Drop for BulkAlloc<B>
96where
97 B: GlobalAlloc,
98{
99 fn drop(&mut self) {
100 let mut it = self.to_free.get();
101
102 unsafe {
103 let layout = Layout::from_size_align(MEMORY_CHUNK_SIZE, ALIGN).unwrap();
104
105 while let Some(ptr) = it {
106 it = NonNull::new(*ptr.cast().as_ref());
107
108 let ptr = ptr.as_ptr().offset(-1 * Self::MAX_CACHE_SIZE as isize);
109 self.backend.dealloc(ptr, layout);
110 }
111 }
112 }
113}
114
115impl<B> BulkAlloc<B>
116where
117 B: GlobalAlloc,
118{
119 /// Creates a new instance.
120 pub const fn new(backend: B) -> Self {
121 Self {
122 large_cache: UnsafeCell::new(LargeCache::new()),
123 small_cache: UnsafeCell::new(SmallCache::new()),
124 to_free: Cell::new(None),
125 backend,
126 }
127 }
128}
129
unsafe impl<B> GlobalAlloc for BulkAlloc<B>
where
    B: GlobalAlloc,
{
    /// Method `alloc` delegates the request to the backend if `layout` is too large (i.e. the size is
    /// greater than [`MAX_CACHE_SIZE`] or the align is greater than [`align_of::<usize>()`](align_of).
    /// Note that usually, the alignment of [`Layout`] is less than
    /// or equals to the value unless the caller dares to enlarge it.)
    ///
    /// Otherwise, `alloc` searches the cache for a larger or the same size memory in the cache.
    /// If no proper memory is found, it acquires a [`MEMORY_CHUNK_SIZE`] bytes chunk
    /// from the backend allocator at first.
    /// Then, it takes a pointer from the memory block to return and caches the rest again.
    ///
    /// # Safety
    ///
    /// All the pointers allocated via the instance will be invalid after the instance drop.
    /// Accessing such a pointer may lead to memory unsafety even if the pointer itself is not
    /// deallocated.
    ///
    /// [read more](std::alloc::GlobalAlloc::alloc)
    ///
    /// [`MAX_CACHE_SIZE`]: Self::MAX_CACHE_SIZE
    /// [`align_of`]: std::mem::align_of
    /// [`Layout`]: std::alloc::Layout
    /// [`MEMORY_CHUNK_SIZE`]: crate::MEMORY_CHUNK_SIZE
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Delegate the request if layout is too large (or too aligned) to cache.
        if Self::MAX_CACHE_SIZE < layout.size() || ALIGN < layout.align() {
            return self.backend.alloc(layout);
        }

        // Round up size to a multiple of ALIGN, so every cached block stays
        // ALIGN-aligned.
        let request_size = (layout.size() + ALIGN - 1) / ALIGN * ALIGN;

        // SAFETY(review): GlobalAlloc is expected to be used single-threadedly
        // here; these UnsafeCell borrows assume no aliasing access — confirm
        // against the crate's Sync/Send story.
        let small_cache = &mut *self.small_cache.get();
        let large_cache = &mut *self.large_cache.get();

        // Search memory block in small_cache and large_cache.
        // If no cache is hit, allocate from the backend.
        let (ptr, alloc_size) = if let Some((ptr, size)) = small_cache.alloc(request_size) {
            (ptr, size)
        } else if let Some((ptr, size)) = large_cache.alloc(request_size) {
            (ptr, size)
        } else {
            // Acquire one whole chunk; individual chunks are only released on drop.
            let layout = Layout::from_size_align(MEMORY_CHUNK_SIZE, ALIGN).unwrap();
            let ptr = self.backend.alloc(layout);

            if ptr.is_null() {
                // Propagate backend allocation failure (null) to the caller.
                return ptr;
            } else {
                // Take the end of memory block and append to self.to_free.
                // The link is stored in the last size_of::<Link<u8>>() bytes of
                // the chunk; Drop steps back MAX_CACHE_SIZE bytes to recover the
                // chunk start.
                {
                    let ptr = ptr.add(Self::MAX_CACHE_SIZE);
                    *ptr.cast() = self
                        .to_free
                        .get()
                        .map(NonNull::as_ptr)
                        .unwrap_or(null_mut());
                    self.to_free.set(NonNull::new(ptr));
                }

                // Only the first MAX_CACHE_SIZE bytes are usable for allocation.
                (NonNull::new_unchecked(ptr), Self::MAX_CACHE_SIZE)
            }
        };

        debug_assert!(alloc_size % ALIGN == 0);

        // Take the end of the memory block as the return value, and cache the rest again if necessary.
        let rest_size = alloc_size - request_size;
        if 0 < rest_size {
            // Use dealloc_without_merge: the leading rest cannot merge with a
            // neighbor we just split from, so skip the merge attempt.
            let _is_ok = large_cache.dealloc_without_merge(ptr, rest_size)
                || small_cache.dealloc(ptr, rest_size);
            debug_assert!(_is_ok);
        }

        // Return the tail `request_size` bytes of the block.
        ptr.as_ptr().add(rest_size)
    }

    /// Method `dealloc` delegates the request to the backend if `layout` is too large (i.e. the size
    /// is greater than [`MAX_CACHE_SIZE`] or the align is greater
    /// than [`align_of::<usize>()`](align_of).
    /// Note that usually, the alignment of [`Layout`] is less than or equals to the value
    /// unless the caller dares to enlarge it.)
    ///
    /// Otherwise, `dealloc` stores the passed pointer into the proper cache.
    /// It is when the instance is dropped when the pointer is released.
    ///
    /// # Safety
    ///
    /// All the pointers allocated via the instance will be invalid after the instance drop.
    /// Accessing such a pointer may lead to memory unsafety even if the pointer itself is not
    /// deallocated.
    ///
    /// [read more](std::alloc::GlobalAlloc::dealloc)
    ///
    /// [`MAX_CACHE_SIZE`]: Self::MAX_CACHE_SIZE
    /// [`Layout`]: std::alloc::Layout
    /// [`align_of`]: std::mem::align_of
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Delegate the request if layout is too large: such pointers were
        // allocated by the backend directly in `alloc`.
        if Self::MAX_CACHE_SIZE < layout.size() || ALIGN < layout.align() {
            self.backend.dealloc(ptr, layout);
            return;
        }

        // Round up size exactly as `alloc` did, so the cached size matches.
        let size = (layout.size() + ALIGN - 1) / ALIGN * ALIGN;
        debug_assert!(ptr as usize % ALIGN == 0);

        // Cache ptr: try the tree cache first (which may merge adjacent
        // blocks), then fall back to the size-specific linked list.
        let small_cache = &mut *self.small_cache.get();
        let large_cache = &mut *self.large_cache.get();
        let ptr = NonNull::new(ptr).unwrap();

        let _is_ok = large_cache.dealloc(ptr, size) || small_cache.dealloc(ptr, size);
        debug_assert!(_is_ok);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use gharial::GAlloc;

    type Alloc = BulkAlloc<GAlloc>;

    /// Layouts too large (in size) or too aligned must be delegated straight
    /// to the backend, both on alloc and on dealloc.
    #[test]
    fn test_alloc_large_layout() {
        let backend = GAlloc::default();
        // Use the `Alloc` alias consistently (was `BulkAlloc::new`).
        let alloc = Alloc::new(backend.clone());

        // Large align: delegated whatever the size is.
        for size in (1..64).chain([Alloc::MAX_CACHE_SIZE, Alloc::MAX_CACHE_SIZE + 1]) {
            unsafe {
                let align = 2 * ALIGN;
                let layout = Layout::from_size_align(size, align).unwrap();

                let ptr = alloc.alloc(layout);
                // `assert!(!…)` instead of `assert_eq!(…, false)`
                // (clippy::bool_assert_comparison).
                assert!(!ptr.is_null());
                // The pointer must have come directly from the backend.
                assert_eq!(backend.providing_pointers(), [(ptr, layout)]);

                ptr.write_bytes(0xff, size);
                alloc.dealloc(ptr, layout);
            }
        }

        // Large size: delegated for every supported alignment.
        let mut align = 1;
        while align <= ALIGN {
            unsafe {
                let size = Alloc::MAX_CACHE_SIZE + 1;
                let layout = Layout::from_size_align(size, align).unwrap();

                let ptr = alloc.alloc(layout);
                assert!(!ptr.is_null());
                assert_eq!(backend.providing_pointers(), [(ptr, layout)]);

                ptr.write_bytes(0xff, size);
                alloc.dealloc(ptr, layout);
            }

            align *= 2;
        }
    }

    /// Cacheable layouts round-trip through alloc/dealloc; every returned
    /// pointer is writable over its full requested size.
    #[test]
    fn test_alloc_dealloc() {
        let backend = GAlloc::default();
        let alloc = Alloc::new(backend.clone());

        unsafe {
            // Repeat so later rounds are served from the caches filled by
            // earlier deallocations.
            for _ in 0..16 {
                let mut align = 1;
                let mut pointers = Vec::new();

                while align <= ALIGN {
                    for size in (0..1024).chain([Alloc::MAX_CACHE_SIZE]) {
                        let layout = Layout::from_size_align(size, align).unwrap();
                        let ptr = alloc.alloc(layout);

                        assert!(!ptr.is_null());
                        ptr.write_bytes(0xff, layout.size());
                        pointers.push((ptr, layout));
                    }

                    align *= 2;
                }

                for (ptr, layout) in pointers {
                    alloc.dealloc(ptr, layout);
                }
            }
        }
    }
}