1use crate::{AllocError, Allocator};
5use core::alloc::Layout;
6use core::cell::Cell;
7use core::ptr::{slice_from_raw_parts_mut, NonNull};
8
/// A linear ("bump") allocator: carves allocations out of a single contiguous
/// region acquired up-front from the backing allocator `A`. Individual
/// deallocations are no-ops (see the `Allocator` impl); the whole region is
/// returned to `A` at once when the `LinearAllocator` is dropped.
pub struct LinearAllocator<A: Allocator> {
    /// Base pointer of the backing region obtained from `allocator`.
    allocation_ptr: NonNull<u8>,
    /// Layout of the backing region. Its size is the actual number of bytes
    /// returned by `A`, which may exceed the size originally requested.
    allocation_layout: Layout,
    /// Bump offset: number of bytes handed out so far, including alignment
    /// padding. `Cell` because `Allocator::allocate` only takes `&self`.
    size: Cell<usize>,
    /// The allocator the backing region came from (and is returned to on drop).
    allocator: A,
}
24
25unsafe impl<A: Allocator> Send for LinearAllocator<A> {}
26
27impl<A: Allocator> LinearAllocator<A> {
28 pub fn new_in(layout: Layout, allocator: A) -> Result<Self, AllocError> {
33 let allocation = allocator.allocate(layout)?;
34 let allocation_layout =
37 unsafe { Layout::from_size_align(allocation.len(), layout.align()).unwrap_unchecked() };
38 Ok(Self {
39 allocation_ptr: allocation.cast(),
40 allocation_layout,
41 size: Cell::new(0),
42 allocator,
43 })
44 }
45
46 #[inline]
48 pub fn used_bytes(&self) -> usize {
49 self.size.get()
50 }
51
52 #[inline]
55 pub fn reserved_bytes(&self) -> usize {
56 self.allocation_layout.size()
57 }
58
59 pub fn remaining_capacity(&self) -> usize {
62 self.reserved_bytes() - self.used_bytes()
63 }
64
65 fn base_ptr(&self) -> *mut u8 {
66 self.allocation_ptr.as_ptr()
67 }
68
69 pub fn has_capacity_for(&self, layout: Layout) -> bool {
71 let align_offset =
75 unsafe { self.base_ptr().add(self.used_bytes()) }.align_offset(layout.align());
76 if let Some(needed_size) = align_offset.checked_add(layout.size()) {
77 self.remaining_capacity() >= needed_size
78 } else {
79 false
80 }
81 }
82}
83
84impl<A: Allocator> Drop for LinearAllocator<A> {
85 fn drop(&mut self) {
86 let ptr = self.allocation_ptr;
87 let layout = self.allocation_layout;
88 unsafe { self.allocator.deallocate(ptr, layout) };
90 }
91}
92
// SAFETY: pointers handed out are derived from the owned backing allocation,
// stay in-bounds (checked against `remaining_capacity` below), never overlap
// (the bump offset only grows), and remain valid until the allocator is
// dropped, since `deallocate` is a no-op.
unsafe impl<A: Allocator> Allocator for LinearAllocator<A> {
    /// Bump-allocates `layout` out of the backing region.
    ///
    /// Zero-sized layouts are rejected with `AllocError`, as are requests
    /// that do not fit in the remaining capacity (padding included).
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        if layout.size() == 0 {
            return Err(AllocError);
        }

        let size = self.used_bytes();
        // Padding needed so the new allocation starts at `layout.align()`.
        // SAFETY: `size <= reserved_bytes()`, so base + size is within, or one
        // past the end of, the backing allocation.
        let align_offset = unsafe { self.base_ptr().add(size) }.align_offset(layout.align());
        // Guard the padding + size sum against usize overflow.
        let needed_size = align_offset.checked_add(layout.size()).ok_or(AllocError)?;
        let remaining_capacity = self.reserved_bytes() - size;

        if needed_size > remaining_capacity {
            return Err(AllocError);
        }

        let wide_ptr = {
            // SAFETY: `size + needed_size <= reserved_bytes()` was just
            // established, so `size + align_offset` is in-bounds too.
            let thin_ptr = unsafe { self.base_ptr().add(size + align_offset) };

            debug_assert_eq!(0, thin_ptr.align_offset(layout.align()));
            slice_from_raw_parts_mut(thin_ptr, layout.size())
        };

        // SAFETY: derived by offsetting a `NonNull` base pointer within its
        // allocation, so it cannot be null.
        let non_null = unsafe { NonNull::new_unchecked(wide_ptr) };

        // Commit the bump only after every fallible step has succeeded.
        self.size.set(size + needed_size);
        Ok(non_null)
    }

    /// Individual deallocation is intentionally a no-op; all memory is
    /// reclaimed at once when the `LinearAllocator` itself is dropped.
    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
    }
}
137
#[cfg(test)]
mod tests {
    use super::*;
    use crate::utils::*;
    use allocator_api2::alloc::Global;
    use bolero::generator::TypeGenerator;

    /// Property test: drives the allocator with bolero-generated sequences of
    /// (size, alignment-bits, index, byte) tuples against a randomly sized
    /// backing region.
    #[test]
    fn fuzz() {
        // Upper bound for generated sizes/indices (256 MiB) to keep runs sane.
        const MAX_SIZE: usize = 0x10000000;

        // Generator ranges: backing-region size, per-allocation size,
        // alignment expressed as a power-of-two exponent, and a probe
        // index/value pair.
        let size_hint = 0..=MAX_SIZE;
        let align_bits = 0..=32;
        let size = 0..=MAX_SIZE;
        let idx = 0..=MAX_SIZE;
        let val = u8::produce();
        let allocs = Vec::<(usize, u32, usize, u8)>::produce()
            .with()
            .values((size, align_bits, idx, val));
        bolero::check!()
            .with_generator((size_hint, allocs))
            .for_each(|(size_hint, size_align_vec)| {
                // Fresh allocator per generated case, backed by `Global`.
                let allocator = LinearAllocator::new_in(
                    Layout::from_size_align(*size_hint, 1).unwrap(),
                    Global,
                )
                .unwrap();

                for (size, align_bits, idx, val) in size_align_vec {
                    // Shared fuzz driver from `crate::utils`; presumably
                    // allocates, writes `val` at `idx`, and validates —
                    // see its definition for the exact checks.
                    fuzzer_inner_loop(&allocator, *size, *align_bits, *idx, *val, MAX_SIZE)
                }
            })
    }

    /// Basic happy path: three 8-byte allocations out of a 24-byte region are
    /// distinct, contiguous, and exhaust the capacity exactly.
    #[test]
    fn test_basics() -> Result<(), AllocError> {
        let alloc = LinearAllocator::new_in(Layout::array::<u8>(24).unwrap(), Global)?;
        const WIDTH: usize = 8;
        let layout = Layout::new::<[u8; WIDTH]>();
        assert!(alloc.has_capacity_for(layout));
        let first = alloc.allocate(layout)?;
        assert!(alloc.has_capacity_for(layout));
        let second = alloc.allocate(layout)?;
        assert!(alloc.has_capacity_for(layout));
        let third = alloc.allocate(layout)?;

        // All three allocations are distinct...
        assert_ne!(first.as_ptr(), second.as_ptr());
        assert_ne!(first.as_ptr(), third.as_ptr());
        assert_ne!(second.as_ptr(), third.as_ptr());

        // ...and each is exactly the requested width.
        assert_eq!(WIDTH, first.len());
        assert_eq!(WIDTH, second.len());
        assert_eq!(WIDTH, third.len());

        let first = first.as_ptr() as *mut u8;
        let second = second.as_ptr() as *mut u8;
        let third = third.as_ptr() as *mut u8;

        // Same-width, same-alignment bumps must be back-to-back.
        unsafe {
            assert_eq!(WIDTH, second.offset_from(first) as usize);
            assert_eq!(WIDTH, third.offset_from(second) as usize);
        }

        // 24 bytes are fully used: even a single extra byte must fail.
        assert!(!alloc.has_capacity_for(Layout::new::<bool>()));
        _ = alloc.allocate(Layout::new::<bool>()).unwrap_err();

        Ok(())
    }
}
211
#[cfg(test)]
mod alignment_tests {
    use super::*;
    use allocator_api2::alloc::Global;
    use core::mem::{align_of, size_of};
    use core::ops::RangeInclusive;

    /// Reference struct whose fields (u8/u16/u32/u64) define the sizes and
    /// alignments exercised below.
    #[repr(C)]
    struct S {
        first: u8,
        second: u16,
        third: u32,
        fourth: u64,
    }

    /// Backing allocator that deliberately hands out a region starting at a
    /// chosen (possibly odd) alignment, to exercise `LinearAllocator`'s
    /// padding logic. Supports exactly one live allocation at a time.
    struct TestAllocator {
        // The raw over-sized region obtained from `Global`.
        wide_ptr: NonNull<[u8]>,
        // The alignment the handed-out region's start is forced to.
        align_to: usize,
        // Whether the single allocation is currently outstanding.
        allocated: Cell<bool>,
    }

    /// Bytes to add to `ptr` so it becomes a multiple of `align_to`.
    /// Hand-rolled (rather than `pointer::align_offset`) so it also works for
    /// non-power-of-two values like 3, 5, 6, 7. `align_to` must be non-zero.
    fn align_offset(ptr: *const u8, align_to: usize) -> usize {
        let uintptr = ptr as usize;
        let rem = uintptr % align_to;
        if rem == 0 {
            0
        } else {
            align_to - rem
        }
    }

    impl Drop for TestAllocator {
        fn drop(&mut self) {
            // In debug builds, catch leaks: the allocation must have been
            // returned via `deallocate` before the allocator is dropped.
            #[cfg(debug_assertions)]
            if self.allocated.get() {
                panic!("TestAllocator dropped while allocation was still held.");
            }

            // SAFETY: `wide_ptr` was allocated from `Global` in `new` with
            // this exact size and align 1, and is freed only here.
            let layout = unsafe { Layout::from_size_align_unchecked(self.wide_ptr.len(), 1) };
            unsafe { Global.deallocate(self.wide_ptr.cast(), layout) };
        }
    }

    impl TestAllocator {
        #[track_caller]
        fn new(align_to: usize) -> Self {
            assert!(align_to <= 8);

            // Over-allocate by two alignments so that after shifting the start
            // to `align_to`, at least `size_of::<S>() + align_of::<S>()` bytes
            // remain for the tests below.
            let size = 2 * align_of::<S>() + size_of::<S>();
            // SAFETY: size is a small non-zero constant and align 1 is valid.
            let layout = unsafe { Layout::from_size_align_unchecked(size, 1) };
            let orig = Global.allocate(layout).unwrap();

            Self {
                wide_ptr: orig,
                align_to,
                allocated: Cell::new(false),
            }
        }
    }

    // SAFETY: hands out a sub-slice of its owned `Global` allocation, valid
    // until `deallocate`; only one allocation can be live at a time.
    unsafe impl Allocator for TestAllocator {
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            assert!(layout.size() <= self.wide_ptr.len());
            assert_eq!(1, layout.align());
            if self.allocated.get() {
                // Single-allocation allocator: refuse a second allocation.
                Err(AllocError)
            } else {
                self.allocated.set(true);
                // Shift the start of the region so it is aligned to exactly
                // `align_to` — the misalignment `LinearAllocator` must absorb.
                let unaligned = self.wide_ptr.as_ptr() as *mut u8;
                let offset = align_offset(unaligned, self.align_to);
                // SAFETY: `offset < align_to <= 8 < wide_ptr.len()`, so the
                // shifted pointer stays inside the allocation.
                let aligned = unsafe { unaligned.add(offset) };
                let wide = slice_from_raw_parts_mut(aligned, self.wide_ptr.len() - offset);
                Ok(unsafe { NonNull::new_unchecked(wide) })
            }
        }

        #[track_caller]
        unsafe fn deallocate(&self, _ptr: NonNull<u8>, layout: Layout) {
            assert!(self.allocated.get());
            // Recompute the shift done in `allocate` and check the caller
            // returns exactly the layout it was handed.
            let unaligned = self.wide_ptr.as_ptr() as *mut u8;
            let offset = align_offset(unaligned, self.align_to);
            assert_eq!(self.wide_ptr.len() - offset, layout.size());
            assert_eq!(1, layout.align());
            self.allocated.set(false);
        }
    }

    /// Drives a `LinearAllocator` whose backing region starts at alignment
    /// `align_to`, allocating u8/u16/u32/u64 in turn and checking alignment,
    /// distance bounds, and the `has_capacity_for`/`allocate` agreement.
    #[track_caller]
    fn test_alignment(align_to: usize) {
        let layout_u8 = Layout::new::<u8>();
        let layout_u16 = Layout::new::<u16>();
        let layout_u32 = Layout::new::<u32>();
        let layout_u64 = Layout::new::<u64>();

        // Worst case: the fields of `S` plus maximal leading padding.
        let max_size = size_of::<S>() + align_of::<S>() - 1;
        let test_alloc = TestAllocator::new(align_to);

        let alloc = {
            let layout = Layout::array::<u8>(max_size).unwrap();
            LinearAllocator::new_in(layout, test_alloc).unwrap()
        };

        // `has_capacity_for` must predict each successful allocation.
        assert!(alloc.has_capacity_for(layout_u8));
        let ptr_u8 = alloc.allocate(layout_u8).unwrap();
        assert!(alloc.has_capacity_for(layout_u16));
        let ptr_u16 = alloc.allocate(layout_u16).unwrap();
        assert!(alloc.has_capacity_for(layout_u32));
        let ptr_u32 = alloc.allocate(layout_u32).unwrap();
        assert!(alloc.has_capacity_for(layout_u64));
        let ptr_u64 = alloc.allocate(layout_u64).unwrap();

        // Each returned slice is exactly the requested size.
        assert_eq!(layout_u8.size(), ptr_u8.len());
        assert_eq!(layout_u16.size(), ptr_u16.len());
        assert_eq!(layout_u32.size(), ptr_u32.len());
        assert_eq!(layout_u64.size(), ptr_u64.len());

        let thinptr_u8 = ptr_u8.as_ptr() as *mut u8;
        let thinptr_u16 = ptr_u16.as_ptr() as *mut u8;
        let thinptr_u32 = ptr_u32.as_ptr() as *mut u8;
        let thinptr_u64 = ptr_u64.as_ptr() as *mut u8;

        /// Asserts `second` lies `range` bytes after `first`: at least the
        /// previous value's size, at most size + worst-case padding.
        #[track_caller]
        fn assert_distance_in(second: *mut u8, first: *mut u8, range: RangeInclusive<usize>) {
            let distance = unsafe { second.offset_from(first) };
            let udistance = usize::try_from(distance).unwrap();
            assert!(range.contains(&udistance));
        }

        assert_distance_in(thinptr_u16, thinptr_u8, 1..=2);
        assert_distance_in(thinptr_u32, thinptr_u16, 2..=4);
        assert_distance_in(thinptr_u64, thinptr_u32, 4..=8);

        // Every pointer is aligned for its own layout despite the skewed base.
        assert_eq!(0, thinptr_u8.align_offset(layout_u8.align()));
        assert_eq!(0, thinptr_u16.align_offset(layout_u16.align()));
        assert_eq!(0, thinptr_u32.align_offset(layout_u32.align()));
        assert_eq!(0, thinptr_u64.align_offset(layout_u64.align()));

        // Whatever capacity remains, the predicate and the allocation must
        // agree with each other.
        let has_capacity = alloc.has_capacity_for(layout_u64);
        assert_eq!(has_capacity, alloc.allocate(layout_u64).is_ok())
    }
    #[test]
    fn test_alignment_1() {
        test_alignment(1);
    }

    #[test]
    fn test_alignment_2() {
        test_alignment(2);
    }

    #[test]
    fn test_alignment_3() {
        test_alignment(3);
    }

    #[test]
    fn test_alignment_4() {
        test_alignment(4);
    }

    #[test]
    fn test_alignment_5() {
        test_alignment(5);
    }

    #[test]
    fn test_alignment_6() {
        test_alignment(6);
    }

    #[test]
    fn test_alignment_7() {
        test_alignment(7);
    }

    #[test]
    fn test_alignment_8() {
        test_alignment(8);
    }
}