1#![doc = include_str!("../README.md")]
4#![deny(unsafe_code)]
5#![warn(missing_docs)]
6
7use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
8
9use log::debug;
10use nonmax::{NonMaxU16, NonMaxU32};
11
12pub mod ext;
13
14mod small_float;
15
16#[cfg(test)]
17mod tests;
18
// Bin layout: free regions are bucketed by size into a two-level structure of
// 32 top-level bins, each fanned out into 8 leaf bins (256 leaf bins total).
// A leaf-bin index packs as (top_bin_index << 3) | (leaf_index & 7).
const NUM_TOP_BINS: usize = 32;
const BINS_PER_LEAF: usize = 8;
const TOP_BINS_INDEX_SHIFT: u32 = 3;
const LEAF_BINS_INDEX_MASK: u32 = 7;
const NUM_LEAF_BINS: usize = NUM_TOP_BINS * BINS_PER_LEAF;
24
/// An unsigned integer type used to index the allocator's internal nodes.
///
/// Implemented in this crate for `u32` and `u16`. Choosing `u16` shrinks the
/// allocator's per-node footprint at the cost of supporting fewer
/// simultaneous allocations.
pub trait NodeIndex: Clone + Copy + Default {
    /// The "non-max" companion of this integer type, so that
    /// `Option<Self::NonMax>` can be stored as compactly as the integer
    /// itself (the max value acts as the `None` niche).
    type NonMax: NodeIndexNonMax + TryFrom<Self> + Into<Self>;

    /// The maximum value representable by this index type, widened to `u32`.
    const MAX: u32;

    /// Converts a `u32` into this index type.
    // NOTE(review): the provided impls use `as` casts, so values above
    // `Self::MAX` would truncate — callers are expected to stay in range.
    fn from_u32(val: u32) -> Self;

    /// Widens this index to `usize` for slice indexing.
    fn to_usize(self) -> usize;
}
46
/// The "non-max" companion of a [`NodeIndex`] type (e.g. `NonMaxU32` for
/// `u32`), used wherever the allocator stores optional node links.
pub trait NodeIndexNonMax: Clone + Copy + PartialEq + Default + Debug + Display {
    /// Widens this index to `usize` for slice indexing.
    fn to_usize(self) -> usize;
}
54
/// An offset allocator: hands out `(offset, size)` regions from a contiguous
/// span of `size` units, using two-level size bins for free-region lookup and
/// doubly linked neighbor lists for coalescing on free.
pub struct Allocator<NI = u32>
where
    NI: NodeIndex,
{
    // Total managed space, in allocation units.
    size: u32,
    // Maximum number of live allocations (also the node-pool capacity).
    max_allocs: u32,
    // Sum of the sizes of all currently free regions.
    free_storage: u32,

    // Bitmask of top-level bins that contain at least one free region.
    used_bins_top: u32,
    // Per-top-bin bitmask of leaf bins that contain at least one free region.
    used_bins: [u8; NUM_TOP_BINS],
    // Head node of each leaf bin's doubly linked list of free regions.
    bin_indices: [Option<NI::NonMax>; NUM_LEAF_BINS],

    // Node pool; nodes represent both free and allocated regions.
    nodes: Vec<Node<NI>>,
    // Stack of free node indices; `free_offset` is the index of the top.
    free_nodes: Vec<NI::NonMax>,
    free_offset: u32,
}
73
/// A live allocation returned by [`Allocator::allocate`]; pass it back to
/// [`Allocator::free`] to release the region.
#[derive(Clone, Copy)]
pub struct Allocation<NI = u32>
where
    NI: NodeIndex,
{
    /// Offset of the allocated region within the allocator's span.
    pub offset: NI,
    // Internal node index backing this allocation; used by `free` and
    // `allocation_size`.
    metadata: NI::NonMax,
}
85
/// Summary of the allocator's current free space, produced by
/// [`Allocator::storage_report`].
#[derive(Debug)]
pub struct StorageReport {
    /// Total free space across all free regions.
    pub total_free_space: u32,
    /// Size of the single largest free region (a lower bound derived from the
    /// largest occupied bin).
    pub largest_free_region: u32,
}
94
/// Detailed per-bin free-region report, produced by
/// [`Allocator::storage_report_full`].
#[derive(Debug)]
pub struct StorageReportFull {
    /// One entry per leaf bin: the bin's region size and how many free
    /// regions currently sit in it.
    pub free_regions: [StorageReportFullRegion; NUM_LEAF_BINS],
}
101
/// One leaf bin's entry in a [`StorageReportFull`].
#[derive(Clone, Copy, Debug, Default)]
pub struct StorageReportFullRegion {
    /// The region size this bin represents.
    pub size: u32,
    /// Number of free regions currently stored in this bin.
    pub count: u32,
}
110
// A region of the managed span. Free nodes live in a per-bin doubly linked
// list (`bin_list_*`); all nodes are linked to their physical neighbors
// (`neighbor_*`) so adjacent free regions can be merged on `free`.
#[derive(Clone, Copy, Default)]
struct Node<NI = u32>
where
    NI: NodeIndex,
{
    // Offset of this region within the allocator's span.
    data_offset: u32,
    // Size of this region.
    data_size: u32,
    // Previous/next node in this node's bin freelist (free nodes only).
    bin_list_prev: Option<NI::NonMax>,
    bin_list_next: Option<NI::NonMax>,
    // Physically adjacent regions (lower/higher offset).
    neighbor_prev: Option<NI::NonMax>,
    neighbor_next: Option<NI::NonMax>,
    // Whether this region is currently allocated.
    used: bool,
}
124
125fn find_lowest_bit_set_after(bit_mask: u32, start_bit_index: u32) -> Option<NonMaxU32> {
127 let mask_before_start_index = (1 << start_bit_index) - 1;
128 let mask_after_start_index = !mask_before_start_index;
129 let bits_after = bit_mask & mask_after_start_index;
130 if bits_after == 0 {
131 None
132 } else {
133 NonMaxU32::try_from(bits_after.trailing_zeros()).ok()
134 }
135}
136
impl<NI> Allocator<NI>
where
    NI: NodeIndex,
{
    /// Creates an allocator managing `size` units of space, with a default
    /// cap on simultaneous allocations (128 Ki, clamped to what `NI` can
    /// index).
    pub fn new(size: u32) -> Self {
        Allocator::with_max_allocs(size, u32::min(128 * 1024, NI::MAX - 1))
    }

    /// Creates an allocator managing `size` units of space that supports at
    /// most `max_allocs` simultaneous allocations.
    ///
    /// # Panics
    ///
    /// Panics if `max_allocs >= NI::MAX - 1`, i.e. if the node pool could not
    /// be indexed by `NI`'s non-max type.
    pub fn with_max_allocs(size: u32, max_allocs: u32) -> Self {
        assert!(max_allocs < NI::MAX - 1);

        let mut this = Self {
            size,
            max_allocs,
            free_storage: 0,
            used_bins_top: 0,
            free_offset: 0,
            used_bins: [0; NUM_TOP_BINS],
            bin_indices: [None; NUM_LEAF_BINS],
            nodes: vec![],
            free_nodes: vec![],
        };
        this.reset();
        this
    }

    /// Drops all live allocations and restores the freshly constructed state:
    /// a single free region covering the whole span.
    ///
    /// Any outstanding [`Allocation`]s become invalid.
    pub fn reset(&mut self) {
        self.free_storage = 0;
        self.used_bins_top = 0;
        // Top of the free-node stack: all `max_allocs` nodes start free.
        self.free_offset = self.max_allocs - 1;

        self.used_bins.iter_mut().for_each(|bin| *bin = 0);

        self.bin_indices.iter_mut().for_each(|index| *index = None);

        self.nodes = vec![Node::default(); self.max_allocs as usize];

        // Fill the free-node stack in descending order so the lowest node
        // indices are popped first. `try_from` cannot fail for values below
        // `max_allocs` (< NI::MAX - 1); `unwrap_or_default` keeps the closure
        // infallible.
        self.free_nodes = (0..self.max_allocs)
            .map(|i| {
                NI::NonMax::try_from(NI::from_u32(self.max_allocs - i - 1)).unwrap_or_default()
            })
            .collect();

        // Seed the bins with one free region spanning the entire size;
        // allocations will carve it up and reinsert the remainders.
        self.insert_node_into_bin(self.size, 0);
    }

    /// Allocates a region of `size` units, returning its offset and a handle
    /// for [`free`](Self::free).
    ///
    /// Returns `None` when no free region is large enough or the node pool is
    /// exhausted.
    pub fn allocate(&mut self, size: u32) -> Option<Allocation<NI>> {
        // Out of nodes: cannot even split a region.
        if self.free_offset == 0 {
            return None;
        }

        // Round the request up to the smallest bin whose regions are all
        // >= `size` (bin sizes follow the `small_float` encoding).
        let min_bin_index = small_float::uint_to_float_round_up(size);

        let min_top_bin_index = min_bin_index >> TOP_BINS_INDEX_SHIFT;
        let min_leaf_bin_index = min_bin_index & LEAF_BINS_INDEX_MASK;

        let mut top_bin_index = min_top_bin_index;
        let mut leaf_bin_index = None;

        // If the minimum top bin has any free regions, look for a suitable
        // leaf bin inside it first.
        if (self.used_bins_top & (1 << top_bin_index)) != 0 {
            leaf_bin_index = find_lowest_bit_set_after(
                self.used_bins[top_bin_index as usize] as _,
                min_leaf_bin_index,
            );
        }

        let leaf_bin_index = match leaf_bin_index {
            Some(leaf_bin_index) => leaf_bin_index,
            None => {
                // No fit in the minimum top bin: advance to the next occupied
                // top bin (`?` bails out if none exists — allocation fails).
                top_bin_index =
                    find_lowest_bit_set_after(self.used_bins_top, min_top_bin_index + 1)?.into();

                // Any leaf bin of a larger top bin fits; take the lowest one.
                // The top bin is occupied, so its mask is nonzero and
                // `trailing_zeros` <= 7 — the NonMax conversion cannot fail.
                NonMaxU32::try_from(self.used_bins[top_bin_index as usize].trailing_zeros())
                    .unwrap()
            }
        };

        let bin_index = (top_bin_index << TOP_BINS_INDEX_SHIFT) | u32::from(leaf_bin_index);

        // Pop the head region off the bin's freelist and mark it used.
        // The bin's occupancy bit is set, so the head must exist.
        let node_index = self.bin_indices[bin_index as usize].unwrap();
        let node = &mut self.nodes[node_index.to_usize()];
        let node_total_size = node.data_size;
        node.data_size = size;
        node.used = true;
        self.bin_indices[bin_index as usize] = node.bin_list_next;
        if let Some(bin_list_next) = node.bin_list_next {
            self.nodes[bin_list_next.to_usize()].bin_list_prev = None;
        }
        self.free_storage -= node_total_size;
        debug!(
            "Free storage: {} (-{}) (allocate)",
            self.free_storage, node_total_size
        );

        // If the bin is now empty, clear its occupancy bits.
        if self.bin_indices[bin_index as usize].is_none() {
            self.used_bins[top_bin_index as usize] &= !(1 << u32::from(leaf_bin_index));

            if self.used_bins[top_bin_index as usize] == 0 {
                self.used_bins_top &= !(1 << top_bin_index);
            }
        }

        // Push any unused tail of the region back into the bins as a new free
        // node, splicing it between this node and its next neighbor.
        let remainder_size = node_total_size - size;
        if remainder_size > 0 {
            let Node {
                data_offset,
                neighbor_next,
                ..
            } = self.nodes[node_index.to_usize()];

            let new_node_index = self.insert_node_into_bin(remainder_size, data_offset + size);

            let node = &mut self.nodes[node_index.to_usize()];
            if let Some(neighbor_next) = node.neighbor_next {
                self.nodes[neighbor_next.to_usize()].neighbor_prev = Some(new_node_index);
            }
            self.nodes[new_node_index.to_usize()].neighbor_prev = Some(node_index);
            self.nodes[new_node_index.to_usize()].neighbor_next = neighbor_next;
            self.nodes[node_index.to_usize()].neighbor_next = Some(new_node_index);
        }

        let node = &mut self.nodes[node_index.to_usize()];
        Some(Allocation {
            offset: NI::from_u32(node.data_offset),
            metadata: node_index,
        })
    }

    /// Frees a previously returned [`Allocation`], coalescing the region with
    /// any physically adjacent free neighbors.
    ///
    /// # Panics
    ///
    /// Panics (via `assert!`) if the allocation's node is not marked used —
    /// e.g. on double-free or after [`reset`](Self::reset).
    pub fn free(&mut self, allocation: Allocation<NI>) {
        let node_index = allocation.metadata;

        let Node {
            data_offset: mut offset,
            data_size: mut size,
            used,
            ..
        } = self.nodes[node_index.to_usize()];

        // Catch double-frees / stale handles.
        assert!(used);

        // Merge with the previous physical neighbor if it is free: grow this
        // region leftwards and unlink the neighbor from its bin.
        if let Some(neighbor_prev) = self.nodes[node_index.to_usize()].neighbor_prev {
            if !self.nodes[neighbor_prev.to_usize()].used {
                let prev_node = &self.nodes[neighbor_prev.to_usize()];
                offset = prev_node.data_offset;
                size += prev_node.data_size;

                self.remove_node_from_bin(neighbor_prev);

                let prev_node = &self.nodes[neighbor_prev.to_usize()];
                debug_assert_eq!(prev_node.neighbor_next, Some(node_index));
                self.nodes[node_index.to_usize()].neighbor_prev = prev_node.neighbor_prev;
            }
        }

        // Likewise merge with the next physical neighbor if it is free.
        if let Some(neighbor_next) = self.nodes[node_index.to_usize()].neighbor_next {
            if !self.nodes[neighbor_next.to_usize()].used {
                let next_node = &self.nodes[neighbor_next.to_usize()];
                size += next_node.data_size;

                self.remove_node_from_bin(neighbor_next);

                let next_node = &self.nodes[neighbor_next.to_usize()];
                debug_assert_eq!(next_node.neighbor_prev, Some(node_index));
                self.nodes[node_index.to_usize()].neighbor_next = next_node.neighbor_next;
            }
        }

        let Node {
            neighbor_next,
            neighbor_prev,
            ..
        } = self.nodes[node_index.to_usize()];

        // Return this node to the free stack; the `insert_node_into_bin`
        // below pops a node (this very one) for the merged region.
        debug!(
            "Putting node {} into freelist[{}] (free)",
            node_index,
            self.free_offset + 1
        );
        self.free_offset += 1;
        self.free_nodes[self.free_offset as usize] = node_index;

        let combined_node_index = self.insert_node_into_bin(size, offset);

        // Re-link the merged region with the surviving physical neighbors.
        if let Some(neighbor_next) = neighbor_next {
            self.nodes[combined_node_index.to_usize()].neighbor_next = Some(neighbor_next);
            self.nodes[neighbor_next.to_usize()].neighbor_prev = Some(combined_node_index);
        }
        if let Some(neighbor_prev) = neighbor_prev {
            self.nodes[combined_node_index.to_usize()].neighbor_prev = Some(neighbor_prev);
            self.nodes[neighbor_prev.to_usize()].neighbor_next = Some(combined_node_index);
        }
    }

    // Pops a node off the free stack, initializes it as a free region of
    // `size` at `data_offset`, and pushes it at the head of the matching
    // bin's freelist, updating the occupancy bitmasks. Returns the node's
    // index.
    fn insert_node_into_bin(&mut self, size: u32, data_offset: u32) -> NI::NonMax {
        // Round down: the region must be >= every request served from its bin.
        let bin_index = small_float::uint_to_float_round_down(size);

        let top_bin_index = bin_index >> TOP_BINS_INDEX_SHIFT;
        let leaf_bin_index = bin_index & LEAF_BINS_INDEX_MASK;

        // Bin was empty: mark it occupied in both bitmask levels.
        if self.bin_indices[bin_index as usize].is_none() {
            self.used_bins[top_bin_index as usize] |= 1 << leaf_bin_index;
            self.used_bins_top |= 1 << top_bin_index;
        }

        let top_node_index = self.bin_indices[bin_index as usize];
        let free_offset = self.free_offset;
        let node_index = self.free_nodes[free_offset as usize];
        self.free_offset -= 1;
        debug!(
            "Getting node {} from freelist[{}]",
            node_index,
            self.free_offset + 1
        );
        // New node becomes the bin's head, with the old head as its next.
        self.nodes[node_index.to_usize()] = Node {
            data_offset,
            data_size: size,
            bin_list_next: top_node_index,
            ..Node::default()
        };
        if let Some(top_node_index) = top_node_index {
            self.nodes[top_node_index.to_usize()].bin_list_prev = Some(node_index);
        }
        self.bin_indices[bin_index as usize] = Some(node_index);

        self.free_storage += size;
        debug!(
            "Free storage: {} (+{}) (insert_node_into_bin)",
            self.free_storage, size
        );
        node_index
    }

    // Unlinks a free node from its bin's freelist (clearing occupancy bits if
    // the bin empties) and returns the node to the free stack.
    fn remove_node_from_bin(&mut self, node_index: NI::NonMax) {
        let node = self.nodes[node_index.to_usize()];

        match node.bin_list_prev {
            Some(bin_list_prev) => {
                // Interior/tail of the list: splice around this node.
                self.nodes[bin_list_prev.to_usize()].bin_list_next = node.bin_list_next;
                if let Some(bin_list_next) = node.bin_list_next {
                    self.nodes[bin_list_next.to_usize()].bin_list_prev = node.bin_list_prev;
                }
            }
            None => {
                // Head of the list: recompute which bin it lives in from its
                // size and advance the bin's head pointer.
                let bin_index = small_float::uint_to_float_round_down(node.data_size);

                let top_bin_index = (bin_index >> TOP_BINS_INDEX_SHIFT) as usize;
                let leaf_bin_index = (bin_index & LEAF_BINS_INDEX_MASK) as usize;

                self.bin_indices[bin_index as usize] = node.bin_list_next;
                if let Some(bin_list_next) = node.bin_list_next {
                    self.nodes[bin_list_next.to_usize()].bin_list_prev = None;
                }

                // Bin emptied: clear its occupancy bits.
                if self.bin_indices[bin_index as usize].is_none() {
                    self.used_bins[top_bin_index as usize] &= !(1 << leaf_bin_index);

                    if self.used_bins[top_bin_index as usize] == 0 {
                        self.used_bins_top &= !(1 << top_bin_index);
                    }
                }
            }
        }

        debug!(
            "Putting node {} into freelist[{}] (remove_node_from_bin)",
            node_index,
            self.free_offset + 1
        );
        self.free_offset += 1;
        self.free_nodes[self.free_offset as usize] = node_index;

        self.free_storage -= node.data_size;
        debug!(
            "Free storage: {} (-{}) (remove_node_from_bin)",
            self.free_storage, node.data_size
        );
    }

    /// Returns the size recorded for `allocation`, or 0 if its node index is
    /// out of range.
    pub fn allocation_size(&self, allocation: Allocation<NI>) -> u32 {
        self.nodes
            .get(allocation.metadata.to_usize())
            .map(|node| node.data_size)
            .unwrap_or_default()
    }

    /// Returns a summary of free space: the total and the size of the largest
    /// free region (derived from the highest occupied bin).
    pub fn storage_report(&self) -> StorageReport {
        let mut largest_free_region = 0;
        let mut free_storage = 0;

        // Out of allocations: free_offset == 0 means the allocator is full
        // of nodes, so report everything as zero.
        if self.free_offset > 0 {
            free_storage = self.free_storage;
            if self.used_bins_top > 0 {
                // Highest occupied top bin, then its highest occupied leaf
                // bin, decoded back to a region size.
                let top_bin_index = 31 - self.used_bins_top.leading_zeros();
                let leaf_bin_index =
                    31 - (self.used_bins[top_bin_index as usize] as u32).leading_zeros();
                largest_free_region = small_float::float_to_uint(
                    (top_bin_index << TOP_BINS_INDEX_SHIFT) | leaf_bin_index,
                );
                debug_assert!(free_storage >= largest_free_region);
            }
        }

        StorageReport {
            total_free_space: free_storage,
            largest_free_region,
        }
    }

    /// Returns a per-bin report: for each leaf bin, its region size and the
    /// number of free regions currently queued in it.
    pub fn storage_report_full(&self) -> StorageReportFull {
        let mut report = StorageReportFull::default();
        for i in 0..NUM_LEAF_BINS {
            // Walk the bin's freelist to count its regions.
            let mut count = 0;
            let mut maybe_node_index = self.bin_indices[i];
            while let Some(node_index) = maybe_node_index {
                maybe_node_index = self.nodes[node_index.to_usize()].bin_list_next;
                count += 1;
            }
            report.free_regions[i] = StorageReportFullRegion {
                size: small_float::float_to_uint(i as u32),
                count,
            }
        }
        report
    }
}
538
539impl Default for StorageReportFull {
540 fn default() -> Self {
541 Self {
542 free_regions: [Default::default(); NUM_LEAF_BINS],
543 }
544 }
545}
546
547impl<NI> Debug for Allocator<NI>
548where
549 NI: NodeIndex,
550{
551 fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
552 self.storage_report().fmt(f)
553 }
554}
555
556impl NodeIndex for u32 {
557 type NonMax = NonMaxU32;
558 const MAX: u32 = u32::MAX;
559
560 fn from_u32(val: u32) -> Self {
561 val
562 }
563
564 fn to_usize(self) -> usize {
565 self as usize
566 }
567}
568
569impl NodeIndex for u16 {
570 type NonMax = NonMaxU16;
571 const MAX: u32 = u16::MAX as u32;
572
573 fn from_u32(val: u32) -> Self {
574 val as u16
575 }
576
577 fn to_usize(self) -> usize {
578 self as usize
579 }
580}
581
582impl NodeIndexNonMax for NonMaxU32 {
583 fn to_usize(self) -> usize {
584 u32::from(self) as usize
585 }
586}
587
588impl NodeIndexNonMax for NonMaxU16 {
589 fn to_usize(self) -> usize {
590 u16::from(self) as usize
591 }
592}