use super::freelist::*;
use super::memory::MmapStrategy;
use crate::util::address::Address;
use crate::util::constants::*;
use crate::util::conversions;
use crate::util::memory::MmapAnnotation;

/// Log2 of the number of bits in a free-list entry.
const LOG_ENTRY_BITS: usize = LOG_BITS_IN_INT as _;

/// Log2 of the number of bytes in a free-list entry.
const LOG_BYTES_IN_ENTRY: usize = LOG_ENTRY_BITS - (LOG_BITS_IN_BYTE as usize);

/// Log2 of the number of bytes in a free-list unit (two entries per unit).
const LOG_BYTES_IN_UNIT: usize = LOG_BYTES_IN_ENTRY + 1;
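
// Worked example of the constants above, assuming 32-bit entries
// (LOG_BITS_IN_INT == 5, LOG_BITS_IN_BYTE == 3):
//
//     LOG_ENTRY_BITS     == 5           // an entry is one i32
//     LOG_BYTES_IN_ENTRY == 5 - 3 == 2  // 4 bytes per entry
//     LOG_BYTES_IN_UNIT  == 2 + 1 == 3  // 8 bytes per unit (two entries)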

/// A free list backed by raw mmapped memory. The list metadata lives in the
/// address range `[base, limit)` and is mapped on demand, in blocks of
/// `pages_per_block` pages, as the list grows.
#[derive(Debug)]
pub struct RawMemoryFreeList {
    pub head: i32,
    pub heads: i32,
    base: Address,
    limit: Address,
    high_water: Address,
    max_units: i32,
    grain: i32,
    current_units: i32,
    pages_per_block: i32,
    strategy: MmapStrategy,
}

impl FreeList for RawMemoryFreeList {
    fn head(&self) -> i32 {
        self.head
    }
    fn heads(&self) -> i32 {
        self.heads
    }
    fn get_entry(&self, index: i32) -> i32 {
        let offset = (index << LOG_BYTES_IN_ENTRY) as usize;
        debug_assert!(self.base + offset >= self.base && self.base + offset < self.high_water);
        unsafe { (self.base + offset).load() }
    }
    fn set_entry(&mut self, index: i32, value: i32) {
        let offset = (index << LOG_BYTES_IN_ENTRY) as usize;
        debug_assert!(
            self.base + offset >= self.base && self.base + offset < self.high_water,
            "base={:?} offset={:?} address={:?} high_water={:?}",
            self.base,
            offset,
            self.base + offset,
            self.high_water
        );
        unsafe { (self.base + offset).store(value) }
    }
    fn alloc(&mut self, size: i32) -> i32 {
        if self.current_units == 0 {
            return FAILURE;
        }
        let mut unit = self.head();
        let mut s = 0;
        while ({
            unit = self.get_next(unit);
            unit != self.head()
        }) && ({
            s = self.get_size(unit);
            s < size
        }) {}
        if unit == self.head() {
            FAILURE
        } else {
            self.__alloc(size, unit, s)
        }
    }
}
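
// The `while ({ .. }) && ({ .. }) {}` construct in `alloc` above emulates a
// C-style do-while loop: the side effects live in the condition, and the
// body is empty. An equivalent, more conventional shape (a sketch only, not
// part of the implementation):
//
//     loop {
//         unit = self.get_next(unit);
//         if unit == self.head() {
//             break; // scan wrapped around: nothing fits
//         }
//         s = self.get_size(unit);
//         if s >= size {
//             break; // first fit found
//         }
//     }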

impl RawMemoryFreeList {
    fn units_per_block(&self) -> i32 {
        (conversions::pages_to_bytes(self.pages_per_block as _) >> LOG_BYTES_IN_UNIT) as _
    }
    fn units_in_first_block(&self) -> i32 {
        self.units_per_block() - self.heads - 1
    }
    pub fn default_block_size(units: i32, heads: i32) -> i32 {
        usize::min(Self::size_in_pages(units, heads) as _, 16) as _
    }
    pub fn size_in_pages(units: i32, heads: i32) -> i32 {
        let map_size = ((units + heads + 1) as usize) << LOG_BYTES_IN_UNIT;
        conversions::bytes_to_pages_up(map_size as _) as _
    }

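    // Worked example for `size_in_pages` (numbers illustrative, assuming
    // 8-byte units and 4 KiB pages): with `units == 5` and `heads == 1`,
    // the map needs (5 + 1 + 1) * 8 == 56 bytes, i.e. a single page. This
    // is why the tests below expect `pages_per_block == 1`.
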
    pub fn new(
        base: Address,
        limit: Address,
        pages_per_block: i32,
        units: i32,
        grain: i32,
        heads: i32,
        strategy: MmapStrategy,
    ) -> Self {
        debug_assert!(units <= MAX_UNITS && heads <= MAX_HEADS);
        debug_assert!(
            base + conversions::pages_to_bytes(Self::size_in_pages(units, heads) as _) <= limit
        );
        Self {
            head: -1,
            heads,
            base,
            limit,
            high_water: base,
            max_units: units,
            grain,
            current_units: 0,
            pages_per_block,
            strategy,
        }
    }

    fn current_capacity(&self) -> i32 {
        let list_blocks = conversions::bytes_to_pages_up(self.high_water - self.base) as i32
            / self.pages_per_block;
        self.units_in_first_block() + (list_blocks - 1) * self.units_per_block()
    }
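
    // Note on the arithmetic above: the first block also holds the `heads`
    // head sentinels and the bottom sentinel (cf. `size_in_pages`), so it
    // contributes `units_per_block() - heads - 1` usable units.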

    pub fn grow_freelist(&mut self, units: i32) -> bool {
        let required_units = units + self.current_units;
        if required_units > self.max_units {
            return false;
        }
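        // Map just enough whole blocks to cover the shortfall (ceiling
        // division). E.g. (numbers illustrative): a shortfall of 513 units
        // with 512 units per block maps (513 + 511) / 512 == 2 new blocks.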
        let blocks = if required_units > self.current_capacity() {
            let units_requested = required_units - self.current_capacity();
            (units_requested + self.units_per_block() - 1) / self.units_per_block()
        } else {
            0
        };
        self.grow_list_by_blocks(blocks, required_units);
        true
    }
    fn grow_list_by_blocks(&mut self, blocks: i32, new_max: i32) {
        debug_assert!(
            (new_max <= self.grain) || (((new_max / self.grain) * self.grain) == new_max)
        );

        if blocks > 0 {
            self.raise_high_water(blocks);
        }

        let old_max = self.current_units;
        assert!(
            new_max <= self.current_capacity(),
            "blocks and new max are inconsistent: need more blocks for the requested capacity"
        );
        assert!(
            new_max <= self.max_units,
            "Requested list to grow larger than the configured maximum"
        );
        self.current_units = new_max;

        if old_max == 0 {
            // First growth: create the head sentinels.
            for i in 1..=self.heads {
                self.set_sentinel(-i);
            }
        } else {
            // Turn the old bottom sentinel into an ordinary unit.
            self.set_size(old_max, 1);
        }

        if new_max == 0 {
            return;
        }

        // Place the new bottom sentinel.
        self.set_sentinel(new_max);

        // Carve the new range into free chunks of `grain` units each, adding
        // them to the free list from the top downwards.
        let mut cursor = new_max;

        let grain = i32::min(self.grain, new_max - old_max);
        cursor -= grain;
        while cursor >= old_max {
            self.set_size(cursor, grain);
            self.add_to_free(cursor);
            cursor -= grain;
        }
    }

    fn raise_high_water(&mut self, blocks: i32) {
        let mut grow_extent = conversions::pages_to_bytes((self.pages_per_block * blocks) as _);
        assert_ne!(
            self.high_water, self.limit,
            "Attempt to grow FreeList beyond limit"
        );
        if self.high_water + grow_extent > self.limit {
            // Clamp the growth to the remaining space below `limit`.
            grow_extent = self.limit - self.high_water;
        }
        self.mmap(self.high_water, grow_extent);
        self.high_water += grow_extent;
    }

    fn mmap(&self, start: Address, bytes: usize) {
        let res = super::memory::dzmmap_noreplace(
            start,
            bytes,
            self.strategy,
            &MmapAnnotation::Misc {
                name: "RawMemoryFreeList",
            },
        );
        assert!(res.is_ok(), "Can't get more space with mmap()");
    }
    pub fn get_limit(&self) -> Address {
        self.limit
    }
}
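
// Minimal usage sketch (illustrative only; the variable values here are
// assumptions, not what mmtk-core itself passes):
//
//     let pages = RawMemoryFreeList::size_in_pages(units, heads);
//     let block = RawMemoryFreeList::default_block_size(units, heads);
//     let limit = base + conversions::pages_to_bytes(pages as usize);
//     let mut fl = RawMemoryFreeList::new(base, limit, block, units, 1, heads, strategy);
//     fl.grow_freelist(units);  // mmaps backing pages on demand
//     let unit = fl.alloc(1);   // first-fit; FAILURE when exhausted
//     fl.free(unit, true);      // free, returning the coalesced size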

#[cfg(test)]
impl Drop for RawMemoryFreeList {
    fn drop(&mut self) {
        // Tests reuse the same address range, so unmap what we mapped.
        let len = self.high_water - self.base;
        if len != 0 {
            unsafe {
                ::libc::munmap(self.base.as_usize() as _, len);
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::FreeList;
    use super::*;
    use std::sync::{Mutex, MutexGuard};

    const TOP_SENTINEL: i32 = -1;
    const FIRST_UNIT: i32 = 0;

    lazy_static! {
        // All tests map the same fixed address range, so they must not run
        // concurrently. Each test holds this mutex for its whole duration.
        static ref MUTEX: Mutex<()> = Mutex::new(());
    }

    fn new_raw_memory_freelist<'a>(
        list_size: usize,
        grain: i32,
    ) -> (MutexGuard<'a, ()>, RawMemoryFreeList, i32, i32, i32) {
        // Take the test mutex; recover the guard even if a previous test
        // panicked while holding it (which poisons the mutex).
        let guard = match MUTEX.lock() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        let start = crate::util::test_util::RAW_MEMORY_FREELIST_TEST_REGION.start;
        let extent = BYTES_IN_PAGE;
        let pages_per_block = RawMemoryFreeList::default_block_size(list_size as _, 1);
        assert_eq!(pages_per_block, 1);
        let mut l = RawMemoryFreeList::new(
            start,
            start + extent,
            pages_per_block,
            list_size as _,
            grain,
            1,
            MmapStrategy::TEST,
        );
        l.grow_freelist(list_size as _);
        let last_unit = list_size as i32 - grain;
        let bottom_sentinel = list_size as i32;
        (guard, l, list_size as _, last_unit, bottom_sentinel)
    }
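
    // Conceptual layout produced by `new_raw_memory_freelist(5, 1)` (an
    // informal sketch; the exact entry encoding lives in `freelist.rs`):
    //
    //     head sentinel (-1) <-> unit 0 <-> unit 1 <-> ... <-> unit 4 <-> head
    //
    // with the bottom sentinel at index 5 terminating the physical range.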

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn new_free_list_grain1() {
        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(5, 1);
        assert_eq!(l.head(), TOP_SENTINEL);

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);

        assert_eq!(l.get_size(FIRST_UNIT), 1);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 1);
        assert_eq!(l.get_next(FIRST_UNIT), 1);
        assert!(l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(!l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(1), 1);
        assert_eq!(l.get_left(1), 0);
        assert_eq!(l.get_prev(1), 0);
        assert_eq!(l.get_right(1), 2);
        assert_eq!(l.get_next(1), 2);
        assert!(l.is_free(1));
        assert!(l.is_coalescable(1));
        assert!(!l.is_multi(1));

        assert_eq!(l.get_size(last_unit), 1);
        assert_eq!(l.get_left(last_unit), last_unit - 1);
        assert_eq!(l.get_prev(last_unit), last_unit - 1);
        assert_eq!(l.get_right(last_unit), bottom_sentinel);
        assert_eq!(l.get_next(last_unit), -1);
        assert!(l.is_free(last_unit));
        assert!(l.is_coalescable(last_unit));
        assert!(!l.is_multi(last_unit));

        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn new_free_list_grain2() {
        let (_guard, l, _, last_unit, bottom_sentinel) = new_raw_memory_freelist(6, 2);
        assert_eq!(l.head(), TOP_SENTINEL);

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), FIRST_UNIT);

        assert_eq!(l.get_size(FIRST_UNIT), 2);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 2);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        assert!(l.is_free(FIRST_UNIT));
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 0);
        assert_eq!(l.get_prev(2), 0);
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));

        assert_eq!(l.get_size(last_unit), 2);
        assert_eq!(l.get_left(last_unit), last_unit - 2);
        assert_eq!(l.get_prev(last_unit), last_unit - 2);
        assert_eq!(l.get_right(last_unit), bottom_sentinel);
        assert_eq!(l.get_next(last_unit), -1);
        assert!(l.is_free(last_unit));
        assert!(l.is_coalescable(last_unit));
        assert!(l.is_multi(last_unit));

        assert_eq!(l.get_prev(bottom_sentinel), bottom_sentinel);
        assert_eq!(l.get_next(bottom_sentinel), bottom_sentinel);
    }

    #[test]
    #[should_panic]
    fn free_list_access_out_of_bounds() {
        // Index 4096 is far beyond the mapped high-water mark, so the
        // bounds debug assertion in `get_entry` fires.
        let (_guard, l, _, _, _) = new_raw_memory_freelist(5, 1);
        l.get_size(4096);
    }

    #[test]
    fn alloc_fit() {
        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
        let result = l.alloc(2);
        assert_eq!(result, 0);

        const NEXT: i32 = 2;

        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);

        assert_eq!(l.get_size(FIRST_UNIT), 2);
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), -1);
        assert_eq!(l.get_right(FIRST_UNIT), 2);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        assert!(!l.is_free(FIRST_UNIT)); // allocated, no longer free
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 0);
        assert_eq!(l.get_prev(2), -1); // unit 2 is now first on the free list
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));
    }

    #[test]
    #[allow(clippy::cognitive_complexity)]
    fn alloc_split() {
        let (_guard, mut l, _, last_unit, _) = new_raw_memory_freelist(6, 2);
        let result = l.alloc(1);
        assert_eq!(result, 0);

        const NEXT: i32 = 1;
        assert_eq!(l.get_prev(TOP_SENTINEL), last_unit);
        assert_eq!(l.get_next(TOP_SENTINEL), NEXT);

        assert_eq!(l.get_size(FIRST_UNIT), 1); // the 2-unit chunk was split
        assert_eq!(l.get_left(FIRST_UNIT), -1);
        assert_eq!(l.get_prev(FIRST_UNIT), 1);
        assert_eq!(l.get_right(FIRST_UNIT), 1);
        assert_eq!(l.get_next(FIRST_UNIT), 2);
        assert!(!l.is_free(FIRST_UNIT)); // allocated, no longer free
        assert!(l.is_coalescable(FIRST_UNIT));
        assert!(!l.is_multi(FIRST_UNIT));

        assert_eq!(l.get_size(1), 1); // the remainder of the split
        assert_eq!(l.get_left(1), 0);
        assert_eq!(l.get_prev(1), -1); // unit 1 is now first on the free list
        assert_eq!(l.get_right(1), 2);
        assert_eq!(l.get_next(1), 2);
        assert!(l.is_free(1));
        assert!(l.is_coalescable(1));
        assert!(!l.is_multi(1));

        assert_eq!(l.get_size(2), 2);
        assert_eq!(l.get_left(2), 1);
        assert_eq!(l.get_prev(2), 1);
        assert_eq!(l.get_right(2), 4);
        assert_eq!(l.get_next(2), 4);
        assert!(l.is_free(2));
        assert!(l.is_coalescable(2));
        assert!(l.is_multi(2));
    }

    #[test]
    fn alloc_split_twice() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        // Split the first 2-unit chunk twice: both halves get allocated.
        let res1 = l.alloc(1);
        assert_eq!(res1, 0);
        let res2 = l.alloc(1);
        assert_eq!(res2, 1);

        // Unit 2 is now the first free unit on the list.
        assert_eq!(l.get_prev(2), -1);
    }

    #[test]
    fn alloc_skip() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        // Allocate 1 unit, splitting the first chunk and leaving unit 1 free.
        let res1 = l.alloc(1);
        assert_eq!(res1, 0);
        // The request for 2 units skips the 1-unit remainder at unit 1.
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        // Unit 1 is still free and now links directly to unit 4.
        assert!(l.is_free(1));
        assert_eq!(l.get_next(1), 4);
        assert_eq!(l.get_prev(4), 1);
    }

    #[test]
    fn alloc_exhaust() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);
        let res3 = l.alloc(2);
        assert_eq!(res3, 4);
        // All 6 units are allocated; the next request must fail.
        let res4 = l.alloc(2);
        assert_eq!(res4, FAILURE);
    }

    #[test]
    fn free_unit() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        // Unit 4 is the only remaining free unit.
        assert_eq!(l.get_prev(4), -1);

        // Free without coalescing: the unit itself is returned.
        let freed = l.free(res2, false);
        assert_eq!(freed, res2);
        assert!(l.is_free(res2));
    }

    #[test]
    fn free_coalesce() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        // Freeing unit 2 coalesces it with free unit 4 into a 4-unit chunk.
        let coalesced_size = l.free(res2, true);
        assert_eq!(coalesced_size, 4);
    }

    #[test]
    fn free_cant_coalesce() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);
        let res3 = l.alloc(1);
        assert_eq!(res3, 4);

        // Unit 4 is still allocated, so freeing unit 2 cannot coalesce.
        let coalesced_size = l.free(res2, true);
        assert_eq!(coalesced_size, 2);
    }

    #[test]
    fn free_realloc() {
        let (_guard, mut l, _, _, _) = new_raw_memory_freelist(6, 2);
        let res1 = l.alloc(2);
        assert_eq!(res1, 0);
        let res2 = l.alloc(2);
        assert_eq!(res2, 2);

        // Unit 4 is the only remaining free unit.
        assert_eq!(l.get_prev(4), -1);

        let freed = l.free(res2, false);
        assert_eq!(freed, res2);
        assert!(l.is_free(res2));

        // The freed 2-unit chunk at unit 2 is reused first.
        let res3 = l.alloc(2);
        assert_eq!(res3, 2);
        assert!(!l.is_free(res3));

        let res4 = l.alloc(1);
        assert_eq!(res4, 4);
    }
}