use alloc::vec::Vec;
use core::cell::{Cell, UnsafeCell};
use core::mem::size_of;
use core::ops::Range;
use core::ptr::null_mut;

use crate::util::{
    align_down, align_up, bit_vector_clear, bit_vector_fill, bit_vector_get_bit,
    bit_vector_index_of, bit_vector_set_bit,
};
use crate::virtual_memory::{
    self, alloc, alloc_dual_mapping, flush_instruction_cache, protect_jit_memory, release,
    release_dual_mapping, DualMapping, MemoryFlags, ProtectJitAccess,
};
use crate::Error;

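/// Controls how much memory [`JitAllocator::reset`] gives back to the system.
///
/// A `Soft` reset keeps the first block of each pool around (wiped clean) so its
/// mapping can be reused, unless `immediate_release` was requested; a `Hard` reset
/// releases every block.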
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u32)]
pub enum ResetPolicy {
    Soft = 0,

    Hard = 1,
}
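
/// Creation options for a [`JitAllocator`].
///
/// * `use_dual_mapping` - map each block twice (a read-execute and a read-write view)
///   instead of relying on a single RWX mapping.
/// * `use_multiple_pools` - reserved for splitting allocations across pools with
///   different granularities; currently has no effect.
/// * `fill_unused_memory` - fill unused and released memory with a fill pattern.
/// * `immediate_release` - release a block back to the system as soon as it becomes empty.
/// * `custom_fill_pattern` - pattern used when filling unused memory; defaults to a
///   platform-specific value (`0xCC` bytes on x86).
/// * `block_size` - preferred block size; out-of-range or non-power-of-two values fall
///   back to the system page granularity.
/// * `granularity` - allocation granularity; out-of-range values fall back to 64 bytes.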
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct JitAllocatorOptions {
    pub use_dual_mapping: bool,
    pub use_multiple_pools: bool,
    pub fill_unused_memory: bool,
    pub immediate_release: bool,
    pub custom_fill_pattern: Option<u32>,

    pub block_size: u32,
    pub granularity: u32,
}

impl Default for JitAllocatorOptions {
    fn default() -> Self {
        Self {
            use_dual_mapping: true,
            use_multiple_pools: true,
            fill_unused_memory: true,
            immediate_release: false,
            custom_fill_pattern: None,
            block_size: 0,
            granularity: 0,
        }
    }
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
const DEFAULT_FILL_PATTERN: u32 = 0xCCCCCCCC;
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
const DEFAULT_FILL_PATTERN: u32 = 0x0;

const MULTI_POOL_COUNT: usize = 3;

const MIN_GRANULARITY: usize = 64;

const MAX_BLOCK_SIZE: usize = 32 * 1024 * 1024;

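/// Iterates over ranges of consecutive bits with value `B` (`0` or `1`) in a bit
/// vector stored as `u32` words. The allocator uses it with `B = 0` to find runs of
/// clear bits, e.g. free granules in a block's `used_bitvector`.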
struct BitVectorRangeIterator<'a, const B: u32> {
    slice: &'a [u32],
    idx: usize,
    end: usize,
    bit_word: u32,
}

const BIT_WORD_SIZE: usize = core::mem::size_of::<u32>() * 8;

impl<'a, const B: u32> BitVectorRangeIterator<'a, B> {
    const XOR_MASK: u32 = if B == 0 { u32::MAX } else { 0 };

    fn from_slice_and_nbitwords(data: &'a [u32], num_bit_words: usize) -> Self {
        Self::new(data, num_bit_words, 0, num_bit_words * BIT_WORD_SIZE)
    }

    fn new(data: &'a [u32], _num_bit_words: usize, start: usize, end: usize) -> Self {
        let idx = align_down(start, BIT_WORD_SIZE);
        let slice = &data[idx / BIT_WORD_SIZE..];

        let mut bit_word = 0;

        if idx < end {
            bit_word =
                (slice[0] ^ Self::XOR_MASK) & (u32::MAX << (start as u32 % BIT_WORD_SIZE as u32));
        }

        Self {
            slice,
            idx,
            end,
            bit_word,
        }
    }

    fn next_range(&mut self, range_hint: u32) -> Option<Range<u32>> {
        while self.bit_word == 0 {
            self.idx += BIT_WORD_SIZE;

            if self.idx >= self.end {
                return None;
            }

            self.slice = &self.slice[1..];
            self.bit_word = self.slice[0] ^ Self::XOR_MASK;
        }

        let i = self.bit_word.trailing_zeros();
        let start = self.idx as u32 + i;
        self.bit_word = !(self.bit_word ^ !(u32::MAX << i));

        let mut end;
        if self.bit_word == 0 {
            end = (self.idx as u32 + BIT_WORD_SIZE as u32).min(self.end as _);

            while end.wrapping_sub(start) < range_hint {
                self.idx += BIT_WORD_SIZE;

                if self.idx >= self.end {
                    break;
                }

                self.slice = &self.slice[1..];
                self.bit_word = self.slice[0] ^ Self::XOR_MASK;

                if self.bit_word != u32::MAX {
                    let j = self.bit_word.trailing_zeros();
                    end = (self.idx as u32 + j).min(self.end as _);
                    self.bit_word = !(self.bit_word ^ !(u32::MAX << j));
                    break;
                }

                end = (self.idx as u32 + BIT_WORD_SIZE as u32).min(self.end as _);
                self.bit_word = 0;
                continue;
            }

            Some(start..end)
        } else {
            let j = self.bit_word.trailing_zeros();
            end = (self.idx as u32 + j).min(self.end as _);

            self.bit_word = !(self.bit_word ^ !(u32::MAX << j));

            Some(start..end)
        }
    }
}

impl<'a> Iterator for BitVectorRangeIterator<'a, 0> {
    type Item = Range<u32>;

    fn next(&mut self) -> Option<Self::Item> {
        self.next_range(u32::MAX)
    }
}

use intrusive_collections::{intrusive_adapter, rbtree::*};
use intrusive_collections::{KeyAdapter, UnsafeRef};

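/// A single mapped block of executable memory managed by a [`JitAllocatorPool`].
///
/// A block tracks its usage with two bit vectors: `used_bitvector` has one bit per
/// allocation granule and `stop_bitvector` marks the last granule of each allocation,
/// so that `release`/`shrink` can recover the allocation size from a pointer alone.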
struct JitAllocatorBlock {
    node: Link,
    list_node: intrusive_collections::LinkedListLink,

    pool: *mut JitAllocatorPool,
    mapping: DualMapping,
    block_size: usize,

    flags: Cell<u32>,
    area_size: Cell<u32>,
    area_used: Cell<u32>,
    largest_unused_area: Cell<u32>,
    search_start: Cell<u32>,
    search_end: Cell<u32>,

    used_bitvector: UnsafeCell<alloc::vec::Vec<u32>>,
    stop_bitvector: UnsafeCell<alloc::vec::Vec<u32>>,
}

impl JitAllocatorBlock {
    const FLAG_EMPTY: u32 = 0x00000001;
    const FLAG_DIRTY: u32 = 0x00000002;
    const FLAG_DUAL_MAPPED: u32 = 0x00000004;

    fn pool(&self) -> *mut JitAllocatorPool {
        self.pool
    }

    fn rx_ptr(&self) -> *const u8 {
        self.mapping.rx
    }

    fn rw_ptr(&self) -> *mut u8 {
        self.mapping.rw
    }

    fn flags(&self) -> u32 {
        self.flags.get()
    }

    fn add_flags(&self, flags: u32) {
        self.flags.set(self.flags() | flags);
    }

    fn clear_flags(&self, flags: u32) {
        self.flags.set(self.flags() & !flags);
    }

    fn is_dirty(&self) -> bool {
        (self.flags() & Self::FLAG_DIRTY) != 0
    }

    fn block_size(&self) -> usize {
        self.block_size
    }

    fn area_used(&self) -> u32 {
        self.area_used.get()
    }

    fn area_size(&self) -> u32 {
        self.area_size.get()
    }

    fn largest_unused_area(&self) -> u32 {
        self.largest_unused_area.get()
    }

    fn search_start(&self) -> u32 {
        self.search_start.get()
    }

    fn search_end(&self) -> u32 {
        self.search_end.get()
    }

    fn used_bitvector(&self) -> &alloc::vec::Vec<u32> {
        unsafe { &*self.used_bitvector.get() }
    }

    fn stop_bitvector(&self) -> &alloc::vec::Vec<u32> {
        unsafe { &*self.stop_bitvector.get() }
    }

    fn used_bitvector_mut(&self) -> &mut alloc::vec::Vec<u32> {
        unsafe { &mut *self.used_bitvector.get() }
    }

    fn stop_bitvector_mut(&self) -> &mut alloc::vec::Vec<u32> {
        unsafe { &mut *self.stop_bitvector.get() }
    }

    fn area_available(&self) -> u32 {
        self.area_size() - self.area_used()
    }

    fn mark_allocated_area(&self, allocated_area_start: u32, allocated_area_end: u32) {
        let allocated_area_size = allocated_area_end - allocated_area_start;

        bit_vector_fill(
            self.used_bitvector_mut(),
            allocated_area_start as _,
            allocated_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            allocated_area_end as usize - 1,
            true,
        );

        unsafe {
            (*self.pool).total_area_used += allocated_area_size as usize;
        }

        self.area_used
            .set(self.area_used() + allocated_area_size);

        if self.area_available() == 0 {
            self.search_start.set(self.area_size());
            self.search_end.set(0);
            self.largest_unused_area.set(0);
            self.clear_flags(Self::FLAG_DIRTY);
        } else {
            if self.search_start.get() == allocated_area_start {
                self.search_start.set(allocated_area_end as _);
            }

            if self.search_end.get() == allocated_area_end {
                self.search_end.set(allocated_area_start as _);
            }

            self.add_flags(Self::FLAG_DIRTY);
        }
    }

    fn mark_released_area(&self, released_area_start: u32, released_area_end: u32) {
        let released_area_size = released_area_end - released_area_start;

        unsafe {
            (*self.pool).total_area_used -= released_area_size as usize;
        }

        self.area_used
            .set(self.area_used() - released_area_size);
        self.search_start
            .set(self.search_start.get().min(released_area_start));
        self.search_end
            .set(self.search_end.get().max(released_area_end));

        bit_vector_clear(
            self.used_bitvector_mut(),
            released_area_start as _,
            released_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            released_area_end as usize - 1,
            false,
        );

        if self.area_used() == 0 {
            self.search_start.set(0);
            self.search_end.set(self.area_size());
            self.largest_unused_area.set(self.area_size());
            self.add_flags(Self::FLAG_EMPTY);
            self.clear_flags(Self::FLAG_DIRTY);
        } else {
            self.add_flags(Self::FLAG_DIRTY);
        }
    }

    fn mark_shrunk_area(&self, shrunk_area_start: u32, shrunk_area_end: u32) {
        let shrunk_area_size = shrunk_area_end - shrunk_area_start;

        assert!(shrunk_area_start != 0);
        assert!(shrunk_area_end != self.area_size());

        unsafe {
            (*self.pool).total_area_used -= shrunk_area_size as usize;
        }

        self.area_used.set(self.area_used() - shrunk_area_size);
        self.search_start
            .set(self.search_start.get().min(shrunk_area_start));
        self.search_end
            .set(self.search_end.get().max(shrunk_area_end));

        bit_vector_clear(
            self.used_bitvector_mut(),
            shrunk_area_start as _,
            shrunk_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            shrunk_area_end as usize - 1,
            false,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            shrunk_area_start as usize - 1,
            true,
        );

        self.add_flags(Self::FLAG_DIRTY);
    }
}

impl PartialOrd for JitAllocatorBlock {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.rx_ptr().partial_cmp(&other.rx_ptr())
    }
}

impl Ord for JitAllocatorBlock {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.rx_ptr().cmp(&other.rx_ptr())
    }
}

impl PartialEq for JitAllocatorBlock {
    fn eq(&self, other: &Self) -> bool {
        self.rx_ptr() == other.rx_ptr()
    }
}

impl Eq for JitAllocatorBlock {}

use intrusive_collections::linked_list::LinkedList;

intrusive_adapter!(JitAllocatorBlockAdapter = UnsafeRef<JitAllocatorBlock> : JitAllocatorBlock { node: Link });
intrusive_adapter!(BlockListAdapter = UnsafeRef<JitAllocatorBlock> : JitAllocatorBlock { list_node: intrusive_collections::LinkedListLink });

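/// Search key used to locate the block that contains a given `rx` pointer.
///
/// The ordering is defined so that a key compares `Equal` to any block whose
/// `[rxptr, rxptr + block_size)` range contains it; looking up a key with
/// `block_size == 0` therefore finds the block an allocation belongs to.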
struct BlockKey {
    rxptr: *const u8,
    block_size: u32,
}

impl PartialEq for BlockKey {
    fn eq(&self, other: &Self) -> bool {
        self.rxptr == other.rxptr
    }
}

impl Eq for BlockKey {}

impl PartialOrd for BlockKey {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for BlockKey {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        let addr_off = other.rxptr as usize + other.block_size as usize;

        if addr_off <= self.rxptr as usize {
            core::cmp::Ordering::Less
        } else if other.rxptr > self.rxptr {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }
}

impl<'a> KeyAdapter<'a> for JitAllocatorBlockAdapter {
    type Key = BlockKey;

    fn get_key(
        &self,
        value: &'a <Self::PointerOps as intrusive_collections::PointerOps>::Value,
    ) -> Self::Key {
        BlockKey {
            rxptr: value.rx_ptr(),
            block_size: value.block_size as _,
        }
    }
}

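/// A pool of blocks that share the same allocation granularity.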
struct JitAllocatorPool {
    blocks: LinkedList<BlockListAdapter>,
    cursor: *mut JitAllocatorBlock,

    block_count: u32,
    granularity: u16,
    granularity_log2: u8,
    empty_block_count: u8,
    total_area_size: usize,
    total_area_used: usize,
    total_overhead_bytes: usize,
}

impl JitAllocatorPool {
    fn new(granularity: u32) -> Self {
        let granularity_log2 = granularity.trailing_zeros() as u8;
        let granularity = granularity as u16;

        Self {
            blocks: LinkedList::new(BlockListAdapter::new()),
            cursor: core::ptr::null_mut(),
            block_count: 0,
            granularity,
            granularity_log2,
            empty_block_count: 0,
            total_area_size: 0,
            total_area_used: 0,
            total_overhead_bytes: 0,
        }
    }

    fn reset(&mut self) {
        self.blocks.clear();
        self.cursor = core::ptr::null_mut();
        self.block_count = 0;
        self.empty_block_count = 0;
        self.total_area_size = 0;
        self.total_area_used = 0;
        self.total_overhead_bytes = 0;
    }

    fn byte_size_from_area_size(&self, area_size: u32) -> usize {
        area_size as usize * self.granularity as usize
    }

    fn area_size_from_byte_size(&self, byte_size: usize) -> u32 {
        ((byte_size + self.granularity as usize - 1) >> self.granularity_log2) as u32
    }

    fn bit_word_count_from_area_size(&self, area_size: u32) -> usize {
        align_up(area_size as _, 32) / 32
    }
}

use alloc::boxed::Box;

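/// A simple JIT memory allocator.
///
/// Memory is carved out of large virtually-mapped blocks (optionally dual-mapped as
/// separate read-execute and read-write views) and handed out in multiples of the pool
/// granularity. Allocations are tracked per block with bit vectors, so memory can be
/// released or shrunk using only the `rx` pointer returned by [`alloc`](Self::alloc).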
pub struct JitAllocator {
    options: JitAllocatorOptions,
    block_size: usize,
    granularity: usize,
    fill_pattern: u32,

    allocation_count: usize,
    tree: RBTree<JitAllocatorBlockAdapter>,
    pools: Box<[*mut JitAllocatorPool]>,
}

impl JitAllocator {
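    /// Creates a new `JitAllocator` configured by `params`.
    ///
    /// Out-of-range or non-power-of-two `block_size` / `granularity` values fall back
    /// to the system page granularity and to 64 bytes respectively.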
    pub fn new(params: JitAllocatorOptions) -> Box<Self> {
        let vm_info = virtual_memory::info();

        let mut block_size = params.block_size;
        let mut granularity = params.granularity;

        let mut pool_count = 1;

        // NOTE: multiple pools are currently disabled; a single pool is always used.
        if params.use_multiple_pools && false {
            pool_count = MULTI_POOL_COUNT;
        }

        if block_size < 64 * 1024 || block_size > 256 * 1024 * 1024 || !block_size.is_power_of_two()
        {
            block_size = vm_info.page_granularity as _;
        }

        if granularity < 64 || granularity > 256 || !granularity.is_power_of_two() {
            granularity = MIN_GRANULARITY as _;
        }

        let fill_pattern = params.custom_fill_pattern.unwrap_or(DEFAULT_FILL_PATTERN);

        let mut pools = Vec::with_capacity(pool_count);

        for _ in 0..pool_count {
            pools.push(Box::into_raw(Box::new(JitAllocatorPool::new(granularity))));
        }

        let allocator = Box::new(Self {
            options: params,
            block_size: block_size as _,
            granularity: granularity as _,
            fill_pattern,
            allocation_count: 0,
            tree: RBTree::new(JitAllocatorBlockAdapter::new()),
            pools: pools.into_boxed_slice(),
        });

        allocator
    }

    fn size_to_pool_id(&self, size: usize) -> usize {
        let mut pool_id = self.pools.len() - 1;
        let mut granularity = self.granularity << pool_id;

        while pool_id != 0 {
            if align_up(size, granularity) == size {
                break;
            }

            pool_id -= 1;
            granularity >>= 1;
        }

        pool_id
    }

    fn bitvector_size_to_byte_size(area_size: u32) -> usize {
        ((area_size as usize + 32 - 1) / 32) * size_of::<u32>()
    }

    fn calculate_ideal_block_size(
        &self,
        pool: *mut JitAllocatorPool,
        allocation_size: usize,
    ) -> usize {
        unsafe {
            let last = (*pool).blocks.back();

            let mut block_size = if !last.is_null() {
                last.get().unwrap().block_size()
            } else {
                self.block_size
            };

            if block_size < MAX_BLOCK_SIZE {
                block_size *= 2;
            }

            if allocation_size > block_size {
                block_size = align_up(allocation_size, block_size);

                if block_size < allocation_size {
                    return 0;
                }
            }

            block_size
        }
    }

    unsafe fn new_block(
        &mut self,
        pool: *mut JitAllocatorPool,
        block_size: usize,
    ) -> Result<Box<JitAllocatorBlock>, Error> {
        let area_size =
            (block_size + (*pool).granularity as usize - 1) >> (*pool).granularity_log2;
        let num_bit_words = (area_size + 32 - 1) / 32;

        let mut block = Box::new(JitAllocatorBlock {
            node: Link::new(),
            list_node: intrusive_collections::LinkedListLink::new(),
            pool,
            mapping: DualMapping {
                rx: null_mut(),
                rw: null_mut(),
            },
            block_size: block_size as _,
            flags: Cell::new(0),
            area_size: Cell::new(0),
            area_used: Cell::new(0),
            largest_unused_area: Cell::new(area_size as _),
            search_end: Cell::new(area_size as _),
            search_start: Cell::new(0),
            used_bitvector: UnsafeCell::new({
                // One bit per granule; `num_bit_words` 32-bit words cover the whole area.
                let mut v = Vec::with_capacity(num_bit_words);
                v.resize(num_bit_words, 0);
                v
            }),
            stop_bitvector: UnsafeCell::new({
                let mut v = Vec::with_capacity(num_bit_words);
                v.resize(num_bit_words, 0);
                v
            }),
        });

        let mut block_flags = 0;
        let virt_mem = if self.options.use_dual_mapping {
            block_flags |= JitAllocatorBlock::FLAG_DUAL_MAPPED;
            alloc_dual_mapping(block_size, MemoryFlags::ACCESS_RWX.into())?
        } else {
            let rx = alloc(block_size, MemoryFlags::ACCESS_RWX.into())?;
            DualMapping { rx, rw: rx }
        };

        if self.options.fill_unused_memory {
            protect_jit_memory(ProtectJitAccess::ReadWrite);
            fill_pattern(virt_mem.rw, self.fill_pattern, block_size);
            protect_jit_memory(ProtectJitAccess::ReadExecute);
            flush_instruction_cache(virt_mem.rx, block_size);
        }

        block.area_size.set(area_size as _);
        block.mapping = virt_mem;
        block.flags.set(block_flags);
        Ok(block)
    }

    unsafe fn delete_block(&mut self, block: *mut JitAllocatorBlock) {
        let mut block = Box::from_raw(block);

        if (block.flags() & JitAllocatorBlock::FLAG_DUAL_MAPPED) != 0 {
            let _ = release_dual_mapping(&mut block.mapping, block.block_size);
        } else {
            let _ = release(block.mapping.rx as _, block.block_size);
        }

        drop(block);
    }

    unsafe fn insert_block(&mut self, block: *mut JitAllocatorBlock) {
        let b = &mut *block;
        let pool = &mut *b.pool();

        if pool.cursor.is_null() {
            pool.cursor = block;
        }

        self.tree.insert(UnsafeRef::from_raw(block));
        pool.blocks.push_front(UnsafeRef::from_raw(block));

        pool.block_count += 1;
        pool.total_area_size += b.area_size() as usize;

        pool.total_overhead_bytes +=
            size_of::<JitAllocatorBlock>() + Self::bitvector_size_to_byte_size(b.area_size()) * 2;
    }

    unsafe fn remove_block(
        &mut self,
        block: &mut intrusive_collections::linked_list::CursorMut<'_, BlockListAdapter>,
    ) -> *mut JitAllocatorBlock {
        let b = block.get().unwrap();
        let pool = &mut *b.pool();

        if pool.cursor == b as *const JitAllocatorBlock as *mut _ {
            pool.cursor = if let Some(block) = block.peek_prev().get() {
                block as *const _ as *mut _
            } else if let Some(block) = block.peek_next().get() {
                block as *const _ as *mut _
            } else {
                null_mut()
            };
        }

        match self.tree.entry(&BlockKey {
            rxptr: b.rx_ptr(),
            block_size: b.block_size as _,
        }) {
            Entry::Occupied(mut c) => {
                assert_eq!(
                    UnsafeRef::into_raw(c.remove().unwrap()),
                    b as *const _ as *mut JitAllocatorBlock,
                    "blocks are not the same"
                );
            }
            _ => (),
        }

        let area_size = b.area_size();

        pool.block_count -= 1;
        pool.total_area_size -= area_size as usize;

        pool.total_overhead_bytes -=
            size_of::<JitAllocatorBlock>() + Self::bitvector_size_to_byte_size(area_size) * 2;

        UnsafeRef::into_raw(block.remove().unwrap())
    }

    unsafe fn wipe_out_block(
        &mut self,
        block: &mut intrusive_collections::linked_list::CursorMut<'_, BlockListAdapter>,
    ) -> *mut JitAllocatorBlock {
        let b = block.get().unwrap();

        if (b.flags() & JitAllocatorBlock::FLAG_EMPTY) != 0 {
            // Nothing to wipe; just unlink the block and hand it back to the caller.
            return UnsafeRef::into_raw(block.remove().unwrap());
        }

        let pool = &mut *b.pool();

        let area_size = b.area_size();
        let granularity = pool.granularity;

        virtual_memory::protect_jit_memory(ProtectJitAccess::ReadWrite);

        if self.options.fill_unused_memory {
            let rw_ptr = b.rw_ptr();

            let it = BitVectorRangeIterator::from_slice_and_nbitwords(
                b.stop_bitvector(),
                pool.bit_word_count_from_area_size(b.area_size()),
            );

            for range in it {
                let span_ptr = rw_ptr.add(range.start as usize * granularity as usize);
                let span_size = (range.end as usize - range.start as usize) * granularity as usize;

                let mut n = 0;
                while n < span_size {
                    *span_ptr.add(n).cast::<u32>() = self.fill_pattern;
                    n += size_of::<u32>();
                }

                virtual_memory::flush_instruction_cache(span_ptr, span_size);
            }
        }

        virtual_memory::protect_jit_memory(ProtectJitAccess::ReadExecute);

        // Unlink the block, clear its bookkeeping and return it so the caller can
        // decide whether to re-insert or delete it.
        let b = &mut *UnsafeRef::into_raw(block.remove().unwrap());
        b.used_bitvector_mut().fill(0);
        b.stop_bitvector_mut().fill(0);

        b.area_used.set(0);
        b.largest_unused_area.set(area_size);
        b.search_start.set(0);
        b.search_end.set(area_size);
        b.add_flags(JitAllocatorBlock::FLAG_EMPTY);
        b.clear_flags(JitAllocatorBlock::FLAG_DIRTY);

        b as *mut JitAllocatorBlock
    }

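    /// Resets the allocator, invalidating (and eventually unmapping) all memory
    /// previously allocated from it.
    ///
    /// With [`ResetPolicy::Soft`] (and `immediate_release` disabled) the first block of
    /// each pool is wiped and kept for reuse; with [`ResetPolicy::Hard`] every block is
    /// released back to the system.
    ///
    /// # Safety
    ///
    /// The caller must ensure that no code obtained from this allocator is referenced
    /// or executed after the reset.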
    pub unsafe fn reset(&mut self, reset_policy: ResetPolicy) {
        self.tree.clear();

        let pool_count = self.pools.len();

        for pool_id in 0..pool_count {
            let pool = unsafe { &mut *self.pools[pool_id] };

            let mut cursor = pool.blocks.cursor_mut();
            cursor.move_next();

            unsafe {
                // On a soft reset (unless `immediate_release` is requested) the first
                // block of the pool is wiped clean and kept, so the next allocation can
                // reuse its mapping instead of mapping fresh memory.
                let mut kept_block = null_mut();

                if reset_policy != ResetPolicy::Hard
                    && !self.options.immediate_release
                    && !cursor.is_null()
                {
                    kept_block = self.wipe_out_block(&mut cursor);
                }

                while !cursor.is_null() {
                    // `remove` already advances the cursor to the next element.
                    let block = UnsafeRef::into_raw(cursor.remove().unwrap());
                    self.delete_block(block);
                }

                pool.reset();

                if !kept_block.is_null() {
                    self.insert_block(kept_block);
                    pool.empty_block_count = 1;
                }
            }
        }
    }

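    /// Allocates `size` bytes of executable memory, rounded up to the allocator
    /// granularity.
    ///
    /// On success returns two pointers to the same memory: a read-execute pointer
    /// (`rx`) and a read-write pointer (`rw`). Returns [`Error::InvalidArgument`] for a
    /// zero size and [`Error::TooLarge`] for sizes that cannot be tracked internally.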
    pub fn alloc(&mut self, size: usize) -> Result<(*const u8, *mut u8), Error> {
        const NO_INDEX: u32 = u32::MAX;

        let size = align_up(size, self.granularity);

        if size == 0 {
            return Err(Error::InvalidArgument);
        }

        if size > u32::MAX as usize / 2 {
            return Err(Error::TooLarge);
        }

        unsafe {
            let pool_id = self.size_to_pool_id(size);
            let pool = &mut *self.pools[pool_id];

            let mut area_index = NO_INDEX;
            let area_size = pool.area_size_from_byte_size(size);

            let mut block = pool.blocks.cursor();
            block.move_next();

            if let Some(initial) = block.get().map(|x| x as *const JitAllocatorBlock) {
                loop {
                    let b = block.get().unwrap();

                    if b.area_available() >= area_size {
                        if b.is_dirty() || b.largest_unused_area() >= area_size {
                            let mut it = BitVectorRangeIterator::<0>::new(
                                b.used_bitvector(),
                                pool.bit_word_count_from_area_size(b.area_size()),
                                b.search_start() as _,
                                b.search_end() as _,
                            );

                            let mut range_start;
                            let mut range_end = b.area_size() as usize;

                            let mut search_start = usize::MAX;
                            let mut largest_area = 0;

                            while let Some(range) = it.next_range(area_size as _) {
                                range_start = range.start as _;
                                range_end = range.end as _;

                                let range_size = range_end - range_start;

                                if range_size >= area_size as usize {
                                    area_index = range_start as _;
                                    break;
                                }

                                search_start = search_start.min(range_start);
                                largest_area = largest_area.max(range_size);
                            }

                            if area_index != NO_INDEX {
                                break;
                            }

                            if search_start != usize::MAX {
                                let search_end = range_end;

                                b.search_start.set(search_start as _);
                                b.search_end.set(search_end as _);
                                b.largest_unused_area.set(largest_area as _);
                                b.clear_flags(JitAllocatorBlock::FLAG_DIRTY);
                            }
                        }
                    }

                    block.move_next();

                    if block.get().map(|x| x as *const _) == Some(initial) {
                        break;
                    }

                    if block.is_null() {
                        break;
                    }
                }
            }

            let mut block = block.get();

            if area_index == NO_INDEX {
                let block_size = self.calculate_ideal_block_size(pool, size);

                {
                    let nblock = self.new_block(pool, block_size)?;

                    area_index = 0;

                    nblock.search_start.set(area_size as _);
                    nblock
                        .largest_unused_area
                        .set(nblock.area_size() - area_size);

                    let nblock = Box::into_raw(nblock);

                    self.insert_block(nblock);

                    block = Some(&*nblock);
                }
            } else if (block.unwrap().flags() & JitAllocatorBlock::FLAG_EMPTY) != 0 {
                pool.empty_block_count -= 1;
                block.unwrap().clear_flags(JitAllocatorBlock::FLAG_EMPTY);
            }

            self.allocation_count += 1;

            let block = block.unwrap();

            block.mark_allocated_area(area_index, area_index + area_size);

            let offset = pool.byte_size_from_area_size(area_index);

            Ok((block.rx_ptr().add(offset), block.rw_ptr().add(offset)))
        }
    }

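    /// Releases the allocation that starts at `rx_ptr`, returning its memory to the
    /// allocator. Depending on `immediate_release` and the pool state, a block that
    /// becomes empty may be returned to the system.
    ///
    /// # Safety
    ///
    /// `rx_ptr` must be a pointer previously returned by [`alloc`](Self::alloc) that has
    /// not been released yet, and the memory must no longer be executed.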
    pub unsafe fn release(&mut self, rx_ptr: *const u8) -> Result<(), Error> {
        if rx_ptr.is_null() {
            return Err(Error::InvalidArgument);
        }

        let block = self.tree.find(&BlockKey {
            rxptr: rx_ptr,
            block_size: 0,
        });

        let Some(block) = block.get() else {
            return Err(Error::InvalidState);
        };

        unsafe {
            let pool = &mut *block.pool;

            let offset = rx_ptr as usize - block.rx_ptr() as usize;

            let area_index = (offset >> pool.granularity_log2 as usize) as u32;
            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_index as _, true) as u32 + 1;
            let area_size = area_end - area_index;

            self.allocation_count -= 1;

            block.mark_released_area(area_index, area_end);

            if self.options.fill_unused_memory {
                let span_ptr = block
                    .rw_ptr()
                    .add(area_index as usize * pool.granularity as usize);
                let span_size = area_size as usize * pool.granularity as usize;

                protect_jit_memory(ProtectJitAccess::ReadWrite);
                fill_pattern(span_ptr, self.fill_pattern, span_size);
                protect_jit_memory(ProtectJitAccess::ReadExecute);
                flush_instruction_cache(span_ptr, span_size);
            }

            if block.area_used() == 0 {
                if pool.empty_block_count != 0 || self.options.immediate_release {
                    let mut cursor = pool.blocks.cursor_mut_from_ptr(block);
                    let block = self.remove_block(&mut cursor);

                    self.delete_block(block);
                } else {
                    pool.empty_block_count += 1;
                }
            }
        }

        Ok(())
    }
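
    /// Shrinks the allocation that starts at `rx_ptr` to `new_size` bytes (rounded up
    /// to the granularity). A `new_size` of zero releases the allocation entirely.
    ///
    /// # Safety
    ///
    /// `rx_ptr` must be a pointer previously returned by [`alloc`](Self::alloc), and the
    /// released tail of the allocation must no longer be executed.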
    pub unsafe fn shrink(&mut self, rx_ptr: *const u8, new_size: usize) -> Result<(), Error> {
        if rx_ptr.is_null() {
            return Err(Error::InvalidArgument);
        }

        if new_size == 0 {
            return self.release(rx_ptr);
        }

        let Some(block) = self
            .tree
            .find(&BlockKey {
                rxptr: rx_ptr,
                block_size: 0,
            })
            .get()
        else {
            return Err(Error::InvalidArgument);
        };

        unsafe {
            let pool = &mut *block.pool;
            let offset = rx_ptr as usize - block.rx_ptr() as usize;
            let area_start = (offset >> pool.granularity_log2 as usize) as u32;

            let is_used = bit_vector_get_bit(block.used_bitvector(), area_start as _);

            if !is_used {
                return Err(Error::InvalidArgument);
            }

            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_start as _, true) as u32 + 1;

            let area_prev_size = area_end - area_start;
            let area_shrunk_size = pool.area_size_from_byte_size(new_size);

            if area_shrunk_size > area_prev_size {
                return Err(Error::InvalidState);
            }

            let area_diff = area_prev_size - area_shrunk_size;

            if area_diff != 0 {
                block.mark_shrunk_area(area_start + area_shrunk_size, area_end);

                if self.options.fill_unused_memory {
                    // Fill only the released tail of the allocation, not the part that
                    // remains in use.
                    let span_ptr = block
                        .rw_ptr()
                        .add((area_start + area_shrunk_size) as usize * pool.granularity as usize);
                    let span_size = area_diff as usize * pool.granularity as usize;

                    protect_jit_memory(ProtectJitAccess::ReadWrite);
                    fill_pattern(span_ptr, self.fill_pattern, span_size);
                    protect_jit_memory(ProtectJitAccess::ReadExecute);
                    flush_instruction_cache(span_ptr, span_size);
                }
            }
        }

        Ok(())
    }

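    /// Looks up the allocation containing `rx_ptr` and returns its `rx` base pointer,
    /// `rw` base pointer and size in bytes.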
    pub fn query(&mut self, rx_ptr: *const u8) -> Result<(*const u8, *mut u8, usize), Error> {
        let Some(block) = self
            .tree
            .find(&BlockKey {
                rxptr: rx_ptr,
                block_size: 0,
            })
            .get()
        else {
            return Err(Error::InvalidArgument);
        };

        unsafe {
            let pool = &mut *block.pool;
            let offset = rx_ptr as usize - block.rx_ptr() as usize;

            let area_start = (offset >> pool.granularity_log2 as usize) as u32;

            let is_used = bit_vector_get_bit(block.used_bitvector(), area_start as _);

            if !is_used {
                return Err(Error::InvalidArgument);
            }

            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_start as _, true) as u32 + 1;
            let byte_offset = pool.byte_size_from_area_size(area_start);
            let byte_size = pool.byte_size_from_area_size(area_end - area_start);

            Ok((
                block.rx_ptr().add(byte_offset),
                block.rw_ptr().add(byte_offset),
                byte_size,
            ))
        }
    }
}

#[inline]
unsafe fn fill_pattern(mem: *mut u8, pattern: u32, size_in_bytes: usize) {
    let n = size_in_bytes / 4;
    let p = mem as *mut u32;

    for i in 0..n {
        p.add(i).write(pattern);
    }
}

unsafe impl Send for JitAllocator {}
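
// A minimal usage sketch kept as a test. It assumes the crate's `virtual_memory`
// backend can map (dual-mapped) executable memory in the test environment and that
// tests run with `std` available; it only exercises the public alloc/query/release
// round trip.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alloc_query_release_roundtrip() {
        let mut allocator = JitAllocator::new(JitAllocatorOptions::default());

        // Allocate a small span; `alloc` returns (rx, rw) pointers to the same memory.
        let (rx, rw) = allocator.alloc(64).ok().expect("allocation failed");
        assert!(!rx.is_null());
        assert!(!rw.is_null());

        // `query` recovers the span from the rx pointer alone.
        let (qrx, _qrw, size) = allocator.query(rx).ok().expect("query failed");
        assert_eq!(qrx, rx);
        assert!(size >= 64);

        // Release the allocation again.
        unsafe {
            assert!(allocator.release(rx).is_ok());
        }
    }
}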