use crate::util::virtual_memory::{
    self, alloc, alloc_dual_mapping, flush_instruction_cache, protect_jit_memory, release,
    release_dual_mapping, DualMapping, MemoryFlags, ProtectJitAccess,
};
use crate::util::{
    align_down, align_up, bit_vector_clear, bit_vector_fill, bit_vector_get_bit,
    bit_vector_index_of, bit_vector_set_bit,
};
use crate::AsmError;
use alloc::vec::Vec;
use core::cell::{Cell, UnsafeCell};
use core::mem::size_of;
use core::ops::Range;
use core::ptr::null_mut;

/// Controls how aggressively [`JitAllocator::reset`] releases memory back to the system.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(u32)]
pub enum ResetPolicy {
    /// Soft reset: keeps one block per pool around (unless `immediate_release` is set)
    /// so the next allocation is cheap.
    Soft = 0,

    /// Hard reset: releases every block unconditionally.
    Hard = 1,
}

/// Parameters that control the behavior of [`JitAllocator`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct JitAllocatorOptions {
    /// Use a dual mapping (separate read-write and read-execute views of the same
    /// memory) when allocating blocks.
    pub use_dual_mapping: bool,
    /// Maintain multiple pools with increasing granularity so allocations of
    /// different sizes do not share the same blocks.
    pub use_multiple_pools: bool,
    /// Fill unused and released memory with a fill pattern.
    pub fill_unused_memory: bool,
    /// Release empty blocks back to the operating system immediately instead of
    /// keeping one around for reuse.
    pub immediate_release: bool,
    /// Custom fill pattern; when `None`, the platform default is used.
    pub custom_fill_pattern: Option<u32>,

    /// Base block size; values that are not a power of two in the 64 KiB..=256 MiB
    /// range fall back to the platform's page granularity.
    pub block_size: u32,
    /// Allocation granularity; values that are not a power of two in the 64..=256
    /// range fall back to 64 bytes.
    pub granularity: u32,
}

impl Default for JitAllocatorOptions {
    fn default() -> Self {
        Self {
            use_dual_mapping: true,
            use_multiple_pools: true,
            fill_unused_memory: true,
            immediate_release: false,
            custom_fill_pattern: None,
            block_size: 0,
            granularity: 0,
        }
    }
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
const DEFAULT_FILL_PATTERN: u32 = 0xCCCCCCCC;
#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
const DEFAULT_FILL_PATTERN: u32 = 0x0;

const MULTI_POOL_COUNT: usize = 3;

const MIN_GRANULARITY: usize = 64;

const MAX_BLOCK_SIZE: usize = 32 * 1024 * 1024;

struct BitVectorRangeIterator<'a, const B: u32> {
    slice: &'a [u32],
    idx: usize,
    end: usize,
    bit_word: u32,
}

const BIT_WORD_SIZE: usize = core::mem::size_of::<u32>() * 8;

impl<'a, const B: u32> BitVectorRangeIterator<'a, B> {
    const XOR_MASK: u32 = if B == 0 { u32::MAX } else { 0 };

    fn from_slice_and_nbitwords(data: &'a [u32], num_bit_words: usize) -> Self {
        Self::new(data, num_bit_words, 0, num_bit_words * BIT_WORD_SIZE)
    }

    fn new(data: &'a [u32], _num_bit_words: usize, start: usize, end: usize) -> Self {
        let idx = align_down(start, BIT_WORD_SIZE);
        let slice = &data[idx / BIT_WORD_SIZE..];

        let mut bit_word = 0;

        if idx < end {
            bit_word =
                (slice[0] ^ Self::XOR_MASK) & (u32::MAX << (start as u32 % BIT_WORD_SIZE as u32));
        }

        Self {
            slice,
            idx,
            end,
            bit_word,
        }
    }

    fn next_range(&mut self, range_hint: u32) -> Option<Range<u32>> {
        while self.bit_word == 0 {
            self.idx += BIT_WORD_SIZE;

            if self.idx >= self.end {
                return None;
            }

            self.slice = &self.slice[1..];
            self.bit_word = self.slice[0] ^ Self::XOR_MASK;
        }

        let i = self.bit_word.trailing_zeros();
        let start = self.idx as u32 + i;
        self.bit_word = !(self.bit_word ^ !(u32::MAX << i));
        let mut end;
        if self.bit_word == 0 {
            end = (self.idx as u32 + BIT_WORD_SIZE as u32).min(self.end as _);

            while end.wrapping_sub(start) < range_hint {
                self.idx += BIT_WORD_SIZE;

                if self.idx >= self.end {
                    break;
                }

                self.slice = &self.slice[1..];
                self.bit_word = self.slice[0] ^ Self::XOR_MASK;

                if self.bit_word != u32::MAX {
                    let j = self.bit_word.trailing_zeros();
                    end = (self.idx as u32 + j).min(self.end as _);
                    self.bit_word = !(self.bit_word ^ !(u32::MAX << j));
                    break;
                }

                end = (self.idx as u32 + BIT_WORD_SIZE as u32).min(self.end as _);
                self.bit_word = 0;
                continue;
            }

            Some(start..end)
        } else {
            let j = self.bit_word.trailing_zeros();
            end = (self.idx as u32 + j).min(self.end as _);

            self.bit_word = !(self.bit_word ^ !(u32::MAX << j));

            Some(start..end)
        }
    }
}

impl<'a> Iterator for BitVectorRangeIterator<'a, 0> {
    type Item = Range<u32>;

    fn next(&mut self) -> Option<Self::Item> {
        self.next_range(u32::MAX)
    }
}
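
// Note on `BitVectorRangeIterator`: for `B == 0` it walks runs of *clear* bits
// (free areas in the allocator's used-bitvector); for any other `B` it walks runs
// of set bits. A minimal sketch of the expected behavior (hypothetical test, not
// part of the original file): with a single word `0b1100`, the clear-bit ranges
// are `0..2` and `4..32`:
//
//     let words = [0b1100u32];
//     let ranges: Vec<_> =
//         BitVectorRangeIterator::<0>::from_slice_and_nbitwords(&words, 1).collect();
//     assert_eq!(ranges, vec![0..2, 4..32]);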

use intrusive_collections::{intrusive_adapter, rbtree::*};
use intrusive_collections::{KeyAdapter, UnsafeRef};

struct JitAllocatorBlock {
    node: Link,
    list_node: intrusive_collections::LinkedListLink,

    pool: *mut JitAllocatorPool,
    mapping: DualMapping,
    block_size: usize,

    flags: Cell<u32>,
    area_size: Cell<u32>,
    area_used: Cell<u32>,
    largest_unused_area: Cell<u32>,
    search_start: Cell<u32>,
    search_end: Cell<u32>,

    used_bitvector: UnsafeCell<alloc::vec::Vec<u32>>,
    stop_bitvector: UnsafeCell<alloc::vec::Vec<u32>>,
}

impl JitAllocatorBlock {
    const FLAG_EMPTY: u32 = 0x00000001;
    const FLAG_DIRTY: u32 = 0x00000002;
    const FLAG_DUAL_MAPPED: u32 = 0x00000004;

    fn pool(&self) -> *mut JitAllocatorPool {
        self.pool
    }

    fn rx_ptr(&self) -> *const u8 {
        self.mapping.rx
    }

    fn rw_ptr(&self) -> *mut u8 {
        self.mapping.rw
    }

    fn flags(&self) -> u32 {
        self.flags.get()
    }

    fn add_flags(&self, flags: u32) {
        self.flags.set(self.flags() | flags);
    }

    fn clear_flags(&self, flags: u32) {
        self.flags.set(self.flags() & !flags);
    }

    fn is_dirty(&self) -> bool {
        (self.flags() & Self::FLAG_DIRTY) != 0
    }

    fn block_size(&self) -> usize {
        self.block_size
    }

    fn area_used(&self) -> u32 {
        self.area_used.get()
    }

    fn area_size(&self) -> u32 {
        self.area_size.get()
    }

    fn largest_unused_area(&self) -> u32 {
        self.largest_unused_area.get()
    }

    fn search_start(&self) -> u32 {
        self.search_start.get()
    }

    fn search_end(&self) -> u32 {
        self.search_end.get()
    }

    fn used_bitvector(&self) -> &alloc::vec::Vec<u32> {
        unsafe { &*self.used_bitvector.get() }
    }

    fn stop_bitvector(&self) -> &alloc::vec::Vec<u32> {
        unsafe { &*self.stop_bitvector.get() }
    }

    fn used_bitvector_mut(&self) -> &mut alloc::vec::Vec<u32> {
        unsafe { &mut *self.used_bitvector.get() }
    }

    fn stop_bitvector_mut(&self) -> &mut alloc::vec::Vec<u32> {
        unsafe { &mut *self.stop_bitvector.get() }
    }

    fn area_available(&self) -> u32 {
        self.area_size() - self.area_used()
    }

    fn mark_allocated_area(&self, allocated_area_start: u32, allocated_area_end: u32) {
        let allocated_area_size = allocated_area_end - allocated_area_start;

        bit_vector_fill(
            self.used_bitvector_mut(),
            allocated_area_start as _,
            allocated_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            allocated_area_end as usize - 1,
            true,
        );

        unsafe {
            (*self.pool).total_area_used += allocated_area_size as usize;
        }

        self.area_used
            .set(self.area_used() + allocated_area_size as u32);

        if self.area_available() == 0 {
            self.search_start.set(self.area_size());
            self.search_end.set(0);
            self.largest_unused_area.set(0);
            self.clear_flags(Self::FLAG_DIRTY);
        } else {
            if self.search_start.get() == allocated_area_start {
                self.search_start.set(allocated_area_end as _);
            }

            if self.search_end.get() == allocated_area_end {
                self.search_end.set(allocated_area_start as _);
            }

            self.add_flags(Self::FLAG_DIRTY);
        }
    }

    fn mark_released_area(&self, released_area_start: u32, released_area_end: u32) {
        let released_area_size = released_area_end - released_area_start;

        unsafe {
            (*self.pool).total_area_used -= released_area_size as usize;
        }

        self.area_used
            .set(self.area_used() - released_area_size as u32);
        self.search_start
            .set(self.search_start.get().min(released_area_start));
        self.search_end
            .set(self.search_end.get().max(released_area_end));

        bit_vector_clear(
            self.used_bitvector_mut(),
            released_area_start as _,
            released_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            released_area_end as usize - 1,
            false,
        );

        if self.area_used() == 0 {
            self.search_start.set(0);
            self.search_end.set(self.area_size());
            self.largest_unused_area.set(self.area_size());
            self.add_flags(Self::FLAG_EMPTY);
            self.clear_flags(Self::FLAG_DIRTY);
        } else {
            self.add_flags(Self::FLAG_DIRTY);
        }
    }

    fn mark_shrunk_area(&self, shrunk_area_start: u32, shrunk_area_end: u32) {
        let shrunk_area_size = shrunk_area_end - shrunk_area_start;

        assert!(shrunk_area_start != 0);
        assert!(shrunk_area_end != self.area_size());

        unsafe {
            (*self.pool).total_area_used -= shrunk_area_size as usize;
        }

        self.area_used.set(self.area_used() - shrunk_area_size);
        self.search_start
            .set(self.search_start.get().min(shrunk_area_start));
        self.search_end
            .set(self.search_end.get().max(shrunk_area_end));

        bit_vector_clear(
            self.used_bitvector_mut(),
            shrunk_area_start as _,
            shrunk_area_size as _,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            shrunk_area_end as usize - 1,
            false,
        );
        bit_vector_set_bit(
            self.stop_bitvector_mut(),
            shrunk_area_start as usize - 1,
            true,
        );

        self.add_flags(Self::FLAG_DIRTY);
    }
}

impl PartialOrd for JitAllocatorBlock {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.rx_ptr().partial_cmp(&other.rx_ptr())
    }
}

impl Ord for JitAllocatorBlock {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.rx_ptr().cmp(&other.rx_ptr())
    }
}

impl PartialEq for JitAllocatorBlock {
    fn eq(&self, other: &Self) -> bool {
        self.rx_ptr() == other.rx_ptr()
    }
}

impl Eq for JitAllocatorBlock {}

use intrusive_collections::linked_list::LinkedList;

intrusive_adapter!(JitAllocatorBlockAdapter = UnsafeRef<JitAllocatorBlock> : JitAllocatorBlock { node: Link });
intrusive_adapter!(BlockListAdapter = UnsafeRef<JitAllocatorBlock> : JitAllocatorBlock { list_node: intrusive_collections::LinkedListLink });

struct BlockKey {
    rxptr: *const u8,
    block_size: u32,
}

impl PartialEq for BlockKey {
    fn eq(&self, other: &Self) -> bool {
        self.rxptr == other.rxptr
    }
}

impl Eq for BlockKey {}

impl PartialOrd for BlockKey {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for BlockKey {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        let addr_off = other.rxptr as usize + other.block_size as usize;

        if addr_off <= self.rxptr as usize {
            core::cmp::Ordering::Less
        } else if other.rxptr > self.rxptr {
            core::cmp::Ordering::Greater
        } else {
            core::cmp::Ordering::Equal
        }
    }
}

impl<'a> KeyAdapter<'a> for JitAllocatorBlockAdapter {
    type Key = BlockKey;

    fn get_key(
        &self,
        value: &'a <Self::PointerOps as intrusive_collections::PointerOps>::Value,
    ) -> Self::Key {
        BlockKey {
            rxptr: value.rx_ptr(),
            block_size: value.block_size as _,
        }
    }
}

struct JitAllocatorPool {
    blocks: LinkedList<BlockListAdapter>,
    cursor: *mut JitAllocatorBlock,

    block_count: u32,
    granularity: u16,
    granularity_log2: u8,
    empty_block_count: u8,
    total_area_size: usize,
    total_area_used: usize,
    total_overhead_bytes: usize,
}

impl JitAllocatorPool {
    fn new(granularity: u32) -> Self {
        let granularity_log2 = granularity.trailing_zeros() as u8;
        let granularity = granularity as u16;

        Self {
            blocks: LinkedList::new(BlockListAdapter::new()),
            cursor: core::ptr::null_mut(),
            block_count: 0,
            granularity,
            granularity_log2,
            empty_block_count: 0,
            total_area_size: 0,
            total_area_used: 0,
            total_overhead_bytes: 0,
        }
    }

    fn reset(&mut self) {
        self.blocks.clear();
        self.cursor = core::ptr::null_mut();
        self.block_count = 0;
        self.total_area_size = 0;
        self.total_area_used = 0;
        self.total_overhead_bytes = 0;
    }

    fn byte_size_from_area_size(&self, area_size: u32) -> usize {
        area_size as usize * self.granularity as usize
    }

    fn area_size_from_byte_size(&self, byte_size: usize) -> u32 {
        ((byte_size + self.granularity as usize - 1) >> self.granularity_log2) as u32
    }

    fn bit_word_count_from_area_size(&self, area_size: u32) -> usize {
        align_up(area_size as _, 32) / 32
    }
}
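
// Sketch of `JitAllocatorPool`'s size conversions with hypothetical numbers,
// assuming a pool granularity of 64 bytes (`granularity_log2 == 6`):
// `area_size_from_byte_size(100)` rounds 100 bytes up to 2 areas, and
// `byte_size_from_area_size(2)` maps those 2 areas back to 128 bytes.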

use alloc::boxed::Box;

/// An allocator of executable (JIT) memory, backed by blocks of virtual memory.
pub struct JitAllocator {
    options: JitAllocatorOptions,
    block_size: usize,
    granularity: usize,
    fill_pattern: u32,

    allocation_count: usize,
    tree: RBTree<JitAllocatorBlockAdapter>,
    pools: Box<[*mut JitAllocatorPool]>,
}

impl JitAllocator {
    /// Creates a new `JitAllocator` configured by `params`.
    pub fn new(params: JitAllocatorOptions) -> Box<Self> {
        let vm_info = virtual_memory::info();

        let mut block_size = params.block_size;
        let mut granularity = params.granularity;

        let mut pool_count = 1;

        if params.use_multiple_pools {
            pool_count = MULTI_POOL_COUNT;
        }

        if block_size < 64 * 1024 || block_size > 256 * 1024 * 1024 || !block_size.is_power_of_two()
        {
            block_size = vm_info.page_granularity as _;
        }

        if granularity < 64 || granularity > 256 || !granularity.is_power_of_two() {
            granularity = MIN_GRANULARITY as _;
        }

        let fill_pattern = params.custom_fill_pattern.unwrap_or(DEFAULT_FILL_PATTERN);

        let mut pools = Vec::with_capacity(pool_count);

        for _ in 0..pool_count {
            pools.push(Box::into_raw(Box::new(JitAllocatorPool::new(granularity))));
        }

        Box::new(Self {
            options: params,
            block_size: block_size as _,
            granularity: granularity as _,
            fill_pattern,
            allocation_count: 0,
            tree: RBTree::new(JitAllocatorBlockAdapter::new()),
            pools: pools.into_boxed_slice(),
        })
    }

    fn size_to_pool_id(&self, size: usize) -> usize {
        let mut pool_id = self.pools.len() - 1;
        let mut granularity = self.granularity << pool_id;

        while pool_id != 0 {
            if align_up(size, granularity) == size {
                break;
            }

            pool_id -= 1;
            granularity >>= 1;
        }

        pool_id
    }

    fn bitvector_size_to_byte_size(area_size: u32) -> usize {
        ((area_size as usize + 32 - 1) / 32) * size_of::<u32>()
    }

    fn calculate_ideal_block_size(
        &self,
        pool: *mut JitAllocatorPool,
        allocation_size: usize,
    ) -> usize {
        unsafe {
            let last = (*pool).blocks.back();

            let mut block_size = if !last.is_null() {
                last.get().unwrap().block_size()
            } else {
                self.block_size
            };

            if block_size < MAX_BLOCK_SIZE {
                block_size *= 2;
            }

            if allocation_size > block_size {
                block_size = align_up(allocation_size, block_size);

                if block_size < allocation_size {
                    return 0;
                }
            }

            block_size
        }
    }

    unsafe fn new_block(
        &mut self,
        pool: *mut JitAllocatorPool,
        block_size: usize,
    ) -> Result<Box<JitAllocatorBlock>, AsmError> {
        let area_size = (block_size + (*pool).granularity as usize - 1) >> (*pool).granularity_log2;
        let num_bit_words = (area_size + 32 - 1) / 32;

        let mut block = Box::new(JitAllocatorBlock {
            node: Link::new(),
            list_node: intrusive_collections::LinkedListLink::new(),
            pool,
            mapping: DualMapping {
                rx: null_mut(),
                rw: null_mut(),
            },
            block_size: block_size as _,
            flags: Cell::new(0),
            area_size: Cell::new(0),
            area_used: Cell::new(0),
            largest_unused_area: Cell::new(area_size as _),
            search_end: Cell::new(area_size as _),
            search_start: Cell::new(0),
            used_bitvector: UnsafeCell::new({
                // Each `u32` word tracks 32 areas, so `num_bit_words` words cover the block.
                let mut v = Vec::with_capacity(num_bit_words);
                v.resize(num_bit_words, 0);
                v
            }),
            stop_bitvector: UnsafeCell::new({
                let mut v = Vec::with_capacity(num_bit_words);
                v.resize(num_bit_words, 0);
                v
            }),
        });
        let mut block_flags = 0;
        let virt_mem = if self.options.use_dual_mapping {
            block_flags |= JitAllocatorBlock::FLAG_DUAL_MAPPED;
            alloc_dual_mapping(block_size, MemoryFlags::ACCESS_RWX.into())?
        } else {
            let rx = alloc(block_size, MemoryFlags::ACCESS_RWX.into())?;
            DualMapping { rx, rw: rx }
        };

        if self.options.fill_unused_memory {
            protect_jit_memory(ProtectJitAccess::ReadWrite);
            fill_pattern(virt_mem.rw, self.fill_pattern, block_size);
            protect_jit_memory(ProtectJitAccess::ReadExecute);
            flush_instruction_cache(virt_mem.rx, block_size);
        }

        block.area_size.set(area_size as _);
        block.mapping = virt_mem;
        block.flags.set(block_flags);
        Ok(block)
    }

    unsafe fn delete_block(&mut self, block: *mut JitAllocatorBlock) {
        let mut block = Box::from_raw(block);
        if (block.flags() & JitAllocatorBlock::FLAG_DUAL_MAPPED) != 0 {
            let _ = release_dual_mapping(&mut block.mapping, block.block_size);
        } else {
            let _ = release(block.mapping.rx as _, block.block_size);
        }

        drop(block);
    }

    unsafe fn insert_block(&mut self, block: *mut JitAllocatorBlock) {
        let b = &mut *block;
        let pool = &mut *b.pool();

        if pool.cursor.is_null() {
            pool.cursor = block;
        }

        self.tree.insert(UnsafeRef::from_raw(block));
        pool.blocks.push_front(UnsafeRef::from_raw(block));

        pool.block_count += 1;
        pool.total_area_size += b.area_size() as usize;

        pool.total_overhead_bytes +=
            size_of::<JitAllocatorBlock>() + Self::bitvector_size_to_byte_size(b.area_size()) * 2;
    }

    unsafe fn remove_block(
        &mut self,
        block: &mut intrusive_collections::linked_list::CursorMut<'_, BlockListAdapter>,
    ) -> *mut JitAllocatorBlock {
        let b = block.get().unwrap();
        let pool = &mut *b.pool();

        if pool.cursor == b as *const JitAllocatorBlock as *mut _ {
            pool.cursor = if let Some(block) = block.peek_prev().get() {
                block as *const _ as *mut _
            } else if let Some(block) = block.peek_next().get() {
                block as *const _ as *mut _
            } else {
                null_mut()
            };
        }

        if let Entry::Occupied(mut c) = self.tree.entry(&BlockKey {
            rxptr: b.rx_ptr(),
            block_size: b.block_size as _,
        }) {
            assert_eq!(
                UnsafeRef::into_raw(c.remove().unwrap()),
                b as *const _ as *mut JitAllocatorBlock,
                "blocks are not the same"
            );
        }

        let area_size = b.area_size();

        pool.block_count -= 1;
        pool.total_area_size -= area_size as usize;

        pool.total_overhead_bytes -=
            size_of::<JitAllocatorBlock>() + Self::bitvector_size_to_byte_size(area_size) * 2;

        UnsafeRef::into_raw(block.remove().unwrap())
    }

    unsafe fn wipe_out_block(
        &mut self,
        block: &mut intrusive_collections::linked_list::CursorMut<'_, BlockListAdapter>,
    ) {
        let b = block.get().unwrap();
        if (b.flags() & JitAllocatorBlock::FLAG_EMPTY) != 0 {
            return;
        }

        let pool = &mut *b.pool();

        let area_size = b.area_size();
        let granularity = pool.granularity;

        virtual_memory::protect_jit_memory(ProtectJitAccess::ReadWrite);

        if self.options.fill_unused_memory {
            let rw_ptr = b.rw_ptr();

            let it = BitVectorRangeIterator::from_slice_and_nbitwords(
                b.stop_bitvector(),
                pool.bit_word_count_from_area_size(b.area_size()),
            );

            for range in it {
                let span_ptr = rw_ptr.add(range.start as usize * granularity as usize);
                let span_size = (range.end as usize - range.start as usize) * granularity as usize;

                let mut n = 0;
                while n < span_size {
                    *span_ptr.add(n).cast::<u32>() = self.fill_pattern;
                    n += size_of::<u32>();
                }

                virtual_memory::flush_instruction_cache(span_ptr, span_size);
            }
        }

        virtual_memory::protect_jit_memory(ProtectJitAccess::ReadExecute);

        let b = &mut *UnsafeRef::into_raw(block.remove().unwrap());
        b.used_bitvector_mut().fill(0);
        b.stop_bitvector_mut().fill(0);

        b.area_used.set(0);
        b.largest_unused_area.set(area_size);
        b.search_start.set(0);
        b.search_end.set(area_size);
        b.add_flags(JitAllocatorBlock::FLAG_EMPTY);
        b.clear_flags(JitAllocatorBlock::FLAG_DIRTY);
    }

    /// Resets the allocator, releasing its blocks according to `reset_policy`.
    ///
    /// # Safety
    ///
    /// Invalidates every span previously returned by [`JitAllocator::alloc`].
    pub unsafe fn reset(&mut self, reset_policy: ResetPolicy) {
        self.tree.clear();

        let pool_count = self.pools.len();

        for pool_id in 0..pool_count {
            let pool = unsafe { &mut *self.pools[pool_id] };

            let mut cursor = pool.blocks.cursor_mut();
            cursor.move_next();
            let mut block_to_keep = false;
            if reset_policy != ResetPolicy::Hard && !self.options.immediate_release {
                block_to_keep = true;
                cursor.move_next();
            }
            unsafe {
                while !cursor.is_null() {
                    // `remove` already advances the cursor to the next element.
                    let block = UnsafeRef::into_raw(cursor.remove().unwrap());
                    self.delete_block(block);
                }

                pool.reset();

                if block_to_keep {
                    let mut front = pool.blocks.cursor_mut();
                    front.move_next();
                    self.wipe_out_block(&mut front);
                    pool.empty_block_count = 1;
                }
            }
        }
    }

    /// Allocates `size` bytes of executable memory and returns a [`Span`]
    /// describing the allocated region.
    pub fn alloc(&mut self, size: usize) -> Result<Span, AsmError> {
        const NO_INDEX: u32 = u32::MAX;

        let size = align_up(size, self.granularity);

        if size == 0 {
            return Err(AsmError::InvalidArgument);
        }

        if size > u32::MAX as usize / 2 {
            return Err(AsmError::TooLarge);
        }

        unsafe {
            let pool_id = self.size_to_pool_id(size);
            let pool = &mut *self.pools[pool_id];

            let mut area_index = NO_INDEX;
            let area_size = pool.area_size_from_byte_size(size);

            let mut block = pool.blocks.cursor();
            block.move_next();
            if let Some(initial) = block.get().map(|x| x as *const JitAllocatorBlock) {
                loop {
                    let b = block.get().unwrap();

                    if b.area_available() >= area_size {
                        if b.is_dirty() || b.largest_unused_area() >= area_size {
                            let mut it = BitVectorRangeIterator::<0>::new(
                                b.used_bitvector(),
                                pool.bit_word_count_from_area_size(b.area_size()),
                                b.search_start() as _,
                                b.search_end() as _,
                            );

                            let mut range_start;
                            let mut range_end = b.area_size() as usize;

                            let mut search_start = usize::MAX;
                            let mut largest_area = 0;

                            while let Some(range) = it.next_range(area_size as _) {
                                range_start = range.start as _;
                                range_end = range.end as _;

                                let range_size = range_end - range_start;

                                if range_size >= area_size as usize {
                                    area_index = range_start as _;
                                    break;
                                }

                                search_start = search_start.min(range_start);
                                largest_area = largest_area.max(range_size);
                            }

                            if area_index != NO_INDEX {
                                break;
                            }

                            if search_start != usize::MAX {
                                let search_end = range_end;

                                b.search_start.set(search_start as _);
                                b.search_end.set(search_end as _);
                                b.largest_unused_area.set(largest_area as _);
                                b.clear_flags(JitAllocatorBlock::FLAG_DIRTY);
                            }
                        }
                    }

                    block.move_next();

                    if block.get().map(|x| x as *const _) == Some(initial) {
                        break;
                    }

                    if block.is_null() {
                        break;
                    }
                }
            }

            let mut block = block.get();

            if area_index == NO_INDEX {
                let block_size = self.calculate_ideal_block_size(pool, size);

                {
                    let nblock = self.new_block(pool, block_size)?;

                    area_index = 0;

                    nblock.search_start.set(area_size as _);
                    nblock
                        .largest_unused_area
                        .set(nblock.area_size() - area_size);

                    let nblock = Box::into_raw(nblock);

                    self.insert_block(nblock);

                    block = Some(&*nblock);
                }
            } else if (block.unwrap().flags() & JitAllocatorBlock::FLAG_EMPTY) != 0 {
                pool.empty_block_count -= 1;
                block.unwrap().clear_flags(JitAllocatorBlock::FLAG_EMPTY);
            }

            self.allocation_count += 1;

            let block = block.unwrap();

            block.mark_allocated_area(area_index, area_index + area_size);

            let offset = pool.byte_size_from_area_size(area_index);

            let rx = block.rx_ptr().add(offset);
            let rw = block.rw_ptr().add(offset);

            Ok(Span {
                block: block as *const JitAllocatorBlock as _,
                rx,
                rw,
                size,
                icache_clean: true,
            })
        }
    }

    /// Releases memory previously allocated by [`JitAllocator::alloc`].
    ///
    /// # Safety
    ///
    /// `rx_ptr` must be the read-execute pointer of a live allocation made by this
    /// allocator.
    pub unsafe fn release(&mut self, rx_ptr: *const u8) -> Result<(), AsmError> {
        if rx_ptr.is_null() {
            return Err(AsmError::InvalidArgument);
        }

        let block = self.tree.find(&BlockKey {
            rxptr: rx_ptr,
            block_size: 0,
        });

        let Some(block) = block.get() else {
            return Err(AsmError::InvalidState);
        };

        unsafe {
            let pool = &mut *block.pool;

            let offset = rx_ptr as usize - block.rx_ptr() as usize;

            let area_index = (offset >> pool.granularity_log2 as usize) as u32;
            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_index as _, true) as u32 + 1;
            let area_size = area_end - area_index;

            self.allocation_count -= 1;

            block.mark_released_area(area_index, area_end);

            if self.options.fill_unused_memory {
                let span_ptr = block
                    .rw_ptr()
                    .add(area_index as usize * pool.granularity as usize);
                let span_size = area_size as usize * pool.granularity as usize;

                protect_jit_memory(ProtectJitAccess::ReadWrite);
                fill_pattern(span_ptr, self.fill_pattern, span_size);
                protect_jit_memory(ProtectJitAccess::ReadExecute);
                flush_instruction_cache(span_ptr, span_size);
            }

            if block.area_used() == 0 {
                if pool.empty_block_count != 0 || self.options.immediate_release {
                    let mut cursor = pool.blocks.cursor_mut_from_ptr(block);
                    let block = self.remove_block(&mut cursor);

                    self.delete_block(block);
                } else {
                    pool.empty_block_count += 1;
                }
            }
        }

        Ok(())
    }

    /// Shrinks an existing allocation to `new_size` bytes; a `new_size` of zero
    /// releases the allocation entirely.
    ///
    /// # Safety
    ///
    /// `rx_ptr` must be the read-execute pointer of a live allocation made by this
    /// allocator.
    pub unsafe fn shrink(&mut self, rx_ptr: *const u8, new_size: usize) -> Result<(), AsmError> {
        if rx_ptr.is_null() {
            return Err(AsmError::InvalidArgument);
        }

        if new_size == 0 {
            return self.release(rx_ptr);
        }

        let Some(block) = self
            .tree
            .find(&BlockKey {
                rxptr: rx_ptr,
                block_size: 0,
            })
            .get()
        else {
            return Err(AsmError::InvalidArgument);
        };

        unsafe {
            let pool = &mut *block.pool;
            let offset = rx_ptr as usize - block.rx_ptr() as usize;
            let area_start = (offset >> pool.granularity_log2 as usize) as u32;

            let is_used = bit_vector_get_bit(block.used_bitvector(), area_start as _);

            if !is_used {
                return Err(AsmError::InvalidArgument);
            }

            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_start as _, true) as u32 + 1;

            let area_prev_size = area_end - area_start;
            let area_shrunk_size = pool.area_size_from_byte_size(new_size);

            if area_shrunk_size > area_prev_size {
                return Err(AsmError::InvalidState);
            }

            let area_diff = area_prev_size - area_shrunk_size;

            if area_diff != 0 {
                block.mark_shrunk_area(area_start + area_shrunk_size, area_end);

                if self.options.fill_unused_memory {
                    // Fill only the tail that was just released, not the part that
                    // remains allocated.
                    let span_ptr = block
                        .rw_ptr()
                        .add((area_start + area_shrunk_size) as usize * pool.granularity as usize);
                    let span_size = area_diff as usize * pool.granularity as usize;

                    protect_jit_memory(ProtectJitAccess::ReadWrite);
                    fill_pattern(span_ptr, self.fill_pattern, span_size);
                    protect_jit_memory(ProtectJitAccess::ReadExecute);
                    flush_instruction_cache(span_ptr, span_size);
                }
            }
        }

        Ok(())
    }

    /// Returns the [`Span`] of the allocation that `rx_ptr` points into.
    pub fn query(&self, rx_ptr: *const u8) -> Result<Span, AsmError> {
        let Some(block) = self
            .tree
            .find(&BlockKey {
                rxptr: rx_ptr,
                block_size: 0,
            })
            .get()
        else {
            return Err(AsmError::InvalidArgument);
        };

        unsafe {
            let pool = &mut *block.pool;
            let offset = rx_ptr as usize - block.rx_ptr() as usize;

            let area_start = (offset >> pool.granularity_log2 as usize) as u32;

            let is_used = bit_vector_get_bit(block.used_bitvector(), area_start as _);

            if !is_used {
                return Err(AsmError::InvalidArgument);
            }

            let area_end =
                bit_vector_index_of(block.stop_bitvector(), area_start as _, true) as u32 + 1;
            let byte_offset = pool.byte_size_from_area_size(area_start);
            let byte_size = pool.byte_size_from_area_size(area_end - area_start);

            Ok(Span {
                rx: block.rx_ptr().add(byte_offset),
                rw: block.rw_ptr().add(byte_offset),
                size: byte_size,
                icache_clean: false,
                block: block as *const JitAllocatorBlock as _,
            })
        }
    }

    /// Runs `write_func` with JIT memory made writable, then restores execute
    /// protection and flushes the instruction cache. If `write_func` changed the
    /// span's size, the allocation is shrunk to the new size.
    ///
    /// # Safety
    ///
    /// `span` must describe a live allocation owned by this allocator.
    pub unsafe fn write(
        &mut self,
        span: &mut Span,
        mut write_func: impl FnMut(&mut Span),
    ) -> Result<(), AsmError> {
        let size = span.size();

        if size == 0 {
            return Ok(());
        }

        protect_jit_memory(ProtectJitAccess::ReadWrite);
        write_func(span);
        protect_jit_memory(ProtectJitAccess::ReadExecute);
        flush_instruction_cache(span.rx(), span.size());

        if span.size() != size {
            self.shrink(span.rx(), span.size)?;
        }

        Ok(())
    }

    /// Copies `slice` into the span at byte `offset` and flushes the instruction
    /// cache.
    ///
    /// # Safety
    ///
    /// `span` must describe a live allocation owned by this allocator and
    /// `offset + slice.len()` must not exceed its size.
    pub unsafe fn copy_from_slice(
        &mut self,
        span: &mut Span,
        offset: usize,
        slice: &[u8],
    ) -> Result<(), AsmError> {
        if slice.is_empty() {
            return Ok(());
        }

        protect_jit_memory(ProtectJitAccess::ReadWrite);
        span.rw()
            .add(offset)
            .copy_from_nonoverlapping(slice.as_ptr(), slice.len());
        protect_jit_memory(ProtectJitAccess::ReadExecute);
        flush_instruction_cache(span.rx(), span.size());
        Ok(())
    }
}

#[inline]
unsafe fn fill_pattern(mem: *mut u8, pattern: u32, size_in_bytes: usize) {
    let n = size_in_bytes / 4;

    let p = mem as *mut u32;

    for i in 0..n {
        p.add(i).write(pattern);
    }
}

unsafe impl Send for JitAllocator {}

/// Describes a region of memory handed out by [`JitAllocator::alloc`] or
/// [`JitAllocator::query`].
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub struct Span {
    rx: *const u8,
    rw: *mut u8,
    size: usize,
    block: *mut u8,
    icache_clean: bool,
}

impl Span {
    /// Returns the read-execute pointer of the span.
    pub const fn rx(&self) -> *const u8 {
        self.rx
    }

    /// Returns the read-write pointer of the span; equal to [`Self::rx`] when the
    /// allocator does not use dual mapping.
    pub const fn rw(&self) -> *mut u8 {
        self.rw
    }

    /// Returns the size of the span in bytes.
    pub const fn size(&self) -> usize {
        self.size
    }

    /// Returns whether the instruction cache is known to be clean for this span.
    pub fn is_icache_clean(&self) -> bool {
        self.icache_clean
    }

    /// Returns whether the span can be written to directly through [`Self::rw`].
    pub fn is_directly_writeable(&self) -> bool {
        !self.rw.is_null()
    }
}
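
// A minimal usage sketch (hypothetical test, not part of the original file,
// assuming a hosted target where `virtual_memory::alloc` succeeds): allocate a
// span, copy a `ret` instruction into it through the RW view, then release it
// via the RX pointer.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn alloc_write_release() {
        let mut allocator = JitAllocator::new(JitAllocatorOptions::default());

        // Sizes are rounded up to the pool granularity, so the span is at least 16 bytes.
        let Ok(mut span) = allocator.alloc(16) else {
            return;
        };
        assert!(span.size() >= 16);

        unsafe {
            // 0xC3 is the x86_64 `ret` encoding; written through the RW mapping.
            assert!(allocator.copy_from_slice(&mut span, 0, &[0xC3]).is_ok());
            assert!(allocator.release(span.rx()).is_ok());
        }
    }
}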