use super::alloc_frames;
use crate::formation::*;
use crate::{cls, frame_size, free_frames, MEM_CACHE_BOOT};
use crate::{current_cpu_id, SLAB_CACHES};
use alloc::alloc::dealloc;
use core::alloc::Layout;
use bitflags::bitflags;
use core::cmp::{max, min};
use core::fmt::{Debug, Formatter, Write};
use core::mem::forget;
use core::ops::Add;
use core::sync::atomic::AtomicUsize;
use doubly_linked_list::*;
use preprint::pprintln;
use spin::mutex::SpinMutex;
use spin::rwlock::RwLock;
use spin::Mutex;

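// Tuning parameters: how many objects each per-CPU cache holds, how many CPUs
// the allocator is built for, and how many completely free slabs a cache may
// keep before frames are reclaimed.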
const PER_CPU_OBJECTS: usize = 16;
const CPUS: usize = 8;
const FREE_LIST_MAX: usize = 16;

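// Statically allocated `ArrayCache`s used while bootstrapping the first two
// caches ("kmem_cache" and "array_cache"), before dynamic allocation works.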
const VAL: ArrayCache = ArrayCache::new();
static mut ARRAY_CACHE_FOR_BOOT: [ArrayCache; CPUS] = [VAL; CPUS];
static mut ARRAY_CACHE_FOR_ARRAY: [ArrayCache; CPUS] = [VAL; CPUS];
static mut ARRAY_CACHE_NODE_BOOT: ArrayCache = ArrayCache::new();
static mut ARRAY_CACHE_NODE_ARRAY: ArrayCache = ArrayCache::new();

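/// Per-cache state flags.
///
/// `SLAB_ON` means the slab descriptor and its free-list live inside the
/// slab's own frames; `SLAB_OFF` means they are allocated separately (used for
/// large objects). `DESTROY` marks a cache whose memory has been released.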
bitflags! {
    pub struct Flags: u8 {
        const SLAB_OFF = 0b0000_0000;
        const SLAB_ON = 0b0000_0001;
        const DESTROY = 0b0000_0010;
    }
}

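/// A snapshot of one cache's statistics, produced by `MemCache::get_cache_info`
/// and printed by `print_slab_system_info`.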
pub struct SlabInfo {
    pub cache_name: &'static str,
    pub object_size: u32,
    pub align: u32,
    pub per_frames: u32,
    pub per_objects: u32,
    pub total_objects: u32,
    pub used_objects: u32,
    pub limit: u32,
    pub batch_count: u32,
    pub local_objects: u32,
    pub shared_objects: u32,
}

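/// A cache of fixed-size objects, modelled after the Linux slab allocator.
///
/// Each cache owns one `ArrayCache` per CPU for its fast paths, a `CacheNode`
/// holding the partial/free/full slab lists plus a shared object pool, and the
/// slab geometry (`per_frames`, `per_objects`, colouring) derived from the
/// object size.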
#[derive(Debug)]
pub struct MemCache {
    array_cache: [*mut ArrayCache; CPUS],
    list: ListHead,
    per_objects: u32,
    per_frames: u32,
    align: u32,
    object_size: u32,
    color: u32,
    color_off: u32,
    color_next: u32,
    mem_cache_node: CacheNode,
    cache_name: &'static str,
    flags: Flags,
}
unsafe impl Sync for MemCache {}
unsafe impl Send for MemCache {}

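// Recover the enclosing `MemCache` / `Slab` from a pointer to its embedded
// `ListHead` (the intrusive-list `container_of` pattern).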
macro_rules! mut_ref_memcache {
    ($addr:expr) => {
        unsafe { &mut (*container_of!($addr as usize, MemCache, list)) }
    };
}
macro_rules! ref_memcache {
    ($addr:expr) => {
        unsafe { &(*container_of!($addr as usize, MemCache, list)) }
    };
}

macro_rules! mut_ref_slab {
    ($addr:expr) => {
        unsafe { &mut (*container_of!($addr as usize, Slab, list)) }
    };
}
macro_rules! ref_slab {
    ($addr:expr) => {
        unsafe { &(*container_of!($addr as usize, Slab, list)) }
    };
}

macro_rules! cache_layout {
    () => {
        Layout::from_size_align(core::mem::size_of::<MemCache>(), core::mem::align_of::<MemCache>())
            .unwrap()
    };
}
macro_rules! slab_layout {
    () => {
        Layout::from_size_align(core::mem::size_of::<Slab>(), core::mem::align_of::<Slab>())
            .unwrap()
    };
}
macro_rules! array_cache_layout {
    () => {
        Layout::from_size_align(core::mem::size_of::<ArrayCache>(), core::mem::align_of::<ArrayCache>())
            .unwrap()
    };
}

impl MemCache {
    pub const fn new() -> Self {
        Self {
            array_cache: [core::ptr::null_mut(); CPUS],
            list: ListHead::new(),
            per_objects: 0,
            per_frames: 0,
            align: 0,
            object_size: 0,
            color: 0,
            color_off: 0,
            color_next: 0,
            mem_cache_node: CacheNode::new(),
            cache_name: "",
            flags: Flags::empty(),
        }
    }
    pub fn print_info(&self) {
        let slab_info = self.get_cache_info();
        pprintln!(
            "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
            self.cache_name,
            self.object_size,
            self.align,
            self.per_frames,
            self.per_objects,
            slab_info.total_objects,
            slab_info.used_objects,
            PER_CPU_OBJECTS,
            PER_CPU_OBJECTS / 2,
            slab_info.local_objects,
            slab_info.shared_objects
        );
    }
    pub fn get_cache_info(&self) -> SlabInfo {
        let per_objects = self.per_objects as usize;
        let total = self.mem_cache_node.total_slabs() * per_objects;
        let used = self.mem_cache_node.used_objects(per_objects);
        let mut local = 0;
        for i in 0..CPUS {
            local += unsafe { (*self.array_cache[i]).inner.lock().avail };
        }
        let shared = unsafe { (*self.mem_cache_node.shared).inner.lock().avail };
        assert!(used as u32 >= local + shared);
        SlabInfo {
            cache_name: self.cache_name,
            object_size: self.object_size,
            align: self.align,
            per_frames: self.per_frames,
            per_objects: self.per_objects,
            total_objects: total as u32,
            used_objects: used as u32 - shared - local,
            limit: PER_CPU_OBJECTS as u32,
            batch_count: PER_CPU_OBJECTS as u32 / 2,
            local_objects: local,
            shared_objects: shared,
        }
    }

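    /// Compute the slab geometry for this cache: the frame order
    /// (`per_frames`), how many objects fit in one slab (`per_objects`), and
    /// how many cache-line-sized colour offsets are available (`color`). The
    /// order grows until the wasted space (`left_over`) drops below 1/8 of
    /// the slab size.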
    fn init_cache_object_num(&mut self) {
        let mut order = 0;
        let mut left_over = 0;
        loop {
            let total_size = frame_size() * (1 << order);
            let object_num = if self.flags == Flags::SLAB_OFF {
                total_size / self.object_size as usize
            } else {
                let mut object_num = (total_size - core::mem::size_of::<Slab>())
                    / (self.object_size as usize + core::mem::size_of::<u32>());
                loop {
                    let slab_align = slab_descriptor_align_size(object_num as u32, self.align);
                    if (slab_align + object_num as u32 * self.object_size) < total_size as u32 {
                        break;
                    }
                    object_num -= 1;
                }
                object_num
            };
            left_over = total_size - object_num * self.object_size as usize;
            if self.flags == Flags::SLAB_ON {
                left_over -= slab_descriptor_align_size(object_num as u32, self.align) as usize;
            }
            if left_over * 8 < total_size {
                self.per_objects = object_num as u32;
                self.per_frames = order;
                self.color = (left_over / cls()) as u32;
                break;
            }
            order += 1;
        }
        trace!(
            "left_over is {}, total_size is {}",
            left_over,
            frame_size() * (1 << self.per_frames)
        );
    }

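    /// Allocate one `ArrayCache` per CPU, plus the node-level shared pool,
    /// from the "array_cache" cache.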
    fn set_array_cache(&mut self) -> Result<(), SlabError> {
        for i in 0..CPUS {
            let array_cache_addr = get_array_cache()?;
            self.array_cache[i] = array_cache_addr as *mut ArrayCache;
            unsafe { (*self.array_cache[i]).inner.lock().init() };
        }
        self.mem_cache_node.set_array_cache()?;
        Ok(())
    }

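    /// Initialise the cache metadata. A non-zero power-of-two alignment is
    /// clamped to at least 8; anything else falls back to
    /// `size_of::<usize>()`. The slab descriptor is kept off-slab
    /// (`SLAB_OFF`) once a single object occupies an eighth of a frame or
    /// more.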
    fn init(&mut self, name: &'static str, object_size: u32, align: u32) -> Result<(), SlabError> {
        self.array_cache = [core::ptr::null_mut(); CPUS];
        self.mem_cache_node.init();
        self.cache_name = name;
        self.color_off = cls() as u32;
        self.align = if align.is_power_of_two() && align != 0 {
            max(align, 8)
        } else {
            core::mem::size_of::<usize>() as u32
        };
        self.object_size = align_to!(object_size, self.align);
        self.flags = if object_size * 8 >= frame_size() as u32 {
            Flags::SLAB_OFF
        } else {
            Flags::SLAB_ON
        };
        self.init_cache_object_num();
        Ok(())
    }
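    /// Allocate one object. The fast path pops from the current CPU's
    /// `ArrayCache`; when that is empty it is first refilled with
    /// `batch_count` objects from the shared pool or the slab lists.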
    pub fn alloc(&self) -> Result<*mut u8, SlabError> {
        if self.flags.contains(Flags::DESTROY) {
            panic!("cache has been destroyed");
        }
        let cpu_id = unsafe { current_cpu_id() };
        let array_cache = unsafe { &mut *self.array_cache[cpu_id] };
        let mut array_cache = array_cache.inner.lock();
        if array_cache.is_empty() {
            let mut new_objects = [0usize; PER_CPU_OBJECTS];
            let mem_cache_ptr = self as *const MemCache as *mut MemCache;
            let batch_count = array_cache.batch_count as usize;
            self.mem_cache_node
                .alloc(mem_cache_ptr, &mut new_objects[0..batch_count])?;
            array_cache.push(&new_objects[0..batch_count]);
        }
        Ok(array_cache.get())
    }

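    /// Return one object to the current CPU's `ArrayCache`. When the per-CPU
    /// cache is full, `batch_count` objects are flushed to the shared pool
    /// (and from there back into the slabs) first.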
    pub fn dealloc(&self, addr: *mut u8) -> Result<(), SlabError> {
        if self.flags.contains(Flags::DESTROY) {
            panic!("cache has been destroyed");
        }
        let cpu_id = unsafe { current_cpu_id() };
        let array_cache = unsafe { &mut *self.array_cache[cpu_id] };
        let mut array_cache = array_cache.inner.lock();
        if array_cache.is_full() {
            let mut objects = [0usize; PER_CPU_OBJECTS];
            let batch_count = array_cache.batch_count as usize;
            array_cache.pop(&mut objects[0..batch_count]);
            self.mem_cache_node.dealloc(&objects[0..batch_count]);
        }
        array_cache.put(addr);
        Ok(())
    }
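    /// Release everything owned by this cache: the per-CPU `ArrayCache`s, all
    /// slabs, and finally the `MemCache` descriptor itself, which goes back
    /// to the boot cache. Any later `alloc`/`dealloc` call will panic.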
    pub fn destroy(&mut self) {
        for i in 0..CPUS {
            let array_cache = self.array_cache[i];
            let cache_head = unsafe { &mut MEM_CACHE_BOOT };
            let next_cache = mut_ref_memcache!(cache_head.list.next);
            let _ = next_cache.dealloc(array_cache as *mut u8);
        }
        self.mem_cache_node.destroy();
        let addr = self as *const Self as *mut u8;
        self.flags = Flags::DESTROY;
        list_del!(to_list_head_ptr!(self.list));
        let cache_head = unsafe { &mut MEM_CACHE_BOOT };
        let _ = cache_head.dealloc(addr);
    }
}

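/// Size of the on-slab bookkeeping area: the `Slab` descriptor followed by one
/// `u32` free-list entry per object, rounded up to the cache's alignment.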
#[inline]
fn slab_descriptor_align_size(object_num: u32, align: u32) -> u32 {
    align_to!(
        object_num * core::mem::size_of::<u32>() as u32 + core::mem::size_of::<Slab>() as u32,
        align
    )
}

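/// A bounded stack of cached object addresses, used both per CPU and as the
/// node-level shared pool; the inner state is protected by a spin `Mutex`.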
struct ArrayCache {
    inner: Mutex<ArrayCacheInner>,
}

impl ArrayCache {
    const fn new() -> Self {
        Self {
            inner: Mutex::new(ArrayCacheInner::new()),
        }
    }
}

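/// The contents of an `ArrayCache`: up to `limit` object addresses, refilled
/// and drained `batch_count` entries at a time.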
struct ArrayCacheInner {
    avail: u32,
    limit: u32,
    batch_count: u32,
    entries: [usize; PER_CPU_OBJECTS],
}

impl ArrayCacheInner {
    const fn new() -> Self {
        Self {
            avail: 0,
            limit: PER_CPU_OBJECTS as u32,
            batch_count: PER_CPU_OBJECTS as u32 / 2,
            entries: [0; PER_CPU_OBJECTS],
        }
    }
    #[inline]
    fn init(&mut self) {
        *self = Self::new();
    }

    fn push(&mut self, addrs: &[usize]) {
        assert!(addrs.len() <= self.batch_count as usize);
        assert!(addrs.len() + self.avail as usize <= self.limit as usize);
        self.entries[self.avail as usize..self.avail as usize + addrs.len()]
            .copy_from_slice(addrs);
        self.avail += addrs.len() as u32;
    }
    fn pop_back(&mut self, addrs: &mut [usize]) {
        assert_eq!(addrs.len(), self.batch_count as usize);
        assert!(self.avail >= self.batch_count);
        let begin = self.avail - self.batch_count;
        addrs.copy_from_slice(&self.entries[begin as usize..(begin + self.batch_count) as usize]);
        self.avail -= self.batch_count;
    }
    fn pop(&mut self, addrs: &mut [usize]) {
        assert_eq!(addrs.len(), self.batch_count as usize);
        assert_eq!(self.avail, self.limit);
        addrs.copy_from_slice(&self.entries[0..self.batch_count as usize]);
        for i in self.batch_count as usize..self.avail as usize {
            self.entries[i - self.batch_count as usize] = self.entries[i];
        }
        self.avail -= self.batch_count;
    }

    #[inline]
    fn get(&mut self) -> *mut u8 {
        assert!(self.avail > 0);
        let t = self.entries[self.avail as usize - 1] as *mut u8;
        self.avail -= 1;
        t
    }
    #[inline]
    fn put(&mut self, addr: *mut u8) {
        assert!(self.avail < self.limit);
        self.entries[self.avail as usize] = addr as usize;
        self.avail += 1;
    }
    #[inline]
    fn is_empty(&self) -> bool {
        self.avail == 0
    }
    #[inline]
    fn is_full(&self) -> bool {
        self.avail == self.limit
    }
}

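/// Per-cache slab bookkeeping: a shared object pool plus the three intrusive
/// slab lists (partially used, completely free, completely full) and a count
/// of free slabs used to decide when frames should be handed back.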
#[derive(Debug)]
pub struct CacheNode {
    shared: *mut ArrayCache,
    slab_partial: ListHead,
    slab_free: ListHead,
    slab_full: ListHead,
    free_list_len: RwLock<u32>,
}

impl CacheNode {
    const fn new() -> Self {
        CacheNode {
            shared: core::ptr::null_mut(),
            slab_partial: ListHead::new(),
            slab_free: ListHead::new(),
            slab_full: ListHead::new(),
            free_list_len: RwLock::new(0),
        }
    }
}

impl CacheNode {
    fn init(&mut self) {
        self.shared = core::ptr::null_mut();
        list_head_init!(self.slab_partial);
        list_head_init!(self.slab_free);
        list_head_init!(self.slab_full);
    }

    fn set_array_cache(&mut self) -> Result<(), SlabError> {
        let array_cache_addr = get_array_cache()?;
        self.shared = array_cache_addr as *mut ArrayCache;
        unsafe { (*self.shared).inner.lock().init() };
        Ok(())
    }

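    /// Pick a slab that still has room: prefer a partially used slab, then a
    /// completely free one, and only create a brand-new slab when neither
    /// exists. The returned slab is kept on (or moved to) the partial list.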
    fn alloc_inner(&self, cache: *mut MemCache) -> Result<&mut Slab, SlabError> {
        let cache = unsafe { &mut *cache };
        let slab = if !is_list_empty!(to_list_head_ptr!(self.slab_partial)) {
            mut_ref_slab!(self.slab_partial.next)
        } else if is_list_empty!(to_list_head_ptr!(self.slab_free)) {
            trace!("alloc new rslab");
            Slab::new(cache)?;
            assert!(!is_list_empty!(to_list_head_ptr!(self.slab_free)));
            let slab = mut_ref_slab!(self.slab_free.next);
            slab.move_to(to_list_head_ptr!(self.slab_partial));
            slab
        } else {
            let slab = mut_ref_slab!(self.slab_free.next);
            slab.move_to(to_list_head_ptr!(self.slab_partial));
            *self.free_list_len.write() -= 1;
            slab
        };
        Ok(slab)
    }

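    /// Refill `addrs` on behalf of a per-CPU cache. If the shared pool holds
    /// enough objects they are taken from there; otherwise objects are carved
    /// directly out of slabs, and any slab that fills up is moved onto the
    /// full list.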
    fn alloc(&self, cache: *mut MemCache, addrs: &mut [usize]) -> Result<(), SlabError> {
        let shared_array = unsafe { &mut *self.shared };
        let mut shared_array = shared_array.inner.lock();
        if shared_array.avail >= addrs.len() as u32 {
            shared_array.pop_back(addrs);
        } else {
            let mut i = 0;
            let mcache = unsafe { &*cache };
            while i < shared_array.batch_count as usize {
                let slab = self.alloc_inner(cache)?;
                while slab.used_object != mcache.per_objects {
                    let addr = slab.alloc();
                    addrs[i] = addr as usize;
                    i += 1;
                    if i == shared_array.batch_count as usize {
                        break;
                    }
                }
                if slab.used_object == mcache.per_objects {
                    slab.move_to(to_list_head_ptr!(self.slab_full));
                }
            }
        }
        Ok(())
    }

    fn is_in_cache(&self, addr: *mut u8) -> Result<&mut Slab, SlabError> {
        let slab_list = self.slab_partial.iter().find(|&slab_list| {
            let slab = ref_slab!(slab_list);
            slab.is_in_slab(addr)
        });
        if let Some(slab_list) = slab_list {
            return Ok(mut_ref_slab!(slab_list));
        }
        let slab_list = self.slab_full.iter().find(|&slab_list| {
            let slab = ref_slab!(slab_list);
            slab.is_in_slab(addr)
        });
        if let Some(slab_list) = slab_list {
            return Ok(mut_ref_slab!(slab_list));
        }
        Err(SlabError::NotInCache)
    }
    fn dealloc_inner(&self, addr: *mut u8) {
        let slab = self.is_in_cache(addr).unwrap();
        slab.dealloc(addr);
        if slab.used_object == 0 {
            slab.move_to(to_list_head_ptr!(self.slab_free));
            *self.free_list_len.write() += 1;
            self.check_and_reclaim();
        } else {
            slab.move_to(to_list_head_ptr!(self.slab_partial));
        }
    }
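    /// If more than `FREE_LIST_MAX` completely free slabs have piled up,
    /// unlink the excess slabs and return their frames to the frame allocator.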
    fn check_and_reclaim(&self) {
        let mut free_len = self.free_list_len.write();
        if *free_len > FREE_LIST_MAX as u32 {
            self.slab_free
                .iter()
                .take(*free_len as usize - FREE_LIST_MAX)
                .for_each(|slab_list| {
                    let slab = mut_ref_slab!(slab_list);
                    list_del!(slab_list);
                    slab.reclaim();
                });
            *free_len = FREE_LIST_MAX as u32;
        }
    }
    fn dealloc(&self, addrs: &[usize]) {
        let shared_array = unsafe { &mut *self.shared };
        let mut shared_array = shared_array.inner.lock();
        if shared_array.is_full() {
            let mut temp = [0usize; PER_CPU_OBJECTS];
            let batch_count = shared_array.batch_count as usize;
            shared_array.pop(&mut temp[0..batch_count]);
            for i in 0..batch_count {
                self.dealloc_inner(temp[i] as *mut u8);
            }
        }
        shared_array.push(addrs);
    }

    fn total_slabs(&self) -> usize {
        self.slab_partial.len() + self.slab_full.len() + self.slab_free.len()
    }
    fn used_objects(&self, per_objects: usize) -> usize {
        self.slab_partial
            .iter()
            .map(|slab_list| mut_ref_slab!(slab_list).used_object as usize)
            .sum::<usize>()
            + self.slab_full.len() * per_objects
    }
    fn destroy(&self) {
        let shared = self.shared;
        let cache_head = unsafe { &mut MEM_CACHE_BOOT };
        let next_cache = mut_ref_memcache!(cache_head.list.next);
        let _ = next_cache.dealloc(shared as *mut u8);

        self.slab_partial.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            list_del!(slab_list);
            slab.reclaim();
        });
        self.slab_full.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            list_del!(slab_list);
            slab.reclaim();
        });
        self.slab_free.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            list_del!(slab_list);
            slab.reclaim();
        });
    }
}

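/// The descriptor of one slab: a run of `1 << per_frames` frames divided into
/// equal-sized objects, plus a `u32` free-list recording which object slots
/// are still available. Depending on the cache's `Flags`, the descriptor and
/// free-list live either inside the slab's own frames or in separate
/// allocations.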
pub struct Slab {
    list: ListHead,
    cache: *mut MemCache,
    used_object: u32,
    next_free: u32,
    color_off: u32,
    first_object: usize,
    free_list: *mut u32,
}

impl Debug for Slab {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        f.write_fmt(format_args!(
            "Slab{{\n\
            \tlist:{:?},\n\
            \tcache:{:?},\n\
            \tused_object:{},\n\
            \tnext_free:{},\n\
            \tfirst_object:{:#x},\n\
            \tfree_list:{:?}\
            }}",
            self.list,
            self.cache,
            self.used_object,
            self.next_free,
            self.first_object,
            self.free_list
        ))
    }
}

impl Slab {
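    /// Allocate the frames for a new slab, place (or separately allocate) its
    /// descriptor and free-list, fill the free-list with the identity
    /// permutation `0..per_objects`, and append the slab to the cache's free
    /// list.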
    fn new(cache: &MemCache) -> Result<(), SlabError> {
        let per_frames = cache.per_frames;
        let start_addr = alloc_frames_for_cache(1 << per_frames);
        if start_addr.is_null() {
            return Err(SlabError::CantAllocFrame);
        }
        let start_addr = start_addr as usize;
        let mut slab_desc_align_size = 0;
        if cache.flags == Flags::SLAB_ON {
            slab_desc_align_size = slab_descriptor_align_size(cache.per_objects, cache.align);
        }
        let mut first_object_addr = start_addr.add(slab_desc_align_size as usize);
        first_object_addr += cache.color_off as usize * cache.color_next as usize;

        let (slab_ptr, free_list_addr) = if cache.flags == Flags::SLAB_ON {
            (start_addr, start_addr.add(core::mem::size_of::<Slab>()))
        } else {
            let layout = Layout::from_size_align(
                cache.per_objects as usize * core::mem::size_of::<u32>(),
                4,
            )
            .unwrap();
            let free_list_ptr = alloc_from_slab(layout)?;
            let slab_ptr = alloc_from_slab(slab_layout!())?;
            (slab_ptr as usize, free_list_ptr as usize)
        };
        let slab = Slab {
            list: ListHead::new(),
            cache: cache as *const MemCache as *mut MemCache,
            used_object: 0,
            next_free: 0,
            color_off: cache.color_next,
            first_object: first_object_addr,
            free_list: free_list_addr as *mut u32,
        };
        unsafe {
            core::ptr::write(slab_ptr as *mut Slab, slab);
            for i in 0..cache.per_objects {
                core::ptr::write(
                    free_list_addr.add(i as usize * core::mem::size_of::<u32>()) as *mut u32,
                    i,
                );
            }
        }
        let slab = unsafe { &mut *(slab_ptr as *mut Slab) };
        list_head_init!(slab.list);
        trace!("{:?}", slab);
        list_add_tail!(
            to_list_head_ptr!(slab.list),
            to_list_head_ptr!(cache.mem_cache_node.slab_free)
        );
        let cache = slab.cache;
        unsafe {
            if (*cache).color_next == (*cache).color {
                (*cache).color_next = 0;
            }
        }
        Ok(())
    }

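    /// Hand out the next free object recorded in the free-list, or null when
    /// the slab is already fully used.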
    fn alloc(&mut self) -> *mut u8 {
        let cache = unsafe { &mut *self.cache };
        let per_objects = cache.per_objects;
        if self.next_free < per_objects {
            let pos = unsafe { self.free_list.add(self.next_free as usize).read() };
            let addr = self
                .first_object
                .add(pos as usize * cache.object_size as usize);
            self.next_free += 1;
            self.used_object += 1;
            return addr as *mut u8;
        }
        core::ptr::null_mut()
    }
    fn dealloc(&mut self, addr: *mut u8) {
        let cache = unsafe { &mut *self.cache };
        let pos = (addr as usize - self.first_object) / cache.object_size as usize;
        self.next_free -= 1;
        unsafe {
            self.free_list
                .add(self.next_free as usize)
                .write_volatile(pos as u32);
        }
        self.used_object -= 1;
        trace!(
            "rslab dealloc {:?}, object_size is {}, used: {}",
            addr,
            cache.object_size,
            self.used_object
        );
    }
    fn reclaim(&self) {
        let cache = unsafe { &mut *self.cache };
        let per_frames = cache.per_frames;
        let start = self.start();
        if cache.flags == Flags::SLAB_OFF {
            let layout = Layout::from_size_align(
                cache.per_objects as usize * core::mem::size_of::<u32>(),
                4,
            )
            .unwrap();
            let _ = dealloc_to_slab(self.free_list as *mut u8, layout);
            let _ = dealloc_to_slab(self as *const Slab as *mut u8, slab_layout!());
        }
        unsafe {
            free_frames(start as *mut u8, 1 << per_frames);
        }
    }
    fn start(&self) -> usize {
        let cache = unsafe { &mut *self.cache };
        if cache.flags == Flags::SLAB_ON {
            self as *const Slab as usize
        } else {
            self.first_object
        }
    }

    #[inline]
    fn move_to(&mut self, to: *mut ListHead) {
        list_del!(to_list_head_ptr!(self.list));
        list_add_tail!(to_list_head_ptr!(self.list), to);
    }
    #[inline]
    fn is_in_slab(&self, addr: *mut u8) -> bool {
        let addr = addr as usize;
        let cache = unsafe { &mut *self.cache };
        let start_addr = self.start();
        let end_addr = start_addr.add((1 << cache.per_frames as usize) * frame_size());
        (start_addr <= addr) && (addr < end_addr)
    }
}

fn alloc_frames_for_cache(num: u32) -> *mut u8 {
    trace!("alloc {} frames for cache", num);
    unsafe { alloc_frames(num as usize) }
}

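/// Allocate a raw `ArrayCache` from the "array_cache" cache, which always sits
/// immediately after the boot cache on the global list (checked by the
/// assertion below).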
fn get_array_cache() -> Result<*mut u8, SlabError> {
    let cache_head = unsafe { &mut MEM_CACHE_BOOT };
    let next_cache = mut_ref_memcache!(cache_head.list.next);
    assert_eq!(next_cache.cache_name, "array_cache");
    next_cache.alloc()
}

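/// Bootstrap the slab system. This initialises the global `SLAB_CACHES` list,
/// sets up the boot cache ("kmem_cache", which stores `MemCache` descriptors)
/// and the "array_cache" cache, wiring both to statically allocated
/// `ArrayCache`s because dynamic allocation is not available yet. Must run
/// once before any other slab API is used.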
pub fn mem_cache_init() -> Result<(), SlabError> {
    unsafe {
        list_head_init!(SLAB_CACHES);
    }
    let cache = unsafe { &mut MEM_CACHE_BOOT };
    let cache_layout = cache_layout!();
    cache.init(
        "kmem_cache",
        cache_layout.size() as u32,
        cache_layout.align() as u32,
    )?;
    unsafe {
        for i in 0..CPUS {
            cache.array_cache[i] = &mut ARRAY_CACHE_FOR_BOOT[i] as *mut ArrayCache;
        }
        cache.mem_cache_node.shared = &mut ARRAY_CACHE_NODE_BOOT as *mut ArrayCache;
    }
    list_add_tail!(
        to_list_head_ptr!(cache.list),
        to_list_head_ptr!(SLAB_CACHES)
    );

    let array_cache_layout = array_cache_layout!();
    let array_cache = create(
        "array_cache",
        array_cache_layout.size() as u32,
        array_cache_layout.align() as u32,
    )?;
    unsafe {
        for i in 0..CPUS {
            array_cache.array_cache[i] = &mut ARRAY_CACHE_FOR_ARRAY[i] as *mut ArrayCache;
        }
        array_cache.mem_cache_node.shared = &mut ARRAY_CACHE_NODE_ARRAY as *mut ArrayCache;
    }
    Ok(())
}

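/// Create a new named object cache, or fail with `SlabError::NameDuplicate`
/// if a cache with the same name already exists.
///
/// A minimal usage sketch (assuming `mem_cache_init` has already been called
/// and that the surrounding kernel provides the `alloc_frames` /
/// `free_frames` / `current_cpu_id` hooks; the cache name and sizes below are
/// illustrative only):
///
/// ```ignore
/// let cache = create_mem_cache("demo_cache", 128, 8)?;
/// let obj = cache.alloc()?;   // one 128-byte, 8-byte-aligned object
/// // ... use the object ...
/// cache.dealloc(obj)?;        // hand it back to the cache
/// ```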
pub fn create_mem_cache(
    name: &'static str,
    object_size: u32,
    align: u32,
) -> Result<&mut MemCache, SlabError> {
    let cache_head = unsafe { &mut SLAB_CACHES };
    let find = cache_head.iter().find(|&cache_list| {
        let cache = mut_ref_memcache!(cache_list);
        cache.cache_name.eq(name)
    });
    if find.is_some() {
        return Err(SlabError::NameDuplicate);
    }
    let cache_object = create(name, object_size, align)?;
    cache_object.set_array_cache()?;
    Ok(cache_object)
}

fn create(name: &'static str, object_size: u32, align: u32) -> Result<&mut MemCache, SlabError> {
    let cache = unsafe { &mut MEM_CACHE_BOOT };
    let cache_object_addr = cache.alloc()?;
    let cache_object_addr = cache_object_addr as *mut MemCache;
    let cache_object = unsafe { &mut (*cache_object_addr) };
    cache_object.init(name, object_size, align).unwrap();
    list_add_tail!(
        to_list_head_ptr!(cache_object.list),
        to_list_head_ptr!(SLAB_CACHES)
    );
    Ok(cache_object)
}

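/// Allocate `layout.size()` bytes from the best-fitting generic cache: the
/// request is rounded up to the next power of two and served by the first
/// cache (other than the two internal ones) whose object size, assumed to be
/// a power of two, covers it. Requests larger than 2^23 bytes are rejected.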
pub fn alloc_from_slab(layout: Layout) -> Result<*mut u8, SlabError> {
    let cache_list = unsafe { &mut SLAB_CACHES };
    let size = layout.size();
    let index = size.next_power_of_two();
    let index = index.trailing_zeros() as usize;
    if index > 23 {
        return Err(SlabError::SizeTooLarge);
    }
    let find = cache_list
        .iter()
        .find(|&cache_list| {
            let cache = mut_ref_memcache!(cache_list);
            cache.object_size.trailing_zeros() as usize >= index
                && cache.cache_name != "array_cache"
                && cache.cache_name != "kmem_cache"
        })
        .unwrap();
    let cache = mut_ref_memcache!(find);
    cache.alloc()
}

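/// Return an object previously obtained from `alloc_from_slab`, using the
/// layout to locate the cache with the matching power-of-two object size.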
pub fn dealloc_to_slab(addr: *mut u8, layout: Layout) -> Result<(), SlabError> {
    let cache_list = unsafe { &SLAB_CACHES };
    let size = layout.size();
    let index = size.next_power_of_two();
    let index = index.trailing_zeros() as usize;
    if index > 23 {
        return Err(SlabError::NotInCache);
    }
    let find = cache_list.iter().find(|&cache_list| {
        let cache = mut_ref_memcache!(cache_list);
        cache.object_size.trailing_zeros() as usize == index
            && cache.cache_name != "array_cache"
            && cache.cache_name != "kmem_cache"
    });
    if find.is_none() {
        return Err(SlabError::NotInCache);
    }
    let cache = mut_ref_memcache!(find.unwrap());
    cache.dealloc(addr)
}

pub fn print_slab_system_info() {
    let cache_list = unsafe { &SLAB_CACHES };
    pprintln!("There are {} caches in system:", cache_list.len());
    pprintln!("cache_name object_size align p_frames p_objects total_object used_object limit batch_count local_cpus shared");
    cache_list.iter().for_each(|cache| {
        let cache = ref_memcache!(cache);
        pprintln!("----------------------------------------------------------------------------------------------------------");
        cache.print_info();
    });
}

#[cfg(test)]
mod array_cache_test {
    use crate::slab::{ArrayCache, PER_CPU_OBJECTS};
    #[test]
    fn test_push_pop() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        assert_eq!(inner.is_empty(), true);
        assert_eq!(inner.batch_count as usize, PER_CPU_OBJECTS / 2);
        assert_eq!(inner.limit as usize, PER_CPU_OBJECTS);
        let mut data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        inner.push(&data[0..batch]);
        assert_eq!(inner.is_empty(), false);
        assert_eq!(inner.avail as usize, PER_CPU_OBJECTS);
        inner.pop(&mut data[0..batch]);
        assert_eq!(inner.avail, PER_CPU_OBJECTS as u32 / 2);
    }
    #[test]
    #[should_panic]
    fn test_push_pop_panic1() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [0; PER_CPU_OBJECTS];
        inner.push(&data[0..]);
    }
    #[test]
    #[should_panic]
    fn test_push_pop_panic2() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let mut data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        inner.pop(&mut data[0..batch]);
    }
    #[test]
    fn test_get_put() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [10; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        let t = inner.get();
        assert_eq!(10, t as usize);
        assert_eq!(inner.avail as usize, batch - 1);
    }
    #[test]
    #[should_panic]
    fn test_get_put_panic() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        inner.get();
    }
    #[test]
    #[should_panic]
    fn test_get_put_panic1() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        inner.push(&data[0..batch]);
        inner.put(10 as *mut u8);
    }
}

#[cfg(test)]
mod slab_test {
    use crate::slab::{mem_cache_init, CacheNode, Flags};
    use crate::MemCache;
    #[no_mangle]
    unsafe fn free_frames(addr: *mut u8, num: usize) {}
    #[no_mangle]
    fn current_cpu_id() -> usize {
        0
    }
    #[no_mangle]
    unsafe fn alloc_frames(num: usize) -> *mut u8 {
        core::ptr::null_mut()
    }

    #[test]
    fn test_init_cache_small_obj() {
        let mut cache = MemCache::new();
        cache.init("test_cache", 128, 7).unwrap();
        assert_eq!(cache.align, 8);
        assert_eq!(cache.cache_name, "test_cache");
        assert_eq!(cache.object_size, 128);
        assert_eq!(cache.flags, Flags::SLAB_ON);
        assert_eq!(cache.per_frames, 0);
        assert_eq!(cache.per_objects, 30);
        assert_eq!(cache.color, 5);
        cache.init("test_cache", 127, 7).unwrap();
        assert_eq!(cache.object_size, 128);
    }

    #[test]
    fn test_init_cache_big_obj() {
        let mut cache = MemCache::new();
        cache.init("test_cache", 512, 7).unwrap();
        assert_eq!(cache.flags, Flags::SLAB_OFF);
        assert_eq!(cache.color, 0);
        assert_eq!(cache.per_frames, 0);
        assert_eq!(cache.per_objects, 8);
    }

    #[test]
    fn test_cache_node() {
        let mut node = CacheNode::new();
        node.init();
        let x = node.total_slabs();
        assert_eq!(x, 0);
        assert_eq!(node.used_objects(10), 0);
    }
}