// rslab/slab.rs

1use super::alloc_frames;
2use crate::formation::*;
3use crate::{cls, frame_size, free_frames, MEM_CACHE_BOOT};
4use crate::{current_cpu_id, SLAB_CACHES};
5use alloc::alloc::dealloc;
6use core::alloc::Layout;
7use bitflags::bitflags;
8use core::cmp::{max, min};
9use core::fmt::{Debug, Formatter, Write};
10use core::mem::forget;
11use core::ops::Add;
12use core::sync::atomic::AtomicUsize;
13use doubly_linked_list::*;
14use preprint::pprintln;
15use spin::mutex::SpinMutex;
16use spin::rwlock::RwLock;
17use spin::Mutex;
18
/// `limit` of each per-CPU array cache: maximum cached objects.
const PER_CPU_OBJECTS: usize = 16;
/// Maximum number of supported CPU cores.
const CPUS: usize = 8;
/// Upper bound of the free-slab list; reaching it triggers frame reclamation.
const FREE_LIST_MAX:usize = 16;

/// Const template used to build the static arrays below.
const VAL: ArrayCache = ArrayCache::new();
/// Per-CPU array caches for the boot cache (`kmem_cache`).
static mut ARRAY_CACHE_FOR_BOOT: [ArrayCache; CPUS] = [VAL; CPUS];
/// Per-CPU array caches for the `array_cache` cache itself.
static mut ARRAY_CACHE_FOR_ARRAY: [ArrayCache; CPUS] = [VAL; CPUS];
/// Shared (node-level) array cache for the boot cache.
static mut ARRAY_CACHE_NODE_BOOT: ArrayCache = ArrayCache::new();
/// Shared (node-level) array cache for the `array_cache` cache.
static mut ARRAY_CACHE_NODE_ARRAY: ArrayCache = ArrayCache::new();
31
32
33
bitflags! {
    /// Control flags of a `MemCache`.
    pub struct Flags:u8{
        /// Slab descriptor and freelist live OUTSIDE the slab's frames.
        const SLAB_OFF = 0b0000_0000;
        /// Slab descriptor and freelist live INSIDE the slab's frames.
        const SLAB_ON = 0b0000_0001;
        /// Cache has been destroyed and must not be used again.
        const DESTROY = 0b0000_0010;
    }
}
41
/// Snapshot of a cache's statistics, as produced by `MemCache::get_cache_info`.
pub struct SlabInfo {
    /// Cache name.
    pub cache_name: &'static str,
    /// Aligned object size in bytes.
    pub object_size: u32,
    /// Object alignment.
    pub align: u32,
    /// Frames per slab as a power of two (2^per_frames).
    pub per_frames: u32,
    /// Objects per slab.
    pub per_objects: u32,
    /// Total objects across all slabs.
    pub total_objects: u32,
    /// Objects handed out to users (excludes objects parked in array caches).
    pub used_objects: u32,
    /// Capacity of each array cache.
    pub limit: u32,
    /// Objects moved per refill/flush batch.
    pub batch_count: u32,
    /// Objects currently cached in the per-CPU arrays.
    pub local_objects: u32,
    /// Objects currently cached in the shared array.
    pub shared_objects: u32,
}
55
#[derive(Debug)]
pub struct MemCache {
    /// Per-CPU local array caches, one per possible core.
    array_cache: [*mut ArrayCache; CPUS as usize],
    /// Link in the global `SLAB_CACHES` list.
    list: ListHead,
    /// Number of objects in each slab.
    per_objects: u32,
    /// Frames per slab as a power of two: 2^per_frames.
    per_frames: u32,
    /// Object alignment.
    align: u32,
    /// Aligned object size in bytes.
    object_size: u32,
    /// Number of available colour slots.
    color: u32,
    /// Colour step == cache-line size.
    color_off: u32,
    /// Colour index to use for the next slab.
    color_next: u32,
    /// Slab management node (partial/free/full lists + shared array cache).
    mem_cache_node: CacheNode,
    /// Cache name.
    cache_name: &'static str,
    /// Control flags.
    flags: Flags,
}
// NOTE(review): soundness of these impls rests on the ArrayCache mutexes and
// the single-owner list discipline — confirm before relying on cross-thread use.
unsafe impl Sync for MemCache {}
unsafe impl Send for MemCache {}
84
// Recover a `&mut MemCache` from a pointer to its embedded `list` field.
macro_rules! mut_ref_memcache {
    ($addr:expr) => {
        unsafe { &mut (*container_of!($addr as usize, MemCache, list)) }
    };
}
// Recover a `&MemCache` from a pointer to its embedded `list` field.
macro_rules! ref_memcache {
    ($addr:expr) => {
        unsafe { &(*container_of!($addr as usize, MemCache, list)) }
    };
}

// Recover a `&mut Slab` from a pointer to its embedded `list` field.
macro_rules! mut_ref_slab {
    ($addr:expr) => {
        unsafe { &mut (*container_of!($addr as usize, Slab, list)) }
    };
}
// Recover a `&Slab` from a pointer to its embedded `list` field.
macro_rules! ref_slab {
    ($addr:expr) => {
        unsafe { &(*container_of!($addr as usize, Slab, list)) }
    };
}

// `Layout` of the `MemCache` type.
macro_rules! cache_layout {
    ()=> {
        Layout::from_size_align(core::mem::size_of::<MemCache>(), core::mem::align_of::<MemCache>()).unwrap()
    };
}
// `Layout` of the `Slab` descriptor.
macro_rules! slab_layout {
    ()=> {
        Layout::from_size_align(core::mem::size_of::<Slab>(), core::mem::align_of::<Slab>()).unwrap()
    };
}
// `Layout` of an `ArrayCache`.
macro_rules! array_cache_layout {
    ()=> {
        Layout::from_size_align(core::mem::size_of::<ArrayCache>(), core::mem::align_of::<ArrayCache>()).unwrap()
    };
}
122
123
124
125
126impl MemCache {
127    pub const fn new() -> Self {
128        Self {
129            array_cache: [core::ptr::null_mut(); CPUS as usize],
130            list: ListHead::new(),
131            per_objects: 0,
132            per_frames: 0,
133            align: 0,
134            object_size: 0,
135            color: 0,
136            color_off: 0,
137            color_next: 0,
138            mem_cache_node: CacheNode::new(),
139            cache_name: "",
140            flags: Flags::empty(),
141        }
142    }
143    /// 打印信息
144    pub fn print_info(&self) {
145        let slab_info = self.get_cache_info();
146        pprintln!(
147            "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
148            self.cache_name,
149            self.object_size,
150            self.align,
151            self.per_frames,
152            self.per_objects,
153            slab_info.total_objects,
154            slab_info.used_objects,
155            PER_CPU_OBJECTS,
156            PER_CPU_OBJECTS / 2,
157            slab_info.local_objects,
158            slab_info.shared_objects
159        );
160    }
161    pub fn get_cache_info(&self) -> SlabInfo {
162        // 计算总的对象和已使用的对象
163        let per_objects = self.per_objects as usize;
164        let total = self.mem_cache_node.total_slabs() * per_objects;
165        let used = self.mem_cache_node.used_objects(per_objects);
166        // 计算本地高速缓存的对象数量
167        let mut local = 0;
168        for i in 0..CPUS {
169            local += unsafe { (*self.array_cache[i]).inner.lock().avail };
170        }
171        //计算共享高速缓存的对象数量
172        let shared = unsafe { (*self.mem_cache_node.shared).inner.lock().avail };
173        assert!(used as u32 >= local + shared);
174        SlabInfo {
175            cache_name: self.cache_name,
176            object_size: self.object_size,
177            align: self.align,
178            per_frames: self.per_frames,
179            per_objects: self.per_objects,
180            total_objects: total as u32,
181            used_objects: used as u32 - shared - local,
182            limit: PER_CPU_OBJECTS as u32,
183            batch_count: PER_CPU_OBJECTS as u32 / 2,
184            local_objects: local,
185            shared_objects: shared,
186        }
187    }
188
189    /// 需要根据对象大小和对齐方式计算出
190    /// 需要的页面数量,确保内部碎片 < 12.5%
191    /// 再计算每个slab中对象的数量
192    fn init_cache_object_num(&mut self) {
193        let mut order = 0;
194        let mut left_over = 0;
195        loop {
196            let total_size = frame_size() * (1 << order);
197            let object_num = if self.flags == Flags::SLAB_OFF {
198                // slab描述符和freelist数组在外部
199                total_size / self.object_size as usize
200            } else {
201                // slab描述符和freelist数组在内部
202                let mut object_num = (total_size - core::mem::size_of::<Slab>())
203                    / (self.object_size as usize + core::mem::size_of::<u32>());
204                // 计算对齐后slab描述符的大小
205                while let slab_align = slab_descriptor_align_size(object_num as u32, self.align) {
206                    if (slab_align + object_num as u32 * self.object_size) < total_size as u32 {
207                        break;
208                    }
209                    object_num -= 1;
210                } //找到正确的对象数量,一般是需要运行一次即可
211                object_num
212            };
213            //检查内部碎片的比例
214            left_over = total_size - object_num * self.object_size as usize;
215            if self.flags == Flags::SLAB_ON {
216                left_over -= slab_descriptor_align_size(object_num as u32, self.align) as usize;
217            }
218            if left_over * 8 < total_size {
219                self.per_objects = object_num as u32;
220                self.per_frames = order;
221                //初始化可着色的数量
222                self.color = (left_over / cls()) as u32;
223                break;
224            } // 找到页帧正确的数量
225            order += 1;
226        }
227        trace!(
228            "left_over is {}, total_size is {}",
229            left_over,
230            frame_size() * (1 << self.per_frames)
231        );
232    }
233
234    /// 在使用init初始化cache后需要使用此函数完成array_cache的初始化
235    /// 对于系统初始化阶段的两个初始cache不经过这里
236    fn set_array_cache(&mut self) -> Result<(), SlabError> {
237        //从array_cache中分配得到
238        for i in 0..CPUS {
239            let array_cache_addr = get_array_cache()?;
240            self.array_cache[i] = array_cache_addr as *mut ArrayCache;
241            unsafe { (*self.array_cache[i]).inner.lock().init() };
242        }
243        self.mem_cache_node.set_array_cache()?;
244        Ok(())
245    }
246
247    fn init(&mut self, name: &'static str, object_size: u32, align: u32) -> Result<(), SlabError> {
248        self.array_cache = [core::ptr::null_mut(); CPUS];
249        self.mem_cache_node.init();
250        self.cache_name = name;
251        self.color_off = cls() as u32; //cache行大小
252        self.align = if align.is_power_of_two() && align != 0 {
253            max(align, 8)
254        } else {
255            core::mem::size_of::<usize>() as u32
256        };
257        // 对象大小对齐到align
258        self.object_size = align_to!(object_size, self.align);
259        self.flags = if object_size * 8 >= frame_size() as u32 {
260            Flags::SLAB_OFF
261        } else {
262            Flags::SLAB_ON
263        };
264        // 分配的物理页帧起始位置由
265        // slab结构体 + free_list数组构成
266        // 第一个对象的地址需要对齐到align
267        self.init_cache_object_num();
268        Ok(())
269    }
270    pub fn alloc(&self) -> Result<*mut u8,SlabError> {
271        if self.flags.contains(Flags::DESTROY) {
272            panic!("cache had been destroyed");
273        }
274        /// 先从高速缓存分配
275        ///
276        /// todo!(多cpu访问一致性保证 ?)
277        ///
278        /// 如果一个cpu上的线程正在分配内存并且以及获取了cpu_id,此时其再被抢占放到另一个cpu上可能会发生错误?
279        let cpu_id = unsafe { current_cpu_id() };
280        let array_cache = unsafe { &mut *self.array_cache[cpu_id] };
281        let mut array_cache = array_cache.inner.lock();
282        if array_cache.is_empty() {
283            let mut new_objects = [0usize; PER_CPU_OBJECTS];
284            let mem_cache_ptr = self as *const MemCache as *mut MemCache;
285            self.mem_cache_node
286                .alloc(mem_cache_ptr, &mut new_objects[0..array_cache.batch_count as usize])?;
287            let batch_count = array_cache.batch_count as usize;
288            array_cache.push(&new_objects[0..batch_count]);
289        }
290        Ok(array_cache.get())
291    }
292
293    pub fn dealloc(&self, addr: *mut u8) -> Result<(), SlabError> {
294        if self.flags.contains(Flags::DESTROY) {
295            panic!("cache had been destroyed");
296        }
297        /// 判断此地址是否属于此cache
298        let cpu_id = unsafe { current_cpu_id() };
299        let array_cache = unsafe { &mut *self.array_cache[cpu_id] };
300        let mut array_cache = array_cache.inner.lock();
301        // self.mem_cache_node.is_in_cache(addr)?;
302        if array_cache.is_full() {
303            let mut objects = [0usize; PER_CPU_OBJECTS];
304            let batch_count = array_cache.batch_count as usize;
305            array_cache.pop(&mut objects[0..batch_count]);
306            self.mem_cache_node.dealloc(&objects[0..batch_count]);
307        }
308        array_cache.put(addr);
309        Ok(())
310    }
311    /// 调用destroy会将cache管理的所有slab回收掉。
312    /// 包括free/partial/full
313    /// 并且对于cache本身不再可用,
314    /// 由于cache本身的地址仍然会是有效的,使用者可能会再次使用已经destroy的
315    /// cache分配内存,以此需要设置标志防止其再使用
316    pub fn destroy(&mut self) {
317        // 先把高速缓存的内存回收
318        for i in 0..CPUS {
319            let array_cache = self.array_cache[i];
320            // 直接回收到array_cache中
321            let cache_head = unsafe { &mut MEM_CACHE_BOOT };
322            let next_cache = mut_ref_memcache!(cache_head.list.next);
323            next_cache.dealloc(array_cache as *mut u8);
324        }
325        self.mem_cache_node.destroy();
326        //回收掉自己
327        let addr = self as *const Self as *mut u8;
328        self.flags = Flags::DESTROY;
329        list_del!(to_list_head_ptr!(self.list));
330        let cache_head = unsafe { &mut MEM_CACHE_BOOT };
331        cache_head.dealloc(addr);
332    }
333}
334
335
336
337
338#[inline]
339fn slab_descriptor_align_size(object_num: u32, align: u32) -> u32 {
340    align_to!(
341        object_num * core::mem::size_of::<u32>() as u32 + core::mem::size_of::<Slab>() as u32,
342        align
343    )
344}
345
/// Per-CPU / shared object cache (the Linux-style "array cache").
///
/// `limit`: maximum number of cached objects;
/// `batch_count`: objects moved per transfer with the shared cache or slabs;
/// `entries`: object addresses.
/// Objects are taken from the back and put at the back, so the most recently
/// freed — and therefore cache-hot — object is handed out first.
struct ArrayCache {
    // Mutex-protected payload; all state lives in `ArrayCacheInner`.
    inner: Mutex<ArrayCacheInner>,
}

impl ArrayCache {
    /// Const constructor, usable in statics.
    const fn new() -> Self {
        Self {
            inner: Mutex::new(ArrayCacheInner::new()),
        }
    }
}
364
365struct ArrayCacheInner {
366    avail: u32,
367    limit: u32,
368    batch_count: u32,
369    entries: [usize; PER_CPU_OBJECTS as usize],
370}
371
372impl ArrayCacheInner {
373    const fn new() -> Self {
374        Self {
375            avail: 0,
376            limit: PER_CPU_OBJECTS as u32,
377            batch_count: PER_CPU_OBJECTS as u32 / 2,
378            entries: [0; PER_CPU_OBJECTS],
379        }
380    }
381    #[inline]
382    fn init(&mut self) {
383        *self = Self::new();
384    }
385
386    /// 需要保证
387    fn push(&mut self, addrs: &[usize]) {
388        //从下一层获取的batch_count个对象
389        //放到array_cache中
390        assert!(addrs.len() <= self.batch_count as usize);
391        assert!(addrs.len() + self.avail as usize <= self.limit as usize);
392        self.entries[self.avail as usize..(self.avail + self.batch_count) as usize]
393            .copy_from_slice(addrs);
394
395        self.avail += self.batch_count;
396    }
397    fn pop_back(&mut self, addrs: &mut [usize]) {
398        assert_eq!(addrs.len(), self.batch_count as usize);
399        assert!(self.avail >= self.batch_count);
400        //从后往前取
401        let begin = self.avail - self.batch_count;
402        addrs.copy_from_slice(&self.entries[begin as usize..(begin + self.batch_count) as usize]);
403        self.avail -= self.batch_count;
404    }
405    fn pop(&mut self, addrs: &mut [usize]) {
406        //从本层往下一层回收的batch_count个对象
407        //从前往后取
408        assert_eq!(addrs.len(), self.batch_count as usize);
409        assert_eq!(self.avail, self.limit);
410        addrs.copy_from_slice(&self.entries[0..self.batch_count as usize]);
411        //从前往后取,所以后面的对象往前移动
412        for i in self.batch_count as usize..self.avail as usize {
413            self.entries[i - self.batch_count as usize] = self.entries[i];
414        }
415
416        self.avail -= self.batch_count;
417    }
418
419    /// 需要调用者保证存在可用的对象
420    #[inline]
421    fn get(&mut self) -> *mut u8 {
422        //从本层获取一个对象
423        assert!(self.avail > 0);
424        let t = self.entries[self.avail as usize - 1] as *mut u8;
425        self.avail -= 1;
426        t
427    }
428    #[inline]
429    fn put(&mut self, addr: *mut u8) {
430        //往本层放一个对象
431        assert!(self.avail < self.limit);
432        self.entries[self.avail as usize] = addr as usize;
433        self.avail += 1;
434    }
435    #[inline]
436    fn is_empty(&self) -> bool {
437        self.avail == 0
438    }
439    #[inline]
440    fn is_full(&self) -> bool {
441        self.avail == self.limit
442    }
443}
444
/// Slab management node of a cache.
///
/// `shared`: array cache shared by all CPUs of this cache;
/// `slab_partial`: slabs with some objects allocated;
/// `slab_free`: completely free slabs;
/// `slab_full`: completely allocated slabs.
#[derive(Debug)]
pub struct CacheNode {
    shared: *mut ArrayCache,
    slab_partial: ListHead,
    slab_free: ListHead,
    slab_full: ListHead,
    // Length of `slab_free`; compared against FREE_LIST_MAX for reclamation.
    free_list_len: RwLock<u32>,
}

impl CacheNode {
    /// Const constructor; the lists still need `init` before use.
    const fn new() -> Self {
        CacheNode {
            shared: core::ptr::null_mut(),
            slab_partial: ListHead::new(),
            slab_free: ListHead::new(),
            slab_full: ListHead::new(),
            free_list_len: RwLock::new(0),
        }
    }
}
469
impl CacheNode {
    /// Reset the node: no shared cache, all three slab lists empty.
    fn init(&mut self) {
        self.shared = core::ptr::null_mut();
        list_head_init!(self.slab_partial);
        list_head_init!(self.slab_free);
        list_head_init!(self.slab_full);
    }

    /// Allocate the shared array cache from the `array_cache` cache.
    fn set_array_cache(&mut self) -> Result<(), SlabError> {
        let array_cache_addr = get_array_cache()?;
        self.shared = array_cache_addr as *mut ArrayCache;
        unsafe { (*self.shared).inner.lock().init() };
        Ok(())
    }

    /// Return a slab with at least one free object, creating a new slab when
    /// both the partial and free lists are empty. The returned slab is always
    /// on the partial list afterwards.
    fn alloc_inner(&self, cache: *mut MemCache) -> Result<&mut Slab,SlabError> {
        let cache = unsafe{&mut *cache};
        // Check the partial list first.
        let mut slab_list = to_list_head_ptr!(self.slab_partial);
        let slab = if !is_list_empty!(slab_list) {
            // Non-empty: take the first usable slab.
            slab_list = self.slab_partial.next;
            let slab = mut_ref_slab!(slab_list);
            slab
        } else if is_list_empty!(to_list_head_ptr!(self.slab_free)) {
            // Partial and free lists are both empty: grow the cache.
            trace!("alloc new rslab");
            Slab::new(cache)?; // new slab is linked onto the free list
            assert!(!is_list_empty!(to_list_head_ptr!(self.slab_free)));
            // First available slab.
            let slab = mut_ref_slab!( self.slab_free.next);
            slab.move_to(to_list_head_ptr!(self.slab_partial));
            slab
        } else {
            // Free list is non-empty: promote its first slab to partial.
            slab_list = self.slab_free.next;
            let slab = mut_ref_slab!(slab_list);
            slab.move_to(to_list_head_ptr!(self.slab_partial));
            // One fewer free slab.
            *self.free_list_len.write() -= 1;
            slab
        };
        Ok(slab)
    }

    /// Fill `addrs` with object addresses: from the shared array cache when
    /// it has enough, otherwise batch-wise straight from the slabs.
    ///
    /// NOTE(review): the refill path fills at most `batch_count` entries and
    /// assumes `addrs.len() == batch_count` — confirm at call sites.
    fn alloc(&self, cache: *mut MemCache, addrs: &mut [usize])->Result<(),SlabError> {
        // Does the shared array cache hold enough objects?
        let shared_array = unsafe { &mut *self.shared };
        let mut shared_array = shared_array.inner.lock();
        if shared_array.avail >= addrs.len() as u32 {
            // Serve from the shared cache (newest objects first).
            shared_array.pop_back(addrs);
        } else {
            // Pull objects from the slabs and hand them straight back
            // to the caller.
            let mut i = 0;
            let mcache = unsafe{&*cache};
            while i < shared_array.batch_count as usize{
                let mut slab = self.alloc_inner(cache)?;
                while slab.used_object != mcache.per_objects{
                    let addr = slab.alloc();
                    addrs[i] = addr as usize;
                    i += 1;
                    if i == shared_array.batch_count as usize{
                        break;
                    }
                }
                // Slab exhausted: park it on the full list.
                if slab.used_object == mcache.per_objects{
                    slab.move_to(to_list_head_ptr!(self.slab_full));
                }
            }
        }
        Ok(())
    }

    /// Find the slab containing `addr` on the partial or full list.
    fn is_in_cache(&self, addr: *mut u8) -> Result<&mut Slab,SlabError> {
        // A live object can only be on a partial or a full slab.
        let slab_list = self.slab_partial.iter().find(|&slab_list| {
            let slab = mut_ref_slab!(slab_list);
            slab.is_in_slab(addr)
        });
        if slab_list.is_some() {
            return Ok(mut_ref_slab!(slab_list.unwrap()) );
        }
        let slab_list = self.slab_full.iter().find(|&slab_list| {
            let slab = mut_ref_slab!(slab_list) ;
            slab.is_in_slab(addr)
        });
        if slab_list.is_some() {
            return Ok(mut_ref_slab!(slab_list.unwrap()));
        }
        Err(SlabError::NotInCache)
    }
    /// Return one object to its slab and rebalance the slab between the
    /// partial and free lists. Panics if `addr` does not belong here.
    fn dealloc_inner(&self, addr: *mut u8) {
        // Locate the owning slab (partial or full list).
        let slab = self.is_in_cache(addr).unwrap();
        slab.dealloc(addr);
        if slab.used_object == 0 {
            // Fully free again: park it on the free list.
            slab.move_to(to_list_head_ptr!(self.slab_free));
            *self.free_list_len.write() +=1;
            // Possibly give frames back to the frame allocator.
            self.check_and_reclaim();
        } else {
            slab.move_to(to_list_head_ptr!(self.slab_partial));
        }
    }
    /// If the free list exceeds FREE_LIST_MAX, reclaim the excess slabs.
    ///
    /// NOTE(review): this deletes nodes while iterating; it relies on the
    /// list iterator tolerating removal of the current node — confirm in
    /// the doubly_linked_list crate.
    fn check_and_reclaim(&self){
        let mut free_len = self.free_list_len.write();
        if *free_len > FREE_LIST_MAX as u32 {
            // Reclaim only the surplus beyond FREE_LIST_MAX.
            self.slab_free.iter().take(*free_len as usize - FREE_LIST_MAX).for_each(|slab_list|{
                let slab = mut_ref_slab!(slab_list);
                slab.reclaim();
                list_del!(slab_list);
            });
            *free_len = FREE_LIST_MAX as u32;
        }
    }
    /// Accept a batch of freed objects into the shared array cache, flushing
    /// the oldest batch back to the slabs first when the cache is full.
    fn dealloc(&self, addrs: &[usize]) {
        let shared_array = unsafe { &mut *self.shared };
        let mut shared_array = shared_array.inner.lock();
        if shared_array.is_full() {
            // Shared cache full: evict the oldest `batch_count` objects
            // back into their slabs to make room.
            let mut temp = [0usize; PER_CPU_OBJECTS];
            let batch_count = shared_array.batch_count as usize;
            shared_array.pop(&mut temp[0..batch_count]);
            for i in 0..shared_array.batch_count as usize {
                self.dealloc_inner(temp[i] as *mut u8);
            }
        }
        // There is room now; stash the incoming batch.
        shared_array.push(addrs);
    }

    /// Total number of slabs on all three lists.
    fn total_slabs(&self) -> usize {
        self.slab_partial.len() + self.slab_full.len() + self.slab_free.len()
    }
    /// Number of allocated objects: exact count on partial slabs plus
    /// `per_objects` for every full slab.
    fn used_objects(&self, per_objects: usize) -> usize {
        self.slab_partial
            .iter()
            .map(|slab_list|  {
                mut_ref_slab!(slab_list).used_object as usize
            })
            .sum::<usize>()
            + self.slab_full.len() * per_objects
    }
    /// Reclaim every slab on all three lists and return the shared array
    /// cache object to the `array_cache` cache.
    fn destroy(&self) {
        // Give the shared array cache back to the `array_cache` cache
        // (always the second cache on the global list).
        let shared = self.shared;
        let cache_head = unsafe { &mut MEM_CACHE_BOOT };
        let next_cache = mut_ref_memcache!(cache_head.list.next);
        next_cache.dealloc(shared as *mut u8);

        self.slab_partial.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            slab.reclaim();
            // Unlink from slab_partial.
            list_del!(slab_list);
        });
        self.slab_full.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            slab.reclaim();
            // Unlink from slab_full.
            list_del!(slab_list);
        });
        self.slab_free.iter().for_each(|slab_list| {
            let slab = mut_ref_slab!(slab_list);
            slab.reclaim();
            // Unlink from slab_free.
            list_del!(slab_list);
        });
    }
}
654
/// Slab descriptor.
///
/// `cache`: owning cache;
/// `used_object`: number of allocated objects;
/// `next_free`: index into `free_list` of the next free object;
/// `fist_object`: address of the first object (sic — typo for "first",
/// kept because the field name is used throughout the file);
/// `free_list`: array of object indices recording the free objects.
pub struct Slab {
    list: ListHead,
    cache: *mut MemCache,
    used_object: u32,
    next_free: u32,
    // Colour index this slab was created with (set from cache.color_next).
    color_off: u32,
    fist_object: usize,
    free_list: *mut u32,
}

impl Debug for Slab {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        f.write_fmt(format_args!(
            "Slab{{\n\
            \tlist:{:?},\n\
            \tcache:{:?},\n\
            \tused_object:{},\n\
            \tnext_free:{},\n\
            \tfist_object:{:#x},\n\
            \tfree_list:{:?}\
            }}",
            self.list,
            self.cache,
            self.used_object,
            self.next_free,
            self.fist_object,
            self.free_list
        ))
    }
}
691
692impl Slab {
693    fn new(cache: &MemCache)->Result<(),SlabError> {
694        // 创建一个slab
695        // 从cache获取需要申请的页面和对象大小
696        // 申请页面
697        // 初始化slab
698        // 将slab添加到cache的slab_partial链表中
699        let per_frames = cache.per_frames;
700        let start_addr = alloc_frames_for_cache(1 << per_frames);
701        if start_addr.is_null(){
702            return Err(SlabError::CantAllocFrame);
703        }
704        let start_addr = start_addr as usize;
705        let mut slab_desc_align_size = 0; //确定slab描述符对齐后大小
706        if cache.flags == Flags::SLAB_ON {
707            slab_desc_align_size = slab_descriptor_align_size(cache.per_objects, cache.align);
708        }
709        let mut first_object_addr = start_addr.add(slab_desc_align_size as usize);
710        //需要根据cache的着色偏移来调整
711        first_object_addr += cache.color_off as usize * cache.color_next as usize;
712
713        let (slab_ptr, free_list_addr) = if cache.flags == Flags::SLAB_ON {
714            (start_addr, start_addr.add(core::mem::size_of::<Slab>()))
715        } else {
716            //从外面分配对象来保存slab描述符以及free_list
717            let layout = Layout::from_size_align(cache.per_objects as usize * core::mem::size_of::<u32>(), 4).unwrap();
718            let free_list_ptr = alloc_from_slab(
719                layout
720            )?;
721            let slab_ptr =
722                alloc_from_slab(slab_layout!())?;
723            (slab_ptr as usize, free_list_ptr as usize)
724        };
725        let slab = Slab {
726            list: ListHead::new(),
727            cache: cache as *const MemCache as *mut MemCache,
728            used_object: 0,
729            next_free: 0,
730            color_off: cache.color_next,
731            fist_object: first_object_addr as usize,
732            free_list: free_list_addr as *mut u32,
733        };
734        // 写入slab信息到开始位置
735        unsafe {
736            core::ptr::write(slab_ptr as *mut Slab, slab);
737            // 初始化free_list
738            for i in 0..cache.per_objects {
739                core::ptr::write(
740                    free_list_addr.add(i as usize * core::mem::size_of::<u32>()) as *mut u32,
741                    i,
742                );
743            }
744        }
745        let slab = unsafe { &mut *(slab_ptr as *mut Slab) };
746        list_head_init!(slab.list);
747        trace!("{:?}", slab);
748        // 加入到cache的slab_free链表中
749        list_add_tail!(
750            to_list_head_ptr!(slab.list),
751            to_list_head_ptr!(cache.mem_cache_node.slab_free)
752        );
753        let mut cache = slab.cache;
754        unsafe {
755            if (*cache).color_next == (*cache).color {
756                (*cache).color_next = 0; //从0开始新的循环
757            } //更新 cache的着色偏移
758        }
759        Ok(())
760    }
761
762    fn alloc(&mut self) -> *mut u8 {
763        let cache = unsafe { &mut *self.cache };
764        let per_objects = cache.per_objects;
765        if self.next_free < per_objects {
766            let pos = unsafe { self.free_list.add(self.next_free as usize).read() };
767            let addr = self
768                .fist_object
769                .add(pos as usize * cache.object_size as usize);
770            self.next_free += 1;
771            self.used_object += 1;
772            return addr as *mut u8;
773        }
774        core::ptr::null_mut()
775    }
776    fn dealloc(&mut self, addr: *mut u8) {
777        let cache = unsafe { &mut *self.cache };
778        let pos = (addr as usize - self.fist_object) / cache.object_size as usize;
779        self.next_free -= 1;
780        unsafe {
781            self.free_list
782                .add(self.next_free as usize)
783                .write_volatile(pos as u32);
784        }
785        self.used_object -= 1;
786        trace!(
787            "rslab dealloc {:?}, object_size is {}, used: {}",
788            addr,
789            cache.object_size,
790            self.used_object
791        );
792    }
793    fn reclaim(&self) {
794        // 回收自己的页面
795        // 如果是SLAB_ON,则正常释放内存即可
796        // 如果是SLAB_OFF,则需要释放slab描述符和free_list
797        let cache = unsafe { &mut *self.cache };
798        let per_frames = cache.per_frames;
799        if cache.flags == Flags::SLAB_OFF {
800            //释放slab描述符和free_list
801            dealloc_to_slab(self as *const Slab as *mut u8,slab_layout!());
802            let layout = Layout::from_size_align(cache.per_objects as usize * core::mem::size_of::<u32>(), 4).unwrap();
803            dealloc_to_slab(self.free_list as *mut u8,layout);
804        }
805        unsafe {
806            free_frames(self.start() as *const Slab as *mut u8, 1 << per_frames);
807        }
808    }
809    fn start(&self) -> usize {
810        // 返回slab页面起始地址
811        let cache = unsafe { &mut *self.cache };
812        if cache.flags == Flags::SLAB_ON {
813            self as *const Slab as usize
814        } else {
815            self.fist_object
816        }
817    }
818
819    #[inline]
820    fn move_to(&mut self, to: *mut ListHead) {
821        list_del!(to_list_head_ptr!(self.list));
822        list_add_tail!(to_list_head_ptr!(self.list), to);
823    }
824    #[inline]
825    fn is_in_slab(&self, addr: *mut u8) -> bool {
826        //检查此地址是否位于slab中
827        let addr = addr as usize;
828        let cache = unsafe { &mut *self.cache };
829        let start_addr = self.start();
830        let end_addr = start_addr.add((1 << cache.per_frames as usize) * unsafe { frame_size() });
831        (start_addr <= addr) && (addr < end_addr)
832    }
833}
834
/// Request `num` physical frames from the frame allocator; returns null on
/// failure (checked by the caller).
fn alloc_frames_for_cache(num: u32) -> *mut u8 {
    trace!("alloc {} frames for cache", num);
    unsafe { alloc_frames(num as usize) }
}
840
/// Allocate one `ArrayCache` object from the dedicated `array_cache` cache,
/// which is always the second entry on the global cache list (right after
/// the boot cache).
fn get_array_cache()->Result<*mut u8,SlabError>{
    let cache_head = unsafe { &mut MEM_CACHE_BOOT };
    let next_cache = mut_ref_memcache!(cache_head.list.next);
    assert_eq!(next_cache.cache_name,"array_cache");
    next_cache.alloc()
}
847
848/// 初始化第一个cache
849pub fn mem_cache_init()->Result<(),SlabError> {
850    unsafe {
851        list_head_init!(SLAB_CACHES);
852    }
853    let cache = unsafe { &mut MEM_CACHE_BOOT };
854    let cache_layout = cache_layout!();
855    cache.init(
856        "kmem_cache",
857        cache_layout.size() as u32,
858        cache_layout.align() as u32,
859    );
860    /// 初始化本地高速缓存信息
861    unsafe {
862        for i in 0..CPUS {
863            cache.array_cache[i] = &mut ARRAY_CACHE_FOR_BOOT[i] as *mut ArrayCache;
864        }
865        cache.mem_cache_node.shared = &mut ARRAY_CACHE_NODE_BOOT as *mut ArrayCache;
866    }
867    list_add_tail!(
868        to_list_head_ptr!(cache.list),
869        to_list_head_ptr!(SLAB_CACHES)
870    );
871
872    let array_cache_layout = array_cache_layout!();
873    /// 初始化array_cache,用于后面分配本地高速缓存对象
874    let array_cache = create(
875        "array_cache",
876        array_cache_layout.size() as u32,
877        array_cache_layout.align() as u32,
878    )?;
879    unsafe {
880        for i in 0..CPUS {
881            array_cache.array_cache[i] = &mut ARRAY_CACHE_FOR_ARRAY[i] as *mut ArrayCache;
882        }
883        array_cache.mem_cache_node.shared = &mut ARRAY_CACHE_NODE_ARRAY as *mut ArrayCache;
884    }
885    Ok(())
886}
887
888/// 创建自定义的cache
889pub fn create_mem_cache(
890    name: &'static str,
891    object_size: u32,
892    align: u32,
893) -> Result<&mut MemCache, SlabError> {
894    // 创建一个自定义cache
895    let cache_head = unsafe { &mut SLAB_CACHES };
896    let find = cache_head.iter().find(|&cache_list| {
897        let cache = mut_ref_memcache!(cache_list);
898        // //查找是否存在同名的cache
899        cache.cache_name.eq(name)
900    });
901    if find.is_some() {
902        return Err(SlabError::NameDuplicate);
903    }
904    let cache_object = create(name, object_size, align)?;
905    // 初始化高速缓存
906    cache_object.set_array_cache()?;
907    Ok(cache_object)
908}
909
/// Allocate a `MemCache` descriptor from the boot cache, initialise it and
/// link it onto the global `SLAB_CACHES` list. Does NOT set up array caches.
fn create(name: &'static str, object_size: u32, align: u32) -> Result<&mut MemCache, SlabError> {
    // The descriptor itself comes from the first (boot) cache.
    let cache = unsafe { &mut MEM_CACHE_BOOT };
    let cache_object_addr = cache.alloc()?;
    let cache_object_addr =  cache_object_addr as *mut MemCache;
    let cache_object = unsafe { &mut (*cache_object_addr) };
    // Initialise the new cache (init currently always returns Ok).
    cache_object.init(name, object_size, align).unwrap();
    // Register it on the global list.
    list_add_tail!(
        to_list_head_ptr!(cache_object.list),
        to_list_head_ptr!(SLAB_CACHES)

    );
    Ok(cache_object)
}
926
927
928/// 分配一个指定大小和对齐方式的内存
929/// 这里暂时忽略了对齐带来的影响
930pub fn alloc_from_slab(layout:Layout) -> Result<*mut u8,SlabError> {
931    // 遍历所有的cache,找到第一个能够分配的cache
932    // 跳过第一个cache,因为第一个cache是用来分配cache的
933    // 跳过第二个cache,因为第二个slab是用来分配array_cache的
934    // 不在用户创建的cache上分配
935    let cache_list = unsafe { &mut SLAB_CACHES };
936
937    let size = layout.size();
938    // 8B-8MB == 3-23
939    let index = size.next_power_of_two();
940    let index = index.trailing_zeros() as usize;
941    if index > 23 {
942        return Err(SlabError::SizeTooLarge);
943    }
944    let find = cache_list.iter().find(|&cache_list| {
945        let cache = mut_ref_memcache!(cache_list);
946        cache.object_size.trailing_zeros() as usize >= index && cache.cache_name !="array_cache" && cache.cache_name!="kmem_cache"
947    }).unwrap();
948    let cache = mut_ref_memcache!(find);
949    cache.alloc()
950}
951
952/// 将分配的对象还给slab系统
953pub fn dealloc_to_slab(addr: *mut u8,layout:Layout) -> Result<(), SlabError> {
954    let cache_list = unsafe { &SLAB_CACHES };
955    let size = layout.size();
956    let index = size.next_power_of_two();
957    let index = index.trailing_zeros() as usize;
958    if index > 23 {
959        return Err(SlabError::NotInCache);
960    }
961    let find = cache_list.iter().find(|&cache_list| {
962        let cache = mut_ref_memcache!(cache_list);
963        // 查找是否存在同名的cache
964        cache.object_size.trailing_zeros() as usize == index && cache.cache_name !="array_cache" && cache.cache_name!="kmem_cache"
965    });
966    if find.is_none() {
967        return Err(SlabError::NotInCache);
968    }
969    let cache = mut_ref_memcache!(find.unwrap());
970    cache.dealloc(addr)
971}
972
973/// 打印系统内的所有cache 信息
974pub fn print_slab_system_info() {
975    let cache_list = unsafe { &SLAB_CACHES };
976    pprintln!("There are {} caches in system:", cache_list.len());
977    pprintln!("cache_name object_size align p_frames p_objects  total_object used_object limit batch_count local_cpus shared");
978    cache_list.iter().for_each(|cache| {
979        let cache = ref_memcache!(cache);
980        pprintln!("----------------------------------------------------------------------------------------------------------");
981        cache.print_info();
982    });
983}
984
985
986
#[cfg(test)]
mod array_cache_test {
    use crate::slab::{ArrayCache, PER_CPU_OBJECTS};

    // Cleanups vs. the original: `cache` is never mutated (`SpinMutex::lock`
    // takes `&self`), and several `mut`/unused bindings only produced
    // warnings; the asserted behavior of every test is unchanged.

    #[test]
    fn test_push_pop() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        assert_eq!(inner.is_empty(), true);
        assert_eq!(inner.batch_count as usize, PER_CPU_OBJECTS / 2);
        assert_eq!(inner.limit as usize, PER_CPU_OBJECTS);
        let mut data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        inner.push(&data[0..batch]);
        assert_eq!(inner.is_empty(), false);
        assert_eq!(inner.avail as usize, PER_CPU_OBJECTS);
        inner.pop(&mut data[0..batch]);
        assert_eq!(inner.avail, PER_CPU_OBJECTS as u32 / 2);
    }
    #[test]
    #[should_panic]
    fn test_push_pop_panic1() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [0; PER_CPU_OBJECTS];
        // Objects must be pushed in exact batches; a full-array push panics.
        inner.push(&data[0..]);
    }
    #[test]
    #[should_panic]
    fn test_push_pop_panic2() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let mut data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        // Popping is only legal once the queue is full; one batch is not enough.
        inner.push(&data[0..batch]);
        inner.pop(&mut data[0..batch]);
    }
    #[test]
    fn test_get_put() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [10; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        inner.push(&data[0..batch]);
        let t = inner.get();
        assert_eq!(10, t as usize);
        assert_eq!(inner.avail as usize, batch - 1);
    }
    #[test]
    #[should_panic]
    fn test_get_put_panic() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        // Getting from an empty queue panics.
        inner.get();
    }
    #[test]
    #[should_panic]
    fn test_get_put_panic1() {
        let cache = ArrayCache::new();
        let mut inner = cache.inner.lock();
        let data = [0; PER_CPU_OBJECTS];
        let batch = inner.batch_count as usize;
        // Putting into a full queue panics.
        inner.push(&data[0..batch]);
        inner.push(&data[0..batch]);
        inner.put(10 as *mut u8);
    }
}
1059
#[cfg(test)]
mod slab_test {
    use crate::slab::{mem_cache_init, CacheNode, Flags};
    use crate::MemCache;

    // Stub implementations of the frame-allocator hooks the slab code links
    // against; the symbol names must stay exact (`#[no_mangle]`), but the
    // unused parameters are underscored to silence warnings.
    #[no_mangle]
    unsafe fn free_frames(_addr: *mut u8, _num: usize) {}
    #[no_mangle]
    fn current_cpu_id() -> usize {
        0
    }
    #[no_mangle]
    unsafe fn alloc_frames(_num: usize) -> *mut u8 {
        core::ptr::null_mut()
    }

    #[test]
    fn test_init_cache_small_obj() {
        let mut cache = MemCache::new();
        // `init` returns a must-use Result; the original silently ignored it.
        cache.init("test_cache", 128, 7).unwrap();
        assert_eq!(cache.align, 8);
        assert_eq!(cache.cache_name, "test_cache");
        assert_eq!(cache.object_size, 128);
        assert_eq!(cache.flags, Flags::SLAB_ON);
        assert_eq!(cache.per_frames, 0);
        assert_eq!(cache.per_objects, 30);
        assert_eq!(cache.color, 5);
        // Re-init with a non-power-of-two size: 127 is rounded up to 128.
        cache.init("test_cache", 127, 7).unwrap();
        assert_eq!(cache.object_size, 128);
    }

    #[test]
    fn test_init_cache_big_obj() {
        let mut cache = MemCache::new();
        cache.init("test_cache", 512, 7).unwrap();
        // Large objects keep their slab descriptor off-slab and get no coloring.
        assert_eq!(cache.flags, Flags::SLAB_OFF);
        assert_eq!(cache.color, 0);
        assert_eq!(cache.per_frames, 0);
        assert_eq!(cache.per_objects, 8);
    }

    #[test]
    fn test_cache_node() {
        let mut node = CacheNode::new();
        node.init();
        // A freshly initialised node owns no slabs and no used objects.
        let x = node.total_slabs();
        assert_eq!(x, 0);
        assert_eq!(node.used_objects(10), 0);
    }
}