use semx_unsafe_list::{UnsafeListHead, UnsafeListNode, init_unsafe_list_head};
use super::{
GfpFlags, Page, alloc_page, alloc_pages, error::Result, free_page, free_pages, page::PageType,
};
use crate::{
space::{
addr::Vaddr,
mm::pgtabledef::{PAGE_MASK, PAGE_SHIFT, PAGE_SIZE},
},
sync::spinlock::Spinlock,
};
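// A minimal slab allocator: eight fixed size classes (64..=2048 bytes), one
// spinlock-guarded cache per class, and a per-cache freelist threaded through
// the first word of each free object. Requests larger than 2048 bytes fall
// through to the page allocator.

/// Per-page slab bookkeeping, embedded in `Page::ty`. `list` links the page
/// into its owning cache's `lists`, `index` is that cache's slot in
/// `KMEM_CACHE`, and `inuse` counts the page's live objects.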
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct KmemPage {
list: UnsafeListNode<Page>,
index: u16,
inuse: u16,
}
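/// One size-class cache. `lists` chains the slab pages owned by the cache;
/// `freelist` holds the address of the first free object (0 when empty).
/// Aligned to 64 bytes so neighboring caches do not share a cache line.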
#[repr(align(64))]
pub(crate) struct KmemCache {
lists: UnsafeListHead<Page>,
freelist: usize,
objsize: u16,
objects: u16,
name: &'static str,
}
// SAFETY: every access to a `KmemCache` goes through its guarding `Spinlock`
// below, so the intrusive list and the freelist are never touched
// concurrently even though they hold raw addresses.
unsafe impl Send for KmemCache {}
unsafe impl Sync for KmemCache {}
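/// The size-class caches, 64 through 2048 bytes, each behind its own
/// spinlock. `objects` is how many `objsize`-byte objects fit in one page.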
static KMEM_CACHE: [Spinlock<KmemCache>; 8] = [
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 64,
objects: PAGE_SIZE as u16 / 64,
name: "slab-64",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 96,
objects: PAGE_SIZE as u16 / 96,
name: "slab-96",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 128,
objects: PAGE_SIZE as u16 / 128,
name: "slab-128",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 192,
objects: PAGE_SIZE as u16 / 192,
name: "slab-192",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 256,
objects: PAGE_SIZE as u16 / 256,
name: "slab-256",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 512,
objects: PAGE_SIZE as u16 / 512,
name: "slab-512",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 1024,
objects: PAGE_SIZE as u16 / 1024,
name: "slab-1024",
}),
Spinlock::new(KmemCache {
lists: UnsafeListHead::new(),
freelist: 0,
objsize: 2048,
objects: PAGE_SIZE as u16 / 2048,
name: "slab-2048",
}),
];
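/// Map an allocation size to an index into `KMEM_CACHE`, or -1 when the
/// size exceeds the largest class. For example, a 100-byte request maps to
/// index 2, the "slab-128" cache.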
#[inline(always)]
fn slab_index(size: usize) -> isize {
    match size {
        0..=64 => 0,
        65..=96 => 1,
        97..=128 => 2,
        129..=192 => 3,
        193..=256 => 4,
        257..=512 => 5,
        513..=1024 => 6,
        1025..=2048 => 7,
        _ => -1,
    }
}
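/// Smallest page order whose span covers `size` bytes, i.e.
/// `ceil(log2(size)) - PAGE_SHIFT`. The `kmalloc` fallback only calls this
/// for sizes above 2048 bytes, which keeps the subtraction from
/// underflowing.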
#[allow(unused)]
#[inline(always)]
fn get_order(size: usize) -> usize {
(size - 1).ilog2() as usize + 1 - PAGE_SHIFT
}
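/// Allocate `size` bytes. Requests of at most 2048 bytes come from the
/// matching size-class cache; larger ones fall back to `alloc_pages` with
/// the order computed by `get_order`. Note that slab pages themselves are
/// always allocated with `GfpFlags::Clean`, so `flags` only influences the
/// fallback path.
///
/// A hypothetical call site (sketch only; error handling elided):
///
/// ```ignore
/// let v = kmalloc(100, GfpFlags::Clean)?; // served by the "slab-128" cache
/// // ... use the memory ...
/// kfree(v);
/// ```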
#[allow(clippy::cast_sign_loss, unused)]
#[inline(always)]
pub(crate) fn kmalloc(size: usize, flags: GfpFlags) -> Result<Vaddr> {
let index = slab_index(size);
if index == -1 {
return alloc_pages(flags, get_order(size));
}
let mut cache = KMEM_CACHE[index as usize].lock_irq_save();
cache.slab_alloc()
}
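/// Free a page-allocator allocation without knowing its order: compound
/// pages report their own order, plain pages are order 0.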
fn free_pages_norder(vaddr: Vaddr) {
let page = vaddr.to_page();
let order = if page.is_PageComp() { page.compound_or_head_order() } else { 0 };
free_pages(vaddr, order);
}
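/// Free an allocation returned by `kmalloc`: slab objects go back to their
/// owning size-class cache, anything else is released through the page
/// allocator.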
#[allow(unused)]
#[inline(always)]
pub(crate) fn kfree(vaddr: Vaddr) {
let page = vaddr.to_page_mut();
if !page.is_PageSlab() {
free_pages_norder(vaddr);
return;
}
let page_raw = page as *mut Page;
match &mut page.ty {
PageType::Slab(kp) => {
let mut cache = KMEM_CACHE[kp.index as usize].lock_irq_save();
cache.slab_free(page_raw, vaddr, kp);
},
_ => {
unreachable!()
},
}
}
#[allow(unused)]
impl KmemCache {
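    /// Hand out one object, popping the freelist when it is non-empty and
    /// carving a fresh slab page otherwise.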
    fn slab_alloc(&mut self) -> Result<Vaddr> {
        if self.freelist == 0 {
            return self.new_slab_alloc();
        }
        // Pop the freelist head and bump the owning page's live-object count.
        let object = Vaddr::from(self.freelist);
        let p = object.to_page_mut();
        match &mut p.ty {
            PageType::Slab(kp) => {
                kp.inuse += 1;
            },
            _ => {
                unreachable!()
            },
        }
        // A free object's first word is the address of the next free object.
        self.freelist = unsafe { (self.freelist as *const usize).read() };
        Ok(object)
    }
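    /// Return one object. While its page still has live objects the object
    /// is pushed onto the cache freelist; when `inuse` reaches zero the
    /// whole page goes back to the page allocator.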
    fn slab_free(&mut self, page: *mut Page, vaddr: Vaddr, kp: &mut KmemPage) {
        let p = unsafe { &mut *page };
        kp.inuse -= 1;
        if kp.inuse == 0 {
            // The page holds no live objects: unlink it from the cache and
            // hand it back to the page allocator. Its remaining free objects
            // are still threaded through the cache freelist, so walk the
            // list and unlink every object living in this page; clearing
            // only the head would leave dangling links behind.
            p.clear_PageSlab();
            unsafe { kp.list.list_del() };
            p.ty = PageType::None;
            let v = p.to_virt().unwrap();
            let base = v.to_value();
            let mut link: *mut usize = &mut self.freelist;
            unsafe {
                while link.read() != 0 {
                    if link.read() & PAGE_MASK == base {
                        // Bypass an object belonging to the dying page.
                        link.write((link.read() as *const usize).read());
                    } else {
                        link = link.read() as *mut usize;
                    }
                }
            }
            free_page(v);
            return;
        }
        // Push the object onto the freelist head: its first word stores the
        // old head, then the head becomes this object.
        unsafe {
            (vaddr.to_value() as *mut usize).write(self.freelist);
        }
        self.freelist = vaddr.to_value();
    }
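    /// Write the next-free-object pointer `fp` into the first word of
    /// `object`.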
fn set_freepointer(object: usize, fp: usize) {
unsafe {
(object as *mut usize).write(fp);
}
}
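    /// Allocate a fresh slab page for this cache: tag it, link it into
    /// `lists`, thread every object after the first into the freelist, and
    /// return the first object to the caller (hence `inuse: 1`).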
#[allow(clippy::cast_sign_loss)]
    fn new_slab_alloc(&mut self) -> Result<Vaddr> {
        let vaddr = alloc_page(GfpFlags::Clean)?;
        let page = vaddr.to_page_mut();
        page.set_PageSlab();
        page.ty = PageType::Slab(KmemPage {
            list: UnsafeListNode::new(),
            index: slab_index(self.objsize as usize) as u16,
            inuse: 1, // the first object is handed straight to the caller
        });
        match &mut page.ty {
            PageType::Slab(s) => unsafe { self.lists.list_add(&mut s.list) },
            _ => {
                unreachable!()
            },
        }
        // Thread objects 1..objects-1 into the freelist: each free object's
        // first word holds the address of the next one, 0-terminated.
        let start = vaddr.to_value();
        let size = self.objsize as usize;
        let end = start + self.objects as usize * size;
        let mut this = start + size;
        while this < end {
            let next = this + size;
            KmemCache::set_freepointer(this, if next < end { next } else { 0 });
            this = next;
        }
        self.freelist = if start + size < end { start + size } else { 0 };
        Ok(vaddr)
    }
}
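/// Initialize the list head of every size-class cache. Must run before the
/// first `kmalloc`, since `new_slab_alloc` links pages into these lists.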
pub(crate) fn slab_init() {
for cache in &KMEM_CACHE {
let mut this = cache.lock();
let l = &mut this.lists;
init_unsafe_list_head!(l, Page, ty.Slab.0);
}
}
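/// Name the size-class cache that owns `vaddr`, or "Maybe Buddy" when the
/// page is not a slab page and presumably came from the page allocator.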
pub fn vaddr_name(vaddr: Vaddr) -> &'static str {
let page = vaddr.to_page();
if page.is_PageSlab() {
match &page.ty {
PageType::Slab(kp) => {
let cache = KMEM_CACHE[kp.index as usize].lock();
cache.name
},
_ => {
unreachable!()
},
}
} else {
"Maybe Buddy"
}
}