#![allow(static_mut_refs)]
use alloc::{boxed::Box, vec::Vec};
use core::{any::Any, hint::likely};
use bsp_define::{
irqchip::{IrqState, IrqType},
smp::NR_CPUS,
};
use crate::{
bsp::{__irqchip_irq_init, __irqchip_irq_init_cpu, __irqchip_irqs_max},
irq::{
irqctl::{irq_ack, irq_disable, irq_enable, irq_eoi, irq_set_type},
irqdesc::IrqDesc,
},
println,
processor::nr_cpus,
sched::{
preempt::{HARDIRQ_OFFSET, preempt_count_add, preempt_count_sub},
task::stack::THREAD_STACK_SIZE,
},
space::kalloc::{GfpFlags, kmalloc},
sync::spinlock::Spinlock,
};
pub mod irqctl;
pub mod irqdesc;
pub mod irqflags;
/// Global interrupt bookkeeping: one lock-protected descriptor per hardware
/// irq line, indexed by hardware irq number (`hwirq`).
struct IrqManage {
    // Populated once in `init()`; each entry has its own spinlock so
    // unrelated irq lines never contend with each other.
    v_desc: Vec<Spinlock<IrqDesc>>,
}
impl IrqManage {
    /// Creates an empty manager. Descriptors are populated later by [`Self::init`];
    /// `const` so it can initialize the `static mut IRQ_MANAGE`.
    const fn new() -> Self {
        Self { v_desc: Vec::new() }
    }

    /// Builds one locked descriptor per hardware irq line and masks every
    /// line at the controller so nothing can fire before it is requested.
    fn init(&mut self) {
        // SAFETY: board glue symbol; reports how many irq lines the chip has.
        let irqsmax = unsafe { __irqchip_irqs_max() };
        // Reserve up front so the push loop below never reallocates.
        self.v_desc.reserve(irqsmax as usize);
        for idx in 0..irqsmax {
            let mut irqdesc = IrqDesc::new();
            irqdesc.set_hwirq(idx as u32);
            // Fresh lines start masked and flagged NOREQUEST (no handler yet);
            // request_irq() asserts and clears these flags later.
            irqdesc.set_irq_state(IrqState::IRQ_MASKED | IrqState::IRQ_NOREQUEST, true);
            // Mask the line at the controller as well, not just in software state.
            irq_disable(idx as u32);
            self.v_desc.push(Spinlock::new(irqdesc));
        }
    }

    /// Installs `f` (with optional `private` device data) as the handler for
    /// `hwirq`, programs the trigger type into the controller, and unmasks
    /// the line.
    ///
    /// Panics (via `unwrap`) if the controller rejects `irqtype`, and debug-
    /// asserts that the line has not already been requested.
    fn request_irq(
        &self,
        hwirq: u32,
        irqtype: IrqType,
        f: IrqHandlerFunc,
        private: Option<IrqDevData>,
    ) {
        let mut desc = self.v_desc[hwirq as usize].lock();
        // Double-requesting a line is a kernel bug, not a runtime condition.
        debug_assert!(desc.irq_state().contains(IrqState::IRQ_NOREQUEST));
        desc.set_handler(f, private);
        desc.set_irq_type(irqtype, true);
        // Program hardware only after the software descriptor is consistent.
        irq_set_type(hwirq, irqtype).unwrap();
        irq_enable(hwirq);
        // Line is now live: clear both the "unclaimed" and "masked" flags.
        desc.set_irq_state(IrqState::IRQ_NOREQUEST | IrqState::IRQ_MASKED, false);
    }

    /// Like [`Self::request_irq`], but first marks the descriptor as a
    /// per-CPU irq (e.g. timer/IPI lines that exist on every core).
    fn request_percpu_irq(
        &self,
        hwirq: u32,
        irqtype: IrqType,
        f: IrqHandlerFunc,
        private: Option<IrqDevData>,
    ) {
        {
            // Scoped so the lock is released before request_irq re-takes it.
            let mut desc = self.v_desc[hwirq as usize].lock();
            desc.set_irq_state(IrqState::IRQ_PER_CPU, true);
        }
        self.request_irq(hwirq, irqtype, f, private);
    }
}
// Single global irq table. Mutable-static access is confined to init paths
// and to handle_irq(); per-descriptor spinlocks serialize descriptor state.
static mut IRQ_MANAGE: IrqManage = IrqManage::new();
// Per-CPU irq stack pointers, filled in by irq_stack_init(). no_mangle because
// the low-level interrupt entry code references this symbol by name.
#[unsafe(no_mangle)]
static mut IRQ_STACK_PTR: [usize; NR_CPUS] = [0; NR_CPUS];
/// Allocates a dedicated interrupt stack for each online CPU and records its
/// address in `IRQ_STACK_PTR` (consumed by the low-level irq entry code).
fn irq_stack_init() {
    // SAFETY: runs once during early boot before any interrupt can be taken,
    // so nothing else reads or writes IRQ_STACK_PTR concurrently.
    unsafe {
        let online = nr_cpus();
        for slot in IRQ_STACK_PTR.iter_mut().take(online) {
            let stack = kmalloc(THREAD_STACK_SIZE, GfpFlags::Clean).unwrap();
            *slot = stack.to_value();
        }
    }
}
/// One-time irq subsystem bring-up on the boot CPU: per-CPU stacks first,
/// then the interrupt controller, then the software descriptor table.
pub(crate) fn irq_init() {
    irq_stack_init();
    // SAFETY: single-threaded early boot; no concurrent access to the
    // controller or to IRQ_MANAGE is possible yet.
    unsafe { __irqchip_irq_init() };
    unsafe { IRQ_MANAGE.init() };
}
/// Per-CPU controller bring-up, called on each core as it comes online
/// (e.g. the CPU-local interface of the interrupt controller).
pub(crate) fn irq_init_cpu() {
    // SAFETY: board glue symbol; each CPU initializes only its own state.
    unsafe { __irqchip_irq_init_cpu() };
}
/// Marks entry into hard-irq context by bumping the preempt count by
/// HARDIRQ_OFFSET; must be paired with irq_exit().
pub(crate) fn irq_enter() {
    preempt_count_add(HARDIRQ_OFFSET as u32);
}
/// Leaves hard-irq context; undoes the preempt-count bump from irq_enter().
pub(crate) fn irq_exit() {
    preempt_count_sub(HARDIRQ_OFFSET as u32);
}
/// Top-level dispatch for a hardware interrupt: ack the line, run the
/// registered handler, eoi, and throttle lines whose handler keeps
/// reporting IrqNone (a spurious-interrupt storm).
pub(crate) fn handle_irq(hwirq: u32) {
    irq_enter();
    let total = unsafe { __irqchip_irqs_max() } as u32;
    // Out-of-range irq numbers are silently dropped (after enter/exit pairing).
    if likely(hwirq < total) {
        // SAFETY: descriptor table is fully built in irq_init() before
        // interrupts are enabled; the per-descriptor spinlock guards state.
        let mut desc = unsafe { IRQ_MANAGE.v_desc[hwirq as usize].lock() };
        desc.set_irq_state(IrqState::IRQ_INPROGRESS, true);
        // Ack before running the handler, eoi after — ordering matters to
        // the controller; do not reorder these calls.
        irq_ack(hwirq);
        let outcome = desc.handle_irq();
        irq_eoi(hwirq);
        desc.set_irq_state(IrqState::IRQ_INPROGRESS, false);
        if let IrqHandlerRet::IrqNone = outcome {
            desc.irq_unhandled_inc();
            // 100 consecutive unhandled deliveries → assume a stuck/spurious
            // line: mask it and remember it was left pending.
            if desc.irq_unhandled() >= 100 {
                irq_disable(hwirq);
                desc.set_irq_state(IrqState::IRQ_MASKED | IrqState::IRQ_PENDING, true);
                println!("hwirq: {}, irq unhandled {}", hwirq, desc.irq_unhandled());
            }
        }
    }
    irq_exit();
}
/// Opaque per-device payload passed back to the handler on every dispatch.
pub type IrqDevData = Box<dyn Any + Send + Sync>;
/// Handler signature: (hwirq number, optional device data) -> outcome.
pub type IrqHandlerFunc = fn(u32, &Option<IrqDevData>) -> IrqHandlerRet;
/// What a handler reports back to the dispatcher.
pub enum IrqHandlerRet {
    // Interrupt was not for this handler; repeated IrqNone results trip
    // the spurious-storm throttle in handle_irq().
    IrqNone,
    // Interrupt was serviced.
    IrqHandled,
    // Inter-processor communication interrupt was serviced.
    IrqIpc,
}
/// Public entry: registers `f` as the handler for `hwirq`, programs the
/// trigger type, and unmasks the line. See `IrqManage::request_irq`.
pub fn request_irq(hwirq: u32, irqtype: IrqType, f: IrqHandlerFunc, pri: Option<IrqDevData>) {
    // SAFETY: IRQ_MANAGE is initialized by irq_init() before drivers run;
    // the per-descriptor spinlock serializes concurrent requests.
    unsafe { IRQ_MANAGE.request_irq(hwirq, irqtype, f, pri) };
}
/// Public entry: like [`request_irq`] but marks the line as per-CPU
/// before installing the handler. See `IrqManage::request_percpu_irq`.
pub fn request_percpu_irq(
    hwirq: u32,
    irqtype: IrqType,
    f: IrqHandlerFunc,
    pri: Option<IrqDevData>,
) {
    // SAFETY: IRQ_MANAGE is initialized by irq_init() before drivers run;
    // the per-descriptor spinlock serializes concurrent requests.
    unsafe { IRQ_MANAGE.request_percpu_irq(hwirq, irqtype, f, pri) };
}