use alloc::boxed::Box;
use alloc::vec::Vec;
use arch;
#[cfg(feature = "acpi")]
use arch::x86_64::kernel::acpi;
use arch::x86_64::kernel::idt;
use arch::x86_64::kernel::irq;
use arch::x86_64::kernel::percore::*;
use arch::x86_64::kernel::processor;
#[cfg(not(test))]
use arch::x86_64::kernel::smp_boot_code::SMP_BOOT_CODE;
use arch::x86_64::kernel::BOOT_INFO;
use arch::x86_64::mm::paging;
use arch::x86_64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
use arch::x86_64::mm::virtualmem;
use config::*;
use core::convert::TryInto;
use core::sync::atomic::spin_loop_hint;
use core::{cmp, fmt, intrinsics, mem, ptr, u32};
use environment;
use mm;
use scheduler;
use scheduler::CoreId;
use x86::controlregs::*;
use x86::msr::*;
// Offset of the xAPIC Interrupt Command Register's high dword (which holds
// the destination APIC ID), relative to the memory-mapped Local APIC base.
const APIC_ICR2: usize = 0x0310;
// Divide Configuration Register value: divide the bus clock by 8.
const APIC_DIV_CONF_DIVIDE_BY_8: u64 = 0b0010;
// Any value written to the EOI register acknowledges the current interrupt.
const APIC_EOI_ACK: u64 = 0;
// ICR delivery modes (bits 8-10 of the Interrupt Command Register).
const APIC_ICR_DELIVERY_MODE_FIXED: u64 = 0x000;
const APIC_ICR_DELIVERY_MODE_INIT: u64 = 0x500;
const APIC_ICR_DELIVERY_MODE_STARTUP: u64 = 0x600;
// ICR delivery-status bit: set while the previous IPI is still being sent.
const APIC_ICR_DELIVERY_STATUS_PENDING: u32 = 1 << 12;
// ICR trigger-mode bit (level-triggered) and level bit (assert).
const APIC_ICR_LEVEL_TRIGGERED: u64 = 1 << 15;
const APIC_ICR_LEVEL_ASSERT: u64 = 1 << 14;
// LVT mask bit: setting it disables the corresponding interrupt source.
const APIC_LVT_MASK: u64 = 1 << 16;
// LVT timer-mode bit: TSC-Deadline mode instead of one-shot/periodic.
const APIC_LVT_TIMER_TSC_DEADLINE: u64 = 1 << 18;
// Spurious Interrupt Vector Register bit: software-enable the Local APIC.
const APIC_SIVR_ENABLED: u64 = 1 << 8;
// IOAPIC register indices (selected through the IOREGSEL window).
#[allow(dead_code)]
const IOAPIC_REG_ID: u32 = 0x0000;
const IOAPIC_REG_VER: u32 = 0x0001;
// Start of the I/O redirection table; two 32-bit registers per entry.
const IOAPIC_REG_TABLE: u32 = 0x0010;
// Interrupt vector numbers used by this kernel.
const TLB_FLUSH_INTERRUPT_NUMBER: u8 = 112;
const WAKEUP_INTERRUPT_NUMBER: u8 = 121;
pub const TIMER_INTERRUPT_NUMBER: u8 = 123;
const ERROR_INTERRUPT_NUMBER: u8 = 126;
const SPURIOUS_INTERRUPT_NUMBER: u8 = 127;
// Physical (and identity-mapped virtual) address of the real-mode SMP boot
// code, plus the offsets of the fields patched before waking the APs.
const SMP_BOOT_CODE_ADDRESS: usize = 0x8000;
const SMP_BOOT_CODE_OFFSET_PML4: usize = 0x18;
const SMP_BOOT_CODE_OFFSET_ENTRY: usize = 0x08;
const SMP_BOOT_CODE_OFFSET_BOOTINFO: usize = 0x10;
// IA32_APIC_BASE MSR bit that enables x2APIC mode.
const X2APIC_ENABLE: u64 = 1 << 10;
// Virtual addresses the Local APIC / IOAPIC MMIO pages are mapped to.
// LOCAL_APIC_ADDRESS is only used in xAPIC mode.
static mut LOCAL_APIC_ADDRESS: usize = 0;
static mut IOAPIC_ADDRESS: usize = 0;
// Local APIC IDs of all detected CPUs, indexed by core ID.
// Initialized in `init`, filled by `add_local_apic_id`.
static mut CPU_LOCAL_APIC_IDS: Option<Vec<u8>> = None;
// APIC timer ticks per microsecond, measured by `calibrate_timer`.
static mut CALIBRATED_COUNTER_VALUE: u64 = 0;
/// Fixed part of the ACPI Multiple APIC Description Table (MADT) that
/// follows the common ACPI table header.
#[cfg(feature = "acpi")]
#[repr(C, packed)]
struct AcpiMadtHeader {
// Physical address at which each processor can access its Local APIC.
local_apic_address: u32,
flags: u32,
}
/// Header common to every record in the MADT's variable-length record list.
#[cfg(feature = "acpi")]
#[repr(C, packed)]
struct AcpiMadtRecordHeader {
// Record type (0 = Processor Local APIC, 1 = I/O APIC, ...).
entry_type: u8,
// Total record length in bytes, including this header.
length: u8,
}
/// MADT record of type 0: one Local APIC, i.e. one processor.
#[repr(C, packed)]
struct ProcessorLocalApicRecord {
acpi_processor_id: u8,
apic_id: u8,
// Bit 0 (see CPU_FLAG_ENABLED) indicates whether the processor is usable.
flags: u32,
}
impl fmt::Display for ProcessorLocalApicRecord {
	/// Formats the record as `{ acpi_processor_id: …, apic_id: …, flags: … }`.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		// The braced field accesses copy each value out of the packed struct,
		// avoiding references to potentially unaligned fields.
		write!(
			f,
			"{{ acpi_processor_id: {}, apic_id: {}, flags: {} }}",
			{ self.acpi_processor_id },
			{ self.apic_id },
			{ self.flags }
		)
	}
}
// Flag in ProcessorLocalApicRecord::flags: the processor is enabled/usable.
#[cfg(feature = "acpi")]
const CPU_FLAG_ENABLED: u32 = 1 << 0;
/// MADT record of type 1: one I/O APIC.
#[repr(C, packed)]
struct IoApicRecord {
id: u8,
reserved: u8,
// Physical address of this I/O APIC's MMIO registers.
address: u32,
// First global system interrupt number handled by this I/O APIC.
global_system_interrupt_base: u32,
}
impl fmt::Display for IoApicRecord {
	/// Formats the record as `{ id: …, reserved: …, address: 0x…, … }`.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		// The braced field accesses copy each value out of the packed struct,
		// avoiding references to potentially unaligned fields.
		write!(
			f,
			"{{ id: {}, reserved: {}, address: {:#X}, global_system_interrupt_base: {} }}",
			{ self.id },
			{ self.reserved },
			{ self.address },
			{ self.global_system_interrupt_base }
		)
	}
}
/// Interrupt handler for the TLB-shootdown IPI sent by `ipi_tlb_flush`.
extern "x86-interrupt" fn tlb_flush_handler(_stack_frame: &mut irq::ExceptionStackFrame) {
debug!("Received TLB Flush Interrupt");
// Reloading CR3 with its current value flushes this core's non-global
// TLB entries.
unsafe {
cr3_write(cr3());
}
// Signal End Of Interrupt to the Local APIC.
eoi();
}
/// Handler for the Local APIC's LVT error interrupt: dumps the APIC Error
/// Status Register and the exception stack frame, then aborts.
extern "x86-interrupt" fn error_interrupt_handler(stack_frame: &mut irq::ExceptionStackFrame) {
error!("APIC LVT Error Interrupt");
error!("ESR: {:#X}", local_apic_read(IA32_X2APIC_ESR));
error!("{:#?}", stack_frame);
// Acknowledge the interrupt before giving up.
eoi();
scheduler::abort();
}
/// Handler for the spurious interrupt vector. No EOI is sent here; this
/// kernel treats a spurious interrupt as fatal and aborts.
extern "x86-interrupt" fn spurious_interrupt_handler(stack_frame: &mut irq::ExceptionStackFrame) {
error!("Spurious Interrupt: {:#?}", stack_frame);
scheduler::abort();
}
/// Interrupt handler for the wakeup IPI sent by `wakeup_core`.
extern "x86-interrupt" fn wakeup_handler(_stack_frame: &mut irq::ExceptionStackFrame) {
debug!("Received Wakeup Interrupt");
let core_scheduler = core_scheduler();
// Pick up any work that arrived while this core was idle.
core_scheduler.check_input();
// Acknowledge the interrupt before potentially switching tasks.
eoi();
if core_scheduler.is_scheduling() {
core_scheduler.scheduler();
}
}
/// Registers the Local APIC ID of a newly detected CPU.
/// Must only be called after `init` has initialized `CPU_LOCAL_APIC_IDS`;
/// otherwise the `unwrap` panics.
#[inline]
pub fn add_local_apic_id(id: u8) {
unsafe {
CPU_LOCAL_APIC_IDS.as_mut().unwrap().push(id);
}
}
/// Fallback when the "acpi" feature is disabled: APIC detection via ACPI
/// is unavailable, so always report failure.
#[cfg(not(feature = "acpi"))]
fn detect_from_acpi() -> Result<usize, ()> {
Err(())
}
/// Parses the ACPI MADT to enumerate processors and I/O APICs.
///
/// For every enabled processor, its Local APIC ID is registered via
/// `add_local_apic_id`. Each I/O APIC record found is mapped into virtual
/// memory and its virtual address stored in `IOAPIC_ADDRESS`.
/// Returns the physical address of the Local APIC from the MADT header.
///
/// Panics if no MADT is present in the ACPI tables.
#[cfg(feature = "acpi")]
fn detect_from_acpi() -> Result<usize, ()> {
// Get the MADT; without it we cannot detect the APIC configuration.
let madt = acpi::get_madt().expect("HermitCore requires a MADT in the ACPI tables");
let madt_header = unsafe { &*(madt.table_start_address() as *const AcpiMadtHeader) };
// Walk the variable-length records that follow the fixed MADT header.
let mut current_address = madt.table_start_address() + mem::size_of::<AcpiMadtHeader>();
while current_address < madt.table_end_address() {
let record = unsafe { &*(current_address as *const AcpiMadtRecordHeader) };
current_address += mem::size_of::<AcpiMadtRecordHeader>();
match record.entry_type {
0 => {
// Processor Local APIC record — one per processor.
let processor_local_apic_record =
unsafe { &*(current_address as *const ProcessorLocalApicRecord) };
debug!(
"Found Processor Local APIC record: {}",
processor_local_apic_record
);
// Only register processors that the firmware marks as usable.
if processor_local_apic_record.flags & CPU_FLAG_ENABLED > 0 {
add_local_apic_id(processor_local_apic_record.apic_id);
}
}
1 => {
// I/O APIC record: map its MMIO page into virtual memory.
// NOTE(review): if the MADT lists several I/O APICs, each one is
// mapped and IOAPIC_ADDRESS keeps only the last mapping (earlier
// allocations are leaked) — verify the single-IOAPIC assumption.
let ioapic_record = unsafe { &*(current_address as *const IoApicRecord) };
debug!("Found I/O APIC record: {}", ioapic_record);
unsafe {
IOAPIC_ADDRESS = virtualmem::allocate(BasePageSize::SIZE).unwrap();
debug!(
"Mapping IOAPIC at {:#X} to virtual address {:#X}",
ioapic_record.address, IOAPIC_ADDRESS
);
// Map as uncached device memory, writable, never executable.
let mut flags = PageTableEntryFlags::empty();
flags.device().writable().execute_disable();
paging::map::<BasePageSize>(
IOAPIC_ADDRESS,
ioapic_record.address as usize,
1,
flags,
);
}
}
_ => {
// Other record types (interrupt overrides, NMIs, ...) are ignored.
}
}
// Advance by the record's own length (header already consumed above).
current_address += record.length as usize - mem::size_of::<AcpiMadtRecordHeader>();
}
Ok(madt_header.local_apic_address as usize)
}
/// Detects the APIC when running under the uhyve hypervisor, which places
/// the APICs at their architecturally defined default physical addresses.
///
/// On success, maps the IOAPIC (default physical address `0xFEC0_0000`) to
/// a freshly allocated virtual page, stores that address in
/// `IOAPIC_ADDRESS`, and returns the default physical Local APIC address
/// (`0xFEE0_0000`). Returns `Err(())` if we are not running under uhyve.
fn detect_from_uhyve() -> Result<usize, ()> {
	if environment::is_uhyve() {
		// Fixed typo: was misspelled `defaullt_address`.
		// Default physical address of the IOAPIC.
		let default_address = 0xFEC0_0000usize;

		unsafe {
			IOAPIC_ADDRESS = virtualmem::allocate(BasePageSize::SIZE).unwrap();
			debug!(
				"Mapping IOAPIC at {:#X} to virtual address {:#X}",
				default_address, IOAPIC_ADDRESS
			);

			// Map as uncached device memory, writable, never executable.
			let mut flags = PageTableEntryFlags::empty();
			flags.device().writable().execute_disable();
			paging::map::<BasePageSize>(IOAPIC_ADDRESS, default_address, 1, flags);
		}

		// Default physical address of the Local APIC.
		return Ok(0xFEE0_0000usize);
	}

	Err(())
}
/// Signals End Of Interrupt for the currently serviced interrupt to the
/// Local APIC.
// NOTE(review): `#[no_mangle] extern "C"` suggests callers in assembly or
// via FFI — verify before changing this signature.
#[no_mangle]
pub extern "C" fn eoi() {
local_apic_write(IA32_X2APIC_EOI, APIC_EOI_ACK);
}
/// Initializes the interrupt controllers (Local APIC and IOAPIC) on the
/// boot processor.
///
/// Detection order: uhyve default addresses first, then ACPI. Enables
/// x2APIC mode when supported; otherwise maps the xAPIC MMIO page. Then
/// installs the IPI/error/spurious handlers, configures the Local APIC,
/// calibrates the APIC timer when TSC-Deadline is unavailable, and
/// finally programs the IOAPIC redirection table.
///
/// Panics if no APIC can be found.
pub fn init() {
// Create the APIC ID list before detection starts filling it.
unsafe {
CPU_LOCAL_APIC_IDS = Some(Vec::new());
}
let local_apic_physical_address = detect_from_uhyve()
.or_else(|_e| detect_from_acpi())
.expect("HermitCore requires an APIC system");
init_x2apic();
// In xAPIC mode the Local APIC is accessed via MMIO, so map its page.
if !processor::supports_x2apic() {
unsafe {
LOCAL_APIC_ADDRESS = virtualmem::allocate(BasePageSize::SIZE).unwrap();
debug!(
"Mapping Local APIC at {:#X} to virtual address {:#X}",
local_apic_physical_address, LOCAL_APIC_ADDRESS
);
// Map as uncached device memory, writable, never executable.
let mut flags = PageTableEntryFlags::empty();
flags.device().writable().execute_disable();
paging::map::<BasePageSize>(LOCAL_APIC_ADDRESS, local_apic_physical_address, 1, flags);
}
}
// Install the handlers for our IPI and APIC-internal vectors.
idt::set_gate(TLB_FLUSH_INTERRUPT_NUMBER, tlb_flush_handler as usize, 0);
idt::set_gate(ERROR_INTERRUPT_NUMBER, error_interrupt_handler as usize, 0);
idt::set_gate(
SPURIOUS_INTERRUPT_NUMBER,
spurious_interrupt_handler as usize,
0,
);
idt::set_gate(WAKEUP_INTERRUPT_NUMBER, wakeup_handler as usize, 0);
init_local_apic();
// Without TSC-Deadline mode we need a calibrated one-shot counter value.
if !processor::supports_tsc_deadline() {
calibrate_timer();
}
init_ioapic();
}
/// Programs the IOAPIC redirection table: every interrupt line is routed
/// to the processor with Local APIC ID 0, except line 2 (legacy PIT
/// timer), which is masked.
fn init_ioapic() {
	// Entries are numbered 0 ..= max_redirection_entry.
	let entry_count = ioapic_max_redirection_entry() + 1;
	info!("IOAPIC v{} has {} entries", ioapic_version(), entry_count);

	for irq in 0..entry_count {
		if irq == 2 {
			info!("Disable IOAPIC timer");
			ioapic_intoff(2, 0).unwrap();
		} else {
			ioapic_inton(irq, 0).unwrap();
		}
	}
}
/// Unmasks IOAPIC interrupt line `irq` and routes it to the processor with
/// Local APIC ID `apicid`, using interrupt vector `0x20 + irq`.
/// Returns `Err(())` if `irq` is out of range.
fn ioapic_inton(irq: u8, apicid: u8) -> Result<(), ()> {
// NOTE(review): this guard admits irq == 24 — verify against
// ioapic_max_redirection_entry() whether that index is actually valid.
if irq > 24 {
error!("IOAPIC: trying to turn on irq {} which is too high\n", irq);
return Err(());
}
// Each redirection entry occupies two consecutive 32-bit registers.
let off = u32::from(irq * 2);
// Upper dword: destination APIC ID in the top byte.
let ioredirect_upper: u32 = u32::from(apicid) << 24;
// Lower dword: vector 0x20 + irq; all other bits zero, i.e. the entry
// is unmasked with default delivery settings.
let ioredirect_lower: u32 = u32::from(0x20 + irq);
ioapic_write(IOAPIC_REG_TABLE + off, ioredirect_lower);
ioapic_write(IOAPIC_REG_TABLE + 1 + off, ioredirect_upper);
Ok(())
}
/// Masks (disables) IOAPIC interrupt line `irq` while keeping its routing
/// to the processor with Local APIC ID `apicid` and vector `0x20 + irq`.
/// Returns `Err(())` if `irq` is out of range.
fn ioapic_intoff(irq: u32, apicid: u32) -> Result<(), ()> {
	// NOTE(review): like ioapic_inton, this guard admits irq == 24 —
	// verify against ioapic_max_redirection_entry().
	if irq > 24 {
		error!("IOAPIC: trying to turn off irq {} which is too high\n", irq);
		return Err(());
	}

	// Each redirection entry occupies two consecutive 32-bit registers.
	// (Removed redundant `as u32` casts — all operands are already u32.)
	let off = irq * 2;
	// Upper dword: destination APIC ID in the top byte.
	let ioredirect_upper: u32 = apicid << 24;
	// Lower dword: vector 0x20 + irq, with bit 16 set to mask the line.
	let ioredirect_lower: u32 = (0x20 + irq) | (1 << 16);

	ioapic_write(IOAPIC_REG_TABLE + off, ioredirect_lower);
	ioapic_write(IOAPIC_REG_TABLE + 1 + off, ioredirect_upper);

	Ok(())
}
/// Configures the Local APIC of the calling processor: masks all unused
/// LVT entries, routes APIC-internal errors to `ERROR_INTERRUPT_NUMBER`,
/// accepts all interrupt priorities, and software-enables the APIC with
/// the spurious interrupt vector.
pub fn init_local_apic() {
// Mask all LVT sources we do not use.
local_apic_write(IA32_X2APIC_LVT_TIMER, APIC_LVT_MASK);
local_apic_write(IA32_X2APIC_LVT_THERMAL, APIC_LVT_MASK);
local_apic_write(IA32_X2APIC_LVT_PMI, APIC_LVT_MASK);
local_apic_write(IA32_X2APIC_LVT_LINT0, APIC_LVT_MASK);
local_apic_write(IA32_X2APIC_LVT_LINT1, APIC_LVT_MASK);
// Deliver APIC-internal errors to our error handler.
local_apic_write(IA32_X2APIC_LVT_ERROR, u64::from(ERROR_INTERRUPT_NUMBER));
// Task Priority Register 0: accept interrupts of every priority.
local_apic_write(IA32_X2APIC_TPR, 0x00);
// Software-enable the APIC and set the spurious interrupt vector.
local_apic_write(
IA32_X2APIC_SIVR,
APIC_SIVR_ENABLED | (u64::from(SPURIOUS_INTERRUPT_NUMBER)),
);
}
/// Measures how many APIC timer ticks (with the bus clock divided by 8)
/// elapse per microsecond and stores the result in
/// `CALIBRATED_COUNTER_VALUE` for use by `set_oneshot_timer` on CPUs
/// without TSC-Deadline support.
fn calibrate_timer() {
// Length of the measurement window: 30 ms.
let microseconds = 30_000;
// Let the timer count down from u32::MAX during the window.
local_apic_write(IA32_X2APIC_DIV_CONF, APIC_DIV_CONF_DIVIDE_BY_8);
local_apic_write(IA32_X2APIC_INIT_COUNT, u64::from(u32::MAX));
processor::udelay(microseconds);
unsafe {
// Ticks consumed during the window, divided by its length in µs.
CALIBRATED_COUNTER_VALUE =
(u64::from(u32::MAX - local_apic_read(IA32_X2APIC_CUR_COUNT))) / microseconds;
debug!(
"Calibrated APIC Timer with a counter value of {} for 1 microsecond",
CALIBRATED_COUNTER_VALUE
);
}
}
/// Arms the Local APIC timer to raise `TIMER_INTERRUPT_NUMBER` at the
/// given absolute wakeup time, or disables the timer when `wakeup_time`
/// is `None`.
// NOTE(review): `wt` is compared against processor::get_timer_ticks() and
// multiplied by processor::get_frequency() — presumably microseconds with
// the frequency in MHz; confirm the unit against those helpers.
pub fn set_oneshot_timer(wakeup_time: Option<u64>) {
if let Some(wt) = wakeup_time {
if processor::supports_tsc_deadline() {
// TSC-Deadline mode: convert the wakeup time into an absolute TSC
// value and arm the deadline MSR.
let tsc_deadline = wt * (u64::from(processor::get_frequency()));
local_apic_write(
IA32_X2APIC_LVT_TIMER,
APIC_LVT_TIMER_TSC_DEADLINE | u64::from(TIMER_INTERRUPT_NUMBER),
);
unsafe {
wrmsr(IA32_TSC_DEADLINE, tsc_deadline);
}
} else {
// One-shot mode: convert the remaining time into timer ticks using
// the value measured by `calibrate_timer`.
let current_time = processor::get_timer_ticks();
// If the wakeup time already passed, fire as soon as possible.
let ticks = if wt > current_time {
wt - current_time
} else {
1
};
// Clamp to the 32-bit initial-count register.
let init_count = cmp::min(
unsafe { CALIBRATED_COUNTER_VALUE } * ticks,
u64::from(u32::MAX),
);
local_apic_write(IA32_X2APIC_LVT_TIMER, u64::from(TIMER_INTERRUPT_NUMBER));
local_apic_write(IA32_X2APIC_INIT_COUNT, init_count);
}
} else {
// No wakeup requested: mask the timer LVT entry.
local_apic_write(IA32_X2APIC_LVT_TIMER, APIC_LVT_MASK);
}
}
/// Enables x2APIC mode by setting the enable bit in the IA32_APIC_BASE
/// MSR. Does nothing on CPUs without x2APIC support.
pub fn init_x2apic() {
	// Without x2APIC support the xAPIC MMIO path is used instead.
	if !processor::supports_x2apic() {
		return;
	}

	debug!("Enable x2APIC support");

	// Read-modify-write of the APIC base MSR, setting only the enable bit.
	unsafe {
		let apic_base = rdmsr(IA32_APIC_BASE) | X2APIC_ENABLE;
		wrmsr(IA32_APIC_BASE, apic_base);
	}
}
/// Prepares the per-core data for the next processor to be booted:
/// allocates a kernel stack and a PerCoreVariables block and publishes
/// their addresses through BOOT_INFO.
// NOTE(review): the volatile stores suggest BOOT_INFO is read by the
// starting AP's boot path — verify against the SMP boot code.
pub fn init_next_processor_variables(core_id: CoreId) {
// Allocate a stack and per-core storage for the new core.
let stack = mm::allocate(KERNEL_STACK_SIZE, true);
let boxed_percore = Box::new(PerCoreVariables::new(core_id));
unsafe {
intrinsics::volatile_store(&mut (*BOOT_INFO).current_stack_address, stack as u64);
// Leak the box on purpose: the new core owns this memory from now on.
intrinsics::volatile_store(
&mut (*BOOT_INFO).current_percore_address,
Box::into_raw(boxed_percore) as u64,
);
}
}
/// Boots all application processors (APs): installs the real-mode SMP boot
/// code at `SMP_BOOT_CODE_ADDRESS`, patches it with the current page table,
/// entry point, and BOOT_INFO pointer, then wakes each AP with an
/// INIT / INIT-deassert / STARTUP IPI sequence and waits for it to come up.
#[cfg(not(test))]
pub fn boot_application_processors() {
// The boot code must fit into the single identity-mapped page.
assert!(
SMP_BOOT_CODE.len() < BasePageSize::SIZE,
"SMP Boot Code is larger than a page"
);
debug!("SMP boot code is {} bytes long", SMP_BOOT_CODE.len());
// Identity-map the boot-code page so real-mode and paged accesses agree.
debug!(
"Mapping SMP boot code to physical and virtual address {:#X}",
SMP_BOOT_CODE_ADDRESS
);
let mut flags = PageTableEntryFlags::empty();
flags.normal().writable();
paging::map::<BasePageSize>(SMP_BOOT_CODE_ADDRESS, SMP_BOOT_CODE_ADDRESS, 1, flags);
unsafe {
ptr::copy_nonoverlapping(
&SMP_BOOT_CODE as *const u8,
SMP_BOOT_CODE_ADDRESS as *mut u8,
SMP_BOOT_CODE.len(),
);
}
// Patch the copied boot code: page-table root, Rust entry point, and the
// BOOT_INFO pointer, at their fixed offsets.
unsafe {
*((SMP_BOOT_CODE_ADDRESS + SMP_BOOT_CODE_OFFSET_PML4) as *mut u32) = cr3() as u32;
debug!(
"Set entry point for application processor to 0x{:x}",
arch::x86_64::kernel::start::_start as usize
);
*((SMP_BOOT_CODE_ADDRESS + SMP_BOOT_CODE_OFFSET_ENTRY) as *mut usize) =
arch::x86_64::kernel::start::_start as usize;
*((SMP_BOOT_CODE_ADDRESS + SMP_BOOT_CODE_OFFSET_BOOTINFO) as *mut usize) =
BOOT_INFO as usize;
}
let apic_ids = unsafe { CPU_LOCAL_APIC_IDS.as_ref().unwrap() };
let core_id = core_id();
// Boot every detected core except the one we are running on.
for core_id_to_boot in 0..apic_ids.len() {
if core_id_to_boot != core_id.try_into().unwrap() {
let apic_id = apic_ids[core_id_to_boot];
// Destination APIC ID lives in ICR bits 32-63.
let destination = u64::from(apic_id) << 32;
debug!(
"Waking up CPU {} with Local APIC ID {}",
core_id_to_boot, apic_id
);
// Stack and per-core variables must be in place before the AP starts.
init_next_processor_variables(core_id_to_boot.try_into().unwrap());
let current_processor_count = arch::get_processor_count();
// Assert the INIT IPI (level-triggered).
local_apic_write(
IA32_X2APIC_ICR,
destination
| APIC_ICR_LEVEL_TRIGGERED
| APIC_ICR_LEVEL_ASSERT
| APIC_ICR_DELIVERY_MODE_INIT,
);
processor::udelay(200);
// Deassert INIT (same mode, level bit cleared).
local_apic_write(
IA32_X2APIC_ICR,
destination | APIC_ICR_LEVEL_TRIGGERED | APIC_ICR_DELIVERY_MODE_INIT,
);
processor::udelay(10000);
// STARTUP IPI: the vector is the physical 4-KiB page number of the
// boot code (address >> 12).
local_apic_write(
IA32_X2APIC_ICR,
destination
| APIC_ICR_DELIVERY_MODE_STARTUP
| ((SMP_BOOT_CODE_ADDRESS as u64) >> 12),
);
debug!("Waiting for it to respond");
// The AP increments the processor count once it is up.
while current_processor_count == arch::get_processor_count() {
processor::udelay(1000);
}
}
}
}
/// Sends a TLB-flush IPI (vector `TLB_FLUSH_INTERRUPT_NUMBER`) to every
/// other core. No-op on single-processor systems.
pub fn ipi_tlb_flush() {
if arch::get_processor_count() > 1 {
let apic_ids = unsafe { CPU_LOCAL_APIC_IDS.as_ref().unwrap() };
let core_id = core_id();
// Make all pending memory writes globally visible before the other
// cores flush their TLBs.
unsafe {
llvm_asm!("mfence" ::: "memory" : "volatile");
}
// Interrupt everybody except ourselves.
for core_id_to_interrupt in 0..apic_ids.len() {
if core_id_to_interrupt != core_id.try_into().unwrap() {
let local_apic_id = apic_ids[core_id_to_interrupt];
// Destination APIC ID lives in ICR bits 32-63.
let destination = u64::from(local_apic_id) << 32;
local_apic_write(
IA32_X2APIC_ICR,
destination
| APIC_ICR_LEVEL_ASSERT | APIC_ICR_DELIVERY_MODE_FIXED
| u64::from(TLB_FLUSH_INTERRUPT_NUMBER),
);
}
}
}
}
/// Sends a wakeup IPI (vector `WAKEUP_INTERRUPT_NUMBER`) to the given
/// core, unless it is the calling core itself.
pub fn wakeup_core(core_id_to_wakeup: CoreId) {
	// A core never needs to send a wakeup IPI to itself.
	if core_id_to_wakeup == core_id() {
		return;
	}

	// Translate the core ID into the target's Local APIC ID;
	// the destination goes into ICR bits 32-63.
	let apic_ids = unsafe { CPU_LOCAL_APIC_IDS.as_ref().unwrap() };
	let destination = u64::from(apic_ids[core_id_to_wakeup as usize]) << 32;

	// Fixed-mode IPI with the wakeup vector.
	local_apic_write(
		IA32_X2APIC_ICR,
		destination
			| APIC_ICR_LEVEL_ASSERT
			| APIC_ICR_DELIVERY_MODE_FIXED
			| u64::from(WAKEUP_INTERRUPT_NUMBER),
	);
}
/// Maps an x2APIC MSR number to the corresponding xAPIC MMIO address:
/// MSR 0x8XX corresponds to offset (XX << 4) from the Local APIC base.
#[inline]
fn translate_x2apic_msr_to_xapic_address(x2apic_msr: u32) -> usize {
	let register_offset = ((x2apic_msr & 0xFF) as usize) << 4;
	unsafe { LOCAL_APIC_ADDRESS + register_offset }
}
/// Reads a Local APIC register: via MSR in x2APIC mode, via the
/// memory-mapped register window in xAPIC mode.
fn local_apic_read(x2apic_msr: u32) -> u32 {
if processor::supports_x2apic() {
unsafe { rdmsr(x2apic_msr) as u32 }
} else {
// xAPIC registers are 32 bits wide and memory-mapped.
unsafe { *(translate_x2apic_msr_to_xapic_address(x2apic_msr) as *const u32) }
}
}
/// Writes `value` to the indirect IOAPIC register `reg`: first selects the
/// register through the IOREGSEL window at the IOAPIC base, then writes
/// the data through the IOWIN window. The two stores must stay in order.
fn ioapic_write(reg: u32, value: u32) {
unsafe {
intrinsics::volatile_store(IOAPIC_ADDRESS as *mut u32, reg);
// IOWIN sits 0x10 bytes (4 u32s) above the IOAPIC base.
intrinsics::volatile_store(
(IOAPIC_ADDRESS + 4 * mem::size_of::<u32>()) as *mut u32,
value,
);
}
}
/// Reads the indirect IOAPIC register `reg`: selects it through the
/// IOREGSEL window at the IOAPIC base, then reads the result from the
/// IOWIN window 0x10 bytes above.
fn ioapic_read(reg: u32) -> u32 {
	unsafe {
		// The select store must happen before the data load.
		intrinsics::volatile_store(IOAPIC_ADDRESS as *mut u32, reg);
		intrinsics::volatile_load((IOAPIC_ADDRESS + 4 * mem::size_of::<u32>()) as *const u32)
	}
}
/// Returns the IOAPIC version number (low byte of the VER register).
fn ioapic_version() -> u32 {
	let version_register = ioapic_read(IOAPIC_REG_VER);
	version_register & 0xFF
}
/// Returns the index of the last IOAPIC redirection entry
/// (bits 16-23 of the VER register).
fn ioapic_max_redirection_entry() -> u8 {
	let version_register = ioapic_read(IOAPIC_REG_VER);
	((version_register >> 16) & 0xFF) as u8
}
/// Writes a value to a Local APIC register, handling both x2APIC (MSR)
/// and xAPIC (MMIO) mode.
///
/// In xAPIC mode, registers are only 32 bits wide, so the 64-bit x2APIC
/// ICR value is split: the destination APIC ID is written to the separate
/// ICR2 register first, then the low dword to ICR; afterwards we spin
/// until the delivery-status bit clears.
fn local_apic_write(x2apic_msr: u32, value: u64) {
if processor::supports_x2apic() {
unsafe {
wrmsr(x2apic_msr, value);
}
} else {
if x2apic_msr == IA32_X2APIC_ICR {
// The x2APIC destination sits in bits 32-63; the xAPIC ICR2
// register wants the 8-bit APIC ID in bits 24-31, hence the
// shift by 8 and the mask. ICR2 must be written before ICR.
let destination = ((value >> 8) & 0xFF00_0000) as u32;
let icr2 = unsafe { &mut *((LOCAL_APIC_ADDRESS + APIC_ICR2) as *mut u32) };
*icr2 = destination;
}
// Write the low 32 bits to the memory-mapped register.
let value_ref =
unsafe { &mut *(translate_x2apic_msr_to_xapic_address(x2apic_msr) as *mut u32) };
*value_ref = value as u32;
if x2apic_msr == IA32_X2APIC_ICR {
// Writing ICR sends an IPI; wait until the APIC reports that
// the previous IPI has been delivered.
while (unsafe { intrinsics::volatile_load(value_ref) }
& APIC_ICR_DELIVERY_STATUS_PENDING)
> 0
{
spin_loop_hint();
}
}
}
}
pub fn print_information() {
infoheader!(" MULTIPROCESSOR INFORMATION ");
infoentry!(
"APIC in use",
if processor::supports_x2apic() {
"x2APIC"
} else {
"xAPIC"
}
);
infoentry!("Initialized CPUs", arch::get_processor_count());
infofooter!();
}