use bitflags::bitflags;
use core::fmt::{self, Debug, Display, Formatter};
use core::mem::size_of;
use core::slice;
use core::str;
use crate::{get_bits, CpuIdResult, Vendor};
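/// Extended Processor and Processor Feature Identifiers (LEAF=0x8000_0001).
///
/// A usage sketch; it assumes this type is reached through the crate's
/// `CpuId` entry point and a `get_extended_processor_and_feature_identifiers()`
/// accessor (neither is defined in this module):
///
/// ```ignore
/// let cpuid = CpuId::new();
/// if let Some(ext) = cpuid.get_extended_processor_and_feature_identifiers() {
///     println!("rdtscp: {}", ext.has_rdtscp());
///     println!("64-bit mode: {}", ext.has_64bit_mode());
/// }
/// ```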
pub struct ExtendedProcessorFeatureIdentifiers {
vendor: Vendor,
eax: u32,
ebx: u32,
ecx: ExtendedFunctionInfoEcx,
edx: ExtendedFunctionInfoEdx,
}
impl ExtendedProcessorFeatureIdentifiers {
pub(crate) fn new(vendor: Vendor, data: CpuIdResult) -> Self {
Self {
vendor,
eax: data.eax,
ebx: data.ebx,
ecx: ExtendedFunctionInfoEcx::from_bits_truncate(data.ecx),
edx: ExtendedFunctionInfoEdx::from_bits_truncate(data.edx),
}
}
pub fn extended_signature(&self) -> u32 {
self.eax
}
pub fn pkg_type(&self) -> u32 {
get_bits(self.ebx, 28, 31)
}
pub fn brand_id(&self) -> u32 {
get_bits(self.ebx, 0, 15)
}
pub fn has_lahf_sahf(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::LAHF_SAHF)
}
pub fn has_cmp_legacy(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::CMP_LEGACY)
}
pub fn has_svm(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SVM)
}
pub fn has_ext_apic_space(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::EXT_APIC_SPACE)
}
pub fn has_alt_mov_cr8(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ALTMOVCR8)
}
pub fn has_lzcnt(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::LZCNT)
}
pub fn has_sse4a(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SSE4A)
}
pub fn has_misaligned_sse_mode(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MISALIGNSSE)
}
pub fn has_prefetchw(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::PREFETCHW)
}
pub fn has_osvw(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::OSVW)
}
pub fn has_ibs(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::IBS)
}
pub fn has_xop(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::XOP)
}
pub fn has_skinit(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SKINIT)
}
pub fn has_wdt(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::WDT)
}
pub fn has_lwp(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::LWP)
}
pub fn has_fma4(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::FMA4)
}
pub fn has_tbm(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TBM)
}
pub fn has_topology_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TOPEXT)
}
pub fn has_perf_cntr_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXT)
}
pub fn has_nb_perf_cntr_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTNB)
}
pub fn has_data_access_bkpt_extension(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::DATABRKPEXT)
}
pub fn has_perf_tsc(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFTSC)
}
pub fn has_perf_cntr_llc_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTLLC)
}
pub fn has_monitorx_mwaitx(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MONITORX)
}
pub fn has_addr_mask_extension(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ADDRMASKEXT)
}
pub fn has_syscall_sysret(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::SYSCALL_SYSRET)
}
pub fn has_execute_disable(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::EXECUTE_DISABLE)
}
pub fn has_mmx_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::MMXEXT)
}
pub fn has_fast_fxsave_fxstor(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::FFXSR)
}
pub fn has_1gib_pages(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::GIB_PAGES)
}
pub fn has_rdtscp(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::RDTSCP)
}
pub fn has_64bit_mode(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::I64BIT_MODE)
}
pub fn has_amd_3dnow_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOWEXT)
}
pub fn has_3dnow(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOW)
}
}
impl Debug for ExtendedProcessorFeatureIdentifiers {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
let mut ds = f.debug_struct("ExtendedProcessorFeatureIdentifiers");
ds.field("extended_signature", &self.extended_signature());
if self.vendor == Vendor::Amd {
ds.field("pkg_type", &self.pkg_type());
ds.field("brand_id", &self.brand_id());
}
ds.field("ecx_features", &self.ecx);
ds.field("edx_features", &self.edx);
ds.finish()
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ExtendedFunctionInfoEcx: u32 {
const LAHF_SAHF = 1 << 0;
const CMP_LEGACY = 1 << 1;
const SVM = 1 << 2;
const EXT_APIC_SPACE = 1 << 3;
const ALTMOVCR8 = 1 << 4;
const LZCNT = 1 << 5;
const SSE4A = 1 << 6;
const MISALIGNSSE = 1 << 7;
const PREFETCHW = 1 << 8;
const OSVW = 1 << 9;
const IBS = 1 << 10;
const XOP = 1 << 11;
const SKINIT = 1 << 12;
const WDT = 1 << 13;
const LWP = 1 << 15;
const FMA4 = 1 << 16;
const TBM = 1 << 21;
const TOPEXT = 1 << 22;
const PERFCTREXT = 1 << 23;
const PERFCTREXTNB = 1 << 24;
const DATABRKPEXT = 1 << 26;
const PERFTSC = 1 << 27;
const PERFCTREXTLLC = 1 << 28;
const MONITORX = 1 << 29;
const ADDRMASKEXT = 1 << 30;
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ExtendedFunctionInfoEdx: u32 {
const SYSCALL_SYSRET = 1 << 11;
const EXECUTE_DISABLE = 1 << 20;
const MMXEXT = 1 << 22;
        const FFXSR = 1 << 25;
const GIB_PAGES = 1 << 26;
const RDTSCP = 1 << 27;
const I64BIT_MODE = 1 << 29;
const THREEDNOWEXT = 1 << 30;
const THREEDNOW = 1 << 31;
}
}
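/// Processor name string, assembled from leaves 0x8000_0002..=0x8000_0004.
///
/// Hypothetical usage, assuming a `get_processor_brand_string()` accessor on
/// the crate's `CpuId` type:
///
/// ```ignore
/// if let Some(brand) = CpuId::new().get_processor_brand_string() {
///     println!("CPU: {}", brand.as_str());
/// }
/// ```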
pub struct ProcessorBrandString {
data: [CpuIdResult; 3],
}
impl ProcessorBrandString {
pub(crate) fn new(data: [CpuIdResult; 3]) -> Self {
Self { data }
}
    pub fn as_str(&self) -> &str {
        // Safety: `data` is an array of three contiguous `CpuIdResult`
        // register values, so reading its backing memory as plain bytes is
        // valid for the computed length.
        let slice: &[u8] = unsafe {
            slice::from_raw_parts(
                self.data.as_ptr() as *const u8,
                self.data.len() * size_of::<CpuIdResult>(),
            )
        };
        // The brand string is NUL-terminated; `split` always yields at least
        // one sub-slice, so `next().unwrap()` cannot fail.
        let slice = slice.split(|&x| x == 0).next().unwrap();
        str::from_utf8(slice)
            .unwrap_or("Invalid Processor Brand String")
            .trim()
    }
}
impl Debug for ProcessorBrandString {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
f.debug_struct("ProcessorBrandString")
.field("as_str", &self.as_str())
.finish()
}
}
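/// L1 cache and TLB information (LEAF=0x8000_0005), AMD only.
///
/// Sketch of reading the L1 data-cache geometry, assuming a
/// `get_l1_cache_and_tlb_info()` accessor:
///
/// ```ignore
/// if let Some(l1) = CpuId::new().get_l1_cache_and_tlb_info() {
///     println!(
///         "L1d: {} KB, {}-byte lines, {}",
///         l1.dcache_size(),
///         l1.dcache_line_size(),
///         l1.dcache_associativity()
///     );
/// }
/// ```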
#[derive(PartialEq, Eq, Debug)]
pub struct L1CacheTlbInfo {
eax: u32,
ebx: u32,
ecx: u32,
edx: u32,
}
impl L1CacheTlbInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
ecx: data.ecx,
edx: data.edx,
}
}
pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 24, 31) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn dtlb_2m_4m_size(&self) -> u8 {
get_bits(self.eax, 16, 23) as u8
}
pub fn itlb_2m_4m_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 8, 15) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn itlb_2m_4m_size(&self) -> u8 {
get_bits(self.eax, 0, 7) as u8
}
pub fn dtlb_4k_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 24, 31) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn dtlb_4k_size(&self) -> u8 {
get_bits(self.ebx, 16, 23) as u8
}
pub fn itlb_4k_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 8, 15) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn itlb_4k_size(&self) -> u8 {
get_bits(self.ebx, 0, 7) as u8
}
    /// L1 data cache size in KB.
    pub fn dcache_size(&self) -> u8 {
get_bits(self.ecx, 24, 31) as u8
}
pub fn dcache_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ecx, 16, 23) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn dcache_lines_per_tag(&self) -> u8 {
get_bits(self.ecx, 8, 15) as u8
}
pub fn dcache_line_size(&self) -> u8 {
get_bits(self.ecx, 0, 7) as u8
}
    /// L1 instruction cache size in KB.
    pub fn icache_size(&self) -> u8 {
get_bits(self.edx, 24, 31) as u8
}
pub fn icache_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.edx, 16, 23) as u8;
Associativity::for_l1(assoc_bits)
}
pub fn icache_lines_per_tag(&self) -> u8 {
get_bits(self.edx, 8, 15) as u8
}
pub fn icache_line_size(&self) -> u8 {
get_bits(self.edx, 0, 7) as u8
}
}
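/// L2/L3 cache and TLB information (LEAF=0x8000_0006).
///
/// Sketch, assuming a `get_l2_l3_cache_and_tlb_info()` accessor; note the
/// differing units (L2 size in KB, L3 size in 512 KB units):
///
/// ```ignore
/// if let Some(info) = CpuId::new().get_l2_l3_cache_and_tlb_info() {
///     println!("L2: {} KB", info.l2cache_size());
///     println!("L3: {} KB", info.l3cache_size() as u32 * 512);
/// }
/// ```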
#[derive(PartialEq, Eq, Debug)]
pub struct L2And3CacheTlbInfo {
eax: u32,
ebx: u32,
ecx: u32,
edx: u32,
}
impl L2And3CacheTlbInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
ecx: data.ecx,
edx: data.edx,
}
}
pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 28, 31) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn dtlb_2m_4m_size(&self) -> u16 {
get_bits(self.eax, 16, 27) as u16
}
pub fn itlb_2m_4m_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 12, 15) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn itlb_2m_4m_size(&self) -> u16 {
get_bits(self.eax, 0, 11) as u16
}
pub fn dtlb_4k_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn dtlb_4k_size(&self) -> u16 {
get_bits(self.ebx, 16, 27) as u16
}
pub fn itlb_4k_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn itlb_4k_size(&self) -> u16 {
get_bits(self.ebx, 0, 11) as u16
}
pub fn l2cache_line_size(&self) -> u8 {
get_bits(self.ecx, 0, 7) as u8
}
pub fn l2cache_lines_per_tag(&self) -> u8 {
get_bits(self.ecx, 8, 11) as u8
}
pub fn l2cache_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ecx, 12, 15) as u8;
Associativity::for_l2(assoc_bits)
}
    /// L2 cache size in KB.
    pub fn l2cache_size(&self) -> u16 {
get_bits(self.ecx, 16, 31) as u16
}
pub fn l3cache_line_size(&self) -> u8 {
get_bits(self.edx, 0, 7) as u8
}
pub fn l3cache_lines_per_tag(&self) -> u8 {
get_bits(self.edx, 8, 11) as u8
}
pub fn l3cache_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.edx, 12, 15) as u8;
Associativity::for_l3(assoc_bits)
}
    /// L3 cache size, in units of 512 KB.
    pub fn l3cache_size(&self) -> u16 {
get_bits(self.edx, 18, 31) as u16
}
}
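/// Cache and TLB associativity, decoded from the leaf 0x8000_0005 and
/// 0x8000_0006 field encodings.
///
/// Matching on a decoded value:
///
/// ```ignore
/// match info.l2cache_associativity() {
///     Associativity::NWay(n) => println!("{}-way set associative", n),
///     other => println!("{}", other),
/// }
/// ```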
#[derive(PartialEq, Eq, Debug)]
pub enum Associativity {
Disabled,
DirectMapped,
NWay(u8),
FullyAssociative,
Unknown,
}
impl Display for Associativity {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let s = match self {
Associativity::Disabled => "Disabled",
Associativity::DirectMapped => "Direct mapped",
Associativity::NWay(n) => {
return write!(f, "NWay({})", n);
}
Associativity::FullyAssociative => "Fully associative",
Associativity::Unknown => "Unknown (check leaf 0x8000_001d)",
};
f.write_str(s)
}
}
impl Associativity {
fn for_l1(n: u8) -> Associativity {
match n {
            0x0 => Associativity::Disabled,
            0x1 => Associativity::DirectMapped,
0x2..=0xfe => Associativity::NWay(n),
0xff => Associativity::FullyAssociative,
}
}
fn for_l2(n: u8) -> Associativity {
match n {
0x0 => Associativity::Disabled,
0x1 => Associativity::DirectMapped,
0x2 => Associativity::NWay(2),
0x4 => Associativity::NWay(4),
            0x5 => Associativity::NWay(6),
            0x6 => Associativity::NWay(8),
            0x8 => Associativity::NWay(16),
            0x9 => Associativity::Unknown, // Reserved; determine via leaf 0x8000_001D
            0xa => Associativity::NWay(32),
0xb => Associativity::NWay(48),
0xc => Associativity::NWay(64),
0xd => Associativity::NWay(96),
0xe => Associativity::NWay(128),
            0xf => Associativity::FullyAssociative,
_ => Associativity::Unknown,
}
}
fn for_l3(n: u8) -> Associativity {
Associativity::for_l2(n)
}
}
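/// Processor Power Management and RAS Capabilities (LEAF=0x8000_0007).
///
/// A common use is checking for an invariant TSC before relying on the TSC
/// as a wall-clock source; sketch assuming a `get_advanced_power_mgmt_info()`
/// accessor:
///
/// ```ignore
/// let tsc_is_stable = CpuId::new()
///     .get_advanced_power_mgmt_info()
///     .map_or(false, |apm| apm.has_invariant_tsc());
/// ```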
#[derive(Debug, PartialEq, Eq)]
pub struct ApmInfo {
_eax: u32,
ebx: RasCapabilities,
ecx: u32,
edx: ApmInfoEdx,
}
impl ApmInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
_eax: data.eax,
ebx: RasCapabilities::from_bits_truncate(data.ebx),
ecx: data.ecx,
edx: ApmInfoEdx::from_bits_truncate(data.edx),
}
}
pub fn has_mca_overflow_recovery(&self) -> bool {
self.ebx.contains(RasCapabilities::MCAOVFLRECOV)
}
pub fn has_succor(&self) -> bool {
self.ebx.contains(RasCapabilities::SUCCOR)
}
pub fn has_hwa(&self) -> bool {
self.ebx.contains(RasCapabilities::HWA)
}
pub fn cpu_pwr_sample_time_ratio(&self) -> u32 {
self.ecx
}
pub fn has_ts(&self) -> bool {
self.edx.contains(ApmInfoEdx::TS)
}
pub fn has_freq_id_ctrl(&self) -> bool {
self.edx.contains(ApmInfoEdx::FID)
}
pub fn has_volt_id_ctrl(&self) -> bool {
self.edx.contains(ApmInfoEdx::VID)
}
pub fn has_thermtrip(&self) -> bool {
self.edx.contains(ApmInfoEdx::TTP)
}
pub fn has_tm(&self) -> bool {
self.edx.contains(ApmInfoEdx::TM)
}
pub fn has_100mhz_steps(&self) -> bool {
self.edx.contains(ApmInfoEdx::MHZSTEPS100)
}
pub fn has_hw_pstate(&self) -> bool {
self.edx.contains(ApmInfoEdx::HWPSTATE)
}
pub fn has_invariant_tsc(&self) -> bool {
self.edx.contains(ApmInfoEdx::INVTSC)
}
pub fn has_cpb(&self) -> bool {
self.edx.contains(ApmInfoEdx::CPB)
}
pub fn has_ro_effective_freq_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::EFFFREQRO)
}
pub fn has_feedback_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::PROCFEEDBACKIF)
}
pub fn has_power_reporting_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::PROCPWRREPORT)
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ApmInfoEdx: u32 {
const TS = 1 << 0;
const FID = 1 << 1;
const VID = 1 << 2;
const TTP = 1 << 3;
const TM = 1 << 4;
const MHZSTEPS100 = 1 << 6;
const HWPSTATE = 1 << 7;
const INVTSC = 1 << 8;
const CPB = 1 << 9;
const EFFFREQRO = 1 << 10;
const PROCFEEDBACKIF = 1 << 11;
const PROCPWRREPORT = 1 << 12;
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct RasCapabilities: u32 {
const MCAOVFLRECOV = 1 << 0;
const SUCCOR = 1 << 1;
const HWA = 1 << 2;
}
}
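/// Processor Capacity Parameters and Extended Feature Identification
/// (LEAF=0x8000_0008).
///
/// Sketch for querying address widths, assuming a
/// `get_processor_capacity_feature_info()` accessor:
///
/// ```ignore
/// if let Some(info) = CpuId::new().get_processor_capacity_feature_info() {
///     println!(
///         "phys: {} bits, virt: {} bits",
///         info.physical_address_bits(),
///         info.linear_address_bits()
///     );
/// }
/// ```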
#[derive(PartialEq, Eq)]
pub struct ProcessorCapacityAndFeatureInfo {
eax: u32,
ebx: ProcessorCapacityAndFeatureEbx,
ecx: u32,
edx: u32,
}
impl ProcessorCapacityAndFeatureInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: ProcessorCapacityAndFeatureEbx::from_bits_truncate(data.ebx),
ecx: data.ecx,
edx: data.edx,
}
}
pub fn physical_address_bits(&self) -> u8 {
get_bits(self.eax, 0, 7) as u8
}
pub fn linear_address_bits(&self) -> u8 {
get_bits(self.eax, 8, 15) as u8
}
pub fn guest_physical_address_bits(&self) -> u8 {
get_bits(self.eax, 16, 23) as u8
}
pub fn has_cl_zero(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::CLZERO)
}
pub fn has_inst_ret_cntr_msr(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INST_RETCNT_MSR)
}
pub fn has_restore_fp_error_ptrs(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::RSTR_FP_ERR_PTRS)
}
pub fn has_invlpgb(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::INVLPGB)
}
pub fn has_rdpru(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::RDPRU)
}
pub fn has_mcommit(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::MCOMMIT)
}
pub fn has_wbnoinvd(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::WBNOINVD)
}
pub fn has_int_wbinvd(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INT_WBINVD)
}
pub fn has_unsupported_efer_lmsle(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::EFER_LMSLE_UNSUPP)
}
pub fn has_invlpgb_nested(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INVLPGB_NESTED)
}
    /// Width of the performance time-stamp counter, in bits.
    pub fn perf_tsc_size(&self) -> usize {
let s = get_bits(self.ecx, 16, 17) as u8;
match s & 0b11 {
0b00 => 40,
0b01 => 48,
0b10 => 56,
0b11 => 64,
_ => unreachable!("AND with 0b11 in match"),
}
}
pub fn apic_id_size(&self) -> u8 {
get_bits(self.ecx, 12, 15) as u8
}
pub fn maximum_logical_processors(&self) -> usize {
usize::pow(2, self.apic_id_size() as u32)
}
pub fn num_phys_threads(&self) -> usize {
get_bits(self.ecx, 0, 7) as usize + 1
}
pub fn invlpgb_max_pages(&self) -> u16 {
get_bits(self.edx, 0, 15) as u16
}
pub fn max_rdpru_id(&self) -> u16 {
get_bits(self.edx, 16, 31) as u16
}
}
impl Debug for ProcessorCapacityAndFeatureInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("ProcessorCapacityAndFeatureInfo")
.field("physical_address_bits", &self.physical_address_bits())
.field("linear_address_bits", &self.linear_address_bits())
.field(
"guest_physical_address_bits",
&self.guest_physical_address_bits(),
)
.field("has_cl_zero", &self.has_cl_zero())
.field("has_inst_ret_cntr_msr", &self.has_inst_ret_cntr_msr())
.field(
"has_restore_fp_error_ptrs",
&self.has_restore_fp_error_ptrs(),
)
.field("has_invlpgb", &self.has_invlpgb())
.field("has_rdpru", &self.has_rdpru())
.field("has_mcommit", &self.has_mcommit())
.field("has_wbnoinvd", &self.has_wbnoinvd())
.field("has_int_wbinvd", &self.has_int_wbinvd())
.field(
"has_unsupported_efer_lmsle",
&self.has_unsupported_efer_lmsle(),
)
.field("has_invlpgb_nested", &self.has_invlpgb_nested())
.field("perf_tsc_size", &self.perf_tsc_size())
.field("apic_id_size", &self.apic_id_size())
.field(
"maximum_logical_processors",
&self.maximum_logical_processors(),
)
.field("num_phys_threads", &self.num_phys_threads())
.field("invlpgb_max_pages", &self.invlpgb_max_pages())
.field("max_rdpru_id", &self.max_rdpru_id())
.finish()
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ProcessorCapacityAndFeatureEbx: u32 {
const CLZERO = 1 << 0;
const INST_RETCNT_MSR = 1 << 1;
const RSTR_FP_ERR_PTRS = 1 << 2;
const INVLPGB = 1 << 3;
const RDPRU = 1 << 4;
const MCOMMIT = 1 << 8;
const WBNOINVD = 1 << 9;
const INT_WBINVD = 1 << 13;
const EFER_LMSLE_UNSUPP = 1 << 20;
const INVLPGB_NESTED = 1 << 21;
}
}
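/// SVM (Secure Virtual Machine) feature identification (LEAF=0x8000_000A).
///
/// A hypervisor might gate nested-paging support like this, assuming a
/// `get_svm_info()` accessor:
///
/// ```ignore
/// if let Some(svm) = CpuId::new().get_svm_info() {
///     assert!(svm.has_nested_paging(), "NPT required");
///     println!("ASIDs available: {}", svm.supported_asids());
/// }
/// ```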
#[derive(PartialEq, Eq, Debug)]
pub struct SvmFeatures {
eax: u32,
ebx: u32,
_ecx: u32,
edx: SvmFeaturesEdx,
}
impl SvmFeatures {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
_ecx: data.ecx,
edx: SvmFeaturesEdx::from_bits_truncate(data.edx),
}
}
pub fn revision(&self) -> u8 {
get_bits(self.eax, 0, 7) as u8
}
pub fn supported_asids(&self) -> u32 {
self.ebx
}
pub fn has_nested_paging(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::NP)
}
pub fn has_lbr_virtualization(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::LBR_VIRT)
}
pub fn has_svm_lock(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SVML)
}
pub fn has_nrip(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::NRIPS)
}
pub fn has_tsc_rate_msr(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::TSC_RATE_MSR)
}
pub fn has_vmcb_clean_bits(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VMCB_CLEAN)
}
pub fn has_flush_by_asid(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::FLUSH_BY_ASID)
}
pub fn has_decode_assists(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::DECODE_ASSISTS)
}
pub fn has_pause_filter(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER)
}
pub fn has_pause_filter_threshold(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER_THRESHOLD)
}
pub fn has_avic(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::AVIC)
}
pub fn has_vmsave_virtualization(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VMSAVE_VIRT)
}
pub fn has_gif(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VGIF)
}
pub fn has_gmet(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::GMET)
}
pub fn has_sss_check(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SSS_CHECK)
}
pub fn has_spec_ctrl(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SPEC_CTRL)
}
pub fn has_host_mce_override(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::HOST_MCE_OVERRIDE)
}
pub fn has_tlb_ctrl(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::TLB_CTL)
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct SvmFeaturesEdx: u32 {
const NP = 1 << 0;
const LBR_VIRT = 1 << 1;
const SVML = 1 << 2;
const NRIPS = 1 << 3;
const TSC_RATE_MSR = 1 << 4;
const VMCB_CLEAN = 1 << 5;
const FLUSH_BY_ASID = 1 << 6;
const DECODE_ASSISTS = 1 << 7;
const PAUSE_FILTER = 1 << 10;
const PAUSE_FILTER_THRESHOLD = 1 << 12;
const AVIC = 1 << 13;
const VMSAVE_VIRT = 1 << 15;
const VGIF = 1 << 16;
const GMET = 1 << 17;
const SSS_CHECK = 1 << 19;
const SPEC_CTRL = 1 << 20;
const HOST_MCE_OVERRIDE = 1 << 23;
const TLB_CTL = 1 << 24;
}
}
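/// TLB characteristics for 1-GB pages (LEAF=0x8000_0019).
///
/// Sketch, assuming a `get_tlb_1gb_page_info()` accessor:
///
/// ```ignore
/// if let Some(tlb) = CpuId::new().get_tlb_1gb_page_info() {
///     println!(
///         "L1 dTLB (1 GB pages): {} entries, {}",
///         tlb.dtlb_l1_1gb_size(),
///         tlb.dtlb_l1_1gb_associativity()
///     );
/// }
/// ```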
#[derive(PartialEq, Eq, Debug)]
pub struct Tlb1gbPageInfo {
eax: u32,
ebx: u32,
_ecx: u32,
_edx: u32,
}
impl Tlb1gbPageInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn dtlb_l1_1gb_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 28, 31) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn dtlb_l1_1gb_size(&self) -> u8 {
get_bits(self.eax, 16, 27) as u8
}
pub fn itlb_l1_1gb_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.eax, 12, 15) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn itlb_l1_1gb_size(&self) -> u8 {
get_bits(self.eax, 0, 11) as u8
}
pub fn dtlb_l2_1gb_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn dtlb_l2_1gb_size(&self) -> u8 {
get_bits(self.ebx, 16, 27) as u8
}
pub fn itlb_l2_1gb_associativity(&self) -> Associativity {
let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
Associativity::for_l2(assoc_bits)
}
pub fn itlb_l2_1gb_size(&self) -> u8 {
get_bits(self.ebx, 0, 11) as u8
}
}
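/// Instruction optimization hints (LEAF=0x8000_001A).
///
/// Sketch, assuming a `get_performance_optimization_info()` accessor:
///
/// ```ignore
/// let full_width_128 = CpuId::new()
///     .get_performance_optimization_info()
///     .map_or(false, |info| info.has_fp128());
/// ```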
#[derive(PartialEq, Eq, Debug)]
pub struct PerformanceOptimizationInfo {
eax: PerformanceOptimizationInfoEax,
_ebx: u32,
_ecx: u32,
_edx: u32,
}
impl PerformanceOptimizationInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: PerformanceOptimizationInfoEax::from_bits_truncate(data.eax),
_ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_fp128(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::FP128)
}
pub fn has_movu(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::MOVU)
}
pub fn has_fp256(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::FP256)
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PerformanceOptimizationInfoEax: u32 {
const FP128 = 1 << 0;
const MOVU = 1 << 1;
const FP256 = 1 << 2;
}
}
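/// Processor topology information (LEAF=0x8000_001E): x2APIC ID, core ID,
/// and node ID of the logical processor executing the query.
///
/// Sketch, assuming a `get_processor_topology_info()` accessor:
///
/// ```ignore
/// if let Some(topo) = CpuId::new().get_processor_topology_info() {
///     println!(
///         "core {} ({} threads/core), node {}",
///         topo.core_id(),
///         topo.threads_per_core(),
///         topo.node_id()
///     );
/// }
/// ```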
#[derive(PartialEq, Eq)]
pub struct ProcessorTopologyInfo {
eax: u32,
ebx: u32,
ecx: u32,
_edx: u32,
}
impl ProcessorTopologyInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
ecx: data.ecx,
_edx: data.edx,
}
}
pub fn x2apic_id(&self) -> u32 {
self.eax
}
pub fn core_id(&self) -> u8 {
get_bits(self.ebx, 0, 7) as u8
}
pub fn threads_per_core(&self) -> u8 {
get_bits(self.ebx, 8, 15) as u8 + 1
}
pub fn node_id(&self) -> u8 {
get_bits(self.ecx, 0, 7) as u8
}
pub fn nodes_per_processor(&self) -> u8 {
get_bits(self.ecx, 8, 10) as u8 + 1
}
}
impl Debug for ProcessorTopologyInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("ProcessorTopologyInfo")
.field("x2apic_id", &self.x2apic_id())
.field("core_id", &self.core_id())
.field("threads_per_core", &self.threads_per_core())
.field("node_id", &self.node_id())
.field("nodes_per_processor", &self.nodes_per_processor())
.finish()
}
}
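/// Encrypted memory capabilities (LEAF=0x8000_001F): SME/SEV feature bits
/// and the C-bit position used to mark pages as encrypted.
///
/// Sketch, assuming a `get_memory_encryption_info()` accessor:
///
/// ```ignore
/// if let Some(mem) = CpuId::new().get_memory_encryption_info() {
///     if mem.has_sme() {
///         println!("C-bit at physical address bit {}", mem.c_bit_position());
///     }
/// }
/// ```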
#[derive(Debug, PartialEq, Eq)]
pub struct MemoryEncryptionInfo {
eax: MemoryEncryptionInfoEax,
ebx: u32,
ecx: u32,
edx: u32,
}
impl MemoryEncryptionInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: MemoryEncryptionInfoEax::from_bits_truncate(data.eax),
ebx: data.ebx,
ecx: data.ecx,
edx: data.edx,
}
}
pub fn has_sme(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SME)
}
pub fn has_sev(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV)
}
pub fn has_page_flush_msr(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::PAGE_FLUSH_MSR)
}
pub fn has_sev_es(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV_ES)
}
pub fn has_sev_snp(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV_SNP)
}
pub fn has_vmpl(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::VMPL)
}
pub fn has_hw_enforced_cache_coh(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::HWENFCACHECOH)
}
pub fn has_64bit_mode(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::HOST64)
}
pub fn has_restricted_injection(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::RESTINJECT)
}
pub fn has_alternate_injection(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::ALTINJECT)
}
pub fn has_debug_swap(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::DBGSWP)
}
pub fn has_prevent_host_ibs(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::PREVHOSTIBS)
}
pub fn has_vte(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::VTE)
}
pub fn c_bit_position(&self) -> u8 {
get_bits(self.ebx, 0, 5) as u8
}
pub fn physical_address_reduction(&self) -> u8 {
get_bits(self.ebx, 6, 11) as u8
}
pub fn max_encrypted_guests(&self) -> u32 {
self.ecx
}
pub fn min_sev_no_es_asid(&self) -> u32 {
self.edx
}
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct MemoryEncryptionInfoEax: u32 {
const SME = 1 << 0;
const SEV = 1 << 1;
const PAGE_FLUSH_MSR = 1 << 2;
const SEV_ES = 1 << 3;
const SEV_SNP = 1 << 4;
const VMPL = 1 << 5;
const HWENFCACHECOH = 1 << 10;
const HOST64 = 1 << 11;
const RESTINJECT = 1 << 12;
const ALTINJECT = 1 << 13;
const DBGSWP = 1 << 14;
const PREVHOSTIBS = 1 << 15;
const VTE = 1 << 16;
}
}