use bitflags::bitflags;
use core::fmt::{self, Debug, Display, Formatter};
use core::mem::size_of;
use core::slice;
use core::str;
use crate::{
get_bits, CpuIdReader, CpuIdResult, Vendor, EAX_EXTENDED_CPU_TOPOLOGY,
EAX_PQOS_EXTENDED_FEATURES,
};
/// Extended Processor and Processor Feature Identifiers (LEAF=0x8000_0001).
///
/// ECX/EDX are pre-decoded into bitflags at construction time; `vendor` is
/// kept so that AMD-only bits can be gated in the accessor methods.
pub struct ExtendedProcessorFeatureIdentifiers {
    vendor: Vendor, // used to gate bits that are only defined on AMD
    eax: u32,       // extended processor signature (raw)
    ebx: u32,       // brand id / package type fields
    ecx: ExtendedFunctionInfoEcx,
    edx: ExtendedFunctionInfoEdx,
}
impl ExtendedProcessorFeatureIdentifiers {
pub(crate) fn new(vendor: Vendor, data: CpuIdResult) -> Self {
Self {
vendor,
eax: data.eax,
ebx: data.ebx,
ecx: ExtendedFunctionInfoEcx::from_bits_truncate(data.ecx),
edx: ExtendedFunctionInfoEdx::from_bits_truncate(data.edx),
}
}
pub fn extended_signature(&self) -> u32 {
self.eax
}
pub fn pkg_type(&self) -> u32 {
get_bits(self.ebx, 28, 31)
}
pub fn brand_id(&self) -> u32 {
get_bits(self.ebx, 0, 15)
}
pub fn has_lahf_sahf(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::LAHF_SAHF)
}
pub fn has_cmp_legacy(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::CMP_LEGACY)
}
pub fn has_svm(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SVM)
}
pub fn has_ext_apic_space(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::EXT_APIC_SPACE)
}
pub fn has_alt_mov_cr8(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ALTMOVCR8)
}
pub fn has_lzcnt(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::LZCNT)
}
pub fn has_sse4a(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SSE4A)
}
pub fn has_misaligned_sse_mode(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MISALIGNSSE)
}
pub fn has_prefetchw(&self) -> bool {
self.ecx.contains(ExtendedFunctionInfoEcx::PREFETCHW)
}
pub fn has_osvw(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::OSVW)
}
pub fn has_ibs(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::IBS)
}
pub fn has_xop(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::XOP)
}
pub fn has_skinit(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SKINIT)
}
pub fn has_wdt(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::WDT)
}
pub fn has_lwp(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::LWP)
}
pub fn has_fma4(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::FMA4)
}
pub fn has_tbm(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TBM)
}
pub fn has_topology_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TOPEXT)
}
pub fn has_perf_cntr_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXT)
}
pub fn has_nb_perf_cntr_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTNB)
}
pub fn has_data_access_bkpt_extension(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::DATABRKPEXT)
}
pub fn has_perf_tsc(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFTSC)
}
pub fn has_perf_cntr_llc_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTLLC)
}
pub fn has_monitorx_mwaitx(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MONITORX)
}
pub fn has_addr_mask_extension(&self) -> bool {
self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ADDRMASKEXT)
}
pub fn has_syscall_sysret(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::SYSCALL_SYSRET)
}
pub fn has_execute_disable(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::EXECUTE_DISABLE)
}
pub fn has_mmx_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::MMXEXT)
}
pub fn has_fast_fxsave_fxstor(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::FFXSR)
}
pub fn has_1gib_pages(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::GIB_PAGES)
}
pub fn has_rdtscp(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::RDTSCP)
}
pub fn has_64bit_mode(&self) -> bool {
self.edx.contains(ExtendedFunctionInfoEdx::I64BIT_MODE)
}
pub fn has_amd_3dnow_extensions(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOWEXT)
}
pub fn has_3dnow(&self) -> bool {
self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOW)
}
}
impl Debug for ExtendedProcessorFeatureIdentifiers {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        let mut builder = f.debug_struct("ExtendedProcessorFeatureIdentifiers");
        builder.field("extended_signature", &self.extended_signature());
        // pkg_type / brand_id are only meaningful on AMD parts.
        if matches!(self.vendor, Vendor::Amd) {
            builder
                .field("pkg_type", &self.pkg_type())
                .field("brand_id", &self.brand_id());
        }
        builder
            .field("ecx_features", &self.ecx)
            .field("edx_features", &self.edx)
            .finish()
    }
}
bitflags! {
    /// ECX feature bits of leaf 0x8000_0001.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEcx: u32 {
        const LAHF_SAHF = 1 << 0; // LAHF/SAHF instructions
        const CMP_LEGACY = 1 << 1; // core multi-processing legacy mode
        const SVM = 1 << 2; // secure virtual machine
        const EXT_APIC_SPACE = 1 << 3; // extended APIC register space
        const ALTMOVCR8 = 1 << 4; // LOCK MOV CR0 means MOV CR8
        const LZCNT = 1 << 5;
        const SSE4A = 1 << 6;
        const MISALIGNSSE = 1 << 7; // misaligned SSE mode
        const PREFETCHW = 1 << 8;
        const OSVW = 1 << 9; // OS-visible workaround
        const IBS = 1 << 10; // instruction-based sampling
        const XOP = 1 << 11;
        const SKINIT = 1 << 12;
        const WDT = 1 << 13; // watchdog timer
        const LWP = 1 << 15; // lightweight profiling
        const FMA4 = 1 << 16;
        const TBM = 1 << 21; // trailing bit manipulation
        const TOPEXT = 1 << 22; // topology extensions
        const PERFCTREXT = 1 << 23; // core perf counter extensions
        const PERFCTREXTNB = 1 << 24; // northbridge perf counter extensions
        const DATABRKPEXT = 1 << 26; // data-access breakpoint extension
        const PERFTSC = 1 << 27; // performance time-stamp counter
        const PERFCTREXTLLC = 1 << 28; // LLC perf counter extensions
        const MONITORX = 1 << 29; // MONITORX/MWAITX
        const ADDRMASKEXT = 1 << 30; // breakpoint address-mask extension
    }
}
bitflags! {
    /// EDX feature bits of leaf 0x8000_0001.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEdx: u32 {
        const SYSCALL_SYSRET = 1 << 11;
        const EXECUTE_DISABLE = 1 << 20; // NX bit
        const MMXEXT = 1 << 22; // AMD MMX extensions
        const FFXSR = 1 << 25; // fast FXSAVE/FXRSTOR
        const GIB_PAGES = 1 << 26; // 1-GiB pages
        const RDTSCP = 1 << 27;
        const I64BIT_MODE = 1 << 29; // long mode
        const THREEDNOWEXT = 1 << 30; // 3DNow! extensions
        const THREEDNOW = 1 << 31; // 3DNow!
    }
}
/// Processor name string, assembled from leaves 0x8000_0002..=0x8000_0004.
pub struct ProcessorBrandString {
    // Three leaves, four u32 registers each: 48 bytes of ASCII, NUL-padded.
    data: [CpuIdResult; 3],
}
impl ProcessorBrandString {
    /// Stores the raw register values of leaves 0x8000_0002..=0x8000_0004.
    pub(crate) fn new(data: [CpuIdResult; 3]) -> Self {
        Self { data }
    }

    /// Returns the brand string, trimmed of surrounding whitespace, or
    /// "Invalid Processor Brand String" if the bytes are not valid UTF-8.
    pub fn as_str(&self) -> &str {
        // SAFETY: the pointer and length describe exactly the storage of
        // `self.data`, and the resulting borrow is tied to `&self`.
        // Assumes `CpuIdResult` is four `u32` fields with no padding
        // (every byte initialized) — TODO confirm against its definition.
        let slice: &[u8] = unsafe {
            slice::from_raw_parts(
                self.data.as_ptr() as *const u8,
                self.data.len() * size_of::<CpuIdResult>(),
            )
        };
        // The string is NUL-padded: keep only bytes before the first 0.
        // `split` always yields at least one subslice, so unwrap cannot panic.
        let slice = slice.split(|&x| x == 0).next().unwrap();
        str::from_utf8(slice)
            .unwrap_or("Invalid Processor Brand String")
            .trim()
    }
}
impl Debug for ProcessorBrandString {
    /// Shows the decoded string rather than the raw register dump.
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        let mut ds = f.debug_struct("ProcessorBrandString");
        ds.field("as_str", &self.as_str());
        ds.finish()
    }
}
/// L1 cache and TLB information (LEAF=0x8000_0005).
#[derive(PartialEq, Eq, Debug)]
pub struct L1CacheTlbInfo {
    eax: u32, // 2M/4M-page TLB fields
    ebx: u32, // 4K-page TLB fields
    ecx: u32, // L1 data cache fields
    edx: u32, // L1 instruction cache fields
}
impl L1CacheTlbInfo {
    /// Captures the raw registers of leaf 0x8000_0005.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Data TLB associativity for 2-MiB/4-MiB pages (EAX[31:24]).
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.eax, 24, 31) as u8)
    }

    /// Data TLB entry count for 2-MiB/4-MiB pages (EAX[23:16]).
    pub fn dtlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 16, 23) as u8
    }

    /// Instruction TLB associativity for 2-MiB/4-MiB pages (EAX[15:8]).
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.eax, 8, 15) as u8)
    }

    /// Instruction TLB entry count for 2-MiB/4-MiB pages (EAX[7:0]).
    pub fn itlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Data TLB associativity for 4-KiB pages (EBX[31:24]).
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.ebx, 24, 31) as u8)
    }

    /// Data TLB entry count for 4-KiB pages (EBX[23:16]).
    pub fn dtlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 16, 23) as u8
    }

    /// Instruction TLB associativity for 4-KiB pages (EBX[15:8]).
    pub fn itlb_4k_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.ebx, 8, 15) as u8)
    }

    /// Instruction TLB entry count for 4-KiB pages (EBX[7:0]).
    pub fn itlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 0, 7) as u8
    }

    /// L1 data cache size field (ECX[31:24]).
    pub fn dcache_size(&self) -> u8 {
        get_bits(self.ecx, 24, 31) as u8
    }

    /// L1 data cache associativity (ECX[23:16]).
    pub fn dcache_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.ecx, 16, 23) as u8)
    }

    /// L1 data cache lines per tag (ECX[15:8]).
    pub fn dcache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 15) as u8
    }

    /// L1 data cache line size in bytes (ECX[7:0]).
    pub fn dcache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L1 instruction cache size field (EDX[31:24]).
    pub fn icache_size(&self) -> u8 {
        get_bits(self.edx, 24, 31) as u8
    }

    /// L1 instruction cache associativity (EDX[23:16]).
    pub fn icache_associativity(&self) -> Associativity {
        Associativity::for_l1(get_bits(self.edx, 16, 23) as u8)
    }

    /// L1 instruction cache lines per tag (EDX[15:8]).
    pub fn icache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 15) as u8
    }

    /// L1 instruction cache line size in bytes (EDX[7:0]).
    pub fn icache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }
}
/// L2/L3 cache and L2 TLB information (LEAF=0x8000_0006).
#[derive(PartialEq, Eq, Debug)]
pub struct L2And3CacheTlbInfo {
    eax: u32, // L2 TLB 2M/4M-page fields
    ebx: u32, // L2 TLB 4K-page fields
    ecx: u32, // L2 cache fields
    edx: u32, // L3 cache fields
}
impl L2And3CacheTlbInfo {
    /// Captures the raw registers of leaf 0x8000_0006.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// L2 data TLB associativity for 2-MiB/4-MiB pages (EAX[31:28]).
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.eax, 28, 31) as u8)
    }

    /// L2 data TLB entry count for 2-MiB/4-MiB pages (EAX[27:16]).
    pub fn dtlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 16, 27) as u16
    }

    /// L2 instruction TLB associativity for 2-MiB/4-MiB pages (EAX[15:12]).
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.eax, 12, 15) as u8)
    }

    /// L2 instruction TLB entry count for 2-MiB/4-MiB pages (EAX[11:0]).
    pub fn itlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 0, 11) as u16
    }

    /// L2 data TLB associativity for 4-KiB pages (EBX[31:28]).
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.ebx, 28, 31) as u8)
    }

    /// L2 data TLB entry count for 4-KiB pages (EBX[27:16]).
    pub fn dtlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 16, 27) as u16
    }

    /// L2 instruction TLB associativity for 4-KiB pages (EBX[15:12]).
    pub fn itlb_4k_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.ebx, 12, 15) as u8)
    }

    /// L2 instruction TLB entry count for 4-KiB pages (EBX[11:0]).
    pub fn itlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 0, 11) as u16
    }

    /// L2 cache line size in bytes (ECX[7:0]).
    pub fn l2cache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L2 cache lines per tag (ECX[11:8]).
    pub fn l2cache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 11) as u8
    }

    /// L2 cache associativity (ECX[15:12]).
    pub fn l2cache_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.ecx, 12, 15) as u8)
    }

    /// L2 cache size field (ECX[31:16]).
    pub fn l2cache_size(&self) -> u16 {
        get_bits(self.ecx, 16, 31) as u16
    }

    /// L3 cache line size in bytes (EDX[7:0]).
    pub fn l3cache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }

    /// L3 cache lines per tag (EDX[11:8]).
    pub fn l3cache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 11) as u8
    }

    /// L3 cache associativity (EDX[15:12]).
    pub fn l3cache_associativity(&self) -> Associativity {
        Associativity::for_l3(get_bits(self.edx, 12, 15) as u8)
    }

    /// L3 cache size field (EDX[31:18]); presumably in 512-KiB units per
    /// AMD documentation — TODO confirm at the call site.
    pub fn l3cache_size(&self) -> u16 {
        get_bits(self.edx, 18, 31) as u16
    }
}
/// Cache or TLB associativity as decoded from the AMD extended cache leaves.
#[derive(PartialEq, Eq, Debug)]
pub enum Associativity {
    Disabled,
    DirectMapped,
    /// N-way set associative.
    NWay(u8),
    FullyAssociative,
    /// Encoding is reserved or indicates that leaf 0x8000_001D must be used.
    Unknown,
}

impl Display for Associativity {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let s = match self {
            Associativity::Disabled => "Disabled",
            Associativity::DirectMapped => "Direct mapped",
            Associativity::NWay(n) => {
                return write!(f, "NWay({})", n);
            }
            Associativity::FullyAssociative => "Fully associative",
            Associativity::Unknown => "Unknown (check leaf 0x8000_001d)",
        };
        f.write_str(s)
    }
}

impl Associativity {
    /// Decodes the 8-bit L1 associativity field (leaf 0x8000_0005): the raw
    /// value is the way count, with 0x00, 0x01 and 0xff special-cased.
    fn for_l1(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled,
            0x1 => Associativity::DirectMapped,
            0x2..=0xfe => Associativity::NWay(n),
            0xff => Associativity::FullyAssociative,
        }
    }

    /// Decodes the 4-bit L2/L3 associativity encoding (leaf 0x8000_0006).
    fn for_l2(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled,
            0x1 => Associativity::DirectMapped,
            0x2 => Associativity::NWay(2),
            // Bug fix: 0x3 (3-way) was missing and fell through to Unknown.
            0x3 => Associativity::NWay(3),
            0x4 => Associativity::NWay(4),
            0x5 => Associativity::NWay(6),
            0x6 => Associativity::NWay(8),
            0x8 => Associativity::NWay(16),
            // 0x9: associativity must be determined from leaf 0x8000_001D.
            0x9 => Associativity::Unknown,
            0xa => Associativity::NWay(32),
            0xb => Associativity::NWay(48),
            0xc => Associativity::NWay(64),
            0xd => Associativity::NWay(96),
            0xe => Associativity::NWay(128),
            0xf => Associativity::FullyAssociative,
            // Remaining encodings (0x7 and out-of-range values) are reserved.
            _ => Associativity::Unknown,
        }
    }

    /// L3 uses the same 4-bit encoding as L2.
    fn for_l3(n: u8) -> Associativity {
        Associativity::for_l2(n)
    }
}
/// Processor power management and RAS capabilities (LEAF=0x8000_0007).
#[derive(Debug, PartialEq, Eq)]
pub struct ApmInfo {
    _eax: u32, // reserved
    ebx: RasCapabilities,
    ecx: u32, // processor power sample time ratio
    edx: ApmInfoEdx,
}
impl ApmInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
_eax: data.eax,
ebx: RasCapabilities::from_bits_truncate(data.ebx),
ecx: data.ecx,
edx: ApmInfoEdx::from_bits_truncate(data.edx),
}
}
pub fn has_mca_overflow_recovery(&self) -> bool {
self.ebx.contains(RasCapabilities::MCAOVFLRECOV)
}
pub fn has_succor(&self) -> bool {
self.ebx.contains(RasCapabilities::SUCCOR)
}
pub fn has_hwa(&self) -> bool {
self.ebx.contains(RasCapabilities::HWA)
}
pub fn cpu_pwr_sample_time_ratio(&self) -> u32 {
self.ecx
}
pub fn has_ts(&self) -> bool {
self.edx.contains(ApmInfoEdx::TS)
}
pub fn has_freq_id_ctrl(&self) -> bool {
self.edx.contains(ApmInfoEdx::FID)
}
pub fn has_volt_id_ctrl(&self) -> bool {
self.edx.contains(ApmInfoEdx::VID)
}
pub fn has_thermtrip(&self) -> bool {
self.edx.contains(ApmInfoEdx::TTP)
}
pub fn has_tm(&self) -> bool {
self.edx.contains(ApmInfoEdx::TM)
}
pub fn has_100mhz_steps(&self) -> bool {
self.edx.contains(ApmInfoEdx::MHZSTEPS100)
}
pub fn has_hw_pstate(&self) -> bool {
self.edx.contains(ApmInfoEdx::HWPSTATE)
}
pub fn has_invariant_tsc(&self) -> bool {
self.edx.contains(ApmInfoEdx::INVTSC)
}
pub fn has_cpb(&self) -> bool {
self.edx.contains(ApmInfoEdx::CPB)
}
pub fn has_ro_effective_freq_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::EFFFREQRO)
}
pub fn has_feedback_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::PROCFEEDBACKIF)
}
pub fn has_power_reporting_iface(&self) -> bool {
self.edx.contains(ApmInfoEdx::PROCPWRREPORT)
}
}
bitflags! {
    /// EDX power-management bits of leaf 0x8000_0007.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ApmInfoEdx: u32 {
        const TS = 1 << 0; // temperature sensor
        const FID = 1 << 1; // frequency ID control
        const VID = 1 << 2; // voltage ID control
        const TTP = 1 << 3; // THERMTRIP
        const TM = 1 << 4; // hardware thermal control
        const MHZSTEPS100 = 1 << 6; // 100-MHz multiplier steps
        const HWPSTATE = 1 << 7; // hardware P-state control
        const INVTSC = 1 << 8; // invariant TSC
        const CPB = 1 << 9; // core performance boost
        const EFFFREQRO = 1 << 10; // read-only effective freq. interface
        const PROCFEEDBACKIF = 1 << 11; // processor feedback interface
        const PROCPWRREPORT = 1 << 12; // processor power reporting
    }
}
bitflags! {
    /// EBX RAS capability bits of leaf 0x8000_0007.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct RasCapabilities: u32 {
        const MCAOVFLRECOV = 1 << 0; // MCA overflow recovery
        const SUCCOR = 1 << 1; // sw uncorrectable error containment/recovery
        const HWA = 1 << 2; // hardware assert
    }
}
/// Processor capacity parameters and extended features (LEAF=0x8000_0008).
#[derive(PartialEq, Eq)]
pub struct ProcessorCapacityAndFeatureInfo {
    eax: u32, // address-size fields
    ebx: ProcessorCapacityAndFeatureEbx,
    ecx: u32, // thread-count / APIC-ID-size / perf-TSC-size fields
    edx: u32, // INVLPGB / RDPRU limit fields
}
impl ProcessorCapacityAndFeatureInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: ProcessorCapacityAndFeatureEbx::from_bits_truncate(data.ebx),
ecx: data.ecx,
edx: data.edx,
}
}
pub fn physical_address_bits(&self) -> u8 {
get_bits(self.eax, 0, 7) as u8
}
pub fn linear_address_bits(&self) -> u8 {
get_bits(self.eax, 8, 15) as u8
}
pub fn guest_physical_address_bits(&self) -> u8 {
get_bits(self.eax, 16, 23) as u8
}
pub fn has_cl_zero(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::CLZERO)
}
pub fn has_inst_ret_cntr_msr(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INST_RETCNT_MSR)
}
pub fn has_restore_fp_error_ptrs(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::RSTR_FP_ERR_PTRS)
}
pub fn has_invlpgb(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::INVLPGB)
}
pub fn has_rdpru(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::RDPRU)
}
pub fn has_mcommit(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::MCOMMIT)
}
pub fn has_wbnoinvd(&self) -> bool {
self.ebx.contains(ProcessorCapacityAndFeatureEbx::WBNOINVD)
}
pub fn has_int_wbinvd(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INT_WBINVD)
}
pub fn has_unsupported_efer_lmsle(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::EFER_LMSLE_UNSUPP)
}
pub fn has_invlpgb_nested(&self) -> bool {
self.ebx
.contains(ProcessorCapacityAndFeatureEbx::INVLPGB_NESTED)
}
pub fn perf_tsc_size(&self) -> usize {
let s = get_bits(self.ecx, 16, 17) as u8;
match s & 0b11 {
0b00 => 40,
0b01 => 48,
0b10 => 56,
0b11 => 64,
_ => unreachable!("AND with 0b11 in match"),
}
}
pub fn apic_id_size(&self) -> u8 {
get_bits(self.ecx, 12, 15) as u8
}
pub fn maximum_logical_processors(&self) -> usize {
usize::pow(2, self.apic_id_size() as u32)
}
pub fn num_phys_threads(&self) -> usize {
get_bits(self.ecx, 0, 7) as usize + 1
}
pub fn invlpgb_max_pages(&self) -> u16 {
get_bits(self.edx, 0, 15) as u16
}
pub fn max_rdpru_id(&self) -> u16 {
get_bits(self.edx, 16, 31) as u16
}
}
impl Debug for ProcessorCapacityAndFeatureInfo {
    // Renders the decoded accessor values rather than raw registers; the
    // field order here is part of the (string) Debug output and mirrors the
    // accessor declaration order above.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProcessorCapacityAndFeatureInfo")
            .field("physical_address_bits", &self.physical_address_bits())
            .field("linear_address_bits", &self.linear_address_bits())
            .field(
                "guest_physical_address_bits",
                &self.guest_physical_address_bits(),
            )
            .field("has_cl_zero", &self.has_cl_zero())
            .field("has_inst_ret_cntr_msr", &self.has_inst_ret_cntr_msr())
            .field(
                "has_restore_fp_error_ptrs",
                &self.has_restore_fp_error_ptrs(),
            )
            .field("has_invlpgb", &self.has_invlpgb())
            .field("has_rdpru", &self.has_rdpru())
            .field("has_mcommit", &self.has_mcommit())
            .field("has_wbnoinvd", &self.has_wbnoinvd())
            .field("has_int_wbinvd", &self.has_int_wbinvd())
            .field(
                "has_unsupported_efer_lmsle",
                &self.has_unsupported_efer_lmsle(),
            )
            .field("has_invlpgb_nested", &self.has_invlpgb_nested())
            .field("perf_tsc_size", &self.perf_tsc_size())
            .field("apic_id_size", &self.apic_id_size())
            .field(
                "maximum_logical_processors",
                &self.maximum_logical_processors(),
            )
            .field("num_phys_threads", &self.num_phys_threads())
            .field("invlpgb_max_pages", &self.invlpgb_max_pages())
            .field("max_rdpru_id", &self.max_rdpru_id())
            .finish()
    }
}
bitflags! {
    /// EBX feature bits of leaf 0x8000_0008.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ProcessorCapacityAndFeatureEbx: u32 {
        const CLZERO = 1 << 0; // CLZERO instruction
        const INST_RETCNT_MSR = 1 << 1; // instructions-retired counter MSR
        const RSTR_FP_ERR_PTRS = 1 << 2; // XRSTOR restores FP error pointers
        const INVLPGB = 1 << 3; // INVLPGB/TLBSYNC
        const RDPRU = 1 << 4; // RDPRU instruction
        const MCOMMIT = 1 << 8; // MCOMMIT instruction
        const WBNOINVD = 1 << 9; // WBNOINVD instruction
        const INT_WBINVD = 1 << 13; // interruptible WBINVD/WBNOINVD
        const EFER_LMSLE_UNSUPP = 1 << 20; // EFER.LMSLE unsupported
        const INVLPGB_NESTED = 1 << 21; // INVLPGB for nested translations
    }
}
/// SVM revision and feature identification (LEAF=0x8000_000A).
#[derive(PartialEq, Eq, Debug)]
pub struct SvmFeatures {
    eax: u32, // SVM revision field
    ebx: u32, // number of available ASIDs
    _ecx: u32, // reserved
    edx: SvmFeaturesEdx,
}
impl SvmFeatures {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: data.eax,
ebx: data.ebx,
_ecx: data.ecx,
edx: SvmFeaturesEdx::from_bits_truncate(data.edx),
}
}
pub fn revision(&self) -> u8 {
get_bits(self.eax, 0, 7) as u8
}
pub fn supported_asids(&self) -> u32 {
self.ebx
}
pub fn has_nested_paging(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::NP)
}
pub fn has_lbr_virtualization(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::LBR_VIRT)
}
pub fn has_svm_lock(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SVML)
}
pub fn has_nrip(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::NRIPS)
}
pub fn has_tsc_rate_msr(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::TSC_RATE_MSR)
}
pub fn has_vmcb_clean_bits(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VMCB_CLEAN)
}
pub fn has_flush_by_asid(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::FLUSH_BY_ASID)
}
pub fn has_decode_assists(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::DECODE_ASSISTS)
}
pub fn has_pause_filter(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER)
}
pub fn has_pause_filter_threshold(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER_THRESHOLD)
}
pub fn has_avic(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::AVIC)
}
pub fn has_vmsave_virtualization(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VMSAVE_VIRT)
}
pub fn has_gif(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::VGIF)
}
pub fn has_gmet(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::GMET)
}
pub fn has_sss_check(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SSS_CHECK)
}
pub fn has_spec_ctrl(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::SPEC_CTRL)
}
pub fn has_host_mce_override(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::HOST_MCE_OVERRIDE)
}
pub fn has_tlb_ctrl(&self) -> bool {
self.edx.contains(SvmFeaturesEdx::TLB_CTL)
}
}
bitflags! {
    /// EDX SVM feature bits of leaf 0x8000_000A.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SvmFeaturesEdx: u32 {
        const NP = 1 << 0; // nested paging
        const LBR_VIRT = 1 << 1; // LBR virtualization
        const SVML = 1 << 2; // SVM lock
        const NRIPS = 1 << 3; // next-RIP save
        const TSC_RATE_MSR = 1 << 4;
        const VMCB_CLEAN = 1 << 5; // VMCB clean bits
        const FLUSH_BY_ASID = 1 << 6;
        const DECODE_ASSISTS = 1 << 7;
        const PAUSE_FILTER = 1 << 10;
        const PAUSE_FILTER_THRESHOLD = 1 << 12;
        const AVIC = 1 << 13; // advanced virtual interrupt controller
        const VMSAVE_VIRT = 1 << 15;
        const VGIF = 1 << 16; // virtualized GIF
        const GMET = 1 << 17; // guest mode execute trap
        const SSS_CHECK = 1 << 19; // shadow-stack restrictions
        const SPEC_CTRL = 1 << 20;
        const HOST_MCE_OVERRIDE = 1 << 23;
        const TLB_CTL = 1 << 24;
    }
}
/// TLB information for 1-GiB pages (LEAF=0x8000_0019).
#[derive(PartialEq, Eq, Debug)]
pub struct Tlb1gbPageInfo {
    eax: u32, // L1 TLB fields
    ebx: u32, // L2 TLB fields
    _ecx: u32, // reserved
    _edx: u32, // reserved
}
impl Tlb1gbPageInfo {
    /// Captures the raw registers of leaf 0x8000_0019.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            _ecx: data.ecx,
            _edx: data.edx,
        }
    }

    // NOTE(review): the *_size fields below are 12 bits wide in the registers
    // but are truncated by `as u8`; counts above 255 entries would be
    // misreported. Widening the return type would break the public API, so
    // this is only flagged here — TODO confirm intended behavior upstream.

    /// L1 data TLB associativity for 1-GiB pages (EAX[31:28]).
    pub fn dtlb_l1_1gb_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.eax, 28, 31) as u8)
    }

    /// L1 data TLB entry count for 1-GiB pages (EAX[27:16], truncated).
    pub fn dtlb_l1_1gb_size(&self) -> u8 {
        get_bits(self.eax, 16, 27) as u8
    }

    /// L1 instruction TLB associativity for 1-GiB pages (EAX[15:12]).
    pub fn itlb_l1_1gb_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.eax, 12, 15) as u8)
    }

    /// L1 instruction TLB entry count for 1-GiB pages (EAX[11:0], truncated).
    pub fn itlb_l1_1gb_size(&self) -> u8 {
        get_bits(self.eax, 0, 11) as u8
    }

    /// L2 data TLB associativity for 1-GiB pages (EBX[31:28]).
    pub fn dtlb_l2_1gb_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.ebx, 28, 31) as u8)
    }

    /// L2 data TLB entry count for 1-GiB pages (EBX[27:16], truncated).
    pub fn dtlb_l2_1gb_size(&self) -> u8 {
        get_bits(self.ebx, 16, 27) as u8
    }

    /// L2 instruction TLB associativity for 1-GiB pages (EBX[15:12]).
    pub fn itlb_l2_1gb_associativity(&self) -> Associativity {
        Associativity::for_l2(get_bits(self.ebx, 12, 15) as u8)
    }

    /// L2 instruction TLB entry count for 1-GiB pages (EBX[11:0], truncated).
    pub fn itlb_l2_1gb_size(&self) -> u8 {
        get_bits(self.ebx, 0, 11) as u8
    }
}
/// Performance optimization identifiers (LEAF=0x8000_001A).
#[derive(PartialEq, Eq, Debug)]
pub struct PerformanceOptimizationInfo {
    eax: PerformanceOptimizationInfoEax,
    _ebx: u32, // reserved
    _ecx: u32, // reserved
    _edx: u32, // reserved
}
impl PerformanceOptimizationInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: PerformanceOptimizationInfoEax::from_bits_truncate(data.eax),
_ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_fp128(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::FP128)
}
pub fn has_movu(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::MOVU)
}
pub fn has_fp256(&self) -> bool {
self.eax.contains(PerformanceOptimizationInfoEax::FP256)
}
}
bitflags! {
    /// EAX bits of leaf 0x8000_001A.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PerformanceOptimizationInfoEax: u32 {
        const FP128 = 1 << 0; // 128-bit SSE full-width
        const MOVU = 1 << 1; // prefer unaligned moves
        const FP256 = 1 << 2; // 256-bit AVX full-width
    }
}
/// Instruction-based sampling capabilities (LEAF=0x8000_001B).
#[derive(PartialEq, Eq, Debug)]
pub struct InstructionBasedSamplingCapabilities {
    eax: InstructionBasedSamplingCapabilitiesEax,
    _ebx: u32, // reserved
    _ecx: u32, // reserved
    _edx: u32, // reserved
}
impl InstructionBasedSamplingCapabilities {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: InstructionBasedSamplingCapabilitiesEax::from_bits_truncate(data.eax),
_ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_feature_flags(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::IBSFFV)
}
pub fn has_fetch_sampling(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::FETCH_SAM)
}
pub fn has_execution_sampling(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::OP_SAM)
}
pub fn has_read_write_operation_counter(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::RD_WR_OP_CNT)
}
pub fn has_operation_counter(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::OP_CNT)
}
pub fn has_branch_target_address_reporting(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::BRN_TRGT)
}
pub fn has_operation_counter_extended(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::OP_CNT_EXT)
}
pub fn has_invalid_rip_indication(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::RIP_INVALID_CHK)
}
pub fn has_fused_branch_micro_op_indication(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::OP_BRN_FUSE)
}
pub fn has_l3_miss_filtering(&self) -> bool {
self.eax
.contains(InstructionBasedSamplingCapabilitiesEax::IBS_L3_MISS_FILTERING)
}
}
bitflags! {
    /// EAX IBS capability bits of leaf 0x8000_001B.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct InstructionBasedSamplingCapabilitiesEax: u32 {
        const IBSFFV = 1 << 0; // feature flags valid
        const FETCH_SAM = 1 << 1; // fetch sampling
        const OP_SAM = 1 << 2; // op (execution) sampling
        const RD_WR_OP_CNT = 1 << 3; // read/write of op counter
        const OP_CNT = 1 << 4; // op counting mode
        const BRN_TRGT = 1 << 5; // branch target address reporting
        const OP_CNT_EXT = 1 << 6; // extended op-counter size
        const RIP_INVALID_CHK = 1 << 7; // invalid-RIP indication
        const OP_BRN_FUSE = 1 << 8; // fused branch micro-op indication
        const IBS_L3_MISS_FILTERING = 1 << 11; // L3-miss filtering
    }
}
/// Processor topology information (LEAF=0x8000_001E).
#[derive(PartialEq, Eq)]
pub struct ProcessorTopologyInfo {
    eax: u32, // extended APIC ID
    ebx: u32, // core id / threads-per-core fields
    ecx: u32, // node id / nodes-per-processor fields
    _edx: u32, // reserved
}
impl ProcessorTopologyInfo {
    /// Captures the raw registers of leaf 0x8000_001E.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// Extended (x2APIC) APIC ID (raw EAX).
    pub fn x2apic_id(&self) -> u32 {
        self.eax
    }

    /// Core ID (EBX[7:0]).
    pub fn core_id(&self) -> u8 {
        get_bits(self.ebx, 0, 7) as u8
    }

    /// Threads per core: EBX[15:8] + 1.
    // NOTE(review): `as u8 + 1` would overflow if the 8-bit field were 0xff;
    // hardware reports small values here, so left unchanged.
    pub fn threads_per_core(&self) -> u8 {
        get_bits(self.ebx, 8, 15) as u8 + 1
    }

    /// Node ID (ECX[7:0]).
    pub fn node_id(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// Nodes per processor: ECX[10:8] + 1.
    pub fn nodes_per_processor(&self) -> u8 {
        get_bits(self.ecx, 8, 10) as u8 + 1
    }
}
impl Debug for ProcessorTopologyInfo {
    /// Shows the decoded topology values; field order is fixed by this chain.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut ds = f.debug_struct("ProcessorTopologyInfo");
        ds.field("x2apic_id", &self.x2apic_id());
        ds.field("core_id", &self.core_id());
        ds.field("threads_per_core", &self.threads_per_core());
        ds.field("node_id", &self.node_id());
        ds.field("nodes_per_processor", &self.nodes_per_processor());
        ds.finish()
    }
}
/// Encrypted memory capabilities — SME/SEV (LEAF=0x8000_001F).
#[derive(Debug, PartialEq, Eq)]
pub struct MemoryEncryptionInfo {
    eax: MemoryEncryptionInfoEax,
    ebx: u32, // C-bit position / address reduction fields
    ecx: u32, // number of encrypted guests supported
    edx: u32, // minimum SEV-enabled, SEV-ES-disabled ASID
}
impl MemoryEncryptionInfo {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: MemoryEncryptionInfoEax::from_bits_truncate(data.eax),
ebx: data.ebx,
ecx: data.ecx,
edx: data.edx,
}
}
pub fn has_sme(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SME)
}
pub fn has_sev(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV)
}
pub fn has_page_flush_msr(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::PAGE_FLUSH_MSR)
}
pub fn has_sev_es(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV_ES)
}
pub fn has_sev_snp(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::SEV_SNP)
}
pub fn has_vmpl(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::VMPL)
}
pub fn has_hw_enforced_cache_coh(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::HWENFCACHECOH)
}
pub fn has_64bit_mode(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::HOST64)
}
pub fn has_restricted_injection(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::RESTINJECT)
}
pub fn has_alternate_injection(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::ALTINJECT)
}
pub fn has_debug_swap(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::DBGSWP)
}
pub fn has_prevent_host_ibs(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::PREVHOSTIBS)
}
pub fn has_vte(&self) -> bool {
self.eax.contains(MemoryEncryptionInfoEax::VTE)
}
pub fn c_bit_position(&self) -> u8 {
get_bits(self.ebx, 0, 5) as u8
}
pub fn physical_address_reduction(&self) -> u8 {
get_bits(self.ebx, 6, 11) as u8
}
pub fn max_encrypted_guests(&self) -> u32 {
self.ecx
}
pub fn min_sev_no_es_asid(&self) -> u32 {
self.edx
}
}
bitflags! {
    /// EAX capability bits of leaf 0x8000_001F.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct MemoryEncryptionInfoEax: u32 {
        const SME = 1 << 0; // secure memory encryption
        const SEV = 1 << 1; // secure encrypted virtualization
        const PAGE_FLUSH_MSR = 1 << 2;
        const SEV_ES = 1 << 3; // SEV encrypted state
        const SEV_SNP = 1 << 4; // SEV secure nested paging
        const VMPL = 1 << 5; // VM permission levels
        const HWENFCACHECOH = 1 << 10; // hw-enforced cache coherency
        const HOST64 = 1 << 11; // 64-bit host required
        const RESTINJECT = 1 << 12; // restricted injection
        const ALTINJECT = 1 << 13; // alternate injection
        const DBGSWP = 1 << 14; // debug state swap
        const PREVHOSTIBS = 1 << 15; // prevent host IBS
        const VTE = 1 << 16; // virtual transparent encryption
    }
}
/// PQOS extended features (LEAF=0x8000_0020, subleaf 0).
///
/// Keeps the `CpuIdReader` so the per-feature subleaves (1, 2, 3, 5) can be
/// queried lazily by the accessor methods.
#[derive(PartialEq, Eq)]
pub struct PqosExtendedFeatureInfo<R: CpuIdReader> {
    read: R,
    _eax: u32, // reserved
    ebx: PqosExtendedFeatureInfoEbx,
    _ecx: u32, // reserved
    _edx: u32, // reserved
}
impl<R: CpuIdReader> PqosExtendedFeatureInfo<R> {
pub(crate) fn new(read: R) -> Self {
let data = read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 0);
Self {
read,
_eax: data.eax,
ebx: PqosExtendedFeatureInfoEbx::from_bits_truncate(data.ebx),
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_l3mbe(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::L3MBE)
}
pub fn has_l3smbe(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::L3SMBE)
}
pub fn has_bmec(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::BMEC)
}
pub fn has_l3rr(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::L3RR)
}
pub fn has_abmc(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::ABMC)
}
pub fn has_sdciae(&self) -> bool {
self.ebx.contains(PqosExtendedFeatureInfoEbx::SDCIAE)
}
pub fn get_l3_memory_bandwidth_enforcement_info(
&self,
) -> Option<L3MemoryBandwidthEnforcementInformation> {
if self.has_l3mbe() {
Some(L3MemoryBandwidthEnforcementInformation::new(
self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 1),
))
} else {
None
}
}
pub fn get_l3_slow_memory_bandwidth_enforcement_info(
&self,
) -> Option<L3MemoryBandwidthEnforcementInformation> {
if self.has_l3smbe() {
Some(L3MemoryBandwidthEnforcementInformation::new(
self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 2),
))
} else {
None
}
}
pub fn get_bandwidth_monitoring_event_counters_info(
&self,
) -> Option<BandwidthMonitoringEventCounters> {
if self.has_bmec() {
Some(BandwidthMonitoringEventCounters::new(
self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 3),
))
} else {
None
}
}
pub fn get_assignable_bandwidth_monitoring_counters_info(
&self,
) -> Option<AssignableBandwidthMonitoringCounterInfo> {
if self.has_abmc() {
Some(AssignableBandwidthMonitoringCounterInfo::new(
self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 5),
))
} else {
None
}
}
}
bitflags! {
    /// EBX feature bits of leaf 0x8000_0020, subleaf 0.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PqosExtendedFeatureInfoEbx: u32 {
        const L3MBE = 1 << 1; // L3 memory bandwidth enforcement
        const L3SMBE = 1 << 2; // L3 slow-memory bandwidth enforcement
        const BMEC = 1 << 3; // bandwidth monitoring event configuration
        const L3RR = 1 << 4; // L3 range reservation
        const ABMC = 1 << 5; // assignable bandwidth monitoring counters
        const SDCIAE = 1 << 6; // smart data cache injection alloc. enforcement
    }
}
bitflags! {
    /// EBX bits of leaf 0x8000_0020, subleaf 5 (ABMC).
    // NOTE(review): not referenced in this part of the file —
    // `AssignableBandwidthMonitoringCounterInfo::has_select_cos` tests the
    // raw bit instead. Presumably used elsewhere; verify before removing.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PqosExtendedFeatureInfoEbx5: u32 {
        const SELECT_COS = 1 << 0; // counters assignable per class of service
    }
}
impl<R: CpuIdReader> Debug for PqosExtendedFeatureInfo<R> {
    /// Shows the decoded subleaf-0 feature bits; the reader is omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let mut ds = f.debug_struct("PqosExtendedFeatureInfo");
        ds.field("has_l3mbe", &self.has_l3mbe());
        ds.field("has_l3smbe", &self.has_l3smbe());
        ds.field("has_bmec", &self.has_bmec());
        ds.field("has_l3rr", &self.has_l3rr());
        ds.field("has_abmc", &self.has_abmc());
        ds.field("has_sdciae", &self.has_sdciae());
        ds.finish()
    }
}
/// L3 (slow-)memory bandwidth enforcement info (LEAF=0x8000_0020, subleaf 1/2).
#[derive(PartialEq, Eq, Debug)]
pub struct L3MemoryBandwidthEnforcementInformation {
    eax: u32, // bandwidth-enforcement bitmask length
    _ebx: u32, // reserved
    _ecx: u32, // reserved
    edx: u32, // highest supported class of service
}
impl L3MemoryBandwidthEnforcementInformation {
    /// Captures the raw subleaf registers.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            _ebx: data.ebx,
            _ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Length of the bandwidth-enforcement bitmask (raw EAX).
    pub fn bandwidth_length(&self) -> u32 {
        self.eax
    }

    /// Highest supported class of service (raw EDX).
    pub fn cos_max(&self) -> u32 {
        self.edx
    }
}
/// Bandwidth monitoring event counters (LEAF=0x8000_0020, subleaf 3).
#[derive(PartialEq, Eq, Debug)]
pub struct BandwidthMonitoringEventCounters {
    _eax: u32, // reserved
    ebx: u32, // event count field
    ecx: BandwidthMonitoringEventCountersEcx,
    _edx: u32, // reserved
}
impl BandwidthMonitoringEventCounters {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
_eax: data.eax,
ebx: data.ebx,
ecx: BandwidthMonitoringEventCountersEcx::from_bits_truncate(data.ecx),
_edx: data.edx,
}
}
pub fn number_events(&self) -> u32 {
get_bits(self.ebx, 0, 7)
}
pub fn has_l3_cache_lcl_bw_fill_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_BW_FILL_MON)
}
pub fn has_l3_cache_rmt_bw_fill_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_BW_FILL_MON)
}
pub fn has_l3_cache_lcl_bw_nt_wr_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_BW_NT_WR_MON)
}
pub fn has_l3_cache_rmt_bw_nt_wr_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_BW_NT_WR_MON)
}
pub fn has_l3_cache_lcl_slow_bw_fill_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_SLOW_BW_FILL_MON)
}
pub fn has_l3_cache_rmt_slow_bw_fill_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_SLOW_BW_FILL_MON)
}
pub fn has_l3_cache_vic_mon(&self) -> bool {
self.ecx
.contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_VIC_MON)
}
}
bitflags! {
    /// ECX event bits of leaf 0x8000_0020, subleaf 3.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct BandwidthMonitoringEventCountersEcx: u32 {
        const L3_CACHE_LCL_BW_FILL_MON = 1 << 0; // local fill bandwidth
        const L3_CACHE_RMT_BW_FILL_MON = 1 << 1; // remote fill bandwidth
        const L3_CACHE_LCL_BW_NT_WR_MON = 1 << 2; // local NT-write bandwidth
        const L3_CACHE_RMT_BW_NT_WR_MON = 1 << 3; // remote NT-write bandwidth
        const L3_CACHE_LCL_SLOW_BW_FILL_MON = 1 << 4; // local slow-mem fill
        const L3_CACHE_RMT_SLOW_BW_FILL_MON = 1 << 5; // remote slow-mem fill
        const L3_CACHE_VIC_MON = 1 << 6; // victim monitoring
    }
}
/// Assignable bandwidth monitoring (ABMC) counter information.
///
/// Decodes counter width/overflow (EAX), the maximum counter index (EBX),
/// and COS-selection support (ECX); EDX is retained but unused.
#[derive(PartialEq, Eq, Debug)]
pub struct AssignableBandwidthMonitoringCounterInfo {
    eax: u32,  // bits 0..=7 counter size, bit 8 overflow-bit support
    ebx: u32,  // bits 0..=15 exposed via max_abmc()
    ecx: u32,  // bit 0 exposed via has_select_cos()
    _edx: u32, // reserved/unused by this struct
}
impl AssignableBandwidthMonitoringCounterInfo {
    /// Wraps the raw register values of the corresponding CPUID sub-leaf.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// Counter size (EAX bits 0..=7).
    pub fn counter_size(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Whether an overflow bit is supported (EAX bit 8).
    pub fn has_overflow_bit(&self) -> bool {
        self.eax & (1 << 8) != 0
    }

    /// Maximum ABMC counter number (EBX bits 0..=15).
    pub fn max_abmc(&self) -> u16 {
        get_bits(self.ebx, 0, 15) as u16
    }

    /// Whether class-of-service selection is supported (ECX bit 0).
    pub fn has_select_cos(&self) -> bool {
        self.ecx & 0x1 != 0
    }
}
/// Extended feature identification 2.
///
/// Decodes the EAX feature flags and the EBX microcode-patch-size field;
/// ECX and EDX are retained but unused.
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedFeatureIdentification2 {
    eax: ExtendedFeatureIdentification2Eax, // feature flags
    ebx: u32,  // bits 0..=11 exposed via microcode_patch_size()
    _ecx: u32, // reserved/unused by this struct
    _edx: u32, // reserved/unused by this struct
}
impl ExtendedFeatureIdentification2 {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: ExtendedFeatureIdentification2Eax::from_bits_truncate(data.eax),
ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_no_nested_data_bp(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::NO_NESTED_DATA_BP)
}
pub fn has_lfence_always_serializing(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::LFENCE_ALWAYS_SERIALIZING)
}
pub fn has_smm_pg_cfg_lock(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::SMM_PG_CFG_LOCK)
}
pub fn has_null_select_clears_base(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::NULL_SELECT_CLEARS_BASE)
}
pub fn has_upper_address_ignore(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::UPPER_ADDRESS_IGNORE)
}
pub fn has_automatic_ibrs(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::AUTOMATIC_IBRS)
}
pub fn has_no_smm_ctl_msr(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::NO_SMM_CTL_MSR)
}
pub fn has_prefetch_ctl_msr(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::PREFETCH_CTL_MSR)
}
pub fn has_cpuid_user_dis(&self) -> bool {
self.eax
.contains(ExtendedFeatureIdentification2Eax::CPUID_USER_DIS)
}
pub fn microcode_patch_size(&self) -> u16 {
get_bits(self.ebx, 0, 11) as u16
}
}
bitflags! {
    // EAX feature flags of the extended-feature-identification-2 leaf.
    // Bit positions are sparse; unlisted bits are reserved.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFeatureIdentification2Eax: u32 {
        /// Bit 0: no nested data breakpoints.
        const NO_NESTED_DATA_BP = 1 << 0;
        /// Bit 2: LFENCE is always dispatch-serializing.
        const LFENCE_ALWAYS_SERIALIZING = 1 << 2;
        /// Bit 3: SMM page configuration lock.
        const SMM_PG_CFG_LOCK = 1 << 3;
        /// Bit 6: null segment selector load clears the base.
        const NULL_SELECT_CLEARS_BASE = 1 << 6;
        /// Bit 7: upper address ignore.
        const UPPER_ADDRESS_IGNORE = 1 << 7;
        /// Bit 8: automatic IBRS.
        const AUTOMATIC_IBRS = 1 << 8;
        /// Bit 9: SMM_CTL MSR is not present.
        const NO_SMM_CTL_MSR = 1 << 9;
        /// Bit 13: prefetch control MSR.
        const PREFETCH_CTL_MSR = 1 << 13;
        /// Bit 17: CPUID disable for user-mode software.
        const CPUID_USER_DIS = 1 << 17;
    }
}
/// Extended performance monitoring and debug information.
///
/// Decodes the EAX capability flags and the EBX counter/LBR size fields;
/// ECX and EDX are retained but unused.
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedPerformanceMonitoringDebug {
    eax: ExtendedPerformanceMonitoringDebugEax, // capability flags
    ebx: u32,  // counter counts and LBR stack size bit-fields
    _ecx: u32, // reserved/unused by this struct
    _edx: u32, // reserved/unused by this struct
}
impl ExtendedPerformanceMonitoringDebug {
pub(crate) fn new(data: CpuIdResult) -> Self {
Self {
eax: ExtendedPerformanceMonitoringDebugEax::from_bits_truncate(data.eax),
ebx: data.ebx,
_ecx: data.ecx,
_edx: data.edx,
}
}
pub fn has_perf_mon_v2(&self) -> bool {
self.eax
.contains(ExtendedPerformanceMonitoringDebugEax::PERF_MON_V2)
}
pub fn has_lbr_stack(&self) -> bool {
self.eax
.contains(ExtendedPerformanceMonitoringDebugEax::LBR_STACK)
}
pub fn has_lbr_and_pmc_freeze(&self) -> bool {
self.eax
.contains(ExtendedPerformanceMonitoringDebugEax::LBR_AND_PMC_FREEZE)
}
pub fn num_perf_ctr_core(&self) -> u8 {
get_bits(self.ebx, 0, 3) as u8
}
pub fn num_lbr_stack_size(&self) -> u8 {
get_bits(self.ebx, 4, 9) as u8
}
pub fn num_perf_ctr_nb(&self) -> u8 {
get_bits(self.ebx, 10, 15) as u8
}
}
bitflags! {
    // EAX capability flags of the extended performance-monitoring/debug leaf.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedPerformanceMonitoringDebugEax: u32 {
        /// Bit 0: performance monitoring version 2.
        const PERF_MON_V2 = 1 << 0;
        /// Bit 1: last-branch-record stack.
        const LBR_STACK = 1 << 1;
        /// Bit 2: LBR and PMC freeze.
        const LBR_AND_PMC_FREEZE = 1 << 2;
    }
}
/// Multi-key encrypted memory capabilities.
///
/// Decodes the EAX capability flags and the EBX maximum-key-ID field;
/// ECX and EDX are retained but unused.
#[derive(PartialEq, Eq, Debug)]
pub struct MultiKeyEncryptedMemoryCapabilities {
    eax: MultiKeyEncryptedMemoryCapabilitiesEax, // capability flags
    ebx: u32,  // bits 0..=15 exposed via max_mem_hmk_encr_key_id()
    _ecx: u32, // reserved/unused by this struct
    _edx: u32, // reserved/unused by this struct
}
impl MultiKeyEncryptedMemoryCapabilities {
    /// Wraps the raw register values of the corresponding CPUID leaf.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: MultiKeyEncryptedMemoryCapabilitiesEax::from_bits_truncate(data.eax),
            ebx: data.ebx,
            _ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// Secure host multi-key memory (MEM-HMK) encryption supported (EAX bit 0).
    pub fn has_mem_hmk(&self) -> bool {
        let flags = self.eax;
        flags.contains(MultiKeyEncryptedMemoryCapabilitiesEax::MEM_HMK)
    }

    /// Maximum MEM-HMK encryption key identifier (EBX bits 0..=15).
    pub fn max_mem_hmk_encr_key_id(&self) -> u16 {
        get_bits(self.ebx, 0, 15) as u16
    }
}
bitflags! {
    // EAX capability flags of the multi-key encrypted memory leaf.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct MultiKeyEncryptedMemoryCapabilitiesEax: u32 {
        /// Bit 0: secure host multi-key memory (MEM-HMK) encryption.
        const MEM_HMK = 1 << 0;
    }
}
/// Iterator over the levels of the extended CPU topology leaf.
///
/// Each call to `next` queries `EAX_EXTENDED_CPU_TOPOLOGY` with an
/// increasing sub-leaf index until a `Reserved` level type is returned.
#[derive(Clone)]
pub struct ExtendedCpuTopologyIter<R: CpuIdReader> {
    read: R,    // CPUID reader used to query each sub-leaf
    level: u32, // next sub-leaf index (ECX input) to query
}
impl<R: CpuIdReader> ExtendedCpuTopologyIter<R> {
pub fn new(read: R) -> Self {
Self { read, level: 0 }
}
}
/// One level of the extended CPU topology hierarchy.
///
/// Holds the raw register values of a single topology sub-leaf; the
/// accessor methods decode the individual bit-fields.
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedCpuTopologyLevel {
    eax: u32, // mask width and topology property bits 29..=31
    ebx: u32, // logical processor count, ranking, native-mode id, core type
    ecx: u32, // echoed input ECX and level type
    edx: u32, // extended APIC id
}
impl ExtendedCpuTopologyLevel {
    /// Wraps the raw register values of one topology sub-leaf.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Number of APIC-id bits to shift out to reach the next level
    /// (EAX bits 0..=4).
    pub fn mask_width(&self) -> u8 {
        get_bits(self.eax, 0, 4) as u8
    }

    /// Power-efficiency ranking information is available (EAX bit 29).
    pub fn has_efficiency_ranking_available(&self) -> bool {
        (self.eax >> 29) & 1 == 1
    }

    /// The processor has heterogeneous cores (EAX bit 30).
    pub fn has_heterogeneous_cores(&self) -> bool {
        (self.eax >> 30) & 1 == 1
    }

    /// The topology is asymmetric (EAX bit 31).
    pub fn has_asymmetric_topology(&self) -> bool {
        (self.eax >> 31) & 1 == 1
    }

    /// Number of logical processors at this level (EBX bits 0..=15).
    pub fn num_logical_processors(&self) -> u16 {
        get_bits(self.ebx, 0, 15) as u16
    }

    /// Power-efficiency ranking (EBX bits 16..=23).
    pub fn pwr_efficiency_ranking(&self) -> u8 {
        get_bits(self.ebx, 16, 23) as u8
    }

    /// Native model id (EBX bits 24..=27).
    pub fn native_mode_id(&self) -> u8 {
        get_bits(self.ebx, 24, 27) as u8
    }

    /// Core type (EBX bits 28..=31).
    pub fn core_type(&self) -> u8 {
        get_bits(self.ebx, 28, 31) as u8
    }

    /// Echo of the sub-leaf index used to query this level (ECX bits 0..=7).
    pub fn input_ecx(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// Type of this hierarchy level (ECX bits 8..=15).
    pub fn level_type(&self) -> HierarchyLevelType {
        let raw = get_bits(self.ecx, 8, 15) as u8;
        HierarchyLevelType::from(raw)
    }

    /// Extended APIC id of the current logical processor (EDX).
    pub fn extended_apic_id(&self) -> u32 {
        self.edx
    }
}
impl<R: CpuIdReader> Iterator for ExtendedCpuTopologyIter<R> {
    type Item = ExtendedCpuTopologyLevel;

    /// Queries the next topology sub-leaf; enumeration ends when the
    /// CPU reports a `Reserved` level type for the sub-leaf.
    fn next(&mut self) -> Option<ExtendedCpuTopologyLevel> {
        let leaf = self.read.cpuid2(EAX_EXTENDED_CPU_TOPOLOGY, self.level);
        self.level += 1;
        match ExtendedCpuTopologyLevel::new(leaf) {
            level if level.level_type() == HierarchyLevelType::Reserved => None,
            level => Some(level),
        }
    }
}
/// Type of a level in the extended CPU topology hierarchy
/// (decoded from ECX bits 8..=15 of the topology sub-leaf).
#[repr(u8)]
#[derive(PartialEq, Eq)]
pub enum HierarchyLevelType {
    /// Encoding 0; a reserved level terminates topology enumeration.
    Reserved = 0,
    /// Encoding 1: core level.
    Core = 1,
    /// Encoding 2: complex level.
    Complex = 2,
    /// Encoding 3: die level.
    Die = 3,
    /// Encoding 4: socket level.
    Socket = 4,
    /// Any encoding (> 4) not known to this library.
    Unknown(u8),
}
impl From<u8> for HierarchyLevelType {
fn from(value: u8) -> Self {
match value {
0 => Self::Reserved,
1 => Self::Core,
2 => Self::Complex,
3 => Self::Die,
4 => Self::Socket,
x => Self::Unknown(x),
}
}
}
impl Display for HierarchyLevelType {
    /// Human-readable name plus raw encoding, e.g. `Core (1)`.
    ///
    /// Fix: the `Die` variant previously printed `"DIE (3)"`, which was
    /// inconsistent with the capitalization of every other variant and
    /// with this type's `Debug` output (`"Die"`).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            HierarchyLevelType::Reserved => write!(f, "Reserved (0)"),
            HierarchyLevelType::Core => write!(f, "Core (1)"),
            HierarchyLevelType::Complex => write!(f, "Complex (2)"),
            HierarchyLevelType::Die => write!(f, "Die (3)"),
            HierarchyLevelType::Socket => write!(f, "Socket (4)"),
            HierarchyLevelType::Unknown(x) => write!(f, "Unknown ({x})"),
        }
    }
}
impl Debug for HierarchyLevelType {
    /// Variant name only; `Unknown` is rendered as a tuple with its payload.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Unknown(raw) => return f.debug_tuple("Unknown").field(raw).finish(),
            Self::Reserved => "Reserved",
            Self::Core => "Core",
            Self::Complex => "Complex",
            Self::Die => "Die",
            Self::Socket => "Socket",
        };
        f.write_str(name)
    }
}