raw_cpuid/
extended.rs

1//! Data-structures / interpretation for extended leafs (>= 0x8000_0000)
2use bitflags::bitflags;
3use core::fmt::{self, Debug, Display, Formatter};
4use core::mem::size_of;
5use core::slice;
6use core::str;
7
8use crate::{
9    get_bits, CpuIdReader, CpuIdResult, Vendor, EAX_EXTENDED_CPU_TOPOLOGY,
10    EAX_PQOS_EXTENDED_FEATURES,
11};
12
/// Extended Processor and Processor Feature Identifiers (LEAF=0x8000_0001)
///
/// # Platforms
/// ✅ AMD 🟡 Intel
pub struct ExtendedProcessorFeatureIdentifiers {
    // Recorded so AMD-only accessors can return `false` on other vendors.
    vendor: Vendor,
    // Raw EAX: extended processor signature.
    eax: u32,
    // Raw EBX: brand id / package type fields (AMD only).
    ebx: u32,
    // Decoded ECX feature flags.
    ecx: ExtendedFunctionInfoEcx,
    // Decoded EDX feature flags.
    edx: ExtendedFunctionInfoEdx,
}
24
impl ExtendedProcessorFeatureIdentifiers {
    /// Decode the raw registers of LEAF=0x8000_0001.
    ///
    /// Reserved/unknown bits in ECX and EDX are silently dropped by
    /// `from_bits_truncate`.
    pub(crate) fn new(vendor: Vendor, data: CpuIdResult) -> Self {
        Self {
            vendor,
            eax: data.eax,
            ebx: data.ebx,
            ecx: ExtendedFunctionInfoEcx::from_bits_truncate(data.ecx),
            edx: ExtendedFunctionInfoEdx::from_bits_truncate(data.edx),
        }
    }

    /// Extended Processor Signature.
    ///
    /// # AMD
    /// The value returned is the same as the value returned in EAX for LEAF=0x0000_0001
    /// (use `CpuId.get_feature_info` instead)
    ///
    /// # Intel
    /// Vague mention of "Extended Processor Signature", not clear what it's supposed to
    /// represent.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn extended_signature(&self) -> u32 {
        self.eax
    }

    /// Returns package type on AMD.
    ///
    /// Package type. If `(Family[7:0] >= 10h)`, this field is valid. If
    /// `(Family[7:0]<10h)`, this field is reserved
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved)
    pub fn pkg_type(&self) -> u32 {
        get_bits(self.ebx, 28, 31)
    }

    /// Returns brand ID on AMD.
    ///
    /// This field, in conjunction with CPUID `LEAF=0x0000_0001_EBX[8BitBrandId]`, is used
    /// by firmware to generate the processor name string.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved)
    pub fn brand_id(&self) -> u32 {
        get_bits(self.ebx, 0, 15)
    }

    /// Is LAHF/SAHF available in 64-bit mode?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_lahf_sahf(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::LAHF_SAHF)
    }

    /// Check support for CMP legacy mode (CmpLegacy).
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_cmp_legacy(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::CMP_LEGACY)
    }

    /// Secure virtual machine supported.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_svm(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SVM)
    }

    /// Extended APIC space.
    ///
    /// This bit indicates the presence of extended APIC register space starting at offset
    /// 400h from the "APIC Base Address Register", as specified in the BKDG.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_ext_apic_space(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::EXT_APIC_SPACE)
    }

    /// LOCK MOV CR0 means MOV CR8. See "MOV(CRn)" in APM3.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_alt_mov_cr8(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ALTMOVCR8)
    }

    /// Is LZCNT available?
    ///
    /// # AMD
    /// It's called ABM (Advanced bit manipulation) on AMD and also adds support for
    /// some other instructions.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_lzcnt(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::LZCNT)
    }

    /// XTRQ, INSERTQ, MOVNTSS, and MOVNTSD instruction support.
    ///
    /// See "EXTRQ", "INSERTQ", "MOVNTSS", and "MOVNTSD" in APM4.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_sse4a(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SSE4A)
    }

    /// Misaligned SSE mode. See "Misaligned Access Support Added for SSE Instructions" in
    /// APM1.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_misaligned_sse_mode(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MISALIGNSSE)
    }

    /// Is PREFETCHW available?
    ///
    /// # AMD
    /// PREFETCH and PREFETCHW instruction support.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_prefetchw(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::PREFETCHW)
    }

    /// Indicates OS-visible workaround support
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_osvw(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::OSVW)
    }

    /// Instruction based sampling.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_ibs(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::IBS)
    }

    /// Extended operation support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_xop(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::XOP)
    }

    /// SKINIT and STGI are supported.
    ///
    /// Indicates support for SKINIT and STGI, independent of the value of
    /// `MSRC000_0080[SVME]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_skinit(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SKINIT)
    }

    /// Watchdog timer support.
    ///
    /// Indicates support for MSRC001_0074.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_wdt(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::WDT)
    }

    /// Lightweight profiling support
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_lwp(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::LWP)
    }

    /// Four-operand FMA instruction support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_fma4(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::FMA4)
    }

    /// Trailing bit manipulation instruction support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_tbm(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TBM)
    }

    /// Topology extensions support.
    ///
    /// Indicates support for CPUID `Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_topology_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TOPEXT)
    }

    /// Processor performance counter extensions support.
    ///
    /// Indicates support for `MSRC001_020[A,8,6,4,2,0]` and `MSRC001_020[B,9,7,5,3,1]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_cntr_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXT)
    }

    /// NB performance counter extensions support.
    ///
    /// Indicates support for `MSRC001_024[6,4,2,0]` and `MSRC001_024[7,5,3,1]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_nb_perf_cntr_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTNB)
    }

    /// Data access breakpoint extension.
    ///
    /// Indicates support for `MSRC001_1027` and `MSRC001_101[B:9]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_data_access_bkpt_extension(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::DATABRKPEXT)
    }

    /// Performance time-stamp counter.
    ///
    /// Indicates support for `MSRC001_0280` `[Performance Time Stamp Counter]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_tsc(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFTSC)
    }

    /// Support for L3 performance counter extension.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_cntr_llc_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTLLC)
    }

    /// Support for MWAITX and MONITORX instructions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_monitorx_mwaitx(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MONITORX)
    }

    /// Breakpoint Addressing masking extended to bit 31.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_addr_mask_extension(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ADDRMASKEXT)
    }

    /// Are fast system calls available.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_syscall_sysret(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::SYSCALL_SYSRET)
    }

    /// Is there support for execute disable bit.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_execute_disable(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::EXECUTE_DISABLE)
    }

    /// AMD extensions to MMX instructions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_mmx_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::MMXEXT)
    }

    /// FXSAVE and FXRSTOR instruction optimizations.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_fast_fxsave_fxstor(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::FFXSR)
    }

    /// Is there support for 1GiB pages.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_1gib_pages(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::GIB_PAGES)
    }

    /// Check support for rdtscp instruction.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_rdtscp(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::RDTSCP)
    }

    /// Check support for 64-bit mode.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_64bit_mode(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::I64BIT_MODE)
    }

    /// 3DNow AMD extensions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_amd_3dnow_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOWEXT)
    }

    /// 3DNow extensions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_3dnow(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOW)
    }
}
374
375impl Debug for ExtendedProcessorFeatureIdentifiers {
376    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
377        let mut ds = f.debug_struct("ExtendedProcessorFeatureIdentifiers");
378        ds.field("extended_signature", &self.extended_signature());
379
380        if self.vendor == Vendor::Amd {
381            ds.field("pkg_type", &self.pkg_type());
382            ds.field("brand_id", &self.brand_id());
383        }
384        ds.field("ecx_features", &self.ecx);
385        ds.field("edx_features", &self.edx);
386        ds.finish()
387    }
388}
389
bitflags! {
    // CPUID LEAF=0x8000_0001 ECX feature bits. Gaps in the bit positions
    // (bit 14, bits 17-20, bit 25, bit 31) are reserved and not modeled.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEcx: u32 {
        /// LAHF/SAHF in 64-bit mode.
        const LAHF_SAHF = 1 << 0;
        /// Core multi-processing legacy mode (AMD).
        const CMP_LEGACY = 1 << 1;
        /// Secure virtual machine (AMD).
        const SVM = 1 << 2;
        /// Extended APIC register space (AMD).
        const EXT_APIC_SPACE = 1 << 3;
        /// LOCK MOV CR0 means MOV CR8 (AMD).
        const ALTMOVCR8 = 1 << 4;
        /// LZCNT (ABM on AMD).
        const LZCNT = 1 << 5;
        /// EXTRQ/INSERTQ/MOVNTSS/MOVNTSD (AMD).
        const SSE4A = 1 << 6;
        /// Misaligned SSE mode (AMD).
        const MISALIGNSSE = 1 << 7;
        /// PREFETCH/PREFETCHW.
        const PREFETCHW = 1 << 8;
        /// OS-visible workaround (AMD).
        const OSVW = 1 << 9;
        /// Instruction based sampling (AMD).
        const IBS = 1 << 10;
        /// Extended operation support (AMD).
        const XOP = 1 << 11;
        /// SKINIT/STGI (AMD).
        const SKINIT = 1 << 12;
        /// Watchdog timer (AMD).
        const WDT = 1 << 13;
        /// Lightweight profiling (AMD).
        const LWP = 1 << 15;
        /// Four-operand FMA (AMD).
        const FMA4 = 1 << 16;
        /// Trailing bit manipulation (AMD).
        const TBM = 1 << 21;
        /// Topology extensions (AMD).
        const TOPEXT = 1 << 22;
        /// Core performance counter extensions (AMD).
        const PERFCTREXT = 1 << 23;
        /// NB performance counter extensions (AMD).
        const PERFCTREXTNB = 1 << 24;
        /// Data access breakpoint extension (AMD).
        const DATABRKPEXT = 1 << 26;
        /// Performance time-stamp counter (AMD).
        const PERFTSC = 1 << 27;
        /// L3/LLC performance counter extensions (AMD).
        const PERFCTREXTLLC = 1 << 28;
        /// MONITORX/MWAITX (AMD).
        const MONITORX = 1 << 29;
        /// Breakpoint address masking extended to bit 31 (AMD).
        const ADDRMASKEXT = 1 << 30;
    }
}
421
bitflags! {
    // CPUID LEAF=0x8000_0001 EDX feature bits. Only the bits this crate
    // exposes are modeled; all other positions are left undeclared.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEdx: u32 {
        /// SYSCALL/SYSRET support.
        const SYSCALL_SYSRET = 1 << 11;
        /// Execute-disable (NX) bit.
        const EXECUTE_DISABLE = 1 << 20;
        /// AMD MMX extensions.
        const MMXEXT = 1 << 22;
        /// Fast FXSAVE/FXRSTOR (AMD).
        const FFXSR = 1 << 25;
        /// 1-GiB page support.
        const GIB_PAGES = 1 << 26;
        /// RDTSCP instruction.
        const RDTSCP = 1 << 27;
        /// Long mode (64-bit) support.
        const I64BIT_MODE = 1 << 29;
        /// AMD 3DNow! extensions.
        const THREEDNOWEXT = 1 << 30;
        /// 3DNow! support.
        const THREEDNOW = 1 << 31;
    }
}
437
/// Processor name (LEAF=0x8000_0002..=0x8000_0004).
///
/// ASCII string up to 48 characters in length corresponding to the processor name.
///
/// # Platforms
/// ✅ AMD ✅ Intel
pub struct ProcessorBrandString {
    // Raw register values of the three brand-string leaves, in leaf order;
    // together they hold the 48 ASCII bytes of the name.
    data: [CpuIdResult; 3],
}
447
impl ProcessorBrandString {
    /// Wrap the raw results of leaves 0x8000_0002..=0x8000_0004.
    pub(crate) fn new(data: [CpuIdResult; 3]) -> Self {
        Self { data }
    }

    /// Return the processor brand string as a rust string.
    ///
    /// For example:
    /// "11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz".
    pub fn as_str(&self) -> &str {
        // Safety: CpuIdResult is laid out with repr(C), and the array
        // self.data contains 3 contiguous elements, so reinterpreting it as
        // `3 * size_of::<CpuIdResult>()` bytes stays in bounds (u8 has no
        // alignment requirement).
        // NOTE(review): this reads the register bytes in memory order and so
        // assumes a little-endian target for the ASCII ordering -- confirm
        // if big-endian support is ever required.
        let slice: &[u8] = unsafe {
            slice::from_raw_parts(
                self.data.as_ptr() as *const u8,
                self.data.len() * size_of::<CpuIdResult>(),
            )
        };

        // Brand terminated at nul byte or end, whichever comes first.
        // (`split` always yields at least one subslice, so `next()` can't
        // return None here.)
        let slice = slice.split(|&x| x == 0).next().unwrap();
        str::from_utf8(slice)
            .unwrap_or("Invalid Processor Brand String")
            .trim()
    }
}
474
475impl Debug for ProcessorBrandString {
476    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
477        f.debug_struct("ProcessorBrandString")
478            .field("as_str", &self.as_str())
479            .finish()
480    }
481}
482
/// L1 Cache and TLB Information (LEAF=0x8000_0005).
///
/// # Availability
/// ✅ AMD ❌ Intel (reserved=0)
#[derive(PartialEq, Eq, Debug)]
pub struct L1CacheTlbInfo {
    // EAX: 2M/4M page TLB info.
    eax: u32,
    // EBX: 4K page TLB info.
    ebx: u32,
    // ECX: L1 data cache info.
    ecx: u32,
    // EDX: L1 instruction cache info.
    edx: u32,
}
494
impl L1CacheTlbInfo {
    /// Wrap the raw registers of LEAF=0x8000_0005.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Data TLB associativity for 2-MB and 4-MB pages (EAX[31:24]).
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 24, 31) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Data TLB number of entries for 2-MB and 4-MB pages (EAX[23:16]).
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    pub fn dtlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 16, 23) as u8
    }

    /// Instruction TLB associativity for 2-MB and 4-MB pages (EAX[15:8]).
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 8, 15) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Instruction TLB number of entries for 2-MB and 4-MB pages (EAX[7:0]).
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    pub fn itlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Data TLB associativity for 4K pages (EBX[31:24]).
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 24, 31) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Data TLB number of entries for 4K pages (EBX[23:16]).
    pub fn dtlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 16, 23) as u8
    }

    /// Instruction TLB associativity for 4K pages (EBX[15:8]).
    pub fn itlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 8, 15) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Instruction TLB number of entries for 4K pages (EBX[7:0]).
    pub fn itlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 0, 7) as u8
    }

    /// L1 data cache size in KB (ECX[31:24]).
    pub fn dcache_size(&self) -> u8 {
        get_bits(self.ecx, 24, 31) as u8
    }

    /// L1 data cache associativity (ECX[23:16]).
    pub fn dcache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ecx, 16, 23) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// L1 data cache lines per tag (ECX[15:8]).
    pub fn dcache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 15) as u8
    }

    /// L1 data cache line size in bytes (ECX[7:0]).
    pub fn dcache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L1 instruction cache size in KB (EDX[31:24]).
    pub fn icache_size(&self) -> u8 {
        get_bits(self.edx, 24, 31) as u8
    }

    /// L1 instruction cache associativity (EDX[23:16]).
    pub fn icache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.edx, 16, 23) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// L1 instruction cache lines per tag (EDX[15:8]).
    pub fn icache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 15) as u8
    }

    /// L1 instruction cache line size in bytes (EDX[7:0]).
    pub fn icache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }
}
599
/// L2/L3 Cache and TLB Information (LEAF=0x8000_0006).
///
/// # Availability
/// ✅ AMD 🟡 Intel
#[derive(PartialEq, Eq, Debug)]
pub struct L2And3CacheTlbInfo {
    // EAX: L2 TLB info for 2M/4M pages.
    eax: u32,
    // EBX: L2 TLB info for 4K pages.
    ebx: u32,
    // ECX: L2 cache info.
    ecx: u32,
    // EDX: L3 cache info.
    edx: u32,
}
611
impl L2And3CacheTlbInfo {
    /// Wrap the raw registers of LEAF=0x8000_0006.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// L2 Data TLB associativity for 2-MB and 4-MB pages (EAX[31:28]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Data TLB number of entries for 2-MB and 4-MB pages (EAX[27:16]).
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 16, 27) as u16
    }

    /// L2 Instruction TLB associativity for 2-MB and 4-MB pages (EAX[15:12]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Instruction TLB number of entries for 2-MB and 4-MB pages (EAX[11:0]).
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 0, 11) as u16
    }

    /// L2 Data TLB associativity for 4K pages (EBX[31:28]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Data TLB number of entries for 4K pages (EBX[27:16]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 16, 27) as u16
    }

    /// L2 Instruction TLB associativity for 4K pages (EBX[15:12]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Instruction TLB number of entries for 4K pages (EBX[11:0]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 0, 11) as u16
    }

    /// L2 Cache Line size in bytes (ECX[7:0]).
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn l2cache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L2 cache lines per tag (ECX[11:8]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l2cache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 11) as u8
    }

    /// L2 Associativity field (ECX[15:12]).
    ///
    /// # Availability
    /// ✅ AMD ✅ Intel
    pub fn l2cache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ecx, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Cache size in KB (ECX[31:16]).
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn l2cache_size(&self) -> u16 {
        get_bits(self.ecx, 16, 31) as u16
    }

    /// L3 Cache Line size in bytes (EDX[7:0]).
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }

    /// L3 cache lines per tag (EDX[11:8]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 11) as u8
    }

    /// L3 Associativity field (EDX[15:12]).
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.edx, 12, 15) as u8;
        Associativity::for_l3(assoc_bits)
    }

    /// Specifies the L3 cache size range (EDX[31:18]).
    ///
    /// `(L3Size[31:18] * 512KB) <= L3 cache size < ((L3Size[31:18]+1) * 512KB)`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_size(&self) -> u16 {
        get_bits(self.edx, 18, 31) as u16
    }
}
766
/// Info about cache Associativity.
#[derive(PartialEq, Eq, Debug)]
pub enum Associativity {
    /// Cache/TLB is disabled (encoding 0x0).
    Disabled,
    /// Direct-mapped (encoding 0x1).
    DirectMapped,
    /// N-way set associative.
    NWay(u8),
    /// Fully associative.
    FullyAssociative,
    /// Encoding is reserved, or the value must be obtained from leaf
    /// 0x8000_001D instead.
    Unknown,
}
776
777impl Display for Associativity {
778    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
779        let s = match self {
780            Associativity::Disabled => "Disabled",
781            Associativity::DirectMapped => "Direct mapped",
782            Associativity::NWay(n) => {
783                return write!(f, "NWay({})", n);
784            }
785            Associativity::FullyAssociative => "Fully associative",
786            Associativity::Unknown => "Unknown (check leaf 0x8000_001d)",
787        };
788        f.write_str(s)
789    }
790}
791
impl Associativity {
    /// Constructor for L1 Cache and TLB Associativity Field Encodings
    fn for_l1(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled, // Intel only, AMD is reserved
            0x1 => Associativity::DirectMapped,
            // The L1 field encodes the way count directly (2..=254 ways).
            0x2..=0xfe => Associativity::NWay(n),
            0xff => Associativity::FullyAssociative,
        }
    }

    /// Constructor for L2 Cache and TLB Associativity Field Encodings
    fn for_l2(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled,
            0x1 => Associativity::DirectMapped,
            0x2 => Associativity::NWay(2),
            0x4 => Associativity::NWay(4),
            0x5 => Associativity::NWay(6), // Reserved on Intel
            0x6 => Associativity::NWay(8),
            // 0x7 => SDM states: "See CPUID leaf 04H, sub-leaf 2"
            0x8 => Associativity::NWay(16),
            0x9 => Associativity::Unknown, // Intel: Reserved, AMD: Value for all fields should be determined from Fn8000_001D
            0xa => Associativity::NWay(32),
            0xb => Associativity::NWay(48),
            0xc => Associativity::NWay(64),
            0xd => Associativity::NWay(96),
            0xe => Associativity::NWay(128),
            0xF => Associativity::FullyAssociative,
            // 0x3 and 0x7 fall through here (reserved / "see leaf 04H").
            _ => Associativity::Unknown,
        }
    }

    /// Constructor for L3 Cache and TLB Associativity Field Encodings
    /// (the L3 field uses the same encoding as the L2 field).
    fn for_l3(n: u8) -> Associativity {
        Associativity::for_l2(n)
    }
}
830
/// Processor Power Management and RAS Capabilities (LEAF=0x8000_0007).
///
/// # Platforms
/// ✅ AMD 🟡 Intel
#[derive(Debug, PartialEq, Eq)]
pub struct ApmInfo {
    /// Reserved on AMD and Intel.
    _eax: u32,
    // Decoded RAS capability flags (EBX).
    ebx: RasCapabilities,
    // Compute unit power sample time ratio (ECX, AMD only).
    ecx: u32,
    // Decoded power management flags (EDX).
    edx: ApmInfoEdx,
}
843
impl ApmInfo {
    /// Decode the raw registers of LEAF=0x8000_0007.
    ///
    /// Reserved/unknown bits in EBX/EDX are dropped by `from_bits_truncate`.
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            _eax: data.eax,
            ebx: RasCapabilities::from_bits_truncate(data.ebx),
            ecx: data.ecx,
            edx: ApmInfoEdx::from_bits_truncate(data.edx),
        }
    }

    /// Is MCA overflow recovery available?
    ///
    /// If set, indicates that MCA overflow conditions (`MCi_STATUS[Overflow]=1`)
    /// are not fatal; software may safely ignore such conditions. If clear, MCA
    /// overflow conditions require software to shut down the system.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_mca_overflow_recovery(&self) -> bool {
        self.ebx.contains(RasCapabilities::MCAOVFLRECOV)
    }

    /// Has Software uncorrectable error containment and recovery capability?
    ///
    /// The processor supports software containment of uncorrectable errors
    /// through context synchronizing data poisoning and deferred error
    /// interrupts.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_succor(&self) -> bool {
        self.ebx.contains(RasCapabilities::SUCCOR)
    }

    /// Has Hardware assert supported?
    ///
    /// Indicates support for `MSRC001_10[DF:C0]`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_hwa(&self) -> bool {
        self.ebx.contains(RasCapabilities::HWA)
    }

    /// Specifies the ratio of the compute unit power accumulator sample period
    /// to the TSC counter period.
    ///
    /// Returns a value of 0 if not applicable for the system.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn cpu_pwr_sample_time_ratio(&self) -> u32 {
        self.ecx
    }

    /// Is Temperature Sensor available?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_ts(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TS)
    }

    /// Frequency ID control.
    ///
    /// # Note
    /// Function replaced by `has_hw_pstate`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_freq_id_ctrl(&self) -> bool {
        self.edx.contains(ApmInfoEdx::FID)
    }

    /// Voltage ID control.
    ///
    /// # Note
    /// Function replaced by `has_hw_pstate`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_volt_id_ctrl(&self) -> bool {
        self.edx.contains(ApmInfoEdx::VID)
    }

    /// Has THERMTRIP?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_thermtrip(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TTP)
    }

    /// Hardware thermal control (HTC)?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_tm(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TM)
    }

    /// Has 100 MHz multiplier Control?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_100mhz_steps(&self) -> bool {
        self.edx.contains(ApmInfoEdx::MHZSTEPS100)
    }

    /// Has Hardware P-state control?
    ///
    /// MSRC001_0061 [P-state Current Limit], MSRC001_0062 [P-state Control] and
    /// MSRC001_0063 [P-state Status] exist
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_hw_pstate(&self) -> bool {
        self.edx.contains(ApmInfoEdx::HWPSTATE)
    }

    /// Is Invariant TSC available?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_invariant_tsc(&self) -> bool {
        self.edx.contains(ApmInfoEdx::INVTSC)
    }

    /// Has Core performance boost?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_cpb(&self) -> bool {
        self.edx.contains(ApmInfoEdx::CPB)
    }

    /// Has Read-only effective frequency interface?
    ///
    /// Indicates presence of MSRC000_00E7 [Read-Only Max Performance Frequency
    /// Clock Count (MPerfReadOnly)] and MSRC000_00E8 [Read-Only Actual
    /// Performance Frequency Clock Count (APerfReadOnly)].
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_ro_effective_freq_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::EFFFREQRO)
    }

    /// Indicates support for processor feedback interface.
    ///
    /// # Note
    /// This feature is deprecated.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_feedback_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::PROCFEEDBACKIF)
    }

    /// Has Processor power reporting interface?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_power_reporting_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::PROCPWRREPORT)
    }
}
1011
bitflags! {
    /// APM feature bits reported in EDX; read through the `has_*`
    /// accessors above (`has_ts` .. `has_power_reporting_iface`).
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ApmInfoEdx: u32 {
        /// Temperature sensor (see `has_ts`).
        const TS = 1 << 0;
        /// Frequency ID control (see `has_freq_id_ctrl`).
        const FID = 1 << 1;
        /// Voltage ID control (see `has_volt_id_ctrl`).
        const VID = 1 << 2;
        /// THERMTRIP (see `has_thermtrip`).
        const TTP = 1 << 3;
        /// Hardware thermal control, HTC (see `has_tm`).
        const TM = 1 << 4;
        // Bit 5 is deliberately not defined here.
        /// 100 MHz multiplier control (see `has_100mhz_steps`).
        const MHZSTEPS100 = 1 << 6;
        /// Hardware P-state control (see `has_hw_pstate`).
        const HWPSTATE = 1 << 7;
        /// Invariant TSC (see `has_invariant_tsc`).
        const INVTSC = 1 << 8;
        /// Core performance boost (see `has_cpb`).
        const CPB = 1 << 9;
        /// Read-only effective frequency interface (see `has_ro_effective_freq_iface`).
        const EFFFREQRO = 1 << 10;
        /// Processor feedback interface, deprecated (see `has_feedback_iface`).
        const PROCFEEDBACKIF = 1 << 11;
        /// Processor power reporting interface (see `has_power_reporting_iface`).
        const PROCPWRREPORT = 1 << 12;
    }
}
1030
bitflags! {
    /// RAS (Reliability, Availability, Serviceability) capability bits.
    ///
    /// NOTE(review): no accessor in this part of the file reads these flags;
    /// the per-flag meanings below are inferred from the names β€” confirm
    /// against the struct that consumes them.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct RasCapabilities: u32 {
        /// Presumably: MCA overflow recovery support (bit 0).
        const MCAOVFLRECOV = 1 << 0;
        /// Presumably: software uncorrectable error containment and recovery (bit 1).
        const SUCCOR = 1 << 1;
        /// Presumably: hardware assert support (bit 2).
        const HWA = 1 << 2;
    }
}
1040
/// Processor Capacity Parameters and Extended Feature Identification
/// (LEAF=0x8000_0008).
///
/// This function provides the size or capacity of various architectural
/// parameters that vary by implementation, as well as an extension to the
/// 0x8000_0001 feature identifiers.
///
/// # Platforms
/// βœ… AMD 🟑 Intel
#[derive(PartialEq, Eq)]
pub struct ProcessorCapacityAndFeatureInfo {
    /// Raw EAX: address-width fields (physical/linear/guest-physical bits).
    eax: u32,
    /// EBX capability bits (see `ProcessorCapacityAndFeatureEbx`).
    ebx: ProcessorCapacityAndFeatureEbx,
    /// Raw ECX: thread count, APIC ID size, performance TSC size.
    ecx: u32,
    /// Raw EDX: INVLPGB max page count and max RDPRU identifier.
    edx: u32,
}
1057
1058impl ProcessorCapacityAndFeatureInfo {
1059    pub(crate) fn new(data: CpuIdResult) -> Self {
1060        Self {
1061            eax: data.eax,
1062            ebx: ProcessorCapacityAndFeatureEbx::from_bits_truncate(data.ebx),
1063            ecx: data.ecx,
1064            edx: data.edx,
1065        }
1066    }
1067
1068    /// Physical Address Bits
1069    ///
1070    /// # Platforms
1071    /// βœ… AMD βœ… Intel
1072    pub fn physical_address_bits(&self) -> u8 {
1073        get_bits(self.eax, 0, 7) as u8
1074    }
1075
1076    /// Linear Address Bits
1077    ///
1078    /// # Platforms
1079    /// βœ… AMD βœ… Intel
1080    pub fn linear_address_bits(&self) -> u8 {
1081        get_bits(self.eax, 8, 15) as u8
1082    }
1083
1084    /// Guest Physical Address Bits
1085    ///
1086    /// This number applies only to guests using nested paging. When this field
1087    /// is zero, refer to the PhysAddrSize field for the maximum guest physical
1088    /// address size.
1089    ///
1090    /// # Platforms
1091    /// βœ… AMD ❌ Intel (reserved=0)
1092    pub fn guest_physical_address_bits(&self) -> u8 {
1093        get_bits(self.eax, 16, 23) as u8
1094    }
1095
1096    /// CLZERO instruction supported if set.
1097    ///
1098    /// # Platforms
1099    /// βœ… AMD ❌ Intel (reserved=false)
1100    pub fn has_cl_zero(&self) -> bool {
1101        self.ebx.contains(ProcessorCapacityAndFeatureEbx::CLZERO)
1102    }
1103
1104    /// Instruction Retired Counter MSR available if set.
1105    ///
1106    /// # Platforms
1107    /// βœ… AMD ❌ Intel (reserved=false)
1108    pub fn has_inst_ret_cntr_msr(&self) -> bool {
1109        self.ebx
1110            .contains(ProcessorCapacityAndFeatureEbx::INST_RETCNT_MSR)
1111    }
1112
1113    /// FP Error Pointers Restored by XRSTOR if set.
1114    ///
1115    /// # Platforms
1116    /// βœ… AMD ❌ Intel (reserved=false)
1117    pub fn has_restore_fp_error_ptrs(&self) -> bool {
1118        self.ebx
1119            .contains(ProcessorCapacityAndFeatureEbx::RSTR_FP_ERR_PTRS)
1120    }
1121
1122    /// INVLPGB and TLBSYNC instruction supported if set.
1123    ///
1124    /// # Platforms
1125    /// βœ… AMD ❌ Intel (reserved=false)
1126    pub fn has_invlpgb(&self) -> bool {
1127        self.ebx.contains(ProcessorCapacityAndFeatureEbx::INVLPGB)
1128    }
1129
1130    /// RDPRU instruction supported if set.
1131    ///
1132    /// # Platforms
1133    /// βœ… AMD ❌ Intel (reserved=false)
1134    pub fn has_rdpru(&self) -> bool {
1135        self.ebx.contains(ProcessorCapacityAndFeatureEbx::RDPRU)
1136    }
1137
1138    /// MCOMMIT instruction supported if set.
1139    ///
1140    /// # Platforms
1141    /// βœ… AMD ❌ Intel (reserved=false)
1142    pub fn has_mcommit(&self) -> bool {
1143        self.ebx.contains(ProcessorCapacityAndFeatureEbx::MCOMMIT)
1144    }
1145
1146    /// WBNOINVD instruction supported if set.
1147    ///
1148    /// # Platforms
1149    /// βœ… AMD βœ… Intel
1150    pub fn has_wbnoinvd(&self) -> bool {
1151        self.ebx.contains(ProcessorCapacityAndFeatureEbx::WBNOINVD)
1152    }
1153
1154    /// WBINVD/WBNOINVD are interruptible if set.
1155    ///
1156    /// # Platforms
1157    /// βœ… AMD ❌ Intel (reserved=false)
1158    pub fn has_int_wbinvd(&self) -> bool {
1159        self.ebx
1160            .contains(ProcessorCapacityAndFeatureEbx::INT_WBINVD)
1161    }
1162
1163    /// EFER.LMSLE is unsupported if set.
1164    ///
1165    /// # Platforms
1166    /// βœ… AMD ❌ Intel (reserved=false)
1167    pub fn has_unsupported_efer_lmsle(&self) -> bool {
1168        self.ebx
1169            .contains(ProcessorCapacityAndFeatureEbx::EFER_LMSLE_UNSUPP)
1170    }
1171
1172    /// INVLPGB support for invalidating guest nested translations if set.
1173    ///
1174    /// # Platforms
1175    /// βœ… AMD ❌ Intel (reserved=false)
1176    pub fn has_invlpgb_nested(&self) -> bool {
1177        self.ebx
1178            .contains(ProcessorCapacityAndFeatureEbx::INVLPGB_NESTED)
1179    }
1180
1181    /// Performance time-stamp counter size (in bits).
1182    ///
1183    /// Indicates the size of `MSRC001_0280[PTSC]`.
1184    ///
1185    /// # Platforms
1186    /// βœ… AMD ❌ Intel (reserved=false)
1187    pub fn perf_tsc_size(&self) -> usize {
1188        let s = get_bits(self.ecx, 16, 17) as u8;
1189        match s & 0b11 {
1190            0b00 => 40,
1191            0b01 => 48,
1192            0b10 => 56,
1193            0b11 => 64,
1194            _ => unreachable!("AND with 0b11 in match"),
1195        }
1196    }
1197
1198    /// APIC ID size.
1199    ///
1200    /// A value of zero indicates that legacy methods must be used to determine
1201    /// the maximum number of logical processors, as indicated by CPUID
1202    /// `Fn8000_0008_ECX[NC]`.
1203    ///
1204    /// # Platforms
1205    /// βœ… AMD ❌ Intel (reserved=0)
1206    pub fn apic_id_size(&self) -> u8 {
1207        get_bits(self.ecx, 12, 15) as u8
1208    }
1209
1210    /// The size of the `apic_id_size` field determines the maximum number of
1211    /// logical processors (MNLP) that the package could theoretically support,
1212    /// and not the actual number of logical processors that are implemented or
1213    /// enabled in the package, as indicated by CPUID `Fn8000_0008_ECX[NC]`.
1214    ///
1215    /// `MNLP = (2 raised to the power of ApicIdSize[3:0])` (if not 0)
1216    ///
1217    /// # Platforms
1218    /// βœ… AMD ❌ Intel (reserved=0)
1219    pub fn maximum_logical_processors(&self) -> usize {
1220        usize::pow(2, self.apic_id_size() as u32)
1221    }
1222
1223    /// Number of physical threads in the processor.
1224    ///
1225    /// # Platforms
1226    /// βœ… AMD ❌ Intel (reserved=0)
1227    pub fn num_phys_threads(&self) -> usize {
1228        get_bits(self.ecx, 0, 7) as usize + 1
1229    }
1230
1231    /// Maximum page count for INVLPGB instruction.
1232    ///
1233    /// # Platforms
1234    /// βœ… AMD ❌ Intel (reserved=0)
1235    pub fn invlpgb_max_pages(&self) -> u16 {
1236        get_bits(self.edx, 0, 15) as u16
1237    }
1238
1239    /// The maximum ECX value recognized by RDPRU.
1240    ///
1241    /// # Platforms
1242    /// βœ… AMD ❌ Intel (reserved=0)
1243    pub fn max_rdpru_id(&self) -> u16 {
1244        get_bits(self.edx, 16, 31) as u16
1245    }
1246}
1247
1248impl Debug for ProcessorCapacityAndFeatureInfo {
1249    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
1250        f.debug_struct("ProcessorCapacityAndFeatureInfo")
1251            .field("physical_address_bits", &self.physical_address_bits())
1252            .field("linear_address_bits", &self.linear_address_bits())
1253            .field(
1254                "guest_physical_address_bits",
1255                &self.guest_physical_address_bits(),
1256            )
1257            .field("has_cl_zero", &self.has_cl_zero())
1258            .field("has_inst_ret_cntr_msr", &self.has_inst_ret_cntr_msr())
1259            .field(
1260                "has_restore_fp_error_ptrs",
1261                &self.has_restore_fp_error_ptrs(),
1262            )
1263            .field("has_invlpgb", &self.has_invlpgb())
1264            .field("has_rdpru", &self.has_rdpru())
1265            .field("has_mcommit", &self.has_mcommit())
1266            .field("has_wbnoinvd", &self.has_wbnoinvd())
1267            .field("has_int_wbinvd", &self.has_int_wbinvd())
1268            .field(
1269                "has_unsupported_efer_lmsle",
1270                &self.has_unsupported_efer_lmsle(),
1271            )
1272            .field("has_invlpgb_nested", &self.has_invlpgb_nested())
1273            .field("perf_tsc_size", &self.perf_tsc_size())
1274            .field("apic_id_size", &self.apic_id_size())
1275            .field(
1276                "maximum_logical_processors",
1277                &self.maximum_logical_processors(),
1278            )
1279            .field("num_phys_threads", &self.num_phys_threads())
1280            .field("invlpgb_max_pages", &self.invlpgb_max_pages())
1281            .field("max_rdpru_id", &self.max_rdpru_id())
1282            .finish()
1283    }
1284}
1285
bitflags! {
    /// EBX capability bits of leaf 0x8000_0008; read through the `has_*`
    /// accessors on `ProcessorCapacityAndFeatureInfo`.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ProcessorCapacityAndFeatureEbx: u32 {
        /// CLZERO instruction (see `has_cl_zero`).
        const CLZERO = 1 << 0;
        /// Instruction retired counter MSR (see `has_inst_ret_cntr_msr`).
        const INST_RETCNT_MSR = 1 << 1;
        /// FP error pointers restored by XRSTOR (see `has_restore_fp_error_ptrs`).
        const RSTR_FP_ERR_PTRS = 1 << 2;
        /// INVLPGB/TLBSYNC instructions (see `has_invlpgb`).
        const INVLPGB = 1 << 3;
        /// RDPRU instruction (see `has_rdpru`).
        const RDPRU = 1 << 4;
        /// MCOMMIT instruction (see `has_mcommit`).
        const MCOMMIT = 1 << 8;
        /// WBNOINVD instruction (see `has_wbnoinvd`).
        const WBNOINVD = 1 << 9;
        /// Interruptible WBINVD/WBNOINVD (see `has_int_wbinvd`).
        const INT_WBINVD = 1 << 13;
        /// EFER.LMSLE unsupported (see `has_unsupported_efer_lmsle`).
        const EFER_LMSLE_UNSUPP = 1 << 20;
        /// INVLPGB for guest nested translations (see `has_invlpgb_nested`).
        const INVLPGB_NESTED = 1 << 21;
    }
}
1302
/// Information about the SVM features that the processor supports (LEAF=0x8000_000A).
///
/// # Note
/// If SVM is not supported ([ExtendedProcessorFeatureIdentifiers::has_svm] is false),
/// this leaf is reserved ([crate::CpuId] will return None in this case).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct SvmFeatures {
    /// Raw EAX: holds the SVM revision (bits 0..=7).
    eax: u32,
    /// Raw EBX: number of supported ASIDs.
    ebx: u32,
    /// Reserved
    _ecx: u32,
    /// EDX feature bits (see `SvmFeaturesEdx`).
    edx: SvmFeaturesEdx,
}
1319
1320impl SvmFeatures {
1321    pub(crate) fn new(data: CpuIdResult) -> Self {
1322        Self {
1323            eax: data.eax,
1324            ebx: data.ebx,
1325            _ecx: data.ecx,
1326            edx: SvmFeaturesEdx::from_bits_truncate(data.edx),
1327        }
1328    }
1329
1330    /// SVM revision number.
1331    pub fn revision(&self) -> u8 {
1332        get_bits(self.eax, 0, 7) as u8
1333    }
1334
1335    /// Number of available address space identifiers (ASID).
1336    pub fn supported_asids(&self) -> u32 {
1337        self.ebx
1338    }
1339
1340    /// Nested paging supported if set.
1341    pub fn has_nested_paging(&self) -> bool {
1342        self.edx.contains(SvmFeaturesEdx::NP)
1343    }
1344
1345    /// Indicates support for LBR Virtualization.
1346    pub fn has_lbr_virtualization(&self) -> bool {
1347        self.edx.contains(SvmFeaturesEdx::LBR_VIRT)
1348    }
1349
1350    /// Indicates support for SVM-Lock if set.
1351    pub fn has_svm_lock(&self) -> bool {
1352        self.edx.contains(SvmFeaturesEdx::SVML)
1353    }
1354
1355    /// Indicates support for NRIP save on #VMEXIT if set.
1356    pub fn has_nrip(&self) -> bool {
1357        self.edx.contains(SvmFeaturesEdx::NRIPS)
1358    }
1359
1360    /// Indicates support for MSR TSC ratio (MSR `0xC000_0104`) if set.
1361    pub fn has_tsc_rate_msr(&self) -> bool {
1362        self.edx.contains(SvmFeaturesEdx::TSC_RATE_MSR)
1363    }
1364
1365    /// Indicates support for VMCB clean bits if set.
1366    pub fn has_vmcb_clean_bits(&self) -> bool {
1367        self.edx.contains(SvmFeaturesEdx::VMCB_CLEAN)
1368    }
1369
1370    /// Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush
1371    /// only the current ASID's TLB entries.
1372    ///
1373    /// Also indicates support for the extended VMCB TLB_Control.
1374    pub fn has_flush_by_asid(&self) -> bool {
1375        self.edx.contains(SvmFeaturesEdx::FLUSH_BY_ASID)
1376    }
1377
1378    /// Indicates support for the decode assists if set.
1379    pub fn has_decode_assists(&self) -> bool {
1380        self.edx.contains(SvmFeaturesEdx::DECODE_ASSISTS)
1381    }
1382
1383    /// Indicates support for the pause intercept filter if set.
1384    pub fn has_pause_filter(&self) -> bool {
1385        self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER)
1386    }
1387
1388    /// Indicates support for the PAUSE filter cycle count threshold if set.
1389    pub fn has_pause_filter_threshold(&self) -> bool {
1390        self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER_THRESHOLD)
1391    }
1392
1393    /// Support for the AMD advanced virtual interrupt controller if set.
1394    pub fn has_avic(&self) -> bool {
1395        self.edx.contains(SvmFeaturesEdx::AVIC)
1396    }
1397
1398    /// VMSAVE and VMLOAD virtualization supported if set.
1399    pub fn has_vmsave_virtualization(&self) -> bool {
1400        self.edx.contains(SvmFeaturesEdx::VMSAVE_VIRT)
1401    }
1402
1403    /// GIF -- virtualized global interrupt flag if set.
1404    pub fn has_gif(&self) -> bool {
1405        self.edx.contains(SvmFeaturesEdx::VGIF)
1406    }
1407
1408    /// Guest Mode Execution Trap supported if set.
1409    pub fn has_gmet(&self) -> bool {
1410        self.edx.contains(SvmFeaturesEdx::GMET)
1411    }
1412
1413    /// SVM supervisor shadow stack restrictions if set.
1414    pub fn has_sss_check(&self) -> bool {
1415        self.edx.contains(SvmFeaturesEdx::SSS_CHECK)
1416    }
1417
1418    /// SPEC_CTRL virtualization supported if set.
1419    pub fn has_spec_ctrl(&self) -> bool {
1420        self.edx.contains(SvmFeaturesEdx::SPEC_CTRL)
1421    }
1422
1423    /// When host `CR4.MCE=1` and guest `CR4.MCE=0`, machine check exceptions (`#MC`) in a
1424    /// guest do not cause shutdown and are always intercepted if set.
1425    pub fn has_host_mce_override(&self) -> bool {
1426        self.edx.contains(SvmFeaturesEdx::HOST_MCE_OVERRIDE)
1427    }
1428
1429    /// Support for INVLPGB/TLBSYNC hypervisor enable in VMCB and TLBSYNC intercept if
1430    /// set.
1431    pub fn has_tlb_ctrl(&self) -> bool {
1432        self.edx.contains(SvmFeaturesEdx::TLB_CTL)
1433    }
1434}
1435
bitflags! {
    /// EDX feature bits of leaf 0x8000_000A; read through the `has_*`
    /// accessors on `SvmFeatures`.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SvmFeaturesEdx: u32 {
        /// Nested paging (see `has_nested_paging`).
        const NP = 1 << 0;
        /// LBR virtualization (see `has_lbr_virtualization`).
        const LBR_VIRT = 1 << 1;
        /// SVM-Lock (see `has_svm_lock`).
        const SVML = 1 << 2;
        /// NRIP save on #VMEXIT (see `has_nrip`).
        const NRIPS = 1 << 3;
        /// MSR TSC ratio (see `has_tsc_rate_msr`).
        const TSC_RATE_MSR = 1 << 4;
        /// VMCB clean bits (see `has_vmcb_clean_bits`).
        const VMCB_CLEAN = 1 << 5;
        /// Flush by ASID / extended VMCB TLB_Control (see `has_flush_by_asid`).
        const FLUSH_BY_ASID = 1 << 6;
        /// Decode assists (see `has_decode_assists`).
        const DECODE_ASSISTS = 1 << 7;
        /// Pause intercept filter (see `has_pause_filter`).
        const PAUSE_FILTER = 1 << 10;
        /// PAUSE filter cycle count threshold (see `has_pause_filter_threshold`).
        const PAUSE_FILTER_THRESHOLD = 1 << 12;
        /// Advanced virtual interrupt controller (see `has_avic`).
        const AVIC = 1 << 13;
        /// VMSAVE/VMLOAD virtualization (see `has_vmsave_virtualization`).
        const VMSAVE_VIRT = 1 << 15;
        /// Virtualized global interrupt flag (see `has_gif`).
        const VGIF = 1 << 16;
        /// Guest Mode Execution Trap (see `has_gmet`).
        const GMET = 1 << 17;
        /// Supervisor shadow stack restrictions (see `has_sss_check`).
        const SSS_CHECK = 1 << 19;
        /// SPEC_CTRL virtualization (see `has_spec_ctrl`).
        const SPEC_CTRL = 1 << 20;
        /// Guest #MC always intercepted (see `has_host_mce_override`).
        const HOST_MCE_OVERRIDE = 1 << 23;
        /// INVLPGB/TLBSYNC hypervisor enable (see `has_tlb_ctrl`).
        const TLB_CTL = 1 << 24;
    }
}
1460
/// TLB 1-GiB Pages Information (LEAF=0x8000_0019).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct Tlb1gbPageInfo {
    /// Raw EAX: L1 instruction/data TLB fields for 1-GB pages.
    eax: u32,
    /// Raw EBX: L2 instruction/data TLB fields for 1-GB pages.
    ebx: u32,
    /// Reserved
    _ecx: u32,
    /// Reserved
    _edx: u32,
}
1474
1475impl Tlb1gbPageInfo {
1476    pub(crate) fn new(data: CpuIdResult) -> Self {
1477        Self {
1478            eax: data.eax,
1479            ebx: data.ebx,
1480            _ecx: data.ecx,
1481            _edx: data.edx,
1482        }
1483    }
1484
1485    /// L1 Data TLB associativity for 1-GB pages.
1486    pub fn dtlb_l1_1gb_associativity(&self) -> Associativity {
1487        let assoc_bits = get_bits(self.eax, 28, 31) as u8;
1488        Associativity::for_l2(assoc_bits)
1489    }
1490
1491    /// L1 Data TLB number of entries for 1-GB pages.
1492    pub fn dtlb_l1_1gb_size(&self) -> u8 {
1493        get_bits(self.eax, 16, 27) as u8
1494    }
1495
1496    /// L1 Instruction TLB associativity for 1-GB pages.
1497    pub fn itlb_l1_1gb_associativity(&self) -> Associativity {
1498        let assoc_bits = get_bits(self.eax, 12, 15) as u8;
1499        Associativity::for_l2(assoc_bits)
1500    }
1501
1502    /// L1 Instruction TLB number of entries for 1-GB pages.
1503    pub fn itlb_l1_1gb_size(&self) -> u8 {
1504        get_bits(self.eax, 0, 11) as u8
1505    }
1506
1507    /// L2 Data TLB associativity for 1-GB pages.
1508    pub fn dtlb_l2_1gb_associativity(&self) -> Associativity {
1509        let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
1510        Associativity::for_l2(assoc_bits)
1511    }
1512
1513    /// L2 Data TLB number of entries for 1-GB pages.
1514    pub fn dtlb_l2_1gb_size(&self) -> u8 {
1515        get_bits(self.ebx, 16, 27) as u8
1516    }
1517
1518    /// L2 Instruction TLB associativity for 1-GB pages.
1519    pub fn itlb_l2_1gb_associativity(&self) -> Associativity {
1520        let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
1521        Associativity::for_l2(assoc_bits)
1522    }
1523
1524    /// L2 Instruction TLB number of entries for 1-GB pages.
1525    pub fn itlb_l2_1gb_size(&self) -> u8 {
1526        get_bits(self.ebx, 0, 11) as u8
1527    }
1528}
1529
/// Performance Optimization Identifier (LEAF=0x8000_001A).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct PerformanceOptimizationInfo {
    /// EAX feature bits (see `PerformanceOptimizationInfoEax`).
    eax: PerformanceOptimizationInfoEax,
    /// Reserved
    _ebx: u32,
    /// Reserved
    _ecx: u32,
    /// Reserved
    _edx: u32,
}
1544
1545impl PerformanceOptimizationInfo {
1546    pub(crate) fn new(data: CpuIdResult) -> Self {
1547        Self {
1548            eax: PerformanceOptimizationInfoEax::from_bits_truncate(data.eax),
1549            _ebx: data.ebx,
1550            _ecx: data.ecx,
1551            _edx: data.edx,
1552        }
1553    }
1554
1555    /// The internal FP/SIMD execution datapath is 128 bits wide if set.
1556    pub fn has_fp128(&self) -> bool {
1557        self.eax.contains(PerformanceOptimizationInfoEax::FP128)
1558    }
1559
1560    /// MOVU (Move Unaligned) SSE instructions are efficient more than
1561    /// MOVL/MOVH SSE if set.
1562    pub fn has_movu(&self) -> bool {
1563        self.eax.contains(PerformanceOptimizationInfoEax::MOVU)
1564    }
1565
1566    /// The internal FP/SIMD execution datapath is 256 bits wide if set.
1567    pub fn has_fp256(&self) -> bool {
1568        self.eax.contains(PerformanceOptimizationInfoEax::FP256)
1569    }
1570}
1571
bitflags! {
    /// EAX feature bits of leaf 0x8000_001A; read through the `has_*`
    /// accessors on `PerformanceOptimizationInfo`.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PerformanceOptimizationInfoEax: u32 {
        /// 128-bit FP/SIMD datapath (see `has_fp128`).
        const FP128 = 1 << 0;
        /// Efficient unaligned SSE moves (see `has_movu`).
        const MOVU = 1 << 1;
        /// 256-bit FP/SIMD datapath (see `has_fp256`).
        const FP256 = 1 << 2;
    }
}
1581
/// Instruction-Based Sampling (IBS) Capabilities (LEAF=0x8000_001B).
///
/// NOTE(review): the previous header here ("Performance Optimization
/// Identifier (LEAF=0x8000_001A)") was a copy-paste of the struct above;
/// the leaf number 0x8000_001B is taken from the AMD CPUID specification β€”
/// confirm against the constructor's call site.
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct InstructionBasedSamplingCapabilities {
    /// EAX feature bits (see `InstructionBasedSamplingCapabilitiesEax`).
    eax: InstructionBasedSamplingCapabilitiesEax,
    /// Reserved
    _ebx: u32,
    /// Reserved
    _ecx: u32,
    /// Reserved
    _edx: u32,
}
1596
1597impl InstructionBasedSamplingCapabilities {
1598    pub(crate) fn new(data: CpuIdResult) -> Self {
1599        Self {
1600            eax: InstructionBasedSamplingCapabilitiesEax::from_bits_truncate(data.eax),
1601            _ebx: data.ebx,
1602            _ecx: data.ecx,
1603            _edx: data.edx,
1604        }
1605    }
1606
1607    /// IBS feature flags valid if set.
1608    pub fn has_feature_flags(&self) -> bool {
1609        self.eax
1610            .contains(InstructionBasedSamplingCapabilitiesEax::IBSFFV)
1611    }
1612
1613    /// IBS fetch sampling supported if set.
1614    pub fn has_fetch_sampling(&self) -> bool {
1615        self.eax
1616            .contains(InstructionBasedSamplingCapabilitiesEax::FETCH_SAM)
1617    }
1618
1619    /// IBS execution sampling supported if set.
1620    pub fn has_execution_sampling(&self) -> bool {
1621        self.eax
1622            .contains(InstructionBasedSamplingCapabilitiesEax::OP_SAM)
1623    }
1624
1625    /// Read write of op counter supported if set.
1626    pub fn has_read_write_operation_counter(&self) -> bool {
1627        self.eax
1628            .contains(InstructionBasedSamplingCapabilitiesEax::RD_WR_OP_CNT)
1629    }
1630
1631    /// Op counting mode supported if set.
1632    pub fn has_operation_counter(&self) -> bool {
1633        self.eax
1634            .contains(InstructionBasedSamplingCapabilitiesEax::OP_CNT)
1635    }
1636
1637    /// Branch target address reporting supported if set.
1638    pub fn has_branch_target_address_reporting(&self) -> bool {
1639        self.eax
1640            .contains(InstructionBasedSamplingCapabilitiesEax::BRN_TRGT)
1641    }
1642
1643    /// IbsOpCurCnt and IbsOpMaxCnt extend by 7 bits if set.
1644    pub fn has_operation_counter_extended(&self) -> bool {
1645        self.eax
1646            .contains(InstructionBasedSamplingCapabilitiesEax::OP_CNT_EXT)
1647    }
1648
1649    /// Invalid RIP indication supported if set.
1650    pub fn has_invalid_rip_indication(&self) -> bool {
1651        self.eax
1652            .contains(InstructionBasedSamplingCapabilitiesEax::RIP_INVALID_CHK)
1653    }
1654
1655    /// Fused branch micro-op indication supported if set.
1656    pub fn has_fused_branch_micro_op_indication(&self) -> bool {
1657        self.eax
1658            .contains(InstructionBasedSamplingCapabilitiesEax::OP_BRN_FUSE)
1659    }
1660
1661    /// L3 Miss Filtering for IBS supported if set.
1662    pub fn has_l3_miss_filtering(&self) -> bool {
1663        self.eax
1664            .contains(InstructionBasedSamplingCapabilitiesEax::IBS_L3_MISS_FILTERING)
1665    }
1666}
1667
bitflags! {
    /// EAX capability bits for Instruction-Based Sampling; read through the
    /// `has_*` accessors on `InstructionBasedSamplingCapabilities`.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct InstructionBasedSamplingCapabilitiesEax: u32 {
        /// IBS feature flags valid (see `has_feature_flags`).
        const IBSFFV = 1 << 0;
        /// Fetch sampling (see `has_fetch_sampling`).
        const FETCH_SAM = 1 << 1;
        /// Execution sampling (see `has_execution_sampling`).
        const OP_SAM = 1 << 2;
        /// Read/write of op counter (see `has_read_write_operation_counter`).
        const RD_WR_OP_CNT = 1 << 3;
        /// Op counting mode (see `has_operation_counter`).
        const OP_CNT = 1 << 4;
        /// Branch target address reporting (see `has_branch_target_address_reporting`).
        const BRN_TRGT = 1 << 5;
        /// Extended op counters (see `has_operation_counter_extended`).
        const OP_CNT_EXT = 1 << 6;
        /// Invalid RIP indication (see `has_invalid_rip_indication`).
        const RIP_INVALID_CHK = 1 << 7;
        /// Fused branch micro-op indication (see `has_fused_branch_micro_op_indication`).
        const OP_BRN_FUSE = 1 << 8;
        /// L3 miss filtering (see `has_l3_miss_filtering`).
        const IBS_L3_MISS_FILTERING = 1 << 11;
    }
}
1684
/// Processor Topology Information (LEAF=0x8000_001E).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq)]
pub struct ProcessorTopologyInfo {
    /// Raw EAX: extended (x2APIC) APIC ID.
    eax: u32,
    /// Raw EBX: core ID and threads-per-core fields.
    ebx: u32,
    /// Raw ECX: node ID and nodes-per-processor fields.
    ecx: u32,
    /// Reserved
    _edx: u32,
}
1697
1698impl ProcessorTopologyInfo {
1699    pub(crate) fn new(data: CpuIdResult) -> Self {
1700        Self {
1701            eax: data.eax,
1702            ebx: data.ebx,
1703            ecx: data.ecx,
1704            _edx: data.edx,
1705        }
1706    }
1707
1708    /// x2APIC ID
1709    pub fn x2apic_id(&self) -> u32 {
1710        self.eax
1711    }
1712
1713    /// Core ID
1714    ///
1715    /// # Note
1716    /// `Core ID` means `Compute Unit ID` if AMD Family 15h-16h Processors.
1717    pub fn core_id(&self) -> u8 {
1718        get_bits(self.ebx, 0, 7) as u8
1719    }
1720
1721    /// Threads per core
1722    ///
1723    /// # Note
1724    /// `Threads per Core` means `Cores per Compute Unit` if AMD Family 15h-16h Processors.
1725    pub fn threads_per_core(&self) -> u8 {
1726        get_bits(self.ebx, 8, 15) as u8 + 1
1727    }
1728
1729    /// Node ID
1730    pub fn node_id(&self) -> u8 {
1731        get_bits(self.ecx, 0, 7) as u8
1732    }
1733
1734    /// Nodes per processor
1735    pub fn nodes_per_processor(&self) -> u8 {
1736        get_bits(self.ecx, 8, 10) as u8 + 1
1737    }
1738}
1739
1740impl Debug for ProcessorTopologyInfo {
1741    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
1742        f.debug_struct("ProcessorTopologyInfo")
1743            .field("x2apic_id", &self.x2apic_id())
1744            .field("core_id", &self.core_id())
1745            .field("threads_per_core", &self.threads_per_core())
1746            .field("node_id", &self.node_id())
1747            .field("nodes_per_processor", &self.nodes_per_processor())
1748            .finish()
1749    }
1750}
1751
/// Encrypted Memory Capabilities (LEAF=0x8000_001F).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(Debug, PartialEq, Eq)]
pub struct MemoryEncryptionInfo {
    /// EAX feature bits (see `MemoryEncryptionInfoEax`).
    eax: MemoryEncryptionInfoEax,
    /// Raw EBX: C-bit position and physical address reduction fields.
    ebx: u32,
    /// Raw ECX: number of simultaneously supported encrypted guests.
    ecx: u32,
    /// Raw EDX: minimum ASID for an SEV-enabled, SEV-ES-disabled guest.
    edx: u32,
}
1763
1764impl MemoryEncryptionInfo {
1765    pub(crate) fn new(data: CpuIdResult) -> Self {
1766        Self {
1767            eax: MemoryEncryptionInfoEax::from_bits_truncate(data.eax),
1768            ebx: data.ebx,
1769            ecx: data.ecx,
1770            edx: data.edx,
1771        }
1772    }
1773
1774    /// Secure Memory Encryption is supported if set.
1775    pub fn has_sme(&self) -> bool {
1776        self.eax.contains(MemoryEncryptionInfoEax::SME)
1777    }
1778
1779    /// Secure Encrypted Virtualization is supported if set.
1780    pub fn has_sev(&self) -> bool {
1781        self.eax.contains(MemoryEncryptionInfoEax::SEV)
1782    }
1783
1784    /// The Page Flush MSR is available if set.
1785    pub fn has_page_flush_msr(&self) -> bool {
1786        self.eax.contains(MemoryEncryptionInfoEax::PAGE_FLUSH_MSR)
1787    }
1788
1789    /// SEV Encrypted State is supported if set.
1790    pub fn has_sev_es(&self) -> bool {
1791        self.eax.contains(MemoryEncryptionInfoEax::SEV_ES)
1792    }
1793
1794    /// SEV Secure Nested Paging supported if set.
1795    pub fn has_sev_snp(&self) -> bool {
1796        self.eax.contains(MemoryEncryptionInfoEax::SEV_SNP)
1797    }
1798
1799    /// VM Permission Levels supported if set.
1800    pub fn has_vmpl(&self) -> bool {
1801        self.eax.contains(MemoryEncryptionInfoEax::VMPL)
1802    }
1803
1804    /// Hardware cache coherency across encryption domains enforced if set.
1805    pub fn has_hw_enforced_cache_coh(&self) -> bool {
1806        self.eax.contains(MemoryEncryptionInfoEax::HWENFCACHECOH)
1807    }
1808
1809    /// SEV guest execution only allowed from a 64-bit host if set.
1810    pub fn has_64bit_mode(&self) -> bool {
1811        self.eax.contains(MemoryEncryptionInfoEax::HOST64)
1812    }
1813
1814    /// Restricted Injection supported if set.
1815    pub fn has_restricted_injection(&self) -> bool {
1816        self.eax.contains(MemoryEncryptionInfoEax::RESTINJECT)
1817    }
1818
1819    /// Alternate Injection supported if set.
1820    pub fn has_alternate_injection(&self) -> bool {
1821        self.eax.contains(MemoryEncryptionInfoEax::ALTINJECT)
1822    }
1823
1824    /// Full debug state swap supported for SEV-ES guests.
1825    pub fn has_debug_swap(&self) -> bool {
1826        self.eax.contains(MemoryEncryptionInfoEax::DBGSWP)
1827    }
1828
1829    /// Disallowing IBS use by the host supported if set.
1830    pub fn has_prevent_host_ibs(&self) -> bool {
1831        self.eax.contains(MemoryEncryptionInfoEax::PREVHOSTIBS)
1832    }
1833
1834    /// Virtual Transparent Encryption supported if set.
1835    pub fn has_vte(&self) -> bool {
1836        self.eax.contains(MemoryEncryptionInfoEax::VTE)
1837    }
1838
1839    /// C-bit location in page table entry
1840    pub fn c_bit_position(&self) -> u8 {
1841        get_bits(self.ebx, 0, 5) as u8
1842    }
1843
1844    /// Physical Address bit reduction
1845    pub fn physical_address_reduction(&self) -> u8 {
1846        get_bits(self.ebx, 6, 11) as u8
1847    }
1848
1849    /// Number of encrypted guests supported simultaneouslys
1850    pub fn max_encrypted_guests(&self) -> u32 {
1851        self.ecx
1852    }
1853
1854    /// Minimum ASID value for an SEV enabled, SEV-ES disabled guest
1855    pub fn min_sev_no_es_asid(&self) -> u32 {
1856        self.edx
1857    }
1858}
1859
bitflags! {
    /// EAX capability bits of leaf 0x8000_001F; read through the `has_*`
    /// accessors on `MemoryEncryptionInfo`.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct MemoryEncryptionInfoEax: u32 {
        /// Secure Memory Encryption (see `has_sme`).
        const SME = 1 << 0;
        /// Secure Encrypted Virtualization (see `has_sev`).
        const SEV = 1 << 1;
        /// Page Flush MSR (see `has_page_flush_msr`).
        const PAGE_FLUSH_MSR = 1 << 2;
        /// SEV Encrypted State (see `has_sev_es`).
        const SEV_ES = 1 << 3;
        /// SEV Secure Nested Paging (see `has_sev_snp`).
        const SEV_SNP = 1 << 4;
        /// VM Permission Levels (see `has_vmpl`).
        const VMPL = 1 << 5;
        /// HW-enforced cache coherency (see `has_hw_enforced_cache_coh`).
        const HWENFCACHECOH = 1 << 10;
        /// 64-bit host required for SEV guests (see `has_64bit_mode`).
        const HOST64 = 1 << 11;
        /// Restricted Injection (see `has_restricted_injection`).
        const RESTINJECT = 1 << 12;
        /// Alternate Injection (see `has_alternate_injection`).
        const ALTINJECT = 1 << 13;
        /// Debug state swap for SEV-ES (see `has_debug_swap`).
        const DBGSWP = 1 << 14;
        /// Prevent host IBS (see `has_prevent_host_ibs`).
        const PREVHOSTIBS = 1 << 15;
        /// Virtual Transparent Encryption (see `has_vte`).
        const VTE = 1 << 16;
    }
}
1879
/// Platform Quality of Service Information (LEAF=0x8000_0020).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq)]
pub struct PqosExtendedFeatureInfo<R: CpuIdReader> {
    /// Reader retained so sub-leaves 1..=3 can be queried on demand.
    read: R,
    _eax: u32,
    /// Sub-leaf 0 feature bits (see the `has_*` accessors).
    ebx: PqosExtendedFeatureInfoEbx,
    _ecx: u32,
    _edx: u32,
}
1892
impl<R: CpuIdReader> PqosExtendedFeatureInfo<R> {
    // Reads sub-leaf 0 of LEAF=0x8000_0020 and keeps the reader so the
    // feature-specific sub-leaves can be queried on demand later.
    pub(crate) fn new(read: R) -> Self {
        let data = read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 0);
        Self {
            read,
            _eax: data.eax,
            ebx: PqosExtendedFeatureInfoEbx::from_bits_truncate(data.ebx),
            _ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// L3 Memory Bandwidth Enforcement is supported if set.
    pub fn has_l3mbe(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::L3MBE)
    }

    /// L3 Slow Memory Bandwidth Enforcement is supported if set.
    pub fn has_l3smbe(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::L3SMBE)
    }

    /// Bandwidth Monitoring Event Configuration is supported if set.
    pub fn has_bmec(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::BMEC)
    }

    /// L3 Range Reservation is supported if set (see "L3 Range Reservation"
    /// in APM Volume 2).
    pub fn has_l3rr(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::L3RR)
    }

    /// Assignable Bandwidth Monitoring Counters is supported if set.
    pub fn has_abmc(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::ABMC)
    }

    /// Smart Data Cache Injection (SDCI) Allocation Enforcement is supported if set.
    pub fn has_sdciae(&self) -> bool {
        self.ebx.contains(PqosExtendedFeatureInfoEbx::SDCIAE)
    }

    /// Get L3 Memory Bandwidth Enforcement Information (sub-leaf 1).
    ///
    /// Returns `None` if L3MBE is not supported.
    pub fn get_l3_memory_bandwidth_enforcement_info(
        &self,
    ) -> Option<L3MemoryBandwidthEnforcementInformation> {
        if self.has_l3mbe() {
            Some(L3MemoryBandwidthEnforcementInformation::new(
                self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 1),
            ))
        } else {
            None
        }
    }

    /// Get L3 Slow Memory Bandwidth Enforcement Information (sub-leaf 2).
    ///
    /// Returns `None` if L3SMBE is not supported.
    pub fn get_l3_slow_memory_bandwidth_enforcement_info(
        &self,
    ) -> Option<L3MemoryBandwidthEnforcementInformation> {
        if self.has_l3smbe() {
            Some(L3MemoryBandwidthEnforcementInformation::new(
                self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 2),
            ))
        } else {
            None
        }
    }

    /// Get Bandwidth Monitoring Event Counters Information (sub-leaf 3).
    ///
    /// Returns `None` if BMEC is not supported.
    pub fn get_bandwidth_monitoring_event_counters_info(
        &self,
    ) -> Option<BandwidthMonitoringEventCounters> {
        if self.has_bmec() {
            Some(BandwidthMonitoringEventCounters::new(
                self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 3),
            ))
        } else {
            None
        }
    }

    /// Get Assignable Bandwidth Monitoring Counters Information (sub-leaf 5).
    ///
    /// Returns `None` if ABMC is not supported.
    pub fn get_assignable_bandwidth_monitoring_counters_info(
        &self,
    ) -> Option<AssignableBandwidthMonitoringCounterInfo> {
        if self.has_abmc() {
            Some(AssignableBandwidthMonitoringCounterInfo::new(
                self.read.cpuid2(EAX_PQOS_EXTENDED_FEATURES, 5),
            ))
        } else {
            None
        }
    }
}
1988
bitflags! {
    // CPUID LEAF=0x8000_0020 sub-leaf 0, EBX: supported PQOS extended features.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PqosExtendedFeatureInfoEbx: u32 {
        /// L3 Memory Bandwidth Enforcement (bit 1).
        const L3MBE = 1 << 1;
        /// L3 Slow Memory Bandwidth Enforcement (bit 2).
        const L3SMBE = 1 << 2;
        /// Bandwidth Monitoring Event Configuration (bit 3).
        const BMEC = 1 << 3;
        /// L3 Range Reservation (bit 4).
        const L3RR = 1 << 4;
        /// Assignable Bandwidth Monitoring Counters (bit 5).
        const ABMC = 1 << 5;
        /// Smart Data Cache Injection Allocation Enforcement (bit 6).
        const SDCIAE = 1 << 6;
    }
}
2001
bitflags! {
    // CPUID LEAF=0x8000_0020 sub-leaf 5 flag.
    //
    // NOTE(review): unreferenced in this chunk —
    // `AssignableBandwidthMonitoringCounterInfo::has_select_cos()` tests the
    // bit manually instead. Consider wiring this type in or removing it.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PqosExtendedFeatureInfoEbx5: u32 {
        /// Bandwidth counters can be keyed to a COS instead of an RMID (bit 0).
        const SELECT_COS = 1 << 0;
    }
}
2009
2010impl<R: CpuIdReader> Debug for PqosExtendedFeatureInfo<R> {
2011    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
2012        f.debug_struct("PqosExtendedFeatureInfo")
2013            .field("has_l3mbe", &self.has_l3mbe())
2014            .field("has_l3smbe", &self.has_l3smbe())
2015            .field("has_bmec", &self.has_bmec())
2016            .field("has_l3rr", &self.has_l3rr())
2017            .field("has_abmc", &self.has_abmc())
2018            .field("has_sdciae", &self.has_sdciae())
2019            .finish()
2020    }
2021}
2022
/// L3 Memory Bandwidth Enforcement Information (LEAF=0x8000_0020_x1 and x2).
///
/// Shared between the L3MBE (sub-leaf 1) and L3SMBE (sub-leaf 2) accessors,
/// which report their data in the same layout.
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct L3MemoryBandwidthEnforcementInformation {
    eax: u32, // bandwidth specifier field size
    _ebx: u32, // unused
    _ecx: u32, // unused
    edx: u32, // maximum supported COS number
}
2034
2035impl L3MemoryBandwidthEnforcementInformation {
2036    pub(crate) fn new(data: CpuIdResult) -> Self {
2037        Self {
2038            eax: data.eax,
2039            _ebx: data.ebx,
2040            _ecx: data.ecx,
2041            edx: data.edx,
2042        }
2043    }
2044
2045    /// Identifies the size of the bandwidth specifier field in the
2046    /// L3QOS_BW_Control_n MSRs
2047    pub fn bandwidth_length(&self) -> u32 {
2048        self.eax
2049    }
2050
2051    /// Maximum COS number supported by the L3MBE feature
2052    pub fn cos_max(&self) -> u32 {
2053        self.edx
2054    }
2055}
2056
/// Bandwidth Monitoring Event Counters Information (LEAF=0x8000_0020_x3).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct BandwidthMonitoringEventCounters {
    _eax: u32, // unused
    ebx: u32, // bits [7:0]: number of configurable bandwidth events
    ecx: BandwidthMonitoringEventCountersEcx, // decoded event-support bits
    _edx: u32, // unused
}
2068
2069impl BandwidthMonitoringEventCounters {
2070    pub(crate) fn new(data: CpuIdResult) -> Self {
2071        Self {
2072            _eax: data.eax,
2073            ebx: data.ebx,
2074            ecx: BandwidthMonitoringEventCountersEcx::from_bits_truncate(data.ecx),
2075            _edx: data.edx,
2076        }
2077    }
2078
2079    /// Get Number of configurable bandwidth events
2080    pub fn number_events(&self) -> u32 {
2081        get_bits(self.ebx, 0, 7)
2082    }
2083
2084    /// Reads to local DRAM memory is supported if set.
2085    pub fn has_l3_cache_lcl_bw_fill_mon(&self) -> bool {
2086        self.ecx
2087            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_BW_FILL_MON)
2088    }
2089
2090    /// Reads to remote DRAM memory is supported if set.
2091    pub fn has_l3_cache_rmt_bw_fill_mon(&self) -> bool {
2092        self.ecx
2093            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_BW_FILL_MON)
2094    }
2095
2096    /// Non-temporal writes to local memory is supported if set.
2097    pub fn has_l3_cache_lcl_bw_nt_wr_mon(&self) -> bool {
2098        self.ecx
2099            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_BW_NT_WR_MON)
2100    }
2101
2102    /// Non-temporal writes to remote memory is supported if set.
2103    pub fn has_l3_cache_rmt_bw_nt_wr_mon(&self) -> bool {
2104        self.ecx
2105            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_BW_NT_WR_MON)
2106    }
2107
2108    /// Reads to local memory identified as β€œSlow Memory” is supported if set.
2109    pub fn has_l3_cache_lcl_slow_bw_fill_mon(&self) -> bool {
2110        self.ecx
2111            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_LCL_SLOW_BW_FILL_MON)
2112    }
2113
2114    /// Reads to remote memory identified as β€œSlow Memory” is supported if set.
2115    pub fn has_l3_cache_rmt_slow_bw_fill_mon(&self) -> bool {
2116        self.ecx
2117            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_RMT_SLOW_BW_FILL_MON)
2118    }
2119
2120    /// Dirty victim writes to all types of memory is supported if set.
2121    pub fn has_l3_cache_vic_mon(&self) -> bool {
2122        self.ecx
2123            .contains(BandwidthMonitoringEventCountersEcx::L3_CACHE_VIC_MON)
2124    }
2125}
2126
bitflags! {
    // CPUID LEAF=0x8000_0020 sub-leaf 3, ECX: supported bandwidth events.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct BandwidthMonitoringEventCountersEcx: u32 {
        /// Reads to local DRAM memory (bit 0).
        const L3_CACHE_LCL_BW_FILL_MON = 1 << 0;
        /// Reads to remote DRAM memory (bit 1).
        const L3_CACHE_RMT_BW_FILL_MON = 1 << 1;
        /// Non-temporal writes to local memory (bit 2).
        const L3_CACHE_LCL_BW_NT_WR_MON = 1 << 2;
        /// Non-temporal writes to remote memory (bit 3).
        const L3_CACHE_RMT_BW_NT_WR_MON = 1 << 3;
        /// Reads to local "Slow Memory" (bit 4).
        const L3_CACHE_LCL_SLOW_BW_FILL_MON = 1 << 4;
        /// Reads to remote "Slow Memory" (bit 5).
        const L3_CACHE_RMT_SLOW_BW_FILL_MON = 1 << 5;
        /// Dirty victim writes to all types of memory (bit 6).
        const L3_CACHE_VIC_MON = 1 << 6;
    }
}
2140
/// Assignable Bandwidth Monitoring Counter Information (LEAF=0x8000_0020_x5).
///
/// (Doc title previously copy-pasted from the x1/x2 struct.)
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct AssignableBandwidthMonitoringCounterInfo {
    eax: u32, // bits [7:0]: counter width offset; bit 8: overflow-bit support
    ebx: u32, // bits [15:0]: maximum ABMC counter ID
    ecx: u32, // bit 0: counters can be keyed to a COS
    _edx: u32, // unused
}
2152
2153impl AssignableBandwidthMonitoringCounterInfo {
2154    pub(crate) fn new(data: CpuIdResult) -> Self {
2155        Self {
2156            eax: data.eax,
2157            ebx: data.ebx,
2158            ecx: data.ecx,
2159            _edx: data.edx,
2160        }
2161    }
2162
2163    /// Get QM_CTR counter width, offset from 24 bits
2164    pub fn counter_size(&self) -> u8 {
2165        get_bits(self.eax, 0, 7) as u8
2166    }
2167
2168    /// Indicates that QM_CTR bit 61 is an overflow bit if set
2169    pub fn has_overflow_bit(&self) -> bool {
2170        (self.eax & (1 << 8)) > 0
2171    }
2172
2173    /// Get Maximum supported ABMC counter ID
2174    pub fn max_abmc(&self) -> u16 {
2175        get_bits(self.ebx, 0, 15) as u16
2176    }
2177
2178    ///  Bandwidth counters can be configured to measure
2179    /// bandwidth consumed by a COS instead of an RMID if set
2180    pub fn has_select_cos(&self) -> bool {
2181        (self.ecx & 1) > 0
2182    }
2183}
2184
/// Extended Feature Identification 2 (LEAF=0x8000_0021).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedFeatureIdentification2 {
    eax: ExtendedFeatureIdentification2Eax, // decoded EAX feature bits
    ebx: u32, // bits [11:0]: microcode patch size
    _ecx: u32, // unused
    _edx: u32, // unused
}
2196
2197impl ExtendedFeatureIdentification2 {
2198    pub(crate) fn new(data: CpuIdResult) -> Self {
2199        Self {
2200            eax: ExtendedFeatureIdentification2Eax::from_bits_truncate(data.eax),
2201            ebx: data.ebx,
2202            _ecx: data.ecx,
2203            _edx: data.edx,
2204        }
2205    }
2206
2207    /// Processor ignores nested data breakpoints if set
2208    pub fn has_no_nested_data_bp(&self) -> bool {
2209        self.eax
2210            .contains(ExtendedFeatureIdentification2Eax::NO_NESTED_DATA_BP)
2211    }
2212
2213    /// LFENCE is always dispatch serializing if set
2214    pub fn has_lfence_always_serializing(&self) -> bool {
2215        self.eax
2216            .contains(ExtendedFeatureIdentification2Eax::LFENCE_ALWAYS_SERIALIZING)
2217    }
2218
2219    /// SMM paging configuration lock supported if set
2220    pub fn has_smm_pg_cfg_lock(&self) -> bool {
2221        self.eax
2222            .contains(ExtendedFeatureIdentification2Eax::SMM_PG_CFG_LOCK)
2223    }
2224
2225    /// Null segment selector loads also clear the destination segment register
2226    /// base and limit supported if set
2227    pub fn has_null_select_clears_base(&self) -> bool {
2228        self.eax
2229            .contains(ExtendedFeatureIdentification2Eax::NULL_SELECT_CLEARS_BASE)
2230    }
2231
2232    /// Upper Address Ignore is supported if set
2233    pub fn has_upper_address_ignore(&self) -> bool {
2234        self.eax
2235            .contains(ExtendedFeatureIdentification2Eax::UPPER_ADDRESS_IGNORE)
2236    }
2237
2238    /// Automatic IBRS if set
2239    pub fn has_automatic_ibrs(&self) -> bool {
2240        self.eax
2241            .contains(ExtendedFeatureIdentification2Eax::AUTOMATIC_IBRS)
2242    }
2243
2244    /// SMM_CTL MSR (C001_0116h) is not supported if set
2245    pub fn has_no_smm_ctl_msr(&self) -> bool {
2246        self.eax
2247            .contains(ExtendedFeatureIdentification2Eax::NO_SMM_CTL_MSR)
2248    }
2249
2250    /// Prefetch control MSR supported if set
2251    pub fn has_prefetch_ctl_msr(&self) -> bool {
2252        self.eax
2253            .contains(ExtendedFeatureIdentification2Eax::PREFETCH_CTL_MSR)
2254    }
2255
2256    /// CPUID disable for non-privileged software if set
2257    pub fn has_cpuid_user_dis(&self) -> bool {
2258        self.eax
2259            .contains(ExtendedFeatureIdentification2Eax::CPUID_USER_DIS)
2260    }
2261
2262    /// The size of the Microcode patch in 16-byte multiples. If 0, the size of the
2263    /// patch is at most 5568 (15C0h) bytes.
2264    pub fn microcode_patch_size(&self) -> u16 {
2265        get_bits(self.ebx, 0, 11) as u16
2266    }
2267}
2268
bitflags! {
    // CPUID LEAF=0x8000_0021, EAX feature bits. Bit positions are sparse;
    // unlisted bits are reserved.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFeatureIdentification2Eax: u32 {
        /// Nested data breakpoints ignored (bit 0).
        const NO_NESTED_DATA_BP = 1 << 0;
        /// LFENCE always dispatch serializing (bit 2).
        const LFENCE_ALWAYS_SERIALIZING = 1 << 2;
        /// SMM paging configuration lock (bit 3).
        const SMM_PG_CFG_LOCK = 1 << 3;
        /// Null selector loads clear base and limit (bit 6).
        const NULL_SELECT_CLEARS_BASE = 1 << 6;
        /// Upper Address Ignore (bit 7).
        const UPPER_ADDRESS_IGNORE = 1 << 7;
        /// Automatic IBRS (bit 8).
        const AUTOMATIC_IBRS = 1 << 8;
        /// SMM_CTL MSR not supported (bit 9).
        const NO_SMM_CTL_MSR = 1 << 9;
        /// Prefetch control MSR (bit 13).
        const PREFETCH_CTL_MSR = 1 << 13;
        /// CPUID disable for non-privileged software (bit 17).
        const CPUID_USER_DIS = 1 << 17;
    }
}
2284
/// Extended Performance Monitoring and Debug (LEAF=0x8000_0022).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedPerformanceMonitoringDebug {
    eax: ExtendedPerformanceMonitoringDebugEax, // decoded EAX feature bits
    ebx: u32, // counter counts: bits [3:0], [9:4] and [15:10]
    _ecx: u32, // unused
    _edx: u32, // unused
}
2296
2297impl ExtendedPerformanceMonitoringDebug {
2298    pub(crate) fn new(data: CpuIdResult) -> Self {
2299        Self {
2300            eax: ExtendedPerformanceMonitoringDebugEax::from_bits_truncate(data.eax),
2301            ebx: data.ebx,
2302            _ecx: data.ecx,
2303            _edx: data.edx,
2304        }
2305    }
2306
2307    /// Performance Monitoring Version 2 supported if set
2308    pub fn has_perf_mon_v2(&self) -> bool {
2309        self.eax
2310            .contains(ExtendedPerformanceMonitoringDebugEax::PERF_MON_V2)
2311    }
2312
2313    /// Last Branch Record Stack supported if set
2314    pub fn has_lbr_stack(&self) -> bool {
2315        self.eax
2316            .contains(ExtendedPerformanceMonitoringDebugEax::LBR_STACK)
2317    }
2318
2319    /// Freezing Core Performance Counters and LBR Stack on Core
2320    /// Performance Counter overflow supported if set
2321    pub fn has_lbr_and_pmc_freeze(&self) -> bool {
2322        self.eax
2323            .contains(ExtendedPerformanceMonitoringDebugEax::LBR_AND_PMC_FREEZE)
2324    }
2325
2326    /// Number of Core Performance Counters
2327    pub fn num_perf_ctr_core(&self) -> u8 {
2328        get_bits(self.ebx, 0, 3) as u8
2329    }
2330
2331    /// Number of Last Branch Record Stack entries
2332    pub fn num_lbr_stack_size(&self) -> u8 {
2333        get_bits(self.ebx, 4, 9) as u8
2334    }
2335
2336    /// Number of Northbridge Performance Monitor Counters
2337    pub fn num_perf_ctr_nb(&self) -> u8 {
2338        get_bits(self.ebx, 10, 15) as u8
2339    }
2340}
2341
bitflags! {
    // CPUID LEAF=0x8000_0022, EAX feature bits.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedPerformanceMonitoringDebugEax: u32 {
        /// Performance Monitoring Version 2 (bit 0).
        const PERF_MON_V2 = 1 << 0;
        /// Last Branch Record Stack (bit 1).
        const LBR_STACK = 1 << 1;
        /// Freeze PMCs and LBR stack on PMC overflow (bit 2).
        const LBR_AND_PMC_FREEZE = 1 << 2;
    }
}
2351
/// Multi-Key Encrypted Memory Capabilities (LEAF=0x8000_0023).
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct MultiKeyEncryptedMemoryCapabilities {
    eax: MultiKeyEncryptedMemoryCapabilitiesEax, // decoded EAX feature bits
    ebx: u32, // bits [15:0]: number of MEM-HMK encryption key IDs
    _ecx: u32, // unused
    _edx: u32, // unused
}
2363
2364impl MultiKeyEncryptedMemoryCapabilities {
2365    pub(crate) fn new(data: CpuIdResult) -> Self {
2366        Self {
2367            eax: MultiKeyEncryptedMemoryCapabilitiesEax::from_bits_truncate(data.eax),
2368            ebx: data.ebx,
2369            _ecx: data.ecx,
2370            _edx: data.edx,
2371        }
2372    }
2373
2374    /// Secure Host Multi-Key Memory (MEM-HMK) Encryption Mode Supported if set
2375    pub fn has_mem_hmk(&self) -> bool {
2376        self.eax
2377            .contains(MultiKeyEncryptedMemoryCapabilitiesEax::MEM_HMK)
2378    }
2379
2380    /// Number of simultaneously available host encryption key IDs in MEM-HMK
2381    /// encryption mode.
2382    pub fn max_mem_hmk_encr_key_id(&self) -> u16 {
2383        get_bits(self.ebx, 0, 15) as u16
2384    }
2385}
2386
bitflags! {
    // CPUID LEAF=0x8000_0023, EAX feature bits.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct MultiKeyEncryptedMemoryCapabilitiesEax: u32 {
        /// Secure Host Multi-Key Memory encryption mode (bit 0).
        const MEM_HMK = 1 << 0;
    }
}
2394
/// Extended CPU Topology (LEAF=0x8000_0026).
///
/// Iterates over the extended cpu topology in order to retrieve more information for logical
/// processors, including asymmetric and heterogeneous topology descriptions. Individual
/// logical processors may report different values in systems with asymmetric and
/// heterogeneous topologies
///
/// # Platforms
/// βœ… AMD ❌ Intel
#[derive(Clone)]
pub struct ExtendedCpuTopologyIter<R: CpuIdReader> {
    read: R,
    // Next ECX sub-leaf to query; advanced by each `next()` call.
    level: u32,
}
2409
2410impl<R: CpuIdReader> ExtendedCpuTopologyIter<R> {
2411    pub fn new(read: R) -> Self {
2412        Self { read, level: 0 }
2413    }
2414}
2415
/// Gives information about the current level in the cpu topology.
#[derive(PartialEq, Eq, Debug)]
pub struct ExtendedCpuTopologyLevel {
    eax: u32, // mask width [4:0] and level attribute bits [31:29]
    ebx: u32, // logical processor count, power ranking, native mode ID, core type
    ecx: u32, // input ECX [7:0] and level type [15:8]
    edx: u32, // extended APIC ID
}
2424
2425impl ExtendedCpuTopologyLevel {
2426    pub(crate) fn new(data: CpuIdResult) -> Self {
2427        Self {
2428            eax: data.eax,
2429            ebx: data.ebx,
2430            ecx: data.ecx,
2431            edx: data.edx,
2432        }
2433    }
2434
2435    /// Number of bits to shift Extended APIC ID right to get a unique topology ID
2436    /// of the current hierarchy level.
2437    pub fn mask_width(&self) -> u8 {
2438        get_bits(self.eax, 0, 4) as u8
2439    }
2440
2441    /// Set to 1 if processor power efficiency ranking (PwrEfficiencyRanking) is
2442    /// available and varies between cores. Only valid for LevelType = 1h (Core).
2443    pub fn has_efficiency_ranking_available(&self) -> bool {
2444        self.eax & (1 << 29) > 0
2445    }
2446
2447    /// Set to 1 if all components at the current hierarchy level do not consist of
2448    /// the cores that report the same core type (CoreType).
2449    pub fn has_heterogeneous_cores(&self) -> bool {
2450        self.eax & (1 << 30) > 0
2451    }
2452
2453    /// Set to 1 if all components at the current hierarchy level do not report the
2454    /// same number of logical processors (NumLogProc).
2455    pub fn has_asymmetric_topology(&self) -> bool {
2456        self.eax & (1 << 31) > 0
2457    }
2458
2459    /// Number of logical processors at the current hierarchy level
2460    pub fn num_logical_processors(&self) -> u16 {
2461        get_bits(self.ebx, 0, 15) as u16
2462    }
2463
2464    /// Reports a static efficiency ranking between cores of a specific core type,
2465    /// where a lower value indicates comparatively lower power consumption
2466    /// and lower performance. Only valid for LevelType = 1h (Core)
2467    pub fn pwr_efficiency_ranking(&self) -> u8 {
2468        get_bits(self.ebx, 16, 23) as u8
2469    }
2470
2471    /// Reports a value that may be used to further differentiate implementation
2472    /// specific features. Native mode ID is used in conjunction with the family,
2473    /// model, and stepping identifiers. Refer to the Processor Programming
2474    /// Reference Manual applicable to your product for a list of Native Mode
2475    /// IDs. Only valid for LevelType = 1h (Core)
2476    pub fn native_mode_id(&self) -> u8 {
2477        get_bits(self.ebx, 24, 27) as u8
2478    }
2479
2480    /// Reports a value that may be used to distinguish between cores with
2481    /// different architectural and microarchitectural properties (for example,
2482    /// cores with different performance or power characteristics). Refer to the
2483    /// Processor Programming Reference Manual applicable to your product for
2484    /// a list of the available core types. Only valid for LevelType = 1h (Core)
2485    pub fn core_type(&self) -> u8 {
2486        get_bits(self.ebx, 28, 31) as u8
2487    }
2488
2489    /// Input ECX
2490    pub fn input_ecx(&self) -> u8 {
2491        get_bits(self.ecx, 0, 7) as u8
2492    }
2493
2494    /// Encoded hierarchy level type
2495    pub fn level_type(&self) -> HierarchyLevelType {
2496        HierarchyLevelType::from(get_bits(self.ecx, 8, 15) as u8)
2497    }
2498
2499    /// Extended APIC ID of the logical processor
2500    pub fn extended_apic_id(&self) -> u32 {
2501        self.edx
2502    }
2503}
2504
2505impl<R: CpuIdReader> Iterator for ExtendedCpuTopologyIter<R> {
2506    type Item = ExtendedCpuTopologyLevel;
2507
2508    fn next(&mut self) -> Option<ExtendedCpuTopologyLevel> {
2509        let res = self.read.cpuid2(EAX_EXTENDED_CPU_TOPOLOGY, self.level);
2510        self.level += 1;
2511
2512        let ect = ExtendedCpuTopologyLevel::new(res);
2513        if ect.level_type() == HierarchyLevelType::Reserved {
2514            None
2515        } else {
2516            Some(ect)
2517        }
2518    }
2519}
2520
/// Hierarchy level type reported by the extended CPU topology leaf
/// (decoded from ECX bits [15:8], see `ExtendedCpuTopologyLevel::level_type`).
#[repr(u8)]
#[derive(PartialEq, Eq)]
pub enum HierarchyLevelType {
    /// Reserved; terminates topology enumeration.
    Reserved = 0,
    /// Core level.
    Core = 1,
    /// Complex level.
    Complex = 2,
    /// Die level.
    Die = 3,
    /// Socket level.
    Socket = 4,
    /// Any encoding not known to this crate (raw value preserved).
    Unknown(u8),
}
2531
2532impl From<u8> for HierarchyLevelType {
2533    fn from(value: u8) -> Self {
2534        match value {
2535            0 => Self::Reserved,
2536            1 => Self::Core,
2537            2 => Self::Complex,
2538            3 => Self::Die,
2539            4 => Self::Socket,
2540            x => Self::Unknown(x),
2541        }
2542    }
2543}
2544
2545impl Display for HierarchyLevelType {
2546    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
2547        match self {
2548            HierarchyLevelType::Reserved => write!(f, "Reserved (0)"),
2549            HierarchyLevelType::Core => write!(f, "Core (1)"),
2550            HierarchyLevelType::Complex => write!(f, "Complex (2)"),
2551            HierarchyLevelType::Die => write!(f, "DIE (3)"),
2552            HierarchyLevelType::Socket => write!(f, "Socket (4)"),
2553            HierarchyLevelType::Unknown(x) => write!(f, "Unknown ({x})"),
2554        }
2555    }
2556}
2557
2558impl Debug for HierarchyLevelType {
2559    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
2560        match self {
2561            Self::Reserved => write!(f, "Reserved"),
2562            Self::Core => write!(f, "Core"),
2563            Self::Complex => write!(f, "Complex"),
2564            Self::Die => write!(f, "Die"),
2565            Self::Socket => write!(f, "Socket"),
2566            Self::Unknown(arg0) => f.debug_tuple("Unknown").field(arg0).finish(),
2567        }
2568    }
2569}