raw_cpuid/
extended.rs

//! Data structures and interpretation for extended leafs (>= 0x8000_0000)
use bitflags::bitflags;
use core::fmt::{self, Debug, Display, Formatter};
use core::mem::size_of;
use core::slice;
use core::str;

use crate::{get_bits, CpuIdResult, Vendor};

/// Extended Processor and Processor Feature Identifiers (LEAF=0x8000_0001)
///
/// # Platforms
/// ✅ AMD 🟡 Intel
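///
/// # Example
///
/// A minimal sketch of querying this leaf. It assumes the crate's `CpuId` type
/// exposes a `get_extended_processor_and_feature_identifiers()` accessor (not
/// defined in this file), so the example is marked `ignore`:
///
/// ```ignore
/// use raw_cpuid::CpuId;
///
/// let cpuid = CpuId::new();
/// if let Some(ext) = cpuid.get_extended_processor_and_feature_identifiers() {
///     // LAHF/SAHF availability in 64-bit mode is reported by both AMD and Intel.
///     println!("lahf/sahf in 64-bit mode: {}", ext.has_lahf_sahf());
/// }
/// ```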
pub struct ExtendedProcessorFeatureIdentifiers {
    vendor: Vendor,
    eax: u32,
    ebx: u32,
    ecx: ExtendedFunctionInfoEcx,
    edx: ExtendedFunctionInfoEdx,
}

impl ExtendedProcessorFeatureIdentifiers {
    pub(crate) fn new(vendor: Vendor, data: CpuIdResult) -> Self {
        Self {
            vendor,
            eax: data.eax,
            ebx: data.ebx,
            ecx: ExtendedFunctionInfoEcx::from_bits_truncate(data.ecx),
            edx: ExtendedFunctionInfoEdx::from_bits_truncate(data.edx),
        }
    }

    /// Extended Processor Signature.
    ///
    /// # AMD
    /// The value returned is the same as the value returned in EAX for LEAF=0x0000_0001
    /// (use `CpuId.get_feature_info` instead)
    ///
    /// # Intel
    /// Vague mention of "Extended Processor Signature", not clear what it's supposed to
    /// represent.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn extended_signature(&self) -> u32 {
        self.eax
    }

    /// Returns package type on AMD.
    ///
    /// Package type. If `(Family[7:0] >= 10h)`, this field is valid. If
    /// `(Family[7:0] < 10h)`, this field is reserved.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved)
    pub fn pkg_type(&self) -> u32 {
        get_bits(self.ebx, 28, 31)
    }

    /// Returns brand ID on AMD.
    ///
    /// This field, in conjunction with CPUID `LEAF=0x0000_0001_EBX[8BitBrandId]`, is used
    /// by firmware to generate the processor name string.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved)
    pub fn brand_id(&self) -> u32 {
        get_bits(self.ebx, 0, 15)
    }

    /// Is LAHF/SAHF available in 64-bit mode?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_lahf_sahf(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::LAHF_SAHF)
    }

    /// Check support for legacy CMP (core multi-processing legacy mode).
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_cmp_legacy(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::CMP_LEGACY)
    }

    /// Secure virtual machine supported.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_svm(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SVM)
    }

    /// Extended APIC space.
    ///
    /// This bit indicates the presence of extended APIC register space starting at offset
    /// 400h from the “APIC Base Address Register,” as specified in the BKDG.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_ext_apic_space(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::EXT_APIC_SPACE)
    }

    /// LOCK MOV CR0 means MOV CR8. See “MOV(CRn)” in APM3.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_alt_mov_cr8(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ALTMOVCR8)
    }

    /// Is LZCNT available?
    ///
    /// # AMD
    /// It's called ABM (Advanced bit manipulation) on AMD and also adds support for
    /// some other instructions.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_lzcnt(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::LZCNT)
    }

    /// EXTRQ, INSERTQ, MOVNTSS, and MOVNTSD instruction support.
    ///
    /// See “EXTRQ”, “INSERTQ”, “MOVNTSS”, and “MOVNTSD” in APM4.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_sse4a(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SSE4A)
    }

    /// Misaligned SSE mode. See “Misaligned Access Support Added for SSE Instructions” in
    /// APM1.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_misaligned_sse_mode(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MISALIGNSSE)
    }

    /// Is PREFETCHW available?
    ///
    /// # AMD
    /// PREFETCH and PREFETCHW instruction support.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_prefetchw(&self) -> bool {
        self.ecx.contains(ExtendedFunctionInfoEcx::PREFETCHW)
    }

    /// Indicates OS-visible workaround support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_osvw(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::OSVW)
    }

    /// Instruction-based sampling.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_ibs(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::IBS)
    }

    /// Extended operation support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_xop(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::XOP)
    }

    /// SKINIT and STGI are supported.
    ///
    /// Indicates support for SKINIT and STGI, independent of the value of
    /// `MSRC000_0080[SVME]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_skinit(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::SKINIT)
    }

    /// Watchdog timer support.
    ///
    /// Indicates support for MSRC001_0074.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_wdt(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::WDT)
    }

    /// Lightweight profiling support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_lwp(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::LWP)
    }

    /// Four-operand FMA instruction support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_fma4(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::FMA4)
    }

    /// Trailing bit manipulation instruction support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_tbm(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TBM)
    }

    /// Topology extensions support.
    ///
    /// Indicates support for CPUID `Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_topology_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::TOPEXT)
    }

    /// Processor performance counter extensions support.
    ///
    /// Indicates support for `MSRC001_020[A,8,6,4,2,0]` and `MSRC001_020[B,9,7,5,3,1]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_cntr_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXT)
    }

    /// NB performance counter extensions support.
    ///
    /// Indicates support for `MSRC001_024[6,4,2,0]` and `MSRC001_024[7,5,3,1]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_nb_perf_cntr_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTNB)
    }

    /// Data access breakpoint extension.
    ///
    /// Indicates support for `MSRC001_1027` and `MSRC001_101[B:9]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_data_access_bkpt_extension(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::DATABRKPEXT)
    }

    /// Performance time-stamp counter.
    ///
    /// Indicates support for `MSRC001_0280` `[Performance Time Stamp Counter]`.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_tsc(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFTSC)
    }

    /// Support for L3 performance counter extension.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_perf_cntr_llc_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::PERFCTREXTLLC)
    }

    /// Support for MWAITX and MONITORX instructions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_monitorx_mwaitx(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::MONITORX)
    }

    /// Breakpoint address masking extended to bit 31.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_addr_mask_extension(&self) -> bool {
        self.vendor == Vendor::Amd && self.ecx.contains(ExtendedFunctionInfoEcx::ADDRMASKEXT)
    }

    /// Are fast system calls available?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_syscall_sysret(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::SYSCALL_SYSRET)
    }

    /// Is there support for the execute-disable bit?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_execute_disable(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::EXECUTE_DISABLE)
    }

    /// AMD extensions to MMX instructions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_mmx_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::MMXEXT)
    }

    /// FXSAVE and FXRSTOR instruction optimizations.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_fast_fxsave_fxstor(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::FFXSR)
    }

    /// Is there support for 1-GiB pages?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_1gib_pages(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::GIB_PAGES)
    }

    /// Check support for the RDTSCP instruction.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_rdtscp(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::RDTSCP)
    }

    /// Check support for 64-bit mode.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_64bit_mode(&self) -> bool {
        self.edx.contains(ExtendedFunctionInfoEdx::I64BIT_MODE)
    }

    /// AMD extensions to 3DNow! instructions.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_amd_3dnow_extensions(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOWEXT)
    }

    /// 3DNow! instruction support.
    ///
    /// # Platform
    /// ✅ AMD ❌ Intel (will return false)
    pub fn has_3dnow(&self) -> bool {
        self.vendor == Vendor::Amd && self.edx.contains(ExtendedFunctionInfoEdx::THREEDNOW)
    }
}

impl Debug for ExtendedProcessorFeatureIdentifiers {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        let mut ds = f.debug_struct("ExtendedProcessorFeatureIdentifiers");
        ds.field("extended_signature", &self.extended_signature());

        if self.vendor == Vendor::Amd {
            ds.field("pkg_type", &self.pkg_type());
            ds.field("brand_id", &self.brand_id());
        }
        ds.field("ecx_features", &self.ecx);
        ds.field("edx_features", &self.edx);
        ds.finish()
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEcx: u32 {
        const LAHF_SAHF = 1 << 0;
        const CMP_LEGACY = 1 << 1;
        const SVM = 1 << 2;
        const EXT_APIC_SPACE = 1 << 3;
        const ALTMOVCR8 = 1 << 4;
        const LZCNT = 1 << 5;
        const SSE4A = 1 << 6;
        const MISALIGNSSE = 1 << 7;
        const PREFETCHW = 1 << 8;
        const OSVW = 1 << 9;
        const IBS = 1 << 10;
        const XOP = 1 << 11;
        const SKINIT = 1 << 12;
        const WDT = 1 << 13;
        const LWP = 1 << 15;
        const FMA4 = 1 << 16;
        const TBM = 1 << 21;
        const TOPEXT = 1 << 22;
        const PERFCTREXT = 1 << 23;
        const PERFCTREXTNB = 1 << 24;
        const DATABRKPEXT = 1 << 26;
        const PERFTSC = 1 << 27;
        const PERFCTREXTLLC = 1 << 28;
        const MONITORX = 1 << 29;
        const ADDRMASKEXT = 1 << 30;
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ExtendedFunctionInfoEdx: u32 {
        const SYSCALL_SYSRET = 1 << 11;
        const EXECUTE_DISABLE = 1 << 20;
        const MMXEXT = 1 << 22;
        const FFXSR = 1 << 24;
        const GIB_PAGES = 1 << 26;
        const RDTSCP = 1 << 27;
        const I64BIT_MODE = 1 << 29;
        const THREEDNOWEXT = 1 << 30;
        const THREEDNOW = 1 << 31;
    }
}

/// Processor name (LEAF=0x8000_0002..=0x8000_0004).
///
/// ASCII string up to 48 characters in length corresponding to the processor name.
///
/// # Platforms
/// ✅ AMD ✅ Intel
pub struct ProcessorBrandString {
    data: [CpuIdResult; 3],
}

impl ProcessorBrandString {
    pub(crate) fn new(data: [CpuIdResult; 3]) -> Self {
        Self { data }
    }

    /// Return the processor brand string as a Rust string.
    ///
    /// For example:
    /// "11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz".
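    ///
    /// A short usage sketch; it assumes `CpuId` exposes a
    /// `get_processor_brand_string()` accessor (not defined in this file), so
    /// the example is marked `ignore`:
    ///
    /// ```ignore
    /// use raw_cpuid::CpuId;
    ///
    /// let cpuid = CpuId::new();
    /// if let Some(brand) = cpuid.get_processor_brand_string() {
    ///     println!("CPU: {}", brand.as_str());
    /// }
    /// ```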
    pub fn as_str(&self) -> &str {
        // Safety: CpuIdResult is laid out with repr(C), and the array
        // self.data contains 3 contiguous elements.
        let slice: &[u8] = unsafe {
            slice::from_raw_parts(
                self.data.as_ptr() as *const u8,
                self.data.len() * size_of::<CpuIdResult>(),
            )
        };

        // Brand terminated at nul byte or end, whichever comes first.
        let slice = slice.split(|&x| x == 0).next().unwrap();
        str::from_utf8(slice)
            .unwrap_or("Invalid Processor Brand String")
            .trim()
    }
}

impl Debug for ProcessorBrandString {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ProcessorBrandString")
            .field("as_str", &self.as_str())
            .finish()
    }
}

/// L1 Cache and TLB Information (LEAF=0x8000_0005).
///
/// # Availability
/// ✅ AMD ❌ Intel (reserved=0)
#[derive(PartialEq, Eq, Debug)]
pub struct L1CacheTlbInfo {
    eax: u32,
    ebx: u32,
    ecx: u32,
    edx: u32,
}

impl L1CacheTlbInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Data TLB associativity for 2-MB and 4-MB pages.
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 24, 31) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Data TLB number of entries for 2-MB and 4-MB pages.
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    pub fn dtlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 16, 23) as u8
    }

    /// Instruction TLB associativity for 2-MB and 4-MB pages.
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 8, 15) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Instruction TLB number of entries for 2-MB and 4-MB pages.
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    pub fn itlb_2m_4m_size(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Data TLB associativity for 4K pages.
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 24, 31) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Data TLB number of entries for 4K pages.
    pub fn dtlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 16, 23) as u8
    }

    /// Instruction TLB associativity for 4K pages.
    pub fn itlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 8, 15) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// Instruction TLB number of entries for 4K pages.
    pub fn itlb_4k_size(&self) -> u8 {
        get_bits(self.ebx, 0, 7) as u8
    }

    /// L1 data cache size in KB.
    pub fn dcache_size(&self) -> u8 {
        get_bits(self.ecx, 24, 31) as u8
    }

    /// L1 data cache associativity.
    pub fn dcache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ecx, 16, 23) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// L1 data cache lines per tag.
    pub fn dcache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 15) as u8
    }

    /// L1 data cache line size in bytes.
    pub fn dcache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L1 instruction cache size in KB.
    pub fn icache_size(&self) -> u8 {
        get_bits(self.edx, 24, 31) as u8
    }

    /// L1 instruction cache associativity.
    pub fn icache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.edx, 16, 23) as u8;
        Associativity::for_l1(assoc_bits)
    }

    /// L1 instruction cache lines per tag.
    pub fn icache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 15) as u8
    }

    /// L1 instruction cache line size in bytes.
    pub fn icache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }
}

/// L2/L3 Cache and TLB Information (LEAF=0x8000_0006).
///
/// # Availability
/// ✅ AMD 🟡 Intel
#[derive(PartialEq, Eq, Debug)]
pub struct L2And3CacheTlbInfo {
    eax: u32,
    ebx: u32,
    ecx: u32,
    edx: u32,
}

impl L2And3CacheTlbInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// L2 Data TLB associativity for 2-MB and 4-MB pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Data TLB number of entries for 2-MB and 4-MB pages.
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 16, 27) as u16
    }

    /// L2 Instruction TLB associativity for 2-MB and 4-MB pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_2m_4m_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Instruction TLB number of entries for 2-MB and 4-MB pages.
    ///
    /// The value returned is for the number of entries available for the 2-MB page size;
    /// 4-MB pages require two 2-MB entries, so the number of entries available for the
    /// 4-MB page size is one-half the returned value.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_2m_4m_size(&self) -> u16 {
        get_bits(self.eax, 0, 11) as u16
    }

    /// L2 Data TLB associativity for 4K pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Data TLB number of entries for 4K pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn dtlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 16, 27) as u16
    }

    /// L2 Instruction TLB associativity for 4K pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_4k_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Instruction TLB number of entries for 4K pages.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn itlb_4k_size(&self) -> u16 {
        get_bits(self.ebx, 0, 11) as u16
    }

    /// L2 cache line size in bytes.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn l2cache_line_size(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// L2 cache lines per tag.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l2cache_lines_per_tag(&self) -> u8 {
        get_bits(self.ecx, 8, 11) as u8
    }

    /// L2 associativity field.
    ///
    /// # Availability
    /// ✅ AMD ✅ Intel
    pub fn l2cache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ecx, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 cache size in KB.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn l2cache_size(&self) -> u16 {
        get_bits(self.ecx, 16, 31) as u16
    }

    /// L3 cache line size in bytes.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_line_size(&self) -> u8 {
        get_bits(self.edx, 0, 7) as u8
    }

    /// L3 cache lines per tag.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_lines_per_tag(&self) -> u8 {
        get_bits(self.edx, 8, 11) as u8
    }

    /// L3 associativity field.
    ///
    /// # Availability
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn l3cache_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.edx, 12, 15) as u8;
        Associativity::for_l3(assoc_bits)
    }

    /// Specifies the L3 cache size range:
    ///
    /// `(L3Size[31:18] * 512KB) <= L3 cache size < ((L3Size[31:18]+1) * 512KB)`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
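    ///
    /// As a worked instance of the formula above: a returned value of 32 means
    /// the L3 cache is at least `32 * 512 KB = 16 MB` and smaller than
    /// `33 * 512 KB = 16.5 MB`.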
    pub fn l3cache_size(&self) -> u16 {
        get_bits(self.edx, 18, 31) as u16
    }
}

/// Info about cache associativity.
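///
/// # Example
///
/// The `Display` implementation below renders the variants as shown here
/// (assuming the enum is re-exported at the crate root, which is not
/// established in this file, so the example is marked `ignore`):
///
/// ```ignore
/// use raw_cpuid::Associativity;
///
/// assert_eq!(Associativity::NWay(8).to_string(), "NWay(8)");
/// assert_eq!(Associativity::FullyAssociative.to_string(), "Fully associative");
/// ```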
#[derive(PartialEq, Eq, Debug)]
pub enum Associativity {
    Disabled,
    DirectMapped,
    NWay(u8),
    FullyAssociative,
    Unknown,
}

impl Display for Associativity {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let s = match self {
            Associativity::Disabled => "Disabled",
            Associativity::DirectMapped => "Direct mapped",
            Associativity::NWay(n) => {
                return write!(f, "NWay({})", n);
            }
            Associativity::FullyAssociative => "Fully associative",
            Associativity::Unknown => "Unknown (check leaf 0x8000_001d)",
        };
        f.write_str(s)
    }
}

impl Associativity {
    /// Constructor for L1 Cache and TLB Associativity Field Encodings.
    fn for_l1(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled, // Intel only, AMD is reserved
            0x1 => Associativity::DirectMapped,
            0x2..=0xfe => Associativity::NWay(n),
            0xff => Associativity::FullyAssociative,
        }
    }

    /// Constructor for L2 Cache and TLB Associativity Field Encodings.
    fn for_l2(n: u8) -> Associativity {
        match n {
            0x0 => Associativity::Disabled,
            0x1 => Associativity::DirectMapped,
            0x2 => Associativity::NWay(2),
            0x4 => Associativity::NWay(4),
            0x5 => Associativity::NWay(6), // Reserved on Intel
            0x6 => Associativity::NWay(8),
            // 0x7 => SDM states: "See CPUID leaf 04H, sub-leaf 2"
            0x8 => Associativity::NWay(16),
            0x9 => Associativity::Unknown, // Intel: Reserved, AMD: Value for all fields should be determined from Fn8000_001D
            0xa => Associativity::NWay(32),
            0xb => Associativity::NWay(48),
            0xc => Associativity::NWay(64),
            0xd => Associativity::NWay(96),
            0xe => Associativity::NWay(128),
            0xf => Associativity::FullyAssociative,
            _ => Associativity::Unknown,
        }
    }

    /// Constructor for L3 Cache and TLB Associativity Field Encodings.
    fn for_l3(n: u8) -> Associativity {
        Associativity::for_l2(n)
    }
}

/// Processor Power Management and RAS Capabilities (LEAF=0x8000_0007).
///
/// # Platforms
/// ✅ AMD 🟡 Intel
#[derive(Debug, PartialEq, Eq)]
pub struct ApmInfo {
    /// Reserved on AMD and Intel.
    _eax: u32,
    ebx: RasCapabilities,
    ecx: u32,
    edx: ApmInfoEdx,
}

impl ApmInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            _eax: data.eax,
            ebx: RasCapabilities::from_bits_truncate(data.ebx),
            ecx: data.ecx,
            edx: ApmInfoEdx::from_bits_truncate(data.edx),
        }
    }

    /// Is MCA overflow recovery available?
    ///
    /// If set, indicates that MCA overflow conditions (`MCi_STATUS[Overflow]=1`)
    /// are not fatal; software may safely ignore such conditions. If clear, MCA
    /// overflow conditions require software to shut down the system.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_mca_overflow_recovery(&self) -> bool {
        self.ebx.contains(RasCapabilities::MCAOVFLRECOV)
    }

    /// Has software uncorrectable error containment and recovery capability?
    ///
    /// The processor supports software containment of uncorrectable errors
    /// through context synchronizing data poisoning and deferred error
    /// interrupts.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_succor(&self) -> bool {
        self.ebx.contains(RasCapabilities::SUCCOR)
    }

    /// Is hardware assert supported?
    ///
    /// Indicates support for `MSRC001_10[DF:C0]`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_hwa(&self) -> bool {
        self.ebx.contains(RasCapabilities::HWA)
    }

    /// Specifies the ratio of the compute unit power accumulator sample period
    /// to the TSC counter period.
    ///
    /// Returns a value of 0 if not applicable for the system.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn cpu_pwr_sample_time_ratio(&self) -> u32 {
        self.ecx
    }

    /// Is the temperature sensor available?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_ts(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TS)
    }

    /// Frequency ID control.
    ///
    /// # Note
    /// Function replaced by `has_hw_pstate`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_freq_id_ctrl(&self) -> bool {
        self.edx.contains(ApmInfoEdx::FID)
    }

    /// Voltage ID control.
    ///
    /// # Note
    /// Function replaced by `has_hw_pstate`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_volt_id_ctrl(&self) -> bool {
        self.edx.contains(ApmInfoEdx::VID)
    }

    /// Has THERMTRIP?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_thermtrip(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TTP)
    }

    /// Hardware thermal control (HTC)?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_tm(&self) -> bool {
        self.edx.contains(ApmInfoEdx::TM)
    }

    /// Has 100 MHz multiplier control?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_100mhz_steps(&self) -> bool {
        self.edx.contains(ApmInfoEdx::MHZSTEPS100)
    }

    /// Has hardware P-state control?
    ///
    /// MSRC001_0061 [P-state Current Limit], MSRC001_0062 [P-state Control] and
    /// MSRC001_0063 [P-state Status] exist.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_hw_pstate(&self) -> bool {
        self.edx.contains(ApmInfoEdx::HWPSTATE)
    }

    /// Is invariant TSC available?
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_invariant_tsc(&self) -> bool {
        self.edx.contains(ApmInfoEdx::INVTSC)
    }

    /// Has core performance boost?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_cpb(&self) -> bool {
        self.edx.contains(ApmInfoEdx::CPB)
    }

    /// Has read-only effective frequency interface?
    ///
    /// Indicates presence of MSRC000_00E7 [Read-Only Max Performance Frequency
    /// Clock Count (MPerfReadOnly)] and MSRC000_00E8 [Read-Only Actual
    /// Performance Frequency Clock Count (APerfReadOnly)].
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_ro_effective_freq_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::EFFFREQRO)
    }

    /// Indicates support for the processor feedback interface.
    ///
    /// # Note
    /// This feature is deprecated.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_feedback_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::PROCFEEDBACKIF)
    }

    /// Has processor power reporting interface?
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_power_reporting_iface(&self) -> bool {
        self.edx.contains(ApmInfoEdx::PROCPWRREPORT)
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ApmInfoEdx: u32 {
        const TS = 1 << 0;
        const FID = 1 << 1;
        const VID = 1 << 2;
        const TTP = 1 << 3;
        const TM = 1 << 4;
        const MHZSTEPS100 = 1 << 6;
        const HWPSTATE = 1 << 7;
        const INVTSC = 1 << 8;
        const CPB = 1 << 9;
        const EFFFREQRO = 1 << 10;
        const PROCFEEDBACKIF = 1 << 11;
        const PROCPWRREPORT = 1 << 12;
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct RasCapabilities: u32 {
        const MCAOVFLRECOV = 1 << 0;
        const SUCCOR = 1 << 1;
        const HWA = 1 << 2;
    }
}

/// Processor Capacity Parameters and Extended Feature Identification
/// (LEAF=0x8000_0008).
///
/// This function provides the size or capacity of various architectural
/// parameters that vary by implementation, as well as an extension to the
/// 0x8000_0001 feature identifiers.
///
/// # Platforms
/// ✅ AMD 🟡 Intel
#[derive(PartialEq, Eq)]
pub struct ProcessorCapacityAndFeatureInfo {
    eax: u32,
    ebx: ProcessorCapacityAndFeatureEbx,
    ecx: u32,
    edx: u32,
}

impl ProcessorCapacityAndFeatureInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: ProcessorCapacityAndFeatureEbx::from_bits_truncate(data.ebx),
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Physical address bits.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn physical_address_bits(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Linear address bits.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn linear_address_bits(&self) -> u8 {
        get_bits(self.eax, 8, 15) as u8
    }

    /// Guest physical address bits.
    ///
    /// This number applies only to guests using nested paging. When this field
    /// is zero, refer to the PhysAddrSize field for the maximum guest physical
    /// address size.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn guest_physical_address_bits(&self) -> u8 {
        get_bits(self.eax, 16, 23) as u8
    }

    /// CLZERO instruction supported if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_cl_zero(&self) -> bool {
        self.ebx.contains(ProcessorCapacityAndFeatureEbx::CLZERO)
    }

    /// Instruction Retired Counter MSR available if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_inst_ret_cntr_msr(&self) -> bool {
        self.ebx
            .contains(ProcessorCapacityAndFeatureEbx::INST_RETCNT_MSR)
    }

    /// FP error pointers restored by XRSTOR if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_restore_fp_error_ptrs(&self) -> bool {
        self.ebx
            .contains(ProcessorCapacityAndFeatureEbx::RSTR_FP_ERR_PTRS)
    }

    /// INVLPGB and TLBSYNC instructions supported if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_invlpgb(&self) -> bool {
        self.ebx.contains(ProcessorCapacityAndFeatureEbx::INVLPGB)
    }

    /// RDPRU instruction supported if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_rdpru(&self) -> bool {
        self.ebx.contains(ProcessorCapacityAndFeatureEbx::RDPRU)
    }

    /// MCOMMIT instruction supported if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_mcommit(&self) -> bool {
        self.ebx.contains(ProcessorCapacityAndFeatureEbx::MCOMMIT)
    }

    /// WBNOINVD instruction supported if set.
    ///
    /// # Platforms
    /// ✅ AMD ✅ Intel
    pub fn has_wbnoinvd(&self) -> bool {
        self.ebx.contains(ProcessorCapacityAndFeatureEbx::WBNOINVD)
    }

    /// WBINVD/WBNOINVD are interruptible if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_int_wbinvd(&self) -> bool {
        self.ebx
            .contains(ProcessorCapacityAndFeatureEbx::INT_WBINVD)
    }

    /// EFER.LMSLE is unsupported if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_unsupported_efer_lmsle(&self) -> bool {
        self.ebx
            .contains(ProcessorCapacityAndFeatureEbx::EFER_LMSLE_UNSUPP)
    }

    /// INVLPGB support for invalidating guest nested translations if set.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=false)
    pub fn has_invlpgb_nested(&self) -> bool {
        self.ebx
            .contains(ProcessorCapacityAndFeatureEbx::INVLPGB_NESTED)
    }

    /// Performance time-stamp counter size (in bits).
    ///
    /// Indicates the size of `MSRC001_0280[PTSC]`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn perf_tsc_size(&self) -> usize {
        let s = get_bits(self.ecx, 16, 17) as u8;
        match s & 0b11 {
            0b00 => 40,
            0b01 => 48,
            0b10 => 56,
            0b11 => 64,
            _ => unreachable!("AND with 0b11 in match"),
        }
    }

    /// APIC ID size.
    ///
    /// A value of zero indicates that legacy methods must be used to determine
    /// the maximum number of logical processors, as indicated by CPUID
    /// `Fn8000_0008_ECX[NC]`.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn apic_id_size(&self) -> u8 {
        get_bits(self.ecx, 12, 15) as u8
    }

    /// The value of `apic_id_size` determines the maximum number of logical
    /// processors (MNLP) that the package could theoretically support, not the
    /// actual number of logical processors that are implemented or enabled in
    /// the package, as indicated by CPUID `Fn8000_0008_ECX[NC]`.
    ///
    /// `MNLP = (2 raised to the power of ApicIdSize[3:0])` (if not 0)
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
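    ///
    /// For example, an `apic_id_size()` of 4 yields `2^4 = 16` as the maximum
    /// number of logical processors.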
    pub fn maximum_logical_processors(&self) -> usize {
        usize::pow(2, self.apic_id_size() as u32)
    }

    /// Number of physical threads in the processor.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn num_phys_threads(&self) -> usize {
        get_bits(self.ecx, 0, 7) as usize + 1
    }

    /// Maximum page count for the INVLPGB instruction.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn invlpgb_max_pages(&self) -> u16 {
        get_bits(self.edx, 0, 15) as u16
    }

    /// The maximum ECX value recognized by RDPRU.
    ///
    /// # Platforms
    /// ✅ AMD ❌ Intel (reserved=0)
    pub fn max_rdpru_id(&self) -> u16 {
        get_bits(self.edx, 16, 31) as u16
    }
}

impl Debug for ProcessorCapacityAndFeatureInfo {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProcessorCapacityAndFeatureInfo")
            .field("physical_address_bits", &self.physical_address_bits())
            .field("linear_address_bits", &self.linear_address_bits())
            .field(
                "guest_physical_address_bits",
                &self.guest_physical_address_bits(),
            )
            .field("has_cl_zero", &self.has_cl_zero())
            .field("has_inst_ret_cntr_msr", &self.has_inst_ret_cntr_msr())
            .field(
                "has_restore_fp_error_ptrs",
                &self.has_restore_fp_error_ptrs(),
            )
            .field("has_invlpgb", &self.has_invlpgb())
            .field("has_rdpru", &self.has_rdpru())
            .field("has_mcommit", &self.has_mcommit())
            .field("has_wbnoinvd", &self.has_wbnoinvd())
            .field("has_int_wbinvd", &self.has_int_wbinvd())
            .field(
                "has_unsupported_efer_lmsle",
                &self.has_unsupported_efer_lmsle(),
            )
            .field("has_invlpgb_nested", &self.has_invlpgb_nested())
            .field("perf_tsc_size", &self.perf_tsc_size())
            .field("apic_id_size", &self.apic_id_size())
            .field(
                "maximum_logical_processors",
                &self.maximum_logical_processors(),
            )
            .field("num_phys_threads", &self.num_phys_threads())
            .field("invlpgb_max_pages", &self.invlpgb_max_pages())
            .field("max_rdpru_id", &self.max_rdpru_id())
            .finish()
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ProcessorCapacityAndFeatureEbx: u32 {
        const CLZERO = 1 << 0;
        const INST_RETCNT_MSR = 1 << 1;
        const RSTR_FP_ERR_PTRS = 1 << 2;
        const INVLPGB = 1 << 3;
        const RDPRU = 1 << 4;
        const MCOMMIT = 1 << 8;
        const WBNOINVD = 1 << 9;
        const INT_WBINVD = 1 << 13;
        const EFER_LMSLE_UNSUPP = 1 << 20;
        const INVLPGB_NESTED = 1 << 21;
    }
}

/// Information about the SVM features that the processor supports (LEAF=0x8000_000A).
///
/// # Note
/// If SVM is not supported ([ExtendedProcessorFeatureIdentifiers::has_svm] is false),
/// this leaf is reserved ([crate::CpuId] will return None in this case).
///
/// # Platforms
/// ✅ AMD ❌ Intel
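///
/// # Example
///
/// A sketch of reading this leaf; it assumes `CpuId` exposes a `get_svm_info()`
/// accessor (not defined in this file), so the example is marked `ignore`:
///
/// ```ignore
/// use raw_cpuid::CpuId;
///
/// let cpuid = CpuId::new();
/// // The accessor already returns None when SVM is unsupported (see Note above).
/// if let Some(svm) = cpuid.get_svm_info() {
///     println!("SVM revision {}, nested paging: {}", svm.revision(), svm.has_nested_paging());
/// }
/// ```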
#[derive(PartialEq, Eq, Debug)]
pub struct SvmFeatures {
    eax: u32,
    ebx: u32,
    /// Reserved
    _ecx: u32,
    edx: SvmFeaturesEdx,
}

impl SvmFeatures {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            _ecx: data.ecx,
            edx: SvmFeaturesEdx::from_bits_truncate(data.edx),
        }
    }

    /// SVM revision number.
    pub fn revision(&self) -> u8 {
        get_bits(self.eax, 0, 7) as u8
    }

    /// Number of available address space identifiers (ASIDs).
    pub fn supported_asids(&self) -> u32 {
        self.ebx
    }

    /// Nested paging supported if set.
    pub fn has_nested_paging(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::NP)
    }

    /// Indicates support for LBR Virtualization.
    pub fn has_lbr_virtualization(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::LBR_VIRT)
    }

    /// Indicates support for SVM-Lock if set.
    pub fn has_svm_lock(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::SVML)
    }

    /// Indicates support for NRIP save on #VMEXIT if set.
    pub fn has_nrip(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::NRIPS)
    }

    /// Indicates support for MSR TSC ratio (MSR `0xC000_0104`) if set.
    pub fn has_tsc_rate_msr(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::TSC_RATE_MSR)
    }

    /// Indicates support for VMCB clean bits if set.
    pub fn has_vmcb_clean_bits(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::VMCB_CLEAN)
    }

    /// Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush
    /// only the current ASID's TLB entries.
    ///
    /// Also indicates support for the extended VMCB TLB_Control.
    pub fn has_flush_by_asid(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::FLUSH_BY_ASID)
    }

    /// Indicates support for decode assists if set.
    pub fn has_decode_assists(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::DECODE_ASSISTS)
    }

    /// Indicates support for the pause intercept filter if set.
    pub fn has_pause_filter(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER)
    }

    /// Indicates support for the PAUSE filter cycle count threshold if set.
    pub fn has_pause_filter_threshold(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::PAUSE_FILTER_THRESHOLD)
    }

    /// Support for the AMD advanced virtual interrupt controller if set.
    pub fn has_avic(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::AVIC)
    }

    /// VMSAVE and VMLOAD virtualization supported if set.
    pub fn has_vmsave_virtualization(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::VMSAVE_VIRT)
    }

    /// Virtualized global interrupt flag (GIF) supported if set.
    pub fn has_gif(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::VGIF)
    }

    /// Guest Mode Execution Trap supported if set.
    pub fn has_gmet(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::GMET)
    }

    /// SVM supervisor shadow stack restrictions supported if set.
    pub fn has_sss_check(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::SSS_CHECK)
    }

    /// SPEC_CTRL virtualization supported if set.
    pub fn has_spec_ctrl(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::SPEC_CTRL)
    }

    /// When host `CR4.MCE=1` and guest `CR4.MCE=0`, machine check exceptions (`#MC`) in a
    /// guest do not cause shutdown and are always intercepted if set.
    pub fn has_host_mce_override(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::HOST_MCE_OVERRIDE)
    }

    /// Support for INVLPGB/TLBSYNC hypervisor enable in VMCB and TLBSYNC intercept if
    /// set.
    pub fn has_tlb_ctrl(&self) -> bool {
        self.edx.contains(SvmFeaturesEdx::TLB_CTL)
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct SvmFeaturesEdx: u32 {
        const NP = 1 << 0;
        const LBR_VIRT = 1 << 1;
        const SVML = 1 << 2;
        const NRIPS = 1 << 3;
        const TSC_RATE_MSR = 1 << 4;
        const VMCB_CLEAN = 1 << 5;
        const FLUSH_BY_ASID = 1 << 6;
        const DECODE_ASSISTS = 1 << 7;
        const PAUSE_FILTER = 1 << 10;
        const PAUSE_FILTER_THRESHOLD = 1 << 12;
        const AVIC = 1 << 13;
        const VMSAVE_VIRT = 1 << 15;
        const VGIF = 1 << 16;
        const GMET = 1 << 17;
        const SSS_CHECK = 1 << 19;
        const SPEC_CTRL = 1 << 20;
        const HOST_MCE_OVERRIDE = 1 << 23;
        const TLB_CTL = 1 << 24;
    }
}

/// TLB 1-GiB Pages Information (LEAF=0x8000_0019).
///
/// # Platforms
/// ✅ AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct Tlb1gbPageInfo {
    eax: u32,
    ebx: u32,
    /// Reserved
    _ecx: u32,
    /// Reserved
    _edx: u32,
}

impl Tlb1gbPageInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            _ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// L1 Data TLB associativity for 1-GB pages.
    pub fn dtlb_l1_1gb_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L1 Data TLB number of entries for 1-GB pages.
    pub fn dtlb_l1_1gb_size(&self) -> u8 {
        get_bits(self.eax, 16, 27) as u8
    }

    /// L1 Instruction TLB associativity for 1-GB pages.
    pub fn itlb_l1_1gb_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.eax, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L1 Instruction TLB number of entries for 1-GB pages.
    pub fn itlb_l1_1gb_size(&self) -> u8 {
        get_bits(self.eax, 0, 11) as u8
    }

    /// L2 Data TLB associativity for 1-GB pages.
    pub fn dtlb_l2_1gb_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 28, 31) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Data TLB number of entries for 1-GB pages.
    pub fn dtlb_l2_1gb_size(&self) -> u8 {
        get_bits(self.ebx, 16, 27) as u8
    }

    /// L2 Instruction TLB associativity for 1-GB pages.
    pub fn itlb_l2_1gb_associativity(&self) -> Associativity {
        let assoc_bits = get_bits(self.ebx, 12, 15) as u8;
        Associativity::for_l2(assoc_bits)
    }

    /// L2 Instruction TLB number of entries for 1-GB pages.
    pub fn itlb_l2_1gb_size(&self) -> u8 {
        get_bits(self.ebx, 0, 11) as u8
    }
}

/// Performance Optimization Identifier (LEAF=0x8000_001A).
///
/// # Platforms
/// ✅ AMD ❌ Intel
#[derive(PartialEq, Eq, Debug)]
pub struct PerformanceOptimizationInfo {
    eax: PerformanceOptimizationInfoEax,
    /// Reserved
    _ebx: u32,
    /// Reserved
    _ecx: u32,
    /// Reserved
    _edx: u32,
}

impl PerformanceOptimizationInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: PerformanceOptimizationInfoEax::from_bits_truncate(data.eax),
            _ebx: data.ebx,
            _ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// The internal FP/SIMD execution datapath is 128 bits wide if set.
    pub fn has_fp128(&self) -> bool {
        self.eax.contains(PerformanceOptimizationInfoEax::FP128)
    }

    /// MOVU (move unaligned) SSE instructions are more efficient than
    /// MOVL/MOVH SSE instructions if set.
    pub fn has_movu(&self) -> bool {
        self.eax.contains(PerformanceOptimizationInfoEax::MOVU)
    }

    /// The internal FP/SIMD execution datapath is 256 bits wide if set.
    pub fn has_fp256(&self) -> bool {
        self.eax.contains(PerformanceOptimizationInfoEax::FP256)
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct PerformanceOptimizationInfoEax: u32 {
        const FP128 = 1 << 0;
        const MOVU = 1 << 1;
        const FP256 = 1 << 2;
    }
}

/// Processor Topology Information (LEAF=0x8000_001E).
///
/// # Platforms
/// ✅ AMD ❌ Intel
#[derive(PartialEq, Eq)]
pub struct ProcessorTopologyInfo {
    eax: u32,
    ebx: u32,
    ecx: u32,
    /// Reserved
    _edx: u32,
}

impl ProcessorTopologyInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: data.eax,
            ebx: data.ebx,
            ecx: data.ecx,
            _edx: data.edx,
        }
    }

    /// x2APIC ID
    pub fn x2apic_id(&self) -> u32 {
        self.eax
    }

    /// Core ID
    ///
    /// # Note
    /// `Core ID` means `Compute Unit ID` on AMD Family 15h-16h processors.
    pub fn core_id(&self) -> u8 {
        get_bits(self.ebx, 0, 7) as u8
    }

    /// Threads per core
    ///
    /// # Note
    /// `Threads per Core` means `Cores per Compute Unit` on AMD Family 15h-16h processors.
    pub fn threads_per_core(&self) -> u8 {
        get_bits(self.ebx, 8, 15) as u8 + 1
    }

    /// Node ID
    pub fn node_id(&self) -> u8 {
        get_bits(self.ecx, 0, 7) as u8
    }

    /// Nodes per processor
    pub fn nodes_per_processor(&self) -> u8 {
        get_bits(self.ecx, 8, 10) as u8 + 1
    }
}

impl Debug for ProcessorTopologyInfo {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProcessorTopologyInfo")
            .field("x2apic_id", &self.x2apic_id())
            .field("core_id", &self.core_id())
            .field("threads_per_core", &self.threads_per_core())
            .field("node_id", &self.node_id())
            .field("nodes_per_processor", &self.nodes_per_processor())
            .finish()
    }
}

/// Encrypted Memory Capabilities (LEAF=0x8000_001F).
///
/// # Platforms
/// ✅ AMD ❌ Intel
#[derive(Debug, PartialEq, Eq)]
pub struct MemoryEncryptionInfo {
    eax: MemoryEncryptionInfoEax,
    ebx: u32,
    ecx: u32,
    edx: u32,
}

impl MemoryEncryptionInfo {
    pub(crate) fn new(data: CpuIdResult) -> Self {
        Self {
            eax: MemoryEncryptionInfoEax::from_bits_truncate(data.eax),
            ebx: data.ebx,
            ecx: data.ecx,
            edx: data.edx,
        }
    }

    /// Secure Memory Encryption is supported if set.
    pub fn has_sme(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::SME)
    }

    /// Secure Encrypted Virtualization is supported if set.
    pub fn has_sev(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::SEV)
    }

    /// The Page Flush MSR is available if set.
    pub fn has_page_flush_msr(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::PAGE_FLUSH_MSR)
    }

    /// SEV Encrypted State is supported if set.
    pub fn has_sev_es(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::SEV_ES)
    }

    /// SEV Secure Nested Paging supported if set.
    pub fn has_sev_snp(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::SEV_SNP)
    }

    /// VM Permission Levels supported if set.
    pub fn has_vmpl(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::VMPL)
    }

    /// Hardware cache coherency across encryption domains enforced if set.
    pub fn has_hw_enforced_cache_coh(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::HWENFCACHECOH)
    }

    /// SEV guest execution only allowed from a 64-bit host if set.
    pub fn has_64bit_mode(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::HOST64)
    }

    /// Restricted Injection supported if set.
    pub fn has_restricted_injection(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::RESTINJECT)
    }

    /// Alternate Injection supported if set.
    pub fn has_alternate_injection(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::ALTINJECT)
    }

    /// Full debug state swap supported for SEV-ES guests if set.
    pub fn has_debug_swap(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::DBGSWP)
    }

    /// Disallowing IBS use by the host supported if set.
    pub fn has_prevent_host_ibs(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::PREVHOSTIBS)
    }

    /// Virtual Transparent Encryption supported if set.
    pub fn has_vte(&self) -> bool {
        self.eax.contains(MemoryEncryptionInfoEax::VTE)
    }

    /// C-bit location in page table entry.
    pub fn c_bit_position(&self) -> u8 {
        get_bits(self.ebx, 0, 5) as u8
    }

    /// Physical address bit reduction.
    pub fn physical_address_reduction(&self) -> u8 {
        get_bits(self.ebx, 6, 11) as u8
    }

    /// Number of encrypted guests supported simultaneously.
    pub fn max_encrypted_guests(&self) -> u32 {
        self.ecx
    }

    /// Minimum ASID value for an SEV-enabled, SEV-ES-disabled guest.
    pub fn min_sev_no_es_asid(&self) -> u32 {
        self.edx
    }
}

bitflags! {
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct MemoryEncryptionInfoEax: u32 {
        const SME = 1 << 0;
        const SEV = 1 << 1;
        const PAGE_FLUSH_MSR = 1 << 2;
        const SEV_ES = 1 << 3;
        const SEV_SNP = 1 << 4;
        const VMPL = 1 << 5;
        const HWENFCACHECOH = 1 << 10;
        const HOST64 = 1 << 11;
        const RESTINJECT = 1 << 12;
        const ALTINJECT = 1 << 13;
        const DBGSWP = 1 << 14;
        const PREVHOSTIBS = 1 << 15;
        const VTE = 1 << 16;
    }
}